# Import data
library(readr)
dat <- read_csv("/Users/melissalagunas/Desktop/Lab/Contextualizing Stigma and Trauma/LatineData_FINAL_Dataset_Scales.csv")
## New names:
## • `` -> `...1`
## Rows: 259 Columns: 48
## ── Column specification ────────────────────────────────────────────────
## Delimiter: ","
## chr  (4): GEN_8_TEXT, ETH_7_TEXT, BORN_1, AHELP
## dbl (43): ...1, AGE, GEN, EDU, EMP, INS_1, INS_2, ETH, INC, BORN, BORN_2, PR...
## lgl  (1): EDU_11_TEXT
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
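# The `...1` column is just an auto-named row index left over from the export.
# A minimal optional sketch for quieter re-imports (the relative path below is
# illustrative; substitute your own location):
# dat <- readr::read_csv("LatineData_FINAL_Dataset_Scales.csv",
#                        show_col_types = FALSE)
# readr::spec(dat)  # retrieves the full column specification on demand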
# Data set with only gender (GEN), PTSD symptoms (PCL5), central sensitization
# (CSA), and familismo (PHFS) scores, used for the cutoff analyses below
# Load dplyr package
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
# Selecting the columns from the dataset
d_cutoff <- dat %>%
dplyr::select(GEN, PCL5_AVG, PCL5_total, CSA_AVG, CSA_total, PHFS_AVG, PHFS_total)
# Print the new dataset to verify
print(d_cutoff)
## # A tibble: 259 × 7
## GEN PCL5_AVG PCL5_total CSA_AVG CSA_total PHFS_AVG PHFS_total
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 7 0.75 3 1.28 32 4 20
## 2 2 0 0 0.24 6 5 25
## 3 1 1.25 5 2.08 52 3.2 16
## 4 1 1.25 5 2.76 69 1 5
## 5 2 0 0 0.68 17 5 25
## 6 2 1.5 6 1.64 41 3.8 19
## 7 8 3.5 14 2.04 51 3.8 19
## 8 4 1.5 6 1.71 41 3.8 19
## 9 8 2.25 9 2.16 54 3.4 17
## 10 2 0.25 1 1.04 26 3.4 17
## # ℹ 249 more rows
d <- data.frame(dat$PHFS_AVG, dat$SLESQ_total, dat$CSA_total, dat$PCL5_AVG)
d <- rename(d,
            familism = dat.PHFS_AVG,
            trauma = dat.PCL5_AVG,
            central_sensitization = dat.CSA_total,
            stressful_life_events = dat.SLESQ_total)
# Remove rows with missing values (listwise deletion)
d <- na.omit(d)
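# A quick sketch to record how many cases listwise deletion removed
# (here none: dat and d both have 259 rows, matching the 259 observations
# reported in the model output below)
n_dropped <- nrow(dat) - nrow(d)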
# Load packages for modeling and plotting
library(lavaan)
## This is lavaan 0.6-16
## lavaan is FREE software! Please report any bugs.
library(semPlot)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ forcats 1.0.0 ✔ stringr 1.5.0
## ✔ ggplot2 3.4.4 ✔ tibble 3.2.1
## ✔ lubridate 1.9.3 ✔ tidyr 1.3.0
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(psych)
##
## Attaching package: 'psych'
##
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
##
## The following object is masked from 'package:lavaan':
##
## cor2cov
library(jtools)
library(interactions)
library(ggplot2)
library(tidySEM)
## Loading required package: OpenMx
## OpenMx may run faster if it is compiled to take advantage of multiple cores.
##
## Attaching package: 'OpenMx'
##
## The following object is masked from 'package:psych':
##
## tr
##
## Registered S3 method overwritten by 'tidySEM':
## method from
## predict.MxModel OpenMx
##
## Attaching package: 'tidySEM'
##
## The following object is masked from 'package:jtools':
##
## get_data
# Install and load required packages
if (!require("WebPower")) install.packages("WebPower")
## Loading required package: WebPower
## Loading required package: MASS
##
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
##
## select
## Loading required package: lme4
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
## The following objects are masked from 'package:OpenMx':
##
## %&%, expm
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
## Loading required package: parallel
## Loading required package: PearsonDS
library(WebPower)
# Set expected effect sizes based on the study results
a <- 0.3 # Expected effect of stressful life events on PTSD symptoms (path a)
b <- 0.4 # Expected effect of PTSD symptoms on central sensitization (path b)
# Set desired power and significance level
desired_power <- 0.80
alpha <- 0.05
# Perform power analysis for mediation model
power_analysis <- wp.mediation(n = NULL, power = desired_power,
alpha = alpha, a = a, b = b)
# Print results
print(power_analysis)
## Power for simple mediation
##
## n power a b varx varm vary alpha
## 133.2677 0.8 0.3 0.4 1 1 1 0.05
##
## URL: http://psychstat.org/mediation
# Calculate power for a range of sample sizes
sample_sizes <- seq(100, 500, by = 50)
power_results <- sapply(sample_sizes, function(n) {
wp.mediation(n = n, alpha = alpha, a = a, b = b)$power
})
# Plot sensitivity analysis
plot(sample_sizes, power_results, type = "b",
xlab = "Sample Size", ylab = "Power",
main = "Power vs. Sample Size for Mediation Model")
abline(h = 0.80, col = "red", lty = 2)
# Calculate power for a range of effect sizes
a_values <- seq(0.1, 0.5, by = 0.1)
b_values <- seq(0.2, 0.6, by = 0.1)
sample_size <- ceiling(power_analysis$n) # Use the calculated sample size
power_matrix <- outer(a_values, b_values, function(a, b) {
wp.mediation(n = sample_size, alpha = alpha, a = a, b = b)$power
})
# Plot heatmap of power for different effect sizes
# Rowv/Colv = NA and scale = "none" keep the raw power surface; by default
# heatmap() reorders rows/columns by dendrogram and standardizes each row
heatmap(power_matrix,
        Rowv = NA, Colv = NA, scale = "none",
        labRow = a_values, labCol = b_values,
        xlab = "Effect b", ylab = "Effect a",
        main = paste("Power for Sample Size =", sample_size),
        col = heat.colors(100))
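# An alternative sketch: a filled contour of the same power surface, with
# numeric axes that can be easier to read than heatmap()'s row/column labels
filled.contour(a_values, b_values, power_matrix,
               xlab = "Effect a", ylab = "Effect b",
               main = paste("Power for Sample Size =", sample_size))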
# Correlation analysis
table <- apaTables::apa.cor.table(d, table.number = 1, show.sig.stars = TRUE,
landscape = TRUE, filename = "table.doc")
## Registered S3 methods overwritten by 'broom':
## method from
## tidy.glht jtools
## tidy.summary.glht jtools
print(table)
##
##
## Table 1
##
## Means, standard deviations, and correlations with confidence intervals
##
##
## Variable M SD 1 2 3
## 1. familism 3.67 0.99
##
## 2. stressful_life_events 3.70 2.85 -.24**
## [-.35, -.12]
##
## 3. central_sensitization 40.28 18.17 -.35** .34**
## [-.46, -.24] [.23, .45]
##
## 4. trauma 1.50 0.98 -.38** .32** .69**
## [-.48, -.27] [.20, .42] [.62, .75]
##
##
## Note. M and SD are used to represent mean and standard deviation, respectively.
## Values in square brackets indicate the 95% confidence interval.
## The confidence interval is a plausible range of population correlations
## that could have caused the sample correlation (Cumming, 2014).
## * indicates p < .05. ** indicates p < .01.
##
psych::pairs.panels(d)
# Also write the correlation table to a Word document for the manuscript
# (same table as printed above; assigning suppresses the duplicate printout)
apa_tbl <- apaTables::apa.cor.table(d, table.number = 1, show.sig.stars = TRUE,
                                    landscape = TRUE, filename = "Lewis_Corr.doc")
## CENTRAL SENSITIZATION
# Count participants with CSA_total scores from 0 to 29
count_0_to_29 <- sum(d_cutoff$CSA_total >= 0 & d_cutoff$CSA_total <= 29)
# Count participants with scores above 30 (scores of exactly 30 fall in
# neither bin, so the two counts sum to 253 rather than 259)
count_over_30 <- sum(d_cutoff$CSA_total > 30)
# Print the counts
print(count_0_to_29)
## [1] 70
print(count_over_30)
## [1] 183
# Create new columns for clinically significant CSA_total scores based on gender-specific cutoffs
d_cutoff <- d_cutoff %>%
mutate(
CSA_significant = case_when(
GEN == "1" & CSA_total >= 33 ~ "Clinically Significant",
GEN == "2" & CSA_total >= 25 ~ "Clinically Significant",
TRUE ~ "Not Clinically Significant"
)
)
# Frequency table for gender and clinically significant CSA_total scores
frequency_table <- d_cutoff %>%
group_by(GEN, CSA_significant) %>%
summarize(Count = n(), .groups = 'drop') %>%
pivot_wider(names_from = CSA_significant, values_from = Count, values_fill = list(Count = 0))
print(frequency_table)
## # A tibble: 8 × 3
## GEN `Clinically Significant` `Not Clinically Significant`
## <dbl> <int> <int>
## 1 1 94 33
## 2 2 76 32
## 3 3 0 2
## 4 4 0 5
## 5 5 0 8
## 6 6 0 2
## 7 7 0 2
## 8 8 0 5
# Frequency of clinically significant CSA_total scores by gender
gender_count <- d_cutoff %>%
group_by(GEN) %>%
summarize(Clinically_Significant_Count = sum(CSA_significant == "Clinically Significant"), .groups = 'drop')
print(gender_count)
## # A tibble: 8 × 2
## GEN Clinically_Significant_Count
## <dbl> <int>
## 1 1 94
## 2 2 76
## 3 3 0
## 4 4 0
## 5 5 0
## 6 6 0
## 7 7 0
## 8 8 0
# Filter for Cisgender Woman and Cisgender Man
d_cutoff_filtered <- d_cutoff %>%
filter(GEN %in% c("1", "2"))
# Ensure GEN is a factor
d_cutoff_filtered$GEN <- factor(d_cutoff_filtered$GEN, levels = c("1", "2"))
# Welch two-sample t-test (appropriate if CSA_total scores are roughly normal)
t_test_result <- t.test(CSA_total ~ GEN, data = d_cutoff_filtered)
print(t_test_result)
##
## Welch Two Sample t-test
##
## data: CSA_total by GEN
## t = 4.1961, df = 230.07, p-value = 3.88e-05
## alternative hypothesis: true difference in means between group 1 and group 2 is not equal to 0
## 95 percent confidence interval:
## 5.143769 14.250806
## sample estimates:
## mean in group 1 mean in group 2
## 44.14173 34.44444
# Mann-Whitney U test (if CSA_total is not normally distributed)
wilcox_test_result <- wilcox.test(CSA_total ~ GEN, data = d_cutoff_filtered)
print(wilcox_test_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: CSA_total by GEN
## W = 8930.5, p-value = 6.6e-05
## alternative hypothesis: true location shift is not equal to 0
# Compute standard deviations for each group
sd_cisgender_women <- d_cutoff_filtered %>%
filter(GEN == "1") %>%
summarize(SD = sd(CSA_total)) %>%
pull(SD)
sd_cisgender_men <- d_cutoff_filtered %>%
filter(GEN == "2") %>%
summarize(SD = sd(CSA_total)) %>%
pull(SD)
# Print standard deviations
sd_cisgender_women
## [1] 18.12948
sd_cisgender_men
## [1] 17.24281
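# To report a standardized effect size alongside these tests, Cohen's d can be
# computed from the group means and pooled SD. A minimal sketch (the helper
# name is ours, not from a package); it can be reused below for PCL5_total
# and PHFS_AVG:
cohens_d <- function(x, g) {
  x1 <- x[g == levels(g)[1]]
  x2 <- x[g == levels(g)[2]]
  sp <- sqrt(((length(x1) - 1) * var(x1) + (length(x2) - 1) * var(x2)) /
               (length(x1) + length(x2) - 2))
  (mean(x1) - mean(x2)) / sp
}
d_csa <- cohens_d(d_cutoff_filtered$CSA_total, d_cutoff_filtered$GEN)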
## PTSD SYMPTOMS
# Count participants with PCL5_total scores between 31 and 35
count_31_to_35 <- sum(d_cutoff$PCL5_total >= 31 & d_cutoff$PCL5_total <= 35)
# Count participants with scores over 35
count_over_35 <- sum(d_cutoff$PCL5_total > 35)
# Print the count
print(count_31_to_35)
## [1] 0
print(count_over_35)
## [1] 0
# Flag clinically significant PCL5_total scores (31-35) for cisgender women
# and men; note the window is identical for both groups, and no scores in
# this sample reach it (see the counts above)
d_cutoff <- d_cutoff %>%
mutate(
PCL5_significant = case_when(
GEN == "1" & PCL5_total >= 31 & PCL5_total <= 35 ~ "Clinically Significant",
GEN == "2" & PCL5_total >= 31 & PCL5_total <= 35 ~ "Clinically Significant",
TRUE ~ "Not Clinically Significant"
)
)
# Frequency table for gender and clinically significant PCL5_total scores
frequency_table <- d_cutoff %>%
group_by(GEN, PCL5_significant) %>%
summarize(Count = n(), .groups = 'drop') %>%
pivot_wider(names_from = PCL5_significant, values_from = Count, values_fill = list(Count = 0))
print(frequency_table)
## # A tibble: 8 × 2
## GEN `Not Clinically Significant`
## <dbl> <int>
## 1 1 127
## 2 2 108
## 3 3 2
## 4 4 5
## 5 5 8
## 6 6 2
## 7 7 2
## 8 8 5
# Frequency of clinically significant PCL5_total scores by gender
gender_count <- d_cutoff %>%
group_by(GEN) %>%
summarize(Clinically_Significant_Count = sum(PCL5_significant == "Clinically Significant"), .groups = 'drop')
print(gender_count)
## # A tibble: 8 × 2
## GEN Clinically_Significant_Count
## <dbl> <int>
## 1 1 0
## 2 2 0
## 3 3 0
## 4 4 0
## 5 5 0
## 6 6 0
## 7 7 0
## 8 8 0
# Filter for Cisgender Woman and Cisgender Man
d_cutoff_filtered <- d_cutoff %>%
filter(GEN %in% c("1", "2"))
# Ensure GEN is a factor
d_cutoff_filtered$GEN <- factor(d_cutoff_filtered$GEN, levels = c("1", "2"))
# Welch two-sample t-test (appropriate if PCL5_total scores are roughly normal)
t_test_result <- t.test(PCL5_total ~ GEN, data = d_cutoff_filtered)
print(t_test_result)
##
## Welch Two Sample t-test
##
## data: PCL5_total by GEN
## t = 1.3875, df = 229.13, p-value = 0.1666
## alternative hypothesis: true difference in means between group 1 and group 2 is not equal to 0
## 95 percent confidence interval:
## -0.3024787 1.7425487
## sample estimates:
## mean in group 1 mean in group 2
## 6.275591 5.555556
# Mann-Whitney U test (if PCL5_total is not normally distributed)
wilcox_test_result <- wilcox.test(PCL5_total ~ GEN, data = d_cutoff_filtered)
print(wilcox_test_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: PCL5_total by GEN
## W = 7591.5, p-value = 0.1568
## alternative hypothesis: true location shift is not equal to 0
# Compute standard deviations for each group
sd_cisgender_women <- d_cutoff_filtered %>%
filter(GEN == "1") %>%
summarize(SD = sd(PCL5_total)) %>%
pull(SD)
sd_cisgender_men <- d_cutoff_filtered %>%
filter(GEN == "2") %>%
summarize(SD = sd(PCL5_total)) %>%
pull(SD)
# Print standard deviations
sd_cisgender_women
## [1] 4.034921
sd_cisgender_men
## [1] 3.903828
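# The cohens_d() helper sketched above applies here as well, e.g.
# cohens_d(d_cutoff_filtered$PCL5_total, d_cutoff_filtered$GEN), and likewise
# for PHFS_AVG in the next block.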
# Filter for Cisgender Woman and Cisgender Man
d_cutoff_filtered <- d_cutoff %>%
filter(GEN %in% c("1", "2"))
# Ensure GEN is a factor
d_cutoff_filtered$GEN <- factor(d_cutoff_filtered$GEN, levels = c("1", "2"))
# Welch two-sample t-test (appropriate if PHFS_AVG scores are roughly normal)
t_test_result <- t.test(PHFS_AVG ~ GEN, data = d_cutoff_filtered)
print(t_test_result)
##
## Welch Two Sample t-test
##
## data: PHFS_AVG by GEN
## t = -1.8269, df = 231.52, p-value = 0.06899
## alternative hypothesis: true difference in means between group 1 and group 2 is not equal to 0
## 95 percent confidence interval:
## -0.48863758 0.01844219
## sample estimates:
## mean in group 1 mean in group 2
## 3.588976 3.824074
# Mann-Whitney U test (if PHFS_AVG is not normally distributed)
wilcox_test_result <- wilcox.test(PHFS_AVG ~ GEN, data = d_cutoff_filtered)
print(wilcox_test_result)
##
## Wilcoxon rank sum test with continuity correction
##
## data: PHFS_AVG by GEN
## W = 5906, p-value = 0.06591
## alternative hypothesis: true location shift is not equal to 0
# Compute standard deviations for each group
sd_cisgender_women <- d_cutoff_filtered %>%
filter(GEN == "1") %>%
summarize(SD = sd(PHFS_AVG)) %>%
pull(SD)
sd_cisgender_men <- d_cutoff_filtered %>%
filter(GEN == "2") %>%
summarize(SD = sd(PHFS_AVG)) %>%
pull(SD)
# Print standard deviations
sd_cisgender_women
## [1] 1.026415
sd_cisgender_men
## [1] 0.9447217
# Y = central sensitization; X = stressful life events; W = familism
Mod_a_path <- lm(central_sensitization ~ stressful_life_events * familism, data = d)
jtools::summ(Mod_a_path, digits = 3)
## MODEL INFO:
## Observations: 259
## Dependent Variable: central_sensitization
## Type: OLS linear regression
##
## MODEL FIT:
## F(3,255) = 20.830, p = 0.000
## R² = 0.197
## Adj. R² = 0.187
##
## Standard errors: OLS
## ----------------------------------------------------------------------
## Est. S.E. t val. p
## ------------------------------------ -------- ------- -------- -------
## (Intercept) 56.024 7.770 7.210 0.000
## stressful_life_events 1.154 1.466 0.787 0.432
## familism -6.009 1.947 -3.086 0.002
## stressful_life_events:familism 0.158 0.382 0.414 0.679
## ----------------------------------------------------------------------
# Graph the interaction
interactions::interact_plot(Mod_a_path, pred = stressful_life_events, modx = familism) +
ylim(0, 95)
# Probe the interaction with simple slopes
interactions::sim_slopes(Mod_a_path, pred = stressful_life_events, modx = familism)
## JOHNSON-NEYMAN INTERVAL
##
## When familism is INSIDE the interval [2.01, 6.44], the slope of
## stressful_life_events is p < .05.
##
## Note: The range of observed values of familism is [1.00, 5.00]
##
## SIMPLE SLOPES ANALYSIS
##
## Slope of stressful_life_events when familism = 2.683385 (- 1 SD):
##
## Est. S.E. t val. p
## ------ ------ -------- ------
## 1.58 0.54 2.93 0.00
##
## Slope of stressful_life_events when familism = 3.671815 (Mean):
##
## Est. S.E. t val. p
## ------ ------ -------- ------
## 1.73 0.37 4.71 0.00
##
## Slope of stressful_life_events when familism = 4.660244 (+ 1 SD):
##
## Est. S.E. t val. p
## ------ ------ -------- ------
## 1.89 0.51 3.67 0.00
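# The Johnson-Neyman interval above can also be plotted; this call shades the
# region of familism where the slope of stressful_life_events is significant
# (it reprints the same interval as text alongside the figure)
interactions::johnson_neyman(Mod_a_path, pred = stressful_life_events,
                             modx = familism, alpha = 0.05)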
# Y = trauma symptoms; X = stressful life events; W = familism
Mod_b_path <- lm(trauma ~ stressful_life_events * familism, data = d)
jtools::summ(Mod_b_path, digits = 3)
## MODEL INFO:
## Observations: 259
## Dependent Variable: trauma
## Type: OLS linear regression
##
## MODEL FIT:
## F(3,255) = 21.845, p = 0.000
## R² = 0.204
## Adj. R² = 0.195
##
## Standard errors: OLS
## ----------------------------------------------------------------------
## Est. S.E. t val. p
## ------------------------------------ -------- ------- -------- -------
## (Intercept) 2.807 0.418 6.706 0.000
## stressful_life_events -0.014 0.079 -0.179 0.858
## familism -0.434 0.105 -4.134 0.000
## stressful_life_events:familism 0.026 0.021 1.272 0.204
## ----------------------------------------------------------------------
# Graph the interaction
interactions::interact_plot(Mod_b_path, pred = stressful_life_events, modx = familism) +
ylim(0, 13)
# Probe the interaction with simple slopes
interactions::sim_slopes(Mod_b_path, pred = stressful_life_events, modx = familism)
## JOHNSON-NEYMAN INTERVAL
##
## When familism is INSIDE the interval [2.70, 9.28], the slope of
## stressful_life_events is p < .05.
##
## Note: The range of observed values of familism is [1.00, 5.00]
##
## SIMPLE SLOPES ANALYSIS
##
## Slope of stressful_life_events when familism = 2.683385 (- 1 SD):
##
## Est. S.E. t val. p
## ------ ------ -------- ------
## 0.06 0.03 1.93 0.05
##
## Slope of stressful_life_events when familism = 3.671815 (Mean):
##
## Est. S.E. t val. p
## ------ ------ -------- ------
## 0.08 0.02 4.13 0.00
##
## Slope of stressful_life_events when familism = 4.660244 (+ 1 SD):
##
## Est. S.E. t val. p
## ------ ------ -------- ------
## 0.11 0.03 3.89 0.00
# Y = central sensitization; X = stressful life events; M = trauma symptoms
LMedModel <- "
central_sensitization ~ b*trauma + c_p*stressful_life_events
trauma ~ a*stressful_life_events
#intercepts
trauma ~ trauma.mean*1
central_sensitization ~ central_sensitization.mean*1
indirect := a*b
direct := c_p
total_c := c_p + (a*b)
"
set.seed(230925) # required for reproducible results: the bootstrap SEs and CIs are based on random resampling
LMed_fit <- lavaan::sem(LMedModel, data = d, se = "bootstrap", missing = "fiml")
LMed_Sum <- lavaan::summary(LMed_fit, standardized = T, rsq = T, ci = TRUE)
LMed_ParEsts <- lavaan::parameterEstimates(LMed_fit, boot.ci.type = "bca.simple",
standardized = TRUE)
LMed_Sum
## lavaan 0.6.16 ended normally after 1 iteration
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 7
##
## Number of observations 259
## Number of missing patterns 1
##
## Model Test User Model:
##
## Test statistic 0.000
## Degrees of freedom 0
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 1000
## Number of successful bootstrap draws 1000
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## central_sensitization ~
## trauma (b) 12.030 0.918 13.105 0.000 10.323 13.866
## strssf__ (c_p) 0.861 0.326 2.641 0.008 0.156 1.462
## trauma ~
## strssf__ (a) 0.109 0.021 5.180 0.000 0.068 0.153
## Std.lv Std.all
##
## 12.030 0.651
## 0.861 0.135
##
## 0.109 0.317
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## .trauma (trm.) 1.096 0.095 11.569 0.000 0.916 1.297
## .cntrl_s (cn_.) 19.033 1.616 11.777 0.000 15.628 22.188
## Std.lv Std.all
## 1.096 1.117
## 19.033 1.049
##
## Variances:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## .centrl_snstztn 165.176 13.792 11.976 0.000 136.798 190.122
## .trauma 0.867 0.063 13.699 0.000 0.736 0.990
## Std.lv Std.all
## 165.176 0.502
## 0.867 0.899
##
## R-Square:
## Estimate
## centrl_snstztn 0.498
## trauma 0.101
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## indirect 1.315 0.282 4.671 0.000 0.804 1.940
## direct 0.861 0.326 2.640 0.008 0.156 1.462
## total_c 2.176 0.381 5.713 0.000 1.408 2.933
## Std.lv Std.all
## 1.315 0.207
## 0.861 0.135
## 2.176 0.342
LMed_ParEsts
## lhs op rhs label
## 1 central_sensitization ~ trauma b
## 2 central_sensitization ~ stressful_life_events c_p
## 3 trauma ~ stressful_life_events a
## 4 trauma ~1 trauma.mean
## 5 central_sensitization ~1 central_sensitization.mean
## 6 central_sensitization ~~ central_sensitization
## 7 trauma ~~ trauma
## 8 stressful_life_events ~~ stressful_life_events
## 9 stressful_life_events ~1
## 10 indirect := a*b indirect
## 11 direct := c_p direct
## 12 total_c := c_p+(a*b) total_c
## est se z pvalue ci.lower ci.upper std.lv std.all std.nox
## 1 12.030 0.918 13.105 0.000 10.047 13.711 12.030 0.651 0.651
## 2 0.861 0.326 2.641 0.008 0.204 1.487 0.861 0.135 0.047
## 3 0.109 0.021 5.180 0.000 0.067 0.152 0.109 0.317 0.111
## 4 1.096 0.095 11.569 0.000 0.912 1.285 1.096 1.117 1.117
## 5 19.033 1.616 11.777 0.000 15.968 22.355 19.033 1.049 1.049
## 6 165.176 13.792 11.976 0.000 142.808 200.030 165.176 0.502 0.502
## 7 0.867 0.063 13.699 0.000 0.756 1.012 0.867 0.899 0.899
## 8 8.116 0.000 NA NA 8.116 8.116 8.116 1.000 8.116
## 9 3.703 0.000 NA NA 3.703 3.703 3.703 1.300 3.703
## 10 1.315 0.282 4.671 0.000 0.799 1.920 1.315 0.207 0.073
## 11 0.861 0.326 2.640 0.008 0.204 1.487 0.861 0.135 0.047
## 12 2.176 0.381 5.713 0.000 1.419 2.952 2.176 0.342 0.120
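# semPlot is loaded above but not otherwise used; a path diagram is a quick
# way to sanity-check the mediation specification. A minimal sketch (the
# layout and sizing choices are a matter of taste):
semPlot::semPaths(LMed_fit, what = "est", layout = "tree2",
                  edge.label.cex = 0.9, sizeMan = 10)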
# Y = central sensitization, X = stressful life events, M = trauma symptoms, W = familismo
Combined <- '
# equations
trauma ~ a1*stressful_life_events + a2*familism + a3*stressful_life_events:familism
central_sensitization ~ c_p1*stressful_life_events + c_p2*familism + c_p3*stressful_life_events:familism + b*trauma
# intercepts
trauma ~ trauma.mean*1
central_sensitization ~ central_sensitization.mean*1
# mean and variance of W, needed for the simple slopes
familism ~ familism.mean*1
familism ~~ familism.var*familism
# index of moderated mediation: the a*b product built from the label of
# whichever path carries the moderation (here the a path, via a3)
imm := a3*b
# conditional indirect effects: the unmoderated a1*b product plus the imm
# scaled by the chosen level of W
indirect.SDbelow := a1*b + imm*(familism.mean - sqrt(familism.var))
indirect.mean := a1*b + imm*(familism.mean)
indirect.SDabove := a1*b + imm*(familism.mean + sqrt(familism.var))
# the direct effect is also moderated, so compute it from c_p1 and c_p3
direct.SDbelow := c_p1 + c_p3*(familism.mean - sqrt(familism.var))
direct.Smean := c_p1 + c_p3*(familism.mean)
direct.SDabove := c_p1 + c_p3*(familism.mean + sqrt(familism.var))
'
set.seed(230925) # required for reproducible results: the bootstrap SEs and CIs are based on random resampling
Combined_fit <- lavaan::sem(Combined, data = d, se = "bootstrap", missing = 'fiml', bootstrap = 1000)
## Warning in lav_partable_vnames(FLAT, "ov.x", warn = TRUE): lavaan WARNING:
## model syntax contains variance/covariance/intercept formulas
## involving (an) exogenous variable(s): [familism]; These variables
## will now be treated as random introducing additional free
## parameters. If you wish to treat those variables as fixed, remove
## these formulas from the model syntax. Otherwise, consider adding
## the fixed.x = FALSE option.
cFITsum <- lavaan::summary(Combined_fit, standardized = TRUE, rsq=T, ci=TRUE)
cParamEsts <- lavaan::parameterEstimates(Combined_fit, boot.ci.type = "bca.simple", standardized=TRUE)
cFITsum
## lavaan 0.6.16 ended normally after 8 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 13
##
## Number of observations 259
## Number of missing patterns 1
##
## Model Test User Model:
##
## Test statistic 328.901
## Degrees of freedom 2
## P-value (Chi-square) 0.000
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 1000
## Number of successful bootstrap draws 1000
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## trauma ~
## strss__ (a1) -0.014 0.089 -0.159 0.873 -0.195 0.148
## familsm (a2) -0.434 0.102 -4.248 0.000 -0.636 -0.239
## strs__: (a3) 0.026 0.023 1.158 0.247 -0.016 0.072
## central_sensitization ~
## strss__ (c_p1) 1.316 1.196 1.101 0.271 -0.924 3.786
## familsm (c_p2) -1.022 1.613 -0.634 0.526 -4.183 2.337
## strs__: (c_p3) -0.143 0.328 -0.436 0.663 -0.792 0.507
## trauma (b) 11.504 0.991 11.605 0.000 9.649 13.576
## Std.lv Std.all
##
## -0.014 -0.040
## -0.434 -0.425
## 0.026 0.280
##
## 1.316 0.209
## -1.022 -0.056
## -0.143 -0.086
## 11.504 0.643
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## .trauma (trm.) 2.807 0.411 6.828 0.000 2.031 3.668
## .cntrl_s (cn_.) 23.738 6.729 3.528 0.000 9.897 36.572
## familsm (fml.) 3.672 0.060 61.358 0.000 3.557 3.781
## Std.lv Std.all
## 2.807 2.792
## 23.738 1.321
## 3.672 3.722
##
## Variances:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## familsm (fml.) 0.973 0.087 11.161 0.000 0.804 1.153
## .trauma 0.767 0.060 12.712 0.000 0.637 0.875
## .cntrl_s 162.821 13.733 11.857 0.000 134.182 187.165
## Std.lv Std.all
## 0.973 1.000
## 0.767 0.759
## 162.821 0.504
##
## R-Square:
## Estimate
## trauma 0.241
## centrl_snstztn 0.496
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|) ci.lower ci.upper
## imm 0.301 0.265 1.135 0.256 -0.179 0.851
## indirect.SDblw 0.646 0.380 1.699 0.089 -0.123 1.374
## indirect.mean 0.943 0.252 3.735 0.000 0.487 1.470
## indirect.SDabv 1.240 0.342 3.624 0.000 0.612 1.975
## direct.SDbelow 0.933 0.416 2.242 0.025 0.157 1.832
## direct.Smean 0.792 0.317 2.497 0.013 0.143 1.399
## direct.SDabove 0.651 0.486 1.338 0.181 -0.333 1.607
## Std.lv Std.all
## 0.301 0.180
## 0.646 0.465
## 0.943 0.645
## 1.240 0.826
## 0.933 -0.024
## 0.792 -0.110
## 0.651 -0.195
cParamEsts
## lhs op
## 1 trauma ~
## 2 trauma ~
## 3 trauma ~
## 4 central_sensitization ~
## 5 central_sensitization ~
## 6 central_sensitization ~
## 7 central_sensitization ~
## 8 trauma ~1
## 9 central_sensitization ~1
## 10 familism ~1
## 11 familism ~~
## 12 trauma ~~
## 13 central_sensitization ~~
## 14 stressful_life_events ~~
## 15 stressful_life_events ~~
## 16 stressful_life_events:familism ~~
## 17 stressful_life_events ~1
## 18 stressful_life_events:familism ~1
## 19 imm :=
## 20 indirect.SDbelow :=
## 21 indirect.mean :=
## 22 indirect.SDabove :=
## 23 direct.SDbelow :=
## 24 direct.Smean :=
## 25 direct.SDabove :=
## rhs label
## 1 stressful_life_events a1
## 2 familism a2
## 3 stressful_life_events:familism a3
## 4 stressful_life_events c_p1
## 5 familism c_p2
## 6 stressful_life_events:familism c_p3
## 7 trauma b
## 8 trauma.mean
## 9 central_sensitization.mean
## 10 familism.mean
## 11 familism familism.var
## 12 trauma
## 13 central_sensitization
## 14 stressful_life_events
## 15 stressful_life_events:familism
## 16 stressful_life_events:familism
## 17
## 18
## 19 a3*b imm
## 20 a1*b+imm*(familism.mean-sqrt(familism.var)) indirect.SDbelow
## 21 a1*b+imm*(familism.mean) indirect.mean
## 22 a1*b+imm*(familism.mean+sqrt(familism.var)) indirect.SDabove
## 23 c_p1+c_p3*(familism.mean-sqrt(familism.var)) direct.SDbelow
## 24 c_p1+c_p3*(familism.mean) direct.Smean
## 25 c_p1+c_p3*(familism.mean+sqrt(familism.var)) direct.SDabove
## est se z pvalue ci.lower ci.upper std.lv std.all std.nox
## 1 -0.014 0.089 -0.159 0.873 -0.200 0.145 -0.014 -0.040 -0.014
## 2 -0.434 0.102 -4.248 0.000 -0.634 -0.238 -0.434 -0.425 -1.229
## 3 0.026 0.023 1.158 0.247 -0.015 0.073 0.026 0.280 0.026
## 4 1.316 1.196 1.101 0.271 -1.065 3.667 1.316 0.209 0.073
## 5 -1.022 1.613 -0.634 0.526 -4.306 2.038 -1.022 -0.056 -0.162
## 6 -0.143 0.328 -0.436 0.663 -0.781 0.535 -0.143 -0.086 -0.008
## 7 11.504 0.991 11.605 0.000 9.550 13.405 11.504 0.643 0.643
## 8 2.807 0.411 6.828 0.000 2.035 3.670 2.807 2.792 2.792
## 9 23.738 6.729 3.528 0.000 10.981 37.859 23.738 1.321 1.321
## 10 3.672 0.060 61.358 0.000 3.566 3.793 3.672 3.722 3.722
## 11 0.973 0.087 11.161 0.000 0.814 1.164 0.973 1.000 1.000
## 12 0.767 0.060 12.712 0.000 0.658 0.897 0.767 0.759 0.759
## 13 162.821 13.733 11.857 0.000 141.794 199.547 162.821 0.504 0.504
## 14 8.116 0.000 NA NA 8.116 8.116 8.116 1.000 8.116
## 15 27.334 0.000 NA NA 27.334 27.334 27.334 0.891 27.334
## 16 115.963 0.000 NA NA 115.963 115.963 115.963 1.000 115.963
## 17 3.703 0.000 NA NA 3.703 3.703 3.703 1.300 3.703
## 18 12.934 0.000 NA NA 12.934 12.934 12.934 1.201 12.934
## 19 0.301 0.265 1.135 0.256 -0.160 0.873 0.301 0.180 0.017
## 20 0.646 0.380 1.699 0.089 -0.181 1.356 0.646 0.465 0.037
## 21 0.943 0.252 3.735 0.000 0.476 1.464 0.943 0.645 0.053
## 22 1.240 0.342 3.624 0.000 0.594 1.960 1.240 0.826 0.070
## 23 0.933 0.416 2.242 0.025 0.150 1.822 0.933 -0.024 0.052
## 24 0.792 0.317 2.497 0.013 0.169 1.420 0.792 -0.110 0.044
## 25 0.651 0.486 1.338 0.181 -0.226 1.678 0.651 -0.195 0.036
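# As a closing check, the conditional indirect effect implied by the defined
# parameters (a1*b + imm*W) can be traced across the observed range of
# familism. A minimal sketch using the labeled estimates from the fitted
# model (lavaan's coef() returns free parameters under their labels):
est <- coef(Combined_fit)
w <- seq(1, 5, by = 0.1) # observed range of familism
ind <- est["a1"] * est["b"] + (est["a3"] * est["b"]) * w
plot(w, ind, type = "l",
     xlab = "Familism (W)", ylab = "Conditional indirect effect",
     main = "Indirect effect of stressful life events by familism")
abline(h = 0, lty = 2, col = "gray")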