library(haven)
library(RMediation)
library(lme4)
library(lmtest)
library(lmerTest)
library(dplyr)
library(ggplot2)
# set working directory
setwd("/Users/lawrencehouston/Library/CloudStorage/Dropbox/Ambient Discrimination Study/R Syntax")
# Study 1
df <- read_sav("~/Library/CloudStorage/Dropbox/Ambient Discrimination Study/vignette experiment/UH students Experiment.sav")
# Study 2
df_Study2 <- read_sav("~/Library/CloudStorage/Dropbox/Ambient Discrimination Study/survey/Discrm & DSE Survey 1&2 - Clean.sav")
# Study 1
df$X  <- df$ambdis  # predictor: ambient discrimination
df$M1 <- df$hdrapp  # mediator 1: hindrance appraisal
df$M2 <- df$chgapp  # mediator 2: challenge appraisal
df$Y1 <- df$SCWDRL  # outcome 1
df$Y2 <- df$Dvoice  # outcome 2
df$C  <- df$pdb     # covariate
# Study 2
## Grand-mean center ambient discrimination and intergroup management self-efficacy
df_Study2 <- df_Study2 %>%
  mutate(AMBDIS_c  = AMBDIS  - mean(AMBDIS,  na.rm = TRUE),
         DSEMGMT_c = DSEMGMT - mean(DSEMGMT, na.rm = TRUE))
df_Study2$X  <- df_Study2$AMBDIS_c          # predictor: ambient discrimination (centered)
df_Study2$M1 <- df_Study2$HDRAPS            # mediator 1: hindrance appraisal
df_Study2$M2 <- df_Study2$CHGAPS            # mediator 2: challenge appraisal
df_Study2$Y1 <- df_Study2$scwd              # outcome 1
df_Study2$Y2 <- df_Study2$dv                # outcome 2
df_Study2$W  <- df_Study2$DSEMGMT_c         # moderator: intergroup management self-efficacy (centered)
df_Study2$XW <- df_Study2$X * df_Study2$W   # predictor x moderator interaction term
df_Study2$C  <- df_Study2$pdb               # covariate
Note: The model below omits the direct effects of X on Y1 and Y2 (full mediation), which fits better; with the direct effects included, the model has zero degrees of freedom (it is saturated). A sketch of this comparison follows the model fits below.
model <- '
  # a-paths: X -> mediators (controlling for C)
  M1 ~ C + a1*X
  M2 ~ C + a2*X
  # b-paths: mediators -> outcomes (controlling for C)
  Y1 ~ C + b11*M1 + b21*M2
  Y2 ~ C + b12*M1 + b22*M2
  # residual covariances
  M1 ~~ M2
  Y1 ~~ Y2
'
fit_Study1 <- sem(model, data = df, se = "bootstrap", bootstrap = 5000)
fit_Study2 <- sem(model, data = df_Study2, se = "bootstrap", bootstrap = 5000)
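A minimal sketch of the comparison referenced in the note above (not part of the original analysis): refit the Study 1 model with the direct effects of X on Y1 and Y2 added and test it against the full-mediation model. The names model_direct and fit_direct_Study1 are illustrative; standard errors are used only because the chi-square comparison does not depend on the bootstrap.
# Alternative model adding direct effects of X on both outcomes.
# With the two extra paths the model is saturated (zero df), so the
# likelihood-ratio test below equals the user-model chi-square reported
# in the summary that follows.
model_direct <- '
  M1 ~ C + a1*X
  M2 ~ C + a2*X
  Y1 ~ C + b11*M1 + b21*M2 + c1*X
  Y2 ~ C + b12*M1 + b22*M2 + c2*X
  M1 ~~ M2
  Y1 ~~ Y2
'
fit_direct_Study1 <- sem(model_direct, data = df, se = "standard")
lavTestLRT(fit_Study1, fit_direct_Study1)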
summary(fit_Study1, fit.measures = TRUE, rsquare = TRUE)
## lavaan 0.6-19 ended normally after 10 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 16
##
## Used Total
## Number of observations 294 312
##
## Model Test User Model:
##
## Test statistic 11.734
## Degrees of freedom 2
## P-value (Chi-square) 0.003
##
## Model Test Baseline Model:
##
## Test statistic 309.741
## Degrees of freedom 14
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.967
## Tucker-Lewis Index (TLI) 0.770
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -1477.378
## Loglikelihood unrestricted model (H1) -1471.511
##
## Akaike (AIC) 2986.755
## Bayesian (BIC) 3045.692
## Sample-size adjusted Bayesian (SABIC) 2994.952
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.129
## 90 Percent confidence interval - lower 0.064
## 90 Percent confidence interval - upper 0.204
## P-value H_0: RMSEA <= 0.050 0.024
## P-value H_0: RMSEA >= 0.080 0.901
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.034
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 5000
## Number of successful bootstrap draws 5000
##
## Regressions:
## Estimate Std.Err z-value P(>|z|)
## M1 ~
## C -0.009 0.077 -0.114 0.909
## X (a1) 0.386 0.038 10.101 0.000
## M2 ~
## C 0.220 0.066 3.346 0.001
## X (a2) 0.141 0.033 4.302 0.000
## Y1 ~
## C -0.068 0.079 -0.858 0.391
## M1 (b11) 0.398 0.051 7.838 0.000
## M2 (b21) -0.027 0.080 -0.335 0.738
## Y2 ~
## C 0.348 0.076 4.561 0.000
## M1 (b12) 0.194 0.052 3.772 0.000
## M2 (b22) 0.370 0.073 5.036 0.000
##
## Covariances:
## Estimate Std.Err z-value P(>|z|)
## .M1 ~~
## .M2 0.116 0.049 2.362 0.018
## .Y1 ~~
## .Y2 0.116 0.051 2.289 0.022
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## .M1 0.895 0.066 13.612 0.000
## .M2 0.591 0.058 10.115 0.000
## .Y1 0.759 0.061 12.426 0.000
## .Y2 0.713 0.072 9.929 0.000
##
## R-Square:
## Estimate
## M1 0.274
## M2 0.102
## Y1 0.204
## Y2 0.264
summary(fit_Study2, fit.measures = TRUE, rsquare = TRUE)
## lavaan 0.6-19 ended normally after 10 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 16
##
## Number of observations 163
##
## Model Test User Model:
##
## Test statistic 7.954
## Degrees of freedom 2
## P-value (Chi-square) 0.019
##
## Model Test Baseline Model:
##
## Test statistic 197.679
## Degrees of freedom 14
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.968
## Tucker-Lewis Index (TLI) 0.773
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -839.992
## Loglikelihood unrestricted model (H1) -836.015
##
## Akaike (AIC) 1711.984
## Bayesian (BIC) 1761.484
## Sample-size adjusted Bayesian (SABIC) 1710.830
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.135
## 90 Percent confidence interval - lower 0.047
## 90 Percent confidence interval - upper 0.239
## P-value H_0: RMSEA <= 0.050 0.055
## P-value H_0: RMSEA >= 0.080 0.869
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.038
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 5000
## Number of successful bootstrap draws 5000
##
## Regressions:
## Estimate Std.Err z-value P(>|z|)
## M1 ~
## C 0.255 0.075 3.414 0.001
## X (a1) 0.644 0.102 6.308 0.000
## M2 ~
## C 0.512 0.067 7.664 0.000
## X (a2) 0.427 0.083 5.172 0.000
## Y1 ~
## C -0.099 0.080 -1.237 0.216
## M1 (b11) 0.162 0.067 2.410 0.016
## M2 (b21) 0.001 0.071 0.007 0.994
## Y2 ~
## C 0.177 0.085 2.076 0.038
## M1 (b12) 0.250 0.080 3.140 0.002
## M2 (b22) 0.235 0.095 2.476 0.013
##
## Covariances:
## Estimate Std.Err z-value P(>|z|)
## .M1 ~~
## .M2 0.190 0.075 2.541 0.011
## .Y1 ~~
## .Y2 -0.080 0.057 -1.388 0.165
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## .M1 0.935 0.102 9.203 0.000
## .M2 0.788 0.087 9.041 0.000
## .Y1 0.630 0.056 11.341 0.000
## .Y2 0.807 0.073 11.122 0.000
##
## R-Square:
## Estimate
## M1 0.278
## M2 0.332
## Y1 0.051
## Y2 0.272
# Study 1
estimates <- parameterEstimates(fit_Study1, ci = TRUE, boot.ci.type = "bca.simple")
a_path <- estimates[estimates$label == "a2", "est"]
a_path_se <- estimates[estimates$label == "a2", "se"]
b_path <- estimates[estimates$label == "b22", "est"]
b_path_se <- estimates[estimates$label == "b22", "se"]
# Monte Carlo 95% CI for the indirect effect a2*b22 (X -> M2 -> Y2)
mc_ci <- medci(mu.x = a_path, mu.y = b_path, se.x = a_path_se, se.y = b_path_se,
               rho = 0, alpha = .05, type = "MC", n.mc = 10000)
cat("indirect effect (Study 1):", a_path * b_path, "\n")
## indirect effect (Study 1): 0.05204624
cat("Monte Carlo 95% CI (Study 1): [", mc_ci$`95% CI`[1], ",", mc_ci$`95% CI`[2], "]\n\n")
## Monte Carlo 95% CI (Study 1): [ 0.02397789 , 0.0867399 ]
# Study 2
estimates <- parameterEstimates(fit_Study2, ci = TRUE, boot.ci.type = "bca.simple")
a_path <- estimates[estimates$label == "a2", "est"]
a_path_se <- estimates[estimates$label == "a2", "se"]
b_path <- estimates[estimates$label == "b22", "est"]
b_path_se <- estimates[estimates$label == "b22", "se"]
# Monte Carlo 95% CI for the indirect effect a2*b22 (X -> M2 -> Y2)
mc_ci <- medci(mu.x = a_path, mu.y = b_path, se.x = a_path_se, se.y = b_path_se,
               rho = 0, alpha = .05, type = "MC", n.mc = 10000)
cat("indirect effect (Study 2):", a_path * b_path, "\n")
## indirect effect (Study 2): 0.1003514
cat("Monte Carlo 95% CI (Study 2): [", mc_ci$`95% CI`[1], ",", mc_ci$`95% CI`[2], "]\n")
## Monte Carlo 95% CI (Study 2): [ 0.01952913 , 0.1982289 ]
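As an alternative to the Monte Carlo interval, the same indirect effect can be defined inside the lavaan model and given a bootstrap percentile CI directly. A minimal sketch for Study 1 (illustrative; model_ind and fit_ind_Study1 are not part of the original analysis):
# Define the X -> M2 -> Y2 indirect effect as a model parameter so that
# parameterEstimates() returns a bootstrap CI for it.
model_ind <- '
  M1 ~ C + a1*X
  M2 ~ C + a2*X
  Y1 ~ C + b11*M1 + b21*M2
  Y2 ~ C + b12*M1 + b22*M2
  M1 ~~ M2
  Y1 ~~ Y2
  ind_X_M2_Y2 := a2*b22
'
fit_ind_Study1 <- sem(model_ind, data = df, se = "bootstrap", bootstrap = 5000)
subset(parameterEstimates(fit_ind_Study1, boot.ci.type = "perc"), op == ":=")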
model <- '
  # a-paths: X, the X x W interaction, and W -> mediators (controlling for C)
  M1 ~ C + a1*X + a1w*XW + d1*W
  M2 ~ C + a2*X + a2w*XW + d2*W
  # b-paths: mediators -> outcomes (controlling for C and W)
  Y1 ~ C + W + b11*M1 + b21*M2
  Y2 ~ C + W + b12*M1 + b22*M2
  # residual covariances
  M1 ~~ M2
  Y1 ~~ Y2
  # Low/High ambient discrimination plug-in values (+/- 1 SD of centered X)
  LowAmbdis := -.82
  HighAmbdis := .82
  # Low/High intergroup management self-efficacy plug-in values (+/- 1 SD of centered W)
  LowEfficacy := -.97
  HighEfficacy := .97
  # Simple slopes of X on challenge appraisal (M2) at Low/High self-efficacy
  SS_LowEfficacy_challenge := a2 + a2w*LowEfficacy
  SS_HighEfficacy_challenge := a2 + a2w*HighEfficacy
  # Predicted M2 values for plotting at Low/High X and W
  # (1.285 is the estimated intercept of M2 from this model; see Intercepts below)
  LowEfficacy_LowAmbdis := (a2 + a2w*LowEfficacy)*LowAmbdis + (1.285 + d2*LowEfficacy)
  LowEfficacy_HighAmbdis := (a2 + a2w*LowEfficacy)*HighAmbdis + (1.285 + d2*LowEfficacy)
  HighEfficacy_LowAmbdis := (a2 + a2w*HighEfficacy)*LowAmbdis + (1.285 + d2*HighEfficacy)
  HighEfficacy_HighAmbdis := (a2 + a2w*HighEfficacy)*HighAmbdis + (1.285 + d2*HighEfficacy)
'
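The plug-in constants above (+/- .82 and +/- .97) are hard-coded. A minimal check, assuming they are meant to equal +/- 1 SD of the centered predictor and moderator (the same sd() values used for plotting further below):
# Recompute the plug-in values from the data
# (expected to match the hard-coded .82 and .97 if they are +/- 1 SD)
round(c(sd_X = sd(df_Study2$X, na.rm = TRUE),
        sd_W = sd(df_Study2$W, na.rm = TRUE)), 2)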
results <- sem(model, data = df_Study2, meanstructure = TRUE, se = "bootstrap", bootstrap = 5000)
summary(results, standardized = TRUE, fit.measures = TRUE, rsquare = TRUE)
## lavaan 0.6-19 ended normally after 22 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 26
##
## Number of observations 163
##
## Model Test User Model:
##
## Test statistic 9.551
## Degrees of freedom 4
## P-value (Chi-square) 0.049
##
## Model Test Baseline Model:
##
## Test statistic 240.412
## Degrees of freedom 22
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.975
## Tucker-Lewis Index (TLI) 0.860
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -819.424
## Loglikelihood unrestricted model (H1) -814.648
##
## Akaike (AIC) 1690.847
## Bayesian (BIC) 1771.285
## Sample-size adjusted Bayesian (SABIC) 1688.972
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.092
## 90 Percent confidence interval - lower 0.006
## 90 Percent confidence interval - upper 0.169
## P-value H_0: RMSEA <= 0.050 0.143
## P-value H_0: RMSEA >= 0.080 0.671
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.030
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 5000
## Number of successful bootstrap draws 5000
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## M1 ~
## C 0.236 0.079 2.991 0.003 0.236 0.199
## X (a1) 0.637 0.105 6.094 0.000 0.637 0.460
## XW (a1w) 0.057 0.108 0.531 0.595 0.057 0.038
## W (d1) 0.052 0.090 0.581 0.562 0.052 0.044
## M2 ~
## C 0.455 0.077 5.926 0.000 0.455 0.402
## X (a2) 0.400 0.085 4.701 0.000 0.400 0.303
## XW (a2w) 0.220 0.104 2.117 0.034 0.220 0.151
## W (d2) 0.157 0.085 1.855 0.064 0.157 0.140
## Y1 ~
## C -0.029 0.087 -0.337 0.736 -0.029 -0.034
## W -0.200 0.071 -2.792 0.005 -0.200 -0.236
## M1 (b11) 0.159 0.064 2.481 0.013 0.159 0.223
## M2 (b21) 0.022 0.066 0.331 0.741 0.022 0.029
## Y2 ~
## C 0.037 0.081 0.459 0.646 0.037 0.034
## W 0.399 0.077 5.175 0.000 0.399 0.365
## M1 (b12) 0.255 0.069 3.667 0.000 0.255 0.275
## M2 (b22) 0.193 0.085 2.261 0.024 0.193 0.199
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .M1 ~~
## .M2 0.179 0.070 2.571 0.010 0.179 0.215
## .Y1 ~~
## .Y2 -0.018 0.053 -0.343 0.731 -0.018 -0.028
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .M1 1.648 0.322 5.110 0.000 1.648 1.448
## .M2 1.285 0.307 4.191 0.000 1.285 1.183
## .Y1 1.941 0.367 5.293 0.000 1.941 2.380
## .Y2 1.379 0.352 3.914 0.000 1.379 1.310
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .M1 0.932 0.101 9.235 0.000 0.932 0.719
## .M2 0.750 0.082 9.140 0.000 0.750 0.636
## .Y1 0.600 0.056 10.653 0.000 0.600 0.902
## .Y2 0.684 0.068 10.094 0.000 0.684 0.617
##
## R-Square:
## Estimate
## M1 0.281
## M2 0.364
## Y1 0.098
## Y2 0.383
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## LowAmbdis -0.820 -0.820 -0.820
## HighAmbdis 0.820 0.820 0.820
## LowEfficacy -0.970 -0.970 -0.970
## HighEfficacy 0.970 0.970 0.970
## SS_LwEffccy_ch 0.187 0.138 1.352 0.176 0.187 0.157
## SS_HghEffccy_c 0.613 0.125 4.895 0.000 0.613 0.449
## LwEffccy_LwAmb 0.979 0.154 6.377 0.000 0.979 1.021
## LwEffccy_HghAm 1.286 0.125 10.291 0.000 1.286 1.278
## HghEffccy_LwAm 0.934 0.139 6.722 0.000 0.934 1.052
## HghEffccy_HghA 1.940 0.124 15.679 0.000 1.940 1.788
estimates <- parameterEstimates(results)
LowEfficacy <- 0 - sd(df_Study2$W, na.rm = TRUE)
HighEfficacy <- 0 + sd(df_Study2$W, na.rm = TRUE)
# A-path (X -> M2) and its moderation by W
a2_est <- estimates[estimates$label == "a2", "est"]
a2w_est <- estimates[estimates$label == "a2w", "est"]
a2_se <- estimates[estimates$label == "a2", "se"]
a2w_se <- estimates[estimates$label == "a2w", "se"]
# Conditional a-paths (simple slopes defined in the model)
a_low <- estimates[estimates$label == "SS_LowEfficacy_challenge", "est"]
a_high <- estimates[estimates$label == "SS_HighEfficacy_challenge", "est"]
# SEs for the conditional a-paths
a_low_se <- estimates[estimates$label == "SS_LowEfficacy_challenge", "se"]
a_high_se <- estimates[estimates$label == "SS_HighEfficacy_challenge", "se"]
# B-path (b11: M1 -> Y1)
b1 <- estimates[estimates$label == "b11", "est"]
b1_se <- estimates[estimates$label == "b11", "se"]
# Compute Monte Carlo CI for Low Intergroup Management Self-Efficacy
mc_ci_low <- medci(mu.x = a_low, mu.y = b1,
                   se.x = a_low_se, se.y = b1_se,
                   rho = 0, alpha = .05, type = "MC", n.mc = 10000)
# Compute Monte Carlo CI for High Intergroup Management Self-Efficacy
mc_ci_high <- medci(mu.x = a_high, mu.y = b1,
                    se.x = a_high_se, se.y = b1_se,
                    rho = 0, alpha = .05, type = "MC", n.mc = 10000)
# Conditional ab
cat("Indirect Effect for Low Intergroup Management Self-Efficacy:", a_low * b1, "\n")
## Indirect Effect for Low Intergroup Management Self-Efficacy: 0.02979571
cat("Monte Carlo 95% CI (Low Intergroup Management Self-Efficacy): [", round(mc_ci_low$`95% CI`, 3), "]\n\n")
## Monte Carlo 95% CI (Low Intergroup Management Self-Efficacy): [ -0.009 0.087 ]
cat("Indirect Effect for High Intergroup Management Self-Efficacy:", a_high * b1, "\n")
## Indirect Effect for High Intergroup Management Self-Efficacy: 0.09783585
cat("Monte Carlo 95% CI (High Intergroup Management Self-Efficacy): [", round(mc_ci_high$`95% CI`, 3), "]\n")
## Monte Carlo 95% CI (High Intergroup Management Self-Efficacy): [ 0.018 0.198 ]
# Moderated Mediation Index (a2w * b11)
mc_ci <- medci(mu.x = a2w_est, mu.y = b1, se.x = a2w_se, se.y = b1_se,
               rho = 0, alpha = .05, type = "MC", n.mc = 10000)
cat("Moderated Mediation Index:", a2w_est * b1, "\n")
## Moderated Mediation Index: 0.03507224
cat("Monte Carlo 95% CI: [", mc_ci$`95% CI`[1], ",", mc_ci$`95% CI`[2], "]\n")
## Monte Carlo 95% CI: [ 0.000289469 , 0.08772729 ]
# Extract parameter estimates
estimates <- parameterEstimates(results)
# Define moderator levels (efficacy) and predictor levels (X)
LowEfficacy <- 0 - sd(df_Study2$W, na.rm = TRUE)
HighEfficacy <- 0 + sd(df_Study2$W, na.rm = TRUE)
X_LOW <- -sd(df_Study2$X, na.rm = TRUE)
X_HIGH <- sd(df_Study2$X, na.rm = TRUE)
# Coefficients
a2 <- estimates$est[estimates$label == "a2"] # X → M2
a2w <- estimates$est[estimates$label == "a2w"] # XW → M2
d2 <- estimates$est[estimates$label == "d2"] # W → M2
# Intercept for M2
int_M2 <- estimates$est[estimates$lhs == "M2" &
estimates$op == "~1"]
# Low efficacy
LOWEFF_LOWX <- int_M2 + a2*X_LOW + a2w*(X_LOW * LowEfficacy) + d2*LowEfficacy
LOWEFF_HIGHX <- int_M2 + a2*X_HIGH + a2w*(X_HIGH * LowEfficacy) + d2*LowEfficacy
# High efficacy
HIGHEFF_LOWX <- int_M2 + a2*X_LOW + a2w*(X_LOW * HighEfficacy) + d2*HighEfficacy
HIGHEFF_HIGHX <- int_M2 + a2*X_HIGH + a2w*(X_HIGH * HighEfficacy) + d2*HighEfficacy
# Data frame for plotting the interaction
plot_data <- data.frame(
Efficacy = rep(c("Low efficacy", "High efficacy"), each = 2),
X_Level = rep(c("Low X", "High X"), times = 2),
Predicted_M2 = c(
LOWEFF_LOWX, LOWEFF_HIGHX,
HIGHEFF_LOWX, HIGHEFF_HIGHX
)
)
plot_data$X_Level <- factor(plot_data$X_Level, levels = c("Low X", "High X"))
plot_data$Efficacy <- factor(plot_data$Efficacy, levels = c("Low efficacy", "High efficacy"))
# Plot interaction
ggplot(plot_data, aes(x = X_Level, y = Predicted_M2,
group = Efficacy, color = Efficacy)) +
geom_line(linewidth = 1.2) +
geom_point(size = 3) +
labs(x = "Ambient Discrimination",
y = "Challenge Appraisal",
color = "Intergroup Management Self-Efficacy") +
theme_minimal(base_size = 14) +
theme(legend.position = "bottom")