#Load packages
#install packages (run once if not already installed)
#install.packages(c("tidyverse", "psych", "ggdist", "gghalves", "ggcorrplot", "RColorBrewer", "lavaan", "openssl", "ggpubr", "rstatix", "ggprism", "patchwork", "magrittr", "GGally"))
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ──
## ✓ ggplot2 3.3.5 ✓ purrr 0.3.4
## ✓ tibble 3.1.6 ✓ dplyr 1.0.7
## ✓ tidyr 1.1.4 ✓ stringr 1.4.0
## ✓ readr 2.0.2 ✓ forcats 0.5.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
library(psych)
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
library(ggdist)
library(gghalves)
library(ggcorrplot)
library(RColorBrewer)
library(lavaan)
## This is lavaan 0.6-9
## lavaan is FREE software! Please report any bugs.
##
## Attaching package: 'lavaan'
## The following object is masked from 'package:psych':
##
## cor2cov
library(openssl)
## Linking to: OpenSSL 1.1.1k 25 Mar 2021
library(ggpubr)
library(rstatix)
##
## Attaching package: 'rstatix'
## The following object is masked from 'package:ggcorrplot':
##
## cor_pmat
## The following object is masked from 'package:stats':
##
## filter
#install.packages("ggprism")
library(ggprism)
library(patchwork)
library(magrittr)
##
## Attaching package: 'magrittr'
## The following object is masked from 'package:purrr':
##
## set_names
## The following object is masked from 'package:tidyr':
##
## extract
#install.packages("GGally")
library(GGally)
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
#Read in data
#read in data and filter out people who did not provide assent or left it blank (the filter() step is left commented out; a sketch follows below)
MMIWG_d1 <- read.csv("Study1_TidyData.csv") #%>% #filter()
MMIWG_d2 <- read.csv("Study2_TidyData.csv") #%>% #filter()
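#a minimal sketch of what the assent filter might look like, assuming a hypothetical
#Assent column coded "Yes"/"No" (the real column name and coding are not shown in this file);
#it is left commented out here so the results below still match the unfiltered data
#MMIWG_d1 <- MMIWG_d1 %>% filter(!is.na(Assent) & Assent == "Yes")
#MMIWG_d2 <- MMIWG_d2 %>% filter(!is.na(Assent) & Assent == "Yes")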
#Descriptives for invisibility (Study 2)
##Create Matrix for Invisibility
#Create matrix of invisibility items
invis_vars <- MMIWG_d2[ , c('Invisibility_1', 'Invisibility_2', 'Invisibility_3', 'Invisibility_4', 'Invisibility_5', 'Invisibility_6', 'Invisibility_7', 'Invisibility_8')]
#create a correlation matrix for invis vars
Invismatrix <-cor(invis_vars, use = "complete.obs")
describe(invis_vars)
## vars n mean sd median trimmed mad min max range skew
## Invisibility_1 1 3996 3.13 1.68 3 2.99 1.48 1 7 6 0.41
## Invisibility_2 2 3995 2.71 1.63 2 2.51 1.48 1 7 6 0.74
## Invisibility_3 3 3996 4.62 1.74 5 4.73 1.48 1 7 6 -0.53
## Invisibility_4 4 3998 3.80 1.71 4 3.80 1.48 1 7 6 -0.03
## Invisibility_5 5 3994 3.37 1.81 3 3.28 1.48 1 7 6 0.26
## Invisibility_6 6 3996 3.82 1.68 4 3.84 1.48 1 7 6 -0.08
## Invisibility_7 7 3996 4.07 1.68 4 4.12 1.48 1 7 6 -0.21
## Invisibility_8 8 3995 3.74 1.66 4 3.75 1.48 1 7 6 -0.05
## kurtosis se
## Invisibility_1 -0.76 0.03
## Invisibility_2 -0.30 0.03
## Invisibility_3 -0.65 0.03
## Invisibility_4 -0.92 0.03
## Invisibility_5 -1.06 0.03
## Invisibility_6 -0.87 0.03
## Invisibility_7 -0.79 0.03
## Invisibility_8 -0.81 0.03
ggcorrplot(Invismatrix, hc.order = TRUE, type = "lower", lab = TRUE, ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726"))
#CFA for Invisibility Study 2
#CFA for invisibility
Invis_CFA2 <- 'Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8'
Invis_CFA2 <- sem(Invis_CFA2, data = MMIWG_d2, estimator = "MLR")
summary(Invis_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 25 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 12
##
## Used Total
## Number of observations 3992 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 315.310 203.720
## Degrees of freedom 9 9
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.548
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 10529.266 6344.658
## Degrees of freedom 15 15
## P-value 0.000 0.000
## Scaling correction factor 1.660
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.971 0.969
## Tucker-Lewis Index (TLI) 0.951 0.949
##
## Robust Comparative Fit Index (CFI) 0.971
## Robust Tucker-Lewis Index (TLI) 0.952
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -41763.809 -41763.809
## Scaling correction factor 1.202
## for the MLR correction
## Loglikelihood unrestricted model (H1) -41606.154 -41606.154
## Scaling correction factor 1.350
## for the MLR correction
##
## Akaike (AIC) 83551.618 83551.618
## Bayesian (BIC) 83627.123 83627.123
## Sample-size adjusted Bayesian (BIC) 83588.992 83588.992
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.092 0.074
## 90 Percent confidence interval - lower 0.084 0.067
## 90 Percent confidence interval - upper 0.101 0.081
## P-value RMSEA <= 0.05 0.000 0.000
##
## Robust RMSEA 0.092
## 90 Percent confidence interval - lower 0.081
## 90 Percent confidence interval - upper 0.103
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.030 0.030
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Invis =~
## Invisibility_3 1.000 0.971 0.558
## Invisibility_4 1.347 0.038 35.135 0.000 1.308 0.764
## Invisibility_5 1.245 0.042 29.551 0.000 1.209 0.669
## Invisibility_6 1.408 0.045 31.472 0.000 1.367 0.814
## Invisibility_7 1.277 0.041 31.000 0.000 1.239 0.739
## Invisibility_8 1.354 0.043 31.143 0.000 1.314 0.791
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .Invisibility_3 2.088 0.051 41.182 0.000 2.088 0.689
## .Invisibility_4 1.220 0.048 25.437 0.000 1.220 0.416
## .Invisibility_5 1.807 0.055 32.579 0.000 1.807 0.553
## .Invisibility_6 0.952 0.042 22.401 0.000 0.952 0.337
## .Invisibility_7 1.278 0.042 30.100 0.000 1.278 0.454
## .Invisibility_8 1.036 0.040 25.938 0.000 1.036 0.375
## Invis 0.942 0.056 16.866 0.000 1.000 1.000
##
## R-Square:
## Estimate
## Invisibility_3 0.311
## Invisibility_4 0.584
## Invisibility_5 0.447
## Invisibility_6 0.663
## Invisibility_7 0.546
## Invisibility_8 0.625
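#the RMSEA for this one-factor model is on the high side (robust ~ .074, naive ~ .092),
#so it may be worth checking where the misfit is concentrated; a minimal diagnostic
#sketch using lavaan's modification indices (not part of the original output)
modindices(Invis_CFA2) %>%
  arrange(desc(mi)) %>%
  head(5)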
#Descriptives for Racism Minimization (Study 2)
##Create Matrix for Racism Minimization
#Create matrix of racism minimization items
RM_vars <- MMIWG_d2[ , c('RacismMini_8', 'RacismMini_9', 'RacismMini_10')]
describe(RM_vars)
## vars n mean sd median trimmed mad min max range skew
## RacismMini_8 1 3994 2.84 1.62 3 2.67 1.48 1 7 6 0.69
## RacismMini_9 2 3994 3.58 1.66 4 3.54 1.48 1 7 6 0.14
## RacismMini_10 3 3994 3.13 1.63 3 3.01 1.48 1 7 6 0.44
## kurtosis se
## RacismMini_8 -0.28 0.03
## RacismMini_9 -0.72 0.03
## RacismMini_10 -0.55 0.03
hist(MMIWG_d2$RacismMini_8)
hist(MMIWG_d2$RacismMini_9)
hist(MMIWG_d2$RacismMini_10)
#create a correlation matrix for racism mini vars
RMmatrix <-cor(RM_vars, use = "complete.obs")
ggcorrplot(RMmatrix, hc.order = TRUE, type = "lower", lab = TRUE, ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726"))
#CFA for Racism Minimization Study 2
#CFA for racism mini
RMini_CFA2 <- 'RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10'
RMini_CFA2 <- sem(RMini_CFA2, data = MMIWG_d2, estimator = "MLR")
#with only three indicators the model is just identified (df = 0), so global fit is perfect by construction and the fit indices below are uninformative; see the degrees-of-freedom check after this output
summary(RMini_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 23 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 6
##
## Used Total
## Number of observations 3990 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 0.000 0.000
## Degrees of freedom 0 0
##
## Model Test Baseline Model:
##
## Test statistic 3160.664 1435.855
## Degrees of freedom 3 3
## P-value 0.000 0.000
## Scaling correction factor 2.201
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 1.000 1.000
## Tucker-Lewis Index (TLI) 1.000 1.000
##
## Robust Comparative Fit Index (CFI) NA
## Robust Tucker-Lewis Index (TLI) NA
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -21301.326 -21301.326
## Loglikelihood unrestricted model (H1) -21301.326 -21301.326
##
## Akaike (AIC) 42614.653 42614.653
## Bayesian (BIC) 42652.402 42652.402
## Sample-size adjusted Bayesian (BIC) 42633.337 42633.337
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000 0.000
## 90 Percent confidence interval - lower 0.000 0.000
## 90 Percent confidence interval - upper 0.000 0.000
## P-value RMSEA <= 0.05 NA NA
##
## Robust RMSEA 0.000
## 90 Percent confidence interval - lower 0.000
## 90 Percent confidence interval - upper 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.000 0.000
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## RMini =~
## RacismMini_8 1.000 1.324 0.815
## RacismMini_9 0.588 0.026 22.425 0.000 0.779 0.469
## RacismMini_10 1.010 0.039 25.673 0.000 1.337 0.823
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .RacismMini_8 0.885 0.077 11.474 0.000 0.885 0.335
## .RacismMini_9 2.150 0.058 37.099 0.000 2.150 0.780
## .RacismMini_10 0.854 0.077 11.047 0.000 0.854 0.323
## RMini 1.754 0.086 20.339 0.000 1.000 1.000
##
## R-Square:
## Estimate
## RacismMini_8 0.665
## RacismMini_9 0.220
## RacismMini_10 0.677
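#a quick check of why the three-indicator model is just identified: the number of unique
#variances/covariances equals the number of free parameters, so df = 0
p <- 3                      #observed indicators
moments <- p * (p + 1) / 2  #6 unique variances and covariances
free_params <- 2 + 3 + 1    #2 free loadings + 3 residual variances + 1 factor variance
moments - free_params       #0 degrees of freedom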
#Descriptives for Victim Blaming (Study 2)
##Create Matrix for Victim Blaming
#Create matrix of vb items
VB_vars <- MMIWG_d2[ , c('VB_1','VB_2', 'VB_3', 'VB_4')]
describe(VB_vars)
## vars n mean sd median trimmed mad min max range skew kurtosis se
## VB_1 1 3997 3.22 1.48 4 3.17 1.48 1 7 6 0.17 -0.40 0.02
## VB_2 2 3999 3.47 1.48 4 3.46 1.48 1 7 6 -0.05 -0.38 0.02
## VB_3 3 3998 3.44 1.46 4 3.43 1.48 1 7 6 0.03 -0.42 0.02
## VB_4 4 3994 3.56 1.52 4 3.54 1.48 1 7 6 0.00 -0.39 0.02
hist(MMIWG_d2$VB_1)
hist(MMIWG_d2$VB_2)
hist(MMIWG_d2$VB_3)
hist(MMIWG_d2$VB_4)
#create a correlation matrix for vb vars
VBmatrix <-cor(VB_vars, use = "complete.obs")
ggcorrplot(VBmatrix, hc.order = TRUE, type = "lower", lab = TRUE, ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726"))
#CFA for Victim Blaming Study 2
#CFA for VB
Vblame_CFA2 <- 'Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4'
Vblame_CFA2 <- sem(Vblame_CFA2, data = MMIWG_d2, estimator = "MLR")
summary(Vblame_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 24 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 8
##
## Used Total
## Number of observations 3994 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 6.379 3.406
## Degrees of freedom 2 2
## P-value (Chi-square) 0.041 0.182
## Scaling correction factor 1.873
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 6358.228 2736.568
## Degrees of freedom 6 6
## P-value 0.000 0.000
## Scaling correction factor 2.323
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.999 0.999
## Tucker-Lewis Index (TLI) 0.998 0.998
##
## Robust Comparative Fit Index (CFI) 1.000
## Robust Tucker-Lewis Index (TLI) 0.999
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -25822.701 -25822.701
## Scaling correction factor 1.676
## for the MLR correction
## Loglikelihood unrestricted model (H1) -25819.512 -25819.512
## Scaling correction factor 1.715
## for the MLR correction
##
## Akaike (AIC) 51661.402 51661.402
## Bayesian (BIC) 51711.743 51711.743
## Sample-size adjusted Bayesian (BIC) 51686.322 51686.322
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.023 0.013
## 90 Percent confidence interval - lower 0.004 0.000
## 90 Percent confidence interval - upper 0.045 0.030
## P-value RMSEA <= 0.05 0.982 1.000
##
## Robust RMSEA 0.018
## 90 Percent confidence interval - lower 0.000
## 90 Percent confidence interval - upper 0.050
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.005 0.005
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame =~
## VB_1 1.000 1.193 0.806
## VB_2 1.010 0.023 44.082 0.000 1.205 0.815
## VB_3 0.828 0.024 34.584 0.000 0.988 0.676
## VB_4 0.933 0.023 39.955 0.000 1.113 0.730
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .VB_1 0.770 0.044 17.646 0.000 0.770 0.351
## .VB_2 0.733 0.037 19.760 0.000 0.733 0.336
## .VB_3 1.162 0.046 25.430 0.000 1.162 0.544
## .VB_4 1.087 0.049 22.332 0.000 1.087 0.468
## Vblame 1.422 0.053 26.904 0.000 1.000 1.000
##
## R-Square:
## Estimate
## VB_1 0.649
## VB_2 0.664
## VB_3 0.456
## VB_4 0.532
#Descriptives for Advocacy Intention (Study 2)
##Create Matrix for Advocacy Intention
#Create matrix of advocacy items
Advoc_vars <- MMIWG_d2[ , c('Sup_Hash','Sup_Donate', 'Sup_Letter', 'Sup_Authority', 'Sup_Prosecute', 'Sup_Access', 'Sup_Training')] #full 7-item set
Advoc_vars <- MMIWG_d2[ , c('Sup_Hash','Sup_Donate', 'Sup_Letter', 'Sup_Access', 'Sup_Training')] #reduced 5-item set (drops Sup_Authority and Sup_Prosecute); overwrites the line above
describe(Advoc_vars)
## vars n mean sd median trimmed mad min max range skew
## Sup_Hash 1 3996 3.36 1.29 3 3.45 1.48 1 5 4 -0.25
## Sup_Donate 2 3998 3.22 1.16 3 3.26 1.48 1 5 4 -0.09
## Sup_Letter 3 3996 3.18 1.21 3 3.21 1.48 1 5 4 0.01
## Sup_Access 4 3995 3.77 1.11 4 3.89 1.48 1 5 4 -0.57
## Sup_Training 5 3992 3.68 1.17 4 3.78 1.48 1 5 4 -0.48
## kurtosis se
## Sup_Hash -1.02 0.02
## Sup_Donate -0.69 0.02
## Sup_Letter -0.91 0.02
## Sup_Access -0.46 0.02
## Sup_Training -0.67 0.02
hist(MMIWG_d2$Sup_Hash)
hist(MMIWG_d2$Sup_Donate)
hist(MMIWG_d2$Sup_Letter)
hist(MMIWG_d2$Sup_Authority)
hist(MMIWG_d2$Sup_Prosecute)
hist(MMIWG_d2$Sup_Access)
hist(MMIWG_d2$Sup_Training)
#create a correlation matrix for advocacy vars
Advoc_matrix <-cor(Advoc_vars, use = "complete.obs")
ggcorrplot(Advoc_matrix, hc.order = TRUE, type = "lower", lab = TRUE, ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726"))
#CFA for Advocacy Intentions Study 2
#CFA for advocacy
Advoc_CFA2 <- 'Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Authority + Sup_Prosecute + Sup_Access + Sup_Training'
Advoc_CFA2 <- 'Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Access + Sup_Training' #maybe need to drop authority and prosecute because they are the weakest loadings and bring the fit down quite a bit, though I like those items a lot; see the comparison sketch after this output
Advoc_CFA2 <- sem(Advoc_CFA2, data = MMIWG_d2, estimator = "MLR")
summary(Advoc_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 17 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 10
##
## Used Total
## Number of observations 3990 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 429.993 280.908
## Degrees of freedom 5 5
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.531
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 9861.114 6021.077
## Degrees of freedom 10 10
## P-value 0.000 0.000
## Scaling correction factor 1.638
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.957 0.954
## Tucker-Lewis Index (TLI) 0.914 0.908
##
## Robust Comparative Fit Index (CFI) 0.957
## Robust Tucker-Lewis Index (TLI) 0.914
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -26996.368 -26996.368
## Scaling correction factor 1.185
## for the MLR correction
## Loglikelihood unrestricted model (H1) -26781.371 -26781.371
## Scaling correction factor 1.301
## for the MLR correction
##
## Akaike (AIC) 54012.735 54012.735
## Bayesian (BIC) 54075.651 54075.651
## Sample-size adjusted Bayesian (BIC) 54043.875 54043.875
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.146 0.118
## 90 Percent confidence interval - lower 0.134 0.108
## 90 Percent confidence interval - upper 0.158 0.127
## P-value RMSEA <= 0.05 0.000 0.000
##
## Robust RMSEA 0.145
## 90 Percent confidence interval - lower 0.131
## 90 Percent confidence interval - upper 0.160
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.036 0.036
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Advoc =~
## Sup_Hash 1.000 0.968 0.748
## Sup_Donate 0.884 0.017 52.719 0.000 0.856 0.739
## Sup_Letter 0.987 0.017 58.773 0.000 0.956 0.791
## Sup_Access 0.879 0.022 39.906 0.000 0.851 0.766
## Sup_Training 0.958 0.021 46.059 0.000 0.928 0.796
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .Sup_Hash 0.739 0.027 27.354 0.000 0.739 0.441
## .Sup_Donate 0.610 0.021 28.831 0.000 0.610 0.454
## .Sup_Letter 0.547 0.020 27.506 0.000 0.547 0.374
## .Sup_Access 0.510 0.021 24.399 0.000 0.510 0.413
## .Sup_Training 0.498 0.021 24.262 0.000 0.498 0.366
## Advoc 0.938 0.032 29.440 0.000 1.000 1.000
##
## R-Square:
## Estimate
## Sup_Hash 0.559
## Sup_Donate 0.546
## Sup_Letter 0.626
## Sup_Access 0.587
## Sup_Training 0.634
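#as flagged in the comment above, one way to check the cost of keeping Sup_Authority and
#Sup_Prosecute is to fit the 7-item and 5-item versions side by side and compare fit; a
#minimal sketch (the 7-item fit is not reported elsewhere in this file)
Advoc_CFA2_7item <- sem('Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Authority + Sup_Prosecute + Sup_Access + Sup_Training',
                        data = MMIWG_d2, estimator = "MLR")
round(cbind(seven_item = fitMeasures(Advoc_CFA2_7item, c("cfi.robust", "tli.robust", "rmsea.robust", "srmr")),
            five_item = fitMeasures(Advoc_CFA2, c("cfi.robust", "tli.robust", "rmsea.robust", "srmr"))), 3)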
#Descriptives for Apathy (Study 2)
##Create Matrix for Apathy
#Create matrix of apathy items
Apathy_vars <- MMIWG_d2[ , c('Apathy_1', 'Apathy_2', 'Apathy_3', 'Apathy_4', 'Apathy_5')]
describe(Apathy_vars)
## vars n mean sd median trimmed mad min max range skew kurtosis
## Apathy_1 1 3996 2.35 1.50 2 2.11 1.48 1 7 6 1.19 0.87
## Apathy_2 2 3996 2.33 1.50 2 2.09 1.48 1 7 6 1.18 0.84
## Apathy_3 3 3996 5.93 1.57 6 6.29 1.48 1 7 6 -1.89 3.00
## Apathy_4 4 3997 2.28 1.46 2 2.04 1.48 1 7 6 1.27 1.13
## Apathy_5 5 3994 3.14 1.67 3 3.02 1.48 1 7 6 0.39 -0.73
## se
## Apathy_1 0.02
## Apathy_2 0.02
## Apathy_3 0.02
## Apathy_4 0.02
## Apathy_5 0.03
hist(MMIWG_d2$Apathy_1)
hist(MMIWG_d2$Apathy_2)
hist(MMIWG_d2$Apathy_3)
hist(MMIWG_d2$Apathy_4)
hist(MMIWG_d2$Apathy_5)
#create a correlation matrix for apathy vars
Apathy_matrix <-cor(Apathy_vars, use = "complete.obs")
ggcorrplot(Apathy_matrix, hc.order = TRUE, type = "lower", lab = TRUE, ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726"))
#CFA for Apathy Study 2
#CFA for apathy
Apathy_CFA2 <- 'Apathy =~ 1*Apathy_1 + Apathy_2 + Apathy_3 + Apathy_4 + Apathy_5'
Apathy_CFA2 <- sem(Apathy_CFA2, data = MMIWG_d2, estimator = "MLR")
summary(Apathy_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 24 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 10
##
## Used Total
## Number of observations 3990 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 45.411 31.456
## Degrees of freedom 5 5
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.444
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 9258.042 3536.013
## Degrees of freedom 10 10
## P-value 0.000 0.000
## Scaling correction factor 2.618
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.996 0.992
## Tucker-Lewis Index (TLI) 0.991 0.985
##
## Robust Comparative Fit Index (CFI) 0.996
## Robust Tucker-Lewis Index (TLI) 0.992
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -32295.403 -32295.403
## Scaling correction factor 2.653
## for the MLR correction
## Loglikelihood unrestricted model (H1) -32272.698 -32272.698
## Scaling correction factor 2.250
## for the MLR correction
##
## Akaike (AIC) 64610.806 64610.806
## Bayesian (BIC) 64673.722 64673.722
## Sample-size adjusted Bayesian (BIC) 64641.946 64641.946
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.045 0.036
## 90 Percent confidence interval - lower 0.034 0.027
## 90 Percent confidence interval - upper 0.057 0.047
## P-value RMSEA <= 0.05 0.731 0.985
##
## Robust RMSEA 0.044
## 90 Percent confidence interval - lower 0.030
## 90 Percent confidence interval - upper 0.059
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.019 0.019
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Apathy =~
## Apathy_1 1.000 1.228 0.819
## Apathy_2 1.074 0.020 54.349 0.000 1.319 0.880
## Apathy_3 -0.261 0.022 -12.021 0.000 -0.320 -0.204
## Apathy_4 1.048 0.020 52.536 0.000 1.287 0.878
## Apathy_5 0.886 0.022 40.960 0.000 1.088 0.652
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .Apathy_1 0.738 0.049 15.141 0.000 0.738 0.328
## .Apathy_2 0.508 0.042 12.003 0.000 0.508 0.226
## .Apathy_3 2.355 0.094 25.144 0.000 2.355 0.958
## .Apathy_4 0.491 0.039 12.568 0.000 0.491 0.229
## .Apathy_5 1.602 0.047 34.245 0.000 1.602 0.575
## Apathy 1.509 0.059 25.371 0.000 1.000 1.000
##
## R-Square:
## Estimate
## Apathy_1 0.672
## Apathy_2 0.774
## Apathy_3 0.042
## Apathy_4 0.771
## Apathy_5 0.425
#Descriptives for System Blame (Study 2)
##Create Matrix for System Blame
#Create matrix of SB items
SB_vars <- MMIWG_d2[ , c('SB_1', 'SB_2', 'SB_3', 'SB_4')]
describe(SB_vars)
## vars n mean sd median trimmed mad min max range skew kurtosis se
## SB_1 1 3996 4.74 1.48 5 4.83 1.48 1 7 6 -0.43 -0.01 0.02
## SB_2 2 3996 4.55 1.75 5 4.65 1.48 1 7 6 -0.38 -0.70 0.03
## SB_3 3 3998 4.69 1.59 5 4.81 1.48 1 7 6 -0.53 -0.23 0.03
## SB_4 4 3998 5.62 1.45 6 5.82 1.48 1 7 6 -1.13 0.95 0.02
hist(MMIWG_d2$SB_1)
hist(MMIWG_d2$SB_2)
hist(MMIWG_d2$SB_3)
hist(MMIWG_d2$SB_4)
#create a correlation matrix for system blame vars
SB_matrix <-cor(SB_vars, use = "complete.obs")
ggcorrplot(SB_matrix, hc.order = TRUE, type = "lower", lab = TRUE, ggtheme = ggplot2::theme_gray,
colors = c("#6D9EC1", "white", "#E46726"))
#CFA for System Blame Study 2
#CFA for SB
Sblame_CFA2 <- 'Sblame =~ 1*SB_1 + SB_2 + SB_3 + SB_4'
Sblame_CFA2 <- sem(Sblame_CFA2, data = MMIWG_d2, estimator = "ML")
summary(Sblame_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 32 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 8
##
## Used Total
## Number of observations 3993 4000
##
## Model Test User Model:
##
## Test statistic 6.011
## Degrees of freedom 2
## P-value (Chi-square) 0.050
##
## Model Test Baseline Model:
##
## Test statistic 1899.864
## Degrees of freedom 6
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.998
## Tucker-Lewis Index (TLI) 0.994
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -28819.952
## Loglikelihood unrestricted model (H1) -28816.946
##
## Akaike (AIC) 57655.904
## Bayesian (BIC) 57706.242
## Sample-size adjusted Bayesian (BIC) 57680.822
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.022
## 90 Percent confidence interval - lower 0.001
## 90 Percent confidence interval - upper 0.044
## P-value RMSEA <= 0.05 0.985
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.008
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Sblame =~
## SB_1 1.000 0.887 0.599
## SB_2 1.162 0.053 21.882 0.000 1.031 0.591
## SB_3 0.900 0.044 20.529 0.000 0.799 0.504
## SB_4 0.899 0.042 21.438 0.000 0.798 0.552
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .SB_1 1.405 0.046 30.582 0.000 1.405 0.641
## .SB_2 1.984 0.064 31.195 0.000 1.984 0.651
## .SB_3 1.876 0.052 36.264 0.000 1.876 0.746
## .SB_4 1.453 0.043 33.742 0.000 1.453 0.696
## Sblame 0.787 0.050 15.623 0.000 1.000 1.000
##
## R-Square:
## Estimate
## SB_1 0.359
## SB_2 0.349
## SB_3 0.254
## SB_4 0.304
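#unlike the other CFAs this one uses plain ML; for consistency it could be refit with the
#robust estimator used elsewhere; a minimal sketch
Sblame_CFA2_mlr <- sem('Sblame =~ 1*SB_1 + SB_2 + SB_3 + SB_4', data = MMIWG_d2, estimator = "MLR")
fitMeasures(Sblame_CFA2_mlr, c("cfi.robust", "tli.robust", "rmsea.robust", "srmr"))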
#CFA for all model variables Study 2
#CFA for the full measurement model
Full_CFA2 <- 'Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4
RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10
Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
Apathy =~ 1*Apathy_1 + Apathy_2 + Apathy_3 + Apathy_4 + Apathy_5
Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Access + Sup_Training
Sblame =~ 1*SB_1 + SB_2 + SB_3 + SB_4'
#dropping apathy_3 slightly improves model fit (it's reverse scored); see the sketch after this output
Full_CFA2 <- sem(Full_CFA2, data = MMIWG_d2, estimator = "ML")
summary(Full_CFA2, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 60 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 69
##
## Used Total
## Number of observations 3966 4000
##
## Model Test User Model:
##
## Test statistic 3501.567
## Degrees of freedom 309
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48541.237
## Degrees of freedom 351
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.934
## Tucker-Lewis Index (TLI) 0.925
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -173200.110
## Loglikelihood unrestricted model (H1) -171449.327
##
## Akaike (AIC) 346538.220
## Bayesian (BIC) 346971.920
## Sample-size adjusted Bayesian (BIC) 346752.669
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.051
## 90 Percent confidence interval - lower 0.050
## 90 Percent confidence interval - upper 0.053
## P-value RMSEA <= 0.05 0.129
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.056
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame =~
## VB_1 1.000 1.200 0.811
## VB_2 0.995 0.019 51.562 0.000 1.194 0.808
## VB_3 0.837 0.019 43.507 0.000 1.004 0.688
## VB_4 0.916 0.020 45.835 0.000 1.098 0.721
## RMini =~
## RacismMini_8 1.000 1.374 0.846
## RacismMini_9 0.569 0.020 27.863 0.000 0.782 0.472
## RacismMini_10 0.937 0.021 44.522 0.000 1.287 0.792
## Invis =~
## Invisibility_3 1.000 0.956 0.550
## Invisibility_4 1.380 0.041 33.866 0.000 1.319 0.771
## Invisibility_5 1.285 0.041 31.521 0.000 1.228 0.680
## Invisibility_6 1.420 0.041 34.698 0.000 1.357 0.809
## Invisibility_7 1.282 0.039 32.901 0.000 1.226 0.732
## Invisibility_8 1.376 0.040 34.348 0.000 1.315 0.793
## Apathy =~
## Apathy_1 1.000 1.231 0.822
## Apathy_2 1.064 0.016 64.806 0.000 1.311 0.874
## Apathy_3 -0.273 0.021 -13.124 0.000 -0.336 -0.216
## Apathy_4 1.045 0.016 65.106 0.000 1.286 0.878
## Apathy_5 0.897 0.020 44.691 0.000 1.105 0.662
## Advoc =~
## Sup_Hash 1.000 0.963 0.744
## Sup_Donate 0.874 0.020 44.573 0.000 0.842 0.728
## Sup_Letter 0.977 0.020 47.759 0.000 0.941 0.778
## Sup_Access 0.895 0.019 47.675 0.000 0.862 0.777
## Sup_Training 0.982 0.020 49.845 0.000 0.946 0.812
## Sblame =~
## SB_1 1.000 0.775 0.525
## SB_2 1.318 0.055 23.985 0.000 1.021 0.585
## SB_3 0.892 0.045 19.974 0.000 0.691 0.437
## SB_4 1.208 0.048 25.231 0.000 0.936 0.652
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame ~~
## RMini 0.781 0.036 21.963 0.000 0.474 0.474
## Invis 0.353 0.024 14.844 0.000 0.308 0.308
## Apathy 0.720 0.031 23.176 0.000 0.487 0.487
## Advoc -0.235 0.022 -10.744 0.000 -0.204 -0.204
## Sblame -0.142 0.020 -6.974 0.000 -0.153 -0.153
## RMini ~~
## Invis 0.429 0.028 15.348 0.000 0.327 0.327
## Apathy 0.954 0.037 25.619 0.000 0.564 0.564
## Advoc -0.403 0.026 -15.217 0.000 -0.304 -0.304
## Sblame -0.499 0.029 -17.449 0.000 -0.469 -0.469
## Invis ~~
## Apathy 0.485 0.026 18.726 0.000 0.412 0.412
## Advoc -0.221 0.018 -12.226 0.000 -0.241 -0.241
## Sblame -0.013 0.016 -0.865 0.387 -0.018 -0.018
## Apathy ~~
## Advoc -0.527 0.025 -21.490 0.000 -0.445 -0.445
## Sblame -0.386 0.024 -16.279 0.000 -0.405 -0.405
## Advoc ~~
## Sblame 0.481 0.023 20.818 0.000 0.645 0.645
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .VB_1 0.747 0.025 29.898 0.000 0.747 0.342
## .VB_2 0.756 0.025 30.191 0.000 0.756 0.347
## .VB_3 1.121 0.030 37.864 0.000 1.121 0.527
## .VB_4 1.117 0.031 36.472 0.000 1.117 0.481
## .RacismMini_8 0.749 0.037 20.154 0.000 0.749 0.284
## .RacismMini_9 2.136 0.051 42.006 0.000 2.136 0.778
## .RacismMini_10 0.982 0.037 26.701 0.000 0.982 0.372
## .Invisibility_3 2.106 0.050 42.032 0.000 2.106 0.697
## .Invisibility_4 1.184 0.033 35.921 0.000 1.184 0.405
## .Invisibility_5 1.752 0.044 39.534 0.000 1.752 0.537
## .Invisibility_6 0.971 0.029 33.397 0.000 0.971 0.345
## .Invisibility_7 1.302 0.034 37.806 0.000 1.302 0.464
## .Invisibility_8 1.023 0.030 34.601 0.000 1.023 0.371
## .Apathy_1 0.729 0.021 35.124 0.000 0.729 0.325
## .Apathy_2 0.529 0.018 29.603 0.000 0.529 0.235
## .Apathy_3 2.314 0.052 44.320 0.000 2.314 0.953
## .Apathy_4 0.494 0.017 29.135 0.000 0.494 0.230
## .Apathy_5 1.563 0.038 41.107 0.000 1.563 0.562
## .Sup_Hash 0.748 0.020 37.434 0.000 0.748 0.446
## .Sup_Donate 0.628 0.016 38.082 0.000 0.628 0.470
## .Sup_Letter 0.576 0.016 35.703 0.000 0.576 0.394
## .Sup_Access 0.488 0.014 35.782 0.000 0.488 0.396
## .Sup_Training 0.462 0.014 33.382 0.000 0.462 0.341
## .SB_1 1.581 0.041 38.405 0.000 1.581 0.725
## .SB_2 2.005 0.056 36.044 0.000 2.005 0.658
## .SB_3 2.027 0.050 40.771 0.000 2.027 0.809
## .SB_4 1.184 0.037 32.278 0.000 1.184 0.575
## Vblame 1.439 0.050 28.943 0.000 1.000 1.000
## RMini 1.887 0.066 28.711 0.000 1.000 1.000
## Invis 0.913 0.051 17.795 0.000 1.000 1.000
## Apathy 1.516 0.049 30.720 0.000 1.000 1.000
## Advoc 0.927 0.035 26.227 0.000 1.000 1.000
## Sblame 0.600 0.040 15.133 0.000 1.000 1.000
##
## R-Square:
## Estimate
## VB_1 0.658
## VB_2 0.653
## VB_3 0.473
## VB_4 0.519
## RacismMini_8 0.716
## RacismMini_9 0.222
## RacismMini_10 0.628
## Invisibility_3 0.303
## Invisibility_4 0.595
## Invisibility_5 0.463
## Invisibility_6 0.655
## Invisibility_7 0.536
## Invisibility_8 0.629
## Apathy_1 0.675
## Apathy_2 0.765
## Apathy_3 0.047
## Apathy_4 0.770
## Apathy_5 0.438
## Sup_Hash 0.554
## Sup_Donate 0.530
## Sup_Letter 0.606
## Sup_Access 0.604
## Sup_Training 0.659
## SB_1 0.275
## SB_2 0.342
## SB_3 0.191
## SB_4 0.425
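#as noted in the comment before this model, Apathy_3 is reverse scored and loads weakly and
#negatively; a minimal sketch that refits the full CFA without it and compares global fit
#(the reduced model is not reported elsewhere in this file)
Full_CFA2_noA3 <- sem('Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4
                       RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10
                       Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
                       Apathy =~ 1*Apathy_1 + Apathy_2 + Apathy_4 + Apathy_5
                       Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Access + Sup_Training
                       Sblame =~ 1*SB_1 + SB_2 + SB_3 + SB_4',
                      data = MMIWG_d2, estimator = "ML")
round(cbind(full = fitMeasures(Full_CFA2, c("cfi", "tli", "rmsea", "srmr")),
            no_Apathy_3 = fitMeasures(Full_CFA2_noA3, c("cfi", "tli", "rmsea", "srmr"))), 3)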
#Structural Equation Modeling of the Hypothesized Model
##Invisibility to Victim Blame SEM A –> C path
Inv_VBsem <- 'Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4
Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
Vblame ~ Invis'
Inv_VBsem <- sem(Inv_VBsem, data = MMIWG_d2, estimator = "MLR")
summary(Inv_VBsem, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 30 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 21
##
## Used Total
## Number of observations 3988 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 547.738 400.742
## Degrees of freedom 34 34
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.367
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 17399.072 11374.946
## Degrees of freedom 45 45
## P-value 0.000 0.000
## Scaling correction factor 1.530
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.970 0.968
## Tucker-Lewis Index (TLI) 0.961 0.957
##
## Robust Comparative Fit Index (CFI) 0.971
## Robust Tucker-Lewis Index (TLI) 0.962
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -67325.301 -67325.301
## Scaling correction factor 1.382
## for the MLR correction
## Loglikelihood unrestricted model (H1) -67051.432 -67051.432
## Scaling correction factor 1.373
## for the MLR correction
##
## Akaike (AIC) 134692.601 134692.601
## Bayesian (BIC) 134824.713 134824.713
## Sample-size adjusted Bayesian (BIC) 134757.985 134757.985
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.062 0.052
## 90 Percent confidence interval - lower 0.057 0.048
## 90 Percent confidence interval - upper 0.066 0.056
## P-value RMSEA <= 0.05 0.000 0.193
##
## Robust RMSEA 0.061
## 90 Percent confidence interval - lower 0.056
## 90 Percent confidence interval - upper 0.066
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.038 0.038
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame =~
## VB_1 1.000 1.191 0.806
## VB_2 1.011 0.022 45.791 0.000 1.205 0.815
## VB_3 0.831 0.024 35.150 0.000 0.991 0.678
## VB_4 0.931 0.023 40.715 0.000 1.110 0.728
## Invis =~
## Invisibility_3 1.000 0.961 0.553
## Invisibility_4 1.363 0.039 34.650 0.000 1.311 0.766
## Invisibility_5 1.263 0.043 29.260 0.000 1.215 0.672
## Invisibility_6 1.418 0.045 31.220 0.000 1.363 0.812
## Invisibility_7 1.282 0.042 30.762 0.000 1.232 0.735
## Invisibility_8 1.370 0.044 30.872 0.000 1.317 0.793
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame ~
## Invis 0.381 0.030 12.746 0.000 0.308 0.308
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .VB_1 0.767 0.042 18.061 0.000 0.767 0.351
## .VB_2 0.731 0.036 20.337 0.000 0.731 0.335
## .VB_3 1.156 0.046 25.384 0.000 1.156 0.541
## .VB_4 1.092 0.048 22.630 0.000 1.092 0.470
## .Invisibility_3 2.103 0.051 41.448 0.000 2.103 0.695
## .Invisibility_4 1.207 0.048 25.400 0.000 1.207 0.413
## .Invisibility_5 1.787 0.055 32.370 0.000 1.787 0.548
## .Invisibility_6 0.958 0.043 22.514 0.000 0.958 0.340
## .Invisibility_7 1.291 0.042 30.395 0.000 1.291 0.460
## .Invisibility_8 1.021 0.039 25.957 0.000 1.021 0.370
## .Vblame 1.285 0.048 26.847 0.000 0.905 0.905
## Invis 0.924 0.056 16.631 0.000 1.000 1.000
##
## R-Square:
## Estimate
## VB_1 0.649
## VB_2 0.665
## VB_3 0.459
## VB_4 0.530
## Invisibility_3 0.305
## Invisibility_4 0.587
## Invisibility_5 0.452
## Invisibility_6 0.660
## Invisibility_7 0.540
## Invisibility_8 0.630
## Vblame 0.095
##Invisibility to Racism Minimization SEM A –> B path
Inv_RMsem <- 'RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10
Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
RMini ~ Invis'
Inv_RMsem <- sem(Inv_RMsem, data = MMIWG_d2, estimator = "MLR")
summary(Inv_RMsem, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 35 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 19
##
## Used Total
## Number of observations 3984 4000
##
## Model Test User Model:
## Standard Robust
## Test Statistic 899.209 666.758
## Degrees of freedom 26 26
## P-value (Chi-square) 0.000 0.000
## Scaling correction factor 1.349
## Yuan-Bentler correction (Mplus variant)
##
## Model Test Baseline Model:
##
## Test statistic 14574.125 9731.472
## Degrees of freedom 36 36
## P-value 0.000 0.000
## Scaling correction factor 1.498
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.940 0.934
## Tucker-Lewis Index (TLI) 0.917 0.908
##
## Robust Comparative Fit Index (CFI) 0.940
## Robust Tucker-Lewis Index (TLI) 0.918
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -62774.871 -62774.871
## Scaling correction factor 1.291
## for the MLR correction
## Loglikelihood unrestricted model (H1) -62325.267 -62325.267
## Scaling correction factor 1.324
## for the MLR correction
##
## Akaike (AIC) 125587.743 125587.743
## Bayesian (BIC) 125707.253 125707.253
## Sample-size adjusted Bayesian (BIC) 125646.880 125646.880
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.092 0.079
## 90 Percent confidence interval - lower 0.087 0.074
## 90 Percent confidence interval - upper 0.097 0.083
## P-value RMSEA <= 0.05 0.000 0.000
##
## Robust RMSEA 0.091
## 90 Percent confidence interval - lower 0.085
## 90 Percent confidence interval - upper 0.097
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.064 0.064
##
## Parameter Estimates:
##
## Standard errors Sandwich
## Information bread Observed
## Observed information based on Hessian
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## RMini =~
## RacismMini_8 1.000 1.296 0.798
## RacismMini_9 0.616 0.026 24.120 0.000 0.798 0.481
## RacismMini_10 1.049 0.032 33.026 0.000 1.359 0.836
## Invis =~
## Invisibility_3 1.000 0.962 0.553
## Invisibility_4 1.368 0.039 34.722 0.000 1.316 0.769
## Invisibility_5 1.278 0.044 29.289 0.000 1.230 0.680
## Invisibility_6 1.410 0.045 31.252 0.000 1.357 0.809
## Invisibility_7 1.276 0.042 30.661 0.000 1.227 0.732
## Invisibility_8 1.364 0.044 30.985 0.000 1.313 0.790
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## RMini ~
## Invis 0.452 0.032 14.052 0.000 0.335 0.335
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .RacismMini_8 0.960 0.063 15.281 0.000 0.960 0.364
## .RacismMini_9 2.118 0.059 36.117 0.000 2.118 0.769
## .RacismMini_10 0.795 0.066 11.977 0.000 0.795 0.301
## .Invisibility_3 2.099 0.050 41.622 0.000 2.099 0.694
## .Invisibility_4 1.197 0.047 25.266 0.000 1.197 0.409
## .Invisibility_5 1.756 0.056 31.481 0.000 1.756 0.537
## .Invisibility_6 0.975 0.043 22.677 0.000 0.975 0.346
## .Invisibility_7 1.303 0.043 30.343 0.000 1.303 0.464
## .Invisibility_8 1.037 0.040 26.237 0.000 1.037 0.376
## .RMini 1.490 0.069 21.500 0.000 0.888 0.888
## Invis 0.926 0.055 16.692 0.000 1.000 1.000
##
## R-Square:
## Estimate
## RacismMini_8 0.636
## RacismMini_9 0.231
## RacismMini_10 0.699
## Invisibility_3 0.306
## Invisibility_4 0.591
## Invisibility_5 0.463
## Invisibility_6 0.654
## Invisibility_7 0.536
## Invisibility_8 0.624
## RMini 0.112
##Invisibility to Racism Minimization to Victim Blame SEM A –> B –> C path
#mediation SEM: invisibility -> racism minimization -> victim blame, with a bootstrapped indirect effect
Inv_RM_VBsem <- 'Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4
RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10
Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
#indirect effect a
RMini ~ a*Invis
#indirect effect b
Vblame ~ b*RMini
#direct effect c
Vblame ~ c*Invis
#indirect effect (a*b)
ab := a*b
# total effect
total := c + (a*b)'
Inv_RM_VBsem <- sem(Inv_RM_VBsem, data = MMIWG_d2, se = "bootstrap", estimator = "ML")
#standard errors are bootstrapped (1000 draws)
summary(Inv_RM_VBsem, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 38 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 29
##
## Used Total
## Number of observations 3980 4000
##
## Model Test User Model:
##
## Test statistic 1110.393
## Degrees of freedom 62
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 21890.179
## Degrees of freedom 78
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.952
## Tucker-Lewis Index (TLI) 0.940
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -88005.352
## Loglikelihood unrestricted model (H1) -87450.155
##
## Akaike (AIC) 176068.703
## Bayesian (BIC) 176251.085
## Sample-size adjusted Bayesian (BIC) 176158.936
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.065
## 90 Percent confidence interval - lower 0.062
## 90 Percent confidence interval - upper 0.069
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.053
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 1000
## Number of successful bootstrap draws 1000
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame =~
## VB_1 1.000 1.197 0.810
## VB_2 1.000 0.021 48.290 0.000 1.197 0.810
## VB_3 0.833 0.023 36.918 0.000 0.997 0.683
## VB_4 0.921 0.022 41.327 0.000 1.102 0.724
## RMini =~
## RacismMini_8 1.000 1.337 0.823
## RacismMini_9 0.598 0.025 24.239 0.000 0.800 0.482
## RacismMini_10 0.985 0.026 37.376 0.000 1.317 0.810
## Invis =~
## Invisibility_3 1.000 0.956 0.550
## Invisibility_4 1.376 0.040 34.785 0.000 1.316 0.769
## Invisibility_5 1.285 0.044 29.194 0.000 1.228 0.680
## Invisibility_6 1.419 0.045 31.196 0.000 1.357 0.809
## Invisibility_7 1.280 0.042 30.349 0.000 1.224 0.731
## Invisibility_8 1.376 0.044 31.098 0.000 1.315 0.792
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## RMini ~
## Invis (a) 0.465 0.033 14.205 0.000 0.333 0.333
## Vblame ~
## RMini (b) 0.371 0.021 17.771 0.000 0.414 0.414
## Invis (c) 0.213 0.027 7.866 0.000 0.170 0.170
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .VB_1 0.753 0.041 18.408 0.000 0.753 0.344
## .VB_2 0.749 0.034 22.345 0.000 0.749 0.343
## .VB_3 1.135 0.045 25.236 0.000 1.135 0.533
## .VB_4 1.105 0.049 22.668 0.000 1.105 0.476
## .RacismMini_8 0.850 0.059 14.401 0.000 0.850 0.322
## .RacismMini_9 2.111 0.060 35.337 0.000 2.111 0.767
## .RacismMini_10 0.908 0.060 15.226 0.000 0.908 0.344
## .Invisibility_3 2.107 0.051 41.714 0.000 2.107 0.697
## .Invisibility_4 1.194 0.046 25.703 0.000 1.194 0.408
## .Invisibility_5 1.754 0.057 30.644 0.000 1.754 0.538
## .Invisibility_6 0.971 0.043 22.567 0.000 0.971 0.345
## .Invisibility_7 1.307 0.043 30.218 0.000 1.307 0.466
## .Invisibility_8 1.025 0.040 25.383 0.000 1.025 0.372
## .Vblame 1.078 0.042 25.548 0.000 0.753 0.753
## .RMini 1.590 0.070 22.713 0.000 0.889 0.889
## Invis 0.914 0.056 16.290 0.000 1.000 1.000
##
## R-Square:
## Estimate
## VB_1 0.656
## VB_2 0.657
## VB_3 0.467
## VB_4 0.524
## RacismMini_8 0.678
## RacismMini_9 0.233
## RacismMini_10 0.656
## Invisibility_3 0.303
## Invisibility_4 0.592
## Invisibility_5 0.462
## Invisibility_6 0.655
## Invisibility_7 0.534
## Invisibility_8 0.628
## Vblame 0.247
## RMini 0.111
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ab 0.173 0.016 11.044 0.000 0.138 0.138
## total 0.385 0.030 12.962 0.000 0.308 0.308
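#because this model was fit with se = "bootstrap", percentile confidence intervals for the
#labeled paths and the defined ab and total effects can be pulled from the same fit; a
#minimal sketch
parameterEstimates(Inv_RM_VBsem, boot.ci.type = "perc", level = .95) %>%
  filter(label %in% c("a", "b", "c", "ab", "total")) %>%
  select(label, est, se, ci.lower, ci.upper)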
##Structural Model from Julisa’s Brownbag
#full structural model
Fullsem <- 'Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4
RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10
Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
Sblame =~ 1*SB_1 + SB_2 + SB_3 + SB_4
Apathy =~ 1*Apathy_1 + Apathy_2 + Apathy_3 + Apathy_4 + Apathy_5
Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Access + Sup_Training
#a path: Invis -> RMini
RMini ~ a*Invis
#b path: RMini -> Vblame
Vblame ~ b*RMini
#c path: RMini -> Sblame
Sblame ~ c*RMini
#d, e, f paths: RMini, Vblame, and Sblame -> Apathy
Apathy ~ d*RMini + e*Vblame + f*Sblame
#g path: Apathy -> Advoc; h path: direct effect of Invis on Advoc
Advoc ~ g*Apathy + h*Invis
#defined product of the labeled a-g paths (a*b*c*d*e*f*g)
indirect := a*b*c*d*e*f*g
# total as specified: direct h plus the product above
total := h + (a*b*c*d*e*f*g)'
Fullsem <- sem(Fullsem, data = MMIWG_d2, se = "bootstrap", estimator = "ML")
summary(Fullsem, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 51 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 62
##
## Used Total
## Number of observations 3966 4000
##
## Model Test User Model:
##
## Test statistic 4655.833
## Degrees of freedom 316
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48541.237
## Degrees of freedom 351
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.910
## Tucker-Lewis Index (TLI) 0.900
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -173777.243
## Loglikelihood unrestricted model (H1) -171449.327
##
## Akaike (AIC) 347678.487
## Bayesian (BIC) 348068.188
## Sample-size adjusted Bayesian (BIC) 347871.180
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.059
## 90 Percent confidence interval - lower 0.057
## 90 Percent confidence interval - upper 0.060
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.081
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 1000
## Number of successful bootstrap draws 1000
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame =~
## VB_1 1.000 1.201 0.812
## VB_2 0.994 0.022 45.719 0.000 1.194 0.808
## VB_3 0.835 0.023 36.000 0.000 1.003 0.687
## VB_4 0.913 0.023 40.087 0.000 1.097 0.720
## RMini =~
## RacismMini_8 1.000 1.350 0.832
## RacismMini_9 0.591 0.023 25.344 0.000 0.798 0.481
## RacismMini_10 0.958 0.021 45.195 0.000 1.293 0.796
## Invis =~
## Invisibility_3 1.000 0.957 0.551
## Invisibility_4 1.378 0.041 33.934 0.000 1.319 0.771
## Invisibility_5 1.289 0.044 29.148 0.000 1.234 0.683
## Invisibility_6 1.417 0.046 30.807 0.000 1.356 0.808
## Invisibility_7 1.280 0.043 30.009 0.000 1.225 0.731
## Invisibility_8 1.372 0.045 30.600 0.000 1.313 0.792
## Sblame =~
## SB_1 1.000 0.780 0.528
## SB_2 1.273 0.060 21.203 0.000 0.993 0.569
## SB_3 0.910 0.047 19.406 0.000 0.710 0.448
## SB_4 1.208 0.091 13.249 0.000 0.942 0.656
## Apathy =~
## Apathy_1 1.000 1.233 0.822
## Apathy_2 1.065 0.018 58.063 0.000 1.314 0.875
## Apathy_3 -0.278 0.021 -13.136 0.000 -0.343 -0.220
## Apathy_4 1.043 0.019 55.619 0.000 1.287 0.877
## Apathy_5 0.893 0.021 42.328 0.000 1.101 0.660
## Advoc =~
## Sup_Hash 1.000 0.962 0.746
## Sup_Donate 0.879 0.016 53.930 0.000 0.846 0.734
## Sup_Letter 0.982 0.016 60.192 0.000 0.945 0.784
## Sup_Access 0.886 0.022 39.954 0.000 0.852 0.770
## Sup_Training 0.967 0.021 46.609 0.000 0.931 0.801
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## RMini ~
## Invis (a) 0.514 0.034 14.936 0.000 0.364 0.364
## Vblame ~
## RMini (b) 0.429 0.020 21.034 0.000 0.482 0.482
## Sblame ~
## RMini (c) -0.259 0.017 -15.543 0.000 -0.448 -0.448
## Apathy ~
## RMini (d) 0.317 0.028 11.394 0.000 0.347 0.347
## Vblame (e) 0.285 0.021 13.413 0.000 0.278 0.278
## Sblame (f) -0.359 0.064 -5.644 0.000 -0.227 -0.227
## Advoc ~
## Apathy (g) -0.334 0.020 -16.790 0.000 -0.428 -0.428
## Invis (h) -0.081 0.021 -3.944 0.000 -0.081 -0.081
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .VB_1 0.743 0.041 17.972 0.000 0.743 0.340
## .VB_2 0.756 0.034 21.959 0.000 0.756 0.347
## .VB_3 1.123 0.045 24.848 0.000 1.123 0.527
## .VB_4 1.120 0.049 22.842 0.000 1.120 0.482
## .RacismMini_8 0.813 0.051 16.020 0.000 0.813 0.308
## .RacismMini_9 2.111 0.056 37.981 0.000 2.111 0.768
## .RacismMini_10 0.967 0.056 17.344 0.000 0.967 0.367
## .Invisibility_3 2.104 0.052 40.208 0.000 2.104 0.697
## .Invisibility_4 1.184 0.047 25.161 0.000 1.184 0.405
## .Invisibility_5 1.739 0.054 32.471 0.000 1.739 0.533
## .Invisibility_6 0.975 0.043 22.813 0.000 0.975 0.346
## .Invisibility_7 1.305 0.042 30.977 0.000 1.305 0.465
## .Invisibility_8 1.028 0.040 25.573 0.000 1.028 0.373
## .SB_1 1.572 0.065 24.090 0.000 1.572 0.721
## .SB_2 2.063 0.083 24.958 0.000 2.063 0.677
## .SB_3 2.001 0.067 30.042 0.000 2.001 0.799
## .SB_4 1.172 0.078 15.097 0.000 1.172 0.569
## .Apathy_1 0.730 0.050 14.705 0.000 0.730 0.324
## .Apathy_2 0.528 0.041 12.748 0.000 0.528 0.234
## .Apathy_3 2.310 0.094 24.487 0.000 2.310 0.951
## .Apathy_4 0.499 0.036 13.980 0.000 0.499 0.231
## .Apathy_5 1.575 0.047 33.812 0.000 1.575 0.565
## .Sup_Hash 0.740 0.027 27.584 0.000 0.740 0.444
## .Sup_Donate 0.614 0.021 29.614 0.000 0.614 0.462
## .Sup_Letter 0.560 0.019 29.116 0.000 0.560 0.385
## .Sup_Access 0.497 0.021 23.256 0.000 0.497 0.406
## .Sup_Training 0.483 0.020 24.383 0.000 0.483 0.358
## .Vblame 1.108 0.044 25.137 0.000 0.768 0.768
## .RMini 1.581 0.065 24.281 0.000 0.867 0.867
## Invis 0.916 0.056 16.284 0.000 1.000 1.000
## .Sblame 0.486 0.052 9.375 0.000 0.799 0.799
## .Apathy 0.852 0.041 20.545 0.000 0.560 0.560
## .Advoc 0.736 0.028 26.083 0.000 0.795 0.795
##
## R-Square:
## Estimate
## VB_1 0.660
## VB_2 0.653
## VB_3 0.473
## VB_4 0.518
## RacismMini_8 0.692
## RacismMini_9 0.232
## RacismMini_10 0.633
## Invisibility_3 0.303
## Invisibility_4 0.595
## Invisibility_5 0.467
## Invisibility_6 0.654
## Invisibility_7 0.535
## Invisibility_8 0.627
## SB_1 0.279
## SB_2 0.323
## SB_3 0.201
## SB_4 0.431
## Apathy_1 0.676
## Apathy_2 0.766
## Apathy_3 0.049
## Apathy_4 0.769
## Apathy_5 0.435
## Sup_Hash 0.556
## Sup_Donate 0.538
## Sup_Letter 0.615
## Sup_Access 0.594
## Sup_Training 0.642
## Vblame 0.232
## RMini 0.133
## Sblame 0.201
## Apathy 0.440
## Advoc 0.205
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## indirect -0.001 0.000 -5.671 0.000 -0.001 -0.001
## total -0.082 0.021 -3.973 0.000 -0.082 -0.082
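#the defined indirect term above multiplies the a-g labels together; if chain-specific
#indirect effects are of interest, point estimates for the individual Invis-to-Advoc routes
#implied by the regressions can be computed from the fitted coefficients (a sketch;
#bootstrap CIs would require defining these with := before refitting)
pe <- parameterEstimates(Fullsem) %>% filter(op == "~") %>% select(label, est)
b_hat <- setNames(pe$est, pe$label)
b_hat["a"] * b_hat["d"] * b_hat["g"]                 #Invis -> RMini -> Apathy -> Advoc
b_hat["a"] * b_hat["b"] * b_hat["e"] * b_hat["g"]    #Invis -> RMini -> Vblame -> Apathy -> Advoc
b_hat["a"] * b_hat["c"] * b_hat["f"] * b_hat["g"]    #Invis -> RMini -> Sblame -> Apathy -> Advoc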
#Model from Preregistration
#structural model from the preregistration
Fullsemb <- 'Vblame =~ 1*VB_1 + VB_2 + VB_3 + VB_4
RMini =~ 1*RacismMini_8 + RacismMini_9 + RacismMini_10
Invis =~ 1*Invisibility_3 + Invisibility_4 + Invisibility_5 + Invisibility_6 + Invisibility_7 + Invisibility_8
Sblame =~ 1*SB_1 + SB_2 + SB_3 + SB_4
Advoc =~ 1*Sup_Hash + Sup_Donate + Sup_Letter + Sup_Access + Sup_Training
#a path: Invis -> RMini
RMini ~ a*Invis
#b and c paths: Invis and RMini -> Vblame
Vblame ~ b*Invis + c*RMini
#d and e paths: Invis and RMini -> Sblame
Sblame ~ d*Invis + e*RMini
#f, g, h, i paths: Invis, RMini, Vblame, and Sblame -> Advoc
Advoc ~ f*Invis + g*RMini + h*Vblame + i*Sblame
#defined product of the labeled paths (a*b*c*d*e*g*h*i)
indirect := a*b*c*d*e*g*h*i
# total as specified: direct f plus the product above
total := f + (a*b*c*d*e*g*h*i)'
Fullsemb <- sem(Fullsemb, data = MMIWG_d2, se = "bootstrap", estimator = "ML")
summary(Fullsemb, fit.measures = T, standardized = T, rsquare = T)
## lavaan 0.6-9 ended normally after 48 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 53
##
## Used Total
## Number of observations 3972 4000
##
## Model Test User Model:
##
## Test statistic 2588.163
## Degrees of freedom 200
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 36422.604
## Degrees of freedom 231
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.934
## Tucker-Lewis Index (TLI) 0.924
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -142403.336
## Loglikelihood unrestricted model (H1) -141109.254
##
## Akaike (AIC) 284912.672
## Bayesian (BIC) 285245.884
## Sample-size adjusted Bayesian (BIC) 285077.474
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.055
## 90 Percent confidence interval - lower 0.053
## 90 Percent confidence interval - upper 0.057
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.054
##
## Parameter Estimates:
##
## Standard errors Bootstrap
## Number of requested bootstrap draws 1000
## Number of successful bootstrap draws 1000
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Vblame =~
## VB_1 1.000 1.198 0.810
## VB_2 0.999 0.022 45.282 0.000 1.197 0.811
## VB_3 0.833 0.023 36.408 0.000 0.998 0.684
## VB_4 0.919 0.023 39.287 0.000 1.101 0.723
## RMini =~
## RacismMini_8 1.000 1.366 0.841
## RacismMini_9 0.573 0.026 21.768 0.000 0.782 0.472
## RacismMini_10 0.950 0.026 36.238 0.000 1.298 0.799
## Invis =~
## Invisibility_3 1.000 0.957 0.551
## Invisibility_4 1.375 0.041 33.225 0.000 1.316 0.770
## Invisibility_5 1.282 0.045 28.252 0.000 1.226 0.679
## Invisibility_6 1.420 0.046 30.876 0.000 1.358 0.810
## Invisibility_7 1.282 0.042 30.431 0.000 1.227 0.732
## Invisibility_8 1.376 0.046 30.228 0.000 1.316 0.793
## Sblame =~
## SB_1 1.000 0.785 0.531
## SB_2 1.334 0.056 23.799 0.000 1.047 0.600
## SB_3 0.896 0.043 20.818 0.000 0.704 0.444
## SB_4 1.158 0.060 19.282 0.000 0.909 0.632
## Advoc =~
## Sup_Hash 1.000 0.966 0.746
## Sup_Donate 0.875 0.016 54.949 0.000 0.845 0.730
## Sup_Letter 0.978 0.016 60.558 0.000 0.944 0.781
## Sup_Access 0.890 0.022 40.386 0.000 0.860 0.774
## Sup_Training 0.979 0.021 46.827 0.000 0.945 0.810
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## RMini ~
## Invis (a) 0.466 0.033 14.221 0.000 0.326 0.326
## Vblame ~
## Invis (b) 0.218 0.028 7.666 0.000 0.174 0.174
## RMini (c) 0.359 0.020 18.043 0.000 0.410 0.410
## Sblame ~
## Invis (d) 0.132 0.022 6.074 0.000 0.161 0.161
## RMini (e) -0.297 0.017 -17.152 0.000 -0.516 -0.516
## Advoc ~
## Invis (f) -0.259 0.024 -10.915 0.000 -0.256 -0.256
## RMini (g) 0.096 0.021 4.588 0.000 0.136 0.136
## Vblame (h) -0.059 0.017 -3.506 0.000 -0.073 -0.073
## Sblame (i) 0.846 0.053 16.079 0.000 0.688 0.688
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .VB_1 0.752 0.042 17.878 0.000 0.752 0.344
## .VB_2 0.747 0.035 21.214 0.000 0.747 0.343
## .VB_3 1.135 0.044 25.583 0.000 1.135 0.533
## .VB_4 1.110 0.050 22.044 0.000 1.110 0.478
## .RacismMini_8 0.772 0.055 14.115 0.000 0.772 0.293
## .RacismMini_9 2.137 0.058 36.898 0.000 2.137 0.777
## .RacismMini_10 0.954 0.062 15.408 0.000 0.954 0.362
## .Invisibility_3 2.104 0.050 41.853 0.000 2.104 0.697
## .Invisibility_4 1.191 0.049 24.425 0.000 1.191 0.408
## .Invisibility_5 1.761 0.057 31.170 0.000 1.761 0.539
## .Invisibility_6 0.968 0.045 21.465 0.000 0.968 0.344
## .Invisibility_7 1.301 0.042 31.082 0.000 1.301 0.464
## .Invisibility_8 1.022 0.039 25.952 0.000 1.022 0.371
## .SB_1 1.565 0.053 29.696 0.000 1.565 0.718
## .SB_2 1.949 0.065 30.145 0.000 1.949 0.640
## .SB_3 2.012 0.065 31.087 0.000 2.012 0.803
## .SB_4 1.242 0.056 22.079 0.000 1.242 0.600
## .Sup_Hash 0.744 0.027 27.895 0.000 0.744 0.444
## .Sup_Donate 0.625 0.021 30.473 0.000 0.625 0.467
## .Sup_Letter 0.570 0.019 29.634 0.000 0.570 0.390
## .Sup_Access 0.493 0.020 25.198 0.000 0.493 0.400
## .Sup_Training 0.466 0.020 23.580 0.000 0.466 0.343
## .Vblame 1.083 0.043 25.408 0.000 0.755 0.755
## .RMini 1.667 0.072 23.140 0.000 0.894 0.894
## Invis 0.915 0.056 16.389 0.000 1.000 1.000
## .Sblame 0.469 0.041 11.486 0.000 0.762 0.762
## .Advoc 0.488 0.026 18.519 0.000 0.523 0.523
##
## R-Square:
## Estimate
## VB_1 0.656
## VB_2 0.657
## VB_3 0.467
## VB_4 0.522
## RacismMini_8 0.707
## RacismMini_9 0.223
## RacismMini_10 0.638
## Invisibility_3 0.303
## Invisibility_4 0.592
## Invisibility_5 0.461
## Invisibility_6 0.656
## Invisibility_7 0.536
## Invisibility_8 0.629
## SB_1 0.282
## SB_2 0.360
## SB_3 0.197
## SB_4 0.400
## Sup_Hash 0.556
## Sup_Donate 0.533
## Sup_Letter 0.610
## Sup_Access 0.600
## Sup_Training 0.657
## Vblame 0.245
## RMini 0.106
## Sblame 0.238
## Advoc 0.477
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## indirect 0.000 0.000 1.888 0.059 0.000 0.000
## total -0.259 0.024 -10.910 0.000 -0.256 -0.256
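#a quick side-by-side of global fit for the brownbag and preregistered structural models
#(a sketch; the analytic samples differ slightly, 3966 vs. 3972, because of listwise
#deletion, so the information criteria are not strictly comparable)
round(cbind(brownbag = fitMeasures(Fullsem, c("chisq", "df", "cfi", "tli", "rmsea", "srmr")),
            prereg = fitMeasures(Fullsemb, c("chisq", "df", "cfi", "tli", "rmsea", "srmr"))), 3)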