http://sites.bu.edu/tabrown/cfabook/
http://www.amarkos.gr/cfa-lavaan/
https://github.com/janlammertyn/lavaan-material/blob/master/brown-cfa/allExamples.R
http://lavaan.ugent.be/index.html
“The joy of discovery is certainly the liveliest that the mind of man can ever feel”
- Claude Bernard -
factor correlations vs. structural parameters
# The lavaan package is used for CFA and SEM analyses
library(lavaan)
## This is lavaan 0.5-23.1097
## lavaan is BETA software! Please report any bugs.
library(ggplot2)
fig4.1 <- read.delim("http://quantpsy.cau.ac.kr/wp-content/data/tabrown_cfa/fig4.1.dat",sep="")
# sep="" : any length whitespace as being the deliminator
colnames(fig4.1) <- c("N1","N2","N3","N4","E1","E2","E3","E4")
fig4.1_sd <- as.numeric(strsplit(readLines("http://quantpsy.cau.ac.kr/wp-content/data/tabrown_cfa/fig4.1.dat",n=1)," ")[[1]])
fig4.1 # correlation matrix
## N1 N2 N3 N4 E1 E2 E3 E4
## 1 1.000 NA NA NA NA NA NA NA
## 2 0.767 1.000 NA NA NA NA NA NA
## 3 0.731 0.709 1.000 NA NA NA NA NA
## 4 0.778 0.738 0.762 1.000 NA NA NA NA
## 5 -0.351 -0.302 -0.356 -0.318 1.000 NA NA NA
## 6 -0.316 -0.280 -0.300 -0.267 0.675 1.000 NA NA
## 7 -0.296 -0.289 -0.297 -0.296 0.634 0.651 1.000 NA
## 8 -0.282 -0.254 -0.292 -0.245 0.534 0.593 0.566 1
fig4.1_sd # standard deviation
## [1] 5.7 5.6 6.4 5.7 6.0 6.2 5.7 5.6
# Obtain covariance matrix from correlation and standard deviation
# Fill in the upper off-diagonal half (the file stores only the lower triangle)
fig4.1[upper.tri(fig4.1)] <- t(fig4.1)[upper.tri(fig4.1)]
# convert the correlation matrix to a covariance matrix
fig4.1_covmat <- cor2cov(as.matrix(fig4.1),fig4.1_sd) # covariance matrix of fig4.1
# covariance matrix
fig4.1_covmat
## N1 N2 N3 N4 E1 E2 E3
## [1,] 32.49000 24.48264 26.66688 25.27722 -12.0042 -11.16744 -9.61704
## [2,] 24.48264 31.36000 25.41056 23.55696 -10.1472 -9.72160 -9.22488
## [3,] 26.66688 25.41056 40.96000 27.79776 -13.6704 -11.90400 -10.83456
## [4,] 25.27722 23.55696 27.79776 32.49000 -10.8756 -9.43578 -9.61704
## [5,] -12.00420 -10.14720 -13.67040 -10.87560 36.0000 25.11000 21.68280
## [6,] -11.16744 -9.72160 -11.90400 -9.43578 25.1100 38.44000 23.00634
## [7,] -9.61704 -9.22488 -10.83456 -9.61704 21.6828 23.00634 32.49000
## [8,] -9.00144 -7.96544 -10.46528 -7.82040 17.9424 20.58896 18.06672
## E4
## [1,] -9.00144
## [2,] -7.96544
## [3,] -10.46528
## [4,] -7.82040
## [5,] 17.94240
## [6,] 20.58896
## [7,] 18.06672
## [8,] 31.36000
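As a quick check, cor2cov() is equivalent to pre- and post-multiplying the correlation matrix by a diagonal matrix of standard deviations; a minimal sketch (D and covmat_manual are illustrative names):
# equivalent manual computation: Sigma = D %*% R %*% D, with D = diag(SDs)
D <- diag(fig4.1_sd)
covmat_manual <- D %*% as.matrix(fig4.1) %*% D
all.equal(unname(as.matrix(fig4.1_covmat)), covmat_manual)  # should be TRUE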
# N1 & E1 are used as a marker (by default)
fig4.1_model_1 <- '
Neuro =~ N1 + N2 + N3 + N4
Extra =~ E1 + E2 + E3 + E4
Neuro ~~ Neuro
Extra ~~ Extra
'
# Factor variances are fixed to 1, and the loadings of N1 and E1 are freely estimated
fig4.1_model_2 <- '
Neuro =~ NA*N1 + N2 + N3 + N4
Extra =~ NA*E1 + E2 + E3 + E4
Neuro ~~ 1*Neuro
Extra ~~ 1*Extra'
# marker
fit_4.1_1 <- lavaan::cfa(fig4.1_model_1,
sample.cov=fig4.1_covmat,
sample.nobs=250,
mimic="Mplus")
# fixed
fit_4.1_2 <- lavaan::cfa(fig4.1_model_2,
sample.cov=fig4.1_covmat,
sample.nobs=250,
mimic="Mplus")
In lavaan::cfa() we pass fig4.1_model_1 (the marker-indicator model), tell lavaan that the data to analyze are a covariance matrix by supplying sample.cov=fig4.1_covmat, and specify the sample size with sample.nobs=250. Applying summary() to the fitted object prints a summary of the results. The parameterEstimates(), fitted(), and resid() functions can also be used, and they do exactly what their names suggest.
# marker model: summary statistics
summary(fit_4.1_1,standardized=TRUE, fit.measures=TRUE, rsq=TRUE, modindices=TRUE)
## lavaan (0.5-23.1097) converged normally after 61 iterations
##
## Number of observations 250
##
## Estimator ML
## Minimum Function Test Statistic 13.285
## Degrees of freedom 19
## P-value (Chi-square) 0.824
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1253.791
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.007
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -5748.501
## Loglikelihood unrestricted model (H1) -5741.858
##
## Number of free parameters 25
## Akaike (AIC) 11547.002
## Bayesian (BIC) 11635.038
## Sample-size adjusted Bayesian (BIC) 11555.786
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent Confidence Interval 0.000 0.034
## P-value RMSEA <= 0.05 0.990
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.018
##
## Parameter Estimates:
##
## Information Observed
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Neuro =~
## N1 1.000 5.033 0.885
## N2 0.942 0.052 18.146 0.000 4.742 0.849
## N3 1.071 0.061 17.592 0.000 5.389 0.844
## N4 0.997 0.052 19.217 0.000 5.017 0.882
## Extra =~
## E1 1.000 4.801 0.802
## E2 1.074 0.078 13.713 0.000 5.159 0.834
## E3 0.935 0.072 12.921 0.000 4.491 0.789
## E4 0.814 0.073 11.111 0.000 3.907 0.699
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Neuro ~~
## Extra -10.512 1.932 -5.442 0.000 -0.435 -0.435
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .N1 0.000 0.360 0.000 1.000 0.000 0.000
## .N2 0.000 0.353 0.000 1.000 0.000 0.000
## .N3 0.000 0.404 0.000 1.000 0.000 0.000
## .N4 0.000 0.360 0.000 1.000 0.000 0.000
## .E1 0.000 0.379 0.000 1.000 0.000 0.000
## .E2 0.000 0.391 0.000 1.000 0.000 0.000
## .E3 0.000 0.360 0.000 1.000 0.000 0.000
## .E4 0.000 0.353 0.000 1.000 0.000 0.000
## Neuro 0.000 0.000 0.000
## Extra 0.000 0.000 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Neuro 25.335 2.902 8.730 0.000 1.000 1.000
## Extra 23.054 3.187 7.233 0.000 1.000 1.000
## .N1 7.025 0.914 7.688 0.000 7.025 0.217
## .N2 8.746 1.002 8.731 0.000 8.746 0.280
## .N3 11.760 1.336 8.799 0.000 11.760 0.288
## .N4 7.188 0.921 7.802 0.000 7.188 0.222
## .E1 12.802 1.580 8.104 0.000 12.802 0.357
## .E2 11.671 1.603 7.283 0.000 11.671 0.305
## .E3 12.192 1.464 8.327 0.000 12.192 0.377
## .E4 15.972 1.668 9.574 0.000 15.972 0.511
##
## R-Square:
## Estimate
## N1 0.783
## N2 0.720
## N3 0.712
## N4 0.778
## E1 0.643
## E2 0.695
## E3 0.623
## E4 0.489
##
## Modification Indices:
##
## lhs op rhs mi epc sepc.lv sepc.all sepc.nox
## 30 Neuro =~ E1 1.328 -0.073 -0.368 -0.061 -0.061
## 31 Neuro =~ E2 1.121 0.068 0.342 0.055 0.055
## 32 Neuro =~ E3 0.014 0.007 0.037 0.006 0.006
## 33 Neuro =~ E4 0.013 -0.007 -0.036 -0.007 -0.007
## 34 Extra =~ N1 0.273 -0.027 -0.129 -0.023 -0.023
## 35 Extra =~ N2 0.143 0.020 0.098 0.017 0.017
## 36 Extra =~ N3 0.904 -0.059 -0.283 -0.044 -0.044
## 37 Extra =~ N4 1.067 0.053 0.255 0.045 0.045
## 38 N1 ~~ N2 3.227 1.538 1.538 0.048 0.048
## 39 N1 ~~ N3 2.747 -1.615 -1.615 -0.044 -0.044
## 40 N1 ~~ N4 0.121 -0.317 -0.317 -0.010 -0.010
## 41 N1 ~~ E1 0.255 -0.405 -0.405 -0.012 -0.012
## 42 N1 ~~ E2 0.500 -0.563 -0.563 -0.016 -0.016
## 43 N1 ~~ E3 0.919 0.740 0.740 0.023 0.023
## 44 N1 ~~ E4 0.016 -0.107 -0.107 -0.003 -0.003
## 45 N2 ~~ N3 0.311 -0.525 -0.525 -0.015 -0.015
## 46 N2 ~~ N4 1.253 -0.956 -0.956 -0.030 -0.030
## 47 N2 ~~ E1 0.658 0.689 0.689 0.021 0.021
## 48 N2 ~~ E2 0.004 -0.056 -0.056 -0.002 -0.002
## 49 N2 ~~ E3 0.222 -0.385 -0.385 -0.012 -0.012
## 50 N2 ~~ E4 0.002 0.041 0.041 0.001 0.001
## 51 N3 ~~ N4 3.507 1.820 1.820 0.050 0.050
## 52 N3 ~~ E1 1.101 -1.028 -1.028 -0.027 -0.027
## 53 N3 ~~ E2 0.107 0.319 0.319 0.008 0.008
## 54 N3 ~~ E3 0.254 0.476 0.476 0.013 0.013
## 55 N3 ~~ E4 0.587 -0.785 -0.785 -0.022 -0.022
## 56 N4 ~~ E1 0.000 -0.005 -0.005 0.000 0.000
## 57 N4 ~~ E2 1.276 0.906 0.906 0.026 0.026
## 58 N4 ~~ E3 0.838 -0.711 -0.711 -0.022 -0.022
## 59 N4 ~~ E4 0.480 0.584 0.584 0.018 0.018
## 60 E1 ~~ E2 0.428 1.071 1.071 0.029 0.029
## 61 E1 ~~ E3 0.006 0.107 0.107 0.003 0.003
## 62 E1 ~~ E4 1.983 -1.827 -1.827 -0.055 -0.055
## 63 E2 ~~ E3 0.463 -1.037 -1.037 -0.029 -0.029
## 64 E2 ~~ E4 0.412 0.873 0.873 0.025 0.025
## 65 E3 ~~ E4 0.503 0.872 0.872 0.027 0.027
parameterEstimates(fit_4.1_1,standardized = TRUE) #coefficients with se, 95%CI and z values etc.
## lhs op rhs est se z pvalue ci.lower ci.upper std.lv
## 1 Neuro =~ N1 1.000 0.000 NA NA 1.000 1.000 5.033
## 2 Neuro =~ N2 0.942 0.052 18.146 0 0.840 1.044 4.742
## 3 Neuro =~ N3 1.071 0.061 17.592 0 0.951 1.190 5.389
## 4 Neuro =~ N4 0.997 0.052 19.217 0 0.895 1.098 5.017
## 5 Extra =~ E1 1.000 0.000 NA NA 1.000 1.000 4.801
## 6 Extra =~ E2 1.074 0.078 13.713 0 0.921 1.228 5.159
## 7 Extra =~ E3 0.935 0.072 12.921 0 0.793 1.077 4.491
## 8 Extra =~ E4 0.814 0.073 11.111 0 0.670 0.957 3.907
## 9 Neuro ~~ Neuro 25.335 2.902 8.730 0 19.647 31.023 1.000
## 10 Extra ~~ Extra 23.054 3.187 7.233 0 16.807 29.301 1.000
## 11 N1 ~~ N1 7.025 0.914 7.688 0 5.234 8.816 7.025
## 12 N2 ~~ N2 8.746 1.002 8.731 0 6.783 10.710 8.746
## 13 N3 ~~ N3 11.760 1.336 8.799 0 9.141 14.380 11.760
## 14 N4 ~~ N4 7.188 0.921 7.802 0 5.382 8.994 7.188
## 15 E1 ~~ E1 12.802 1.580 8.104 0 9.706 15.898 12.802
## 16 E2 ~~ E2 11.671 1.603 7.283 0 8.530 14.812 11.671
## 17 E3 ~~ E3 12.192 1.464 8.327 0 9.322 15.062 12.192
## 18 E4 ~~ E4 15.972 1.668 9.574 0 12.702 19.242 15.972
## 19 Neuro ~~ Extra -10.512 1.932 -5.442 0 -14.298 -6.726 -0.435
## 20 N1 ~1 0.000 0.360 0.000 1 -0.705 0.705 0.000
## 21 N2 ~1 0.000 0.353 0.000 1 -0.693 0.693 0.000
## 22 N3 ~1 0.000 0.404 0.000 1 -0.792 0.792 0.000
## 23 N4 ~1 0.000 0.360 0.000 1 -0.705 0.705 0.000
## 24 E1 ~1 0.000 0.379 0.000 1 -0.742 0.742 0.000
## 25 E2 ~1 0.000 0.391 0.000 1 -0.767 0.767 0.000
## 26 E3 ~1 0.000 0.360 0.000 1 -0.705 0.705 0.000
## 27 E4 ~1 0.000 0.353 0.000 1 -0.693 0.693 0.000
## 28 Neuro ~1 0.000 0.000 NA NA 0.000 0.000 0.000
## 29 Extra ~1 0.000 0.000 NA NA 0.000 0.000 0.000
## std.all std.nox
## 1 0.885 0.885
## 2 0.849 0.849
## 3 0.844 0.844
## 4 0.882 0.882
## 5 0.802 0.802
## 6 0.834 0.834
## 7 0.789 0.789
## 8 0.699 0.699
## 9 1.000 1.000
## 10 1.000 1.000
## 11 0.217 0.217
## 12 0.280 0.280
## 13 0.288 0.288
## 14 0.222 0.222
## 15 0.357 0.357
## 16 0.305 0.305
## 17 0.377 0.377
## 18 0.511 0.511
## 19 -0.435 -0.435
## 20 0.000 0.000
## 21 0.000 0.000
## 22 0.000 0.000
## 23 0.000 0.000
## 24 0.000 0.000
## 25 0.000 0.000
## 26 0.000 0.000
## 27 0.000 0.000
## 28 0.000 0.000
## 29 0.000 0.000
fitted(fit_4.1_1) #the model implied variance-covariance matrix, slightly different values from Table 4.2 p. 117 (input matrix)
## $cov
## N1 N2 N3 N4 E1 E2 E3 E4
## N1 32.360
## N2 23.869 31.235
## N3 27.122 25.553 40.796
## N4 25.253 23.792 27.035 32.360
## E1 -10.512 -9.904 -11.254 -10.478 35.856
## E2 -11.295 -10.641 -12.092 -11.258 24.771 38.286
## E3 -9.832 -9.263 -10.526 -9.800 21.563 23.168 32.360
## E4 -8.553 -8.058 -9.157 -8.526 18.758 20.155 17.545 31.235
##
## $mean
## N1 N2 N3 N4 E1 E2 E3 E4
## 0 0 0 0 0 0 0 0
resid(fit_4.1_1) # unstandardized residuals matrix
## $type
## [1] "raw"
##
## $cov
## N1 N2 N3 N4 E1 E2 E3 E4
## N1 0.000
## N2 0.516 0.000
## N3 -0.562 -0.244 0.000
## N4 -0.077 -0.330 0.651 0.000
## E1 -1.444 -0.203 -2.362 -0.354 0.000
## E2 0.172 0.958 0.235 1.860 0.239 0.000
## E3 0.253 0.075 -0.266 0.222 0.033 -0.254 0.000
## E4 -0.412 0.125 -1.267 0.737 -0.888 0.352 0.450 0.000
##
## $mean
## N1 N2 N3 N4 E1 E2 E3 E4
## 0 0 0 0 0 0 0 0
resid(fit_4.1_1,type="standardized") #standardized residuals matrix, there are differences with the values found in Table 4.2 p. 117
## $type
## [1] "standardized"
##
## $cov
## N1 N2 N3 N4 E1 E2 E3 E4
## N1 0.000
## N2 1.371 NA
## N3 -3.227 -0.620 0.000
## N4 -0.388 -1.545 1.436 0.000
## E1 -1.187 -0.164 -1.578 -0.297 NA
## E2 0.152 0.805 0.168 1.708 0.628 NA
## E3 0.223 0.062 -0.190 0.192 0.076 -0.720 NA
## E4 -0.312 0.093 -0.803 0.565 -1.544 0.620 0.684 NA
##
## $mean
## N1 N2 N3 N4 E1 E2 E3 E4
## NA NA 0 0 NA NA NA NA
# factor variance is fixed to 1, summary statistics
summary(fit_4.1_2,standardized=TRUE, fit.measures=TRUE, rsq=TRUE, modindices=TRUE)
## lavaan (0.5-23.1097) converged normally after 49 iterations
##
## Number of observations 250
##
## Estimator ML
## Minimum Function Test Statistic 13.285
## Degrees of freedom 19
## P-value (Chi-square) 0.824
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1253.791
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.007
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -5748.501
## Loglikelihood unrestricted model (H1) -5741.858
##
## Number of free parameters 25
## Akaike (AIC) 11547.002
## Bayesian (BIC) 11635.038
## Sample-size adjusted Bayesian (BIC) 11555.786
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent Confidence Interval 0.000 0.034
## P-value RMSEA <= 0.05 0.990
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.018
##
## Parameter Estimates:
##
## Information Observed
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Neuro =~
## N1 5.033 0.288 17.459 0.000 5.033 0.885
## N2 4.742 0.290 16.332 0.000 4.742 0.849
## N3 5.389 0.333 16.176 0.000 5.389 0.844
## N4 5.017 0.289 17.371 0.000 5.017 0.882
## Extra =~
## E1 4.801 0.332 14.466 0.000 4.801 0.802
## E2 5.159 0.337 15.293 0.000 5.159 0.834
## E3 4.491 0.318 14.135 0.000 4.491 0.789
## E4 3.907 0.326 11.967 0.000 3.907 0.699
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Neuro ~~
## Extra -0.435 0.059 -7.397 0.000 -0.435 -0.435
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .N1 0.000 0.360 0.000 1.000 0.000 0.000
## .N2 0.000 0.353 0.000 1.000 0.000 0.000
## .N3 0.000 0.404 0.000 1.000 0.000 0.000
## .N4 0.000 0.360 0.000 1.000 0.000 0.000
## .E1 0.000 0.379 0.000 1.000 0.000 0.000
## .E2 0.000 0.391 0.000 1.000 0.000 0.000
## .E3 0.000 0.360 0.000 1.000 0.000 0.000
## .E4 0.000 0.353 0.000 1.000 0.000 0.000
## Neuro 0.000 0.000 0.000
## Extra 0.000 0.000 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## Neuro 1.000 1.000 1.000
## Extra 1.000 1.000 1.000
## .N1 7.025 0.914 7.688 0.000 7.025 0.217
## .N2 8.746 1.002 8.731 0.000 8.746 0.280
## .N3 11.760 1.336 8.799 0.000 11.760 0.288
## .N4 7.188 0.921 7.802 0.000 7.188 0.222
## .E1 12.802 1.580 8.104 0.000 12.802 0.357
## .E2 11.671 1.603 7.283 0.000 11.671 0.305
## .E3 12.192 1.464 8.327 0.000 12.192 0.377
## .E4 15.972 1.668 9.574 0.000 15.972 0.511
##
## R-Square:
## Estimate
## N1 0.783
## N2 0.720
## N3 0.712
## N4 0.778
## E1 0.643
## E2 0.695
## E3 0.623
## E4 0.489
##
## Modification Indices:
##
## lhs op rhs mi epc sepc.lv sepc.all sepc.nox
## 30 Neuro =~ E1 1.328 -0.368 -0.368 -0.061 -0.061
## 31 Neuro =~ E2 1.121 0.342 0.342 0.055 0.055
## 32 Neuro =~ E3 0.014 0.037 0.037 0.006 0.006
## 33 Neuro =~ E4 0.013 -0.036 -0.036 -0.007 -0.007
## 34 Extra =~ N1 0.273 -0.129 -0.129 -0.023 -0.023
## 35 Extra =~ N2 0.143 0.098 0.098 0.017 0.017
## 36 Extra =~ N3 0.904 -0.283 -0.283 -0.044 -0.044
## 37 Extra =~ N4 1.067 0.255 0.255 0.045 0.045
## 38 N1 ~~ N2 3.227 1.538 1.538 0.048 0.048
## 39 N1 ~~ N3 2.747 -1.615 -1.615 -0.044 -0.044
## 40 N1 ~~ N4 0.121 -0.317 -0.317 -0.010 -0.010
## 41 N1 ~~ E1 0.255 -0.405 -0.405 -0.012 -0.012
## 42 N1 ~~ E2 0.500 -0.563 -0.563 -0.016 -0.016
## 43 N1 ~~ E3 0.919 0.740 0.740 0.023 0.023
## 44 N1 ~~ E4 0.016 -0.107 -0.107 -0.003 -0.003
## 45 N2 ~~ N3 0.311 -0.525 -0.525 -0.015 -0.015
## 46 N2 ~~ N4 1.253 -0.956 -0.956 -0.030 -0.030
## 47 N2 ~~ E1 0.658 0.689 0.689 0.021 0.021
## 48 N2 ~~ E2 0.004 -0.056 -0.056 -0.002 -0.002
## 49 N2 ~~ E3 0.222 -0.385 -0.385 -0.012 -0.012
## 50 N2 ~~ E4 0.002 0.041 0.041 0.001 0.001
## 51 N3 ~~ N4 3.507 1.820 1.820 0.050 0.050
## 52 N3 ~~ E1 1.101 -1.028 -1.028 -0.027 -0.027
## 53 N3 ~~ E2 0.107 0.319 0.319 0.008 0.008
## 54 N3 ~~ E3 0.254 0.476 0.476 0.013 0.013
## 55 N3 ~~ E4 0.587 -0.785 -0.785 -0.022 -0.022
## 56 N4 ~~ E1 0.000 -0.005 -0.005 0.000 0.000
## 57 N4 ~~ E2 1.276 0.906 0.906 0.026 0.026
## 58 N4 ~~ E3 0.838 -0.711 -0.711 -0.022 -0.022
## 59 N4 ~~ E4 0.480 0.584 0.584 0.018 0.018
## 60 E1 ~~ E2 0.428 1.071 1.071 0.029 0.029
## 61 E1 ~~ E3 0.006 0.107 0.107 0.003 0.003
## 62 E1 ~~ E4 1.983 -1.827 -1.827 -0.055 -0.055
## 63 E2 ~~ E3 0.463 -1.037 -1.037 -0.029 -0.029
## 64 E2 ~~ E4 0.412 0.873 0.873 0.025 0.025
## 65 E3 ~~ E4 0.503 0.872 0.872 0.027 0.027
parameterEstimates(fit_4.1_2,standardized = TRUE) #coefficients with se, 95%CI and z values etc.
## lhs op rhs est se z pvalue ci.lower ci.upper std.lv
## 1 Neuro =~ N1 5.033 0.288 17.459 0 4.468 5.598 5.033
## 2 Neuro =~ N2 4.742 0.290 16.332 0 4.173 5.311 4.742
## 3 Neuro =~ N3 5.389 0.333 16.176 0 4.736 6.041 5.389
## 4 Neuro =~ N4 5.017 0.289 17.371 0 4.451 5.583 5.017
## 5 Extra =~ E1 4.801 0.332 14.466 0 4.151 5.452 4.801
## 6 Extra =~ E2 5.159 0.337 15.293 0 4.498 5.820 5.159
## 7 Extra =~ E3 4.491 0.318 14.135 0 3.868 5.114 4.491
## 8 Extra =~ E4 3.907 0.326 11.967 0 3.267 4.547 3.907
## 9 Neuro ~~ Neuro 1.000 0.000 NA NA 1.000 1.000 1.000
## 10 Extra ~~ Extra 1.000 0.000 NA NA 1.000 1.000 1.000
## 11 N1 ~~ N1 7.025 0.914 7.688 0 5.234 8.816 7.025
## 12 N2 ~~ N2 8.746 1.002 8.731 0 6.783 10.710 8.746
## 13 N3 ~~ N3 11.760 1.336 8.799 0 9.141 14.380 11.760
## 14 N4 ~~ N4 7.188 0.921 7.802 0 5.382 8.994 7.188
## 15 E1 ~~ E1 12.802 1.580 8.104 0 9.706 15.898 12.802
## 16 E2 ~~ E2 11.671 1.603 7.283 0 8.530 14.812 11.671
## 17 E3 ~~ E3 12.192 1.464 8.327 0 9.322 15.062 12.192
## 18 E4 ~~ E4 15.972 1.668 9.574 0 12.702 19.242 15.972
## 19 Neuro ~~ Extra -0.435 0.059 -7.397 0 -0.550 -0.320 -0.435
## 20 N1 ~1 0.000 0.360 0.000 1 -0.705 0.705 0.000
## 21 N2 ~1 0.000 0.353 0.000 1 -0.693 0.693 0.000
## 22 N3 ~1 0.000 0.404 0.000 1 -0.792 0.792 0.000
## 23 N4 ~1 0.000 0.360 0.000 1 -0.705 0.705 0.000
## 24 E1 ~1 0.000 0.379 0.000 1 -0.742 0.742 0.000
## 25 E2 ~1 0.000 0.391 0.000 1 -0.767 0.767 0.000
## 26 E3 ~1 0.000 0.360 0.000 1 -0.705 0.705 0.000
## 27 E4 ~1 0.000 0.353 0.000 1 -0.693 0.693 0.000
## 28 Neuro ~1 0.000 0.000 NA NA 0.000 0.000 0.000
## 29 Extra ~1 0.000 0.000 NA NA 0.000 0.000 0.000
## std.all std.nox
## 1 0.885 0.885
## 2 0.849 0.849
## 3 0.844 0.844
## 4 0.882 0.882
## 5 0.802 0.802
## 6 0.834 0.834
## 7 0.789 0.789
## 8 0.699 0.699
## 9 1.000 1.000
## 10 1.000 1.000
## 11 0.217 0.217
## 12 0.280 0.280
## 13 0.288 0.288
## 14 0.222 0.222
## 15 0.357 0.357
## 16 0.305 0.305
## 17 0.377 0.377
## 18 0.511 0.511
## 19 -0.435 -0.435
## 20 0.000 0.000
## 21 0.000 0.000
## 22 0.000 0.000
## 23 0.000 0.000
## 24 0.000 0.000
## 25 0.000 0.000
## 26 0.000 0.000
## 27 0.000 0.000
## 28 0.000 0.000
## 29 0.000 0.000
fitted(fit_4.1_2) #the model implied variance-covariance matrix, slightly different values from Table 4.2 p. 117 (input matrix)
## $cov
## N1 N2 N3 N4 E1 E2 E3 E4
## N1 32.360
## N2 23.869 31.235
## N3 27.122 25.553 40.796
## N4 25.253 23.792 27.035 32.360
## E1 -10.512 -9.904 -11.254 -10.478 35.856
## E2 -11.295 -10.641 -12.092 -11.258 24.771 38.286
## E3 -9.832 -9.263 -10.526 -9.800 21.563 23.168 32.360
## E4 -8.553 -8.058 -9.157 -8.526 18.758 20.155 17.545 31.235
##
## $mean
## N1 N2 N3 N4 E1 E2 E3 E4
## 0 0 0 0 0 0 0 0
resid(fit_4.1_2) # unstandardized residuals matrix
## $type
## [1] "raw"
##
## $cov
## N1 N2 N3 N4 E1 E2 E3 E4
## N1 0.000
## N2 0.516 0.000
## N3 -0.562 -0.244 0.000
## N4 -0.077 -0.330 0.651 0.000
## E1 -1.444 -0.203 -2.362 -0.354 0.000
## E2 0.172 0.958 0.235 1.860 0.239 0.000
## E3 0.253 0.075 -0.266 0.222 0.033 -0.254 0.000
## E4 -0.412 0.125 -1.267 0.737 -0.888 0.352 0.450 0.000
##
## $mean
## N1 N2 N3 N4 E1 E2 E3 E4
## 0 0 0 0 0 0 0 0
resid(fit_4.1_2,type="standardized") #standardized residuals matrix, there are differences with the values found in Table 4.2 p. 117
## $type
## [1] "standardized"
##
## $cov
## N1 N2 N3 N4 E1 E2 E3 E4
## N1 0.000
## N2 1.371 NA
## N3 -3.227 -0.620 0.000
## N4 -0.388 -1.545 1.436 0.000
## E1 -1.187 -0.164 -1.578 -0.297 0.000
## E2 0.152 0.805 0.168 1.708 0.628 0.000
## E3 0.223 0.062 -0.190 0.192 0.076 -0.720 0.000
## E4 -0.312 0.093 -0.803 0.565 -1.544 0.620 0.684 0.003
##
## $mean
## N1 N2 N3 N4 E1 E2 E3 E4
## 0 NA 0 0 0 0 0 0
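Because the marker-indicator and fixed-factor-variance specifications are just two ways of setting the latent scale for the same model, their fit statistics are identical; a quick check (sketch):
round(fitMeasures(fit_4.1_1, c("chisq", "df", "cfi", "tli", "rmsea", "srmr")), 3)
round(fitMeasures(fit_4.1_2, c("chisq", "df", "cfi", "tli", "rmsea", "srmr")), 3)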
(equations 4.1–4.6) \[ \begin{aligned} \phi_{11} &= \lambda_{11}^2\sigma_1^2=.8858^{2}(32.49)=25.44 \\ \\ \phi_{22} &= \lambda_{52}^2\sigma_5^2=.8018^{2}(36.00)=23.15 \\ \\ \delta_2 &= (1-\lambda_{21}^2)\sigma_2^2 =.28(31.36)=8.78 \\ &= \sigma_2^2 - \sigma_2^2(\lambda_{21}^2) = 31.36-(.8485^2)(31.36)=8.78 \\ \\ \phi_{21} &= r_{21}(SD_1)(SD_2) \\ &= -.435(5.04)(4.81)=-10.55 \end{aligned} \]
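The \(\phi_{11}\) computation above can be checked against the fitted marker model; a minimal sketch (pe_m and l_N1 are illustrative names):
pe_m <- parameterEstimates(fit_4.1_1, standardized = TRUE)
l_N1 <- pe_m$std.all[pe_m$op == "=~" & pe_m$rhs == "N1"]  # ~0.885, standardized loading of N1
l_N1^2 * fig4.1_covmat[1, 1]  # ~25.4, phi_11 as in the equations above (the ML estimate 25.335 uses the N divisor)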
\(\delta_X=VAR(X)(1-\rho)\) (equation 4.14)
\(\delta_X=(42)(1-.85)=6.3\) (equation 4.15)
fig4.3 <- read.table("http://quantpsy.cau.ac.kr/wp-content/data/tabrown_cfa/fig4.3.dat",header=F)
colnames(fig4.3) <- c("SubjID","ACTIV","SOMA","PAIN","MENTH","SOCF","VITAL","GENHLTH","AGE")
head(fig4.3)
## SubjID ACTIV SOMA PAIN MENTH SOCF VITAL GENHLTH AGE
## 1 1 48 10 9 6 26 34 38 31
## 2 2 57 51 9 11 14 30 38 25
## 3 3 56 24 6 1 15 16 32 30
## 4 4 10 30 4 1 3 0 10 50
## 5 5 31 26 9 12 26 34 29 30
## 6 6 6 28 7 0 6 16 11 34
fig4.3 <- fig4.3[,c(2:9)] # remove subject ID variable
head(fig4.3)
## ACTIV SOMA PAIN MENTH SOCF VITAL GENHLTH AGE
## 1 48 10 9 6 26 34 38 31
## 2 57 51 9 11 14 30 38 25
## 3 56 24 6 1 15 16 32 30
## 4 10 30 4 1 3 0 10 50
## 5 31 26 9 12 26 34 29 30
## 6 6 28 7 0 6 16 11 34
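The fixed residual variance for GENHLTH used in the model below follows equation 4.14; a minimal sketch, where the reliability value of .89 is an assumption chosen for illustration (the exact reliability used in the source may differ slightly):
rel_genhlth <- 0.89                      # assumed reliability estimate (illustrative)
var(fig4.3$GENHLTH) * (1 - rel_genhlth)  # roughly 7.9; compare with the 7.861 fixed below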
fig4.3_model <- '
# measurement model
psysfun =~ ACTIV + SOMA + PAIN
mentfun =~ MENTH + SOCF + VITAL
AGEF =~ AGE
GWB =~ GENHLTH
# error theory
AGE ~~ 0*AGE
GENHLTH ~~ 7.861*GENHLTH
# residual correlations
ACTIV ~~ SOMA
'
fig4.3_fit <- lavaan::cfa(model=fig4.3_model,
data=fig4.3,
mimic="Mplus")
summary(fig4.3_fit,standardized=TRUE, fit.measures=TRUE, rsq=TRUE,modindices=TRUE)
## lavaan (0.5-23.1097) converged normally after 233 iterations
##
## Number of observations 500
##
## Number of missing patterns 1
##
## Estimator ML
## Minimum Function Test Statistic 45.009
## Degrees of freedom 15
## P-value (Chi-square) 0.000
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1942.397
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 0.984
## Tucker-Lewis Index (TLI) 0.971
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -13787.363
## Loglikelihood unrestricted model (H1) -13764.859
##
## Number of free parameters 29
## Akaike (AIC) 27632.726
## Bayesian (BIC) 27754.950
## Sample-size adjusted Bayesian (BIC) 27662.902
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.063
## 90 Percent Confidence Interval 0.043 0.085
## P-value RMSEA <= 0.05 0.137
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.026
##
## Parameter Estimates:
##
## Information Observed
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## psysfun =~
## ACTIV 1.000 13.496 0.677
## SOMA 0.868 0.050 17.527 0.000 11.715 0.740
## PAIN 0.150 0.018 8.480 0.000 2.027 0.620
## mentfun =~
## MENTH 1.000 4.705 0.849
## SOCF 1.850 0.074 25.077 0.000 8.705 0.930
## VITAL 2.357 0.109 21.659 0.000 11.089 0.802
## AGEF =~
## AGE 1.000 11.853 1.000
## GWB =~
## GENHLTH 1.000 7.975 0.943
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACTIV ~~
## .SOMA 87.987 17.805 4.942 0.000 87.987 0.563
## psysfun ~~
## mentfun 28.370 4.307 6.586 0.000 0.447 0.447
## AGEF -30.305 9.003 -3.366 0.001 -0.189 -0.189
## GWB 69.057 7.958 8.677 0.000 0.642 0.642
## mentfun ~~
## AGEF -5.290 2.627 -2.014 0.044 -0.095 -0.095
## GWB 25.179 2.280 11.044 0.000 0.671 0.671
## AGEF ~~
## GWB -9.664 4.502 -2.147 0.032 -0.102 -0.102
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACTIV 34.268 0.892 38.426 0.000 34.268 1.718
## .SOMA 21.936 0.708 30.975 0.000 21.936 1.385
## .PAIN 8.212 0.146 56.202 0.000 8.212 2.513
## .MENTH 7.060 0.248 28.497 0.000 7.060 1.274
## .SOCF 14.750 0.418 35.249 0.000 14.750 1.576
## .VITAL 16.016 0.618 25.906 0.000 16.016 1.159
## .AGE 33.210 0.530 62.651 0.000 33.210 2.802
## .GENHLTH 28.322 0.378 74.915 0.000 28.322 3.350
## psysfun 0.000 0.000 0.000
## mentfun 0.000 0.000 0.000
## AGEF 0.000 0.000 0.000
## GWB 0.000 0.000 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .AGE 0.000 0.000 0.000
## .GENHLTH 7.861 7.861 0.110
## .ACTIV 215.497 24.583 8.766 0.000 215.497 0.542
## .SOMA 113.524 15.800 7.185 0.000 113.524 0.453
## .PAIN 6.567 0.583 11.260 0.000 6.567 0.615
## .MENTH 8.548 0.782 10.935 0.000 8.548 0.279
## .SOCF 11.779 1.949 6.045 0.000 11.779 0.135
## .VITAL 68.148 5.272 12.926 0.000 68.148 0.357
## psysfun 182.151 29.416 6.192 0.000 1.000 1.000
## mentfun 22.140 1.948 11.367 0.000 1.000 1.000
## AGEF 140.490 8.885 15.811 0.000 1.000 1.000
## GWB 63.601 4.520 14.072 0.000 1.000 1.000
##
## R-Square:
## Estimate
## AGE 1.000
## GENHLTH 0.890
## ACTIV 0.458
## SOMA 0.547
## PAIN 0.385
## MENTH 0.721
## SOCF 0.865
## VITAL 0.643
##
## Modification Indices:
##
## lhs op rhs mi epc sepc.lv sepc.all sepc.nox
## 40 psysfun =~ MENTH 9.314 0.049 0.666 0.120 0.120
## 41 psysfun =~ SOCF 0.804 -0.024 -0.322 -0.034 -0.034
## 42 psysfun =~ VITAL 5.292 -0.099 -1.330 -0.096 -0.096
## 45 mentfun =~ ACTIV 3.843 0.311 1.464 0.073 0.073
## 46 mentfun =~ SOMA 5.317 -0.301 -1.419 -0.090 -0.090
## 47 mentfun =~ PAIN 0.793 0.040 0.190 0.058 0.058
## 50 AGEF =~ ACTIV 0.001 -0.001 -0.014 -0.001 -0.001
## 51 AGEF =~ SOMA 0.366 0.024 0.286 0.018 0.018
## 52 AGEF =~ PAIN 1.078 -0.013 -0.151 -0.046 -0.046
## 53 AGEF =~ MENTH 0.008 -0.001 -0.013 -0.002 -0.002
## 54 AGEF =~ SOCF 2.041 0.029 0.346 0.037 0.037
## 55 AGEF =~ VITAL 3.166 -0.061 -0.724 -0.052 -0.052
## 57 GWB =~ ACTIV 0.405 0.089 0.708 0.036 0.036
## 58 GWB =~ SOMA 0.132 -0.044 -0.348 -0.022 -0.022
## 59 GWB =~ PAIN 1.376 -0.107 -0.855 -0.262 -0.262
## 60 GWB =~ MENTH 0.173 0.013 0.105 0.019 0.019
## 61 GWB =~ SOCF 4.805 0.118 0.945 0.101 0.101
## 62 GWB =~ VITAL 9.278 -0.248 -1.976 -0.143 -0.143
## 64 ACTIV ~~ PAIN 0.551 -1.532 -1.532 -0.024 -0.024
## 65 ACTIV ~~ MENTH 14.761 6.982 6.982 0.063 0.063
## 66 ACTIV ~~ SOCF 4.522 -5.811 -5.811 -0.031 -0.031
## 67 ACTIV ~~ VITAL 0.171 2.040 2.040 0.007 0.007
## 68 ACTIV ~~ AGE 0.000 0.125 0.125 0.001 0.001
## 69 ACTIV ~~ GENHLTH 0.728 -3.400 -3.400 -0.020 -0.020
## 70 SOMA ~~ PAIN 0.551 1.330 1.330 0.026 0.026
## 71 SOMA ~~ MENTH 1.137 -1.490 -1.490 -0.017 -0.017
## 72 SOMA ~~ SOCF 0.167 0.865 0.865 0.006 0.006
## 73 SOMA ~~ VITAL 2.279 -5.726 -5.726 -0.026 -0.026
## 74 SOMA ~~ AGE 0.297 2.944 2.944 0.016 0.016
## 75 SOMA ~~ GENHLTH 2.053 4.753 4.753 0.036 0.036
## 76 PAIN ~~ MENTH 0.366 0.251 0.251 0.014 0.014
## 77 PAIN ~~ SOCF 0.008 -0.056 -0.056 -0.002 -0.002
## 78 PAIN ~~ VITAL 0.134 0.413 0.413 0.009 0.009
## 79 PAIN ~~ AGE 1.010 -1.680 -1.680 -0.043 -0.043
## 80 PAIN ~~ GENHLTH 1.880 -1.758 -1.758 -0.064 -0.064
## 81 MENTH ~~ SOCF 8.687 -5.760 -5.760 -0.111 -0.111
## 82 MENTH ~~ VITAL 4.025 4.230 4.230 0.055 0.055
## 83 MENTH ~~ AGE 0.119 0.606 0.606 0.009 0.009
## 84 MENTH ~~ GENHLTH 1.845 -1.358 -1.358 -0.029 -0.029
## 85 SOCF ~~ VITAL 0.279 2.218 2.218 0.017 0.017
## 86 SOCF ~~ AGE 1.566 3.530 3.530 0.032 0.032
## 87 SOCF ~~ GENHLTH 9.929 5.308 5.308 0.067 0.067
## 88 VITAL ~~ AGE 4.178 -9.674 -9.674 -0.059 -0.059
## 89 VITAL ~~ GENHLTH 5.425 -6.083 -6.083 -0.052 -0.052
+ the case where an indicator was specified to load on only one factor, but it is more salient for it to load on two or more factors (an omitted cross-loading)
+ specification searches based on modification indices are more likely to be successful when the model contains only minor misspecifications
+ when comparing Table 5.2 and Table 5.3, read the expected parameter change (EPC) values together with the MI values (see the short sketch below)
+ even when such minor misspecifications exist, the global fit indices can still come out acceptable
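In practice one usually sorts the modification indices and reads the MI and EPC columns together; a minimal sketch using the most recent fit (the column selection is illustrative):
mi <- modindices(fig4.3_fit)
head(mi[order(mi$mi, decreasing = TRUE),
        c("lhs", "op", "rhs", "mi", "epc", "sepc.all")], 10)  # 10 largest MIs with their EPCs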
# Data preparation for Figure 5.1
sds <- '2.06 1.52 1.92 1.41 1.73 1.77 2.49 2.27 2.68 1.75 2.57 2.66'
cors <- '
1.000
0.300 1.000
0.229 0.261 1.000
0.411 0.406 0.429 1.000
0.172 0.252 0.218 0.481 1.000
0.214 0.268 0.267 0.579 0.484 1.000
0.200 0.214 0.241 0.543 0.426 0.492 1.000
0.185 0.230 0.185 0.545 0.463 0.548 0.522 1.000
0.134 0.146 0.108 0.186 0.122 0.131 0.108 0.151 1.000
0.134 0.099 0.061 0.223 0.133 0.188 0.105 0.170 0.448 1.000
0.160 0.131 0.158 0.161 0.044 0.124 0.066 0.061 0.370 0.350 1.000
0.087 0.088 0.101 0.198 0.077 0.177 0.128 0.112 0.356 0.359 0.507 1.000'
covs <- getCov(cors, sds = sds, names = paste("x", 1:12, sep = ""))
## Table 5.3: without cross-loading x4
tab5.3_model <- '
# measurement model
copingm =~ x1 + x2 + x3 + x4
socialm =~ x5 + x6 + x7 + x8
enhancem =~ x9 + x10 + x11 + x12
# error covariance
x11 ~~ x12
'
tab5.3_fit <- cfa(tab5.3_model, sample.cov=covs,sample.nobs=500,mimic="mplus")
summary(tab5.3_fit, fit.measures = TRUE,standardized=TRUE)
## lavaan (0.5-23.1097) converged normally after 62 iterations
##
## Number of observations 500
##
## Estimator ML
## Minimum Function Test Statistic 61.658
## Degrees of freedom 50
## P-value (Chi-square) 0.125
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1664.026
## Degrees of freedom 66
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 0.993
## Tucker-Lewis Index (TLI) 0.990
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -11934.521
## Loglikelihood unrestricted model (H1) -11903.692
##
## Number of free parameters 40
## Akaike (AIC) 23949.042
## Bayesian (BIC) 24117.627
## Sample-size adjusted Bayesian (BIC) 23990.664
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.022
## 90 Percent Confidence Interval 0.000 0.038
## P-value RMSEA <= 0.05 0.999
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.030
##
## Parameter Estimates:
##
## Information Observed
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## copingm =~
## x1 1.000 0.887 0.431
## x2 0.744 0.102 7.311 0.000 0.660 0.435
## x3 0.973 0.131 7.456 0.000 0.864 0.450
## x4 1.516 0.163 9.284 0.000 1.345 0.955
## socialm =~
## x5 1.000 1.095 0.633
## x6 1.208 0.092 13.129 0.000 1.322 0.748
## x7 1.567 0.127 12.306 0.000 1.715 0.690
## x8 1.511 0.118 12.853 0.000 1.653 0.729
## enhancem =~
## x9 1.000 1.784 0.666
## x10 0.655 0.070 9.300 0.000 1.169 0.669
## x11 0.773 0.092 8.386 0.000 1.379 0.537
## x12 0.807 0.097 8.310 0.000 1.439 0.541
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x11 ~~
## .x12 1.476 0.301 4.908 0.000 1.476 0.305
## copingm ~~
## socialm 0.775 0.110 7.063 0.000 0.798 0.798
## enhancem 0.530 0.113 4.709 0.000 0.335 0.335
## socialm ~~
## enhancem 0.570 0.128 4.439 0.000 0.292 0.292
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x1 0.000 0.092 0.000 1.000 0.000 0.000
## .x2 0.000 0.068 0.000 1.000 0.000 0.000
## .x3 0.000 0.086 0.000 1.000 0.000 0.000
## .x4 0.000 0.063 0.000 1.000 0.000 0.000
## .x5 0.000 0.077 0.000 1.000 0.000 0.000
## .x6 0.000 0.079 0.000 1.000 0.000 0.000
## .x7 0.000 0.111 0.000 1.000 0.000 0.000
## .x8 0.000 0.101 0.000 1.000 0.000 0.000
## .x9 0.000 0.120 0.000 1.000 0.000 0.000
## .x10 0.000 0.078 0.000 1.000 0.000 0.000
## .x11 0.000 0.115 0.000 1.000 0.000 0.000
## .x12 0.000 0.119 0.000 1.000 0.000 0.000
## copingm 0.000 0.000 0.000
## socialm 0.000 0.000 0.000
## enhancem 0.000 0.000 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x1 3.448 0.224 15.374 0.000 3.448 0.814
## .x2 1.870 0.123 15.164 0.000 1.870 0.811
## .x3 2.933 0.192 15.302 0.000 2.933 0.797
## .x4 0.175 0.089 1.961 0.050 0.175 0.088
## .x5 1.789 0.130 13.806 0.000 1.789 0.599
## .x6 1.378 0.115 11.948 0.000 1.378 0.441
## .x7 3.246 0.249 13.057 0.000 3.246 0.525
## .x8 2.409 0.195 12.331 0.000 2.409 0.468
## .x9 3.986 0.399 9.991 0.000 3.986 0.556
## .x10 1.689 0.171 9.854 0.000 1.689 0.553
## .x11 4.691 0.373 12.591 0.000 4.691 0.712
## .x12 4.992 0.399 12.525 0.000 4.992 0.707
## copingm 0.787 0.164 4.796 0.000 1.000 1.000
## socialm 1.198 0.164 7.307 0.000 1.000 1.000
## enhancem 3.182 0.487 6.528 0.000 1.000 1.000
modindices(tab5.3_fit) # expected change for Lambda_X : check the EPC column
## lhs op rhs mi epc sepc.lv sepc.all sepc.nox
## 47 copingm =~ x5 0.026 -0.028 -0.025 -0.014 -0.014
## 48 copingm =~ x6 0.443 0.121 0.107 0.061 0.061
## 49 copingm =~ x7 0.809 0.223 0.198 0.080 0.080
## 50 copingm =~ x8 1.947 -0.321 -0.285 -0.126 -0.126
## 51 copingm =~ x9 0.763 -0.132 -0.117 -0.044 -0.044
## 52 copingm =~ x10 0.185 0.042 0.038 0.022 0.022
## 53 copingm =~ x11 0.164 -0.051 -0.045 -0.018 -0.018
## 54 copingm =~ x12 0.769 0.115 0.102 0.038 0.038
## 55 socialm =~ x1 6.644 -0.508 -0.556 -0.270 -0.270
## 56 socialm =~ x2 0.030 -0.025 -0.028 -0.018 -0.018
## 57 socialm =~ x3 1.924 -0.257 -0.282 -0.147 -0.147
## 58 socialm =~ x4 18.074 1.290 1.412 1.003 1.003
## 59 socialm =~ x9 0.354 -0.074 -0.081 -0.030 -0.030
## 60 socialm =~ x10 0.533 0.060 0.065 0.037 0.037
## 61 socialm =~ x11 1.538 -0.131 -0.143 -0.056 -0.056
## 62 socialm =~ x12 1.167 0.118 0.129 0.049 0.049
## 63 enhancem =~ x1 1.896 0.085 0.152 0.074 0.074
## 64 enhancem =~ x2 0.896 0.043 0.077 0.051 0.051
## 65 enhancem =~ x3 0.022 0.009 0.015 0.008 0.008
## 66 enhancem =~ x4 2.166 -0.082 -0.146 -0.104 -0.104
## 67 enhancem =~ x5 0.190 -0.021 -0.037 -0.021 -0.021
## 68 enhancem =~ x6 1.372 0.054 0.097 0.055 0.055
## 69 enhancem =~ x7 1.064 -0.069 -0.122 -0.049 -0.049
## 70 enhancem =~ x8 0.013 0.007 0.012 0.005 0.005
## 71 x1 ~~ x2 10.393 0.381 0.381 0.122 0.122
## 72 x1 ~~ x3 1.024 0.150 0.150 0.038 0.038
## 73 x1 ~~ x4 0.028 -0.020 -0.020 -0.007 -0.007
## 74 x1 ~~ x5 0.450 -0.080 -0.080 -0.023 -0.023
## 75 x1 ~~ x6 0.470 -0.077 -0.077 -0.021 -0.021
## 76 x1 ~~ x7 0.284 -0.088 -0.088 -0.017 -0.017
## 77 x1 ~~ x8 1.536 -0.181 -0.181 -0.039 -0.039
## 78 x1 ~~ x9 0.502 0.136 0.136 0.025 0.025
## 79 x1 ~~ x10 0.045 0.027 0.027 0.007 0.007
## 80 x1 ~~ x11 4.992 0.399 0.399 0.076 0.076
## 81 x1 ~~ x12 1.939 -0.257 -0.257 -0.047 -0.047
## 82 x2 ~~ x3 3.589 0.207 0.207 0.071 0.071
## 83 x2 ~~ x4 7.055 -0.234 -0.234 -0.110 -0.110
## 84 x2 ~~ x5 1.455 0.106 0.106 0.040 0.040
## 85 x2 ~~ x6 0.111 0.027 0.027 0.010 0.010
## 86 x2 ~~ x7 1.122 -0.129 -0.129 -0.034 -0.034
## 87 x2 ~~ x8 0.362 -0.065 -0.065 -0.019 -0.019
## 88 x2 ~~ x9 2.188 0.209 0.209 0.051 0.051
## 89 x2 ~~ x10 0.704 -0.077 -0.077 -0.029 -0.029
## 90 x2 ~~ x11 2.066 0.189 0.189 0.049 0.049
## 91 x2 ~~ x12 0.876 -0.127 -0.127 -0.032 -0.032
## 92 x3 ~~ x4 0.069 -0.030 -0.030 -0.011 -0.011
## 93 x3 ~~ x5 0.028 0.018 0.018 0.006 0.006
## 94 x3 ~~ x6 0.234 0.050 0.050 0.015 0.015
## 95 x3 ~~ x7 0.013 0.017 0.017 0.004 0.004
## 96 x3 ~~ x8 5.219 -0.308 -0.308 -0.071 -0.071
## 97 x3 ~~ x9 0.269 0.092 0.092 0.018 0.018
## 98 x3 ~~ x10 3.393 -0.213 -0.213 -0.063 -0.063
## 99 x3 ~~ x11 5.931 0.402 0.402 0.082 0.082
## 100 x3 ~~ x12 0.320 -0.096 -0.096 -0.019 -0.019
## 101 x4 ~~ x5 0.100 -0.020 -0.020 -0.008 -0.008
## 102 x4 ~~ x6 0.081 0.018 0.018 0.007 0.007
## 103 x4 ~~ x7 2.223 0.134 0.134 0.038 0.038
## 104 x4 ~~ x8 0.139 0.030 0.030 0.010 0.010
## 105 x4 ~~ x9 2.595 -0.160 -0.160 -0.043 -0.043
## 106 x4 ~~ x10 0.488 0.045 0.045 0.018 0.018
## 107 x4 ~~ x11 1.371 -0.103 -0.103 -0.028 -0.028
## 108 x4 ~~ x12 1.145 0.097 0.097 0.026 0.026
## 109 x5 ~~ x6 0.367 0.057 0.057 0.019 0.019
## 110 x5 ~~ x7 0.280 -0.071 -0.071 -0.016 -0.016
## 111 x5 ~~ x8 0.005 0.008 0.008 0.002 0.002
## 112 x5 ~~ x9 0.452 0.098 0.098 0.021 0.021
## 113 x5 ~~ x10 0.097 0.030 0.030 0.010 0.010
## 114 x5 ~~ x11 0.723 -0.115 -0.115 -0.026 -0.026
## 115 x5 ~~ x12 0.739 -0.120 -0.120 -0.026 -0.026
## 116 x6 ~~ x7 2.456 -0.211 -0.211 -0.048 -0.048
## 117 x6 ~~ x8 0.044 0.026 0.026 0.007 0.007
## 118 x6 ~~ x9 1.287 -0.154 -0.154 -0.033 -0.033
## 119 x6 ~~ x10 0.828 0.081 0.081 0.026 0.026
## 120 x6 ~~ x11 0.124 0.044 0.044 0.010 0.010
## 121 x6 ~~ x12 1.530 0.160 0.160 0.034 0.034
## 122 x7 ~~ x8 1.436 0.207 0.207 0.037 0.037
## 123 x7 ~~ x9 0.010 -0.020 -0.020 -0.003 -0.003
## 124 x7 ~~ x10 1.715 -0.171 -0.171 -0.039 -0.039
## 125 x7 ~~ x11 0.406 -0.118 -0.118 -0.019 -0.019
## 126 x7 ~~ x12 0.685 0.159 0.159 0.024 0.024
## 127 x8 ~~ x9 0.930 0.171 0.171 0.028 0.028
## 128 x8 ~~ x10 0.684 0.096 0.096 0.024 0.024
## 129 x8 ~~ x11 1.585 -0.206 -0.206 -0.035 -0.035
## 130 x8 ~~ x12 0.233 -0.082 -0.082 -0.014 -0.014
## 131 x9 ~~ x10 0.127 0.153 0.153 0.033 0.033
## 132 x9 ~~ x11 0.669 0.241 0.241 0.035 0.035
## 133 x9 ~~ x12 0.261 -0.157 -0.157 -0.022 -0.022
## 134 x10 ~~ x11 0.243 -0.095 -0.095 -0.021 -0.021
## 135 x10 ~~ x12 0.000 -0.004 -0.004 -0.001 -0.001
standardizedSolution(tab5.3_fit)
## lhs op rhs est.std se z pvalue
## 1 copingm =~ x1 0.431 0.039 11.015 0.000
## 2 copingm =~ x2 0.435 0.040 10.811 0.000
## 3 copingm =~ x3 0.450 0.039 11.689 0.000
## 4 copingm =~ x4 0.955 0.024 40.206 0.000
## 5 socialm =~ x5 0.633 0.032 20.065 0.000
## 6 socialm =~ x6 0.748 0.025 29.339 0.000
## 7 socialm =~ x7 0.690 0.029 24.077 0.000
## 8 socialm =~ x8 0.729 0.027 27.465 0.000
## 9 enhancem =~ x9 0.666 0.041 16.320 0.000
## 10 enhancem =~ x10 0.669 0.041 16.287 0.000
## 11 enhancem =~ x11 0.537 0.045 11.928 0.000
## 12 enhancem =~ x12 0.541 0.045 12.071 0.000
## 13 x11 ~~ x12 0.305 0.050 6.155 0.000
## 14 x1 ~~ x1 0.814 0.034 24.131 0.000
## 15 x2 ~~ x2 0.811 0.035 23.176 0.000
## 16 x3 ~~ x3 0.797 0.035 22.989 0.000
## 17 x4 ~~ x4 0.088 0.045 1.948 0.051
## 18 x5 ~~ x5 0.599 0.040 14.982 0.000
## 19 x6 ~~ x6 0.441 0.038 11.564 0.000
## 20 x7 ~~ x7 0.525 0.039 13.281 0.000
## 21 x8 ~~ x8 0.468 0.039 12.101 0.000
## 22 x9 ~~ x9 0.556 0.054 10.222 0.000
## 23 x10 ~~ x10 0.553 0.055 10.062 0.000
## 24 x11 ~~ x11 0.712 0.048 14.721 0.000
## 25 x12 ~~ x12 0.707 0.049 14.556 0.000
## 26 copingm ~~ copingm 1.000 0.000 NA NA
## 27 socialm ~~ socialm 1.000 0.000 NA NA
## 28 enhancem ~~ enhancem 1.000 0.000 NA NA
## 29 copingm ~~ socialm 0.798 0.029 27.895 0.000
## 30 copingm ~~ enhancem 0.335 0.053 6.371 0.000
## 31 socialm ~~ enhancem 0.292 0.057 5.136 0.000
## 32 x1 ~1 0.000 0.045 0.000 1.000
## 33 x2 ~1 0.000 0.045 0.000 1.000
## 34 x3 ~1 0.000 0.045 0.000 1.000
## 35 x4 ~1 0.000 0.045 0.000 1.000
## 36 x5 ~1 0.000 0.045 0.000 1.000
## 37 x6 ~1 0.000 0.045 0.000 1.000
## 38 x7 ~1 0.000 0.045 0.000 1.000
## 39 x8 ~1 0.000 0.045 0.000 1.000
## 40 x9 ~1 0.000 0.045 0.000 1.000
## 41 x10 ~1 0.000 0.045 0.000 1.000
## 42 x11 ~1 0.000 0.045 0.000 1.000
## 43 x12 ~1 0.000 0.045 0.000 1.000
## 44 copingm ~1 0.000 0.000 NA NA
## 45 socialm ~1 0.000 0.000 NA NA
## 46 enhancem ~1 0.000 0.000 NA NA
table5.4_model <- '
# measurement model
copingm =~ x1 + x2 + x3 + x4
socialm =~ x4 +x5 + x6 + x7 + x8 + x12
enhancem =~ x9 + x10 + x11
# error covariance
x11 ~~ x12
'
table5.4_fit <- cfa(table5.4_model, sample.cov = covs, sample.nobs = 500, mimic = "mplus")
summary(table5.4_fit, fit.measures = TRUE,standardized=TRUE)
## lavaan (0.5-23.1097) converged normally after 68 iterations
##
## Number of observations 500
##
## Estimator ML
## Minimum Function Test Statistic 126.590
## Degrees of freedom 49
## P-value (Chi-square) 0.000
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1664.026
## Degrees of freedom 66
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 0.951
## Tucker-Lewis Index (TLI) 0.935
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -11966.987
## Loglikelihood unrestricted model (H1) -11903.692
##
## Number of free parameters 41
## Akaike (AIC) 24015.974
## Bayesian (BIC) 24188.773
## Sample-size adjusted Bayesian (BIC) 24058.637
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.056
## 90 Percent Confidence Interval 0.044 0.068
## P-value RMSEA <= 0.05 0.185
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.057
##
## Parameter Estimates:
##
## Information Observed
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## copingm =~
## x1 1.000 1.062 0.516
## x2 0.740 0.093 7.988 0.000 0.786 0.518
## x3 0.931 0.120 7.756 0.000 0.989 0.516
## x4 0.703 0.117 5.995 0.000 0.747 0.530
## socialm =~
## x4 1.000 0.629 0.447
## x5 1.729 0.260 6.658 0.000 1.088 0.630
## x6 2.099 0.303 6.922 0.000 1.321 0.747
## x7 2.722 0.398 6.842 0.000 1.713 0.689
## x8 2.628 0.382 6.886 0.000 1.654 0.729
## x12 0.864 0.221 3.914 0.000 0.543 0.205
## enhancem =~
## x9 1.000 1.789 0.668
## x10 0.645 0.088 7.350 0.000 1.154 0.660
## x11 0.525 0.086 6.093 0.000 0.939 0.382
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x12 ~~
## .x11 2.581 0.313 8.241 0.000 2.581 0.437
## copingm ~~
## socialm 0.407 0.072 5.668 0.000 0.608 0.608
## enhancem 0.670 0.154 4.356 0.000 0.352 0.352
## socialm ~~
## enhancem 0.363 0.090 4.016 0.000 0.322 0.322
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x1 0.000 0.092 0.000 1.000 0.000 0.000
## .x2 0.000 0.068 0.000 1.000 0.000 0.000
## .x3 0.000 0.086 0.000 1.000 0.000 0.000
## .x4 0.000 0.063 0.000 1.000 0.000 0.000
## .x5 0.000 0.077 0.000 1.000 0.000 0.000
## .x6 0.000 0.079 0.000 1.000 0.000 0.000
## .x7 0.000 0.111 0.000 1.000 0.000 0.000
## .x8 0.000 0.101 0.000 1.000 0.000 0.000
## .x12 0.000 0.119 0.000 1.000 0.000 0.000
## .x9 0.000 0.120 0.000 1.000 0.000 0.000
## .x10 0.000 0.078 0.000 1.000 0.000 0.000
## .x11 0.000 0.110 0.000 1.000 0.000 0.000
## copingm 0.000 0.000 0.000
## socialm 0.000 0.000 0.000
## enhancem 0.000 0.000 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x1 3.106 0.232 13.397 0.000 3.106 0.733
## .x2 1.688 0.127 13.272 0.000 1.688 0.732
## .x3 2.700 0.198 13.670 0.000 2.700 0.734
## .x4 0.459 0.070 6.575 0.000 0.459 0.231
## .x5 1.803 0.130 13.875 0.000 1.803 0.603
## .x6 1.382 0.115 12.025 0.000 1.382 0.442
## .x7 3.253 0.248 13.117 0.000 3.253 0.526
## .x8 2.407 0.194 12.381 0.000 2.407 0.468
## .x12 6.748 0.431 15.665 0.000 6.748 0.958
## .x9 3.966 0.484 8.193 0.000 3.966 0.553
## .x10 1.726 0.205 8.434 0.000 1.726 0.565
## .x11 5.177 0.387 13.365 0.000 5.177 0.854
## copingm 1.129 0.220 5.135 0.000 1.000 1.000
## socialm 0.396 0.110 3.591 0.000 1.000 1.000
## enhancem 3.202 0.560 5.714 0.000 1.000 1.000
modindices(table5.4_fit) # expected change for Lambda_X : check the EPC column
## lhs op rhs mi epc sepc.lv sepc.all sepc.nox
## 48 copingm =~ x5 0.135 0.043 0.046 0.027 0.027
## 49 copingm =~ x6 0.778 0.106 0.112 0.063 0.063
## 50 copingm =~ x7 0.177 0.071 0.075 0.030 0.030
## 51 copingm =~ x8 3.620 -0.291 -0.309 -0.136 -0.136
## 52 copingm =~ x12 1.117 0.194 0.206 0.078 0.078
## 53 copingm =~ x9 0.981 -0.175 -0.186 -0.070 -0.070
## 54 copingm =~ x10 0.337 -0.066 -0.070 -0.040 -0.040
## 55 copingm =~ x11 4.599 0.272 0.289 0.117 0.117
## 56 socialm =~ x1 1.987 -0.394 -0.248 -0.121 -0.121
## 57 socialm =~ x2 0.676 0.170 0.107 0.070 0.070
## 58 socialm =~ x3 0.344 0.153 0.096 0.050 0.050
## 59 socialm =~ x9 1.216 -0.294 -0.185 -0.069 -0.069
## 60 socialm =~ x10 0.675 0.141 0.089 0.051 0.051
## 61 socialm =~ x11 0.170 0.084 0.053 0.021 0.021
## 62 enhancem =~ x1 1.031 0.069 0.124 0.060 0.060
## 63 enhancem =~ x2 0.290 0.027 0.049 0.032 0.032
## 64 enhancem =~ x3 0.277 -0.034 -0.060 -0.031 -0.031
## 65 enhancem =~ x4 0.640 -0.030 -0.054 -0.038 -0.038
## 66 enhancem =~ x5 0.094 -0.015 -0.027 -0.016 -0.016
## 67 enhancem =~ x6 0.090 0.014 0.026 0.014 0.014
## 68 enhancem =~ x7 2.603 -0.111 -0.199 -0.080 -0.080
## 69 enhancem =~ x8 0.003 -0.003 -0.006 -0.003 -0.003
## 70 enhancem =~ x12 77.160 0.786 1.406 0.530 0.530
## 71 x1 ~~ x2 1.738 0.178 0.178 0.057 0.057
## 72 x1 ~~ x3 2.238 -0.255 -0.255 -0.065 -0.065
## 73 x1 ~~ x4 0.807 0.101 0.101 0.035 0.035
## 74 x1 ~~ x5 0.265 -0.061 -0.061 -0.017 -0.017
## 75 x1 ~~ x6 0.146 -0.042 -0.042 -0.012 -0.012
## 76 x1 ~~ x7 0.013 -0.019 -0.019 -0.004 -0.004
## 77 x1 ~~ x8 0.371 -0.088 -0.088 -0.019 -0.019
## 78 x1 ~~ x12 0.591 -0.150 -0.150 -0.028 -0.028
## 79 x1 ~~ x9 0.027 0.032 0.032 0.006 0.006
## 80 x1 ~~ x10 0.010 0.013 0.013 0.004 0.004
## 81 x1 ~~ x11 2.772 0.294 0.294 0.058 0.058
## 82 x2 ~~ x3 0.060 -0.031 -0.031 -0.011 -0.011
## 83 x2 ~~ x4 3.167 -0.148 -0.148 -0.069 -0.069
## 84 x2 ~~ x5 2.449 0.136 0.136 0.052 0.052
## 85 x2 ~~ x6 0.603 0.063 0.063 0.024 0.024
## 86 x2 ~~ x7 0.427 -0.078 -0.078 -0.021 -0.021
## 87 x2 ~~ x8 0.017 0.014 0.014 0.004 0.004
## 88 x2 ~~ x12 0.377 -0.089 -0.089 -0.022 -0.022
## 89 x2 ~~ x9 1.052 0.149 0.149 0.037 0.037
## 90 x2 ~~ x10 1.189 -0.103 -0.103 -0.039 -0.039
## 91 x2 ~~ x11 0.760 0.113 0.113 0.030 0.030
## 92 x3 ~~ x4 1.563 0.131 0.131 0.049 0.049
## 93 x3 ~~ x5 0.168 0.045 0.045 0.014 0.014
## 94 x3 ~~ x6 0.831 0.094 0.094 0.028 0.028
## 95 x3 ~~ x7 0.373 0.093 0.093 0.019 0.019
## 96 x3 ~~ x8 2.960 -0.231 -0.231 -0.053 -0.053
## 97 x3 ~~ x12 0.529 -0.133 -0.133 -0.026 -0.026
## 98 x3 ~~ x9 0.021 0.026 0.026 0.005 0.005
## 99 x3 ~~ x10 3.844 -0.235 -0.235 -0.070 -0.070
## 100 x3 ~~ x11 3.974 0.328 0.328 0.069 0.069
## 101 x4 ~~ x5 0.387 -0.040 -0.040 -0.016 -0.016
## 102 x4 ~~ x6 0.034 -0.012 -0.012 -0.005 -0.005
## 103 x4 ~~ x7 1.149 0.098 0.098 0.028 0.028
## 104 x4 ~~ x8 0.117 -0.029 -0.029 -0.009 -0.009
## 105 x4 ~~ x12 0.362 0.059 0.059 0.016 0.016
## 106 x4 ~~ x9 1.780 -0.134 -0.134 -0.035 -0.035
## 107 x4 ~~ x10 0.696 0.054 0.054 0.022 0.022
## 108 x4 ~~ x11 0.200 -0.039 -0.039 -0.011 -0.011
## 109 x5 ~~ x6 0.601 0.072 0.072 0.024 0.024
## 110 x5 ~~ x7 0.139 -0.050 -0.050 -0.012 -0.012
## 111 x5 ~~ x8 0.041 0.024 0.024 0.006 0.006
## 112 x5 ~~ x12 1.854 -0.207 -0.207 -0.045 -0.045
## 113 x5 ~~ x9 0.028 0.025 0.025 0.005 0.005
## 114 x5 ~~ x10 0.069 -0.026 -0.026 -0.009 -0.009
## 115 x5 ~~ x11 0.296 -0.073 -0.073 -0.017 -0.017
## 116 x6 ~~ x7 2.154 -0.196 -0.196 -0.045 -0.045
## 117 x6 ~~ x8 0.054 0.029 0.029 0.007 0.007
## 118 x6 ~~ x12 0.122 0.050 0.050 0.011 0.011
## 119 x6 ~~ x9 1.314 -0.160 -0.160 -0.034 -0.034
## 120 x6 ~~ x10 0.718 0.077 0.077 0.025 0.025
## 121 x6 ~~ x11 0.660 0.102 0.102 0.023 0.023
## 122 x7 ~~ x8 1.484 0.208 0.208 0.037 0.037
## 123 x7 ~~ x12 0.202 -0.094 -0.094 -0.014 -0.014
## 124 x7 ~~ x9 0.046 -0.044 -0.044 -0.007 -0.007
## 125 x7 ~~ x10 2.306 -0.204 -0.204 -0.047 -0.047
## 126 x7 ~~ x11 0.044 -0.039 -0.039 -0.006 -0.006
## 127 x8 ~~ x12 1.153 -0.199 -0.199 -0.033 -0.033
## 128 x8 ~~ x9 0.258 0.092 0.092 0.015 0.015
## 129 x8 ~~ x10 0.070 0.031 0.031 0.008 0.008
## 130 x8 ~~ x11 0.686 -0.135 -0.135 -0.024 -0.024
## 131 x12 ~~ x9 21.004 1.221 1.221 0.172 0.172
## 132 x12 ~~ x10 22.767 0.830 0.830 0.179 0.179
## 133 x9 ~~ x10 2.377 0.996 0.996 0.213 0.213
## 134 x9 ~~ x11 0.001 0.013 0.013 0.002 0.002
## 135 x10 ~~ x11 1.339 -0.286 -0.286 -0.066 -0.066
resid(table5.4_fit, type = "standardized")$cov
## x1 x2 x3 x4 x5 x6 x7 x8 x12 x9
## x1 0.000
## x2 1.363 NA
## x3 -1.461 -0.241 NA
## x4 -0.358 -0.999 6.638 NA
## x5 -0.755 1.561 0.601 -0.210 0.000
## x6 -0.678 1.085 1.079 0.375 0.761 0.000
## x7 -0.500 -0.092 0.772 0.924 -0.376 -1.531 0.000
## x8 -1.416 0.009 -1.456 -1.357 0.203 0.233 1.185 NA
## x12 0.546 0.567 0.883 1.492 -1.563 0.855 -0.415 -1.290 0.296
## x9 0.350 0.679 -0.378 -1.557 -0.395 -1.004 -1.248 -0.197 6.884 0.000
## x10 0.390 -0.602 -1.647 0.217 -0.026 1.008 -1.330 0.503 6.965 NA
## x11 2.242 1.550 2.191 1.100 -0.743 0.913 -0.380 -0.635 4.862 5.292
## x10 x11
## x1
## x2
## x3
## x4
## x5
## x6
## x7
## x8
## x12
## x9
## x10 0.000
## x11 4.301 2.515
2. The factor loading of x12 is small.
\[ \begin{aligned} \text{observed correlation between x11 and x12} &= .507 \\ \text{In the correct solution:} \\ \text{reproduced correlation} + \text{correlated error} &= \lambda_{11,3}\lambda_{12,3} + \delta_{12,11} \\ &= .542(.541)+.214 \\ &= .293+.214 \\ &= .507 \end{aligned} \]
However, because this misspecified model assigns X12 to a different factor, the .293 portion coming from the loadings cannot be reproduced.
As a result, the observed value has to be reproduced through the \(\delta_{12,11}\) estimate alone, which is where the problem arises.
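The same arithmetic can be pulled out of the fitted correct model (Table 5.3); a minimal sketch (l11, l12, and rcov are illustrative names; 2.57 and 2.66 are the SDs of x11 and x12 given above):
pe   <- parameterEstimates(tab5.3_fit, standardized = TRUE)
l11  <- pe$std.all[pe$op == "=~" & pe$rhs == "x11"]                # ~ .537
l12  <- pe$std.all[pe$op == "=~" & pe$rhs == "x12"]                # ~ .541
rcov <- pe$est[pe$op == "~~" & pe$lhs == "x11" & pe$rhs == "x12"]  # 1.476, unstandardized residual covariance
l11 * l12 + rcov / (2.57 * 2.66)                                   # ~ .51, close to the observed correlation .507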
Models should be compared in terms of overall goodness of fit, focal areas of ill fit, and the interpretability/strength of the parameter estimates. AIC and BIC provide a way to compare models even when they are not nested.
Both add a penalty to the -2*log-likelihood (-2LL); they index the complexity-parsimony trade-off, and smaller values are better.
The penalty imposed on model complexity is larger for BIC than for AIC (a quick comparison is sketched below).
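For the two competing specifications fit above (the correct Table 5.3 model and the misspecified Table 5.4 model), these criteria can be read straight from the fitted objects; a minimal sketch:
rbind(table5.3 = fitMeasures(tab5.3_fit,   c("aic", "bic", "bic2")),
      table5.4 = fitMeasures(table5.4_fit, c("aic", "bic", "bic2")))  # smaller values are preferred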
Heywood cases (offending estimates): the most common form is a negative error variance estimate. Both the input variance-covariance matrix and the model-implied variance-covariance matrix should be positive definite; the determinants of the input matrix, the model-implied matrix, and every principal submatrix must be greater than 0.
# Table 5.6
library(psych)
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
## The following object is masked from 'package:lavaan':
##
## cor2cov
table5.6_data <- read.delim("http://quantpsy.cau.ac.kr/wp-content/data/tabrown_cfa/efa.dat",sep="",header=F)
table5.6_data <- table5.6_data[,-13]
names(table5.6_data) <- paste("x", 1:12, sep = "")
table5.6_result <- psych::fa(table5.6_data,nfactors=3,rotate="promax")
## Loading required namespace: GPArotation
table5.6_result$e.values # eigen values
## [1] 3.8756999 1.9056307 1.1504269 0.8369832 0.7215895 0.6690601 0.5762896
## [8] 0.5571568 0.4872959 0.4708105 0.4261969 0.3228602
summary(table5.6_result)
##
## Factor analysis with Call: psych::fa(r = table5.6_data, nfactors = 3, rotate = "promax")
##
## Test of the hypothesis that 3 factors are sufficient.
## The degrees of freedom for the model is 33 and the objective function was 0.11
## The number of observations was 500 with Chi Square = 56.11 with prob < 0.0073
##
## The root mean square of the residuals (RMSA) is 0.02
## The df corrected root mean square of the residuals is 0.03
##
## Tucker Lewis Index of factoring reliability = 0.971
## RMSEA index = 0.038 and the 10 % confidence intervals are 0.019 0.054
## BIC = -148.97
## With factor correlations of
## MR1 MR2 MR3
## MR1 1.00 0.27 0.59
## MR2 0.27 1.00 0.35
## MR3 0.59 0.35 1.00
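The rotated pattern loadings themselves (the body of Table 5.6) can be printed from the same psych::fa object; a short sketch:
print(table5.6_result$loadings, cutoff = 0.30, sort = TRUE)  # promax pattern matrix, |loading| < .30 suppressed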
# Table 5.7: starting values need to be supplied
table5.7_data <- read.delim("http://quantpsy.cau.ac.kr/wp-content/data/tabrown_cfa/efa.dat",sep="",header=F)
names(table5.7_data) <- paste("x", 1:13, sep = "") # x13 is not used in the model
table5.7_model <- '
# Factor
coping =~ NA*x1 + start(0.5)*x2 + start(0.5)*x3 + start(0.5)*x4 + start(0.5)*x5 + start(0.5)*x6 + start(0.5)*x7 + 0*x8 + start(0.5)*x9 + start(0.5)*x10 + start(0.5)*x11 + 0*x12
social =~ NA*x8 + 0*x1 + start(0.5)*x2 + start(0.5)*x3 + start(0.5)*x4 + start(0.5)*x5 + start(0.5)*x6 + start(0.5)*x7 + start(0.5)*x9 + start(0.5)*x10 + start(0.5)*x11 + 0*x12
enhance =~ NA*x12 + 0*x1 + start(0.5)*x2 + start(0.5)*x3 + start(0.5)*x4 + start(0.5)*x5 + start(0.5)*x6 + start(0.5)*x7 + 0*x8 + start(0.5)*x9 + start(0.5)*x10 + start(0.5)*x11
# Fixing Factor Variance to 1
coping ~~ 1*coping
social ~~ 1*social
enhance ~~ 1*enhance
'
table5.7_fit <- cfa(data = table5.7_data,model = table5.7_model, control=list(iter.max=10^5,
rel.tol=1e-4))
summary(table5.7_fit)
## lavaan (0.5-23.1097) converged normally after 19 iterations
##
## Number of observations 500
##
## Estimator ML
## Minimum Function Test Statistic 55.548
## Degrees of freedom 33
## P-value (Chi-square) 0.008
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|)
## coping =~
## x1 0.613 0.060 10.277 0.000
## x2 0.514 0.075 6.887 0.000
## x3 0.487 0.074 6.596 0.000
## x4 0.535 0.068 7.856 0.000
## x5 0.096 0.068 1.420 0.156
## x6 0.113 0.067 1.675 0.094
## x7 0.109 0.071 1.539 0.124
## x8 0.000
## x9 0.032 0.075 0.434 0.664
## x10 -0.009 0.076 -0.114 0.910
## x11 0.150 0.076 1.966 0.049
## x12 0.000
## social =~
## x8 0.791 0.046 17.226 0.000
## x1 0.000
## x2 0.102 0.076 1.349 0.177
## x3 0.106 0.074 1.429 0.153
## x4 0.548 0.066 8.255 0.000
## x5 0.574 0.055 10.461 0.000
## x6 0.652 0.053 12.285 0.000
## x7 0.650 0.056 11.518 0.000
## x9 0.021 0.064 0.321 0.748
## x10 0.084 0.065 1.297 0.195
## x11 -0.157 0.068 -2.298 0.022
## x12 0.000
## enhance =~
## x12 0.671 0.048 13.931 0.000
## x1 0.000
## x2 -0.029 0.067 -0.427 0.670
## x3 -0.019 0.066 -0.279 0.780
## x4 -0.023 0.062 -0.361 0.718
## x5 -0.042 0.053 -0.787 0.431
## x6 0.034 0.051 0.672 0.502
## x7 -0.039 0.054 -0.720 0.472
## x8 0.000
## x9 0.581 0.056 10.440 0.000
## x10 0.580 0.056 10.311 0.000
## x11 0.692 0.057 12.096 0.000
##
## Covariances:
## Estimate Std.Err z-value P(>|z|)
## coping ~~
## social 0.399 0.100 3.979 0.000
## enhance 0.313 0.105 2.995 0.003
## social ~~
## enhance 0.254 0.081 3.143 0.002
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## coping 1.000
## social 1.000
## enhance 1.000
## .x1 0.791 0.070 11.285 0.000
## .x2 0.762 0.059 12.868 0.000
## .x3 0.787 0.059 13.302 0.000
## .x4 0.276 0.035 7.780 0.000
## .x5 0.562 0.041 13.725 0.000
## .x6 0.427 0.035 12.143 0.000
## .x7 0.536 0.041 12.980 0.000
## .x8 0.477 0.047 10.107 0.000
## .x9 0.685 0.054 12.801 0.000
## .x10 0.694 0.055 12.723 0.000
## .x11 0.534 0.054 9.853 0.000
## .x12 0.538 0.051 10.576 0.000
Mplus usage
| Number | Method | Trait | Represents | Meaning |
|---|---|---|---|---|
| (1) | same (diagonal) | same (diagonal) | Cronbach's alpha | (only this block reflects reliability) |
| (2) | same (diagonal) | different (off-diagonal) | Method effect | correlation attributable to the shared measurement method; compare with (3) |
| (3) | different (off-diagonal) | same (diagonal) | Convergent validity | measures of the same trait should correlate highly even when different methods are used |
| (4) | different (off-diagonal) | different (off-diagonal) | Discriminant validity | different traits measured by different methods should not correlate highly |
sds_7.1 <- '2.610 2.660 2.590 1.940 2.030 2.050'
cors_7.1 <-'
1.000
0.661 1.000
0.630 0.643 1.000
0.270 0.300 0.268 1.000
0.297 0.265 0.225 0.805 1.000
0.290 0.287 0.248 0.796 0.779 1.000'
covs_7.1 <- getCov(cors_7.1, sds = sds_7.1, names = paste("x", 1:6, sep = ""))
model_7.1 <- '
auditoryM =~ x1 + x2 + x3
visualM =~ x4 + x5 + x6
'
model_7.1_fit <- cfa(model_7.1, sample.cov = covs_7.1, sample.nobs = 200, std.lv = TRUE)
summary(model_7.1_fit, fit.measures = TRUE, standardized = TRUE, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after 22 iterations
##
## Number of observations 200
##
## Estimator ML
## Minimum Function Test Statistic 4.877
## Degrees of freedom 8
## P-value (Chi-square) 0.771
##
## Model test baseline model:
##
## Minimum Function Test Statistic 719.515
## Degrees of freedom 15
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.008
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -2337.980
## Loglikelihood unrestricted model (H1) -2335.541
##
## Number of free parameters 13
## Akaike (AIC) 4701.959
## Bayesian (BIC) 4744.837
## Sample-size adjusted Bayesian (BIC) 4703.652
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent Confidence Interval 0.000 0.057
## P-value RMSEA <= 0.05 0.929
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.012
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## auditoryM =~
## x1 2.101 0.166 12.663 0.000 2.101 0.807
## x2 2.182 0.168 12.976 0.000 2.182 0.823
## x3 2.013 0.166 12.124 0.000 2.013 0.779
## visualM =~
## x4 1.756 0.108 16.183 0.000 1.756 0.907
## x5 1.795 0.115 15.608 0.000 1.795 0.887
## x6 1.796 0.117 15.378 0.000 1.796 0.878
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## auditoryM ~~
## visualM 0.382 0.070 5.463 0.000 0.382 0.382
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .x1 2.366 0.372 6.365 0.000 2.366 0.349
## .x2 2.277 0.383 5.940 0.000 2.277 0.323
## .x3 2.621 0.373 7.027 0.000 2.621 0.393
## .x4 0.662 0.117 5.668 0.000 0.662 0.177
## .x5 0.877 0.134 6.554 0.000 0.877 0.214
## .x6 0.956 0.139 6.866 0.000 0.956 0.229
## auditoryM 1.000 1.000 1.000
## visualM 1.000 1.000 1.000
##
## R-Square:
## Estimate
## x1 0.651
## x2 0.677
## x3 0.607
## x4 0.823
## x5 0.786
## x6 0.771
# tau equivalent: auditory memory only
model.tau.a <- '
auditorymemory =~ x1 + v1*x1 + v1*x2 + v1*x3
visualmemory =~ x4 + x5 + x6
'
# tau equivalent: both auditory & visual memory
model.tau.av <- '
auditorymemory =~ x1 + v1*x1 + v1*x2 + v1*x3
visualmemory =~ x4 + v2*x4 + v2*x5 + v2*x6
'
# parallel: auditory memory only
model.parallel.a <- '
auditorymemory =~ x1 + v1*x1 + v1*x2 + v1*x3
visualmemory =~ x4 + v2*x4 + v2*x5 + v2*x6
x1 ~~ v3 * x1
x2 ~~ v3 * x2
x3 ~~ v3 * x3
'
# parallel: both auditory & visual memory
model.parallel.av <- '
auditorymemory =~ x1 + v1*x1 + v1*x2 + v1*x3
visualmemory =~ x4 + v2*x4 + v2*x5 + v2*x6
x1 ~~ v3 * x1
x2 ~~ v3 * x2
x3 ~~ v3 * x3
x4 ~~ v4 * x4
x5 ~~ v4 * x5
x6 ~~ v4 * x6
'
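The models above can be fit to the same input matrix and compared with nested chi-square difference tests; a minimal sketch (the fit-object names are illustrative, and std.lv = TRUE is assumed so that all models share the fixed-factor identification used for model_7.1_fit):
fit.tau.a       <- cfa(model.tau.a,       sample.cov = covs_7.1, sample.nobs = 200, std.lv = TRUE)
fit.tau.av      <- cfa(model.tau.av,      sample.cov = covs_7.1, sample.nobs = 200, std.lv = TRUE)
fit.parallel.a  <- cfa(model.parallel.a,  sample.cov = covs_7.1, sample.nobs = 200, std.lv = TRUE)
fit.parallel.av <- cfa(model.parallel.av, sample.cov = covs_7.1, sample.nobs = 200, std.lv = TRUE)
anova(model_7.1_fit, fit.tau.a, fit.tau.av, fit.parallel.a, fit.parallel.av)  # chi-square difference tests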
Mplus equivalent of the tau-equivalence constraint: AUDITORY BY X1* X2 X3 (1)
lmi_sds <- '1.940 2.030 2.050 1.990 2.610 2.660 2.590 2.550'
lmi_cors <- '
1.000
0.736 1.000
0.731 0.648 1.000
0.771 0.694 0.700 1.000
0.685 0.512 0.496 0.508 1.000
0.481 0.638 0.431 0.449 0.726 1.000
0.485 0.442 0.635 0.456 0.743 0.672 1.000
0.508 0.469 0.453 0.627 0.759 0.689 0.695 1.000'
lmi_covs <- getCov(lmi_cors, sds = lmi_sds, names = c("A1", "B1", "C1", "D1", "A2", "B2", "C2", "D2"))
lmi_ms <- c(1.500, 1.320, 1.450, 1.410, 6.600, 6.420, 6.560, 6.310) # this should be a numeric vector, not a character string
model.equalform <- '
satis1 =~ A1 + B1 + C1 + D1
satis2 =~ A2 + B2 + C2 + D2
A1 ~~ A2
B1 ~~ B2
C1 ~~ C2
D1 ~~ D2
# fix indicator intercepts to 0
A1 ~ 0*1
A2 ~ 0*1
# free factor intercepts
satis1 ~ 1
satis2 ~ 1
'
fit.equalforms <- cfa(model.equalform, sample.cov = lmi_covs, sample.nobs = 250, sample.mean = lmi_ms, meanstructure = TRUE)
summary(fit.equalforms, standardized = TRUE, fit.measures = TRUE)
## lavaan (0.5-23.1097) converged normally after 74 iterations
##
## Number of observations 250
##
## Estimator ML
## Minimum Function Test Statistic 2.093
## Degrees of freedom 15
## P-value (Chi-square) 1.000
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1823.749
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.013
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -3573.572
## Loglikelihood unrestricted model (H1) -3572.526
##
## Number of free parameters 29
## Akaike (AIC) 7205.145
## Bayesian (BIC) 7307.267
## Sample-size adjusted Bayesian (BIC) 7215.335
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent Confidence Interval 0.000 0.000
## P-value RMSEA <= 0.05 1.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.010
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## satis1 =~
## A1 1.000 1.728 0.899
## B1 0.951 0.050 19.212 0.000 1.643 0.811
## C1 0.970 0.049 19.616 0.000 1.676 0.814
## D1 0.990 0.046 21.395 0.000 1.710 0.859
## satis2 =~
## A2 1.000 2.327 0.896
## B2 0.916 0.048 18.973 0.000 2.131 0.808
## C2 0.922 0.046 20.085 0.000 2.144 0.828
## D2 0.934 0.045 20.770 0.000 2.173 0.849
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 ~~
## .A2 0.701 0.119 5.915 0.000 0.701 0.723
## .B1 ~~
## .B2 1.043 0.163 6.407 0.000 1.043 0.565
## .C1 ~~
## .C2 1.042 0.158 6.576 0.000 1.042 0.598
## .D1 ~~
## .D2 0.770 0.134 5.735 0.000 0.770 0.558
## satis1 ~~
## satis2 2.680 0.353 7.586 0.000 0.667 0.667
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.000 0.000 0.000
## .A2 0.000 0.000 0.000
## satis1 1.500 0.122 12.343 0.000 0.868 0.868
## satis2 6.600 0.164 40.183 0.000 2.836 2.836
## .B1 -0.107 0.117 -0.910 0.363 -0.107 -0.053
## .C1 -0.005 0.118 -0.042 0.967 -0.005 -0.002
## .D1 -0.075 0.108 -0.692 0.489 -0.075 -0.038
## .B2 0.375 0.340 1.102 0.270 0.375 0.142
## .C2 0.477 0.324 1.475 0.140 0.477 0.184
## .D2 0.146 0.316 0.462 0.644 0.146 0.057
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.707 0.105 6.722 0.000 0.707 0.192
## .B1 1.409 0.153 9.178 0.000 1.409 0.343
## .C1 1.434 0.157 9.137 0.000 1.434 0.338
## .D1 1.038 0.127 8.165 0.000 1.038 0.262
## .A2 1.330 0.195 6.815 0.000 1.330 0.197
## .B2 2.418 0.263 9.183 0.000 2.418 0.347
## .C2 2.113 0.239 8.856 0.000 2.113 0.315
## .D2 1.836 0.219 8.390 0.000 1.836 0.280
## satis1 2.985 0.321 9.297 0.000 1.000 1.000
## satis2 5.414 0.586 9.244 0.000 1.000 1.000
model.equalfl <- '
# equality of factor loadings
satis1 =~ v1*A1 + v2*B1 + v3*C1 + v4*D1
satis2 =~ v1*A2 + v2*B2 + v3*C2 + v4*D2
A1 ~~ A2
B1 ~~ B2
C1 ~~ C2
D1 ~~ D2
# fix indicator intercepts to 0
A1 ~ 0*1
A2 ~ 0*1
# free factor intercepts
satis1 ~ 1
satis2 ~ 1
'
fit.equalfl <- cfa(model.equalfl, sample.cov = lmi_covs, sample.nobs = 250, sample.mean = lmi_ms, meanstructure = TRUE)
summary(fit.equalfl, standardized = TRUE, fit.measures = TRUE)
## lavaan (0.5-23.1097) converged normally after 72 iterations
##
## Number of observations 250
##
## Estimator ML
## Minimum Function Test Statistic 3.882
## Degrees of freedom 18
## P-value (Chi-square) 1.000
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1823.749
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.012
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -3574.467
## Loglikelihood unrestricted model (H1) -3572.526
##
## Number of free parameters 26
## Akaike (AIC) 7200.934
## Bayesian (BIC) 7292.492
## Sample-size adjusted Bayesian (BIC) 7210.070
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent Confidence Interval 0.000 0.000
## P-value RMSEA <= 0.05 1.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.014
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## satis1 =~
## A1 (v1) 1.000 1.748 0.904
## B1 (v2) 0.935 0.041 22.576 0.000 1.635 0.809
## C1 (v3) 0.946 0.041 23.281 0.000 1.653 0.808
## D1 (v4) 0.963 0.039 24.802 0.000 1.683 0.854
## satis2 =~
## A2 (v1) 1.000 2.290 0.890
## B2 (v2) 0.935 0.041 22.576 0.000 2.142 0.810
## C2 (v3) 0.946 0.041 23.281 0.000 2.166 0.831
## D2 (v4) 0.963 0.039 24.802 0.000 2.206 0.854
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 ~~
## .A2 0.699 0.118 5.901 0.000 0.699 0.721
## .B1 ~~
## .B2 1.044 0.163 6.413 0.000 1.044 0.566
## .C1 ~~
## .C2 1.044 0.159 6.586 0.000 1.044 0.599
## .D1 ~~
## .D2 0.771 0.134 5.738 0.000 0.771 0.557
## satis1 ~~
## satis2 2.664 0.351 7.579 0.000 0.666 0.666
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.000 0.000 0.000
## .A2 0.000 0.000 0.000
## satis1 1.500 0.122 12.264 0.000 0.858 0.858
## satis2 6.600 0.163 40.572 0.000 2.882 2.882
## .B1 -0.083 0.109 -0.760 0.447 -0.083 -0.041
## .C1 0.031 0.109 0.286 0.775 0.031 0.015
## .D1 -0.035 0.101 -0.346 0.729 -0.035 -0.018
## .B2 0.247 0.299 0.826 0.409 0.247 0.093
## .C2 0.318 0.292 1.088 0.276 0.318 0.122
## .D2 -0.048 0.279 -0.170 0.865 -0.048 -0.018
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.685 0.105 6.554 0.000 0.685 0.183
## .B1 1.415 0.153 9.258 0.000 1.415 0.346
## .C1 1.448 0.156 9.264 0.000 1.448 0.346
## .D1 1.056 0.126 8.376 0.000 1.056 0.271
## .A2 1.373 0.192 7.145 0.000 1.373 0.208
## .B2 2.406 0.262 9.187 0.000 2.406 0.344
## .C2 2.100 0.238 8.826 0.000 2.100 0.309
## .D2 1.813 0.218 8.327 0.000 1.813 0.271
## satis1 3.054 0.321 9.507 0.000 1.000 1.000
## satis2 5.243 0.559 9.385 0.000 1.000 1.000
anova(fit.equalforms, fit.equalfl, test = "chisq")
## Chi Square Difference Test
##
## Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
## fit.equalforms 15 7205.1 7307.3 2.0927
## fit.equalfl 18 7200.9 7292.5 3.8821 1.7893 3 0.6173
\[ \begin{aligned} \hat{Y}&=a+bX \\ Y&=a+bX+e \\ a &= M_y - bM_x\\ M_y &= a + bM_x \end{aligned} \]
\(VAR(X)=\lambda_x^2\phi + \delta\)
\(X=\tau_x+\Lambda_x\xi+\delta\) : the CFA measurement equation once intercepts (and hence latent means) are included.
\(M_x=\tau_x+\lambda_x\kappa\) : taking means, the regression intercept \(a\) generalizes to \(\tau_x\), and \(\kappa\) is the mean of the latent exogenous variable.
Returning to the longitudinal measurement invariance example, this analysis can be run either with or without a mean structure.
If the ultimate goal is to evaluate the model on the covariance structure alone, analyzing the mean structure adds little.
However, if the trajectory of change in a construct is of interest, the measurement invariance evaluation must also cover the indicator means.
That is, comparisons of means are meaningful only when the factor loadings and measurement intercepts are invariant.
Here the mean structure was identified by fixing the intercept of indicator A to 0.
As a result, the intercept estimates for A1 and A2 are 0,
and the latent means are therefore equal to the observed means of A1 and A2.
\[ \begin{aligned} M_y &= a + bM_x \\ 1.32 &= -0.107 +0.951(1.5) \end{aligned} \]
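The same identity can be recovered from the fitted equal-forms solution; a small sketch using parameterEstimates() follows (fitted(fit.equalforms)$mean would return all model-implied indicator means directly).
# implied mean of B1 = intercept of B1 + loading of B1 * latent mean of satis1
pe <- parameterEstimates(fit.equalforms)
int_B1  <- pe$est[pe$lhs == "B1" & pe$op == "~1"]
load_B1 <- pe$est[pe$lhs == "satis1" & pe$op == "=~" & pe$rhs == "B1"]
mean_f1 <- pe$est[pe$lhs == "satis1" & pe$op == "~1"]
int_B1 + load_B1 * mean_f1  # approximately 1.32, the observed mean of B1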
# To impose an equality constraint, use equal(): B1 ~ equal("B2~1")*1
# This constrains the intercept of B1 to be equal to the intercept of B2.
model.equali <- '
# equality of factor loadings
satis1 =~ v1*A1 + v2*B1 + v3*C1 + v4*D1
satis2 =~ v1*A2 + v2*B2 + v3*C2 + v4*D2
A1 ~~ A2
B1 ~~ B2
C1 ~~ C2
D1 ~~ D2
# fix indicator intercepts to 0
A1 ~ 0*1
A2 ~ 0*1
# free factor intercepts
satis1 ~ 1
satis2 ~ 1
# equal indicator intercepts
B1 ~ equal("B2~1")*1
C1 ~ equal("C2~1")*1
D1 ~ equal("D2~1")*1
'
fit.equali <- cfa(model.equali, sample.cov = lmi_covs, sample.nobs = 250, sample.mean = lmi_ms, meanstructure = TRUE)
summary(fit.equali, standardized = TRUE, fit.measures = TRUE)
## lavaan (0.5-23.1097) converged normally after 72 iterations
##
## Number of observations 250
##
## Estimator ML
## Minimum Function Test Statistic 7.254
## Degrees of freedom 21
## P-value (Chi-square) 0.998
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1823.749
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 1.000
## Tucker-Lewis Index (TLI) 1.010
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -3576.153
## Loglikelihood unrestricted model (H1) -3572.526
##
## Number of free parameters 23
## Akaike (AIC) 7198.306
## Bayesian (BIC) 7279.300
## Sample-size adjusted Bayesian (BIC) 7206.388
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.000
## 90 Percent Confidence Interval 0.000 0.000
## P-value RMSEA <= 0.05 1.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.026
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## satis1 =~
## A1 (v1) 1.000 1.713 0.897
## B1 (v2) 0.989 0.017 56.700 0.000 1.695 0.821
## C1 (v3) 0.993 0.017 60.079 0.000 1.702 0.818
## D1 (v4) 0.962 0.016 60.428 0.000 1.649 0.847
## satis2 =~
## A2 (v1) 1.000 2.239 0.882
## B2 (v2) 0.989 0.017 56.700 0.000 2.215 0.822
## C2 (v3) 0.993 0.017 60.079 0.000 2.224 0.840
## D2 (v4) 0.962 0.016 60.428 0.000 2.155 0.846
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 ~~
## .A2 0.723 0.117 6.187 0.000 0.723 0.717
## .B1 ~~
## .B2 1.023 0.162 6.298 0.000 1.023 0.564
## .C1 ~~
## .C2 1.031 0.158 6.515 0.000 1.031 0.601
## .D1 ~~
## .D2 0.785 0.133 5.918 0.000 0.785 0.560
## satis1 ~~
## satis2 2.547 0.321 7.923 0.000 0.664 0.664
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.000 0.000 0.000
## .A2 0.000 0.000 0.000
## satis1 1.500 0.121 12.421 0.000 0.876 0.876
## satis2 6.617 0.160 41.421 0.000 2.955 2.955
## .B1 (B2~1) -0.156 0.098 -1.583 0.113 -0.156 -0.075
## .C1 (C2~1) -0.032 0.099 -0.328 0.743 -0.032 -0.016
## .D1 (D2~1) -0.039 0.089 -0.436 0.663 -0.039 -0.020
## .B2 (.17.) -0.156 0.098 -1.583 0.113 -0.156 -0.058
## .C2 (.18.) -0.032 0.099 -0.328 0.743 -0.032 -0.012
## .D2 (.19.) -0.039 0.089 -0.436 0.663 -0.039 -0.015
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.711 0.101 7.060 0.000 0.711 0.195
## .B1 1.391 0.152 9.149 0.000 1.391 0.326
## .C1 1.427 0.155 9.185 0.000 1.427 0.330
## .D1 1.070 0.124 8.655 0.000 1.070 0.282
## .A2 1.428 0.188 7.609 0.000 1.428 0.222
## .B2 2.363 0.260 9.082 0.000 2.363 0.325
## .C2 2.066 0.236 8.753 0.000 2.066 0.295
## .D2 1.839 0.214 8.610 0.000 1.839 0.284
## satis1 2.936 0.291 10.099 0.000 1.000 1.000
## satis2 5.013 0.499 10.050 0.000 1.000 1.000
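Extending the earlier two-model comparison, the three nested longitudinal models (equal form, equal factor loadings, equal intercepts) can be compared in a single call:
# chi-square difference tests across the full longitudinal sequence
anova(fit.equalforms, fit.equalfl, fit.equali, test = "chisq")
# change in approximate fit indices across the same sequence
sapply(list(equalforms = fit.equalforms, equalfl = fit.equalfl, equali = fit.equali),
       fitMeasures, fit.measures = c("cfi", "rmsea"))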
############################################
# This is wrong
# Needs to be checked!
# https://groups.google.com/forum/#!topic/lavaan/mUkfB3HQLmM
# (a possible corrected specification is sketched after the effect-coding discussion below)
############################################
model.effect <- '
# Measurement Model
statis1 =~ NA*A1 + v1*A1 + v2*B1 + v3*C1 + v4*D1
statis2 =~ NA*A2 + u1*A2 + u2*B2 + u3*C2 + u4*D2
# Defining intercepts
A1 ~ t1*1
B1 ~ t2*1
C1 ~ t3*1
D1 ~ t4*1
A2 ~ s1*1
B2 ~ s2*1
C2 ~ s3*1
D2 ~ s4*1
# Effect coding
v1 == 4 - v2 - v3 - v4
u1 == 4 - u2 - u3 - u4
t1 == 4 - t2 - t3 - t4
s1 == 4 - s2 - s3 - s4
'
fit.effect <- cfa(model.effect, sample.cov = lmi_covs, sample.nobs = 250, sample.mean = lmi_ms, meanstructure = TRUE)
summary(fit.effect, standardized = TRUE, fit.measures = TRUE)
## lavaan (0.5-23.1097) converged normally after 57 iterations
##
## Number of observations 250
##
## Estimator ML
## Minimum Function Test Statistic 921.148
## Degrees of freedom 21
## P-value (Chi-square) 0.000
##
## Model test baseline model:
##
## Minimum Function Test Statistic 1823.749
## Degrees of freedom 28
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 0.499
## Tucker-Lewis Index (TLI) 0.332
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -4033.100
## Loglikelihood unrestricted model (H1) -3572.526
##
## Number of free parameters 23
## Akaike (AIC) 8112.200
## Bayesian (BIC) 8193.194
## Sample-size adjusted Bayesian (BIC) 8120.282
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.414
## 90 Percent Confidence Interval 0.391 0.437
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 2.229
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## statis1 =~
## A1 (v1) 1.041 0.032 32.131 0.000 1.809 0.910
## B1 (v2) 0.971 0.039 25.122 0.000 1.689 0.818
## C1 (v3) 0.980 0.039 25.121 0.000 1.703 0.817
## D1 (v4) 1.008 0.035 28.445 0.000 1.752 0.863
## statis2 =~
## A2 (u1) 1.033 0.012 88.361 0.000 6.092 0.983
## B2 (u2) 1.005 0.014 71.239 0.000 5.924 0.967
## C2 (u3) 0.984 0.014 72.818 0.000 5.800 0.970
## D2 (u4) 0.978 0.013 75.926 0.000 5.768 0.973
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## statis1 ~~
## statis2 5.011 0.751 6.674 0.000 0.489 0.489
##
## Intercepts:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 (t1) 1.045 0.050 20.792 0.000 1.045 0.525
## .B1 (t2) 0.922 0.063 14.621 0.000 0.922 0.447
## .C1 (t3) 1.050 0.064 16.505 0.000 1.050 0.504
## .D1 (t4) 0.983 0.057 17.216 0.000 0.983 0.484
## .A2 (s1) 0.976 0.068 14.412 0.000 0.976 0.157
## .B2 (s2) 0.901 0.082 10.953 0.000 0.901 0.147
## .C2 (s3) 1.166 0.079 14.807 0.000 1.166 0.195
## .D2 (s4) 0.958 0.075 12.760 0.000 0.958 0.162
## statis1 0.000 0.000 0.000
## statis2 0.000 0.000 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .A1 0.682 0.105 6.474 0.000 0.682 0.172
## .B1 1.411 0.153 9.212 0.000 1.411 0.331
## .C1 1.447 0.157 9.230 0.000 1.447 0.333
## .D1 1.056 0.128 8.278 0.000 1.056 0.256
## .A2 1.309 0.192 6.826 0.000 1.309 0.034
## .B2 2.420 0.270 8.971 0.000 2.420 0.065
## .C2 2.133 0.243 8.766 0.000 2.133 0.060
## .D2 1.852 0.220 8.410 0.000 1.852 0.053
## statis1 3.022 0.296 10.200 0.000 1.000 1.000
## statis2 34.763 3.152 11.027 0.000 1.000 1.000
##
## Constraints:
## |Slack|
## v1 - (4-v2-v3-v4) 0.000
## u1 - (4-u2-u3-u4) 0.000
## t1 - (4-t2-t3-t4) 0.000
## s1 - (4-s2-s3-s4) 0.000
With effect coding, the factor variance reflects the average amount of indicator variance explained by the latent variable.
This is an important difference between the marker approach and the effect-coding approach.
With a marker, the factor metric is defined by the single indicator chosen as the marker,
so the resulting estimates of the construct's true mean and variance are not always interpretable.
Effect coding is preferable in that every indicator of the construct contributes to the factor metric, and the estimated factor mean and variance represent the average of the indicators' true-score means and variances.
Effect coding is therefore especially useful when evaluating measurement invariance, or when the indicators defining the factor have an interpretable metric.
If the metric is not of particular interest, a standardized solution with the factor variance fixed to 1 may be preferred.
Regardless of which scaling method is used, model fit is identical.
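Since correctly specified scaling approaches should yield identical fit, the very poor fit of model.effect above points to its identification of the mean structure rather than to effect coding itself. Below is a minimal sketch of the commonly used effect-coding identification: loadings of each factor constrained to average 1, intercepts constrained to sum to 0 (not 4), latent means freely estimated, and the across-time residual covariances from the earlier models retained. This is offered as a possible correction, not a verified one.
model.effect2 <- '
# freed loadings, labelled for the effect-coding constraints
satis1 =~ NA*A1 + v1*A1 + v2*B1 + v3*C1 + v4*D1
satis2 =~ NA*A2 + u1*A2 + u2*B2 + u3*C2 + u4*D2
# across-time residual covariances, as in the earlier models
A1 ~~ A2
B1 ~~ B2
C1 ~~ C2
D1 ~~ D2
# labelled indicator intercepts
A1 ~ t1*1
B1 ~ t2*1
C1 ~ t3*1
D1 ~ t4*1
A2 ~ s1*1
B2 ~ s2*1
C2 ~ s3*1
D2 ~ s4*1
# free latent means
satis1 ~ 1
satis2 ~ 1
# effect coding: loadings average 1, intercepts average 0
v1 == 4 - v2 - v3 - v4
u1 == 4 - u2 - u3 - u4
t1 == 0 - t2 - t3 - t4
s1 == 0 - s2 - s3 - s4
'
fit.effect2 <- cfa(model.effect2, sample.cov = lmi_covs, sample.nobs = 250,
                   sample.mean = lmi_ms, meanstructure = TRUE)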
The sequence of invariance models is: equal form, equal factor loadings, equal intercepts.
A step-down approach is recommended: start from the most constrained model and release constraints step by step.
There is some debate about whether an overall (omnibus) test of the equality of the covariance matrices across groups should be conducted first:
even if the omnibus test concludes that \(\Sigma_1 = \Sigma_2\), individual measurement or structural parameters may still differ across groups, and the reverse is also possible.
The overall test therefore does not seem very informative; it provides only rough guidance.
Real-data example
In Mplus, the same sequence of invariance models can be requested with MODEL = CONFIGURAL METRIC SCALAR.
Data <- read.table("http://people.bu.edu/tabrown/Ch7/MDDALL.dat")
names(Data) <- c("sex", paste("mdd", 1:9, sep = ""))
Data$sex <- factor(Data$sex, levels = c(0, 1), labels = c("female", "male"))
model.mdd <- '
MDD =~ mdd1 + mdd2 + mdd3 + mdd4 + mdd5 + mdd6 + mdd7 + mdd8 + mdd9
mdd1 ~~ mdd2
'
# Single group solution (men)
fit.men <- cfa(model.mdd, data = Data[Data$sex == "male",])
summary(fit.men, fit.measures = TRUE)
## lavaan (0.5-23.1097) converged normally after 36 iterations
##
## Number of observations 375
##
## Estimator ML
## Minimum Function Test Statistic 45.957
## Degrees of freedom 26
## P-value (Chi-square) 0.009
##
## Model test baseline model:
##
## Minimum Function Test Statistic 643.402
## Degrees of freedom 36
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 0.967
## Tucker-Lewis Index (TLI) 0.955
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -6882.587
## Loglikelihood unrestricted model (H1) -6859.609
##
## Number of free parameters 19
## Akaike (AIC) 13803.174
## Bayesian (BIC) 13877.786
## Sample-size adjusted Bayesian (BIC) 13817.504
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.045
## 90 Percent Confidence Interval 0.022 0.066
## P-value RMSEA <= 0.05 0.617
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.041
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|)
## MDD =~
## mdd1 1.000
## mdd2 1.236 0.098 12.580 0.000
## mdd3 0.786 0.133 5.912 0.000
## mdd4 1.166 0.152 7.656 0.000
## mdd5 0.959 0.139 6.916 0.000
## mdd6 1.132 0.145 7.790 0.000
## mdd7 0.766 0.143 5.360 0.000
## mdd8 1.019 0.144 7.075 0.000
## mdd9 0.632 0.113 5.616 0.000
##
## Covariances:
## Estimate Std.Err z-value P(>|z|)
## .mdd1 ~~
## .mdd2 0.920 0.160 5.743 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## .mdd1 1.499 0.152 9.889 0.000
## .mdd2 2.459 0.244 10.085 0.000
## .mdd3 3.727 0.290 12.830 0.000
## .mdd4 3.547 0.304 11.671 0.000
## .mdd5 3.467 0.282 12.304 0.000
## .mdd6 3.111 0.270 11.516 0.000
## .mdd7 4.599 0.353 13.030 0.000
## .mdd8 3.626 0.297 12.192 0.000
## .mdd9 2.770 0.214 12.943 0.000
## MDD 1.048 0.183 5.718 0.000
# Single group solution (women)
fit.women <- cfa(model.mdd, data = Data[Data$sex == "female",])
summary(fit.women, fit.measures = TRUE)
## lavaan (0.5-23.1097) converged normally after 34 iterations
##
## Number of observations 375
##
## Estimator ML
## Minimum Function Test Statistic 52.954
## Degrees of freedom 26
## P-value (Chi-square) 0.001
##
## Model test baseline model:
##
## Minimum Function Test Statistic 700.173
## Degrees of freedom 36
## P-value 0.000
##
## User model versus baseline model:
##
## Comparative Fit Index (CFI) 0.959
## Tucker-Lewis Index (TLI) 0.944
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -6824.311
## Loglikelihood unrestricted model (H1) -6797.834
##
## Number of free parameters 19
## Akaike (AIC) 13686.621
## Bayesian (BIC) 13761.233
## Sample-size adjusted Bayesian (BIC) 13700.951
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.053
## 90 Percent Confidence Interval 0.032 0.073
## P-value RMSEA <= 0.05 0.390
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.044
##
## Parameter Estimates:
##
## Information Expected
## Standard Errors Standard
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|)
## MDD =~
## mdd1 1.000
## mdd2 1.107 0.086 12.911 0.000
## mdd3 0.729 0.101 7.221 0.000
## mdd4 0.911 0.108 8.407 0.000
## mdd5 0.812 0.103 7.845 0.000
## mdd6 0.924 0.100 9.240 0.000
## mdd7 0.611 0.098 6.222 0.000
## mdd8 0.979 0.107 9.132 0.000
## mdd9 0.484 0.085 5.709 0.000
##
## Covariances:
## Estimate Std.Err z-value P(>|z|)
## .mdd1 ~~
## .mdd2 0.393 0.147 2.684 0.007
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## .mdd1 1.375 0.155 8.853 0.000
## .mdd2 2.132 0.223 9.576 0.000
## .mdd3 3.551 0.277 12.837 0.000
## .mdd4 3.583 0.290 12.351 0.000
## .mdd5 3.501 0.278 12.610 0.000
## .mdd6 2.677 0.226 11.823 0.000
## .mdd7 3.658 0.279 13.113 0.000
## .mdd8 3.137 0.264 11.905 0.000
## .mdd9 2.831 0.214 13.223 0.000
## MDD 1.564 0.224 6.993 0.000
# Measurement Invariance using semTools
require(semTools)
## Loading required package: semTools
##
## ###############################################################################
## This is semTools 0.4-14
## All users of R (or SEM) are invited to submit functions or ideas for functions.
## ###############################################################################
##
## Attaching package: 'semTools'
## The following object is masked from 'package:psych':
##
## skew
measurementInvariance(model.mdd, data = Data, group = "sex", strict = TRUE)
##
## Measurement invariance models:
##
## Model 1 : fit.configural
## Model 2 : fit.loadings
## Model 3 : fit.intercepts
## Model 4 : fit.residuals
## Model 5 : fit.means
##
## Chi Square Difference Test
##
## Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
## fit.configural 52 27526 27785 98.911
## fit.loadings 60 27514 27736 102.839 3.9286 8 0.8635
## fit.intercepts 68 27510 27695 115.309 12.4699 8 0.1314
## fit.residuals 77 27502 27645 125.021 9.7115 9 0.3743
## fit.means 78 27502 27640 126.935 1.9144 1 0.1665
##
##
## Fit measures:
##
## cfi rmsea cfi.delta rmsea.delta
## fit.configural 0.963 0.049 NA NA
## fit.loadings 0.966 0.044 0.003 0.005
## fit.intercepts 0.963 0.043 0.004 0.001
## fit.residuals 0.962 0.041 0.001 0.002
## fit.means 0.962 0.041 0.001 0.000
# using lavaan
# measurementInvariance() does not test equal factor variances, but this can be accomplished as follows
fit.ef <- cfa(model.mdd, data = Data, group = "sex", meanstructure = TRUE) # equal form
fit.efl <- update(fit.ef, group.equal = c("loadings")) # equal factor loadings
fit.eii <- update(fit.efl, group.equal = c("loadings", "intercepts")) # equal indicator intercepts
fit.eir <- update(fit.eii, group.equal = c("loadings", "intercepts", "residuals")) # equal indicator error variances
fit.fv <- update(fit.eir, group.equal = c("loadings", "intercepts", "residuals", "lv.variances")) # equal factor variances
fit.fm <- update(fit.fv, group.equal = c("loadings", "intercepts", "residuals", "lv.variances", "means")) # equal latent means
# chi-squared diff tests
anova(fit.ef, fit.efl, fit.eii, fit.eir, fit.fv, fit.fm, test = "chisq")
## Chi Square Difference Test
##
## Df AIC BIC Chisq Chisq diff Df diff Pr(>Chisq)
## fit.ef 52 27526 27785 98.911
## fit.efl 60 27514 27736 102.839 3.9286 8 0.8635
## fit.eii 68 27510 27695 115.309 12.4699 8 0.1314
## fit.eir 77 27502 27645 125.021 9.7115 9 0.3743
## fit.fv 78 27501 27639 125.814 0.7931 1 0.3732
## fit.fm 79 27501 27635 127.734 1.9201 1 0.1659
sds <- '2.26 2.73 2.11 2.32 2.61 2.44 0.50'
cors <- '
1.000
0.705 1.000
0.724 0.646 1.000
0.213 0.195 0.190 1.000
0.149 0.142 0.128 0.521 1.000
0.155 0.162 0.135 0.557 0.479 1.000
-0.019 -0.024 -0.029 -0.110 -0.074 -0.291 1.000'
covs <- getCov(cors, sds = sds, names = c("S1", "S2", "S3", "A1", "A2", "A3", "sex"))