library(pacman); p_load(kirkegaard, lavaan, knitr, semPlot, umx, psych)
The data used for this analysis can be downloaded at its public repository, here: https://ves.emilkirkegaard.dk/data/.
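The code below assumes the data have already been read into a data frame named VES; a minimal loading sketch, with the filename as an assumption rather than part of the original analysis:
VES <- read.csv("VES.csv", stringsAsFactors = FALSE) #Filename hypothetical; adjust to the downloaded file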
VES %<>% rename(
WBD = WAIS_BD, #WAIS Block Design
WGI = WAIS_GI, #WAIS General Information
GPTR = GPT_right, #Grooved Pegboard Task - Right Hand
GPTL = GPT_left, #Grooved Pegboard Task - Left Hand
CD = copy_direct, #Rey-Osterrieth Complex Figure Drawing - Direct, i.e., in sight
CI = copy_immediate, #Complex Figure Drawing - Immediate(ly after removal from sight)
CY = copy_delayed, #Complex Figure Drawing - Delayed, i.e., asked to draw the figure after time has passed from viewing
ACVE = ACB_verbal_early, #Army Classification Battery Verbal subtest administered at introduction
ACVL = ACB_verbal_later, #Verbal subtest administered at follow-up
ACAE = ACB_arithmetic_early, #Arithmetic subtest administered at introduction
ACAL = ACB_arithmetic_later) #Arithmetic subtest administered at follow-up
VES$CC = (standardize(VES$CI) + standardize(VES$CY))/2 #CI and CY are very similar; keeping them separate unnecessarily harms path model fit, so they are combined.
early_tests = c("ACVE", "ACAE", "PA", "GIT", "AFQT") #Tests taken at introduction
later_tests = c("WRAT", "CVLT", "WCST", "WBD", "WGI", "GPTR", "GPTL", "PASAT", "CD", "CC", "WLGT", "ACVL", "ACAL") #Tests taken at follow-up. Unlisted above are the Wisconsin Card Sorting Test (WCST), California Verbal Learning Test (CVLT), Wide-Range Achievement Test (WRAT), Word List Generation Test (WLGT), and Paced Auditory Serial Addition Test (PASAT)
ses = c("income", "education")
tests = c(early_tests, later_tests) #All tests
vars = c(tests, ses)
#Model fit measures
FITM <- c("chisq", "df", "npar", "cfi", "rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "aic", "bic", "srmr") #lavaan's name for the parameter count is "npar"
Residual covariances between early and later tests were allowed when the tests were identical, but residual covariances were otherwise kept to a minimum. Because some of the same tests were administered at both timepoints, these covariances were needed; they would likely remain necessary with more factors, though the early testing data could not have supported additional factors in any case. We made this analytic choice for the same reason Ritchie, Bates & Deary (2015) did, following them in allowing and principally using time-specific residual covariances.
GE.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
AFQT ~~ PA'
GL.model <- '
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
CC ~~ CD + CVLT + WBD
GPTL ~~ GPTR
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT + ACVL'
GE.fit <- cfa(GE.model, data = VES, std.lv = T, std.ov = T)
GL.fit <- cfa(GL.model, data = VES, std.lv = T, std.ov = T)
summary(GE.fit, stand = T, fit = T, modindices = F)
## lavaan 0.6-9 ended normally after 20 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 11
##
## Used Total
## Number of observations 4355 4462
##
## Model Test User Model:
##
## Test statistic 127.913
## Degrees of freedom 4
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 13497.703
## Degrees of freedom 10
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.991
## Tucker-Lewis Index (TLI) 0.977
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -24209.991
## Loglikelihood unrestricted model (H1) -24146.035
##
## Akaike (AIC) 48441.983
## Bayesian (BIC) 48512.153
## Sample-size adjusted Bayesian (BIC) 48477.199
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.084
## 90 Percent confidence interval - lower 0.072
## 90 Percent confidence interval - upper 0.097
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.018
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.840 0.013 66.011 0.000 0.840 0.840
## ACAE 0.832 0.013 65.080 0.000 0.832 0.832
## AFQT 0.867 0.013 69.223 0.000 0.867 0.867
## GIT 0.744 0.013 55.343 0.000 0.744 0.744
## PA 0.645 0.014 44.827 0.000 0.645 0.645
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .AFQT ~~
## .PA 0.170 0.009 19.651 0.000 0.170 0.447
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.294 0.009 33.323 0.000 0.294 0.294
## .ACAE 0.307 0.009 34.177 0.000 0.307 0.307
## .AFQT 0.248 0.008 29.856 0.000 0.248 0.248
## .GIT 0.446 0.011 39.965 0.000 0.446 0.446
## .PA 0.584 0.014 41.506 0.000 0.584 0.584
## ge 1.000 1.000 1.000
summary(GL.fit, stand = T, fit = T, modindices = F)
## lavaan 0.6-9 ended normally after 29 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 35
##
## Used Total
## Number of observations 4426 4462
##
## Model Test User Model:
##
## Test statistic 907.221
## Degrees of freedom 56
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 23463.123
## Degrees of freedom 78
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.964
## Tucker-Lewis Index (TLI) 0.949
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -70358.433
## Loglikelihood unrestricted model (H1) -69904.823
##
## Akaike (AIC) 140786.866
## Bayesian (BIC) 141010.700
## Sample-size adjusted Bayesian (BIC) 140899.484
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.059
## 90 Percent confidence interval - lower 0.055
## 90 Percent confidence interval - upper 0.062
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.037
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl =~
## WRAT 0.672 0.014 47.850 0.000 0.672 0.678
## CVLT 0.432 0.015 28.379 0.000 0.432 0.432
## WCST 0.476 0.015 31.642 0.000 0.476 0.476
## WBD 0.633 0.014 44.487 0.000 0.633 0.633
## WGI 0.721 0.014 51.704 0.000 0.721 0.722
## CD 0.455 0.015 30.115 0.000 0.455 0.455
## CC 0.470 0.015 31.364 0.000 0.470 0.474
## PASAT 0.628 0.014 43.785 0.000 0.628 0.628
## GPTL 0.312 0.016 20.004 0.000 0.312 0.312
## GPTR 0.314 0.016 20.164 0.000 0.314 0.314
## WLGT 0.499 0.015 33.452 0.000 0.499 0.499
## ACVL 0.777 0.013 58.040 0.000 0.777 0.777
## ACAL 0.852 0.013 66.734 0.000 0.852 0.852
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .CD ~~
## .CC 0.241 0.012 19.995 0.000 0.241 0.311
## .CVLT ~~
## .CC 0.125 0.011 11.096 0.000 0.125 0.159
## .WBD ~~
## .CC 0.169 0.011 16.125 0.000 0.169 0.251
## .GPTL ~~
## .GPTR 0.535 0.016 33.356 0.000 0.535 0.593
## .WRAT ~~
## .WGI 0.157 0.010 16.016 0.000 0.157 0.311
## .WLGT 0.118 0.009 13.704 0.000 0.118 0.187
## .ACVL 0.230 0.010 23.806 0.000 0.230 0.500
## .WGI ~~
## .PASAT -0.071 0.008 -8.631 0.000 -0.071 -0.133
## .ACVL 0.153 0.010 15.985 0.000 0.153 0.351
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .WRAT 0.532 0.013 40.969 0.000 0.532 0.541
## .CVLT 0.814 0.018 45.566 0.000 0.814 0.814
## .WCST 0.774 0.017 45.158 0.000 0.774 0.774
## .WBD 0.599 0.014 42.670 0.000 0.599 0.599
## .WGI 0.477 0.013 37.447 0.000 0.477 0.479
## .CD 0.793 0.017 45.356 0.000 0.793 0.793
## .CC 0.761 0.017 45.678 0.000 0.761 0.775
## .PASAT 0.606 0.014 42.447 0.000 0.606 0.606
## .GPTL 0.902 0.019 46.347 0.000 0.902 0.903
## .GPTR 0.901 0.019 46.336 0.000 0.901 0.901
## .WLGT 0.751 0.017 44.895 0.000 0.751 0.751
## .ACVL 0.396 0.011 35.811 0.000 0.396 0.397
## .ACAL 0.274 0.009 29.068 0.000 0.274 0.274
## gl 1.000 1.000 1.000
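As a spot check on the residual covariances retained in the later-tests model, the largest modification indices can be screened; a sketch, not part of the original output:
modindices(GL.fit) %>% #Modification indices for the later-tests model
  arrange(desc(mi)) %>% #Sort by modification index, largest first
  head(10) #Candidate residual covariances (op "~~") appear near the top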
GCOR.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
ge ~ gl
ACVE ~~ ACVL + WRAT
ACAE ~~ ACAL
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT'
GCOR0.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
ge ~ 0*gl
ACVE ~~ ACVL + WRAT
ACAE ~~ ACAL
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT'
GCOR1.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
ge ~ 1*gl
ACVE ~~ ACVL + WRAT
ACAE ~~ ACAL
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT'
GCOR.fit <- cfa(GCOR.model, data = VES, std.lv = T, std.ov = T)
GCOR0.fit <- cfa(GCOR0.model, data = VES, std.lv = T, std.ov = T)
GCOR1.fit <- cfa(GCOR1.model, data = VES, std.lv = T, std.ov = T)
fitMeasures(GCOR.fit, FITM)
## chisq df npar cfi rmsea
## 2101.090 119.000 52.000 0.957 0.062
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.060 0.064 176637.448 176968.741 0.042
fitMeasures(GCOR0.fit, FITM)
## chisq df npar cfi rmsea
## 8439.536 120.000 51.000 0.819 0.127
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.124 0.129 182973.894 183298.816 0.273
fitMeasures(GCOR1.fit, FITM)
## chisq df npar cfi rmsea
## 3615.279 120.000 51.000 0.924 0.082
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.080 0.084 178149.637 178474.559 0.151
parameterEstimates(GCOR.fit, stand = T) %>%
filter(op == "~") %>%
dplyr::select(Early = lhs, Later = rhs, "Unstandardized Regression" = est, SE = se, Z = z, 'p-value' = pvalue, Beta = std.all) %>%
kable(digits = 3, format = "pandoc")
| Early | Later | Unstandardized Regression | SE | Z | p-value | Beta |
|---|---|---|---|---|---|---|
| ge | gl | 2.884 | 0.086 | 33.695 | 0 | 0.945 |
First finding: General intelligence is highly stable over time, with a correlation of r = 0.945. Second finding: A model with strong but imperfect stability for trait intelligence fits better than a model with perfect stability and far better than a model with no stability.
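The same conclusion can be reached with formal nested-model tests; a sketch using lavaan's likelihood-ratio test, equivalent to the chi-square differences implied by the fit measures above:
anova(GCOR.fit, GCOR1.fit) #Free stability path vs. path fixed to 1
anova(GCOR.fit, GCOR0.fit) #Free stability path vs. path fixed to 0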
The stability at the latent level shown above must be contrasted with the stability at the level of the observed scores. For that purpose, we need to assess full-scale IQ (FSIQ) correlations, but we can also output a correlation matrix of all the subtests. First the subtests, then the combined ACV/A correlations, then the FSIQ correlation.
describe(VES[vars]) #does not knit
For these variables, missingness tops out at 1.93%.
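Since the describe() output is not knitted, the missingness figure can be verified directly; a sketch:
round(sort(colMeans(is.na(VES[vars])), decreasing = TRUE) * 100, 2) #Percent missing per variable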
TestCors <- round(cor(VES[vars], use = "pairwise.complete"), 3); TestCors
## ACVE ACAE PA GIT AFQT WRAT CVLT WCST WBD WGI GPTR
## ACVE 1.000 0.699 0.516 0.659 0.714 0.746 0.317 0.327 0.437 0.725 0.204
## ACAE 0.699 1.000 0.576 0.589 0.737 0.589 0.331 0.360 0.502 0.635 0.201
## PA 0.516 0.576 1.000 0.467 0.728 0.412 0.264 0.331 0.634 0.482 0.261
## GIT 0.659 0.589 0.467 1.000 0.645 0.517 0.250 0.282 0.418 0.582 0.174
## AFQT 0.714 0.737 0.728 0.645 1.000 0.578 0.312 0.368 0.629 0.626 0.253
## WRAT 0.746 0.589 0.412 0.517 0.578 1.000 0.309 0.292 0.382 0.652 0.196
## CVLT 0.317 0.331 0.264 0.250 0.312 0.309 1.000 0.192 0.269 0.329 0.117
## WCST 0.327 0.360 0.331 0.282 0.368 0.292 0.192 1.000 0.356 0.330 0.186
## WBD 0.437 0.502 0.634 0.418 0.629 0.382 0.269 0.356 1.000 0.453 0.301
## WGI 0.725 0.635 0.482 0.582 0.626 0.652 0.329 0.330 0.453 1.000 0.173
## GPTR 0.204 0.201 0.261 0.174 0.253 0.196 0.117 0.186 0.301 0.173 1.000
## GPTL 0.208 0.212 0.263 0.192 0.269 0.204 0.115 0.196 0.307 0.186 0.634
## PASAT 0.408 0.521 0.371 0.365 0.432 0.417 0.289 0.285 0.388 0.366 0.226
## CD 0.290 0.333 0.380 0.254 0.372 0.269 0.205 0.291 0.398 0.278 0.223
## CC 0.309 0.343 0.464 0.316 0.461 0.275 0.329 0.274 0.502 0.351 0.201
## WLGT 0.443 0.370 0.289 0.310 0.360 0.504 0.278 0.209 0.281 0.414 0.168
## ACVL 0.824 0.658 0.484 0.620 0.670 0.766 0.333 0.361 0.453 0.719 0.220
## ACAL 0.642 0.785 0.545 0.548 0.688 0.585 0.356 0.396 0.532 0.622 0.240
## income 0.360 0.367 0.241 0.303 0.323 0.273 0.165 0.212 0.222 0.312 0.169
## education 0.532 0.487 0.343 0.377 0.430 0.511 0.202 0.241 0.275 0.555 0.134
## GPTL PASAT CD CC WLGT ACVL ACAL income education
## ACVE 0.208 0.408 0.290 0.309 0.443 0.824 0.642 0.360 0.532
## ACAE 0.212 0.521 0.333 0.343 0.370 0.658 0.785 0.367 0.487
## PA 0.263 0.371 0.380 0.464 0.289 0.484 0.545 0.241 0.343
## GIT 0.192 0.365 0.254 0.316 0.310 0.620 0.548 0.303 0.377
## AFQT 0.269 0.432 0.372 0.461 0.360 0.670 0.688 0.323 0.430
## WRAT 0.204 0.417 0.269 0.275 0.504 0.766 0.585 0.273 0.511
## CVLT 0.115 0.289 0.205 0.329 0.278 0.333 0.356 0.165 0.202
## WCST 0.196 0.285 0.291 0.274 0.209 0.361 0.396 0.212 0.241
## WBD 0.307 0.388 0.398 0.502 0.281 0.453 0.532 0.222 0.275
## WGI 0.186 0.366 0.278 0.351 0.414 0.719 0.622 0.312 0.555
## GPTR 0.634 0.226 0.223 0.201 0.168 0.220 0.240 0.169 0.134
## GPTL 1.000 0.216 0.223 0.232 0.157 0.226 0.236 0.165 0.149
## PASAT 0.216 1.000 0.247 0.288 0.357 0.440 0.562 0.268 0.304
## CD 0.223 0.247 1.000 0.489 0.176 0.325 0.384 0.186 0.209
## CC 0.232 0.288 0.489 1.000 0.220 0.322 0.394 0.175 0.222
## WLGT 0.157 0.357 0.176 0.220 1.000 0.463 0.365 0.189 0.338
## ACVL 0.226 0.440 0.325 0.322 0.463 1.000 0.691 0.341 0.506
## ACAL 0.236 0.562 0.384 0.394 0.365 0.691 1.000 0.392 0.467
## income 0.165 0.268 0.186 0.175 0.189 0.341 0.392 1.000 0.349
## education 0.149 0.304 0.209 0.222 0.338 0.506 0.467 0.349 1.000
max(TestCors[lower.tri(TestCors, diag = F)]) #Highest non-diagonal correlation
## [1] 0.824
min(TestCors) #Lowest correlation
## [1] 0.115
ACE = rowMeans(scale(VES[c("ACVE", "ACAE")]))
ACL = rowMeans(scale(VES[c("ACVL", "ACAL")]))
cor.test(ACE, ACL)
##
## Pearson's product-moment correlation
##
## data: ACE and ACL
## t = 110.53, df = 4381, p-value < 2.2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.8499109 0.8655506
## sample estimates:
## cor
## 0.8579293
FSIQE = rowMeans(scale(VES[early_tests]))
FSIQL = rowMeans(scale(VES[later_tests]))
cor.test(FSIQE, FSIQL)
##
## Pearson's product-moment correlation
##
## data: FSIQE and FSIQL
## t = 93.06, df = 4318, p-value < 2.2e-16
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.8067058 0.8265616
## sample estimates:
## cor
## 0.8168755
Third finding: The stability of general intelligence (r = 0.945) is greater than the stability of any of our observed scores (highest r = 0.824, lowest r = 0.115), the stability of our combined ACV/A scores (r = 0.858), and the stability of observed FSIQs (r = 0.817).
Even after correcting the FSIQ correlation for reliability using an underestimate of 0.8, it is still not as high as the g correlation. The corrected ACV/A correlation could be, but we know its reliability is greater than 0.8, and since composite reliabilities exceed individual subtest reliabilities, we could only equate the stability of general intelligence with that of these observed scores by making serious assumptions that are known to be errant.
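The calculations below apply the standard correction for attenuation in one variable, \(r_{\text{corrected}} = r_{\text{observed}} / \sqrt{r_{xx}}\), with an assumed reliability of \(r_{xx} = 0.8\) applied to the observed FSIQ stability correlation (0.817) and to values just below and above it (approximately its 95% confidence bounds).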
0.807/sqrt(0.8)
## [1] 0.9022534
0.817/sqrt(0.8)
## [1] 0.9134338
0.827/sqrt(0.8)
## [1] 0.9246141
Nonsignificant paths were pruned iteratively until none remained in the model (see the sketch after the model fits below). The first model features education affecting g. Nonsignificant paths for GS2 were identified by inspecting the inverted education - gl model in order to gauge the standard errors; these match the lowest coefficients in GS and are clearly appropriate to drop. The pruned models were then compared with the full models in terms of model fit and a \(\chi^2\) exact-fit difference test to determine whether the models with nonsignificant paths removed should have been used.
EduG.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
gl ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduGS.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
gl + WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduGS2.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
gl + WRAT + CVLT + WBD + WGI + GPTR + WLGT + ACVL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduGS3.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + WBD + WGI + GPTR + WLGT + ACVL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduS.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduS2.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + WBD + WGI + CC + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduS3.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + WBD + WGI + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduG.fit <- cfa(EduG.model, data = VES, std.lv = T, std.ov = T)
EduGS.fit <- cfa(EduGS.model, data = VES, std.lv = T, std.ov = T)
## Warning in lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, : lavaan WARNING:
## Could not compute standard errors! The information matrix could
## not be inverted. This may be a symptom that the model is not
## identified.
EduGS2.fit <- cfa(EduGS2.model, data = VES, std.lv = T, std.ov = T)
EduGS3.fit <- cfa(EduGS3.model, data = VES, std.lv = T, std.ov = T)
EduS.fit <- cfa(EduS.model, data = VES, std.lv = T, std.ov = T)
EduS2.fit <- cfa(EduS2.model, data = VES, std.lv = T, std.ov = T)
EduS3.fit <- cfa(EduS3.model, data = VES, std.lv = T, std.ov = T)
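The pruning described above can be reproduced by filtering the parameter table for nonsignificant education paths; a sketch using the full specific-effects model (output omitted):
parameterEstimates(EduS.fit) %>%
  filter(op == "~", rhs == "education", pvalue > 0.05) %>% #Nonsignificant education paths
  dplyr::select(lhs, est, se, z, pvalue) #Candidates for removal in the pruned models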
round(cbind(JUSTG = fitMeasures(EduG.fit, FITM),
GPLUS = fitMeasures(EduGS.fit, FITM),
JUSTS = fitMeasures(EduS.fit, FITM),
GPLUS2 = fitMeasures(EduGS2.fit, FITM),
JUSTS2 = fitMeasures(EduS2.fit, FITM),
GPLUS3 = fitMeasures(EduGS3.fit, FITM),
JUSTS3 = fitMeasures(EduS3.fit, FITM)), 3)
## JUSTG GPLUS JUSTS GPLUS2 JUSTS2
## chisq 2456.636 2154.666 2154.666 2171.200 2162.574
## df 135.000 122.000 123.000 128.000 129.000
## npar 55.000 68.000 67.000 62.000 61.000
## cfi 0.952 0.958 0.958 0.958 0.958
## rmsea 0.063 0.062 0.062 0.061 0.060
## rmsea.ci.lower 0.061 0.060 0.060 0.059 0.058
## rmsea.ci.upper 0.065 0.064 0.064 0.063 0.063
## aic 187090.259 186814.289 186812.289 186818.823 186808.197
## bic 187440.639 187247.486 187239.115 187213.797 187196.801
## srmr 0.043 0.039 0.039 0.040 0.039
## GPLUS3 JUSTS3
## chisq 2174.241 2165.636
## df 130.000 130.000
## npar 60.000 60.000
## cfi 0.958 0.958
## rmsea 0.060 0.060
## rmsea.ci.lower 0.058 0.058
## rmsea.ci.upper 0.063 0.062
## aic 186817.863 186809.259
## bic 187200.096 187191.492
## srmr 0.040 0.040
#G + S models
pchisq(2171.2-2154.666, 6, lower.tail = F) #Gplus2 preferred
## [1] 0.01115742
pchisq(2174.241-2171.2, 2, lower.tail = F)
## [1] 0.2186026
#S models
pchisq(2162.574-2154.666, 6) #JustS preferred
## [1] 0.7550787
pchisq(2165.636-2154.666, 7)
## [1] 0.860067
round(cbind(JUSTG = fitMeasures(EduG.fit, FITM),
GPLUS2 = fitMeasures(EduGS2.fit, FITM),
JUSTS = fitMeasures(EduS.fit, FITM)), 3)
## JUSTG GPLUS2 JUSTS
## chisq 2456.636 2171.200 2154.666
## df 135.000 128.000 123.000
## npar 55.000 62.000 67.000
## cfi 0.952 0.958 0.958
## rmsea 0.063 0.061 0.062
## rmsea.ci.lower 0.061 0.059 0.060
## rmsea.ci.upper 0.065 0.063 0.064
## aic 187090.259 186818.823 186812.289
## bic 187440.639 187213.797 187239.115
## srmr 0.043 0.040 0.039
#G + S vs S
pchisq(2171.2-2154.666, 5, lower.tail = F)
## [1] 0.005473869
The g correlations in the EduG, EduGS2, and EduS models were 0.901, 0.933, and 0.941, respectively. The relationships between early g and education were 0.574, 0.564, and 0.566.
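These values can be extracted directly from the standardized solutions; a sketch:
sapply(list(EduG = EduG.fit, EduGS2 = EduGS2.fit, EduS = EduS.fit), function(fit) {
  standardizedSolution(fit) %>%
    filter(lhs == "gl", op == "~", rhs == "ge") %>% #Standardized later-g-on-early-g path
    pull(est.std)
})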
Fourth finding: A model in which education affects g fits poorly, and a model in which education affects both g and specific subtests is not theoretically coherent, because the path from education to later g was not significant and is therefore not even admissible. That model also fits significantly worse than a model in which only specific subtests are affected, with worse AIC and indistinguishable CFI and RMSEA, although its BIC is better. Fifth finding: In terms of model fit, a model in which education affects only specific subtests fits best. This replicates Ritchie, Bates & Deary (2015).
summary(EduGS2.fit, stand = T, fit = T) #For interest
## lavaan 0.6-9 ended normally after 63 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 62
##
## Used Total
## Number of observations 4318 4462
##
## Model Test User Model:
##
## Test statistic 2171.200
## Degrees of freedom 128
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48282.485
## Degrees of freedom 171
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.958
## Tucker-Lewis Index (TLI) 0.943
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -93347.412
## Loglikelihood unrestricted model (H1) -92261.811
##
## Akaike (AIC) 186818.823
## Bayesian (BIC) 187213.797
## Sample-size adjusted Bayesian (BIC) 187016.787
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.061
## 90 Percent confidence interval - lower 0.059
## 90 Percent confidence interval - upper 0.063
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.040
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.848 0.012 68.251 0.000 0.848 0.851
## ACAE 0.839 0.013 66.710 0.000 0.839 0.839
## AFQT 0.851 0.012 68.219 0.000 0.851 0.851
## GIT 0.733 0.013 54.626 0.000 0.733 0.733
## PA 0.656 0.014 47.287 0.000 0.656 0.660
## gl =~
## WRAT 0.220 0.007 32.546 0.000 0.657 0.664
## CVLT 0.144 0.007 21.092 0.000 0.430 0.430
## WCST 0.151 0.006 24.937 0.000 0.452 0.452
## WBD 0.214 0.007 30.808 0.000 0.641 0.647
## WGI 0.231 0.007 33.402 0.000 0.691 0.692
## CD 0.144 0.006 23.984 0.000 0.430 0.430
## CC 0.155 0.006 25.786 0.000 0.465 0.471
## PASAT 0.199 0.007 30.162 0.000 0.597 0.597
## GPTL 0.101 0.006 17.785 0.000 0.301 0.301
## GPTR 0.105 0.006 16.580 0.000 0.315 0.315
## WLGT 0.148 0.007 22.325 0.000 0.444 0.444
## ACVL 0.272 0.007 37.364 0.000 0.813 0.814
## ACAL 0.277 0.007 37.294 0.000 0.829 0.829
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.792 0.086 32.539 0.000 0.933 0.933
## education ~
## ge 0.564 0.015 38.635 0.000 0.564 0.564
## gl ~
## education 0.051 0.036 1.418 0.156 0.017 0.017
## WRAT ~
## education 0.128 0.013 9.860 0.000 0.128 0.130
## CVLT ~
## education -0.020 0.017 -1.204 0.229 -0.020 -0.020
## WBD ~
## education -0.046 0.014 -3.296 0.001 -0.046 -0.046
## WGI ~
## education 0.182 0.013 13.854 0.000 0.182 0.182
## GPTR ~
## education -0.030 0.014 -2.089 0.037 -0.030 -0.030
## WLGT ~
## education 0.105 0.016 6.364 0.000 0.105 0.105
## ACVL ~
## education 0.040 0.012 3.392 0.001 0.040 0.040
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.150 0.006 23.701 0.000 0.150 0.520
## .ACAE ~~
## .ACAL 0.128 0.006 20.105 0.000 0.128 0.423
## .CD ~~
## .CC 0.247 0.012 20.033 0.000 0.247 0.314
## .GPTL ~~
## .GPTR 0.544 0.016 33.345 0.000 0.544 0.598
## .ACVE ~~
## .WRAT 0.139 0.007 20.739 0.000 0.139 0.399
## .AFQT ~~
## .PA 0.160 0.008 20.615 0.000 0.160 0.409
## .WBD 0.120 0.007 16.195 0.000 0.120 0.296
## .PA ~~
## .WBD 0.238 0.010 23.376 0.000 0.238 0.411
## .CC 0.093 0.009 10.717 0.000 0.093 0.142
## .WRAT ~~
## .WGI 0.024 0.006 4.050 0.000 0.024 0.061
## .WLGT 0.103 0.008 12.681 0.000 0.103 0.180
## .ACVL 0.147 0.007 20.456 0.000 0.147 0.404
## .CVLT ~~
## .CC 0.122 0.011 10.715 0.000 0.122 0.155
## .WBD ~~
## .CC 0.137 0.010 14.099 0.000 0.137 0.203
## .WGI ~~
## .PASAT -0.106 0.008 -13.105 0.000 -0.106 -0.223
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.275 0.008 35.772 0.000 0.275 0.277
## .ACAE 0.296 0.008 36.704 0.000 0.296 0.296
## .AFQT 0.275 0.008 35.669 0.000 0.275 0.275
## .GIT 0.462 0.011 41.782 0.000 0.462 0.462
## .PA 0.557 0.013 43.011 0.000 0.557 0.564
## .WRAT 0.441 0.011 41.354 0.000 0.441 0.449
## .CVLT 0.824 0.018 45.484 0.000 0.824 0.824
## .WCST 0.795 0.018 45.386 0.000 0.795 0.796
## .WBD 0.601 0.014 43.556 0.000 0.601 0.612
## .WGI 0.351 0.009 39.765 0.000 0.351 0.352
## .CD 0.815 0.018 45.508 0.000 0.815 0.815
## .CC 0.757 0.017 45.739 0.000 0.757 0.778
## .PASAT 0.644 0.015 43.667 0.000 0.644 0.644
## .GPTL 0.909 0.020 46.047 0.000 0.909 0.909
## .GPTR 0.910 0.020 45.998 0.000 0.910 0.910
## .WLGT 0.741 0.016 45.304 0.000 0.741 0.741
## .ACVL 0.301 0.008 36.753 0.000 0.301 0.301
## .ACAL 0.312 0.008 36.800 0.000 0.312 0.312
## .education 0.682 0.015 44.170 0.000 0.682 0.682
## ge 1.000 1.000 1.000
## .gl 1.000 0.112 0.112
summary(EduS.fit, stand = T, fit = T)
## lavaan 0.6-9 ended normally after 64 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 67
##
## Used Total
## Number of observations 4318 4462
##
## Model Test User Model:
##
## Test statistic 2154.666
## Degrees of freedom 123
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48282.485
## Degrees of freedom 171
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.958
## Tucker-Lewis Index (TLI) 0.941
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -93339.144
## Loglikelihood unrestricted model (H1) -92261.811
##
## Akaike (AIC) 186812.289
## Bayesian (BIC) 187239.115
## Sample-size adjusted Bayesian (BIC) 187026.217
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.062
## 90 Percent confidence interval - lower 0.060
## 90 Percent confidence interval - upper 0.064
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.039
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.848 0.012 68.237 0.000 0.848 0.850
## ACAE 0.840 0.013 66.741 0.000 0.840 0.839
## AFQT 0.851 0.012 68.187 0.000 0.851 0.851
## GIT 0.733 0.013 54.597 0.000 0.733 0.733
## PA 0.656 0.014 47.267 0.000 0.656 0.660
## gl =~
## WRAT 0.220 0.007 32.557 0.000 0.651 0.657
## CVLT 0.145 0.007 21.187 0.000 0.431 0.431
## WCST 0.152 0.007 22.148 0.000 0.450 0.451
## WBD 0.216 0.007 30.842 0.000 0.639 0.645
## WGI 0.231 0.007 33.336 0.000 0.683 0.683
## CD 0.150 0.007 21.731 0.000 0.444 0.444
## CC 0.165 0.007 23.916 0.000 0.488 0.495
## PASAT 0.205 0.007 28.082 0.000 0.609 0.609
## GPTL 0.102 0.007 15.369 0.000 0.303 0.303
## GPTR 0.106 0.007 15.882 0.000 0.315 0.315
## WLGT 0.149 0.007 22.352 0.000 0.441 0.441
## ACVL 0.272 0.007 37.389 0.000 0.805 0.806
## ACAL 0.274 0.007 36.874 0.000 0.811 0.811
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.789 0.086 32.549 0.000 0.941 0.941
## education ~
## ge 0.566 0.015 38.773 0.000 0.566 0.566
## WRAT ~
## education 0.139 0.012 11.443 0.000 0.139 0.141
## CVLT ~
## education -0.021 0.017 -1.228 0.219 -0.021 -0.021
## WCST ~
## education 0.004 0.017 0.233 0.816 0.004 0.004
## WBD ~
## education -0.041 0.014 -3.042 0.002 -0.041 -0.042
## WGI ~
## education 0.196 0.013 15.718 0.000 0.196 0.196
## CD ~
## education -0.022 0.017 -1.303 0.193 -0.022 -0.022
## CC ~
## education -0.038 0.016 -2.320 0.020 -0.038 -0.038
## PASAT ~
## education -0.018 0.016 -1.168 0.243 -0.018 -0.018
## GPTL ~
## education -0.002 0.018 -0.109 0.913 -0.002 -0.002
## GPTR ~
## education -0.029 0.018 -1.636 0.102 -0.029 -0.029
## WLGT ~
## education 0.111 0.016 6.827 0.000 0.111 0.111
## ACVL ~
## education 0.053 0.010 5.086 0.000 0.053 0.053
## ACAL ~
## education 0.032 0.011 2.870 0.004 0.032 0.032
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.150 0.006 23.788 0.000 0.150 0.522
## .ACAE ~~
## .ACAL 0.129 0.006 20.172 0.000 0.129 0.423
## .CD ~~
## .CC 0.245 0.012 19.860 0.000 0.245 0.313
## .GPTL ~~
## .GPTR 0.543 0.016 33.316 0.000 0.543 0.598
## .ACVE ~~
## .WRAT 0.139 0.007 20.794 0.000 0.139 0.400
## .AFQT ~~
## .PA 0.161 0.008 20.683 0.000 0.161 0.410
## .WBD 0.120 0.007 16.219 0.000 0.120 0.296
## .PA ~~
## .WBD 0.238 0.010 23.379 0.000 0.238 0.411
## .CC 0.093 0.009 10.717 0.000 0.093 0.143
## .WRAT ~~
## .WGI 0.024 0.006 4.049 0.000 0.024 0.060
## .WLGT 0.103 0.008 12.672 0.000 0.103 0.180
## .ACVL 0.148 0.007 20.529 0.000 0.148 0.405
## .CVLT ~~
## .CC 0.121 0.011 10.619 0.000 0.121 0.154
## .WBD ~~
## .CC 0.136 0.010 13.995 0.000 0.136 0.202
## .WGI ~~
## .PASAT -0.107 0.008 -13.196 0.000 -0.107 -0.225
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.275 0.008 35.810 0.000 0.275 0.277
## .ACAE 0.296 0.008 36.719 0.000 0.296 0.296
## .AFQT 0.276 0.008 35.720 0.000 0.276 0.276
## .GIT 0.463 0.011 41.800 0.000 0.463 0.463
## .PA 0.558 0.013 43.029 0.000 0.558 0.564
## .WRAT 0.441 0.011 41.381 0.000 0.441 0.450
## .CVLT 0.823 0.018 45.461 0.000 0.823 0.823
## .WCST 0.795 0.018 45.335 0.000 0.795 0.795
## .WBD 0.600 0.014 43.522 0.000 0.600 0.611
## .WGI 0.352 0.009 39.781 0.000 0.352 0.352
## .CD 0.813 0.018 45.388 0.000 0.813 0.813
## .CC 0.754 0.017 45.532 0.000 0.754 0.774
## .PASAT 0.641 0.015 43.362 0.000 0.641 0.641
## .GPTL 0.909 0.020 46.018 0.000 0.909 0.909
## .GPTR 0.910 0.020 45.983 0.000 0.910 0.910
## .WLGT 0.741 0.016 45.300 0.000 0.741 0.741
## .ACVL 0.302 0.008 36.827 0.000 0.302 0.302
## .ACAL 0.313 0.008 36.970 0.000 0.313 0.313
## .education 0.680 0.015 44.105 0.000 0.680 0.680
## ge 1.000 1.000 1.000
## .gl 1.000 0.114 0.114
Sixth finding: The effects of education, though specific, were modest to large. Each standard deviation of education added 0.141 SDs to WRAT; subtracted 0.042 SDs from WBD (a path that was likely not supposed to remain significant, but did because \(\alpha\) = 0.05); added 0.196 SDs to WGI; subtracted 0.022 SDs from CC (a situation like WBD); added 0.111 SDs to WLGT; and added 0.053 SDs to ACVL, slightly more than the 0.032 SDs added to ACAL.
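For convenience, the education paths summarized above can be tabulated directly from the EduS fit; a sketch (output omitted):
parameterEstimates(EduS.fit, stand = T) %>%
  filter(op == "~", rhs == "education") %>%
  dplyr::select(Subtest = lhs, Beta = std.all, SE = se, 'p-value' = pvalue) %>%
  kable(digits = 3, format = "pandoc")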
EDUVESLATS <- list(
ge = early_tests,
gl = later_tests,
education = c("WRAT", "WGI", "WLGT", "ACVL"))
semPaths(EduG.fit, "model", "std", title = F, residuals = F, groups = "EDUVESLATS", pastel = T, mar = c(4, 1, 3, 1), intercepts = F, layout = "tree", bifactor = c("gl", "ge"), exoCov = F, curvePivot = T)
## Warning in semPaths(EduG.fit, "model", "std", title = F, residuals = F, :
## 'bifactor' argument only supported in layouts 'tree2', 'tree3', 'circle2' and
## 'circle3'
semPaths(EduGS2.fit, "model", "std", title = F, residuals = F, groups = "EDUVESLATS", pastel = T, mar = c(4, 1, 3, 1), intercepts = F, layout = "tree", bifactor = c("gl", "ge"), exoCov = F, curvePivot = T)
## Warning in semPaths(EduGS2.fit, "model", "std", title = F, residuals = F, :
## 'bifactor' argument only supported in layouts 'tree2', 'tree3', 'circle2' and
## 'circle3'
semPaths(EduS.fit, "model", "std", title = F, residuals = F, groups = "EDUVESLATS", pastel = T, mar = c(4, 1, 3, 1), intercepts = F, layout = "tree", bifactor = c("gl", "ge"), exoCov = F, curvePivot = T)
## Warning in semPaths(EduS.fit, "model", "std", title = F, residuals = F, :
## 'bifactor' argument only supported in layouts 'tree2', 'tree3', 'circle2' and
## 'circle3'
To fully replicate Ritchie, Bates & Deary’s (2015) method, we should (1) compare the baseline models, (2) compare the specific-effects-only model without dropped paths to the other finished models (since this was the main model, it is presented to show that we acknowledged this possibility, not because the robustness check was needed), and we should rerun (3) the baseline models and (4) the finished models without residual covariances included.
#(1) - Consistent
round(cbind(JUSTG = fitMeasures(EduG.fit, FITM),
GPLUS = fitMeasures(EduGS.fit, FITM),
JUSTS = fitMeasures(EduS.fit, FITM)), 3)
## JUSTG GPLUS JUSTS
## chisq 2456.636 2154.666 2154.666
## df 135.000 122.000 123.000
## npar 55.000 68.000 67.000
## cfi 0.952 0.958 0.958
## rmsea 0.063 0.062 0.062
## rmsea.ci.lower 0.061 0.060 0.060
## rmsea.ci.upper 0.065 0.064 0.064
## aic 187090.259 186814.289 186812.289
## bic 187440.639 187247.486 187239.115
## srmr 0.043 0.039 0.039
#(2) - Consistent
round(cbind(JUSTG = fitMeasures(EduG.fit, FITM),
GPLUS = fitMeasures(EduGS2.fit, FITM),
JUSTS = fitMeasures(EduS.fit, FITM)), 3)
## JUSTG GPLUS JUSTS
## chisq 2456.636 2171.200 2154.666
## df 135.000 128.000 123.000
## npar 55.000 62.000 67.000
## cfi 0.952 0.958 0.958
## rmsea 0.063 0.061 0.062
## rmsea.ci.lower 0.061 0.059 0.060
## rmsea.ci.upper 0.065 0.063 0.064
## aic 187090.259 186818.823 186812.289
## bic 187440.639 187213.797 187239.115
## srmr 0.043 0.040 0.039
pchisq(2171.2-2154.666, 5, lower.tail = F)
## [1] 0.005473869
#(3) - Consistent
EduGCV.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
gl ~ education'
EduGSCV.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
gl + WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education'
EduSCV.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education'
EduGCV.fit <- cfa(EduGCV.model, data = VES, std.lv = T, std.ov = T)
EduGSCV.fit <- cfa(EduGSCV.model, data = VES, std.lv = T, std.ov = T)
## Warning in lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, : lavaan WARNING:
## Could not compute standard errors! The information matrix could
## not be inverted. This may be a symptom that the model is not
## identified.
EduSCV.fit <- cfa(EduSCV.model, data = VES, std.lv = T, std.ov = T)
round(cbind(JUSTG = fitMeasures(EduGCV.fit, FITM),
GPLUS = fitMeasures(EduGSCV.fit, FITM),
JUSTS = fitMeasures(EduSCV.fit, FITM)), 3)
## JUSTG GPLUS JUSTS
## chisq 9761.484 9377.637 9377.637
## df 150.000 137.000 138.000
## npar 40.000 53.000 52.000
## cfi 0.800 0.808 0.808
## rmsea 0.122 0.125 0.125
## rmsea.ci.lower 0.120 0.123 0.122
## rmsea.ci.upper 0.124 0.127 0.127
## aic 194365.107 194007.260 194005.260
## bic 194619.929 194344.899 194336.528
## srmr 0.072 0.069 0.069
#(4) - Consistent
EduGS2CV.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
gl + WRAT + CVLT + WBD + WGI + GPTR + WLGT + ACVL ~ education'
EduGS2CV.fit <- cfa(EduGS2CV.model, data = VES, std.lv = T, std.ov = T)
round(cbind(JUSTG = fitMeasures(EduGCV.fit, FITM),
GPLUS = fitMeasures(EduGS2CV.fit, FITM),
JUSTS = fitMeasures(EduSCV.fit, FITM)), 3)
## JUSTG GPLUS JUSTS
## chisq 9761.484 9404.353 9377.637
## df 150.000 143.000 138.000
## npar 40.000 47.000 52.000
## cfi 0.800 0.808 0.808
## rmsea 0.122 0.122 0.125
## rmsea.ci.lower 0.120 0.120 0.122
## rmsea.ci.upper 0.124 0.125 0.127
## aic 194365.107 194021.976 194005.260
## bic 194619.929 194321.391 194336.528
## srmr 0.072 0.070 0.069
pchisq(9404.353-9377.637, 5, lower.tail = F)
## [1] 6.478461e-05
To finish fitting all the models fit by Ritchie, Bates & Deary (2015), we also had to residualize the later subtests for early g (using early g factor scores) and then refit the same models with the path from early g to later g severed. The results are consistent.
GE.model<-'
EarlyG =~ ACVE + ACAE + AFQT + GIT + PA
AFQT ~~ PA'
GE.fit <- cfa(GE.model, data = VES, std.lv = T, std.ov = T, missing = "ML"); fitMeasures(GE.fit, FITM)
## Warning in lav_data_full(data = data, group = group, cluster = cluster, : lavaan WARNING: some cases are empty and will be ignored:
## 2040 2312 2830
## chisq df npar cfi rmsea
## 129.851 4.000 16.000 0.991 0.084
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.072 0.097 48923.424 49025.867 0.016
FacData <- lavPredict(GE.fit); VESTest <- cbind(VES, FacData)
VESAdj <- umx_residualize(c("WRAT", "CVLT", "WCST", "WBD", "WGI", "CD", "CC", "PASAT", "GPTL" ,"GPTR", "WLGT", "ACVL", "ACAL"), "EarlyG", data = VESTest)
## (Intercept) B = 61.162 [60.852, 61.471], t = 387.313, p < 0.001
## EarlyG B = 10.875 [10.548, 11.202], t = 65.192, p < 0.001
## R² = 0.488 (adj = 0.488)
## 3 cases of var 'WRAT'lost due to missing covariates
## (Intercept) B = 11.058 [10.994, 11.123], t = 337.686, p < 0.001
## EarlyG B = 0.862 [0.794, 0.93], t = 24.933, p < 0.001
## R² = 0.122 (adj = 0.122)
## 3 cases of var 'CVLT'lost due to missing covariates
## (Intercept) B = 0.789 [0.785, 0.794], t = 328.77, p < 0.001
## EarlyG B = 0.072 [0.067, 0.077], t = 28.267, p < 0.001
## R² = 0.152 (adj = 0.152)
## 3 cases of var 'WCST'lost due to missing covariates
## (Intercept) B = 10.519 [10.457, 10.582], t = 328.863, p < 0.001
## EarlyG B = 1.632 [1.566, 1.699], t = 48.334, p < 0.001
## R² = 0.344 (adj = 0.344)
## 3 cases of var 'WBD'lost due to missing covariates
## (Intercept) B = 10.073 [10.017, 10.129], t = 354.976, p < 0.001
## EarlyG B = 2.18 [2.121, 2.238], t = 72.75, p < 0.001
## R² = 0.543 (adj = 0.543)
## 3 cases of var 'WGI'lost due to missing covariates
## (Intercept) B = 32.733 [32.642, 32.823], t = 710.599, p < 0.001
## EarlyG B = 1.286 [1.19, 1.381], t = 26.44, p < 0.001
## R² = 0.136 (adj = 0.135)
## 3 cases of var 'CD'lost due to missing covariates
## (Intercept) B = 0 [-0.026, 0.026], t = -0.018, p = 0.985
## EarlyG B = 0.437 [0.409, 0.464], t = 31.158, p < 0.001
## R² = 0.179 (adj = 0.179)
## 3 cases of var 'CC'lost due to missing covariates
## (Intercept) B = 108.779 [107.485, 110.072], t = 164.833, p < 0.001
## EarlyG B = 26.668 [25.3, 28.036], t = 38.22, p < 0.001
## R² = 0.247 (adj = 0.247)
## 3 cases of var 'PASAT'lost due to missing covariates
## (Intercept) B = -77.358 [-77.747, -76.969], t = -389.903, p < 0.001
## EarlyG B = 3.723 [3.312, 4.134], t = 17.774, p < 0.001
## R² = 0.066 (adj = 0.066)
## 3 cases of var 'GPTL'lost due to missing covariates
## (Intercept) B = -73.661 [-73.998, -73.324], t = -428.91, p < 0.001
## EarlyG B = 3.026 [2.671, 3.382], t = 16.686, p < 0.001
## R² = 0.059 (adj = 0.059)
## 3 cases of var 'GPTR'lost due to missing covariates
## (Intercept) B = 35.117 [34.827, 35.407], t = 237.261, p < 0.001
## EarlyG B = 4.919 [4.613, 5.226], t = 31.48, p < 0.001
## R² = 0.182 (adj = 0.182)
## 3 cases of var 'WLGT'lost due to missing covariates
## (Intercept) B = 116.526 [116.116, 116.936], t = 557.674, p < 0.001
## EarlyG B = 19.345 [18.912, 19.777], t = 87.689, p < 0.001
## R² = 0.633 (adj = 0.633)
## 3 cases of var 'ACVL'lost due to missing covariates
## (Intercept) B = 104.573 [104.118, 105.029], t = 450.088, p < 0.001
## EarlyG B = 19.892 [19.411, 20.373], t = 81.094, p < 0.001
## R² = 0.596 (adj = 0.596)
## 3 cases of var 'ACAL'lost due to missing covariates
EduG.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
education ~ ge
gl ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduGS.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
education ~ ge
gl + WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduGS2.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
education ~ ge
gl + WRAT + WBD + WGI + WLGT ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduS.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduS2.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
education ~ ge
WRAT + CVLT + WCST + 0*WBD + WGI + CD + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT'
EduG.fit <- cfa(EduG.model, data = VESAdj, std.lv = T, std.ov = T)
EduGS.fit <- cfa(EduGS.model, data = VESAdj, std.lv = T, std.ov = T)
## Warning in lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, : lavaan WARNING:
## Could not compute standard errors! The information matrix could
## not be inverted. This may be a symptom that the model is not
## identified.
EduGS2.fit <- cfa(EduGS2.model, data = VESAdj, std.lv = T, std.ov = T)
EduS.fit <- cfa(EduS.model, data = VESAdj, std.lv = T, std.ov = T)
EduS2.fit <- cfa(EduS2.model, data = VESAdj, std.lv = T, std.ov = T) #Has to be modified to keep WBD on with a 0 loading; functionally equivalent to other method of dropping parameters.
round(cbind(JUSTG = fitMeasures(EduG.fit, FITM),
GPLUS = fitMeasures(EduGS.fit, FITM),
GPLUS2 = fitMeasures(EduGS2.fit, FITM),
JUSTS = fitMeasures(EduS.fit, FITM),
JUSTS2 = fitMeasures(EduS2.fit, FITM)), 3)
## JUSTG GPLUS GPLUS2 JUSTS JUSTS2
## chisq 2504.931 2220.165 2248.185 2115.386 2118.781
## df 136.000 123.000 132.000 123.000 124.000
## npar 54.000 67.000 58.000 67.000 66.000
## cfi 0.910 0.920 0.919 0.924 0.924
## rmsea 0.064 0.063 0.061 0.061 0.061
## rmsea.ci.lower 0.061 0.061 0.059 0.059 0.059
## rmsea.ci.upper 0.066 0.065 0.063 0.064 0.063
## aic 209011.922 208753.155 208763.175 208648.376 208649.772
## bic 209355.931 209179.982 209132.667 209075.203 209070.228
## srmr 0.064 0.062 0.061 0.057 0.057
pchisq(2248.185-2220.165, 9, lower.tail = F) #Gplus preferred
## [1] 0.0009465549
pchisq(2118.781-2115.386, 1, lower.tail = F) #JustS2 preferred
## [1] 0.06539436
pchisq(2220.165-2118.781, 1, lower.tail = F) #JustS2 vs GPlus
## [1] 7.577338e-24
pchisq(2248.185-2115.386, 9, lower.tail = F) #JustS vs GPlus2
## [1] 3.148777e-24
EduSVG.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT
income ~ gl'
EduSVE.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT
income ~ education'
EduSVGE.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT
income ~ education + gl'
EduSVE2.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT
income ~ education + WRAT + WBD + WGI + CC + WLGT + ACVL + ACAL'
EduSV1.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT
income ~ gl + WRAT + WBD + WGI + CC + WLGT + ACVL + ACAL'
EduSV2.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
gl + education ~ ge
WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL ~ education
ACVE ~~ ACVL
ACAE ~~ ACAL
CD ~~ CC
GPTL ~~ GPTR
ACVE ~~ WRAT
AFQT ~~ PA + WBD
PA ~~ WBD + CC
WRAT ~~ WGI + WLGT + ACVL
CC ~~ CVLT + WBD
WGI ~~ PASAT
income ~ gl + WRAT + WBD + WGI + CC + WLGT + ACVL + ACAL + education'
EduSVG.fit <- cfa(EduSVG.model, data = VES, std.lv = T, std.ov = T)
EduSVE.fit <- cfa(EduSVE.model, data = VES, std.lv = T, std.ov = T)
EduSVGE.fit <- cfa(EduSVGE.model, data = VES, std.lv = T, std.ov = T)
EduSVE2.fit <- cfa(EduSVE2.model, data = VES, std.lv = T, std.ov = T) #Not valid
EduSV1.fit <- cfa(EduSV1.model, data = VES, std.lv = T, std.ov = T)
EduSV2.fit <- cfa(EduSV2.model, data = VES, std.lv = T, std.ov = T)
summary(EduSV1.fit, stand = T, fit = T)
## lavaan 0.6-9 ended normally after 70 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 76
##
## Used Total
## Number of observations 4237 4462
##
## Model Test User Model:
##
## Test statistic 2278.609
## Degrees of freedom 134
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48411.141
## Degrees of freedom 190
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.956
## Tucker-Lewis Index (TLI) 0.937
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -97164.584
## Loglikelihood unrestricted model (H1) -96025.280
##
## Akaike (AIC) 194481.169
## Bayesian (BIC) 194963.891
## Sample-size adjusted Bayesian (BIC) 194722.395
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.061
## 90 Percent confidence interval - lower 0.059
## 90 Percent confidence interval - upper 0.064
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.038
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.851 0.013 68.040 0.000 0.851 0.854
## ACAE 0.839 0.013 66.059 0.000 0.839 0.838
## AFQT 0.848 0.013 67.161 0.000 0.848 0.847
## GIT 0.732 0.014 54.042 0.000 0.732 0.732
## PA 0.656 0.014 46.819 0.000 0.656 0.659
## gl =~
## WRAT 0.215 0.007 31.901 0.000 0.658 0.664
## CVLT 0.144 0.007 21.049 0.000 0.439 0.439
## WCST 0.152 0.007 22.170 0.000 0.463 0.463
## WBD 0.213 0.007 30.316 0.000 0.649 0.655
## WGI 0.227 0.007 32.590 0.000 0.692 0.693
## CD 0.148 0.007 21.653 0.000 0.453 0.453
## CC 0.164 0.007 23.815 0.000 0.502 0.508
## PASAT 0.202 0.007 27.724 0.000 0.617 0.617
## GPTL 0.102 0.007 15.541 0.000 0.313 0.313
## GPTR 0.107 0.007 16.134 0.000 0.327 0.327
## WLGT 0.148 0.007 22.286 0.000 0.453 0.453
## ACVL 0.267 0.007 36.442 0.000 0.816 0.816
## ACAL 0.269 0.007 35.936 0.000 0.822 0.823
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.887 0.090 31.962 0.000 0.945 0.945
## education ~
## ge 0.577 0.015 39.423 0.000 0.577 0.577
## WRAT ~
## education 0.127 0.012 10.276 0.000 0.127 0.128
## CVLT ~
## education -0.035 0.017 -2.016 0.044 -0.035 -0.035
## WCST ~
## education -0.015 0.017 -0.904 0.366 -0.015 -0.015
## WBD ~
## education -0.057 0.014 -4.146 0.000 -0.057 -0.058
## WGI ~
## education 0.176 0.013 13.871 0.000 0.176 0.176
## CD ~
## education -0.037 0.017 -2.138 0.032 -0.037 -0.037
## CC ~
## education -0.058 0.017 -3.505 0.000 -0.058 -0.059
## PASAT ~
## education -0.036 0.016 -2.279 0.023 -0.036 -0.036
## GPTL ~
## education -0.018 0.018 -0.991 0.322 -0.018 -0.018
## GPTR ~
## education -0.045 0.018 -2.513 0.012 -0.045 -0.045
## WLGT ~
## education 0.093 0.017 5.607 0.000 0.093 0.093
## ACVL ~
## education 0.033 0.010 3.135 0.002 0.033 0.033
## ACAL ~
## education 0.010 0.011 0.932 0.351 0.010 0.010
## income ~
## gl 0.223 0.025 9.073 0.000 0.681 0.681
## WRAT -0.080 0.024 -3.320 0.001 -0.080 -0.079
## WBD -0.105 0.020 -5.329 0.000 -0.105 -0.104
## WGI -0.077 0.027 -2.837 0.005 -0.077 -0.077
## CC -0.033 0.017 -1.984 0.047 -0.033 -0.033
## WLGT -0.025 0.017 -1.488 0.137 -0.025 -0.025
## ACVL -0.080 0.033 -2.399 0.016 -0.080 -0.080
## ACAL 0.053 0.032 1.694 0.090 0.053 0.053
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.147 0.006 23.349 0.000 0.147 0.516
## .ACAE ~~
## .ACAL 0.128 0.006 19.938 0.000 0.128 0.419
## .CD ~~
## .CC 0.246 0.012 19.785 0.000 0.246 0.315
## .GPTL ~~
## .GPTR 0.541 0.016 32.934 0.000 0.541 0.596
## .ACVE ~~
## .WRAT 0.137 0.007 20.448 0.000 0.137 0.397
## .AFQT ~~
## .PA 0.165 0.008 20.967 0.000 0.165 0.415
## .WBD 0.123 0.008 16.286 0.000 0.123 0.298
## .PA ~~
## .WBD 0.239 0.010 23.256 0.000 0.239 0.412
## .CC 0.092 0.009 10.618 0.000 0.092 0.142
## .WRAT ~~
## .WGI 0.025 0.006 4.245 0.000 0.025 0.064
## .WLGT 0.104 0.008 12.655 0.000 0.104 0.181
## .ACVL 0.148 0.007 20.420 0.000 0.148 0.406
## .CVLT ~~
## .CC 0.120 0.011 10.415 0.000 0.120 0.152
## .WBD ~~
## .CC 0.136 0.010 13.957 0.000 0.136 0.203
## .WGI ~~
## .PASAT -0.106 0.008 -12.906 0.000 -0.106 -0.221
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.270 0.008 35.433 0.000 0.270 0.271
## .ACAE 0.298 0.008 36.699 0.000 0.298 0.297
## .AFQT 0.282 0.008 35.994 0.000 0.282 0.282
## .GIT 0.464 0.011 41.568 0.000 0.464 0.464
## .PA 0.559 0.013 42.782 0.000 0.559 0.565
## .WRAT 0.441 0.011 41.060 0.000 0.441 0.450
## .CVLT 0.823 0.018 45.073 0.000 0.823 0.823
## .WCST 0.793 0.018 44.929 0.000 0.793 0.793
## .WBD 0.598 0.014 43.101 0.000 0.598 0.609
## .WGI 0.355 0.009 39.504 0.000 0.355 0.356
## .CD 0.811 0.018 44.988 0.000 0.811 0.811
## .CC 0.751 0.017 45.064 0.000 0.751 0.771
## .PASAT 0.643 0.015 43.117 0.000 0.643 0.643
## .GPTL 0.908 0.020 45.589 0.000 0.908 0.908
## .GPTR 0.907 0.020 45.549 0.000 0.907 0.907
## .WLGT 0.739 0.016 44.838 0.000 0.739 0.740
## .ACVL 0.303 0.008 36.485 0.000 0.303 0.303
## .ACAL 0.313 0.009 36.598 0.000 0.313 0.313
## .education 0.667 0.015 43.667 0.000 0.667 0.667
## .income 0.782 0.020 40.058 0.000 0.782 0.781
## ge 1.000 1.000 1.000
## .gl 1.000 0.107 0.107
summary(EduSV2.fit, stand = T, fit = T)
## lavaan 0.6-9 ended normally after 72 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 77
##
## Used Total
## Number of observations 4237 4462
##
## Model Test User Model:
##
## Test statistic 2150.833
## Degrees of freedom 133
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48411.141
## Degrees of freedom 190
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.958
## Tucker-Lewis Index (TLI) 0.940
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -97100.696
## Loglikelihood unrestricted model (H1) -96025.280
##
## Akaike (AIC) 194355.392
## Bayesian (BIC) 194844.466
## Sample-size adjusted Bayesian (BIC) 194599.792
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.060
## 90 Percent confidence interval - lower 0.058
## 90 Percent confidence interval - upper 0.062
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.037
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.850 0.013 67.991 0.000 0.850 0.853
## ACAE 0.839 0.013 66.070 0.000 0.839 0.839
## AFQT 0.850 0.013 67.408 0.000 0.850 0.850
## GIT 0.734 0.014 54.181 0.000 0.734 0.734
## PA 0.656 0.014 46.857 0.000 0.656 0.660
## gl =~
## WRAT 0.220 0.007 32.468 0.000 0.651 0.658
## CVLT 0.146 0.007 21.087 0.000 0.431 0.431
## WCST 0.153 0.007 22.196 0.000 0.454 0.454
## WBD 0.216 0.007 30.726 0.000 0.640 0.645
## WGI 0.231 0.007 33.225 0.000 0.683 0.683
## CD 0.150 0.007 21.658 0.000 0.444 0.444
## CC 0.166 0.007 23.902 0.000 0.492 0.498
## PASAT 0.204 0.007 27.969 0.000 0.605 0.605
## GPTL 0.103 0.007 15.438 0.000 0.306 0.306
## GPTR 0.108 0.007 16.035 0.000 0.320 0.320
## WLGT 0.151 0.007 22.402 0.000 0.446 0.446
## ACVL 0.272 0.007 37.311 0.000 0.805 0.806
## ACAL 0.274 0.007 36.767 0.000 0.810 0.811
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.787 0.086 32.487 0.000 0.941 0.941
## education ~
## ge 0.566 0.015 38.405 0.000 0.566 0.566
## WRAT ~
## education 0.141 0.012 11.458 0.000 0.141 0.142
## CVLT ~
## education -0.021 0.017 -1.194 0.232 -0.021 -0.021
## WCST ~
## education 0.000 0.017 0.016 0.987 0.000 0.000
## WBD ~
## education -0.041 0.014 -2.976 0.003 -0.041 -0.041
## WGI ~
## education 0.197 0.013 15.611 0.000 0.197 0.197
## CD ~
## education -0.021 0.017 -1.247 0.212 -0.021 -0.021
## CC ~
## education -0.041 0.016 -2.515 0.012 -0.041 -0.042
## PASAT ~
## education -0.016 0.016 -0.996 0.319 -0.016 -0.016
## GPTL ~
## education -0.007 0.018 -0.388 0.698 -0.007 -0.007
## GPTR ~
## education -0.034 0.018 -1.896 0.058 -0.034 -0.034
## WLGT ~
## education 0.107 0.016 6.531 0.000 0.107 0.107
## ACVL ~
## education 0.052 0.010 4.970 0.000 0.052 0.052
## ACAL ~
## education 0.032 0.011 2.862 0.004 0.032 0.032
## income ~
## gl 0.204 0.025 8.311 0.000 0.605 0.605
## WRAT -0.110 0.024 -4.646 0.000 -0.110 -0.109
## WBD -0.087 0.019 -4.477 0.000 -0.087 -0.086
## WGI -0.124 0.027 -4.570 0.000 -0.124 -0.124
## CC -0.029 0.017 -1.718 0.086 -0.029 -0.028
## WLGT -0.032 0.017 -1.907 0.057 -0.032 -0.032
## ACVL -0.067 0.033 -2.051 0.040 -0.067 -0.067
## ACAL 0.050 0.031 1.627 0.104 0.050 0.050
## education 0.203 0.018 11.473 0.000 0.203 0.203
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.148 0.006 23.380 0.000 0.148 0.517
## .ACAE ~~
## .ACAL 0.128 0.006 20.002 0.000 0.128 0.421
## .CD ~~
## .CC 0.246 0.012 19.816 0.000 0.246 0.315
## .GPTL ~~
## .GPTR 0.541 0.016 32.946 0.000 0.541 0.596
## .ACVE ~~
## .WRAT 0.137 0.007 20.416 0.000 0.137 0.397
## .AFQT ~~
## .PA 0.163 0.008 20.740 0.000 0.163 0.413
## .WBD 0.123 0.008 16.314 0.000 0.123 0.300
## .PA ~~
## .WBD 0.239 0.010 23.271 0.000 0.239 0.413
## .CC 0.092 0.009 10.627 0.000 0.092 0.143
## .WRAT ~~
## .WGI 0.024 0.006 4.012 0.000 0.024 0.060
## .WLGT 0.103 0.008 12.567 0.000 0.103 0.180
## .ACVL 0.147 0.007 20.253 0.000 0.147 0.404
## .CVLT ~~
## .CC 0.120 0.011 10.424 0.000 0.120 0.152
## .WBD ~~
## .CC 0.137 0.010 13.980 0.000 0.137 0.203
## .WGI ~~
## .PASAT -0.106 0.008 -13.047 0.000 -0.106 -0.224
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.270 0.008 35.335 0.000 0.270 0.272
## .ACAE 0.297 0.008 36.576 0.000 0.297 0.297
## .AFQT 0.279 0.008 35.677 0.000 0.279 0.278
## .GIT 0.462 0.011 41.468 0.000 0.462 0.462
## .PA 0.558 0.013 42.702 0.000 0.558 0.564
## .WRAT 0.439 0.011 40.959 0.000 0.439 0.448
## .CVLT 0.823 0.018 45.074 0.000 0.823 0.823
## .WCST 0.793 0.018 44.935 0.000 0.793 0.794
## .WBD 0.600 0.014 43.120 0.000 0.600 0.610
## .WGI 0.352 0.009 39.441 0.000 0.352 0.352
## .CD 0.812 0.018 44.997 0.000 0.812 0.812
## .CC 0.752 0.017 45.086 0.000 0.752 0.772
## .PASAT 0.643 0.015 43.122 0.000 0.643 0.643
## .GPTL 0.908 0.020 45.593 0.000 0.908 0.908
## .GPTR 0.908 0.020 45.553 0.000 0.908 0.908
## .WLGT 0.739 0.016 44.842 0.000 0.739 0.739
## .ACVL 0.302 0.008 36.452 0.000 0.302 0.302
## .ACAL 0.313 0.009 36.644 0.000 0.313 0.314
## .education 0.680 0.016 43.699 0.000 0.680 0.680
## .income 0.767 0.019 40.907 0.000 0.767 0.766
## ge 1.000 1.000 1.000
## .gl 1.000 0.114 0.114
Seventh finding: The validity of g for predicting subsequent income is largely independent of education. Eighth finding: The validity of education for predicting subsequent income exists net of its effects on subtests. Ninth finding: The validity of specific subtests for predicting subsequent income is minor, and what validity they do have is largely independent of education effects.
Education does not make people more intelligent, and education does not explain why g predicts socioeconomic status, at least as measured by income. Additionally, educational effects on specific subtests do not explain those subtests' validity. To the extent education affects subsequent socioeconomic status, it is not through enhancing cognitive ability or through specific skills of the sort measured in our dataset. Educational effects on intelligence appear to be “hollow”.
Measurement invariance was tested with high school graduation as the cutoff for group membership; strict invariance was untenable, while metric and scalar invariance were tenable. Partial strict invariance could not be attained. This finding is curious: although educational effects on means were confined to certain subtests, the effects on variances were considerably broader. That is, education affected the subtest residual variances, meaning that the influences on the subtests were different and greater for the college-educated group than for the group with only a high school education. Though income prediction from intelligence was equal for college- and non-college-educated men, prediction was less accurate for the college-educated when individual subtests were used (though not when g was used, since it was modeled as a latent variable and the latent variance was equal across groups). In other words, the effect of college on cognitive test performance was heterogeneous: some people gained from it, others may have lost, and for others still, nothing may have changed. But, in general, education cannot be expected to increase cognitive ability.
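For reference, the invariance sequence can be sketched in lavaan as below. This is illustrative only: hsgrad is a hypothetical 0/1 grouping variable for high school graduation (not necessarily the variable name in the data), and the later-battery measurement model (GL.model) defined earlier is used as an example.
MI.configural <- cfa(GL.model, data = VES, std.lv = T, group = "hsgrad") #Sketch only: no equality constraints
MI.metric <- cfa(GL.model, data = VES, std.lv = T, group = "hsgrad", group.equal = "loadings") #Equal loadings
MI.scalar <- cfa(GL.model, data = VES, std.lv = T, group = "hsgrad", group.equal = c("loadings", "intercepts")) #Equal loadings and intercepts
MI.strict <- cfa(GL.model, data = VES, std.lv = T, group = "hsgrad", group.equal = c("loadings", "intercepts", "residuals")) #Equal loadings, intercepts, and residual variances
lavTestLRT(MI.configural, MI.metric, MI.scalar, MI.strict) #Chi-square difference tests across invariance levels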
Early g was also found to positively affect nerve conduction velocity (NCV) in adulthood, as did education, albeit with an effect that was 75.53% as large. Given the small relationship between NCV and g, it is even more curious that there was one with education, but this dataset is sufficiently well-powered to detect both. Potential mechanisms may need to be proposed theoretically. For example, the finding is consistent with a formative model of g rather than a model of “biological g”. Additionally, more educated people may attain better health through the socioeconomic status-cultivating effects of education (which are, again, net of g and of the affected tests), leading to improved nerve conduction velocity down the line. Further investigation is warranted.
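A sketch of the kind of structural specification behind that comparison is given below, assuming the nerve conduction velocity measure is stored in a column here called NCV (a placeholder name, not necessarily the actual variable); this is illustrative, not the exact code used.
NCV.model <- '
ge =~ ACVE + ACAE + AFQT + GIT + PA
AFQT ~~ PA
education ~ ge
NCV ~ ge + education' #Sketch only: NCV is a placeholder name for the nerve conduction velocity variable
NCV.fit <- cfa(NCV.model, data = VES, std.lv = T, std.ov = T)
summary(NCV.fit, stand = T, fit = T)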
Ritchie, S. J., Bates, T. C., & Deary, I. J. (2015). Is Education Associated With Improvements in General Cognitive Ability, or in Specific Skills? Developmental Psychology, 51(5), 573–582. https://doi.org/10.1037/a0038981
sessionInfo()
## R version 4.1.2 (2021-11-01)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 19042)
##
## Matrix products: default
##
## locale:
## [1] LC_COLLATE=English_United States.1252
## [2] LC_CTYPE=English_United States.1252
## [3] LC_MONETARY=English_United States.1252
## [4] LC_NUMERIC=C
## [5] LC_TIME=English_United States.1252
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] umx_4.9.0 OpenMx_2.19.8 semPlot_1.1.5
## [4] knitr_1.36 lavaan_0.6-9 kirkegaard_2022-04-12
## [7] rlang_0.4.12 metafor_3.0-2 Matrix_1.3-4
## [10] psych_2.1.9 magrittr_2.0.1 assertthat_0.2.1
## [13] weights_1.0.4 Hmisc_4.7-0 Formula_1.2-4
## [16] survival_3.2-13 lattice_0.20-45 forcats_0.5.1
## [19] stringr_1.4.0 dplyr_1.0.7 purrr_0.3.4
## [22] readr_2.0.2 tidyr_1.1.4 tibble_3.1.5
## [25] ggplot2_3.3.5 tidyverse_1.3.1 pacman_0.5.1
##
## loaded via a namespace (and not attached):
## [1] readxl_1.4.0 backports_1.3.0 systemfonts_1.0.3
## [4] plyr_1.8.6 igraph_1.2.7 splines_4.1.2
## [7] polycor_0.7-10 digest_0.6.28 htmltools_0.5.2
## [10] gdata_2.18.0 fansi_0.5.0 checkmate_2.0.0
## [13] lisrelToR_0.1.4 cluster_2.1.2 tzdb_0.2.0
## [16] openxlsx_4.2.5 modelr_0.1.8 RcppParallel_5.1.4
## [19] svglite_2.0.0 jpeg_0.1-9 sem_3.1-15
## [22] colorspace_2.0-2 rvest_1.0.2 haven_2.4.3
## [25] xfun_0.27 crayon_1.4.2 jsonlite_1.7.2
## [28] lme4_1.1-27.1 glue_1.4.2 kableExtra_1.3.4
## [31] gtable_0.3.0 webshot_0.5.2 mi_1.0
## [34] V8_3.4.2 abind_1.4-5 scales_1.1.1
## [37] DBI_1.1.2 Rcpp_1.0.7 viridisLite_0.4.0
## [40] xtable_1.8-4 htmlTable_2.4.0 tmvnsim_1.0-2
## [43] foreign_0.8-81 rsvg_2.1.2 stats4_4.1.2
## [46] htmlwidgets_1.5.4 httr_1.4.2 DiagrammeR_1.0.6.1
## [49] RColorBrewer_1.1-2 ellipsis_0.3.2 mice_3.14.0
## [52] pkgconfig_2.0.3 XML_3.99-0.9 nnet_7.3-16
## [55] sass_0.4.0 kutils_1.70 dbplyr_2.1.1
## [58] utf8_1.2.2 tidyselect_1.1.1 reshape2_1.4.4
## [61] munsell_0.5.0 cellranger_1.1.0 tools_4.1.2
## [64] visNetwork_2.1.0 cli_3.1.0 generics_0.1.1
## [67] broom_0.7.10 mathjaxr_1.4-0 fdrtool_1.2.17
## [70] evaluate_0.14 fastmap_1.1.0 arm_1.12-2
## [73] yaml_2.2.1 fs_1.5.0 zip_2.2.0
## [76] glasso_1.11 pbapply_1.5-0 nlme_3.1-153
## [79] xml2_1.3.2 compiler_4.1.2 rstudioapi_0.13
## [82] curl_4.3.2 png_0.1-7 reprex_2.0.1
## [85] DiagrammeRsvg_0.1 bslib_0.3.1 pbivnorm_0.6.0
## [88] stringi_1.7.5 highr_0.9 qgraph_1.9.2
## [91] rockchalk_1.8.151 nloptr_1.2.2.3 vctrs_0.3.8
## [94] pillar_1.6.4 lifecycle_1.0.1 jquerylib_0.1.4
## [97] data.table_1.14.2 cowplot_1.1.1 corpcor_1.6.10
## [100] R6_2.5.1 latticeExtra_0.6-29 MuMIn_1.43.17
## [103] gridExtra_2.3 boot_1.3-28 MASS_7.3-54
## [106] gtools_3.9.2 withr_2.4.2 mnormt_2.0.2
## [109] parallel_4.1.2 hms_1.1.1 grid_4.1.2
## [112] rpart_4.1-15 coda_0.19-4 minqa_1.2.4
## [115] rmarkdown_2.11 carData_3.0-4 lubridate_1.8.0
## [118] base64enc_0.1-3
summary(EduSVG.fit, stand = T, fit = T)
## lavaan 0.6-9 ended normally after 62 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 69
##
## Used Total
## Number of observations 4237 4462
##
## Model Test User Model:
##
## Test statistic 2347.921
## Degrees of freedom 141
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48411.141
## Degrees of freedom 190
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.954
## Tucker-Lewis Index (TLI) 0.938
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -97199.240
## Loglikelihood unrestricted model (H1) -96025.280
##
## Akaike (AIC) 194536.480
## Bayesian (BIC) 194974.741
## Sample-size adjusted Bayesian (BIC) 194755.488
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.061
## 90 Percent confidence interval - lower 0.059
## 90 Percent confidence interval - upper 0.063
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.039
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.849 0.013 67.817 0.000 0.849 0.852
## ACAE 0.841 0.013 66.225 0.000 0.841 0.840
## AFQT 0.849 0.013 67.332 0.000 0.849 0.849
## GIT 0.732 0.014 54.053 0.000 0.732 0.732
## PA 0.656 0.014 46.862 0.000 0.656 0.660
## gl =~
## WRAT 0.216 0.007 31.959 0.000 0.652 0.658
## CVLT 0.144 0.007 21.011 0.000 0.436 0.436
## WCST 0.152 0.007 22.120 0.000 0.460 0.460
## WBD 0.212 0.007 30.312 0.000 0.642 0.648
## WGI 0.227 0.007 32.642 0.000 0.685 0.686
## CD 0.149 0.007 21.603 0.000 0.450 0.450
## CC 0.164 0.007 23.774 0.000 0.496 0.503
## PASAT 0.204 0.007 27.754 0.000 0.615 0.615
## GPTL 0.102 0.007 15.375 0.000 0.308 0.308
## GPTR 0.107 0.007 15.969 0.000 0.322 0.322
## WLGT 0.148 0.007 22.214 0.000 0.448 0.448
## ACVL 0.268 0.007 36.601 0.000 0.810 0.811
## ACAL 0.272 0.008 36.186 0.000 0.822 0.822
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.852 0.089 31.987 0.000 0.944 0.944
## education ~
## ge 0.572 0.015 38.971 0.000 0.572 0.572
## WRAT ~
## education 0.133 0.012 10.839 0.000 0.133 0.135
## CVLT ~
## education -0.029 0.017 -1.695 0.090 -0.029 -0.029
## WCST ~
## education -0.009 0.017 -0.549 0.583 -0.009 -0.009
## WBD ~
## education -0.048 0.014 -3.520 0.000 -0.048 -0.049
## WGI ~
## education 0.186 0.013 14.674 0.000 0.186 0.186
## CD ~
## education -0.031 0.017 -1.789 0.074 -0.031 -0.031
## CC ~
## education -0.050 0.016 -3.050 0.002 -0.050 -0.051
## PASAT ~
## education -0.030 0.016 -1.874 0.061 -0.030 -0.030
## GPTL ~
## education -0.012 0.018 -0.688 0.491 -0.012 -0.012
## GPTR ~
## education -0.040 0.018 -2.205 0.027 -0.040 -0.040
## WLGT ~
## education 0.100 0.016 6.061 0.000 0.100 0.100
## ACVL ~
## education 0.041 0.010 3.863 0.000 0.041 0.041
## ACAL ~
## education 0.017 0.011 1.549 0.121 0.017 0.017
## income ~
## gl 0.141 0.006 23.114 0.000 0.425 0.425
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.150 0.006 23.648 0.000 0.150 0.521
## .ACAE ~~
## .ACAL 0.125 0.006 19.623 0.000 0.125 0.414
## .CD ~~
## .CC 0.246 0.012 19.806 0.000 0.246 0.315
## .GPTL ~~
## .GPTR 0.542 0.016 32.948 0.000 0.542 0.596
## .ACVE ~~
## .WRAT 0.139 0.007 20.710 0.000 0.139 0.400
## .AFQT ~~
## .PA 0.163 0.008 20.768 0.000 0.163 0.413
## .WBD 0.123 0.008 16.361 0.000 0.123 0.300
## .PA ~~
## .WBD 0.239 0.010 23.274 0.000 0.239 0.413
## .CC 0.092 0.009 10.631 0.000 0.092 0.143
## .WRAT ~~
## .WGI 0.026 0.006 4.373 0.000 0.026 0.065
## .WLGT 0.104 0.008 12.709 0.000 0.104 0.181
## .ACVL 0.151 0.007 20.751 0.000 0.151 0.410
## .CVLT ~~
## .CC 0.120 0.011 10.433 0.000 0.120 0.152
## .WBD ~~
## .CC 0.137 0.010 14.048 0.000 0.137 0.204
## .WGI ~~
## .PASAT -0.106 0.008 -12.996 0.000 -0.106 -0.223
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.273 0.008 35.512 0.000 0.273 0.274
## .ACAE 0.295 0.008 36.468 0.000 0.295 0.295
## .AFQT 0.280 0.008 35.739 0.000 0.280 0.279
## .GIT 0.463 0.011 41.497 0.000 0.463 0.464
## .PA 0.558 0.013 42.711 0.000 0.558 0.564
## .WRAT 0.443 0.011 41.312 0.000 0.443 0.452
## .CVLT 0.823 0.018 45.046 0.000 0.823 0.823
## .WCST 0.793 0.018 44.899 0.000 0.793 0.793
## .WBD 0.601 0.014 43.224 0.000 0.601 0.612
## .WGI 0.357 0.009 39.728 0.000 0.357 0.357
## .CD 0.811 0.018 44.966 0.000 0.811 0.811
## .CC 0.752 0.017 45.116 0.000 0.752 0.772
## .PASAT 0.640 0.015 43.020 0.000 0.640 0.640
## .GPTL 0.909 0.020 45.584 0.000 0.909 0.909
## .GPTR 0.908 0.020 45.543 0.000 0.908 0.908
## .WLGT 0.741 0.017 44.881 0.000 0.741 0.741
## .ACVL 0.305 0.008 36.810 0.000 0.305 0.305
## .ACAL 0.308 0.008 36.565 0.000 0.308 0.308
## .education 0.673 0.015 43.664 0.000 0.673 0.673
## .income 0.819 0.018 45.130 0.000 0.819 0.820
## ge 1.000 1.000 1.000
## .gl 1.000 0.109 0.109
summary(EduSVE.fit, stand = T, fit = T)
## lavaan 0.6-9 ended normally after 67 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 70
##
## Used Total
## Number of observations 4237 4462
##
## Model Test User Model:
##
## Test statistic 2534.299
## Degrees of freedom 140
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48411.141
## Degrees of freedom 190
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.950
## Tucker-Lewis Index (TLI) 0.933
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -97292.429
## Loglikelihood unrestricted model (H1) -96025.280
##
## Akaike (AIC) 194724.859
## Bayesian (BIC) 195169.472
## Sample-size adjusted Bayesian (BIC) 194947.041
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.064
## 90 Percent confidence interval - lower 0.061
## 90 Percent confidence interval - upper 0.066
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.053
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.848 0.013 67.719 0.000 0.848 0.851
## ACAE 0.840 0.013 66.076 0.000 0.840 0.839
## AFQT 0.852 0.013 67.592 0.000 0.852 0.851
## GIT 0.733 0.014 54.098 0.000 0.733 0.733
## PA 0.658 0.014 46.988 0.000 0.658 0.662
## gl =~
## WRAT 0.222 0.007 32.525 0.000 0.640 0.649
## CVLT 0.148 0.007 20.947 0.000 0.426 0.427
## WCST 0.155 0.007 22.030 0.000 0.448 0.449
## WBD 0.218 0.007 30.756 0.000 0.630 0.638
## WGI 0.233 0.007 33.335 0.000 0.672 0.675
## CD 0.152 0.007 21.532 0.000 0.439 0.440
## CC 0.168 0.007 23.820 0.000 0.485 0.493
## PASAT 0.208 0.007 27.914 0.000 0.600 0.602
## GPTL 0.104 0.007 15.125 0.000 0.299 0.300
## GPTR 0.108 0.007 15.713 0.000 0.313 0.313
## WLGT 0.152 0.007 22.250 0.000 0.439 0.440
## ACVL 0.275 0.007 37.661 0.000 0.793 0.799
## ACAL 0.277 0.007 37.153 0.000 0.801 0.806
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.709 0.083 32.728 0.000 0.938 0.938
## education ~
## ge 0.565 0.015 38.371 0.000 0.565 0.565
## WRAT ~
## education 0.147 0.012 11.986 0.000 0.147 0.149
## CVLT ~
## education -0.017 0.017 -0.991 0.322 -0.017 -0.017
## WCST ~
## education 0.004 0.017 0.258 0.796 0.004 0.004
## WBD ~
## education -0.034 0.014 -2.458 0.014 -0.034 -0.034
## WGI ~
## education 0.204 0.013 16.186 0.000 0.204 0.205
## CD ~
## education -0.018 0.017 -1.039 0.299 -0.018 -0.018
## CC ~
## education -0.037 0.016 -2.239 0.025 -0.037 -0.037
## PASAT ~
## education -0.011 0.016 -0.724 0.469 -0.011 -0.011
## GPTL ~
## education -0.003 0.018 -0.146 0.884 -0.003 -0.003
## GPTR ~
## education -0.029 0.018 -1.642 0.101 -0.029 -0.029
## WLGT ~
## education 0.112 0.016 6.824 0.000 0.112 0.112
## ACVL ~
## education 0.059 0.010 5.639 0.000 0.059 0.060
## ACAL ~
## education 0.039 0.011 3.464 0.001 0.039 0.039
## income ~
## education 0.347 0.014 24.072 0.000 0.347 0.347
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.150 0.006 23.535 0.000 0.150 0.521
## .ACAE ~~
## .ACAL 0.128 0.006 19.910 0.000 0.128 0.421
## .CD ~~
## .CC 0.246 0.012 19.795 0.000 0.246 0.315
## .GPTL ~~
## .GPTR 0.543 0.016 32.962 0.000 0.543 0.597
## .ACVE ~~
## .WRAT 0.138 0.007 20.473 0.000 0.138 0.397
## .AFQT ~~
## .PA 0.160 0.008 20.391 0.000 0.160 0.408
## .WBD 0.121 0.007 16.168 0.000 0.121 0.298
## .PA ~~
## .WBD 0.238 0.010 23.162 0.000 0.238 0.411
## .CC 0.092 0.009 10.621 0.000 0.092 0.143
## .WRAT ~~
## .WGI 0.025 0.006 4.143 0.000 0.025 0.062
## .WLGT 0.103 0.008 12.612 0.000 0.103 0.181
## .ACVL 0.148 0.007 20.434 0.000 0.148 0.406
## .CVLT ~~
## .CC 0.120 0.011 10.413 0.000 0.120 0.152
## .WBD ~~
## .CC 0.137 0.010 14.020 0.000 0.137 0.204
## .WGI ~~
## .PASAT -0.107 0.008 -13.047 0.000 -0.107 -0.224
## .gl ~~
## .income 0.122 0.021 5.887 0.000 0.122 0.131
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.273 0.008 35.328 0.000 0.273 0.275
## .ACAE 0.297 0.008 36.344 0.000 0.297 0.296
## .AFQT 0.275 0.008 35.283 0.000 0.275 0.275
## .GIT 0.462 0.011 41.375 0.000 0.462 0.462
## .PA 0.555 0.013 42.563 0.000 0.555 0.562
## .WRAT 0.441 0.011 41.071 0.000 0.441 0.454
## .CVLT 0.823 0.018 45.031 0.000 0.823 0.825
## .WCST 0.793 0.018 44.890 0.000 0.793 0.796
## .WBD 0.600 0.014 43.140 0.000 0.600 0.615
## .WGI 0.354 0.009 39.515 0.000 0.354 0.356
## .CD 0.812 0.018 44.953 0.000 0.812 0.814
## .CC 0.752 0.017 45.089 0.000 0.752 0.775
## .PASAT 0.641 0.015 42.979 0.000 0.641 0.645
## .GPTL 0.909 0.020 45.584 0.000 0.909 0.911
## .GPTR 0.909 0.020 45.543 0.000 0.909 0.911
## .WLGT 0.739 0.016 44.853 0.000 0.739 0.742
## .ACVL 0.303 0.008 36.564 0.000 0.303 0.307
## .ACAL 0.311 0.008 36.560 0.000 0.311 0.315
## .education 0.680 0.016 43.704 0.000 0.680 0.681
## .income 0.879 0.019 46.027 0.000 0.879 0.880
## ge 1.000 1.000 1.000
## .gl 1.000 0.120 0.120
summary(EduSVGE.fit, stand = T, fit = T)
## lavaan 0.6-9 ended normally after 65 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 70
##
## Used Total
## Number of observations 4237 4462
##
## Model Test User Model:
##
## Test statistic 2241.284
## Degrees of freedom 140
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48411.141
## Degrees of freedom 190
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.956
## Tucker-Lewis Index (TLI) 0.941
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -97145.922
## Loglikelihood unrestricted model (H1) -96025.280
##
## Akaike (AIC) 194431.844
## Bayesian (BIC) 194876.456
## Sample-size adjusted Bayesian (BIC) 194654.026
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.060
## 90 Percent confidence interval - lower 0.057
## 90 Percent confidence interval - upper 0.062
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.038
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.848 0.013 67.739 0.000 0.848 0.851
## ACAE 0.841 0.013 66.212 0.000 0.841 0.840
## AFQT 0.851 0.013 67.528 0.000 0.851 0.851
## GIT 0.733 0.014 54.128 0.000 0.733 0.733
## PA 0.657 0.014 46.931 0.000 0.657 0.661
## gl =~
## WRAT 0.219 0.007 32.315 0.000 0.648 0.654
## CVLT 0.146 0.007 21.042 0.000 0.431 0.431
## WCST 0.154 0.007 22.152 0.000 0.455 0.455
## WBD 0.216 0.007 30.616 0.000 0.638 0.644
## WGI 0.229 0.007 33.023 0.000 0.679 0.679
## CD 0.150 0.007 21.642 0.000 0.445 0.445
## CC 0.166 0.007 23.865 0.000 0.491 0.498
## PASAT 0.206 0.007 27.916 0.000 0.608 0.609
## GPTL 0.103 0.007 15.321 0.000 0.305 0.305
## GPTR 0.108 0.007 15.915 0.000 0.318 0.318
## WLGT 0.150 0.007 22.281 0.000 0.443 0.443
## ACVL 0.272 0.007 37.158 0.000 0.804 0.805
## ACAL 0.275 0.007 36.710 0.000 0.814 0.815
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.786 0.086 32.353 0.000 0.941 0.941
## education ~
## ge 0.565 0.015 38.371 0.000 0.565 0.565
## WRAT ~
## education 0.142 0.012 11.565 0.000 0.142 0.143
## CVLT ~
## education -0.021 0.017 -1.198 0.231 -0.021 -0.021
## WCST ~
## education 0.000 0.017 0.004 0.997 0.000 0.000
## WBD ~
## education -0.039 0.014 -2.881 0.004 -0.039 -0.040
## WGI ~
## education 0.199 0.013 15.770 0.000 0.199 0.199
## CD ~
## education -0.022 0.017 -1.271 0.204 -0.022 -0.022
## CC ~
## education -0.041 0.016 -2.485 0.013 -0.041 -0.041
## PASAT ~
## education -0.017 0.016 -1.086 0.277 -0.017 -0.017
## GPTL ~
## education -0.006 0.018 -0.329 0.742 -0.006 -0.006
## GPTR ~
## education -0.033 0.018 -1.835 0.066 -0.033 -0.033
## WLGT ~
## education 0.109 0.016 6.626 0.000 0.109 0.109
## ACVL ~
## education 0.052 0.010 4.967 0.000 0.052 0.052
## ACAL ~
## education 0.031 0.011 2.751 0.006 0.031 0.031
## income ~
## education 0.176 0.017 10.468 0.000 0.176 0.176
## gl 0.108 0.006 16.837 0.000 0.321 0.321
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.151 0.006 23.674 0.000 0.151 0.523
## .ACAE ~~
## .ACAL 0.125 0.006 19.664 0.000 0.125 0.415
## .CD ~~
## .CC 0.246 0.012 19.799 0.000 0.246 0.315
## .GPTL ~~
## .GPTR 0.542 0.016 32.952 0.000 0.542 0.597
## .ACVE ~~
## .WRAT 0.139 0.007 20.705 0.000 0.139 0.401
## .AFQT ~~
## .PA 0.161 0.008 20.560 0.000 0.161 0.410
## .WBD 0.122 0.007 16.259 0.000 0.122 0.299
## .PA ~~
## .WBD 0.238 0.010 23.215 0.000 0.238 0.412
## .CC 0.092 0.009 10.626 0.000 0.092 0.143
## .WRAT ~~
## .WGI 0.025 0.006 4.262 0.000 0.025 0.064
## .WLGT 0.104 0.008 12.659 0.000 0.104 0.181
## .ACVL 0.150 0.007 20.628 0.000 0.150 0.408
## .CVLT ~~
## .CC 0.120 0.011 10.425 0.000 0.120 0.152
## .WBD ~~
## .CC 0.137 0.010 14.029 0.000 0.137 0.204
## .WGI ~~
## .PASAT -0.107 0.008 -13.014 0.000 -0.107 -0.223
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.273 0.008 35.459 0.000 0.273 0.275
## .ACAE 0.295 0.008 36.380 0.000 0.295 0.295
## .AFQT 0.277 0.008 35.484 0.000 0.277 0.276
## .GIT 0.462 0.011 41.430 0.000 0.462 0.462
## .PA 0.557 0.013 42.639 0.000 0.557 0.563
## .WRAT 0.442 0.011 41.208 0.000 0.442 0.452
## .CVLT 0.823 0.018 45.042 0.000 0.823 0.823
## .WCST 0.793 0.018 44.896 0.000 0.793 0.793
## .WBD 0.600 0.014 43.175 0.000 0.600 0.611
## .WGI 0.356 0.009 39.669 0.000 0.356 0.356
## .CD 0.811 0.018 44.962 0.000 0.811 0.812
## .CC 0.752 0.017 45.106 0.000 0.752 0.772
## .PASAT 0.640 0.015 43.008 0.000 0.640 0.641
## .GPTL 0.909 0.020 45.584 0.000 0.909 0.909
## .GPTR 0.908 0.020 45.543 0.000 0.908 0.909
## .WLGT 0.740 0.016 44.873 0.000 0.740 0.740
## .ACVL 0.304 0.008 36.695 0.000 0.304 0.305
## .ACAL 0.308 0.008 36.518 0.000 0.308 0.309
## .education 0.680 0.016 43.704 0.000 0.680 0.681
## .income 0.806 0.018 45.475 0.000 0.806 0.806
## ge 1.000 1.000 1.000
## .gl 1.000 0.114 0.114
summary(EduSVE2.fit, stand = T, fit = T) #Not valid
## lavaan 0.6-9 ended normally after 67 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 77
##
## Used Total
## Number of observations 4237 4462
##
## Model Test User Model:
##
## Test statistic 2195.954
## Degrees of freedom 133
## P-value (Chi-square) 0.000
##
## Model Test Baseline Model:
##
## Test statistic 48411.141
## Degrees of freedom 190
## P-value 0.000
##
## User Model versus Baseline Model:
##
## Comparative Fit Index (CFI) 0.957
## Tucker-Lewis Index (TLI) 0.939
##
## Loglikelihood and Information Criteria:
##
## Loglikelihood user model (H0) -97123.257
## Loglikelihood unrestricted model (H1) -96025.280
##
## Akaike (AIC) 194400.514
## Bayesian (BIC) 194889.588
## Sample-size adjusted Bayesian (BIC) 194644.914
##
## Root Mean Square Error of Approximation:
##
## RMSEA 0.061
## 90 Percent confidence interval - lower 0.058
## 90 Percent confidence interval - upper 0.063
## P-value RMSEA <= 0.05 0.000
##
## Standardized Root Mean Square Residual:
##
## SRMR 0.038
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.850 0.013 67.822 0.000 0.850 0.852
## ACAE 0.839 0.013 66.074 0.000 0.839 0.839
## AFQT 0.851 0.013 67.522 0.000 0.851 0.851
## GIT 0.733 0.014 54.146 0.000 0.733 0.734
## PA 0.657 0.014 46.916 0.000 0.657 0.661
## gl =~
## WRAT 0.221 0.007 32.458 0.000 0.653 0.659
## CVLT 0.146 0.007 21.055 0.000 0.432 0.432
## WCST 0.153 0.007 22.062 0.000 0.452 0.452
## WBD 0.216 0.007 30.665 0.000 0.639 0.645
## WGI 0.231 0.007 33.161 0.000 0.683 0.683
## CD 0.150 0.007 21.607 0.000 0.444 0.444
## CC 0.167 0.007 23.894 0.000 0.492 0.499
## PASAT 0.205 0.007 27.824 0.000 0.606 0.606
## GPTL 0.102 0.007 15.139 0.000 0.301 0.301
## GPTR 0.106 0.007 15.715 0.000 0.314 0.314
## WLGT 0.151 0.007 22.399 0.000 0.446 0.446
## ACVL 0.273 0.007 37.266 0.000 0.807 0.808
## ACAL 0.275 0.007 36.708 0.000 0.811 0.812
##
## Regressions:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## gl ~
## ge 2.779 0.086 32.392 0.000 0.941 0.941
## education ~
## ge 0.565 0.015 38.371 0.000 0.565 0.565
## WRAT ~
## education 0.139 0.012 11.369 0.000 0.139 0.141
## CVLT ~
## education -0.021 0.017 -1.202 0.229 -0.021 -0.021
## WCST ~
## education 0.001 0.017 0.080 0.936 0.001 0.001
## WBD ~
## education -0.040 0.014 -2.917 0.004 -0.040 -0.040
## WGI ~
## education 0.197 0.013 15.627 0.000 0.197 0.197
## CD ~
## education -0.021 0.017 -1.238 0.216 -0.021 -0.021
## CC ~
## education -0.041 0.016 -2.502 0.012 -0.041 -0.042
## PASAT ~
## education -0.016 0.016 -0.986 0.324 -0.016 -0.016
## GPTL ~
## education -0.004 0.018 -0.210 0.834 -0.004 -0.004
## GPTR ~
## education -0.030 0.018 -1.702 0.089 -0.030 -0.030
## WLGT ~
## education 0.107 0.016 6.541 0.000 0.107 0.107
## ACVL ~
## education 0.051 0.010 4.850 0.000 0.051 0.051
## ACAL ~
## education 0.032 0.011 2.847 0.004 0.032 0.032
## income ~
## education 0.178 0.018 9.759 0.000 0.178 0.178
## WRAT -0.074 0.023 -3.186 0.001 -0.074 -0.073
## WBD 0.002 0.018 0.134 0.893 0.002 0.002
## WGI 0.015 0.022 0.709 0.479 0.015 0.015
## CC 0.018 0.016 1.131 0.258 0.018 0.018
## WLGT 0.017 0.016 1.042 0.297 0.017 0.017
## ACVL 0.115 0.025 4.569 0.000 0.115 0.115
## ACAL 0.275 0.022 12.761 0.000 0.275 0.275
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE ~~
## .ACVL 0.150 0.006 23.546 0.000 0.150 0.523
## .ACAE ~~
## .ACAL 0.128 0.006 19.889 0.000 0.128 0.420
## .CD ~~
## .CC 0.246 0.012 19.808 0.000 0.246 0.315
## .GPTL ~~
## .GPTR 0.544 0.016 32.995 0.000 0.544 0.597
## .ACVE ~~
## .WRAT 0.138 0.007 20.541 0.000 0.138 0.400
## .AFQT ~~
## .PA 0.161 0.008 20.541 0.000 0.161 0.410
## .WBD 0.121 0.007 16.176 0.000 0.121 0.298
## .PA ~~
## .WBD 0.237 0.010 23.143 0.000 0.237 0.411
## .CC 0.092 0.009 10.601 0.000 0.092 0.142
## .WRAT ~~
## .WGI 0.023 0.006 3.877 0.000 0.023 0.058
## .WLGT 0.102 0.008 12.523 0.000 0.102 0.180
## .ACVL 0.147 0.007 20.197 0.000 0.147 0.403
## .CVLT ~~
## .CC 0.120 0.011 10.411 0.000 0.120 0.152
## .WBD ~~
## .CC 0.137 0.010 13.997 0.000 0.137 0.204
## .WGI ~~
## .PASAT -0.106 0.008 -12.999 0.000 -0.106 -0.224
## .gl ~~
## .income -0.102 0.024 -4.316 0.000 -0.102 -0.114
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.272 0.008 35.346 0.000 0.272 0.274
## .ACAE 0.297 0.008 36.425 0.000 0.297 0.296
## .AFQT 0.276 0.008 35.436 0.000 0.276 0.276
## .GIT 0.462 0.011 41.403 0.000 0.462 0.462
## .PA 0.557 0.013 42.621 0.000 0.557 0.563
## .WRAT 0.439 0.011 40.914 0.000 0.439 0.447
## .CVLT 0.823 0.018 45.033 0.000 0.823 0.823
## .WCST 0.795 0.018 44.902 0.000 0.795 0.795
## .WBD 0.600 0.014 43.126 0.000 0.600 0.610
## .WGI 0.352 0.009 39.435 0.000 0.352 0.352
## .CD 0.812 0.018 44.959 0.000 0.812 0.812
## .CC 0.752 0.017 45.087 0.000 0.752 0.772
## .PASAT 0.643 0.015 43.008 0.000 0.643 0.643
## .GPTL 0.911 0.020 45.593 0.000 0.911 0.911
## .GPTR 0.911 0.020 45.554 0.000 0.911 0.911
## .WLGT 0.739 0.016 44.843 0.000 0.739 0.739
## .ACVL 0.301 0.008 36.408 0.000 0.301 0.301
## .ACAL 0.312 0.009 36.614 0.000 0.312 0.313
## .education 0.680 0.016 43.703 0.000 0.680 0.681
## .income 0.811 0.018 45.918 0.000 0.811 0.812
## ge 1.000 1.000 1.000
## .gl 1.000 0.115 0.115
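For a side-by-side comparison of these income models, the fit measures can be tabulated in one step; a small sketch, assuming the fitted objects above are still in the workspace:
EduFits <- sapply(list(EduSVG = EduSVG.fit, EduSVE = EduSVE.fit, EduSVGE = EduSVGE.fit, EduSVE2 = EduSVE2.fit), fitMeasures, fit.measures = FITM) #Collect the fit measures in FITM for each model
kable(t(EduFits), digits = 3, format = "pandoc")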
standardizedSolution(GCOR.fit)
standardizedSolution(EduSV1.fit)
standardizedSolution(EduSV2.fit)
We should also assess the stability of g factors composed of common subtests.
GCOR.model<-'
ge =~ ACVE + ACAE
gl =~ ACVL + ACAL
ge ~~ gl' #had to be a covariance to converge
GCOR0.model<-'
ge =~ ACVE + ACAE
gl =~ ACVL + ACAL
ge ~ 0*gl'
GCOR1.model<-'
ge =~ ACVE + ACAE
gl =~ ACVL + ACAL
ge ~ 1*gl'
GCOR.fit <- cfa(GCOR.model, data = VES, std.lv = T, std.ov = T)
## Warning in lav_object_post_check(object): lavaan WARNING: covariance matrix of latent variables
## is not positive definite;
## use lavInspect(fit, "cov.lv") to investigate.
GCOR0.fit <- cfa(GCOR0.model, data = VES, std.lv = T, std.ov = T)
## Warning in lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, : lavaan WARNING:
## Could not compute standard errors! The information matrix could
## not be inverted. This may be a symptom that the model is not
## identified.
GCOR1.fit <- cfa(GCOR1.model, data = VES, std.lv = T, std.ov = T)
## Warning in lav_object_post_check(object): lavaan WARNING: some estimated ov
## variances are negative
fitMeasures(GCOR.fit, FITM)
## chisq df npar cfi rmsea
## 1295.798 1.000 9.000 0.900 0.544
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.519 0.569 38076.366 38133.835 0.050
fitMeasures(GCOR0.fit, FITM)
## chisq df npar cfi rmsea
## 7196.116 2.000 8.000 0.446 0.906
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.888 0.924 43974.684 44025.768 0.463
fitMeasures(GCOR1.fit, FITM)
## chisq df npar cfi rmsea
## 2552.833 2.000 8.000 0.803 0.539
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.522 0.557 39331.401 39382.485 0.216
summary(GCOR.fit, stand = T)
## lavaan 0.6-9 ended normally after 20 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 9
##
## Used Total
## Number of observations 4383 4462
##
## Model Test User Model:
##
## Test statistic 1295.798
## Degrees of freedom 1
## P-value (Chi-square) 0.000
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge =~
## ACVE 0.861 0.012 69.017 0.000 0.861 0.861
## ACAE 0.812 0.013 63.538 0.000 0.812 0.812
## gl =~
## ACVL 0.862 0.012 69.055 0.000 0.862 0.862
## ACAL 0.802 0.013 62.290 0.000 0.802 0.802
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## ge ~~
## gl 1.041 0.005 199.547 0.000 1.041 1.041
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .ACVE 0.259 0.008 31.872 0.000 0.259 0.259
## .ACAE 0.340 0.009 37.810 0.000 0.340 0.340
## .ACVL 0.257 0.008 31.300 0.000 0.257 0.257
## .ACAL 0.357 0.009 38.513 0.000 0.357 0.357
## ge 1.000 1.000 1.000
## gl 1.000 1.000 1.000
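The estimated latent covariance of 1.041 exceeds 1, which is what the non-positive-definite warning above flags; it can be inspected directly, as the warning suggests:
lavInspect(GCOR.fit, "cov.lv") #Model-implied latent variance-covariance matrix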
And again, with common subtests removed.
GCOR.model<-'
ge =~ AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ gl
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR0.model<-'
ge =~ AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ 0*gl
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR1.model<-'
ge =~ AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ 1*gl
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR945.model<-'
ge =~ AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ .945*gl
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR.fit <- cfa(GCOR.model, data = VES, std.lv = T, std.ov = T)
GCOR0.fit <- cfa(GCOR0.model, data = VES, std.lv = T, std.ov = T)
## Warning in lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, : lavaan WARNING:
## Could not compute standard errors! The information matrix could
## not be inverted. This may be a symptom that the model is not
## identified.
GCOR1.fit <- cfa(GCOR1.model, data = VES, std.lv = T, std.ov = T)
GCOR945.fit <- cfa(GCOR945.model, data = VES, std.lv = T, std.ov = T)
fitMeasures(GCOR.fit, FITM)
## chisq df npar cfi rmsea
## 1018.727 65.000 40.000 0.961 0.058
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.055 0.061 148508.564 148763.423 0.040
fitMeasures(GCOR0.fit, FITM)
## chisq df npar cfi rmsea
## 4701.216 66.000 39.000 0.808 0.127
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.124 0.131 152189.052 152437.540 0.216
fitMeasures(GCOR1.fit, FITM)
## chisq df npar cfi rmsea
## 1519.463 66.000 39.000 0.940 0.071
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.068 0.075 149007.299 149255.787 0.085
fitMeasures(GCOR945.fit, FITM)
## chisq df npar cfi rmsea
## 1587.454 66.000 39.000 0.937 0.073
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.070 0.076 149075.290 149323.778 0.089
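Whether fixing the cross-time path worsens fit relative to the freely estimated path can also be checked with a formal chi-square difference test; a minimal sketch, assuming the fitted objects above are still in the workspace:
lavTestLRT(GCOR.fit, GCOR1.fit) #Free path versus path fixed at 1
lavTestLRT(GCOR.fit, GCOR945.fit) #Free path versus path fixed at .945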
parameterEstimates(GCOR.fit, stand = T) %>%
filter(op == "~") %>%
dplyr::select(Early = lhs, Later = rhs, "Unstandardized Regression" = est, SE = se, Z = z, 'p-value' = pvalue, Beta = std.all) %>%
kable(digits = 3, format = "pandoc")
| Early | Later | Unstandardized Regression | SE | Z | p-value | Beta |
|---|---|---|---|---|---|---|
| ge | gl | 2.475 | 0.135 | 18.312 | 0 | 0.927 |
And with the common subtests removed only from the endpoint (the later battery).
GCOR.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ gl
ACVE ~~ WRAT
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR0.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ 0*gl
ACVE ~~ WRAT
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR1.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ 1*gl
ACVE ~~ WRAT
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR945.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT
ge ~ .945*gl
ACVE ~~ WRAT
CC ~~ CD + CVLT + WBD + PA
GPTL ~~ GPTR
AFQT ~~ PA + WBD
PA ~~ WBD
WRAT ~~ WGI + WLGT
WGI ~~ PASAT'
GCOR.fit <- cfa(GCOR.model, data = VES, std.lv = T, std.ov = T)
GCOR0.fit <- cfa(GCOR0.model, data = VES, std.lv = T, std.ov = T)
GCOR1.fit <- cfa(GCOR1.model, data = VES, std.lv = T, std.ov = T)
GCOR945.fit <- cfa(GCOR945.model, data = VES, std.lv = T, std.ov = T)
fitMeasures(GCOR.fit, FITM)
## chisq df npar cfi rmsea
## 1800.642 91.000 45.000 0.950 0.066
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.063 0.069 163391.687 163678.383 0.044
fitMeasures(GCOR0.fit, FITM)
## chisq df npar cfi rmsea
## 6848.568 92.000 44.000 0.804 0.130
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.128 0.133 168437.613 168717.938 0.254
fitMeasures(GCOR1.fit, FITM)
## chisq df npar cfi rmsea
## 2835.208 92.000 44.000 0.921 0.083
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.080 0.086 164424.254 164704.578 0.124
fitMeasures(GCOR945.fit, FITM)
## chisq df npar cfi rmsea
## 2950.308 92.000 44.000 0.917 0.085
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.082 0.087 164539.353 164819.678 0.129
parameterEstimates(GCOR.fit, stand = T) %>%
filter(op == "~") %>%
dplyr::select(Early = lhs, Later = rhs, "Unstandardized Regression" = est, SE = se, Z = z, 'p-value' = pvalue, Beta = std.all) %>%
kable(digits = 3, format = "pandoc")
| Early | Later | Unstandardized Regression | SE | Z | p-value | Beta |
|---|---|---|---|---|---|---|
| ge | gl | 2.757 | 0.112 | 24.698 | 0 | 0.94 |
And with early g replaced by early FSIQ.
VES$FSIQE <- FSIQE #Early full-scale IQ
GCOR.model<-'
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
FSIQE ~ gl
CC ~~ CD + CVLT + WBD
GPTL ~~ GPTR
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT'
GCOR0.model<-'
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
FSIQE ~ 0*gl
CC ~~ CD + CVLT + WBD
GPTL ~~ GPTR
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT'
GCOR1.model<-'
gl =~ WRAT + CVLT + WCST + WBD + WGI + CD + CC + PASAT + GPTL + GPTR + WLGT + ACVL + ACAL
FSIQE ~ 1*gl
CC ~~ CD + CVLT + WBD
GPTL ~~ GPTR
WRAT ~~ WGI + WLGT + ACVL
WGI ~~ PASAT'
GCOR.fit <- cfa(GCOR.model, data = VES, std.lv = T, std.ov = T)
GCOR0.fit <- cfa(GCOR0.model, data = VES, std.lv = T, std.ov = T)
GCOR1.fit <- cfa(GCOR1.model, data = VES, std.lv = T, std.ov = T)
fitMeasures(GCOR.fit, FITM)
## chisq df npar cfi rmsea
## 1558.085 69.000 36.000 0.949 0.071
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.068 0.074 143814.797 144044.153 0.042
fitMeasures(GCOR0.fit, FITM)
## chisq df npar cfi rmsea
## 7556.266 70.000 35.000 0.745 0.157
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.154 0.160 149810.978 150033.963 0.193
fitMeasures(GCOR1.fit, FITM)
## chisq df npar cfi rmsea
## 1604.947 70.000 35.000 0.948 0.071
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.068 0.074 143859.659 144082.645 0.069
parameterEstimates(GCOR.fit, stand = T) %>%
filter(op == "~") %>%
dplyr::select(Early = lhs, Later = rhs, "Unstandardized Regression" = est, SE = se, Z = z, 'p-value' = pvalue, Beta = std.all) %>%
kable(digits = 3, format = "pandoc")
| Early | Later | Unstandardized Regression | SE | Z | p-value | Beta |
|---|---|---|---|---|---|---|
| FSIQE | gl | 0.915 | 0.012 | 77.201 | 0 | 0.915 |
And with later g replaced by later FSIQ.
VES$FSIQL <- FSIQL #Later full-scale IQ
GCOR.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
ge ~ FSIQL
AFQT ~~ PA'
GCOR0.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
ge ~ FSIQL #NB: the intended 0* constraint appears to be missing here, so this model is identical to GCOR.model (see the note after the fit measures below)
AFQT ~~ PA'
GCOR1.model<-'
ge =~ ACVE + ACAE + AFQT + GIT + PA
ge ~ FSIQL #NB: the intended 1* constraint likewise appears to be missing, so this model is also identical to GCOR.model
AFQT ~~ PA'
GCOR.fit <- cfa(GCOR.model, data = VES, std.lv = T, std.ov = T)
GCOR0.fit <- cfa(GCOR0.model, data = VES, std.lv = T, std.ov = T)
GCOR1.fit <- cfa(GCOR1.model, data = VES, std.lv = T, std.ov = T)
fitMeasures(GCOR.fit, FITM)
## chisq df npar cfi rmsea
## 346.863 8.000 12.000 0.981 0.099
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.090 0.108 41937.134 42013.586 0.023
fitMeasures(GCOR0.fit, FITM)
## chisq df npar cfi rmsea
## 346.863 8.000 12.000 0.981 0.099
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.090 0.108 41937.134 42013.586 0.023
fitMeasures(GCOR1.fit, FITM)
## chisq df npar cfi rmsea
## 346.863 8.000 12.000 0.981 0.099
## rmsea.ci.lower rmsea.ci.upper aic bic srmr
## 0.090 0.108 41937.134 42013.586 0.023
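The three sets of fit measures are identical, confirming that the constraints were not actually applied in the models above. Following the pattern of the previous sections, the presumably intended constrained specifications would look like this (a sketch only; not the code that produced the output above):
GCOR0.model<-' #Sketch of the presumably intended 0-constrained model (not rerun here)
ge =~ ACVE + ACAE + AFQT + GIT + PA
ge ~ 0*FSIQL
AFQT ~~ PA'
GCOR1.model<-' #Sketch of the presumably intended 1-constrained model (not rerun here)
ge =~ ACVE + ACAE + AFQT + GIT + PA
ge ~ 1*FSIQL
AFQT ~~ PA'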
parameterEstimates(GCOR.fit, stand = T) %>%
filter(op == "~") %>%
dplyr::select(Early = lhs, Later = rhs, "Unstandardized Regression" = est, SE = se, Z = z, 'p-value' = pvalue, Beta = std.all) %>%
kable(digits = 3, format = "pandoc")
| Early | Later | Unstandardized Regression | SE | Z | p-value | Beta |
|---|---|---|---|---|---|---|
| ge | FSIQL | 2.712 | 0.052 | 52.643 | 0 | 0.859 |
What if we just use the first principal component loadings and the education-test correlations to assess what the method of correlated vectors (MCV) might say about the generality of educational effects on intelligence?
CONGO <- function(F1, F2) { #Tucker's congruence coefficient between two vectors of loadings/effects
PHI = sum(F1*F2) / sqrt(sum(F1^2)*sum(F2^2))
return(PHI)}
EduEffects <- c(0.532, 0.487, 0.343, 0.377, 0.430, 0.511, 0.202, 0.241, 0.275, 0.555, 0.134, 0.149, 0.304, 0.209, 0.222, 0.338, 0.506, 0.467) #Education-test correlations, one per test
gFA <- principal(VES[tests]); gLoadings <- gFA$loadings[1:18] #First principal component loadings for the 18 tests
cor(EduEffects, gLoadings)
## [1] 0.9209894
CONGO(EduEffects, gLoadings)
## [1] 0.9857538
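As a cross-check, psych's built-in congruence function should give essentially the same value when the two vectors are passed as single-column matrices (psych rounds to two digits by default):
factor.congruence(as.matrix(EduEffects), as.matrix(gLoadings)) #Tucker's congruence coefficient via psych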
MCV supports a very strong relationship between the vector of education-test correlations and the vector of first principal component loadings. The post-selection MCV results are not theoretically valid due to post-selection inference issues, but they may still be of interest to some readers.