setwd("C:/Work Files/PSY 8170/Class Exercises")
Class SEM Software Example
Installing Packages
install.packages("haven", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'haven' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("tidyverse", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'tidyverse' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("tidySEM", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'tidySEM' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("corrplot", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'corrplot' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("psych", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'psych' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("finalfit", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'finalfit' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("performance", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'performance' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("MVN", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'MVN' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("lavaan", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'lavaan' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("knitr", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'knitr' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("kableExtra", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'kableExtra' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("apaTables", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'apaTables' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("sjPlot", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'sjPlot' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("semPlot", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'semPlot' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("report", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'report' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("codebookr", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'codebookr' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
install.packages("see", repos = "https://cran.r-project.org")
Installing package into 'C:/Users/partr/AppData/Local/R/win-library/4.4'
(as 'lib' is unspecified)
package 'see' successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\partr\AppData\Local\Temp\RtmpUVkRuI\downloaded_packages
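If you would rather not reinstall packages that are already on your machine, a minimal sketch like the one below installs only the missing ones (the pkgs vector simply lists the packages used in this exercise):
#Install only the course packages that are not already installed
pkgs <- c("haven", "tidyverse", "tidySEM", "corrplot", "psych", "finalfit",
          "performance", "MVN", "lavaan", "knitr", "kableExtra", "apaTables",
          "sjPlot", "semPlot", "report", "codebookr", "see")
missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs, repos = "https://cran.r-project.org")
}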
Loading the Libraries
library(haven) #reading data from spss, sas, stata
library(tidyverse) #Data management
── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
✔ dplyr 1.1.4 ✔ readr 2.1.5
✔ forcats 1.0.0 ✔ stringr 1.5.1
✔ ggplot2 3.5.1 ✔ tibble 3.2.1
✔ lubridate 1.9.4 ✔ tidyr 1.3.1
✔ purrr 1.0.4
── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag() masks stats::lag()
ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidySEM) #Tools for working with SEM output
Loading required package: OpenMx
Registered S3 method overwritten by 'tidySEM':
method from
predict.MxModel OpenMx
library (corrplot) #correlation tables
corrplot 0.95 loaded
library(psych) ## basic psychometrics and statistics
Attaching package: 'psych'
The following object is masked from 'package:OpenMx':
tr
The following objects are masked from 'package:ggplot2':
%+%, alpha
library(finalfit) #Testing Assumptions
library (performance) #Testing Assumptions
library(MVN) #Assessing multivariate normality
library(lavaan) #SEM package
This is lavaan 0.6-19
lavaan is FREE software! Please report any bugs.
Attaching package: 'lavaan'
The following object is masked from 'package:psych':
cor2cov
library(knitr) #Making Tables
library(kableExtra) #Making Tables
Attaching package: 'kableExtra'
The following object is masked from 'package:dplyr':
group_rows
library(apaTables) #Generating APA formatted tables
library (sjPlot) #SEM figures and plots
#refugeeswelcome
library(semPlot) #SEM figures and plots
library (report) #Template Results
Attaching package: 'report'
The following object is masked from 'package:tidySEM':
report
library (codebookr) #Generating a code book
library(see) #Plot methods used by the performance package
Setting Up the Data
First, load the data file into your global environment.
SEM_Example_Data <- read.csv("academic_achievement.csv")
Now examine the data.
str(SEM_Example_Data)
'data.frame': 500 obs. of 9 variables:
$ motiv : num -7.91 1.75 14.47 -1.16 -4.22 ...
$ harm : num -5.08 -4.16 -4.54 -5.67 -10.07 ...
$ stabi : num -3.14 3.52 4.07 2.6 -6.03 ...
$ ppsych: num -17.8 7.01 23.73 1.49 -5.99 ...
$ ses : num 4.77 -6.05 -16.97 1.4 -18.38 ...
$ verbal: num -3.63 -7.69 -3.91 21.41 -1.44 ...
$ read : num -3.49 -4.52 -4.82 -3.14 -2.01 ...
$ arith : num -9.989 8.196 7.53 5.731 -0.624 ...
$ spell : num -6.57 8.78 -5.69 -2.92 -1.02 ...
head(SEM_Example_Data)
motiv harm stabi ppsych ses verbal read arith spell
1 -7.907 -5.075 -3.139 -17.800 4.766 -3.633 -3.489 -9.989 -6.568
2 1.751 -4.156 3.521 7.009 -6.049 -7.693 -4.521 8.196 8.779
3 14.473 -4.541 4.071 23.734 -16.971 -3.910 -4.818 7.530 -5.689
4 -1.165 -5.668 2.600 1.493 1.396 21.409 -3.138 5.731 -2.916
5 -4.223 -10.072 -6.031 -5.986 -18.376 -1.439 -2.010 -0.624 -1.025
6 4.869 3.030 -7.648 14.669 -2.235 -6.827 0.823 5.045 0.904
glimpse(SEM_Example_Data)
Rows: 500
Columns: 9
$ motiv <dbl> -7.907, 1.751, 14.473, -1.165, -4.223, 4.869, 10.367, -1.861, -…
$ harm <dbl> -5.075, -4.156, -4.541, -5.668, -10.072, 3.030, 5.039, 0.399, -…
$ stabi <dbl> -3.139, 3.521, 4.071, 2.600, -6.031, -7.648, 6.032, -1.042, -10…
$ ppsych <dbl> -17.800, 7.009, 23.734, 1.493, -5.986, 14.669, -0.952, -14.569,…
$ ses <dbl> 4.766, -6.049, -16.971, 1.396, -18.376, -2.235, -9.258, -15.999…
$ verbal <dbl> -3.633, -7.693, -3.910, 21.409, -1.439, -6.827, 8.486, -0.764, …
$ read <dbl> -3.489, -4.521, -4.818, -3.138, -2.010, 0.823, -5.673, -11.339,…
$ arith <dbl> -9.989, 8.196, 7.530, 5.731, -0.624, 5.045, 8.638, 3.754, -8.26…
$ spell <dbl> -6.568, 8.779, -5.689, -2.916, -1.025, 0.904, -1.526, -7.449, 2…
You can also create a codebook for complex datasets.
example_Codebook <- codebook(SEM_Example_Data)
print(example_Codebook, "Example_Codebook.docx")
Preliminary Analyses
describe(SEM_Example_Data) %>%
  knitr::kable(digits = 3, format="html", booktabs=TRUE, caption="Table 1. Descriptives") %>%
  kable_classic(full_width = F, html_font = "Cambria")
 | vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
motiv | 1 | 500 | 0 | 10 | 0.316 | 0.078 | 10.495 | -33.971 | 26.469 | 60.440 | -0.101 | -0.164 | 0.447 |
harm | 2 | 500 | 0 | 10 | 0.007 | 0.138 | 9.429 | -32.982 | 31.164 | 64.146 | -0.148 | 0.317 | 0.447 |
stabi | 3 | 500 | 0 | 10 | 0.302 | 0.093 | 10.703 | -25.921 | 29.635 | 55.556 | -0.061 | -0.394 | 0.447 |
ppsych | 4 | 500 | 0 | 10 | -0.544 | -0.082 | 10.045 | -31.094 | 33.283 | 64.377 | 0.094 | -0.065 | 0.447 |
ses | 5 | 500 | 0 | 10 | -0.312 | -0.049 | 9.497 | -32.137 | 29.096 | 61.233 | 0.068 | -0.048 | 0.447 |
verbal | 6 | 500 | 0 | 10 | 0.050 | -0.030 | 9.600 | -37.607 | 27.657 | 65.264 | -0.090 | 0.364 | 0.447 |
read | 7 | 500 | 0 | 10 | -0.043 | 0.075 | 10.044 | -32.839 | 27.606 | 60.445 | -0.112 | 0.090 | 0.447 |
arith | 8 | 500 | 0 | 10 | 0.126 | 0.060 | 9.680 | -26.701 | 33.048 | 59.749 | -0.017 | 0.039 | 0.447 |
spell | 9 | 500 | 0 | 10 | -0.009 | 0.143 | 9.426 | -31.361 | 26.933 | 58.294 | -0.123 | -0.019 | 0.447 |
apa.cor.table(SEM_Example_Data)
Means, standard deviations, and correlations with confidence intervals
Variable M SD 1 2 3 4
1. motiv 0.00 10.00
2. harm 0.00 10.00 .77**
[.73, .80]
3. stabi 0.00 10.00 .59** .58**
[.53, .64] [.52, .64]
4. ppsych -0.00 10.00 -.25** -.25** -.16**
[-.33, -.17] [-.33, -.17] [-.24, -.07]
5. ses -0.00 10.00 .25** .26** .18** -.42**
[.17, .33] [.18, .34] [.09, .26] [-.49, -.35]
6. verbal 0.00 10.00 .32** .25** .27** -.40**
[.24, .40] [.17, .33] [.19, .35] [-.47, -.32]
7. read 0.00 10.00 .53** .42** .36** -.39**
[.46, .59] [.35, .49] [.28, .43] [-.46, -.31]
8. arith -0.00 10.00 .60** .44** .38** -.24**
[.54, .65] [.37, .51] [.30, .45] [-.32, -.16]
9. spell -0.00 10.00 .59** .45** .38** -.31**
[.53, .64] [.38, .52] [.30, .45] [-.39, -.23]
5 6 7 8
.40**
[.32, .47]
.43** .56**
[.36, .50] [.50, .62]
.37** .49** .73**
[.29, .44] [.42, .55] [.69, .77]
.33** .48** .87** .72**
[.25, .41] [.41, .54] [.85, .89] [.67, .76]
Note. M and SD are used to represent mean and standard deviation, respectively.
Values in square brackets indicate the 95% confidence interval.
The confidence interval is a plausible range of population correlations
that could have caused the sample correlation (Cumming, 2014).
* indicates p < .05. ** indicates p < .01.
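If you want this APA-style correlation table as a Word document rather than console output, apa.cor.table() can write the file directly. A minimal sketch (the filename below is just an example):
#Write the APA-formatted correlation table to a Word document
apa.cor.table(SEM_Example_Data, filename = "Table_2_Correlations.doc", table.number = 2)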
res <- cor(SEM_Example_Data)
round(res, 2)
motiv harm stabi ppsych ses verbal read arith spell
motiv 1.00 0.77 0.59 -0.25 0.25 0.32 0.53 0.60 0.59
harm 0.77 1.00 0.58 -0.25 0.26 0.25 0.42 0.44 0.45
stabi 0.59 0.58 1.00 -0.16 0.18 0.27 0.36 0.38 0.38
ppsych -0.25 -0.25 -0.16 1.00 -0.42 -0.40 -0.39 -0.24 -0.31
ses 0.25 0.26 0.18 -0.42 1.00 0.40 0.43 0.37 0.33
verbal 0.32 0.25 0.27 -0.40 0.40 1.00 0.56 0.49 0.48
read 0.53 0.42 0.36 -0.39 0.43 0.56 1.00 0.73 0.87
arith 0.60 0.44 0.38 -0.24 0.37 0.49 0.73 1.00 0.72
spell 0.59 0.45 0.38 -0.31 0.33 0.48 0.87 0.72 1.00
corrplot(res, type = "lower", order = "hclust",
tl.col = "black", tl.srt = 45)
Testing the Basic Assumptions
random <- rnorm(nrow(SEM_Example_Data), 7)
#The command above generates a random variable with the same number of rows (values) as the dataset
hist(random) #just to check the distribution of this new variable
fakereg <- lm(random ~ ., data = SEM_Example_Data)
##runs a regression with the new random variable as the DV and all the variables in the dataset as IVs
##This generates a set of residuals in order to check the assumptions
##The following set of code just scales the residuals
standardized <- rstudent(fakereg)
fitted <- scale(fakereg$fitted.values)
hist(fitted)
check_model(fakereg)
mvn(SEM_Example_Data, subset = NULL, mvnTest = "hz", covariance = TRUE, tol = 1e-25, alpha = 0.5, scale = FALSE, desc = TRUE, transform = "none", R = 1000, univariateTest = "SW", multivariateOutlierMethod = "quan",bc = FALSE, bcType = "rounded", showOutliers = TRUE, showNewData = FALSE)
$multivariateNormality
Test HZ p value MVN
1 Henze-Zirkler 0.9987401 0.114507 YES
$univariateNormality
Test Variable Statistic p value Normality
1 Shapiro-Wilk motiv 0.9978 0.7661 YES
2 Shapiro-Wilk harm 0.9968 0.4195 YES
3 Shapiro-Wilk stabi 0.9961 0.2528 YES
4 Shapiro-Wilk ppsych 0.9982 0.8757 YES
5 Shapiro-Wilk ses 0.9981 0.8534 YES
6 Shapiro-Wilk verbal 0.9954 0.1527 YES
7 Shapiro-Wilk read 0.9972 0.5438 YES
8 Shapiro-Wilk arith 0.9975 0.6698 YES
9 Shapiro-Wilk spell 0.9973 0.5777 YES
$Descriptives
n Mean Std.Dev Median Min Max 25th 75th
motiv 500 4.0e-06 9.999975 0.3155 -33.971 26.469 -7.14825 6.70850
harm 500 4.0e-06 10.000004 0.0070 -32.982 31.164 -6.04975 6.62125
stabi 500 4.0e-06 10.000002 0.3015 -25.921 29.635 -6.93600 7.45500
ppsych 500 -8.0e-06 10.000011 -0.5435 -31.094 33.283 -6.53975 6.73050
ses 500 -4.0e-06 10.000008 -0.3115 -32.137 29.096 -6.32800 6.30375
verbal 500 2.4e-05 9.999990 0.0495 -37.607 27.657 -6.86550 6.23000
read 500 3.4e-05 10.000004 -0.0430 -32.839 27.606 -6.84275 6.65750
arith 500 -1.6e-05 10.000004 0.1265 -26.701 33.048 -6.40125 6.56975
spell 500 -8.0e-06 9.999985 -0.0090 -31.361 26.933 -6.31850 6.34100
Skew Kurtosis
motiv -0.10140728 -0.16374078
harm -0.14847128 0.31719262
stabi -0.06061700 -0.39425190
ppsych 0.09359766 -0.06547724
ses 0.06775350 -0.04837653
verbal -0.09045196 0.36354325
read -0.11195432 0.09045665
arith -0.01703520 0.03895456
spell -0.12283165 -0.01877259
$multivariateOutliers
Observation Mahalanobis Distance Outlier
221 221 25.621 TRUE
480 480 24.901 TRUE
328 328 23.885 TRUE
464 464 23.762 TRUE
288 288 23.058 TRUE
112 112 22.503 TRUE
240 240 21.526 TRUE
87 87 21.341 TRUE
438 438 21.250 TRUE
444 444 21.205 TRUE
100 100 21.032 TRUE
22 22 20.165 TRUE
378 378 19.910 TRUE
230 230 19.539 TRUE
3 3 19.267 TRUE
199 199 19.119 TRUE
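Because mvn() returns everything above as a named list, it can also help to store the result in an object so the individual pieces (the Henze-Zirkler test, the flagged multivariate outliers) can be pulled out later. A minimal sketch using the same arguments as the call above:
#Store the MVN results so individual elements can be inspected later
mvn_results <- mvn(SEM_Example_Data, mvnTest = "hz", univariateTest = "SW",
                   multivariateOutlierMethod = "quan", showOutliers = TRUE)
mvn_results$multivariateNormality   #Henze-Zirkler test
mvn_results$multivariateOutliers    #observations flagged by Mahalanobis distance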
Now for the Measurement Model
Meas_Model <- '
adjust =~ motiv + harm + stabi
risk =~ verbal + ppsych + ses
achieve =~ read + arith + spell
'
Measure_MDL_fit <- sem(Meas_Model, estimator = "MLR", data = SEM_Example_Data, mimic = "Mplus", missing = "FIML")
summary(Measure_MDL_fit, fit.measures = TRUE, standardized = TRUE)
lavaan 0.6-19 ended normally after 128 iterations
Estimator ML
Optimization method NLMINB
Number of model parameters 30
Number of observations 500
Number of missing patterns 1
Model Test User Model:
Standard Scaled
Test Statistic 148.982 152.792
Degrees of freedom 24 24
P-value (Chi-square) 0.000 0.000
Scaling correction factor 0.975
Yuan-Bentler correction (Mplus variant)
Model Test Baseline Model:
Test statistic 2597.976 2642.799
Degrees of freedom 36 36
P-value 0.000 0.000
Scaling correction factor 0.983
User Model versus Baseline Model:
Comparative Fit Index (CFI) 0.951 0.951
Tucker-Lewis Index (TLI) 0.927 0.926
Robust Comparative Fit Index (CFI) 0.951
Robust Tucker-Lewis Index (TLI) 0.927
Loglikelihood and Information Criteria:
Loglikelihood user model (H0) -15517.854 -15517.854
Scaling correction factor 1.003
for the MLR correction
Loglikelihood unrestricted model (H1) -15443.363 -15443.363
Scaling correction factor 0.991
for the MLR correction
Akaike (AIC) 31095.708 31095.708
Bayesian (BIC) 31222.146 31222.146
Sample-size adjusted Bayesian (SABIC) 31126.924 31126.924
Root Mean Square Error of Approximation:
RMSEA 0.102 0.104
90 Percent confidence interval - lower 0.087 0.088
90 Percent confidence interval - upper 0.118 0.120
P-value H_0: RMSEA <= 0.050 0.000 0.000
P-value H_0: RMSEA >= 0.080 0.990 0.993
Robust RMSEA 0.102
90 Percent confidence interval - lower 0.087
90 Percent confidence interval - upper 0.118
P-value H_0: Robust RMSEA <= 0.050 0.000
P-value H_0: Robust RMSEA >= 0.080 0.991
Standardized Root Mean Square Residual:
SRMR 0.037 0.037
Parameter Estimates:
Standard errors Sandwich
Information bread Observed
Observed information based on Hessian
Latent Variables:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
adjust =~
motiv 1.000 9.324 0.933
harm 0.884 0.046 19.193 0.000 8.246 0.825
stabi 0.695 0.047 14.793 0.000 6.478 0.648
risk =~
verbal 1.000 7.319 0.733
ppsych -0.770 0.078 -9.935 0.000 -5.636 -0.564
ses 0.807 0.084 9.649 0.000 5.906 0.591
achieve =~
read 1.000 9.404 0.941
arith 0.837 0.037 22.580 0.000 7.873 0.788
spell 0.976 0.027 36.176 0.000 9.178 0.919
Covariances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
adjust ~~
risk 32.098 4.498 7.136 0.000 0.470 0.470
achieve 55.831 5.184 10.770 0.000 0.637 0.637
risk ~~
achieve 50.828 5.572 9.122 0.000 0.739 0.739
Intercepts:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 0.000 0.447 0.000 1.000 0.000 0.000
.harm 0.000 0.447 0.000 1.000 0.000 0.000
.stabi 0.000 0.447 0.000 1.000 0.000 0.000
.verbal 0.000 0.447 0.000 1.000 0.000 0.000
.ppsych -0.000 0.447 -0.000 1.000 -0.000 -0.000
.ses -0.000 0.447 -0.000 1.000 -0.000 -0.000
.read 0.000 0.447 0.000 1.000 0.000 0.000
.arith -0.000 0.447 -0.000 1.000 -0.000 -0.000
.spell -0.000 0.447 -0.000 1.000 -0.000 -0.000
Variances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 12.871 3.098 4.155 0.000 12.871 0.129
.harm 31.804 3.166 10.045 0.000 31.804 0.319
.stabi 57.836 4.121 14.034 0.000 57.836 0.580
.verbal 46.239 4.648 9.949 0.000 46.239 0.463
.ppsych 68.033 5.244 12.973 0.000 68.033 0.682
.ses 64.916 4.797 13.533 0.000 64.916 0.650
.read 11.372 1.666 6.828 0.000 11.372 0.114
.arith 37.818 2.832 13.353 0.000 37.818 0.379
.spell 15.560 1.700 9.153 0.000 15.560 0.156
adjust 86.929 6.750 12.878 0.000 1.000 1.000
risk 53.561 7.068 7.578 0.000 1.000 1.000
achieve 88.428 6.586 13.427 0.000 1.000 1.000
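When you only need a handful of fit indices rather than the full summary, lavaan's fitMeasures() will return them directly. A minimal sketch pulling the indices reported above:
#Extract selected fit indices from the fitted measurement model
fitMeasures(Measure_MDL_fit, c("chisq", "df", "pvalue", "cfi", "tli", "rmsea", "srmr"))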
modindices(Measure_MDL_fit)
lhs op rhs mi epc sepc.lv sepc.all sepc.nox
37 adjust =~ verbal 0.016 -0.008 -0.077 -0.008 -0.008
38 adjust =~ ppsych 0.026 -0.009 -0.088 -0.009 -0.009
39 adjust =~ ses 0.000 -0.001 -0.007 -0.001 -0.001
40 adjust =~ read 34.353 -0.216 -2.014 -0.202 -0.202
41 adjust =~ arith 28.373 0.241 2.247 0.225 0.225
42 adjust =~ spell 5.227 0.084 0.787 0.079 0.079
43 risk =~ motiv 0.567 0.049 0.357 0.036 0.036
44 risk =~ harm 1.371 -0.071 -0.520 -0.052 -0.052
45 risk =~ stabi 0.287 0.037 0.268 0.027 0.027
46 risk =~ read 24.711 0.351 2.568 0.257 0.257
47 risk =~ arith 0.954 0.084 0.618 0.062 0.062
48 risk =~ spell 33.484 -0.409 -2.994 -0.300 -0.300
49 achieve =~ motiv 13.181 0.216 2.029 0.203 0.203
50 achieve =~ harm 12.140 -0.185 -1.743 -0.175 -0.175
51 achieve =~ stabi 0.063 -0.014 -0.130 -0.013 -0.013
52 achieve =~ verbal 14.009 0.545 5.123 0.513 0.513
53 achieve =~ ppsych 6.472 0.265 2.490 0.249 0.249
54 achieve =~ ses 0.865 -0.101 -0.952 -0.095 -0.095
55 motiv ~~ harm 0.063 -1.435 -1.435 -0.071 -0.071
56 motiv ~~ stabi 12.137 -11.920 -11.920 -0.437 -0.437
57 motiv ~~ verbal 0.254 -1.072 -1.072 -0.044 -0.044
58 motiv ~~ ppsych 0.233 1.088 1.088 0.037 0.037
59 motiv ~~ ses 2.433 -3.481 -3.481 -0.120 -0.120
60 motiv ~~ read 13.971 -4.579 -4.579 -0.378 -0.378
61 motiv ~~ arith 24.544 8.198 8.198 0.372 0.372
62 motiv ~~ spell 8.503 3.671 3.671 0.259 0.259
63 harm ~~ stabi 13.176 10.851 10.851 0.253 0.253
64 harm ~~ verbal 2.475 -3.547 -3.547 -0.093 -0.093
65 harm ~~ ppsych 4.420 -5.167 -5.167 -0.111 -0.111
66 harm ~~ ses 5.664 5.771 5.771 0.127 0.127
67 harm ~~ read 0.032 -0.233 -0.233 -0.012 -0.012
68 harm ~~ arith 1.887 -2.497 -2.497 -0.072 -0.072
69 harm ~~ spell 1.599 -1.715 -1.715 -0.077 -0.077
70 stabi ~~ verbal 4.130 5.719 5.719 0.111 0.111
71 stabi ~~ ppsych 0.717 2.607 2.607 0.042 0.042
72 stabi ~~ ses 0.091 -0.916 -0.916 -0.015 -0.015
73 stabi ~~ read 0.048 0.354 0.354 0.014 0.014
74 stabi ~~ arith 0.024 -0.353 -0.353 -0.008 -0.008
75 stabi ~~ spell 0.507 -1.200 -1.200 -0.040 -0.040
76 verbal ~~ ppsych 0.865 4.093 4.093 0.073 0.073
77 verbal ~~ ses 6.470 -11.751 -11.751 -0.214 -0.214
78 verbal ~~ read 3.264 3.177 3.177 0.139 0.139
79 verbal ~~ arith 5.452 5.445 5.445 0.130 0.130
80 verbal ~~ spell 3.413 -3.318 -3.318 -0.124 -0.124
81 ppsych ~~ ses 14.005 -14.339 -14.339 -0.216 -0.216
82 ppsych ~~ read 5.001 -4.137 -4.137 -0.149 -0.149
83 ppsych ~~ arith 10.928 8.401 8.401 0.166 0.166
84 ppsych ~~ spell 3.454 3.550 3.550 0.109 0.109
85 ses ~~ read 4.512 3.894 3.894 0.143 0.143
86 ses ~~ arith 2.077 3.614 3.614 0.073 0.073
87 ses ~~ spell 13.754 -7.008 -7.008 -0.220 -0.220
88 read ~~ arith 8.100 -5.909 -5.909 -0.285 -0.285
89 read ~~ spell 20.943 14.320 14.320 1.077 1.077
90 arith ~~ spell 0.473 -1.382 -1.382 -0.057 -0.057
Meas_Model_2 <- '
adjust =~ motiv + harm + stabi
risk =~ verbal + ppsych + ses
achieve =~ read + arith + spell
motiv~~stabi
motiv~~read
motiv~~arith
harm~~stabi
ppsych~~ses
'
Measure_MDL_fit_2 <- sem(Meas_Model_2, estimator = "MLR", data = SEM_Example_Data, mimic = "Mplus", missing = "FIML")
Warning: lavaan->lav_object_post_check():
some estimated ov variances are negative
summary(Measure_MDL_fit_2, fit.measures = TRUE, standardized=TRUE)
lavaan 0.6-19 ended normally after 184 iterations
Estimator ML
Optimization method NLMINB
Number of model parameters 35
Number of observations 500
Number of missing patterns 1
Model Test User Model:
Standard Scaled
Test Statistic 82.315 83.577
Degrees of freedom 19 19
P-value (Chi-square) 0.000 0.000
Scaling correction factor 0.985
Yuan-Bentler correction (Mplus variant)
Model Test Baseline Model:
Test statistic 2597.976 2642.799
Degrees of freedom 36 36
P-value 0.000 0.000
Scaling correction factor 0.983
User Model versus Baseline Model:
Comparative Fit Index (CFI) 0.975 0.975
Tucker-Lewis Index (TLI) 0.953 0.953
Robust Comparative Fit Index (CFI) 0.975
Robust Tucker-Lewis Index (TLI) 0.953
Loglikelihood and Information Criteria:
Loglikelihood user model (H0) -15484.520 -15484.520
Scaling correction factor 0.994
for the MLR correction
Loglikelihood unrestricted model (H1) -15443.363 -15443.363
Scaling correction factor 0.991
for the MLR correction
Akaike (AIC) 31039.040 31039.040
Bayesian (BIC) 31186.552 31186.552
Sample-size adjusted Bayesian (SABIC) 31075.459 31075.459
Root Mean Square Error of Approximation:
RMSEA 0.082 0.082
90 Percent confidence interval - lower 0.064 0.065
90 Percent confidence interval - upper 0.100 0.101
P-value H_0: RMSEA <= 0.050 0.002 0.002
P-value H_0: RMSEA >= 0.080 0.581 0.609
Robust RMSEA 0.082
90 Percent confidence interval - lower 0.064
90 Percent confidence interval - upper 0.100
P-value H_0: Robust RMSEA <= 0.050 0.002
P-value H_0: Robust RMSEA >= 0.080 0.583
Standardized Root Mean Square Residual:
SRMR 0.030 0.030
Parameter Estimates:
Standard errors Sandwich
Information bread Observed
Observed information based on Hessian
Latent Variables:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
adjust =~
motiv 1.000 9.972 1.014
harm 0.749 0.048 15.750 0.000 7.468 0.748
stabi 0.639 0.065 9.878 0.000 6.375 0.638
risk =~
verbal 1.000 7.534 0.754
ppsych -0.683 0.068 -10.121 0.000 -5.149 -0.515
ses 0.721 0.076 9.512 0.000 5.429 0.543
achieve =~
read 1.000 9.472 0.948
arith 0.825 0.036 22.646 0.000 7.814 0.782
spell 0.965 0.027 36.198 0.000 9.137 0.915
Covariances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv ~~
.stabi -6.750 4.851 -1.392 0.164 -6.750 -0.522
.read -4.637 1.379 -3.362 0.001 -4.637 -0.871
.arith 5.872 1.949 3.013 0.003 5.872 0.561
.harm ~~
.stabi 10.277 4.581 2.244 0.025 10.277 0.201
.ppsych ~~
.ses -13.957 4.090 -3.413 0.001 -13.957 -0.194
adjust ~~
risk 33.830 4.605 7.347 0.000 0.450 0.450
achieve 59.544 5.109 11.654 0.000 0.630 0.630
risk ~~
achieve 54.025 5.268 10.254 0.000 0.757 0.757
Intercepts:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 0.000 0.447 0.000 1.000 0.000 0.000
.harm 0.000 0.447 0.000 1.000 0.000 0.000
.stabi 0.000 0.447 0.000 1.000 0.000 0.000
.verbal 0.000 0.447 0.000 1.000 0.000 0.000
.ppsych -0.000 0.447 -0.000 1.000 -0.000 -0.000
.ses -0.000 0.447 -0.000 1.000 -0.000 -0.000
.read 0.000 0.447 0.000 1.000 0.000 0.000
.arith -0.000 0.447 -0.000 1.000 -0.000 -0.000
.spell -0.000 0.447 -0.000 1.000 -0.000 -0.000
Variances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv -2.822 5.335 -0.529 0.597 -2.822 -0.029
.harm 44.035 4.189 10.512 0.000 44.035 0.441
.stabi 59.158 7.121 8.307 0.000 59.158 0.593
.verbal 43.037 5.110 8.421 0.000 43.037 0.431
.ppsych 73.283 5.460 13.422 0.000 73.283 0.734
.ses 70.321 4.959 14.180 0.000 70.321 0.705
.read 10.045 1.620 6.200 0.000 10.045 0.101
.arith 38.786 2.867 13.529 0.000 38.786 0.388
.spell 16.311 1.726 9.451 0.000 16.311 0.163
adjust 99.450 7.903 12.583 0.000 1.000 1.000
risk 56.763 7.523 7.545 0.000 1.000 1.000
achieve 89.713 6.581 13.633 0.000 1.000 1.000
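Because the second measurement model simply adds residual covariances to the first, the two models are nested and can be compared with a scaled chi-square difference test. A minimal sketch using lavaan's lavTestLRT():
#Scaled chi-square difference test comparing the two measurement models
lavTestLRT(Measure_MDL_fit, Measure_MDL_fit_2)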
Testing the Structural Model
SEM_Model <- '
adjust =~ motiv + harm + stabi
risk =~ verbal + ppsych + ses
achieve =~ read + arith + spell
motiv~~read
motiv~~arith
harm~~stabi
ppsych~~ses
motiv~~0*motiv
achieve ~ adjust + risk
'
SEM_MDL_fit <- sem(SEM_Model, estimator = "MLR", data = SEM_Example_Data, mimic = "Mplus", missing = "FIML")
Warning: lavaan->lav_object_post_check():
the covariance matrix of the residuals of the observed variables (theta)
is not positive definite ; use lavInspect(fit, "theta") to investigate.
summary(SEM_MDL_fit, fit.measures = TRUE, standardized=TRUE)
lavaan 0.6-19 ended normally after 320 iterations
Estimator ML
Optimization method NLMINB
Number of model parameters 33
Number of observations 500
Number of missing patterns 1
Model Test User Model:
Standard Scaled
Test Statistic 84.278 85.006
Degrees of freedom 21 21
P-value (Chi-square) 0.000 0.000
Scaling correction factor 0.991
Yuan-Bentler correction (Mplus variant)
Model Test Baseline Model:
Test statistic 2597.976 2642.799
Degrees of freedom 36 36
P-value 0.000 0.000
Scaling correction factor 0.983
User Model versus Baseline Model:
Comparative Fit Index (CFI) 0.975 0.975
Tucker-Lewis Index (TLI) 0.958 0.958
Robust Comparative Fit Index (CFI) 0.975
Robust Tucker-Lewis Index (TLI) 0.958
Loglikelihood and Information Criteria:
Loglikelihood user model (H0) -15485.502 -15485.502
Scaling correction factor 0.990
for the MLR correction
Loglikelihood unrestricted model (H1) -15443.363 -15443.363
Scaling correction factor 0.991
for the MLR correction
Akaike (AIC) 31037.003 31037.003
Bayesian (BIC) 31176.085 31176.085
Sample-size adjusted Bayesian (SABIC) 31071.341 31071.341
Root Mean Square Error of Approximation:
RMSEA 0.078 0.078
90 Percent confidence interval - lower 0.061 0.061
90 Percent confidence interval - upper 0.095 0.096
P-value H_0: RMSEA <= 0.050 0.004 0.004
P-value H_0: RMSEA >= 0.080 0.432 0.449
Robust RMSEA 0.078
90 Percent confidence interval - lower 0.061
90 Percent confidence interval - upper 0.095
P-value H_0: Robust RMSEA <= 0.050 0.004
P-value H_0: Robust RMSEA >= 0.080 0.431
Standardized Root Mean Square Residual:
SRMR 0.032 0.032
Parameter Estimates:
Standard errors Sandwich
Information bread Observed
Observed information based on Hessian
Latent Variables:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
adjust =~
motiv 1.000 9.829 1.000
harm 0.771 0.033 23.576 0.000 7.575 0.758
stabi 0.588 0.034 17.503 0.000 5.783 0.579
risk =~
verbal 1.000 7.534 0.754
ppsych -0.683 0.068 -10.063 0.000 -5.149 -0.515
ses 0.721 0.076 9.489 0.000 5.430 0.544
achieve =~
read 1.000 9.474 0.948
arith 0.824 0.036 22.598 0.000 7.810 0.782
spell 0.965 0.027 36.177 0.000 9.138 0.915
Regressions:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
achieve ~
adjust 0.358 0.039 9.095 0.000 0.372 0.372
risk 0.739 0.082 9.026 0.000 0.588 0.588
Covariances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv ~~
.read -4.603 1.311 -3.512 0.000 -4.603 -Inf
.arith 5.912 1.811 3.265 0.001 5.912 Inf
.harm ~~
.stabi 14.074 2.166 6.497 0.000 14.074 0.265
.ppsych ~~
.ses -13.959 4.095 -3.409 0.001 -13.959 -0.194
adjust ~~
risk 33.689 4.551 7.403 0.000 0.455 0.455
Intercepts:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 0.000 0.447 0.000 1.000 0.000 0.000
.harm 0.000 0.447 0.000 1.000 0.000 0.000
.stabi 0.000 0.447 0.000 1.000 0.000 0.000
.verbal 0.000 0.447 0.000 1.000 0.000 0.000
.ppsych -0.000 0.447 -0.000 1.000 -0.000 -0.000
.ses -0.000 0.447 -0.000 1.000 -0.000 -0.000
.read 0.000 0.447 0.000 1.000 0.000 0.000
.arith -0.000 0.447 -0.000 1.000 -0.000 -0.000
.spell -0.000 0.447 -0.000 1.000 -0.000 -0.000
Variances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 0.000 0.000 0.000
.harm 42.415 2.697 15.728 0.000 42.415 0.425
.stabi 66.354 3.848 17.244 0.000 66.354 0.665
.verbal 43.034 5.122 8.402 0.000 43.034 0.431
.ppsych 73.291 5.466 13.409 0.000 73.291 0.734
.ses 70.317 4.960 14.177 0.000 70.317 0.705
.read 10.047 1.619 6.206 0.000 10.047 0.101
.arith 38.807 2.871 13.518 0.000 38.807 0.389
.spell 16.297 1.708 9.539 0.000 16.297 0.163
adjust 96.608 5.863 16.478 0.000 1.000 1.000
risk 56.766 7.538 7.531 0.000 1.000 1.000
.achieve 28.468 3.448 8.257 0.000 0.317 0.317
lavInspect(SEM_MDL_fit, "theta")
motiv harm stabi verbal ppsych ses read arith spell
motiv 0.000
harm 0.000 42.415
stabi 0.000 14.074 66.354
verbal 0.000 0.000 0.000 43.034
ppsych 0.000 0.000 0.000 0.000 73.291
ses 0.000 0.000 0.000 0.000 -13.959 70.317
read -4.603 0.000 0.000 0.000 0.000 0.000 10.047
arith 5.912 0.000 0.000 0.000 0.000 0.000 0.000 38.807
spell 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 16.297
modindices(SEM_MDL_fit)
Warning: lavaan->lav_start_check_cov():
non-zero covariance element set to zero, due to fixed-to-zero variances
variables involved are: motiv read
Warning: lavaan->lav_start_check_cov():
non-zero covariance element set to zero, due to fixed-to-zero variances
variables involved are: motiv arith
lhs op rhs mi epc sepc.lv sepc.all sepc.nox
4 risk =~ verbal 0.043 0.803 6.052 0.606 0.606
14 motiv ~~ motiv 3.970 11.604 0.000 0.000 0.000
41 adjust =~ verbal 0.452 -0.044 -0.428 -0.043 -0.043
42 adjust =~ ppsych 0.620 -0.039 -0.380 -0.038 -0.038
43 adjust =~ ses 0.087 0.015 0.143 0.014 0.014
44 adjust =~ read 192.765 -0.763 -7.503 -0.751 -0.751
45 adjust =~ arith 74.514 0.478 4.702 0.471 0.471
46 adjust =~ spell 15.134 0.166 1.634 0.164 0.164
47 risk =~ motiv 4.464 -0.138 -1.042 -0.106 -0.106
48 risk =~ harm 0.172 0.021 0.157 0.016 0.016
49 risk =~ stabi 3.794 0.122 0.918 0.092 0.092
50 risk =~ read 29.580 0.476 3.584 0.359 0.359
51 risk =~ arith 1.571 0.108 0.813 0.081 0.081
52 risk =~ spell 26.247 -0.366 -2.758 -0.276 -0.276
53 achieve =~ motiv 2.056 -0.078 -0.737 -0.075 -0.075
54 achieve =~ harm 0.062 -0.010 -0.097 -0.010 -0.010
55 achieve =~ stabi 2.551 0.080 0.762 0.076 0.076
56 achieve =~ verbal 0.022 0.097 0.923 0.092 0.092
57 achieve =~ ppsych 0.805 0.099 0.939 0.094 0.094
58 achieve =~ ses 1.220 0.128 1.216 0.122 0.122
59 motiv ~~ harm 2.421 5.317 5.317 NA NA
60 motiv ~~ stabi 0.430 -2.598 -2.598 NA NA
61 motiv ~~ verbal 0.184 -0.991 -0.991 NA NA
62 motiv ~~ ppsych 0.007 -0.184 -0.184 NA NA
63 motiv ~~ ses 1.686 -2.807 -2.807 NA NA
64 motiv ~~ spell 27.834 14.293 14.293 NA NA
65 harm ~~ verbal 1.828 -3.034 -3.034 -0.071 -0.071
66 harm ~~ ppsych 2.904 -4.140 -4.140 -0.074 -0.074
67 harm ~~ ses 4.274 4.958 4.958 0.091 0.091
68 harm ~~ read 1.503 1.955 1.955 0.095 0.095
69 harm ~~ arith 3.220 -4.081 -4.081 -0.101 -0.101
70 harm ~~ spell 0.693 -1.264 -1.264 -0.048 -0.048
71 stabi ~~ verbal 4.886 6.175 6.175 0.116 0.116
72 stabi ~~ ppsych 1.022 3.071 3.071 0.044 0.044
73 stabi ~~ ses 0.156 -1.183 -1.183 -0.017 -0.017
74 stabi ~~ read 0.449 1.087 1.087 0.042 0.042
75 stabi ~~ arith 0.011 0.243 0.243 0.005 0.005
76 stabi ~~ spell 0.083 -0.493 -0.493 -0.015 -0.015
77 verbal ~~ ppsych 0.989 -4.465 -4.465 -0.080 -0.080
78 verbal ~~ ses 1.073 -4.905 -4.905 -0.089 -0.089
79 verbal ~~ read 1.435 2.172 2.172 0.104 0.104
80 verbal ~~ arith 5.054 5.278 5.278 0.129 0.129
81 verbal ~~ spell 6.207 -4.559 -4.559 -0.172 -0.172
82 ppsych ~~ read 4.464 -3.783 -3.783 -0.139 -0.139
83 ppsych ~~ arith 10.832 8.328 8.328 0.156 0.156
84 ppsych ~~ spell 1.306 2.146 2.146 0.062 0.062
85 ses ~~ read 4.163 3.626 3.626 0.136 0.136
86 ses ~~ arith 4.233 5.141 5.141 0.098 0.098
87 ses ~~ spell 9.232 -5.648 -5.648 -0.167 -0.167
88 read ~~ arith 18.611 -9.993 -9.993 -0.506 -0.506
89 read ~~ spell 21.520 19.046 19.046 1.488 1.488
90 arith ~~ spell 0.001 -0.066 -0.066 -0.003 -0.003
93 adjust ~ achieve 24.714 -2.422 -2.335 -2.335 -2.335
94 adjust ~ risk 142.750 -24.714 -18.944 -18.944 -18.944
96 risk ~ adjust 77.401 -3.530 -4.605 -4.605 -4.605
SEM_Model_2 <- '
adjust =~ motiv + harm + stabi
risk =~ verbal + ppsych + ses
achieve =~ read + arith + spell
motiv~~stabi
motiv~~read
motiv~~arith
harm~~stabi
ppsych~~ses
motiv~~0*motiv
achieve~~0*achieve
achieve ~ adjust + risk
'
SEM_MDL_fit_2 <- sem(SEM_Model_2, estimator = "MLR", data = SEM_Example_Data, mimic = "Mplus", missing = "FIML")
Warning: lavaan->lav_object_post_check():
the covariance matrix of the residuals of the observed variables (theta)
is not positive definite ; use lavInspect(fit, "theta") to investigate.
summary(SEM_MDL_fit_2, fit.measures = TRUE, standardized=TRUE)
lavaan 0.6-19 ended normally after 280 iterations
Estimator ML
Optimization method NLMINB
Number of model parameters 33
Number of observations 500
Number of missing patterns 1
Model Test User Model:
Standard Scaled
Test Statistic 121.295 125.911
Degrees of freedom 21 21
P-value (Chi-square) 0.000 0.000
Scaling correction factor 0.963
Yuan-Bentler correction (Mplus variant)
Model Test Baseline Model:
Test statistic 2597.976 2642.799
Degrees of freedom 36 36
P-value 0.000 0.000
Scaling correction factor 0.983
User Model versus Baseline Model:
Comparative Fit Index (CFI) 0.961 0.960
Tucker-Lewis Index (TLI) 0.933 0.931
Robust Comparative Fit Index (CFI) 0.961
Robust Tucker-Lewis Index (TLI) 0.933
Loglikelihood and Information Criteria:
Loglikelihood user model (H0) -15504.010 -15504.010
Scaling correction factor 1.008
for the MLR correction
Loglikelihood unrestricted model (H1) -15443.363 -15443.363
Scaling correction factor 0.991
for the MLR correction
Akaike (AIC) 31074.021 31074.021
Bayesian (BIC) 31213.103 31213.103
Sample-size adjusted Bayesian (SABIC) 31108.359 31108.359
Root Mean Square Error of Approximation:
RMSEA 0.098 0.100
90 Percent confidence interval - lower 0.081 0.083
90 Percent confidence interval - upper 0.115 0.117
P-value H_0: RMSEA <= 0.050 0.000 0.000
P-value H_0: RMSEA >= 0.080 0.961 0.974
Robust RMSEA 0.098
90 Percent confidence interval - lower 0.081
90 Percent confidence interval - upper 0.115
P-value H_0: Robust RMSEA <= 0.050 0.000
P-value H_0: Robust RMSEA >= 0.080 0.963
Standardized Root Mean Square Residual:
SRMR 0.042 0.042
Parameter Estimates:
Standard errors Sandwich
Information bread Observed
Observed information based on Hessian
Latent Variables:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
adjust =~
motiv 1.000 9.823 1.000
harm 0.771 0.033 23.519 0.000 7.570 0.758
stabi 0.646 0.061 10.506 0.000 6.344 0.635
risk =~
verbal 1.000 5.811 0.582
ppsych -0.668 0.072 -9.306 0.000 -3.884 -0.389
ses 0.746 0.076 9.820 0.000 4.333 0.434
achieve =~
read 1.000 9.495 0.950
arith 0.821 0.036 22.673 0.000 7.798 0.781
spell 0.959 0.026 37.221 0.000 9.104 0.911
Regressions:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
achieve ~
adjust 0.074 0.074 1.003 0.316 0.077 0.077
risk 1.557 0.140 11.142 0.000 0.953 0.953
Covariances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv ~~
.stabi -5.580 4.687 -1.190 0.234 -5.580 -Inf
.read -4.439 1.342 -3.309 0.001 -4.439 -Inf
.arith 6.204 1.853 3.349 0.001 6.204 Inf
.harm ~~
.stabi 9.934 4.240 2.343 0.019 9.934 0.197
.ppsych ~~
.ses -25.084 4.147 -6.048 0.000 -25.084 -0.303
adjust ~~
risk 33.620 4.554 7.383 0.000 0.589 0.589
Intercepts:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 0.000 0.447 0.000 1.000 0.000 0.000
.harm 0.000 0.447 0.000 1.000 0.000 0.000
.stabi 0.000 0.447 0.000 1.000 0.000 0.000
.verbal 0.000 0.447 0.000 1.000 0.000 0.000
.ppsych -0.000 0.447 -0.000 1.000 -0.000 -0.000
.ses -0.000 0.447 -0.000 1.000 -0.000 -0.000
.read 0.000 0.447 0.000 1.000 0.000 0.000
.arith -0.000 0.447 -0.000 1.000 -0.000 -0.000
.spell -0.000 0.447 -0.000 1.000 -0.000 -0.000
Variances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.motiv 0.000 0.000 0.000
.achieve 0.000 0.000 0.000
.harm 42.494 2.709 15.686 0.000 42.494 0.426
.stabi 59.600 7.044 8.461 0.000 59.600 0.597
.verbal 66.026 4.348 15.184 0.000 66.026 0.662
.ppsych 84.715 5.582 15.178 0.000 84.715 0.849
.ses 81.019 4.832 16.766 0.000 81.019 0.812
.read 9.770 1.516 6.445 0.000 9.770 0.098
.arith 38.881 2.867 13.562 0.000 38.881 0.390
.spell 16.919 1.657 10.209 0.000 16.919 0.170
adjust 96.490 5.857 16.474 0.000 1.000 1.000
risk 33.771 5.095 6.628 0.000 1.000 1.000
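These two structural models are not nested (the second frees the motiv-stabi residual covariance but fixes the achieve disturbance variance to zero), so a chi-square difference test is not appropriate. One option is to line up their information criteria and fit indices side by side; a minimal sketch using fitMeasures():
#Side-by-side fit comparison for the two (non-nested) structural models
fit_indices <- c("aic", "bic", "cfi", "rmsea", "srmr")
cbind(SEM_Model   = fitMeasures(SEM_MDL_fit, fit_indices),
      SEM_Model_2 = fitMeasures(SEM_MDL_fit_2, fit_indices))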
knitr::kable(coef(SEM_MDL_fit_2), booktabs=TRUE, format="markdown")
 | x |
---|---|
adjust=~harm | 7.706664e-01 |
adjust=~stabi | 6.458660e-01 |
risk=~ppsych | -6.684268e-01 |
risk=~ses | 7.456397e-01 |
achieve=~arith | 8.213196e-01 |
achieve=~spell | 9.588428e-01 |
motiv~~stabi | -5.579616e+00 |
motiv~~read | -4.439304e+00 |
motiv~~arith | 6.204295e+00 |
harm~~stabi | 9.934010e+00 |
ppsych~~ses | -2.508351e+01 |
achieve~adjust | 7.435989e-02 |
achieve~risk | 1.556635e+00 |
harm~~harm | 4.249353e+01 |
stabi~~stabi | 5.960046e+01 |
verbal~~verbal | 6.602583e+01 |
ppsych~~ppsych | 8.471533e+01 |
ses~~ses | 8.101905e+01 |
read~~read | 9.770239e+00 |
arith~~arith | 3.888140e+01 |
spell~~spell | 1.691859e+01 |
adjust~~adjust | 9.649023e+01 |
risk~~risk | 3.377134e+01 |
adjust~~risk | 3.361985e+01 |
motiv~1 | 4.017098e-06 |
harm~1 | 4.067597e-06 |
stabi~1 | 4.018037e-06 |
verbal~1 | 2.409076e-05 |
ppsych~1 | -7.935747e-06 |
ses~1 | -3.984243e-06 |
read~1 | 3.411120e-05 |
arith~1 | -1.595082e-05 |
spell~1 | -7.976727e-06 |
##Factor Loading Table
parameterEstimates(SEM_MDL_fit, standardized=TRUE) %>%
filter(op == "=~") %>%
select('Latent Factor'=lhs, Indicator=rhs, B=est, SE=se, Z=z, 'p-value'=pvalue, Beta=std.all,CI_Lower=ci.lower, CI_Upper=ci.upper) %>%
knitr::kable(digits = 3, format="html", booktabs=TRUE, caption="Total Sample Factor Loadings") %>%
kable_classic(full_width = F, html_font = "Cambria")
Latent Factor | Indicator | B | SE | Z | p-value | Beta | CI_Lower | CI_Upper |
---|---|---|---|---|---|---|---|---|
adjust | motiv | 1.000 | 0.000 | NA | NA | 1.000 | 1.000 | 1.000 |
adjust | harm | 0.771 | 0.033 | 23.576 | 0 | 0.758 | 0.707 | 0.835 |
adjust | stabi | 0.588 | 0.034 | 17.503 | 0 | 0.579 | 0.523 | 0.654 |
risk | verbal | 1.000 | 0.000 | NA | NA | 0.754 | 1.000 | 1.000 |
risk | ppsych | -0.683 | 0.068 | -10.063 | 0 | -0.515 | -0.816 | -0.550 |
risk | ses | 0.721 | 0.076 | 9.489 | 0 | 0.544 | 0.572 | 0.870 |
achieve | read | 1.000 | 0.000 | NA | NA | 0.948 | 1.000 | 1.000 |
achieve | arith | 0.824 | 0.036 | 22.598 | 0 | 0.782 | 0.753 | 0.896 |
achieve | spell | 0.965 | 0.027 | 36.177 | 0 | 0.915 | 0.912 | 1.017 |
##Regression Table
parameterEstimates(SEM_MDL_fit, standardized=TRUE) %>%
filter(op == "~") %>%
select('LV1'=lhs, 'LV2'=rhs, B=est, SE=se, Z=z, 'p-value'=pvalue, Beta=std.all,CI_Lower=ci.lower, CI_Upper=ci.upper) %>%
knitr::kable(digits = 3, format="html", booktabs=TRUE, caption="Total Sample Regressions") %>%
kable_classic(full_width = F, html_font = "Cambria")
LV1 | LV2 | B | SE | Z | p-value | Beta | CI_Lower | CI_Upper |
---|---|---|---|---|---|---|---|---|
achieve | adjust | 0.358 | 0.039 | 9.095 | 0 | 0.372 | 0.281 | 0.436 |
achieve | risk | 0.739 | 0.082 | 9.026 | 0 | 0.588 | 0.579 | 0.900 |
semPaths(SEM_MDL_fit, whatLabels = "std", edge.label.cex = .5, layout = "tree2",
rotation = 2, style = "lisrel", intercepts = FALSE, residuals = TRUE,
curve = 1, curvature = 3, nCharNodes = 8, sizeMan = 6, sizeMan2 = 3,
optimizeLatRes = TRUE, edge.color = "#000000")
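The tidySEM package loaded earlier offers an alternative, ggplot2-based path diagram. A minimal sketch, assuming the default layout is acceptable:
#Alternative path diagram using tidySEM
graph_sem(model = SEM_MDL_fit)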
If I wanted to run this model in Mplus, I could use the following code:
Title: Example SEM Code for Mplus
Data:
FILE IS academic_achievement.csv;
Variable:
Names are motiv, harm, stabi, ppsych, ses, verbal,
read, arith, spell;
Usevar = motiv, harm, stabi, ppsych, ses, verbal,
read, arith, spell;
Analysis:
Estimator = MLR;   ! matches the MLR estimator used in the lavaan code
Model:
adjust BY motiv harm stabi;
risk BY verbal ppsych ses;
achieve BY read arith spell;
motiv WITH stabi;
motiv WITH read;
motiv WITH arith;
harm WITH stabi;
ppsych WITH ses;
motiv@0;
achieve@0;
achieve ON adjust risk;
Output: sampstat standardized residual;