#Load libraries
library(MBESS)
library(lavaan)
library(semPlot)
library(readr)
library(psych)
library(CTT)
library(GPArotation)
library(psychometric)
library(cocron)
library(knitr)
library(ltm)
library(mirt)
library(ggplot2)
library(Hmisc)
#Import data 

SCORES <- read_csv("C:/Goran/01 FAKS/4TT/Project/Data/SCORES.csv")
data <- SCORES



#Convert the item scores to ordered (categorical) factors
data[,c("X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9", "X10", "X11", "X12", "X13", "X14", "X15", "X16", "X17", "X18", "X19", "X20", "X21", "X22", "X23", "X24", "X25")] <-
  lapply(data[,c("X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9", "X10", "X11", "X12", "X13", "X14", "X15", "X16", "X17", "X18", "X19", "X20", "X21", "X22", "X23", "X24", "X25")],
         ordered)
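#Optional check (sketch, not part of the original run): confirm the item
#columns are now ordered factors
table(sapply(data[paste0("X", 1:25)], is.ordered))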

#Import the data again as a plain data frame of numeric 0/1 item scores
ctt.data <- read.csv("C:/Goran/01 FAKS/4TT/Project/Data/SCORES.csv", header = TRUE)

#Score matrix; column subsets such as [,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -18, -19, -20, -21, -23, -24)] are applied later in the reliability calls
responses <- as.matrix(ctt.data)
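#Sanity check (sketch, not run above): matrix dimensions and per-item
#proportion correct, assuming the CSV holds the 0/1 item scores X1-X25
dim(responses)
round(colMeans(responses), 2)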


#CFA: Test 1, a one-factor model with all 25 items

one.model = "model1 =~ X4 + X17 + X2 + X3 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 + X1 + X18 + X19 + X20 + X21 + X22 + X23 + X24 + X25"
#Run the model

one.fit = cfa(one.model,
              data = responses,
              estimator = "mlr")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  73 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                         ML      Robust
##   Minimum Function Test Statistic              339.180     347.386
##   Degrees of freedom                               275         275
##   P-value (Chi-square)                           0.005       0.002
##   Scaling correction factor                                  0.976
##     for the Yuan-Bentler correction
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              521.769     517.868
##   Degrees of freedom                               300         300
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    0.711       0.668
##   Tucker-Lewis Index (TLI)                       0.684       0.638
## 
##   Robust Comparative Fit Index (CFI)                         0.678
##   Robust Tucker-Lewis Index (TLI)                            0.649
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)              -1313.466   -1313.466
##   Scaling correction factor                                  1.210
##     for the MLR correction
##   Loglikelihood unrestricted model (H1)      -1143.876   -1143.876
##   Scaling correction factor                                  1.026
##     for the MLR correction
## 
##   Number of free parameters                         75          75
##   Akaike (AIC)                                2776.931    2776.931
##   Bayesian (BIC)                              2975.978    2975.978
##   Sample-size adjusted Bayesian (BIC)         2739.039    2739.039
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.047       0.050
##   90 Percent Confidence Interval          0.027  0.063       0.031  0.066
##   P-value RMSEA <= 0.05                          0.599       0.486
## 
##   Robust RMSEA                                               0.049
##   90 Percent Confidence Interval                             0.031  0.065
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.082       0.082
## 
## Parameter Estimates:
## 
##   Information                                 Observed
##   Standard Errors                   Robust.huber.white
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   model1 =~                                                             
##     X4                1.000                               0.242    0.494
##     X17               0.951    0.256    3.721    0.000    0.230    0.461
##     X2                0.312    0.210    1.488    0.137    0.076    0.285
##     X3                0.532    0.203    2.623    0.009    0.129    0.358
##     X5                0.511    0.257    1.993    0.046    0.124    0.277
##     X6                0.106    0.204    0.522    0.602    0.026    0.066
##     X7                0.413    0.248    1.669    0.095    0.100    0.214
##     X8                0.844    0.273    3.097    0.002    0.204    0.410
##     X9                0.401    0.273    1.470    0.141    0.097    0.203
##     X10               0.256    0.258    0.992    0.321    0.062    0.138
##     X11               0.954    0.289    3.295    0.001    0.231    0.463
##     X12               0.478    0.231    2.064    0.039    0.116    0.322
##     X13               0.941    0.332    2.832    0.005    0.228    0.580
##     X14               0.111    0.214    0.517    0.605    0.027    0.070
##     X15               0.667    0.182    3.668    0.000    0.162    0.365
##     X16               0.214    0.244    0.877    0.380    0.052    0.109
##     X1                0.344    0.183    1.877    0.061    0.083    0.212
##     X18               0.853    0.297    2.878    0.004    0.207    0.467
##     X19               0.550    0.257    2.141    0.032    0.133    0.476
##     X20               0.313    0.242    1.291    0.197    0.076    0.164
##     X21               0.388    0.254    1.527    0.127    0.094    0.441
##     X22               0.607    0.175    3.459    0.001    0.147    0.329
##     X23               0.639    0.329    1.944    0.052    0.155    0.346
##     X24              -0.139    0.246   -0.563    0.573   -0.034   -0.070
##     X25               0.837    0.294    2.846    0.004    0.203    0.427
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.600    0.048   12.550    0.000    0.600    1.225
##    .X17               0.476    0.049    9.770    0.000    0.476    0.953
##    .X2                0.924    0.026   35.681    0.000    0.924    3.482
##    .X3                0.848    0.035   24.167    0.000    0.848    2.358
##    .X5                0.724    0.044   16.588    0.000    0.724    1.619
##    .X6                0.810    0.038   21.125    0.000    0.810    2.062
##    .X7                0.676    0.046   14.808    0.000    0.676    1.445
##    .X8                0.543    0.049   11.166    0.000    0.543    1.090
##    .X9                0.648    0.047   13.891    0.000    0.648    1.356
##    .X10               0.276    0.044    6.330    0.000    0.276    0.618
##    .X11               0.533    0.049   10.954    0.000    0.533    1.069
##    .X12               0.848    0.035   24.167    0.000    0.848    2.358
##    .X13               0.810    0.038   21.125    0.000    0.810    2.062
##    .X14               0.819    0.038   21.801    0.000    0.819    2.128
##    .X15               0.267    0.043    6.179    0.000    0.267    0.603
##    .X16               0.648    0.047   13.891    0.000    0.648    1.356
##    .X1                0.810    0.038   21.125    0.000    0.810    2.062
##    .X18               0.733    0.043   16.993    0.000    0.733    1.658
##    .X19               0.914    0.027   33.466    0.000    0.914    3.266
##    .X20               0.695    0.045   15.477    0.000    0.695    1.510
##    .X21               0.952    0.021   45.826    0.000    0.952    4.472
##    .X22               0.276    0.044    6.330    0.000    0.276    0.618
##    .X23               0.724    0.044   16.588    0.000    0.724    1.619
##    .X24               0.362    0.047    7.717    0.000    0.362    0.753
##    .X25               0.657    0.046   14.186    0.000    0.657    1.384
##     model1            0.000                               0.000    0.000
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.181    0.026    7.074    0.000    0.181    0.756
##    .X17               0.196    0.021    9.524    0.000    0.196    0.787
##    .X2                0.065    0.020    3.304    0.001    0.065    0.919
##    .X3                0.113    0.022    5.125    0.000    0.113    0.872
##    .X5                0.185    0.022    8.436    0.000    0.185    0.923
##    .X6                0.154    0.024    6.501    0.000    0.154    0.996
##    .X7                0.209    0.019   11.180    0.000    0.209    0.954
##    .X8                0.206    0.023    8.838    0.000    0.206    0.832
##    .X9                0.219    0.018   12.388    0.000    0.219    0.959
##    .X10               0.196    0.020    9.569    0.000    0.196    0.981
##    .X11               0.196    0.023    8.502    0.000    0.196    0.786
##    .X12               0.116    0.023    5.124    0.000    0.116    0.896
##    .X13               0.102    0.026    3.979    0.000    0.102    0.663
##    .X14               0.147    0.024    6.206    0.000    0.147    0.995
##    .X15               0.169    0.017    9.898    0.000    0.169    0.867
##    .X16               0.226    0.015   15.083    0.000    0.226    0.988
##    .X1                0.147    0.023    6.425    0.000    0.147    0.955
##    .X18               0.153    0.025    6.028    0.000    0.153    0.782
##    .X19               0.061    0.017    3.608    0.000    0.061    0.774
##    .X20               0.206    0.019   10.895    0.000    0.206    0.973
##    .X21               0.037    0.012    3.102    0.002    0.037    0.805
##    .X22               0.178    0.019    9.606    0.000    0.178    0.892
##    .X23               0.176    0.024    7.302    0.000    0.176    0.880
##    .X24               0.230    0.014   16.793    0.000    0.230    0.995
##    .X25               0.184    0.024    7.724    0.000    0.184    0.818
##     model1            0.059    0.024    2.448    0.014    1.000    1.000
## 
## R-Square:
##                    Estimate
##     X4                0.244
##     X17               0.213
##     X2                0.081
##     X3                0.128
##     X5                0.077
##     X6                0.004
##     X7                0.046
##     X8                0.168
##     X9                0.041
##     X10               0.019
##     X11               0.214
##     X12               0.104
##     X13               0.337
##     X14               0.005
##     X15               0.133
##     X16               0.012
##     X1                0.045
##     X18               0.218
##     X19               0.226
##     X20               0.027
##     X21               0.195
##     X22               0.108
##     X23               0.120
##     X24               0.005
##     X25               0.182
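#The headline fit indices can also be pulled out directly, which makes it
#easier to compare the models that follow (sketch using lavaan's fitMeasures)
fitMeasures(one.fit, c("chisq", "df", "pvalue", "cfi", "tli", "rmsea", "srmr"))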
ci.reliability(data = responses, type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7238847
## 
## $se
## [1] 0.03656865
## 
## $ci.lower
## [1] 0.6432098
## 
## $ci.upper
## [1] 0.7827432
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 2: 14-item one-factor model (MLR)
one.model = "model2 =~ X4 + X17 + X3 + X5 + X8 + X11 + X12 + X13 + X15 + X1 + X18 + X19 + X22 + X25"
#Run the model

one.fit = cfa(one.model,
              data = responses,
              estimator = "mlr")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  47 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                         ML      Robust
##   Minimum Function Test Statistic               82.201      81.921
##   Degrees of freedom                                77          77
##   P-value (Chi-square)                           0.322       0.329
##   Scaling correction factor                                  1.003
##     for the Yuan-Bentler correction
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              219.873     212.253
##   Degrees of freedom                                91          91
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    0.960       0.959
##   Tucker-Lewis Index (TLI)                       0.952       0.952
## 
##   Robust Comparative Fit Index (CFI)                         0.961
##   Robust Tucker-Lewis Index (TLI)                            0.954
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)               -759.481    -759.481
##   Scaling correction factor                                  1.049
##     for the MLR correction
##   Loglikelihood unrestricted model (H1)       -718.380    -718.380
##   Scaling correction factor                                  1.020
##     for the MLR correction
## 
##   Number of free parameters                         42          42
##   Akaike (AIC)                                1602.961    1602.961
##   Bayesian (BIC)                              1714.428    1714.428
##   Sample-size adjusted Bayesian (BIC)         1581.742    1581.742
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.025       0.025
##   90 Percent Confidence Interval          0.000  0.062       0.000  0.062
##   P-value RMSEA <= 0.05                          0.837       0.842
## 
##   Robust RMSEA                                               0.025
##   90 Percent Confidence Interval                             0.000  0.062
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.063       0.063
## 
## Parameter Estimates:
## 
##   Information                                 Observed
##   Standard Errors                   Robust.huber.white
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   model2 =~                                                             
##     X4                1.000                               0.278    0.567
##     X17               0.855    0.232    3.679    0.000    0.237    0.475
##     X3                0.494    0.180    2.740    0.006    0.137    0.382
##     X5                0.523    0.204    2.562    0.010    0.145    0.325
##     X8                0.741    0.234    3.172    0.002    0.206    0.413
##     X11               0.784    0.228    3.446    0.001    0.218    0.436
##     X12               0.390    0.181    2.157    0.031    0.108    0.301
##     X13               0.772    0.248    3.111    0.002    0.214    0.546
##     X15               0.592    0.170    3.489    0.000    0.164    0.372
##     X1                0.349    0.163    2.141    0.032    0.097    0.247
##     X18               0.668    0.239    2.800    0.005    0.185    0.419
##     X19               0.462    0.184    2.510    0.012    0.128    0.459
##     X22               0.556    0.159    3.506    0.000    0.154    0.345
##     X25               0.703    0.226    3.115    0.002    0.195    0.411
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.600    0.048   12.550    0.000    0.600    1.225
##    .X17               0.476    0.049    9.770    0.000    0.476    0.953
##    .X3                0.848    0.035   24.167    0.000    0.848    2.358
##    .X5                0.724    0.044   16.588    0.000    0.724    1.619
##    .X8                0.543    0.049   11.166    0.000    0.543    1.090
##    .X11               0.533    0.049   10.954    0.000    0.533    1.069
##    .X12               0.848    0.035   24.167    0.000    0.848    2.358
##    .X13               0.810    0.038   21.125    0.000    0.810    2.062
##    .X15               0.267    0.043    6.179    0.000    0.267    0.603
##    .X1                0.810    0.038   21.125    0.000    0.810    2.062
##    .X18               0.733    0.043   16.993    0.000    0.733    1.658
##    .X19               0.914    0.027   33.466    0.000    0.914    3.266
##    .X22               0.276    0.044    6.330    0.000    0.276    0.618
##    .X25               0.657    0.046   14.186    0.000    0.657    1.384
##     model2            0.000                               0.000    0.000
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.163    0.027    6.123    0.000    0.163    0.679
##    .X17               0.193    0.022    8.722    0.000    0.193    0.774
##    .X3                0.110    0.021    5.173    0.000    0.110    0.854
##    .X5                0.179    0.022    8.285    0.000    0.179    0.895
##    .X8                0.206    0.024    8.582    0.000    0.206    0.829
##    .X11               0.201    0.022    9.267    0.000    0.201    0.809
##    .X12               0.117    0.023    5.212    0.000    0.117    0.909
##    .X13               0.108    0.022    4.839    0.000    0.108    0.702
##    .X15               0.169    0.018    9.257    0.000    0.169    0.862
##    .X1                0.145    0.022    6.466    0.000    0.145    0.939
##    .X18               0.161    0.025    6.573    0.000    0.161    0.824
##    .X19               0.062    0.016    3.890    0.000    0.062    0.790
##    .X22               0.176    0.018    9.735    0.000    0.176    0.881
##    .X25               0.187    0.023    8.086    0.000    0.187    0.831
##     model2            0.077    0.025    3.042    0.002    1.000    1.000
## 
## R-Square:
##                    Estimate
##     X4                0.321
##     X17               0.226
##     X3                0.146
##     X5                0.105
##     X8                0.171
##     X11               0.191
##     X12               0.091
##     X13               0.298
##     X15               0.138
##     X1                0.061
##     X18               0.176
##     X19               0.210
##     X22               0.119
##     X25               0.169
ci.reliability(data = responses[,c( -2, -6, -7, -9, -10, -14, -16, -20, -21, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7327189
## 
## $se
## [1] 0.03466816
## 
## $ci.lower
## [1] 0.6617799
## 
## $ci.upper
## [1] 0.7965369
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 3: the same 14 items treated as ordered data (WLSMV)
one.model = "model3 =~ X19 + X4 + X17 + X3 + X5 + X8 + X11 + X12 + X13 + X15 + X1 + X18 + X22 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  18 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic               50.580      71.771
##   Degrees of freedom                                77          77
##   P-value (Chi-square)                           0.991       0.647
##   Scaling correction factor                                  1.079
##   Shift parameter                                           24.916
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              409.067     284.767
##   Degrees of freedom                                91          91
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.098       1.032
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.048
##   P-value RMSEA <= 0.05                          1.000       0.961
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.117       0.117
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.694       0.694
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   model3 =~                                                             
##     X19               1.000                               0.771    0.771
##     X4                0.956    0.234    4.085    0.000    0.737    0.737
##     X17               0.812    0.209    3.883    0.000    0.626    0.626
##     X3                0.711    0.238    2.994    0.003    0.548    0.548
##     X5                0.574    0.220    2.609    0.009    0.442    0.442
##     X8                0.688    0.228    3.013    0.003    0.530    0.530
##     X11               0.718    0.199    3.612    0.000    0.554    0.554
##     X12               0.595    0.232    2.559    0.010    0.459    0.459
##     X13               0.965    0.249    3.880    0.000    0.744    0.744
##     X15               0.677    0.206    3.279    0.001    0.522    0.522
##     X1                0.476    0.203    2.346    0.019    0.367    0.367
##     X18               0.713    0.204    3.503    0.000    0.550    0.550
##     X22               0.650    0.195    3.339    0.001    0.501    0.501
##     X25               0.700    0.209    3.350    0.001    0.540    0.540
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X19               0.000                               0.000    0.000
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X3                0.000                               0.000    0.000
##    .X5                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X12               0.000                               0.000    0.000
##    .X13               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X1                0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X22               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     model3            0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X19|t1           -1.368    0.175   -7.801    0.000   -1.368   -1.368
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X3|t1            -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X5|t1            -0.594    0.131   -4.532    0.000   -0.594   -0.594
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X12|t1           -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X1|t1            -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X22|t1            0.594    0.131    4.532    0.000    0.594    0.594
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X19               0.405                               0.405    0.405
##    .X4                0.457                               0.457    0.457
##    .X17               0.608                               0.608    0.608
##    .X3                0.699                               0.699    0.699
##    .X5                0.804                               0.804    0.804
##    .X8                0.719                               0.719    0.719
##    .X11               0.693                               0.693    0.693
##    .X12               0.790                               0.790    0.790
##    .X13               0.447                               0.447    0.447
##    .X15               0.728                               0.728    0.728
##    .X1                0.865                               0.865    0.865
##    .X18               0.698                               0.698    0.698
##    .X22               0.749                               0.749    0.749
##    .X25               0.709                               0.709    0.709
##     model3            0.595    0.252    2.356    0.018    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X19               1.000                               1.000    1.000
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X3                1.000                               1.000    1.000
##     X5                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X12               1.000                               1.000    1.000
##     X13               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X1                1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X22               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X19               0.595
##     X4                0.543
##     X17               0.392
##     X3                0.301
##     X5                0.196
##     X8                0.281
##     X11               0.307
##     X12               0.210
##     X13               0.553
##     X15               0.272
##     X1                0.135
##     X18               0.302
##     X22               0.251
##     X25               0.291
ci.reliability(data = responses[,c( -2, -6, -7, -9, -10, -14, -16, -20, -21, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7327189
## 
## $se
## [1] 0.03492094
## 
## $ci.lower
## [1] 0.6586863
## 
## $ci.upper
## [1] 0.7933485
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 4: 13-item model (X19 dropped), WLSMV

one.model = "science =~ X4 + X17 + X3 + X5 + X8 + X11 + X12 + X13 + X15 + X1 + X18 + X22 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")
semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  19 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic               43.101      61.022
##   Degrees of freedom                                65          65
##   P-value (Chi-square)                           0.983       0.617
##   Scaling correction factor                                  1.043
##   Shift parameter                                           19.686
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              335.008     243.611
##   Degrees of freedom                                78          78
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.102       1.029
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.052
##   P-value RMSEA <= 0.05                          1.000       0.942
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.111       0.111
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.688       0.688
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   science =~                                                            
##     X4                1.000                               0.717    0.717
##     X17               0.857    0.194    4.418    0.000    0.615    0.615
##     X3                0.780    0.216    3.609    0.000    0.560    0.560
##     X5                0.656    0.210    3.130    0.002    0.471    0.471
##     X8                0.781    0.211    3.703    0.000    0.560    0.560
##     X11               0.775    0.204    3.796    0.000    0.556    0.556
##     X12               0.644    0.257    2.508    0.012    0.462    0.462
##     X13               0.996    0.222    4.493    0.000    0.715    0.715
##     X15               0.718    0.207    3.471    0.001    0.515    0.515
##     X1                0.542    0.195    2.772    0.006    0.389    0.389
##     X18               0.736    0.220    3.355    0.001    0.528    0.528
##     X22               0.686    0.163    4.200    0.000    0.492    0.492
##     X25               0.796    0.198    4.018    0.000    0.571    0.571
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X3                0.000                               0.000    0.000
##    .X5                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X12               0.000                               0.000    0.000
##    .X13               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X1                0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X22               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     science           0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X3|t1            -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X5|t1            -0.594    0.131   -4.532    0.000   -0.594   -0.594
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X12|t1           -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X1|t1            -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X22|t1            0.594    0.131    4.532    0.000    0.594    0.594
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.485                               0.485    0.485
##    .X17               0.622                               0.622    0.622
##    .X3                0.687                               0.687    0.687
##    .X5                0.778                               0.778    0.778
##    .X8                0.686                               0.686    0.686
##    .X11               0.691                               0.691    0.691
##    .X12               0.787                               0.787    0.787
##    .X13               0.489                               0.489    0.489
##    .X15               0.735                               0.735    0.735
##    .X1                0.849                               0.849    0.849
##    .X18               0.721                               0.721    0.721
##    .X22               0.758                               0.758    0.758
##    .X25               0.674                               0.674    0.674
##     science           0.515    0.149    3.463    0.001    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X3                1.000                               1.000    1.000
##     X5                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X12               1.000                               1.000    1.000
##     X13               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X1                1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X22               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X4                0.515
##     X17               0.378
##     X3                0.313
##     X5                0.222
##     X8                0.314
##     X11               0.309
##     X12               0.213
##     X13               0.511
##     X15               0.265
##     X1                0.151
##     X18               0.279
##     X22               0.242
##     X25               0.326
#Reliability
ci.reliability(data = responses[,c( -1, -2, -6, -7, -9, -10, -14, -16, -19, -20, -21, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7173688
## 
## $se
## [1] 0.03631823
## 
## $ci.lower
## [1] 0.6360684
## 
## $ci.upper
## [1] 0.7804222
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 5: 10-item model (X1, X12 and X22 also dropped)

one.model = "science =~ X13 + X4 + X17 + X3 + X5 + X8 + X11 + X15 + X18 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  17 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic               19.319      29.678
##   Degrees of freedom                                35          35
##   P-value (Chi-square)                           0.985       0.723
##   Scaling correction factor                                  0.847
##   Shift parameter                                            6.875
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              241.782     187.314
##   Degrees of freedom                                45          45
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.102       1.048
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.054
##   P-value RMSEA <= 0.05                          0.999       0.934
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.094       0.094
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.593       0.593
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   science =~                                                            
##     X13               1.000                               0.732    0.732
##     X4                0.904    0.225    4.017    0.000    0.662    0.662
##     X17               0.782    0.208    3.763    0.000    0.572    0.572
##     X3                0.829    0.209    3.959    0.000    0.607    0.607
##     X5                0.707    0.241    2.930    0.003    0.517    0.517
##     X8                0.814    0.238    3.415    0.001    0.596    0.596
##     X11               0.791    0.201    3.934    0.000    0.579    0.579
##     X15               0.721    0.221    3.262    0.001    0.528    0.528
##     X18               0.780    0.242    3.218    0.001    0.571    0.571
##     X25               0.678    0.212    3.200    0.001    0.496    0.496
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.000                               0.000    0.000
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X3                0.000                               0.000    0.000
##    .X5                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     science           0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X3|t1            -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X5|t1            -0.594    0.131   -4.532    0.000   -0.594   -0.594
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.464                               0.464    0.464
##    .X4                0.562                               0.562    0.562
##    .X17               0.672                               0.672    0.672
##    .X3                0.631                               0.631    0.631
##    .X5                0.732                               0.732    0.732
##    .X8                0.645                               0.645    0.645
##    .X11               0.665                               0.665    0.665
##    .X15               0.721                               0.721    0.721
##    .X18               0.674                               0.674    0.674
##    .X25               0.754                               0.754    0.754
##     science           0.536    0.185    2.899    0.004    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13               1.000                               1.000    1.000
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X3                1.000                               1.000    1.000
##     X5                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X13               0.536
##     X4                0.438
##     X17               0.328
##     X3                0.369
##     X5                0.268
##     X8                0.355
##     X11               0.335
##     X15               0.279
##     X18               0.326
##     X25               0.246
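#No reliability interval was computed for this 10-item version; the analogous
#call would be (sketch, not run, assuming the columns are X1-X25 in order as above)
ci.reliability(data = responses[, c(-1, -2, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
               type = "1", interval.type = "bca", B = 1000)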
#Test 6: 8-item model (X3 and X5 also dropped)

one.model = "science =~ X13 + X4 + X8 + X11 + X15 + X17 + X18 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")
lavInspect(one.fit, "zero.cell.tables")
## (There are no tables with empty cells for this fitted model)
semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  18 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic                8.897      14.259
##   Degrees of freedom                                20          20
##   P-value (Chi-square)                           0.984       0.817
##   Scaling correction factor                                  0.744
##   Shift parameter                                            2.305
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              163.980     135.304
##   Degrees of freedom                                28          28
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.114       1.075
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.053
##   P-value RMSEA <= 0.05                          0.997       0.941
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.074       0.074
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.497       0.497
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   science =~                                                            
##     X13               1.000                               0.737    0.737
##     X4                0.796    0.246    3.236    0.001    0.587    0.587
##     X8                0.793    0.252    3.152    0.002    0.585    0.585
##     X11               0.754    0.221    3.411    0.001    0.556    0.556
##     X15               0.767    0.235    3.265    0.001    0.565    0.565
##     X17               0.848    0.244    3.477    0.001    0.625    0.625
##     X18               0.817    0.272    3.008    0.003    0.602    0.602
##     X25               0.692    0.237    2.921    0.003    0.510    0.510
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.000                               0.000    0.000
##    .X4                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     science           0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.457                               0.457    0.457
##    .X4                0.656                               0.656    0.656
##    .X8                0.658                               0.658    0.658
##    .X11               0.691                               0.691    0.691
##    .X15               0.680                               0.680    0.680
##    .X17               0.609                               0.609    0.609
##    .X18               0.637                               0.637    0.637
##    .X25               0.740                               0.740    0.740
##     science           0.543    0.228    2.386    0.017    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13               1.000                               1.000    1.000
##     X4                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X13               0.543
##     X4                0.344
##     X8                0.342
##     X11               0.309
##     X15               0.320
##     X17               0.391
##     X18               0.363
##     X25               0.260
ci.reliability(data = responses[,c( -1, -2, -3, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.6730051
## 
## $se
## [1] 0.04915866
## 
## $ci.lower
## [1] 0.5587039
## 
## $ci.upper
## [1] 0.74821
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#IRT models (ltm) for the 8 retained items
#2PL model
IRTmodel = ltm(ctt.data[,c( -1, -2, -3, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)] ~z1, IRT.param = TRUE)
summary(IRTmodel)
## 
## Call:
## ltm(formula = ctt.data[, c(-1, -2, -3, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)] ~ z1, IRT.param = TRUE)
## 
## Model Summary:
##    log.Lik      AIC      BIC
##  -491.1438 1014.288 1056.751
## 
## Coefficients:
##              value std.err  z.vals
## Dffclt.X4  -0.4314  0.2375 -1.8159
## Dffclt.X8  -0.1908  0.2332 -0.8181
## Dffclt.X11 -0.1449  0.2286 -0.6340
## Dffclt.X13 -1.2381  0.2961 -4.1811
## Dffclt.X15  1.0464  0.3343  3.1299
## Dffclt.X17  0.1054  0.1918  0.5499
## Dffclt.X18 -1.0831  0.3410 -3.1767
## Dffclt.X25 -0.7916  0.3266 -2.4237
## Dscrmn.X4   1.1740  0.3955  2.9680
## Dscrmn.X8   1.0646  0.3783  2.8142
## Dscrmn.X11  1.0779  0.3766  2.8622
## Dscrmn.X13  1.7310  0.6037  2.8672
## Dscrmn.X15  1.2587  0.4899  2.5691
## Dscrmn.X17  1.3977  0.4707  2.9692
## Dscrmn.X18  1.1757  0.4067  2.8909
## Dscrmn.X25  0.9769  0.3524  2.7722
## 
## Integration:
## method: Gauss-Hermite
## quadrature points: 21 
## 
## Optimization:
## Convergence: 0 
## max(|grad|): <1e-06 
## quasi-Newton: BFGS
item.fit(IRTmodel) #we want the chi-square to be non-significant, though the test is sensitive to sample size
## 
## Item-Fit Statistics and P-values
## 
## Call:
## ltm(formula = ctt.data[, c(-1, -2, -3, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)] ~ z1, IRT.param = TRUE)
## 
## Alternative: Items do not fit the model
## Ability Categories: 10
## 
##         X^2 Pr(>X^2)
## X4   8.5255   0.3839
## X8  16.9856   0.0303
## X11 14.7289   0.0646
## X13 10.0286    0.263
## X15 15.0775   0.0577
## X17 27.3406   0.0006
## X18  9.3359   0.3148
## X25 12.3213   0.1374
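#Item characteristic and test information curves for the 2PL model (sketch,
#not run above; in plot.ltm, type = "IIC" with items = 0 plots the whole-test
#information)
plot(IRTmodel, type = "ICC")
plot(IRTmodel, type = "IIC", items = 0)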
#3PL model
IRTmodel2 = tpm(data = ctt.data[,c( -1, -2, -3, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                type = "latent.trait", IRT.param = TRUE)
summary(IRTmodel2)
## 
## Call:
## tpm(data = ctt.data[, c(-1, -2, -3, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)], type = "latent.trait", 
##     IRT.param = TRUE)
## 
## Model Summary:
##    log.Lik      AIC      BIC
##  -490.6555 1029.311 1093.006
## 
## Coefficients:
##              value std.err  z.vals
## Gussng.X4   0.2792  0.1747  1.5986
## Gussng.X8   0.0000  0.0027  0.0084
## Gussng.X11  0.0001  0.0040  0.0124
## Gussng.X13  0.0020  0.0432  0.0472
## Gussng.X15  0.0000  0.0010  0.0049
## Gussng.X17  0.0827  0.2306  0.3587
## Gussng.X18  0.0009  0.0233  0.0385
## Gussng.X25  0.2285  0.4172  0.5476
## Dffclt.X4   0.1980  0.3980  0.4974
## Dffclt.X8  -0.1873  0.2332 -0.8031
## Dffclt.X11 -0.1457  0.2336 -0.6237
## Dffclt.X13 -1.2280  0.2988 -4.1095
## Dffclt.X15  1.0213  0.3146  3.2461
## Dffclt.X17  0.2718  0.4718  0.5762
## Dffclt.X18 -1.0476  0.3255 -3.2183
## Dffclt.X25 -0.2137  1.1162 -0.1914
## Dscrmn.X4   2.3637  2.0506  1.1527
## Dscrmn.X8   1.0653  0.3706  2.8745
## Dscrmn.X11  1.0475  0.3705  2.8277
## Dscrmn.X13  1.7487  0.6076  2.8778
## Dscrmn.X15  1.3123  0.4942  2.6555
## Dscrmn.X17  1.6982  1.3495  1.2584
## Dscrmn.X18  1.2316  0.4153  2.9659
## Dscrmn.X25  1.2984  1.0722  1.2110
## 
## Integration:
## method: Gauss-Hermite
## quadrature points: 21 
## 
## Optimization:
## Optimizer: optim (BFGS)
## Convergence: 0 
## max(|grad|): 0.019
item.fit(IRTmodel2) #we want the chi-square to be non-significant, though the test is sensitive to sample size
## 
## Item-Fit Statistics and P-values
## 
## Call:
## tpm(data = ctt.data[, c(-1, -2, -3, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)], type = "latent.trait", 
##     IRT.param = TRUE)
## 
## Alternative: Items do not fit the model
## Ability Categories: 10
## 
##         X^2 Pr(>X^2)
## X4  15.5121     0.03
## X8  15.6803   0.0282
## X11 14.6777   0.0404
## X13 13.1008   0.0697
## X15  8.1619   0.3185
## X17 15.8600   0.0264
## X18 12.1948   0.0943
## X25 10.4904   0.1624
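#The 2PL already has the lower AIC and BIC (1014.3/1056.8 vs 1029.3/1093.0);
#a likelihood-ratio comparison of the two ltm fits would be (sketch, not run)
anova(IRTmodel, IRTmodel2)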
#Mirt 2PL on the same 8 items
mirtmodel2PL = mirt(data = ctt.data[,c( -1, -2, -3, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                    model = 1,
                    itemtype = "2PL")
## 
Iteration: 1, Log-Lik: -495.436, Max-Change: 0.38303
Iteration: 2, Log-Lik: -492.314, Max-Change: 0.21613
Iteration: 3, Log-Lik: -491.488, Max-Change: 0.12017
Iteration: 4, Log-Lik: -491.220, Max-Change: 0.05603
Iteration: 5, Log-Lik: -491.170, Max-Change: 0.03216
Iteration: 6, Log-Lik: -491.153, Max-Change: 0.01817
Iteration: 7, Log-Lik: -491.146, Max-Change: 0.00834
Iteration: 8, Log-Lik: -491.145, Max-Change: 0.00553
Iteration: 9, Log-Lik: -491.144, Max-Change: 0.00252
Iteration: 10, Log-Lik: -491.144, Max-Change: 0.00123
Iteration: 11, Log-Lik: -491.144, Max-Change: 0.00088
Iteration: 12, Log-Lik: -491.144, Max-Change: 0.00103
Iteration: 13, Log-Lik: -491.144, Max-Change: 0.00060
Iteration: 14, Log-Lik: -491.144, Max-Change: 0.00012
Iteration: 15, Log-Lik: -491.144, Max-Change: 0.00009
summary(mirtmodel2PL)
##        F1    h2
## X4  0.568 0.322
## X8  0.530 0.281
## X11 0.535 0.286
## X13 0.713 0.508
## X15 0.595 0.354
## X17 0.635 0.403
## X18 0.568 0.323
## X25 0.498 0.248
## 
## SS loadings:  2.725 
## Proportion Var:  0.341 
## 
## Factor correlations: 
## 
##    F1
## F1  1
itemfit(mirtmodel2PL) #item fit statistics
##   item   S_X2 df.S_X2 p.S_X2
## 1   X4  4.579       4  0.333
## 2   X8 11.403       4  0.022
## 3  X11  6.764       4  0.149
## 4  X13  1.765       3  0.623
## 5  X15  3.379       3  0.337
## 6  X17  3.581       4  0.466
## 7  X18  6.477       4  0.166
## 8  X25  2.149       5  0.828
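#For a direct comparison with the ltm estimates above, the mirt slopes and
#intercepts can be converted to difficulty/discrimination form (sketch)
coef(mirtmodel2PL, IRTpars = TRUE, simplify = TRUE)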
#Mirt 3PL (note that X8 is additionally excluded here)
mirtmodel3PL = mirt(data = ctt.data[,c( -1, -2, -3, -5, -6, -7, -8, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                    model = 1,
                    itemtype = "3PL")
## 
Iteration: 1, Log-Lik: -439.020, Max-Change: 1.97634
Iteration: 2, Log-Lik: -428.751, Max-Change: 0.68169
Iteration: 3, Log-Lik: -426.287, Max-Change: 0.32360
Iteration: 4, Log-Lik: -425.409, Max-Change: 1.20602
Iteration: 5, Log-Lik: -425.142, Max-Change: 8.95683
Iteration: 6, Log-Lik: -425.020, Max-Change: 0.12767
Iteration: 7, Log-Lik: -425.019, Max-Change: 0.12727
Iteration: 8, Log-Lik: -424.974, Max-Change: 0.10903
Iteration: 9, Log-Lik: -424.948, Max-Change: 0.09775
Iteration: 10, Log-Lik: -424.918, Max-Change: 0.03685
Iteration: 11, Log-Lik: -424.903, Max-Change: 0.02437
Iteration: 12, Log-Lik: -424.901, Max-Change: 0.02087
Iteration: 13, Log-Lik: -424.900, Max-Change: 0.01340
Iteration: 14, Log-Lik: -424.900, Max-Change: 0.00242
Iteration: 15, Log-Lik: -424.900, Max-Change: 0.01014
Iteration: 16, Log-Lik: -424.899, Max-Change: 0.00898
Iteration: 17, Log-Lik: -424.899, Max-Change: 0.00240
Iteration: 18, Log-Lik: -424.899, Max-Change: 0.00142
Iteration: 19, Log-Lik: -424.899, Max-Change: 0.00715
Iteration: 20, Log-Lik: -424.899, Max-Change: 0.00119
Iteration: 21, Log-Lik: -424.899, Max-Change: 0.00049
Iteration: 22, Log-Lik: -424.899, Max-Change: 0.00035
Iteration: 23, Log-Lik: -424.899, Max-Change: 0.00101
Iteration: 24, Log-Lik: -424.899, Max-Change: 0.00099
Iteration: 25, Log-Lik: -424.899, Max-Change: 0.00182
Iteration: 26, Log-Lik: -424.899, Max-Change: 0.00023
Iteration: 27, Log-Lik: -424.899, Max-Change: 0.00087
Iteration: 28, Log-Lik: -424.899, Max-Change: 0.00025
Iteration: 29, Log-Lik: -424.899, Max-Change: 0.00080
Iteration: 30, Log-Lik: -424.899, Max-Change: 0.00019
Iteration: 31, Log-Lik: -424.899, Max-Change: 0.00085
Iteration: 32, Log-Lik: -424.899, Max-Change: 0.00028
Iteration: 33, Log-Lik: -424.899, Max-Change: 0.00084
Iteration: 34, Log-Lik: -424.899, Max-Change: 0.00123
Iteration: 35, Log-Lik: -424.899, Max-Change: 0.00019
Iteration: 36, Log-Lik: -424.899, Max-Change: 0.00073
Iteration: 37, Log-Lik: -424.899, Max-Change: 0.00020
Iteration: 38, Log-Lik: -424.899, Max-Change: 0.00070
Iteration: 39, Log-Lik: -424.899, Max-Change: 0.00017
Iteration: 40, Log-Lik: -424.899, Max-Change: 0.00016
Iteration: 41, Log-Lik: -424.899, Max-Change: 0.00062
Iteration: 42, Log-Lik: -424.899, Max-Change: 0.00068
Iteration: 43, Log-Lik: -424.899, Max-Change: 0.00018
Iteration: 44, Log-Lik: -424.899, Max-Change: 0.00064
Iteration: 45, Log-Lik: -424.899, Max-Change: 0.00014
Iteration: 46, Log-Lik: -424.899, Max-Change: 0.00013
Iteration: 47, Log-Lik: -424.899, Max-Change: 0.00063
Iteration: 48, Log-Lik: -424.899, Max-Change: 0.00065
Iteration: 49, Log-Lik: -424.899, Max-Change: 0.00017
Iteration: 50, Log-Lik: -424.899, Max-Change: 0.00061
Iteration: 51, Log-Lik: -424.899, Max-Change: 0.00014
Iteration: 52, Log-Lik: -424.899, Max-Change: 0.00013
Iteration: 53, Log-Lik: -424.899, Max-Change: 0.00055
Iteration: 54, Log-Lik: -424.899, Max-Change: 0.00059
Iteration: 55, Log-Lik: -424.899, Max-Change: 0.00018
Iteration: 56, Log-Lik: -424.899, Max-Change: 0.00053
Iteration: 57, Log-Lik: -424.899, Max-Change: 0.00013
Iteration: 58, Log-Lik: -424.899, Max-Change: 0.00011
Iteration: 59, Log-Lik: -424.899, Max-Change: 0.00055
Iteration: 60, Log-Lik: -424.899, Max-Change: 0.00053
Iteration: 61, Log-Lik: -424.899, Max-Change: 0.00019
Iteration: 62, Log-Lik: -424.899, Max-Change: 0.00053
Iteration: 63, Log-Lik: -424.899, Max-Change: 0.00014
Iteration: 64, Log-Lik: -424.899, Max-Change: 0.00011
Iteration: 65, Log-Lik: -424.899, Max-Change: 0.00049
Iteration: 66, Log-Lik: -424.899, Max-Change: 0.00051
Iteration: 67, Log-Lik: -424.899, Max-Change: 0.00020
Iteration: 68, Log-Lik: -424.899, Max-Change: 0.00047
Iteration: 69, Log-Lik: -424.899, Max-Change: 0.00014
Iteration: 70, Log-Lik: -424.899, Max-Change: 0.00011
Iteration: 71, Log-Lik: -424.899, Max-Change: 0.00048
Iteration: 72, Log-Lik: -424.899, Max-Change: 0.00045
Iteration: 73, Log-Lik: -424.899, Max-Change: 0.00020
Iteration: 74, Log-Lik: -424.899, Max-Change: 0.00046
Iteration: 75, Log-Lik: -424.899, Max-Change: 0.00015
Iteration: 76, Log-Lik: -424.899, Max-Change: 0.00012
Iteration: 77, Log-Lik: -424.899, Max-Change: 0.00044
Iteration: 78, Log-Lik: -424.899, Max-Change: 0.00009
itemfit(mirtmodel3PL) #item fit statistics
##   item  S_X2 df.S_X2 p.S_X2
## 1   X4 3.744       3  0.291
## 2  X11 2.111       2  0.348
## 3  X13 0.171       1  0.679
## 4  X15 0.303       1  0.582
## 5  X17 2.235       2  0.327
## 6  X18 6.701       2  0.035
## 7  X25 0.715       3  0.870
#compare models & get relative fit statistics; anova() is meant only for model comparison. Note that the 2PL and 3PL runs above were fitted to slightly different item sets, so the comparison is only indicative.
anova(mirtmodel2PL, mirtmodel3PL)
## 
## Model 1: mirt(data = ctt.data[, c(-1, -2, -3, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)], model = 1, itemtype = "2PL")
## Model 2: mirt(data = ctt.data[, c(-1, -2, -3, -5, -6, -7, -8, -9, -10, 
##     -12, -14, -16, -19, -20, -21, -22, -23, -24)], model = 1, 
##     itemtype = "3PL")
##        AIC     AICc    SABIC      BIC   logLik      X2  df     p
## 1 1014.288 1020.469 1006.204 1056.751 -491.144     NaN NaN   NaN
## 2  891.798  902.931  881.188  947.531 -424.899 132.489 133 0.496
#2PL model2
IRTmodel = ltm(ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)] ~z1, IRT.param = TRUE)
summary(IRTmodel)
## 
## Call:
## ltm(formula = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)] ~ z1, IRT.param = TRUE)
## 
## Model Summary:
##    log.Lik      AIC      BIC
##  -530.7005 1097.401 1145.172
## 
## Coefficients:
##              value std.err  z.vals
## Dffclt.X3  -1.7661  0.5142 -3.4349
## Dffclt.X4  -0.4065  0.2198 -1.8490
## Dffclt.X8  -0.1993  0.2405 -0.8286
## Dffclt.X11 -0.1528  0.2369 -0.6450
## Dffclt.X13 -1.1737  0.2652 -4.4257
## Dffclt.X15  1.0405  0.3261  3.1910
## Dffclt.X17  0.1079  0.2040  0.5291
## Dffclt.X18 -1.1370  0.3664 -3.1032
## Dffclt.X25 -0.7946  0.3242 -2.4511
## Dscrmn.X3   1.2285  0.4743  2.5903
## Dscrmn.X4   1.2973  0.4222  3.0730
## Dscrmn.X8   1.0177  0.3590  2.8346
## Dscrmn.X11  1.0228  0.3566  2.8683
## Dscrmn.X13  1.9609  0.6981  2.8090
## Dscrmn.X15  1.2706  0.4802  2.6462
## Dscrmn.X17  1.2602  0.4172  3.0202
## Dscrmn.X18  1.0954  0.3826  2.8630
## Dscrmn.X25  0.9747  0.3448  2.8266
## 
## Integration:
## method: Gauss-Hermite
## quadrature points: 21 
## 
## Optimization:
## Convergence: 0 
## max(|grad|): 1.1e-06 
## quasi-Newton: BFGS
knitr::kable(coef(IRTmodel), 
             align = "c",
             caption = "")
|     |   Dffclt   |  Dscrmn   |
|:----|:----------:|:---------:|
| X3  | -1.7661474 | 1.2284639 |
| X4  | -0.4064987 | 1.2972918 |
| X8  | -0.1992840 | 1.0176513 |
| X11 | -0.1528179 | 1.0228019 |
| X13 | -1.1737363 | 1.9608996 |
| X15 |  1.0404783 | 1.2706484 |
| X17 |  0.1079196 | 1.2601894 |
| X18 | -1.1370237 | 1.0953515 |
| X25 | -0.7946384 | 0.9746607 |
plot(IRTmodel, type = "ICC") #add the argument items = ... to plot one item at a time

plot(IRTmodel, type = "IIC", items = 0) #test information function

#person.fit(IRTmodel)
item.fit(IRTmodel) #ideally the chi^2 values are non-significant (items fit the model), but significance also depends on sample size
## 
## Item-Fit Statistics and P-values
## 
## Call:
## ltm(formula = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)] ~ z1, IRT.param = TRUE)
## 
## Alternative: Items do not fit the model
## Ability Categories: 10
## 
##         X^2 Pr(>X^2)
## X3   8.4967   0.3865
## X4  11.6202    0.169
## X8  10.4632    0.234
## X11 24.9142   0.0016
## X13 10.2793    0.246
## X15 12.1137   0.1462
## X17 15.6537   0.0476
## X18  6.6198   0.5782
## X25  5.0104   0.7565
#3PL model2
IRTmodel2 = tpm(data = ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                type = "latent.trait", IRT.param = TRUE)
summary(IRTmodel2) #the summary is EFA
## 
## Call:
## tpm(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -19, -20, -21, -22, -23, -24)], type = "latent.trait", 
##     IRT.param = TRUE)
## 
## Model Summary:
##    log.Lik      AIC      BIC
##  -530.2731 1114.546 1186.203
## 
## Coefficients:
##              value std.err  z.vals
## Gussng.X3   0.5849  0.2163  2.7039
## Gussng.X4   0.2294  0.2106  1.0895
## Gussng.X8   0.0000  0.0003  0.0008
## Gussng.X11  0.0000  0.0006  0.0019
## Gussng.X13  0.0009  0.0415  0.0228
## Gussng.X15  0.0000  0.0001  0.0002
## Gussng.X17  0.0101     NaN     NaN
## Gussng.X18  0.0003  0.0155  0.0216
## Gussng.X25  0.2061  0.4543  0.4536
## Dffclt.X3  -0.3830  0.7117 -0.5382
## Dffclt.X4   0.0839  0.4700  0.1785
## Dffclt.X8  -0.1960  0.2407 -0.8141
## Dffclt.X11 -0.1499  0.2374 -0.6312
## Dffclt.X13 -1.1339  0.2623 -4.3220
## Dffclt.X15  1.0228  0.3129  3.2686
## Dffclt.X17  0.1339     NaN     NaN
## Dffclt.X18 -1.1065  0.3508 -3.1540
## Dffclt.X25 -0.2787  1.2250 -0.2275
## Dscrmn.X3   2.6375  2.8048  0.9404
## Dscrmn.X4   2.0721  1.6706  1.2404
## Dscrmn.X8   1.0173  0.3560  2.8577
## Dscrmn.X11  1.0196  0.3482  2.9283
## Dscrmn.X13  2.1105  0.7956  2.6526
## Dscrmn.X15  1.3077  0.4823  2.7113
## Dscrmn.X17  1.2540     NaN     NaN
## Dscrmn.X18  1.1352  0.3846  2.9519
## Dscrmn.X25  1.2207  1.0156  1.2019
## 
## Integration:
## method: Gauss-Hermite
## quadrature points: 21 
## 
## Optimization:
## Optimizer: optim (BFGS)
## Convergence: 0 
## max(|grad|): 0.0074
knitr::kable(coef(IRTmodel2), 
             align = "c",
             caption = "")
|     |  Gussng   |   Dffclt   |  Dscrmn  |
|:----|:---------:|:----------:|:--------:|
| X3  | 0.5848557 | -0.3830114 | 2.637543 |
| X4  | 0.2294450 |  0.0838892 | 2.072131 |
| X8  | 0.0000003 | -0.1960008 | 1.017329 |
| X11 | 0.0000012 | -0.1498838 | 1.019645 |
| X13 | 0.0009450 | -1.1338687 | 2.110485 |
| X15 | 0.0000000 |  1.0228345 | 1.307739 |
| X17 | 0.0100587 |  0.1339429 | 1.254030 |
| X18 | 0.0003351 | -1.1064647 | 1.135187 |
| X25 | 0.2060903 | -0.2786587 | 1.220691 |
plot(IRTmodel2, type = "ICC")

plot(IRTmodel2, type = "IIC", items = 0) #test information function

#person.fit(IRTmodel2) #check for outliers in the model
item.fit(IRTmodel2) #ideally the chi^2 values are non-significant (items fit the model), but significance also depends on sample size
## 
## Item-Fit Statistics and P-values
## 
## Call:
## tpm(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -19, -20, -21, -22, -23, -24)], type = "latent.trait", 
##     IRT.param = TRUE)
## 
## Alternative: Items do not fit the model
## Ability Categories: 10
## 
##         X^2 Pr(>X^2)
## X3  11.9780   0.1013
## X4   9.8336   0.1982
## X8   9.7231   0.2048
## X11 21.5158   0.0031
## X13  9.2195   0.2373
## X15  8.8556   0.2632
## X17 13.9501   0.0521
## X18  9.9985   0.1887
## X25  4.5557    0.714
#Mirt 2PL "2"
mirtmodel2PL2 = mirt(data = ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -20, -21, -22, -23, -24)],
                 model = 1,
                 itemtype = "2PL")
## 
Iteration: 1, Log-Lik: -560.916, Max-Change: 0.51792
Iteration: 2, Log-Lik: -555.560, Max-Change: 0.30790
Iteration: 3, Log-Lik: -554.165, Max-Change: 0.17614
Iteration: 4, Log-Lik: -553.680, Max-Change: 0.08927
Iteration: 5, Log-Lik: -553.575, Max-Change: 0.05189
Iteration: 6, Log-Lik: -553.537, Max-Change: 0.03062
Iteration: 7, Log-Lik: -553.519, Max-Change: 0.00991
Iteration: 8, Log-Lik: -553.516, Max-Change: 0.01401
Iteration: 9, Log-Lik: -553.514, Max-Change: 0.00485
Iteration: 10, Log-Lik: -553.514, Max-Change: 0.00454
Iteration: 11, Log-Lik: -553.513, Max-Change: 0.00553
Iteration: 12, Log-Lik: -553.513, Max-Change: 0.00120
Iteration: 13, Log-Lik: -553.513, Max-Change: 0.00106
Iteration: 14, Log-Lik: -553.513, Max-Change: 0.00075
Iteration: 15, Log-Lik: -553.513, Max-Change: 0.00057
Iteration: 16, Log-Lik: -553.513, Max-Change: 0.00010
# -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -20, -21, -22, -23, -24


reliability(responses[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -20, -21, -22, -23, -24)])
## 
##  Number of Items 
##  10 
## 
##  Number of Examinees 
##  105 
## 
##  Coefficient Alpha 
##  0.707
summary(mirtmodel2PL2)
##        F1    h2
## X3  0.558 0.312
## X4  0.635 0.404
## X8  0.469 0.220
## X11 0.511 0.261
## X13 0.765 0.585
## X15 0.637 0.405
## X17 0.614 0.377
## X18 0.549 0.302
## X19 0.776 0.602
## X25 0.464 0.215
## 
## SS loadings:  3.682 
## Proportion Var:  0.368 
## 
## Factor correlations: 
## 
##    F1
## F1  1
coef(mirtmodel2PL2, IRTpars = T) #coefficients (IRT parameterization)
## $X3
##         a      b g u
## par 1.146 -1.846 0 1
## 
## $X4
##       a      b g u
## par 1.4 -0.384 0 1
## 
## $X8
##         a      b g u
## par 0.903 -0.217 0 1
## 
## $X11
##         a      b g u
## par 1.011 -0.152 0 1
## 
## $X13
##        a      b g u
## par 2.02 -1.151 0 1
## 
## $X15
##         a    b g u
## par 1.405 0.98 0 1
## 
## $X17
##         a     b g u
## par 1.324 0.108 0 1
## 
## $X18
##         a      b g u
## par 1.119 -1.117 0 1
## 
## $X19
##         a      b g u
## par 2.095 -1.811 0 1
## 
## $X25
##         a      b g u
## par 0.892 -0.846 0 1
## 
## $GroupPars
##     MEAN_1 COV_11
## par      0      1
plot(mirtmodel2PL2, type = "trace") #curves for all items

plot(mirtmodel2PL2, type = "info") #test information curve; information of about 10 corresponds to a conditional reliability of roughly 0.9 (reliability is about 1 - 1/information)

plot(mirtmodel2PL2) #expected total score

itemfit(mirtmodel2PL2) #item fit statistics
##    item   S_X2 df.S_X2 p.S_X2
## 1    X3  3.916       5  0.562
## 2    X4  4.502       4  0.342
## 3    X8  9.452       5  0.092
## 4   X11  4.871       5  0.432
## 5   X13  4.696       3  0.195
## 6   X15  2.152       3  0.542
## 7   X17  6.463       4  0.167
## 8   X18  7.299       5  0.199
## 9   X19  2.651       1  0.103
## 10  X25 11.222       6  0.082
M2(mirtmodel2PL2, residmat = TRUE) #residual matrix from the M2 statistic (a check for local dependence); omit residmat = TRUE for the global fit statistics
##               X3           X4            X8          X11         X13
## X3            NA           NA            NA           NA          NA
## X4   0.117765686           NA            NA           NA          NA
## X8   0.006720220  0.067684037            NA           NA          NA
## X11 -0.012586639 -0.004659849  0.1296609453           NA          NA
## X13  0.110943428 -0.082859907 -0.0161902426  0.006228264          NA
## X15 -0.005200261  0.042429551 -0.0579038255 -0.062650517 -0.02088310
## X17 -0.083442996 -0.061136324 -0.0449770401  0.070463527  0.01513583
## X18 -0.055685971 -0.047764488  0.1525376935 -0.045533730  0.06829452
## X19 -0.041835114  0.088638932 -0.0976506697  0.021305890  0.07418675
## X25  0.003788198 -0.002120361  0.0006134005 -0.025577498  0.05686918
##              X15        X17          X18         X19 X25
## X3            NA         NA           NA          NA  NA
## X4            NA         NA           NA          NA  NA
## X8            NA         NA           NA          NA  NA
## X11           NA         NA           NA          NA  NA
## X13           NA         NA           NA          NA  NA
## X15           NA         NA           NA          NA  NA
## X17  0.006227021         NA           NA          NA  NA
## X18 -0.054891440 0.02843321           NA          NA  NA
## X19  0.042322293 0.03585802  0.077025773          NA  NA
## X25  0.044706136 0.02437605 -0.001698038 -0.09986365  NA
#Mirt 3PL 2
mirtmodel3PL2 = mirt(data = ctt.data[,c( -1, -2, -5, -6, -7, -8, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                 model = 1,
                 itemtype = "3PL")
## 
Iteration: 1, Log-Lik: -479.402, Max-Change: 1.72924
Iteration: 2, Log-Lik: -468.248, Max-Change: 0.83988
...
Iteration: 500, Log-Lik: -464.281, Max-Change: 0.00056
(EM iteration trace abridged: the log-likelihood settles at -464.281 within roughly the first 20 cycles; the full trace runs to iteration 500.)
summary(mirtmodel3PL2)
##        F1    h2
## X3  0.824 0.679
## X4  0.573 0.329
## X11 0.474 0.224
## X13 0.837 0.700
## X15 0.689 0.475
## X17 0.631 0.398
## X18 0.498 0.248
## X25 0.585 0.342
## 
## SS loadings:  3.395 
## Proportion Var:  0.424 
## 
## Factor correlations: 
## 
##    F1
## F1  1
coef(mirtmodel3PL2, IRTpars = T) #coefficients (IRT parameterization)
## $X3
##         a      b    g u
## par 2.476 -0.595 0.51 1
## 
## $X4
##         a      b     g u
## par 1.191 -0.428 0.003 1
## 
## $X11
##         a     b g u
## par 0.915 -0.17 0 1
## 
## $X13
##         a      b g u
## par 2.603 -1.051 0 1
## 
## $X15
##         a     b     g u
## par 1.619 1.018 0.033 1
## 
## $X17
##         a     b     g u
## par 1.383 0.162 0.031 1
## 
## $X18
##         a      b g u
## par 0.977 -1.231 0 1
## 
## $X25
##         a      b     g u
## par 1.227 -0.377 0.166 1
## 
## $GroupPars
##     MEAN_1 COV_11
## par      0      1
plot(mirtmodel3PL2, type = "trace") #curves for all items

plot(mirtmodel3PL2, type = "info") # test information curve

plot(mirtmodel3PL2) #expected total score

itemfit(mirtmodel3PL2) #item fit statistics
##   item  S_X2 df.S_X2 p.S_X2
## 1   X3 1.758       2  0.415
## 2   X4 5.629       3  0.131
## 3  X11 3.903       3  0.272
## 4  X13 3.111       1  0.078
## 5  X15 0.029       1  0.865
## 6  X17 1.347       2  0.510
## 7  X18 4.395       2  0.111
## 8  X25 0.662       3  0.882
M2(mirtmodel3PL2)
##             M2 df         p RMSEA RMSEA_5   RMSEA_95      SRMSR      TLI
## stats 7.510697 12 0.8221048     0       0 0.06118191 0.04937744 1.104076
##       CFI
## stats   1
#compare models & relative fit statistics; note again that the two models were fitted to different item sets, so the comparison is only indicative
anova(mirtmodel2PL2, mirtmodel3PL2)
## 
## Model 1: mirt(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -20, -21, -22, -23, -24)], model = 1, itemtype = "2PL")
## Model 2: mirt(data = ctt.data[, c(-1, -2, -5, -6, -7, -8, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)], model = 1, itemtype = "3PL")
##        AIC     AICc    SABIC      BIC   logLik      X2  df   p
## 1 1147.026 1157.026 1136.921 1200.105 -553.513     NaN NaN NaN
## 2  976.561  991.561  964.436 1040.256 -464.281 178.464 772   1
#item analysis
responses <- as.matrix(ctt.data[,c( -1, -2, -3, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)])
dimnames(responses) <- NULL
(N <- dim(responses)[2]) #number of items
## [1] 8
(K <- dim(responses)[1]) #number of examinees
## [1] 105
#Histogram

Summed <- data.frame(rowSums(responses))
colnames(Summed) <- c("Total")
cleanup = theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                panel.background = element_blank(),
                axis.line.y = element_line(color = "black"),
                axis.line.x = element_line(color = "black"))
Histogram1 = ggplot(data = Summed, aes(Total))

Histogram1 + #Don't add a title (APA style)
  geom_histogram(binwidth = .5, #change the binwidth to adjust the columns
                 color = "black", #Google "hex color picker" to pick other colors
                 fill = "black") +
  xlab("Total Score") +
  ylab("Frequency") + 
  cleanup #You can choose other themes with theme_

#Descriptives
#Percentiles and Z values
#X<=Score Probability

scale(Summed)
##             Total
##   [1,] -2.2236228
##   [2,]  1.6276002
##   [3,]  1.1461973
##   [4,]  1.1461973
##   [5,] -0.7794142
##   [6,]  1.1461973
##   [7,]  0.1833916
##   [8,]  0.1833916
##   [9,] -0.7794142
##  [10,] -0.7794142
##  [11,]  0.6647944
##  [12,] -1.2608170
##  [13,]  0.1833916
##  [14,]  0.1833916
##  [15,]  0.1833916
##  [16,] -0.7794142
##  [17,]  0.1833916
##  [18,] -1.2608170
##  [19,]  0.1833916
##  [20,]  0.1833916
##  [21,]  1.1461973
##  [22,]  0.1833916
##  [23,]  1.1461973
##  [24,]  1.1461973
##  [25,]  0.1833916
##  [26,]  0.6647944
##  [27,]  0.6647944
##  [28,] -0.2980113
##  [29,] -1.7422199
##  [30,]  1.6276002
##  [31,]  0.1833916
##  [32,] -1.7422199
##  [33,]  1.1461973
##  [34,] -0.2980113
##  [35,] -1.2608170
##  [36,]  0.6647944
##  [37,]  0.1833916
##  [38,]  1.6276002
##  [39,] -0.2980113
##  [40,] -0.2980113
##  [41,]  0.1833916
##  [42,] -1.7422199
##  [43,] -0.2980113
##  [44,] -0.2980113
##  [45,] -1.2608170
##  [46,] -1.7422199
##  [47,]  1.1461973
##  [48,]  1.1461973
##  [49,] -1.7422199
##  [50,] -0.2980113
##  [51,]  1.1461973
##  [52,]  0.1833916
##  [53,]  0.6647944
##  [54,] -1.2608170
##  [55,] -1.7422199
##  [56,] -1.2608170
##  [57,]  0.1833916
##  [58,]  0.1833916
##  [59,]  0.1833916
##  [60,] -2.2236228
##  [61,] -0.2980113
##  [62,] -0.7794142
##  [63,]  0.6647944
##  [64,] -1.2608170
##  [65,]  1.1461973
##  [66,]  0.6647944
##  [67,] -0.7794142
##  [68,]  0.6647944
##  [69,] -0.7794142
##  [70,]  1.1461973
##  [71,]  0.6647944
##  [72,]  0.6647944
##  [73,]  0.6647944
##  [74,]  1.1461973
##  [75,]  1.1461973
##  [76,]  0.1833916
##  [77,]  0.6647944
##  [78,]  0.6647944
##  [79,]  1.1461973
##  [80,] -2.2236228
##  [81,]  1.1461973
##  [82,]  1.1461973
##  [83,] -0.2980113
##  [84,]  1.6276002
##  [85,] -0.2980113
##  [86,]  0.6647944
##  [87,] -0.2980113
##  [88,]  0.1833916
##  [89,] -0.7794142
##  [90,]  1.1461973
##  [91,]  0.6647944
##  [92,] -1.7422199
##  [93,] -0.2980113
##  [94,]  0.6647944
##  [95,]  0.6647944
##  [96,] -0.2980113
##  [97,] -0.2980113
##  [98,] -0.7794142
##  [99,] -1.2608170
## [100,] -1.7422199
## [101,]  1.1461973
## [102,] -1.2608170
## [103,] -0.2980113
## [104,]  0.1833916
## [105,]  0.1833916
## attr(,"scaled:center")
##    Total 
## 4.619048 
## attr(,"scaled:scale")
##    Total 
## 2.077262
#cumulative percent
pnorm(q = 0, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.01308768
pnorm(q = 1, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.04073697
pnorm(q = 2, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.1036915
pnorm(q = 3, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.2178746
pnorm(q = 4, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.382856
pnorm(q = 5, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.5727636
pnorm(q = 6, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.7469164
pnorm(q = 7, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.874148
pnorm(q = 8, mean = 4.619, sd = 2.077262, lower.tail = T)
## [1] 0.9481976
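#Compact alternative (sketch): pnorm() is vectorised, so the nine calls above collapse into one
pnorm(q = 0:8, mean = 4.619, sd = 2.077262, lower.tail = TRUE)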
#Zscores
qnorm(p = pnorm(q = 0, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] -2.2236
qnorm(p = pnorm(q = 1, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] -1.742197
qnorm(p = pnorm(q = 2, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] -1.260794
qnorm(p = pnorm(q = 3, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] -0.7793913
qnorm(p = pnorm(q = 4, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] -0.2979884
qnorm(p = pnorm(q = 5, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] 0.1834145
qnorm(p = pnorm(q = 6, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] 0.6648174
qnorm(p = pnorm(q = 7, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] 1.14622
qnorm(p = pnorm(q = 8, mean = 4.619, sd = 2.077262, lower.tail = T))
## [1] 1.627623
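#Shortcut (sketch): qnorm(pnorm(q, mean, sd)) is just the ordinary z-score (q - mean)/sd,
#so the same values come from
(0:8 - 4.619)/2.077262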
#Probability density function

Score <- seq(from=0, to=8, by=0.04)
probability_density <- dnorm(Score, mean = 4.619, sd = 2.077262)

plot(Score, probability_density, type = "l")

#Alpha Ver. 1
cronbachs.alpha <-
  function(X){
    
    X <- data.matrix(X) #use the argument, not the global object responses
    n <- ncol(X) #number of items
    k <- nrow(X) #number of examinees
    
    #formula
    alphaC <- (n/(n - 1))*(1 - sum(apply(
      X, 2, var))/var(rowSums(X)))
    
    return(list("Cronbach's alpha" = alphaC,
                "Number of items" = n,
                "Number of examinees" = k))
  }
dump("cronbachs.alpha", file = "cronbachs.alpha.R")

#Alpha Ver. 2
cronbachs.alpha(responses)
## $`Cronbach's alpha`
## [1] 0.6730051
## 
## $`Number of items`
## [1] 8
## 
## $`Number of examinees`
## [1] 105
#KR-20
KR20 <-
  function(X){
    X <- data.matrix(X)
    k <- ncol(X)
    
    # Person total score variances
    SX <- var(rowSums(X))
    
    # item means
    IM <- colMeans(X)
    
    return(((k/(k - 1))*((SX - sum(IM*(1 - IM)))/SX)))
  }

dump("KR20", file = "KR20.R")

KR20(responses)
## [1] 0.6774799
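#Note (sketch): for dichotomous items KR-20 is coefficient alpha; the small gap to the 0.673 above arises only
#because p*(1 - p) is the population item variance while var() divides by n - 1
KR20(responses) - cronbachs.alpha(responses)[[1]] #small positive difference from the variance denominators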
# Kuder-Richardson formula 21
KR21 <-
  function(X){
    X <- data.matrix(X)
    n <- ncol(X)
    
    # total-score variance and mean item difficulty
    SX   <- var(rowSums(X))
    pbar <- mean(colMeans(X))
    
    return((n/(n - 1))*(1 - n*pbar*(1 - pbar)/SX))
  }

dump("KR21", file = "KR21.R")
KR21(responses)
## [1] 0.625833
#Covariances
covar =  cov(responses, method = "pearson")
covar
##            [,1]       [,2]       [,3]       [,4]       [,5]       [,6]
## [1,] 0.24230769 0.06538462 0.05192308 0.03846154 0.05961538 0.04807692
## [2,] 0.06538462 0.25054945 0.07307692 0.03708791 0.02692308 0.03708791
## [3,] 0.05192308 0.07307692 0.25128205 0.04487179 0.02948718 0.07051282
## [4,] 0.03846154 0.03708791 0.04487179 0.15567766 0.03205128 0.05311355
## [5,] 0.05961538 0.02692308 0.02948718 0.03205128 0.19743590 0.05448718
## [6,] 0.04807692 0.03708791 0.07051282 0.05311355 0.05448718 0.25183150
## [7,] 0.03653846 0.06923077 0.02820513 0.05448718 0.02371795 0.05128205
## [8,] 0.04423077 0.03406593 0.03076923 0.04945055 0.04423077 0.04945055
##            [,7]       [,8]
## [1,] 0.03653846 0.04423077
## [2,] 0.06923077 0.03406593
## [3,] 0.02820513 0.03076923
## [4,] 0.05448718 0.04945055
## [5,] 0.02371795 0.04423077
## [6,] 0.05128205 0.04945055
## [7,] 0.19743590 0.03269231
## [8,] 0.03269231 0.22747253
#the problem with covariance/correlation tables is that there is no agreed cutoff value; as a rough heuristic we could flag values of 0.1 and lower as poor (see the sketch after the correlation matrix below)

#Correlation
cor(x = responses, method = "pearson")
##           [,1]      [,2]      [,3]      [,4]      [,5]      [,6]      [,7]
## [1,] 1.0000000 0.2653660 0.2104244 0.1980295 0.2725597 0.1946247 0.1670527
## [2,] 0.2653660 1.0000000 0.2912412 0.1877900 0.1210500 0.1476490 0.3112715
## [3,] 0.2104244 0.2912412 1.0000000 0.2268713 0.1323852 0.2803060 0.1266293
## [4,] 0.1980295 0.1877900 0.2268713 1.0000000 0.1828181 0.2682484 0.3107908
## [5,] 0.2725597 0.1210500 0.1323852 0.1828181 1.0000000 0.2443578 0.1201299
## [6,] 0.1946247 0.1476490 0.2803060 0.2682484 0.2443578 1.0000000 0.2299838
## [7,] 0.1670527 0.3112715 0.1266293 0.3107908 0.1201299 0.2299838 1.0000000
## [8,] 0.1883981 0.1426951 0.1286979 0.2627807 0.2087118 0.2066101 0.1542652
##           [,8]
## [1,] 0.1883981
## [2,] 0.1426951
## [3,] 0.1286979
## [4,] 0.2627807
## [5,] 0.2087118
## [6,] 0.2066101
## [7,] 0.1542652
## [8,] 1.0000000
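#Minimal sketch of the heuristic above (the 0.1 cutoff is only the rough rule suggested here, not an established standard):
#flag item pairs whose inter-item correlation falls below 0.1
r.mat <- cor(responses, method = "pearson")
which(r.mat < 0.1 & upper.tri(r.mat), arr.ind = TRUE)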
#CTT

#Reliability

str(reliability(responses, itemal = TRUE), vec.len = 25) #vec.len controls how many 'first few' elements of each vector are displayed
## List of 9
##  $ nItem         : int 8
##  $ nPerson       : int 105
##  $ alpha         : num 0.673
##  $ scaleMean     : num 4.62
##  $ scaleSD       : num 2.08
##  $ alphaIfDeleted: num [1:8(1d)] 0.639 0.641 0.645 0.633 0.652 0.634 0.645 0.654
##  $ pBis          : num [1:8(1d)] 0.38 0.373 0.355 0.417 0.322 0.397 0.355 0.318
##  $ bis           : num [1:8(1d)] 0.461 0.449 0.436 0.537 0.476 0.494 0.436 0.392
##  $ itemMean      : num [1:8] 0.6 0.543 0.533 0.81 0.267 0.476 0.733 0.657
##  - attr(*, "class")= chr "reliability"
#Confidence intervals for the item-total correlations (discrimination), using CIr() from the psychometric package; a compact version follows the individual calls below
CIr(r = 0.380, n = 105, level = 0.9)
## [1] 0.2328444 0.5101440
CIr(r = 0.373, n = 105, level = 0.9)
## [1] 0.2251159 0.5040854
CIr(r = 0.355, n = 105, level = 0.9)
## [1] 0.2053277 0.4884465
CIr(r = 0.417, n = 105, level = 0.9)
## [1] 0.2740063 0.5419552
CIr(r = 0.322, n = 105, level = 0.9)
## [1] 0.1693643 0.4595508
CIr(r = 0.397, n = 105, level = 0.9)
## [1] 0.2516914 0.5248043
CIr(r = 0.355, n = 105, level = 0.9)
## [1] 0.2053277 0.4884465
CIr(r = 0.318, n = 105, level = 0.9)
## [1] 0.1650325 0.4560284
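#Compact alternative (sketch): feed the pBis values reported by reliability() above to CIr() in one call
pbis <- c(0.380, 0.373, 0.355, 0.417, 0.322, 0.397, 0.355, 0.318)
sapply(pbis, CIr, n = 105, level = 0.9) #columns = items, rows = lower/upper bounds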
#Alpha and Omega

ci.reliability(data = responses, type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.6730051
## 
## $se
## [1] 0.04823633
## 
## $ci.lower
## [1] 0.5736695
## 
## $ci.upper
## [1] 0.756275
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Item analysis

knitr::kable(item.exam(responses, y = NULL, discrim = TRUE),
             align = "c",
             caption = "Item Analysis")
Item Analysis
| Sample.SD | Item.total | Item.Tot.woi | Difficulty | Discrimination | Item.Criterion | Item.Reliab | Item.Rel.woi | Item.Validity |
|:---------:|:----------:|:------------:|:----------:|:--------------:|:--------------:|:-----------:|:------------:|:-------------:|
| 0.4922476 | 0.5736164 | 0.3801324 | 0.6000000 | 0.6857143 | NA | 0.2810135 | 0.1862261 | NA |
| 0.5005491 | 0.5707085 | 0.3726389 | 0.5428571 | 0.6857143 | NA | 0.2843041 | 0.1856337 | NA |
| 0.5012804 | 0.5571241 | 0.3554569 | 0.5333333 | 0.5714286 | NA | 0.2779423 | 0.1773330 | NA |
| 0.3945601 | 0.5675925 | 0.4169282 | 0.8095238 | 0.4000000 | NA | 0.2228803 | 0.1637180 | NA |
| 0.4443376 | 0.5069836 | 0.3219158 | 0.2666667 | 0.5142857 | NA | 0.2241966 | 0.1423565 | NA |
| 0.5018282 | 0.5907766 | 0.3971923 | 0.4761905 | 0.6285714 | NA | 0.2950532 | 0.1983709 | NA |
| 0.4443376 | 0.5347635 | 0.3549832 | 0.7333333 | 0.4857143 | NA | 0.2364813 | 0.1569795 | NA |
| 0.4769408 | 0.5171561 | 0.3184780 | 0.6571429 | 0.6000000 | NA | 0.2454755 | 0.1511701 | NA |
#Sample.SD      = Standard deviation of the item
#Item.total     = Correlation of the item with the total test score
#Item.Tot.woi   = Correlation of the item with the total test score (scored without the item)
#Difficulty     = Mean of the item (p)
#Discrimination = Discrimination of the item, (u - l)/n
#Item.Criterion = Correlation of the item with the criterion (y)
#Item.Reliab    = Item reliability index
#Item.Rel.woi   = Item reliability index (scored without the item)
#Item.Validity  = Item validity index

#Spearman-Brown (CTT::spearman.brown)
#If n.or.r = "n", the function returns the new reliability for the given test length;
#if n.or.r = "r", it returns the factor by which the test length must change to reach a desired reliability.
#input = the new test length or the desired reliability, depending on n.or.r
cronbach.alpha(responses, standardized = TRUE)
## 
## Standardized Cronbach's alpha for the 'responses' data-set
## 
## Items: 8
## Sample units: 105
## alpha: 0.676
H <- cronbach.alpha(responses, standardized = TRUE)
spearman.brown(H$alpha, input = 0.95, n.or.r = "r")
## $n.new
## [1] 9.126731
rel95 <- spearman.brown(H$alpha, input = 0.95, n.or.r = "r") #returns the factor by which the test must be lengthened to attain 0.95 reliability
rel.Factor <- rel95$n.new*11 #11=number of items in the model
round(rel.Factor)
## [1] 100
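#Worked check (sketch) of the Spearman-Brown prophecy formula behind spearman.brown(..., n.or.r = "r"):
#lengthening factor = r.desired*(1 - r.current) / (r.current*(1 - r.desired))
0.95*(1 - H$alpha) / (H$alpha*(1 - 0.95)) #should reproduce rel95$n.new above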
#Split-half coefficients (psych::splitHalf)
splitHalf(responses, raw = FALSE, brute = FALSE, n.sample = 1000, covar = FALSE, check.keys = FALSE, key = NULL, use = "complete") #use = "complete" or "pairwise" for handling missing data
## Split half reliabilities  
## Call: splitHalf(r = responses, raw = FALSE, brute = FALSE, n.sample = 1000, 
##     covar = FALSE, check.keys = FALSE, key = NULL, use = "complete")
## 
## Maximum split half reliability (lambda 4) =  0.75
## Guttman lambda 6                          =  0.66
## Average split half reliability            =  0.66
## Guttman lambda 3 (alpha)                  =  0.68
## Minimum split half reliability  (beta)    =  0.62
#Strata (H) index: number of statistically distinct score strata the test can separate
H #standardised alpha
## 
## Standardized Cronbach's alpha for the 'responses' data-set
## 
## Items: 8
## Sample units: 105
## alpha: 0.676
G <- sqrt(H$alpha/(1-H$alpha)) # separation index
G
## [1] 1.442843
Strata = (4*G+1)/3 # number of statistically distinct strata
Strata
## [1] 2.257124
round(Strata)
## [1] 2
#Split items
SPLIT.ITEMS <- 
  function(X, seed = NULL){
    # optional fixed seed
    if (!is.null(seed)) {set.seed(seed)} 
    
    X <- as.matrix(X)
    
    # if n = 2x, then lengths Y1 = Y2
    # if n = 2x+1, then lengths Y1 = Y2+1
    n <- ncol(X)
    index <- sample(1:n, ceiling(n/2))
    Y1 <- X[, index ]
    Y2 <- X[, -index]
    return(list(Y1, Y2))
  }

dump("SPLIT.ITEMS", file = "SPLIT.ITEMS.R")

#Computes a bootstrapped (default 1,000 replicates) Pearson product-moment correlation between the total scores on two random halves of the items, and returns the average split-half correlation.
pearson <- 
  function(X, seed = NULL, n = NULL){
    source("SPLIT.ITEMS.R")
    
    # optional fixed seed
    if (!is.null(seed)) {set.seed(seed)}
    
    # the number of bootstrap replicates
    if (is.null(n)) {n <- 1e3}   
    
    X <- as.matrix(X)
    r <- rep(NA, n)
    
    for (i in 1:n) {
      # split items
      Y <- SPLIT.ITEMS(X)
      
      # total scores
      S1 <- as.matrix(rowSums(Y[[1]]))
      S2 <- as.matrix(rowSums(Y[[2]]))
      
      # residual scores
      R1 <- S1 - mean(S1)
      R2 <- S2 - mean(S2)
      
      # Pearson product-moment correlation coefficient
      r[i] <- (t(R1) %*% R2) / (sqrt((t(R1) %*% R1)) * sqrt((t(R2) %*% R2)))
    }
    
    return(mean(r))
  }

dump("pearson", file = "pearson.R")


#Compute the Pearson product-moment correlation coefficient
pearson(responses, seed = 456, n = 1)
## [1] 0.5218566
# compare
# split items
set.seed(456)
Y <- SPLIT.ITEMS(responses)

# Total scores
S1 <- as.matrix(rowSums(Y[[1]]))
S2 <- as.matrix(rowSums(Y[[2]]))

cor(S1, S2)
##           [,1]
## [1,] 0.5218566
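# A single half-test correlation underestimates full-length reliability; the Spearman-Brown
# correction 2r/(1 + r) steps it up (a sketch applied to the average over 1,000 random splits,
# not part of the original output):
r.half <- pearson(responses, seed = 456, n = 1000)
2 * r.half / (1 + r.half)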
#Standard error of measurement (SEM)
SEM <-
  function(X){
    source("cronbachs.alpha.R")
    X <- data.matrix(X)
    
    return(sd(rowSums(X)) * sqrt(1 - cronbachs.alpha(X)[[1]]))
  }

SEM(responses)
## [1] 1.187851
# 90% confidence interval for the true score
knitr::kable(head(cbind(lower_bound = round(rowSums(responses)-1.65* sd(rowSums(responses))*
                                              sqrt(1-KR20(responses)), 2), observed = rowSums(responses),
                        upper_bound = round(rowSums(responses)+1.65* sd(rowSums(responses))*
                                              sqrt(1-KR20(responses)), 2)), 105),
             align = "c",
             caption = "Item Scores")
Table: Item Scores

lower_bound   observed   upper_bound
------------  ---------  ------------
-1.95 0 1.95
6.05 8 9.95
5.05 7 8.95
5.05 7 8.95
1.05 3 4.95
5.05 7 8.95
3.05 5 6.95
3.05 5 6.95
1.05 3 4.95
1.05 3 4.95
4.05 6 7.95
0.05 2 3.95
3.05 5 6.95
3.05 5 6.95
3.05 5 6.95
1.05 3 4.95
3.05 5 6.95
0.05 2 3.95
3.05 5 6.95
3.05 5 6.95
5.05 7 8.95
3.05 5 6.95
5.05 7 8.95
5.05 7 8.95
3.05 5 6.95
4.05 6 7.95
4.05 6 7.95
2.05 4 5.95
-0.95 1 2.95
6.05 8 9.95
3.05 5 6.95
-0.95 1 2.95
5.05 7 8.95
2.05 4 5.95
0.05 2 3.95
4.05 6 7.95
3.05 5 6.95
6.05 8 9.95
2.05 4 5.95
2.05 4 5.95
3.05 5 6.95
-0.95 1 2.95
2.05 4 5.95
2.05 4 5.95
0.05 2 3.95
-0.95 1 2.95
5.05 7 8.95
5.05 7 8.95
-0.95 1 2.95
2.05 4 5.95
5.05 7 8.95
3.05 5 6.95
4.05 6 7.95
0.05 2 3.95
-0.95 1 2.95
0.05 2 3.95
3.05 5 6.95
3.05 5 6.95
3.05 5 6.95
-1.95 0 1.95
2.05 4 5.95
1.05 3 4.95
4.05 6 7.95
0.05 2 3.95
5.05 7 8.95
4.05 6 7.95
1.05 3 4.95
4.05 6 7.95
1.05 3 4.95
5.05 7 8.95
4.05 6 7.95
4.05 6 7.95
4.05 6 7.95
5.05 7 8.95
5.05 7 8.95
3.05 5 6.95
4.05 6 7.95
4.05 6 7.95
5.05 7 8.95
-1.95 0 1.95
5.05 7 8.95
5.05 7 8.95
2.05 4 5.95
6.05 8 9.95
2.05 4 5.95
4.05 6 7.95
2.05 4 5.95
3.05 5 6.95
1.05 3 4.95
5.05 7 8.95
4.05 6 7.95
-0.95 1 2.95
2.05 4 5.95
4.05 6 7.95
4.05 6 7.95
2.05 4 5.95
2.05 4 5.95
1.05 3 4.95
0.05 2 3.95
-0.95 1 2.95
5.05 7 8.95
0.05 2 3.95
2.05 4 5.95
3.05 5 6.95
3.05 5 6.95
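# The same interval built from the SEM() helper above with the exact 90% z value
# (a sketch; KR-20 and coefficient alpha coincide for dichotomous items, so the bounds should
# essentially match the table above):
total <- rowSums(responses)
z90 <- qnorm(0.95)   # ~1.645, versus the rounded 1.65 used above
head(data.frame(lower = round(total - z90 * SEM(responses), 2),
                observed = total,
                upper = round(total + z90 * SEM(responses), 2)))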
#Item analysis
item.analysis <- 
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.15
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    (ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE))
    
    # Mark items that are potentially problematic
    item.analysis <- data.frame(item = seq(1:ctt.analysis$nItem),
                                r.pbis = ctt.analysis$pBis,
                                bis = ctt.analysis$bis,
                                item.mean = ctt.analysis$itemMean,
                                alpha.del = ctt.analysis$alphaIfDeleted)
    
    # code provided by Dr. Gordon Brooks
    if (TRUE) {
      item.analysis$check <- 
        ifelse(item.analysis$r.pbis < cvpb |
                 item.analysis$item.mean < cvdl |
                 item.analysis$item.mean > cvdu, "???", "")
    }
    
    return(item.analysis)
  }

dump("item.analysis", file = "item.analysis.R")

knitr::kable(item.analysis(responses), 
             align = "c",
             caption = "Item Analysis")
Table: Item Analysis

item   r.pbis      bis         item.mean   alpha.del   check
-----  ----------  ----------  ----------  ----------  ------
1      0.3801324   0.4605445   0.6000000   0.6386423   
2      0.3726389   0.4486398   0.5428571   0.6406295   
3      0.3554569   0.4359285   0.5333333   0.6450951   
4      0.4169282   0.5367995   0.8095238   0.6333678   
5      0.3219158   0.4758539   0.2666667   0.6523966   
6      0.3971923   0.4943629   0.4761905   0.6342028   
7      0.3549832   0.4358341   0.7333333   0.6449155   
8      0.3184780   0.3924045   0.6571429   0.6537634   
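# Any items the flagging rule marks for review (the check column is empty here, so none are flagged):
flagged <- item.analysis(responses)
subset(flagged, check == "???")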
#Item Difficulty

item.difficulty <-
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.15
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE)
    
    test_difficulty <- data.frame(item = 1:ctt.analysis$nItem , 
                                  difficulty = ctt.analysis$itemMean)
    
    plot(test_difficulty,
         main = "Test Item Difficulty",
         type = "p",
         pch = 1,
         cex = 2.8,
         col = "purple",
         ylab = "Item Mean (Difficulty)",
         xlab = "Item Number",
         ylim = c(0, 1),
         xlim = c(0, ctt.analysis$nItem))
    
    abline(h = cvdl, col = "tomato")
    abline(h = cvdu, col = "tomato")
    
    abline(h = .3, col = "dodgerblue")
    abline(h = .7, col = "dodgerblue")
    
    text(diff(range(test_difficulty[, 1]))/2, 0.7, 
         "maximum information range", 
         col = "dodger blue", 
         pos = 3)
    
    text(diff(range(test_difficulty[, 1]))/2, cvdu, 
         "rule of thumb acceptable range", 
         col = "tomato", 
         pos = 3)
    
    
    outlier2 <- data.matrix(subset(cbind(test_difficulty[, 1], 
                                         test_difficulty[, 2]),
                                   subset = ((test_difficulty[, 2] > cvdl &
                                                test_difficulty[, 2] < .3) |
                                               (test_difficulty[, 2] < cvdu &
                                                  test_difficulty[, 2] > .7))))
    
    text(outlier2, paste("i", outlier2[,1], sep = ""), 
         col = "dodgerblue", 
         cex = .7)
    
    return(test_difficulty[order(test_difficulty$difficulty),])
  }

dump("item.difficulty", file = "item.difficulty.R")

knitr::kable(item.difficulty(responses), 
             align = "c",
             caption = "Difficulty")

Table: Difficulty

     item   difficulty
---  -----  -----------
5    5      0.2666667
6    6      0.4761905
3    3      0.5333333
2    2      0.5428571
1    1      0.6000000
8    8      0.6571429
7    7      0.7333333
4    4      0.8095238
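# Quick tally against the 0.3-0.7 "maximum information" band drawn in the plot
# (a sketch; assumes responses holds only the scored 0/1 items):
diff.p <- colMeans(responses)
sum(diff.p >= 0.3 & diff.p <= 0.7)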
#Item discrimination
item.discrimination <-
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.30
    cvdl = 0.20
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE)
    
    item.discrimination <- data.frame(item = 1:ctt.analysis$nItem , 
                                      discrimination = ctt.analysis$pBis)
    
    plot(item.discrimination,
         type = "p",
         pch = 1,
         cex = 3,
         col = "purple",
         ylab = "Item-Total Correlation",
         xlab = "Item Number",
         ylim = c(0, 1),
         main = "Test Item Discriminations")
    
    abline(h = cvpb, col = "green")
    abline(h = cvdl, col = "red")
    
    return(item.discrimination[order(item.discrimination$discrimination),])
  }

dump("item.discrimination", file = "item.discrimination.R")

knitr::kable(item.discrimination(responses),
             align = "c",
             caption = "Item Discrimination")

Table: Item Discrimination

     item   discrimination
---  -----  ---------------
8    8      0.3184780
5    5      0.3219158
7    7      0.3549832
3    3      0.3554569
2    2      0.3726389
1    1      0.3801324
6    6      0.3971923
4    4      0.4169282
#CI for item discrimination (psychometric package)

# Cohen, J., Cohen, P., West, S. G., & Aiken, L. S. (2003). Applied multiple regression/correlation analysis for the behavioral sciences (3rd ed.). Mahwah, NJ: Lawrence Erlbaum. p45-46
CIr(r = 0.380, n = 105, level = 0.9) 
## [1] 0.2328444 0.5101440
CIr(r = 0.373, n = 105, level = 0.9)
## [1] 0.2251159 0.5040854
CIr(r = 0.355, n = 105, level = 0.9)
## [1] 0.2053277 0.4884465
CIr(r = 0.417, n = 105, level = 0.9)
## [1] 0.2740063 0.5419552
CIr(r = 0.322, n = 105, level = 0.9)
## [1] 0.1693643 0.4595508
CIr(r = 0.397, n = 105, level = 0.9)
## [1] 0.2516914 0.5248043
CIr(r = 0.355, n = 105, level = 0.9)
## [1] 0.2053277 0.4884465
CIr(r = 0.318, n = 105, level = 0.9)
## [1] 0.1650325 0.4560284
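# The same 90% intervals in one pass, taken straight from the discrimination column above
# (a sketch; note that item.discrimination() re-draws its plot as a side effect):
disc <- item.discrimination(responses)$discrimination
round(t(sapply(disc, function(r) CIr(r = r, n = nrow(responses), level = 0.9))), 7)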
# CI for Z scores

CIz(z = -2.223622797, n = 105, level = 0.95)
## [1] -2.417688 -2.029557
CIz(z = -1.742219923, n = 105, level = 0.95)
## [1] -1.936285 -1.548155
CIz(z = -1.26081705, n = 105, level = 0.95)
## [1] -1.454882 -1.066752
CIz(z = -0.779414176, n = 105, level = 0.95)
## [1] -0.9734795 -0.5853488
CIz(z = -0.298011303, n = 105, level = 0.95)
## [1] -0.4920767 -0.1039460
CIz(z = 0.183391571, n = 105, level = 0.95)
## [1] -0.01067378  0.37745692
CIz(z = 0.664794444, n = 105, level = 0.95)
## [1] 0.4707291 0.8588598
CIz(z = 1.146197318, n = 105, level = 0.95)
## [1] 0.952132 1.340263
CIz(z = 1.627600192, n = 105, level = 0.95)
## [1] 1.433535 1.821666
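# What CIz() computes: a normal-theory interval around a Fisher z with SE = 1/sqrt(n - 3)
# (a hand check of the first interval above):
z <- -2.223622797; n <- 105
z + c(-1, 1) * qnorm(0.975) / sqrt(n - 3)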
#Item total correlation

test_item.total <-
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.40
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE)
    
    test_item.total <- data.frame(item = 1:ctt.analysis$nItem , 
                                  biserial = ctt.analysis$bis)
    
    plot(test_item.total,
         main = "Test Item-Total Correlation",
         type = "p",
         pch = 1,
         cex = 2.8,
         col = "purple",
         ylab = "Item-Total Correlation",
         xlab = "Item Number",
         ylim = c(0, 1),
         xlim = c(0, ctt.analysis$nItem))
    
    abline(h = cvpb, col = "red")
    abline(h = cvdl, col = "green")
    
   
    return(test_item.total[order(test_item.total$biserial),])
  }

dump("test_item.total", file = "test_item.total.R")

knitr::kable(test_item.total(responses), 
             align = "c",
             caption = "Item Total Correlation")

Table: Item Total Correlation

     item   biserial
---  -----  ----------
8    8      0.3924045
7    7      0.4358341
3    3      0.4359285
2    2      0.4486398
1    1      0.4605445
5    5      0.4758539
6    6      0.4943629
4    4      0.5367995