#Load libraries
library(MBESS)
library(lavaan)
library(semPlot)
library(readr)
library(psych)
library(CTT)
library(GPArotation)
library(psychometric)
library(cocron)
library(knitr)
library(ltm)
library(mirt)

#Import data 

SCORES <- read_csv("C:/Goran/01 FAKS/4TT/Project/Data/SCORES.csv")
data <- SCORES


#Convert item scores to ordered (categorical) factors
item.names <- paste0("X", 1:25)
data[, item.names] <- lapply(data[, item.names], ordered)

#Import data 2: plain numeric item scores, used for the CTT/IRT analyses below
ctt.data <- read.csv("C:/Goran/01 FAKS/4TT/Project/Data/SCORES.csv", header = TRUE)

#Matrix 2; the index vector [,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -18, -19, -20, -21, -23, -24)] is kept here as a note of candidate columns to drop
responses <- as.matrix(ctt.data)
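#A possible alternative (not part of the original script): assuming the CSV columns are X1-X25
#in order (consistent with the item names shown in the output below), the retained item sets
#used later can also be selected by name, which reads more easily than negative indices.
#The object name item.set14 is illustrative only.
item.set14 <- paste0("X", c(1, 3, 4, 5, 8, 11, 12, 13, 15, 17, 18, 19, 22, 25))
#responses[, item.set14] is the same subset as responses[, c(-2, -6, -7, -9, -10, -14, -16, -20, -21, -23, -24)]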

#CFA testing: one-factor model with all 25 items

one.model = "model1 =~ X4 + X17 + X2 + X3 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 + X1 + X18 + X19 + X20 + X21 + X22 + X23 + X24 + X25"
#Run the model

one.fit = cfa(one.model,
              data = responses,
              estimator = "mlr")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  73 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                         ML      Robust
##   Minimum Function Test Statistic              339.180     347.386
##   Degrees of freedom                               275         275
##   P-value (Chi-square)                           0.005       0.002
##   Scaling correction factor                                  0.976
##     for the Yuan-Bentler correction
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              521.769     517.868
##   Degrees of freedom                               300         300
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    0.711       0.668
##   Tucker-Lewis Index (TLI)                       0.684       0.638
## 
##   Robust Comparative Fit Index (CFI)                         0.678
##   Robust Tucker-Lewis Index (TLI)                            0.649
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)              -1313.466   -1313.466
##   Scaling correction factor                                  1.210
##     for the MLR correction
##   Loglikelihood unrestricted model (H1)      -1143.876   -1143.876
##   Scaling correction factor                                  1.026
##     for the MLR correction
## 
##   Number of free parameters                         75          75
##   Akaike (AIC)                                2776.931    2776.931
##   Bayesian (BIC)                              2975.978    2975.978
##   Sample-size adjusted Bayesian (BIC)         2739.039    2739.039
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.047       0.050
##   90 Percent Confidence Interval          0.027  0.063       0.031  0.066
##   P-value RMSEA <= 0.05                          0.599       0.486
## 
##   Robust RMSEA                                               0.049
##   90 Percent Confidence Interval                             0.031  0.065
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.082       0.082
## 
## Parameter Estimates:
## 
##   Information                                 Observed
##   Standard Errors                   Robust.huber.white
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   model1 =~                                                             
##     X4                1.000                               0.242    0.494
##     X17               0.951    0.256    3.721    0.000    0.230    0.461
##     X2                0.312    0.210    1.488    0.137    0.076    0.285
##     X3                0.532    0.203    2.623    0.009    0.129    0.358
##     X5                0.511    0.257    1.993    0.046    0.124    0.277
##     X6                0.106    0.204    0.522    0.602    0.026    0.066
##     X7                0.413    0.248    1.669    0.095    0.100    0.214
##     X8                0.844    0.273    3.097    0.002    0.204    0.410
##     X9                0.401    0.273    1.470    0.141    0.097    0.203
##     X10               0.256    0.258    0.992    0.321    0.062    0.138
##     X11               0.954    0.289    3.295    0.001    0.231    0.463
##     X12               0.478    0.231    2.064    0.039    0.116    0.322
##     X13               0.941    0.332    2.832    0.005    0.228    0.580
##     X14               0.111    0.214    0.517    0.605    0.027    0.070
##     X15               0.667    0.182    3.668    0.000    0.162    0.365
##     X16               0.214    0.244    0.877    0.380    0.052    0.109
##     X1                0.344    0.183    1.877    0.061    0.083    0.212
##     X18               0.853    0.297    2.878    0.004    0.207    0.467
##     X19               0.550    0.257    2.141    0.032    0.133    0.476
##     X20               0.313    0.242    1.291    0.197    0.076    0.164
##     X21               0.388    0.254    1.527    0.127    0.094    0.441
##     X22               0.607    0.175    3.459    0.001    0.147    0.329
##     X23               0.639    0.329    1.944    0.052    0.155    0.346
##     X24              -0.139    0.246   -0.563    0.573   -0.034   -0.070
##     X25               0.837    0.294    2.846    0.004    0.203    0.427
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.600    0.048   12.550    0.000    0.600    1.225
##    .X17               0.476    0.049    9.770    0.000    0.476    0.953
##    .X2                0.924    0.026   35.681    0.000    0.924    3.482
##    .X3                0.848    0.035   24.167    0.000    0.848    2.358
##    .X5                0.724    0.044   16.588    0.000    0.724    1.619
##    .X6                0.810    0.038   21.125    0.000    0.810    2.062
##    .X7                0.676    0.046   14.808    0.000    0.676    1.445
##    .X8                0.543    0.049   11.166    0.000    0.543    1.090
##    .X9                0.648    0.047   13.891    0.000    0.648    1.356
##    .X10               0.276    0.044    6.330    0.000    0.276    0.618
##    .X11               0.533    0.049   10.954    0.000    0.533    1.069
##    .X12               0.848    0.035   24.167    0.000    0.848    2.358
##    .X13               0.810    0.038   21.125    0.000    0.810    2.062
##    .X14               0.819    0.038   21.801    0.000    0.819    2.128
##    .X15               0.267    0.043    6.179    0.000    0.267    0.603
##    .X16               0.648    0.047   13.891    0.000    0.648    1.356
##    .X1                0.810    0.038   21.125    0.000    0.810    2.062
##    .X18               0.733    0.043   16.993    0.000    0.733    1.658
##    .X19               0.914    0.027   33.466    0.000    0.914    3.266
##    .X20               0.695    0.045   15.477    0.000    0.695    1.510
##    .X21               0.952    0.021   45.826    0.000    0.952    4.472
##    .X22               0.276    0.044    6.330    0.000    0.276    0.618
##    .X23               0.724    0.044   16.588    0.000    0.724    1.619
##    .X24               0.362    0.047    7.717    0.000    0.362    0.753
##    .X25               0.657    0.046   14.186    0.000    0.657    1.384
##     model1            0.000                               0.000    0.000
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.181    0.026    7.074    0.000    0.181    0.756
##    .X17               0.196    0.021    9.524    0.000    0.196    0.787
##    .X2                0.065    0.020    3.304    0.001    0.065    0.919
##    .X3                0.113    0.022    5.125    0.000    0.113    0.872
##    .X5                0.185    0.022    8.436    0.000    0.185    0.923
##    .X6                0.154    0.024    6.501    0.000    0.154    0.996
##    .X7                0.209    0.019   11.180    0.000    0.209    0.954
##    .X8                0.206    0.023    8.838    0.000    0.206    0.832
##    .X9                0.219    0.018   12.388    0.000    0.219    0.959
##    .X10               0.196    0.020    9.569    0.000    0.196    0.981
##    .X11               0.196    0.023    8.502    0.000    0.196    0.786
##    .X12               0.116    0.023    5.124    0.000    0.116    0.896
##    .X13               0.102    0.026    3.979    0.000    0.102    0.663
##    .X14               0.147    0.024    6.206    0.000    0.147    0.995
##    .X15               0.169    0.017    9.898    0.000    0.169    0.867
##    .X16               0.226    0.015   15.083    0.000    0.226    0.988
##    .X1                0.147    0.023    6.425    0.000    0.147    0.955
##    .X18               0.153    0.025    6.028    0.000    0.153    0.782
##    .X19               0.061    0.017    3.608    0.000    0.061    0.774
##    .X20               0.206    0.019   10.895    0.000    0.206    0.973
##    .X21               0.037    0.012    3.102    0.002    0.037    0.805
##    .X22               0.178    0.019    9.606    0.000    0.178    0.892
##    .X23               0.176    0.024    7.302    0.000    0.176    0.880
##    .X24               0.230    0.014   16.793    0.000    0.230    0.995
##    .X25               0.184    0.024    7.724    0.000    0.184    0.818
##     model1            0.059    0.024    2.448    0.014    1.000    1.000
## 
## R-Square:
##                    Estimate
##     X4                0.244
##     X17               0.213
##     X2                0.081
##     X3                0.128
##     X5                0.077
##     X6                0.004
##     X7                0.046
##     X8                0.168
##     X9                0.041
##     X10               0.019
##     X11               0.214
##     X12               0.104
##     X13               0.337
##     X14               0.005
##     X15               0.133
##     X16               0.012
##     X1                0.045
##     X18               0.218
##     X19               0.226
##     X20               0.027
##     X21               0.195
##     X22               0.108
##     X23               0.120
##     X24               0.005
##     X25               0.182
ci.reliability(data = responses, type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7238847
## 
## $se
## [1] 0.03567848
## 
## $ci.lower
## [1] 0.6470804
## 
## $ci.upper
## [1] 0.7837675
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 2: one-factor model with 14 retained items (MLR)
one.model = "model2 =~ X4 + X17 + X3 + X5 + X8 + X11 + X12 + X13 + X15 + X1 + X18 + X19 + X22 + X25"
#Run the model

one.fit = cfa(one.model,
              data = responses,
              estimator = "mlr")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  47 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                         ML      Robust
##   Minimum Function Test Statistic               82.201      81.921
##   Degrees of freedom                                77          77
##   P-value (Chi-square)                           0.322       0.329
##   Scaling correction factor                                  1.003
##     for the Yuan-Bentler correction
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              219.873     212.253
##   Degrees of freedom                                91          91
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    0.960       0.959
##   Tucker-Lewis Index (TLI)                       0.952       0.952
## 
##   Robust Comparative Fit Index (CFI)                         0.961
##   Robust Tucker-Lewis Index (TLI)                            0.954
## 
## Loglikelihood and Information Criteria:
## 
##   Loglikelihood user model (H0)               -759.481    -759.481
##   Scaling correction factor                                  1.049
##     for the MLR correction
##   Loglikelihood unrestricted model (H1)       -718.380    -718.380
##   Scaling correction factor                                  1.020
##     for the MLR correction
## 
##   Number of free parameters                         42          42
##   Akaike (AIC)                                1602.961    1602.961
##   Bayesian (BIC)                              1714.428    1714.428
##   Sample-size adjusted Bayesian (BIC)         1581.742    1581.742
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.025       0.025
##   90 Percent Confidence Interval          0.000  0.062       0.000  0.062
##   P-value RMSEA <= 0.05                          0.837       0.842
## 
##   Robust RMSEA                                               0.025
##   90 Percent Confidence Interval                             0.000  0.062
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.063       0.063
## 
## Parameter Estimates:
## 
##   Information                                 Observed
##   Standard Errors                   Robust.huber.white
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   model2 =~                                                             
##     X4                1.000                               0.278    0.567
##     X17               0.855    0.232    3.679    0.000    0.237    0.475
##     X3                0.494    0.180    2.740    0.006    0.137    0.382
##     X5                0.523    0.204    2.562    0.010    0.145    0.325
##     X8                0.741    0.234    3.172    0.002    0.206    0.413
##     X11               0.784    0.228    3.446    0.001    0.218    0.436
##     X12               0.390    0.181    2.157    0.031    0.108    0.301
##     X13               0.772    0.248    3.111    0.002    0.214    0.546
##     X15               0.592    0.170    3.489    0.000    0.164    0.372
##     X1                0.349    0.163    2.141    0.032    0.097    0.247
##     X18               0.668    0.239    2.800    0.005    0.185    0.419
##     X19               0.462    0.184    2.510    0.012    0.128    0.459
##     X22               0.556    0.159    3.506    0.000    0.154    0.345
##     X25               0.703    0.226    3.115    0.002    0.195    0.411
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.600    0.048   12.550    0.000    0.600    1.225
##    .X17               0.476    0.049    9.770    0.000    0.476    0.953
##    .X3                0.848    0.035   24.167    0.000    0.848    2.358
##    .X5                0.724    0.044   16.588    0.000    0.724    1.619
##    .X8                0.543    0.049   11.166    0.000    0.543    1.090
##    .X11               0.533    0.049   10.954    0.000    0.533    1.069
##    .X12               0.848    0.035   24.167    0.000    0.848    2.358
##    .X13               0.810    0.038   21.125    0.000    0.810    2.062
##    .X15               0.267    0.043    6.179    0.000    0.267    0.603
##    .X1                0.810    0.038   21.125    0.000    0.810    2.062
##    .X18               0.733    0.043   16.993    0.000    0.733    1.658
##    .X19               0.914    0.027   33.466    0.000    0.914    3.266
##    .X22               0.276    0.044    6.330    0.000    0.276    0.618
##    .X25               0.657    0.046   14.186    0.000    0.657    1.384
##     model2            0.000                               0.000    0.000
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.163    0.027    6.123    0.000    0.163    0.679
##    .X17               0.193    0.022    8.722    0.000    0.193    0.774
##    .X3                0.110    0.021    5.173    0.000    0.110    0.854
##    .X5                0.179    0.022    8.285    0.000    0.179    0.895
##    .X8                0.206    0.024    8.582    0.000    0.206    0.829
##    .X11               0.201    0.022    9.267    0.000    0.201    0.809
##    .X12               0.117    0.023    5.212    0.000    0.117    0.909
##    .X13               0.108    0.022    4.839    0.000    0.108    0.702
##    .X15               0.169    0.018    9.257    0.000    0.169    0.862
##    .X1                0.145    0.022    6.466    0.000    0.145    0.939
##    .X18               0.161    0.025    6.573    0.000    0.161    0.824
##    .X19               0.062    0.016    3.890    0.000    0.062    0.790
##    .X22               0.176    0.018    9.735    0.000    0.176    0.881
##    .X25               0.187    0.023    8.086    0.000    0.187    0.831
##     model2            0.077    0.025    3.042    0.002    1.000    1.000
## 
## R-Square:
##                    Estimate
##     X4                0.321
##     X17               0.226
##     X3                0.146
##     X5                0.105
##     X8                0.171
##     X11               0.191
##     X12               0.091
##     X13               0.298
##     X15               0.138
##     X1                0.061
##     X18               0.176
##     X19               0.210
##     X22               0.119
##     X25               0.169
ci.reliability(data = responses[,c( -2, -6, -7, -9, -10, -14, -16, -20, -21, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7327189
## 
## $se
## [1] 0.03585624
## 
## $ci.lower
## [1] 0.65363
## 
## $ci.upper
## [1] 0.7889003
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 3: same 14 items treated as ordered categorical (WLSMV estimator), with X19 as the marker indicator
one.model = "model3 =~ X19 + X4 + X17 + X3 + X5 + X8 + X11 + X12 + X13 + X15 + X1 + X18 + X22 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  18 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic               50.580      71.771
##   Degrees of freedom                                77          77
##   P-value (Chi-square)                           0.991       0.647
##   Scaling correction factor                                  1.079
##   Shift parameter                                           24.916
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              409.067     284.767
##   Degrees of freedom                                91          91
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.098       1.032
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.048
##   P-value RMSEA <= 0.05                          1.000       0.961
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.117       0.117
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.694       0.694
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   model3 =~                                                             
##     X19               1.000                               0.771    0.771
##     X4                0.956    0.234    4.085    0.000    0.737    0.737
##     X17               0.812    0.209    3.883    0.000    0.626    0.626
##     X3                0.711    0.238    2.994    0.003    0.548    0.548
##     X5                0.574    0.220    2.609    0.009    0.442    0.442
##     X8                0.688    0.228    3.013    0.003    0.530    0.530
##     X11               0.718    0.199    3.612    0.000    0.554    0.554
##     X12               0.595    0.232    2.559    0.010    0.459    0.459
##     X13               0.965    0.249    3.880    0.000    0.744    0.744
##     X15               0.677    0.206    3.279    0.001    0.522    0.522
##     X1                0.476    0.203    2.346    0.019    0.367    0.367
##     X18               0.713    0.204    3.503    0.000    0.550    0.550
##     X22               0.650    0.195    3.339    0.001    0.501    0.501
##     X25               0.700    0.209    3.350    0.001    0.540    0.540
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X19               0.000                               0.000    0.000
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X3                0.000                               0.000    0.000
##    .X5                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X12               0.000                               0.000    0.000
##    .X13               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X1                0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X22               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     model3            0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X19|t1           -1.368    0.175   -7.801    0.000   -1.368   -1.368
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X3|t1            -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X5|t1            -0.594    0.131   -4.532    0.000   -0.594   -0.594
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X12|t1           -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X1|t1            -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X22|t1            0.594    0.131    4.532    0.000    0.594    0.594
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X19               0.405                               0.405    0.405
##    .X4                0.457                               0.457    0.457
##    .X17               0.608                               0.608    0.608
##    .X3                0.699                               0.699    0.699
##    .X5                0.804                               0.804    0.804
##    .X8                0.719                               0.719    0.719
##    .X11               0.693                               0.693    0.693
##    .X12               0.790                               0.790    0.790
##    .X13               0.447                               0.447    0.447
##    .X15               0.728                               0.728    0.728
##    .X1                0.865                               0.865    0.865
##    .X18               0.698                               0.698    0.698
##    .X22               0.749                               0.749    0.749
##    .X25               0.709                               0.709    0.709
##     model3            0.595    0.252    2.356    0.018    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X19               1.000                               1.000    1.000
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X3                1.000                               1.000    1.000
##     X5                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X12               1.000                               1.000    1.000
##     X13               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X1                1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X22               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X19               0.595
##     X4                0.543
##     X17               0.392
##     X3                0.301
##     X5                0.196
##     X8                0.281
##     X11               0.307
##     X12               0.210
##     X13               0.553
##     X15               0.272
##     X1                0.135
##     X18               0.302
##     X22               0.251
##     X25               0.291
ci.reliability(data = responses[,c( -2, -6, -7, -9, -10, -14, -16, -20, -21, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7327189
## 
## $se
## [1] 0.03524142
## 
## $ci.lower
## [1] 0.6567089
## 
## $ci.upper
## [1] 0.7895437
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 4: 13 items (X19 dropped), WLSMV

one.model = "science =~ X4 + X17 + X3 + X5 + X8 + X11 + X12 + X13 + X15 + X1 + X18 + X22 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")
semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  19 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic               43.101      61.022
##   Degrees of freedom                                65          65
##   P-value (Chi-square)                           0.983       0.617
##   Scaling correction factor                                  1.043
##   Shift parameter                                           19.686
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              335.008     243.611
##   Degrees of freedom                                78          78
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.102       1.029
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.052
##   P-value RMSEA <= 0.05                          1.000       0.942
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.111       0.111
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.688       0.688
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   science =~                                                            
##     X4                1.000                               0.717    0.717
##     X17               0.857    0.194    4.418    0.000    0.615    0.615
##     X3                0.780    0.216    3.609    0.000    0.560    0.560
##     X5                0.656    0.210    3.130    0.002    0.471    0.471
##     X8                0.781    0.211    3.703    0.000    0.560    0.560
##     X11               0.775    0.204    3.796    0.000    0.556    0.556
##     X12               0.644    0.257    2.508    0.012    0.462    0.462
##     X13               0.996    0.222    4.493    0.000    0.715    0.715
##     X15               0.718    0.207    3.471    0.001    0.515    0.515
##     X1                0.542    0.195    2.772    0.006    0.389    0.389
##     X18               0.736    0.220    3.355    0.001    0.528    0.528
##     X22               0.686    0.163    4.200    0.000    0.492    0.492
##     X25               0.796    0.198    4.018    0.000    0.571    0.571
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X3                0.000                               0.000    0.000
##    .X5                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X12               0.000                               0.000    0.000
##    .X13               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X1                0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X22               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     science           0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X3|t1            -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X5|t1            -0.594    0.131   -4.532    0.000   -0.594   -0.594
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X12|t1           -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X1|t1            -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X22|t1            0.594    0.131    4.532    0.000    0.594    0.594
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X4                0.485                               0.485    0.485
##    .X17               0.622                               0.622    0.622
##    .X3                0.687                               0.687    0.687
##    .X5                0.778                               0.778    0.778
##    .X8                0.686                               0.686    0.686
##    .X11               0.691                               0.691    0.691
##    .X12               0.787                               0.787    0.787
##    .X13               0.489                               0.489    0.489
##    .X15               0.735                               0.735    0.735
##    .X1                0.849                               0.849    0.849
##    .X18               0.721                               0.721    0.721
##    .X22               0.758                               0.758    0.758
##    .X25               0.674                               0.674    0.674
##     science           0.515    0.149    3.463    0.001    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X3                1.000                               1.000    1.000
##     X5                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X12               1.000                               1.000    1.000
##     X13               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X1                1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X22               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X4                0.515
##     X17               0.378
##     X3                0.313
##     X5                0.222
##     X8                0.314
##     X11               0.309
##     X12               0.213
##     X13               0.511
##     X15               0.265
##     X1                0.151
##     X18               0.279
##     X22               0.242
##     X25               0.326
#Reliability
ci.reliability(data = responses[,c( -2, -6, -7, -9, -10, -14, -16, -19, -20, -21, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7195339
## 
## $se
## [1] 0.0348314
## 
## $ci.lower
## [1] 0.6454797
## 
## $ci.upper
## [1] 0.7792268
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 4 (revised): reduced to 10 items, WLSMV

one.model = "science =~ X13 + X4 + X17 + X3 + X5 + X8 + X11 + X15 + X18 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  17 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic               19.319      29.678
##   Degrees of freedom                                35          35
##   P-value (Chi-square)                           0.985       0.723
##   Scaling correction factor                                  0.847
##   Shift parameter                                            6.875
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              241.782     187.314
##   Degrees of freedom                                45          45
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.102       1.048
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.054
##   P-value RMSEA <= 0.05                          0.999       0.934
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.094       0.094
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.593       0.593
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   science =~                                                            
##     X13               1.000                               0.732    0.732
##     X4                0.904    0.225    4.017    0.000    0.662    0.662
##     X17               0.782    0.208    3.763    0.000    0.572    0.572
##     X3                0.829    0.209    3.959    0.000    0.607    0.607
##     X5                0.707    0.241    2.930    0.003    0.517    0.517
##     X8                0.814    0.238    3.415    0.001    0.596    0.596
##     X11               0.791    0.201    3.934    0.000    0.579    0.579
##     X15               0.721    0.221    3.262    0.001    0.528    0.528
##     X18               0.780    0.242    3.218    0.001    0.571    0.571
##     X25               0.678    0.212    3.200    0.001    0.496    0.496
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.000                               0.000    0.000
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X3                0.000                               0.000    0.000
##    .X5                0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     science           0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X3|t1            -1.026    0.150   -6.861    0.000   -1.026   -1.026
##     X5|t1            -0.594    0.131   -4.532    0.000   -0.594   -0.594
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.464                               0.464    0.464
##    .X4                0.562                               0.562    0.562
##    .X17               0.672                               0.672    0.672
##    .X3                0.631                               0.631    0.631
##    .X5                0.732                               0.732    0.732
##    .X8                0.645                               0.645    0.645
##    .X11               0.665                               0.665    0.665
##    .X15               0.721                               0.721    0.721
##    .X18               0.674                               0.674    0.674
##    .X25               0.754                               0.754    0.754
##     science           0.536    0.185    2.899    0.004    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13               1.000                               1.000    1.000
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X3                1.000                               1.000    1.000
##     X5                1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X13               0.536
##     X4                0.438
##     X17               0.328
##     X3                0.369
##     X5                0.268
##     X8                0.355
##     X11               0.335
##     X15               0.279
##     X18               0.326
##     X25               0.246
#Reliability
ci.reliability(data = responses[,c( -1, -2, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.7042403
## 
## $se
## [1] 0.03641389
## 
## $ci.lower
## [1] 0.6236427
## 
## $ci.upper
## [1] 0.7681296
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#Test 5: final 8-item model, WLSMV

one.model = "science =~ X13 + X4 + X17 + X8 + X11 + X15 + X18 + X25"
#Run the model

one.fit = cfa(one.model,
              data = data,
              estimator = "wlsmv")

semPaths(one.fit, whatLabels = "std", layout = "tree", intercepts = TRUE, residuals = TRUE)

summary(one.fit, standardized = TRUE, fit.measures = T, rsquare = TRUE)
## lavaan (0.5-23.1097) converged normally after  18 iterations
## 
##   Number of observations                           105
## 
##   Estimator                                       DWLS      Robust
##   Minimum Function Test Statistic                8.897      14.259
##   Degrees of freedom                                20          20
##   P-value (Chi-square)                           0.984       0.817
##   Scaling correction factor                                  0.744
##   Shift parameter                                            2.305
##     for simple second-order correction (Mplus variant)
## 
## Model test baseline model:
## 
##   Minimum Function Test Statistic              163.980     135.304
##   Degrees of freedom                                28          28
##   P-value                                        0.000       0.000
## 
## User model versus baseline model:
## 
##   Comparative Fit Index (CFI)                    1.000       1.000
##   Tucker-Lewis Index (TLI)                       1.114       1.075
## 
##   Robust Comparative Fit Index (CFI)                            NA
##   Robust Tucker-Lewis Index (TLI)                               NA
## 
## Root Mean Square Error of Approximation:
## 
##   RMSEA                                          0.000       0.000
##   90 Percent Confidence Interval          0.000  0.000       0.000  0.053
##   P-value RMSEA <= 0.05                          0.997       0.941
## 
##   Robust RMSEA                                                  NA
##   90 Percent Confidence Interval                             0.000     NA
## 
## Standardized Root Mean Square Residual:
## 
##   SRMR                                           0.074       0.074
## 
## Weighted Root Mean Square Residual:
## 
##   WRMR                                           0.497       0.497
## 
## Parameter Estimates:
## 
##   Information                                 Expected
##   Standard Errors                           Robust.sem
## 
## Latent Variables:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##   science =~                                                            
##     X13               1.000                               0.737    0.737
##     X4                0.796    0.246    3.236    0.001    0.587    0.587
##     X17               0.848    0.244    3.477    0.001    0.625    0.625
##     X8                0.793    0.252    3.152    0.002    0.585    0.585
##     X11               0.754    0.221    3.411    0.001    0.556    0.556
##     X15               0.767    0.235    3.265    0.001    0.565    0.565
##     X18               0.817    0.272    3.008    0.003    0.602    0.602
##     X25               0.692    0.237    2.921    0.003    0.510    0.510
## 
## Intercepts:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.000                               0.000    0.000
##    .X4                0.000                               0.000    0.000
##    .X17               0.000                               0.000    0.000
##    .X8                0.000                               0.000    0.000
##    .X11               0.000                               0.000    0.000
##    .X15               0.000                               0.000    0.000
##    .X18               0.000                               0.000    0.000
##    .X25               0.000                               0.000    0.000
##     science           0.000                               0.000    0.000
## 
## Thresholds:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13|t1           -0.876    0.142   -6.184    0.000   -0.876   -0.876
##     X4|t1            -0.253    0.124   -2.038    0.042   -0.253   -0.253
##     X17|t1            0.060    0.123    0.486    0.627    0.060    0.060
##     X8|t1            -0.108    0.123   -0.874    0.382   -0.108   -0.108
##     X11|t1           -0.084    0.123   -0.680    0.497   -0.084   -0.084
##     X15|t1            0.623    0.132    4.720    0.000    0.623    0.623
##     X18|t1           -0.623    0.132   -4.720    0.000   -0.623   -0.623
##     X25|t1           -0.405    0.127   -3.196    0.001   -0.405   -0.405
## 
## Variances:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##    .X13               0.457                               0.457    0.457
##    .X4                0.656                               0.656    0.656
##    .X17               0.609                               0.609    0.609
##    .X8                0.658                               0.658    0.658
##    .X11               0.691                               0.691    0.691
##    .X15               0.680                               0.680    0.680
##    .X18               0.637                               0.637    0.637
##    .X25               0.740                               0.740    0.740
##     science           0.543    0.228    2.386    0.017    1.000    1.000
## 
## Scales y*:
##                    Estimate  Std.Err  z-value  P(>|z|)   Std.lv  Std.all
##     X13               1.000                               1.000    1.000
##     X4                1.000                               1.000    1.000
##     X17               1.000                               1.000    1.000
##     X8                1.000                               1.000    1.000
##     X11               1.000                               1.000    1.000
##     X15               1.000                               1.000    1.000
##     X18               1.000                               1.000    1.000
##     X25               1.000                               1.000    1.000
## 
## R-Square:
##                    Estimate
##     X13               0.543
##     X4                0.344
##     X17               0.391
##     X8                0.342
##     X11               0.309
##     X15               0.320
##     X18               0.363
##     X25               0.260
ci.reliability(data = responses[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)], type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.6882234
## 
## $se
## [1] 0.04338226
## 
## $ci.lower
## [1] 0.5995929
## 
## $ci.upper
## [1] 0.7642254
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
#IRT analyses (ltm) on the 9 retained items
#2PL model
IRTmodel = ltm(ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)] ~z1, IRT.param = TRUE)
summary(IRTmodel)
## 
## Call:
## ltm(formula = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)] ~ z1, IRT.param = TRUE)
## 
## Model Summary:
##    log.Lik      AIC      BIC
##  -530.7005 1097.401 1145.172
## 
## Coefficients:
##              value std.err  z.vals
## Dffclt.X3  -1.7661  0.5142 -3.4349
## Dffclt.X4  -0.4065  0.2198 -1.8490
## Dffclt.X8  -0.1993  0.2405 -0.8286
## Dffclt.X11 -0.1528  0.2369 -0.6450
## Dffclt.X13 -1.1737  0.2652 -4.4257
## Dffclt.X15  1.0405  0.3261  3.1910
## Dffclt.X17  0.1079  0.2040  0.5291
## Dffclt.X18 -1.1370  0.3664 -3.1032
## Dffclt.X25 -0.7946  0.3242 -2.4511
## Dscrmn.X3   1.2285  0.4743  2.5903
## Dscrmn.X4   1.2973  0.4222  3.0730
## Dscrmn.X8   1.0177  0.3590  2.8346
## Dscrmn.X11  1.0228  0.3566  2.8683
## Dscrmn.X13  1.9609  0.6981  2.8090
## Dscrmn.X15  1.2706  0.4802  2.6462
## Dscrmn.X17  1.2602  0.4172  3.0202
## Dscrmn.X18  1.0954  0.3826  2.8630
## Dscrmn.X25  0.9747  0.3448  2.8266
## 
## Integration:
## method: Gauss-Hermite
## quadrature points: 21 
## 
## Optimization:
## Convergence: 0 
## max(|grad|): 1.1e-06 
## quasi-Newton: BFGS
knitr::kable(coef(IRTmodel), 
             align = "c",
             caption = "")
|    |   Dffclt   |  Dscrmn   |
|:---|:----------:|:---------:|
|X3  | -1.7661474 | 1.2284639 |
|X4  | -0.4064987 | 1.2972918 |
|X8  | -0.1992840 | 1.0176513 |
|X11 | -0.1528179 | 1.0228019 |
|X13 | -1.1737363 | 1.9608996 |
|X15 |  1.0404783 | 1.2706484 |
|X17 |  0.1079196 | 1.2601894 |
|X18 | -1.1370237 | 1.0953515 |
|X25 | -0.7946384 | 0.9746607 |
plot(IRTmodel, type = "ICC") #add argument items = ... for plot one item at a time

plot(IRTmodel, type = "IIC", items = 0) #test information function

#person.fit(IRTmodel)
item.fit(IRTmodel) #ideally the chi^2 values are non-significant, though this also depends on sample size
## 
## Item-Fit Statistics and P-values
## 
## Call:
## ltm(formula = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, 
##     -14, -16, -19, -20, -21, -22, -23, -24)] ~ z1, IRT.param = TRUE)
## 
## Alternative: Items do not fit the model
## Ability Categories: 10
## 
##         X^2 Pr(>X^2)
## X3   8.4967   0.3865
## X4  11.6202    0.169
## X8  10.4632    0.234
## X11 24.9142   0.0016
## X13 10.2793    0.246
## X15 12.1137   0.1462
## X17 15.6537   0.0476
## X18  6.6198   0.5782
## X25  5.0104   0.7565
#3PL model
IRTmodel2 = tpm(data = ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                type = "latent.trait", IRT.param = TRUE)
summary(IRTmodel2)
## 
## Call:
## tpm(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -19, -20, -21, -22, -23, -24)], type = "latent.trait", 
##     IRT.param = TRUE)
## 
## Model Summary:
##    log.Lik      AIC      BIC
##  -530.2731 1114.546 1186.203
## 
## Coefficients:
##              value std.err  z.vals
## Gussng.X3   0.5849  0.2163  2.7039
## Gussng.X4   0.2294  0.2106  1.0895
## Gussng.X8   0.0000  0.0003  0.0008
## Gussng.X11  0.0000  0.0006  0.0019
## Gussng.X13  0.0009  0.0415  0.0228
## Gussng.X15  0.0000  0.0001  0.0002
## Gussng.X17  0.0101     NaN     NaN
## Gussng.X18  0.0003  0.0155  0.0216
## Gussng.X25  0.2061  0.4543  0.4536
## Dffclt.X3  -0.3830  0.7117 -0.5382
## Dffclt.X4   0.0839  0.4700  0.1785
## Dffclt.X8  -0.1960  0.2407 -0.8141
## Dffclt.X11 -0.1499  0.2374 -0.6312
## Dffclt.X13 -1.1339  0.2623 -4.3220
## Dffclt.X15  1.0228  0.3129  3.2686
## Dffclt.X17  0.1339     NaN     NaN
## Dffclt.X18 -1.1065  0.3508 -3.1540
## Dffclt.X25 -0.2787  1.2250 -0.2275
## Dscrmn.X3   2.6375  2.8048  0.9404
## Dscrmn.X4   2.0721  1.6706  1.2404
## Dscrmn.X8   1.0173  0.3560  2.8577
## Dscrmn.X11  1.0196  0.3482  2.9283
## Dscrmn.X13  2.1105  0.7956  2.6526
## Dscrmn.X15  1.3077  0.4823  2.7113
## Dscrmn.X17  1.2540     NaN     NaN
## Dscrmn.X18  1.1352  0.3846  2.9519
## Dscrmn.X25  1.2207  1.0156  1.2019
## 
## Integration:
## method: Gauss-Hermite
## quadrature points: 21 
## 
## Optimization:
## Optimizer: optim (BFGS)
## Convergence: 0 
## max(|grad|): 0.0074
knitr::kable(coef(IRTmodel2), 
             align = "c",
             caption = "")
|    |  Gussng   |   Dffclt   |  Dscrmn  |
|:---|:---------:|:----------:|:--------:|
|X3  | 0.5848557 | -0.3830114 | 2.637543 |
|X4  | 0.2294450 |  0.0838892 | 2.072131 |
|X8  | 0.0000003 | -0.1960008 | 1.017329 |
|X11 | 0.0000012 | -0.1498838 | 1.019645 |
|X13 | 0.0009450 | -1.1338687 | 2.110485 |
|X15 | 0.0000000 |  1.0228345 | 1.307739 |
|X17 | 0.0100587 |  0.1339429 | 1.254030 |
|X18 | 0.0003351 | -1.1064647 | 1.135187 |
|X25 | 0.2060903 | -0.2786587 | 1.220691 |
plot(IRTmodel2, type = "ICC")

plot(IRTmodel2, type = "IIC", items = 0) #test information function

#person.fit(IRTmodel2) #check for outliers in the model
item.fit(IRTmodel2) #ideally the chi^2 values are non-significant, though this also depends on sample size
## 
## Item-Fit Statistics and P-values
## 
## Call:
## tpm(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -19, -20, -21, -22, -23, -24)], type = "latent.trait", 
##     IRT.param = TRUE)
## 
## Alternative: Items do not fit the model
## Ability Categories: 10
## 
##         X^2 Pr(>X^2)
## X3  11.9780   0.1013
## X4   9.8336   0.1982
## X8   9.7231   0.2048
## X11 21.5158   0.0031
## X13  9.2195   0.2373
## X15  8.8556   0.2632
## X17 13.9501   0.0521
## X18  9.9985   0.1887
## X25  4.5557    0.714
#Compare models
anova(IRTmodel, IRTmodel2) # p = 1, although this call also throws an error in my session
## 
##  Likelihood Ratio Table
##               AIC     BIC log.Lik  LRT df p.value
## IRTmodel  1097.40 1145.17 -530.70                
## IRTmodel2 1114.55 1186.20 -530.27 0.85  9       1
#If the p-value is non-significant, the two models fit the data equally well, so the simpler 2PL model is preferred
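#The same comparison worked by hand (a sketch, using the log-likelihoods reported above;
#the 3PL adds one guessing parameter per item, so df = 27 - 18 = 9):
lrt <- 2 * (-530.2731 - (-530.7005))      #~0.85
pchisq(lrt, df = 9, lower.tail = FALSE)   #~1, so the extra guessing parameters do not help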

#Mirt 2PL
mirtmodel2PL = mirt(data = ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                 model = 1,
                 itemtype = "2PL")
## 
Iteration: 1, Log-Lik: -535.669, Max-Change: 0.46682
Iteration: 2, Log-Lik: -532.037, Max-Change: 0.26243
Iteration: 3, Log-Lik: -531.106, Max-Change: 0.15055
Iteration: 4, Log-Lik: -530.792, Max-Change: 0.07360
Iteration: 5, Log-Lik: -530.733, Max-Change: 0.04584
Iteration: 6, Log-Lik: -530.713, Max-Change: 0.02503
Iteration: 7, Log-Lik: -530.704, Max-Change: 0.01523
Iteration: 8, Log-Lik: -530.702, Max-Change: 0.00995
Iteration: 9, Log-Lik: -530.701, Max-Change: 0.00669
Iteration: 10, Log-Lik: -530.700, Max-Change: 0.00209
Iteration: 11, Log-Lik: -530.700, Max-Change: 0.00178
Iteration: 12, Log-Lik: -530.700, Max-Change: 0.00026
Iteration: 13, Log-Lik: -530.700, Max-Change: 0.00026
Iteration: 14, Log-Lik: -530.700, Max-Change: 0.00024
Iteration: 15, Log-Lik: -530.700, Max-Change: 0.00022
Iteration: 16, Log-Lik: -530.700, Max-Change: 0.00062
Iteration: 17, Log-Lik: -530.700, Max-Change: 0.00039
Iteration: 18, Log-Lik: -530.700, Max-Change: 0.00009
summary(mirtmodel2PL)
##        F1    h2
## X3  0.585 0.343
## X4  0.606 0.368
## X8  0.513 0.263
## X11 0.515 0.265
## X13 0.755 0.570
## X15 0.598 0.358
## X17 0.595 0.354
## X18 0.541 0.293
## X25 0.497 0.247
## 
## SS loadings:  3.061 
## Proportion Var:  0.34 
## 
## Factor correlations: 
## 
##    F1
## F1  1
coef(mirtmodel2PL, IRTpars = T) #coefficients (IRT parameterization)
## $X3
##         a      b g u
## par 1.228 -1.766 0 1
## 
## $X4
##         a      b g u
## par 1.297 -0.406 0 1
## 
## $X8
##         a      b g u
## par 1.018 -0.199 0 1
## 
## $X11
##         a      b g u
## par 1.023 -0.153 0 1
## 
## $X13
##        a      b g u
## par 1.96 -1.174 0 1
## 
## $X15
##         a    b g u
## par 1.271 1.04 0 1
## 
## $X17
##        a     b g u
## par 1.26 0.108 0 1
## 
## $X18
##         a      b g u
## par 1.095 -1.137 0 1
## 
## $X25
##         a      b g u
## par 0.975 -0.795 0 1
## 
## $GroupPars
##     MEAN_1 COV_11
## par      0      1
plot(mirtmodel2PL, type = "trace") #curves for all items

plot(mirtmodel2PL, type = "info") # test information curve

plot(mirtmodel2PL) #expected total score

itemfit(mirtmodel2PL) #item fit statistics
##   item   S_X2 df.S_X2 p.S_X2
## 1   X3  2.535       4  0.638
## 2   X4  5.628       5  0.344
## 3   X8 10.033       5  0.074
## 4  X11  5.843       5  0.322
## 5  X13  3.954       3  0.266
## 6  X15  2.437       3  0.487
## 7  X17  5.390       4  0.250
## 8  X18  6.898       4  0.141
## 9  X25  2.844       5  0.724
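#Optional extension (not part of the original script): person ability (theta) estimates
#for the 2PL model can be extracted with mirt's fscores(); EAP scoring is the default.
theta.2pl <- fscores(mirtmodel2PL, method = "EAP")
head(theta.2pl)   # one theta estimate per examinee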
#Mirt 3PL
mirtmodel3PL = mirt(data = ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)],
                 model = 1,
                 itemtype = "3PL")
## 
Iteration: 1, Log-Lik: -546.002, Max-Change: 1.94463
Iteration: 2, Log-Lik: -534.246, Max-Change: 0.60847
Iteration: 3, Log-Lik: -531.882, Max-Change: 0.39205
[... EM iterations 4-156 omitted; the log-likelihood converges to -530.270 ...]
Iteration: 157, Log-Lik: -530.270, Max-Change: 0.00010
summary(mirtmodel3PL)
##        F1    h2
## X3  0.827 0.684
## X4  0.772 0.596
## X8  0.513 0.263
## X11 0.510 0.261
## X13 0.778 0.605
## X15 0.610 0.372
## X17 0.633 0.401
## X18 0.557 0.310
## X25 0.577 0.333
## 
## SS loadings:  3.825 
## Proportion Var:  0.425 
## 
## Factor correlations: 
## 
##    F1
## F1  1
coef(mirtmodel3PL, IRTpars = T) #coefficients (IRT parameterization)
## $X3
##         a      b     g u
## par 2.504 -0.414 0.576 1
## 
## $X4
##         a     b    g u
## par 2.069 0.086 0.23 1
## 
## $X8
##         a      b g u
## par 1.017 -0.196 0 1
## 
## $X11
##        a      b g u
## par 1.01 -0.151 0 1
## 
## $X13
##         a      b g u
## par 2.105 -1.136 0 1
## 
## $X15
##         a     b g u
## par 1.311 1.022 0 1
## 
## $X17
##         a    b     g u
## par 1.392 0.23 0.056 1
## 
## $X18
##         a      b g u
## par 1.142 -1.102 0 1
## 
## $X25
##         a      b     g u
## par 1.201 -0.316 0.191 1
## 
## $GroupPars
##     MEAN_1 COV_11
## par      0      1
plot(mirtmodel3PL, type = "trace") #curves for all items

plot(mirtmodel3PL, type = "info") # test information curve

plot(mirtmodel3PL) #expected total score

itemfit(mirtmodel3PL) #item fit statistics
##   item  S_X2 df.S_X2 p.S_X2
## 1   X3 1.928       3  0.587
## 2   X4 4.622       3  0.202
## 3   X8 9.609       4  0.048
## 4  X11 5.624       4  0.229
## 5  X13 4.319       2  0.115
## 6  X15 2.602       2  0.272
## 7  X17 5.128       3  0.163
## 8  X18 6.625       3  0.085
## 9  X25 2.732       4  0.604
#compare models
anova(mirtmodel2PL, mirtmodel3PL)
## 
## Model 1: mirt(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -19, -20, -21, -22, -23, -24)], model = 1, itemtype = "2PL")
## Model 2: mirt(data = ctt.data[, c(-1, -2, -5, -6, -7, -9, -10, -12, -14, 
##     -16, -19, -20, -21, -22, -23, -24)], model = 1, itemtype = "3PL")
##        AIC     AICc    SABIC      BIC  logLik    X2  df   p
## 1 1097.401 1105.354 1088.307 1145.172 -530.70   NaN NaN NaN
## 2 1114.539 1134.176 1100.898 1186.196 -530.27 0.861   9   1
#Keep only the 9 retained items (X3, X4, X8, X11, X13, X15, X17, X18, X25)
responses <- as.matrix(ctt.data[,c( -1, -2, -5, -6, -7, -9, -10, -12, -14, -16, -19, -20, -21, -22, -23, -24)])
dimnames(responses) <- NULL
(N <- dim(responses) [2])
## [1] 9
(K <- dim(responses) [1])
## [1] 105
#Alpha Ver. 1
cronbachs.alpha <-
  function(X){
    
    X <- data.matrix(X) # use the function argument, not the global object
    n <- ncol(X) #number of items
    k <- nrow(X) #number of examinees
    
    #formula
    alphaC <- (n/(n - 1))*(1 - sum(apply(
      X, 2, var))/var(rowSums(X)))
    
    return(list("Cronbach's alpha" = alphaC,
                "Number of items" = n,
                "Number of examinees" = k))
  }
dump("cronbachs.alpha", file = "cronbachs.alpha.R")

#Alpha Ver. 2
cronbachs.alpha(responses)
## $`Cronbach's alpha`
## [1] 0.6882234
## 
## $`Number of items`
## [1] 9
## 
## $`Number of examinees`
## [1] 105
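#Optional cross-check (not in the original script): psych::alpha() should give the same
#raw alpha as the custom function above.
psych::alpha(responses)$total$raw_alpha   # expected ~0.688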
#KR-20
KR20 <-
  function(X){
    X <- data.matrix(X)
    k <- ncol(X)
    
    # variance of examinees' total scores
    SX <- var(rowSums(X))
    
    # item means
    IM <- colMeans(X)
    
    return(((k/(k - 1))*((SX - sum(IM*(1 - IM)))/SX)))
  }

dump("KR20", file = "KR20.R")

KR20(responses)
## [1] 0.6923832
# Kuder-Richardson formula 21
KR21 <-
  function(X){
    X <- data.matrix(X)
    n <- ncol(X)
    
    SX   <- var(rowSums(X))    # total-score variance
    pbar <- mean(colMeans(X))  # average item difficulty
    
    return((n/(n - 1)) * (1 - (n * pbar * (1 - pbar)) / SX))
  }

dump("KR21", file = "KR21.R")

KR21(responses)
## [1] 0.6327714
#Covariances
covar =  cov(responses, method = "pearson")
covar
##             [,1]       [,2]       [,3]       [,4]       [,5]       [,6]
##  [1,] 0.13040293 0.05384615 0.02582418 0.02435897 0.04761905 0.02179487
##  [2,] 0.05384615 0.24230769 0.06538462 0.05192308 0.03846154 0.05961538
##  [3,] 0.02582418 0.06538462 0.25054945 0.07307692 0.03708791 0.02692308
##  [4,] 0.02435897 0.05192308 0.07307692 0.25128205 0.04487179 0.02948718
##  [5,] 0.04761905 0.03846154 0.03708791 0.04487179 0.15567766 0.03205128
##  [6,] 0.02179487 0.05961538 0.02692308 0.02948718 0.03205128 0.19743590
##  [7,] 0.01556777 0.04807692 0.03708791 0.07051282 0.05311355 0.05448718
##  [8,] 0.01666667 0.03653846 0.06923077 0.02820513 0.05448718 0.02371795
##  [9,] 0.02417582 0.04423077 0.03406593 0.03076923 0.04945055 0.04423077
##             [,7]       [,8]       [,9]
##  [1,] 0.01556777 0.01666667 0.02417582
##  [2,] 0.04807692 0.03653846 0.04423077
##  [3,] 0.03708791 0.06923077 0.03406593
##  [4,] 0.07051282 0.02820513 0.03076923
##  [5,] 0.05311355 0.05448718 0.04945055
##  [6,] 0.05448718 0.02371795 0.04423077
##  [7,] 0.25183150 0.05128205 0.04945055
##  [8,] 0.05128205 0.19743590 0.03269231
##  [9,] 0.04945055 0.03269231 0.22747253
#The problem with covariance/correlation tables is that there is no agreed cutoff value; as a rough rule we could treat values of 0.1 or lower as problematic (see the sketch after the correlation matrix below)

#Correlation
cor(x = responses, method = "pearson")
##             [,1]      [,2]      [,3]      [,4]      [,5]      [,6]
##  [1,] 1.00000000 0.3029196 0.1428684 0.1345658 0.3342134 0.1358306
##  [2,] 0.30291963 1.0000000 0.2653660 0.2104244 0.1980295 0.2725597
##  [3,] 0.14286836 0.2653660 1.0000000 0.2912412 0.1877900 0.1210500
##  [4,] 0.13456577 0.2104244 0.2912412 1.0000000 0.2268713 0.1323852
##  [5,] 0.33421342 0.1980295 0.1877900 0.2268713 1.0000000 0.1828181
##  [6,] 0.13583059 0.2725597 0.1210500 0.1323852 0.1828181 1.0000000
##  [7,] 0.08590681 0.1946247 0.1476490 0.2803060 0.2682484 0.2443578
##  [8,] 0.10387045 0.1670527 0.3112715 0.1266293 0.3107908 0.1201299
##  [9,] 0.14036962 0.1883981 0.1426951 0.1286979 0.2627807 0.2087118
##             [,7]      [,8]      [,9]
##  [1,] 0.08590681 0.1038705 0.1403696
##  [2,] 0.19462474 0.1670527 0.1883981
##  [3,] 0.14764904 0.3112715 0.1426951
##  [4,] 0.28030596 0.1266293 0.1286979
##  [5,] 0.26824843 0.3107908 0.2627807
##  [6,] 0.24435782 0.1201299 0.2087118
##  [7,] 1.00000000 0.2299838 0.2066101
##  [8,] 0.22998383 1.0000000 0.1542652
##  [9,] 0.20661013 0.1542652 1.0000000
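#Illustrative sketch (the 0.1 cutoff is only the rough rule suggested above, not an
#established criterion): flag item pairs whose inter-item correlation falls below 0.1.
r.mat <- cor(responses, method = "pearson")
which(r.mat < 0.1 & upper.tri(r.mat), arr.ind = TRUE)   # indices of weakly related item pairs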
#CTT

#Reliability

str(reliability(responses, itemal=TRUE), vec.len = 15) #vec.len controls how many 'first few' elements of each vector str() displays
## List of 9
##  $ nItem         : int 9
##  $ nPerson       : int 105
##  $ alpha         : num 0.688
##  $ scaleMean     : num 5.47
##  $ scaleSD       : num 2.21
##  $ alphaIfDeleted: num [1:9(1d)] 0.673 0.652 0.66 0.664 0.648 0.67 0.658 0.665 0.671
##  $ pBis          : num [1:9(1d)] 0.306 0.411 0.372 0.355 0.451 0.324 0.383 0.348 0.322
##  $ bis           : num [1:9(1d)] 0.427 0.496 0.446 0.434 0.579 0.481 0.477 0.426 0.394
##  $ itemMean      : num [1:9] 0.848 0.6 0.543 0.533 0.81 0.267 0.476 0.733 0.657
##  - attr(*, "class")= chr "reliability"
#Alpha and Omega
ci.reliability(data = responses, type = "1", interval.type = "bca", B = 1000)
## $est
## [1] 0.6882234
## 
## $se
## [1] 0.04625676
## 
## $ci.lower
## [1] 0.586372
## 
## $ci.upper
## [1] 0.7647472
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "alpha"
## 
## $interval.type
## [1] "bca bootstrap"
ci.reliability(data = responses, type = "3", interval.type = "bca", B = 1000) #Omega
## $est
## [1] 0.6898112
## 
## $se
## [1] 0.04675821
## 
## $ci.lower
## [1] 0.5889042
## 
## $ci.upper
## [1] 0.7626437
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "omega"
## 
## $interval.type
## [1] "bca bootstrap"
ci.reliability(data = responses, type = "5", interval.type = "bca", B = 1000) #Categorical omega!
## $est
## [1] 0.7106732
## 
## $se
## [1] 0.04599442
## 
## $ci.lower
## [1] 0.5372324
## 
## $ci.upper
## [1] 0.7711012
## 
## $conf.level
## [1] 0.95
## 
## $type
## [1] "categorical omega"
## 
## $interval.type
## [1] "bca bootstrap"
#Item analysis

knitr::kable(item.exam(responses, y = NULL, discrim = TRUE),
             align = "c",
             caption = "Item Analysis")
Item Analysis

| Sample.SD | Item.total | Item.Tot.woi | Difficulty | Discrimination | Item.Criterion | Item.Reliab | Item.Rel.woi | Item.Validity |
|:---------:|:----------:|:------------:|:----------:|:--------------:|:--------------:|:-----------:|:------------:|:-------------:|
| 0.3611135 | 0.4504461  |  0.3064193   | 0.8476190  |   0.3714286    |       NA       |  0.1618857  |  0.1101240   |      NA       |
| 0.4922476 | 0.5873975  |  0.4112586   | 0.6000000  |   0.6857143    |       NA       |  0.2877648  |  0.2014748   |      NA       |
| 0.5005491 | 0.5585739  |  0.3721480   | 0.5428571  |   0.6285714    |       NA       |  0.2782591  |  0.1853892   |      NA       |
| 0.5012804 | 0.5444791  |  0.3546408   | 0.5333333  |   0.6000000    |       NA       |  0.2716339  |  0.1769259   |      NA       |
| 0.3945601 | 0.5868499  |  0.4506073   | 0.8095238  |   0.4571429    |       NA       |  0.2304423  |  0.1769430   |      NA       |
| 0.4443376 | 0.4976575  |  0.3239787   | 0.2666667  |   0.5142857    |       NA       |  0.2200724  |  0.1432688   |      NA       |
| 0.5018282 | 0.5681087  |  0.3833020   | 0.4761905  |   0.6285714    |       NA       |  0.2837321  |  0.1914336   |      NA       |
| 0.4443376 | 0.5185018  |  0.3484520   | 0.7333333  |   0.4571429    |       NA       |  0.2292901  |  0.1540913   |      NA       |
| 0.4769408 | 0.5079386  |  0.3216245   | 0.6571429  |   0.5428571    |       NA       |  0.2411003  |  0.1526636   |      NA       |
#Sample.SD =        Standard deviation of the item
#Item.total =         Correlation of the item with the total test score
#Item.Tot.woi =     Correlation of item with total test score (scored without item)
#Difficulty =       Mean of the item (p)
#Discrimination =     Discrimination of the item (u-l)/n
#Item.Criterion =     Correlation of the item with the Criterion (y)
#Item.Reliab =      Item reliability index
#Item.Rel.woi =     Item reliability index (scored without item)
#Item.Validity =      Item validity index
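#Quick check (not in the original script): the Difficulty column is simply the item mean,
#so colMeans() should reproduce it.
round(colMeans(responses), 4)   # should match the Difficulty column above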

#Spearman-Brown
#With n.or.r = "n" the function returns the new reliability for a test of the given length;
#with n.or.r = "r" it returns the factor by which the test length must change to reach a desired reliability.
#'input' is either the new test length or the desired reliability, depending on n.or.r.
cronbach.alpha(responses, standardized = TRUE)
## 
## Standardized Cronbach's alpha for the 'responses' data-set
## 
## Items: 9
## Sample units: 105
## alpha: 0.691
H <- cronbach.alpha(responses, standardized = TRUE)
spearman.brown(H$alpha, input = 0.95, n.or.r = "r")
## $n.new
## [1] 8.500031
rel95 <- spearman.brown(H$alpha, input = 0.95, n.or.r = "r") #factor by which the test must be lengthened to attain 0.95 reliability
rel.Factor <- rel95$n.new*11 #multiplied by 11 items here; note that the reduced 'responses' scale actually contains 9 items
round(rel.Factor)
## [1] 94
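#Sanity check (assuming the standard Spearman-Brown prophecy formula, which is what
#spearman.brown() is expected to apply): compute the lengthening factor directly.
rel.old    <- H$alpha   # ~0.691, standardized alpha
rel.target <- 0.95
(rel.target * (1 - rel.old)) / (rel.old * (1 - rel.target))   # ~8.5, matches n.new above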
#Splithalf coeff.
splitHalf(responses, raw = FALSE, brute = FALSE, n.sample = 1000, covar = FALSE, check.keys = FALSE, key = NULL, use = "complete") # use = "complete" or "pairwise" controls missing-data handling
## Split half reliabilities  
## Call: splitHalf(r = responses, raw = FALSE, brute = FALSE, n.sample = 1000, 
##     covar = FALSE, check.keys = FALSE, key = NULL, use = "complete")
## 
## Maximum split half reliability (lambda 4) =  0.77
## Guttman lambda 6                          =  0.68
## Average split half reliability            =  0.67
## Guttman lambda 3 (alpha)                  =  0.69
## Minimum split half reliability  (beta)    =  0.61
#Separation (G) and strata indices, computed from the standardized alpha
H #standardised alpha
## 
## Standardized Cronbach's alpha for the 'responses' data-set
## 
## Items: 9
## Sample units: 105
## alpha: 0.691
G <- sqrt(H$alpha/(1-H$alpha)) # separation index G = sqrt(rel/(1 - rel))
G
## [1] 1.495087
Striatums = (4*G+1)/3 # number of statistically distinct strata, (4G + 1)/3
Striatums
## [1] 2.326783
round(Striatums)
## [1] 2
#Split items
SPLIT.ITEMS <- 
  function(X, seed = NULL){
    # optional fixed seed
    if (!is.null(seed)) {set.seed(seed)} 
    
    X <- as.matrix(X) # use the function argument, not the global object
    
    # if n = 2x, then lengths Y1 = Y2
    # if n = 2x+1, then lengths Y1 = Y2 + 1
    n <- ncol(X)
    index <- sample(1:n, ceiling(n/2))
    Y1 <- X[, index ]
    Y2 <- X[, -index]
    return(list(Y1, Y2))
  }

dump("SPLIT.ITEMS", file = "SPLIT.ITEMS.R")

#Computes a bootstrapped (default 1,000 replicates) Pearson product-moment correlation between total scores on two random halves of the items (a split-half correlation).
pearson <- 
  function(X, seed = NULL, n = NULL){
    source("SPLIT.ITEMS.R")
    
    # optional fixed seed
    if (!is.null(seed)) {set.seed(seed)}
    
    # the number of bootstrap replicates
    if (is.null(n)) {n <- 1e3}   
    
    X <- as.matrix(X)
    r <- rep(NA, n)
    
    for (i in 1:n) {
      # split items
      Y <- SPLIT.ITEMS(X)
      
      # total scores
      S1 <- as.matrix(rowSums(Y[[1]]))
      S2 <- as.matrix(rowSums(Y[[2]]))
      
      # residual scores
      R1 <- S1 - mean(S1)
      R2 <- S2 - mean(S2)
      
      # Pearson product-moment correlation coefficient
      r[i] <- (t(R1) %*% R2) / (sqrt((t(R1) %*% R1)) * sqrt((t(R2) %*% R2)))
    }
    
    return(mean(r))
  }

dump("pearson", file = "pearson.R")


#Compute the Pearson product-moment correlation coefficient
pearson(responses, seed = 456, n = 1)
## [1] 0.4850923
# compare
# split items
set.seed(456)
Y <- SPLIT.ITEMS(responses)

# Total scores
S1 <- as.matrix(rowSums(Y[[1]]))
S2 <- as.matrix(rowSums(Y[[2]]))

cor(S1, S2)
##           [,1]
## [1,] 0.4850923
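#Not in the original script: a single split-half correlation underestimates full-test
#reliability, so it is usually stepped up with the Spearman-Brown formula 2r/(1 + r).
r.half <- cor(S1, S2)[1, 1]   # 0.485 for this particular split
2 * r.half / (1 + r.half)     # ~0.65, stepped-up split-half reliability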
#Standard Error of measurement
SEM <-
  function(X){
    source("cronbachs.alpha.R")
    X <- data.matrix(X) # use the function argument, not the global object
    
    return(sd(rowSums(X)) * sqrt(1 - cronbachs.alpha(X)[[1]]))
  }

SEM(responses)
## [1] 1.23665
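#Rough check (using the scale SD and alpha reported by reliability() above):
#SEM = SD_total * sqrt(1 - alpha).
2.21 * sqrt(1 - 0.688)   # ~1.23, consistent with SEM(responses)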
# 90% confidence interval for the true score
knitr::kable(head(cbind(lower_bound = round(rowSums(responses)-1.65* sd(rowSums(responses))*
                                              sqrt(1-KR20(responses)), 2), observed = rowSums(responses),
                        upper_bound = round(rowSums(responses)+1.65* sd(rowSums(responses))*
                                              sqrt(1-KR20(responses)), 2)), 105),
             align = "c",
             caption = "Item Scores")
Item Scores
lower_bound observed upper_bound
-1.03 1 3.03
6.97 9 11.03
4.97 7 9.03
5.97 8 10.03
1.97 4 6.03
5.97 8 10.03
3.97 6 8.03
3.97 6 8.03
0.97 3 5.03
1.97 4 6.03
4.97 7 9.03
0.97 3 5.03
3.97 6 8.03
3.97 6 8.03
3.97 6 8.03
1.97 4 6.03
3.97 6 8.03
0.97 3 5.03
3.97 6 8.03
2.97 5 7.03
5.97 8 10.03
3.97 6 8.03
5.97 8 10.03
5.97 8 10.03
3.97 6 8.03
4.97 7 9.03
4.97 7 9.03
2.97 5 7.03
-0.03 2 4.03
6.97 9 11.03
3.97 6 8.03
-0.03 2 4.03
5.97 8 10.03
2.97 5 7.03
-0.03 2 4.03
4.97 7 9.03
3.97 6 8.03
6.97 9 11.03
2.97 5 7.03
2.97 5 7.03
3.97 6 8.03
-0.03 2 4.03
2.97 5 7.03
2.97 5 7.03
0.97 3 5.03
-0.03 2 4.03
5.97 8 10.03
5.97 8 10.03
-1.03 1 3.03
2.97 5 7.03
5.97 8 10.03
3.97 6 8.03
4.97 7 9.03
-0.03 2 4.03
-1.03 1 3.03
0.97 3 5.03
3.97 6 8.03
3.97 6 8.03
3.97 6 8.03
-2.03 0 2.03
2.97 5 7.03
1.97 4 6.03
4.97 7 9.03
-0.03 2 4.03
5.97 8 10.03
4.97 7 9.03
1.97 4 6.03
4.97 7 9.03
1.97 4 6.03
4.97 7 9.03
4.97 7 9.03
4.97 7 9.03
4.97 7 9.03
5.97 8 10.03
5.97 8 10.03
3.97 6 8.03
4.97 7 9.03
4.97 7 9.03
5.97 8 10.03
-1.03 1 3.03
5.97 8 10.03
5.97 8 10.03
1.97 4 6.03
6.97 9 11.03
1.97 4 6.03
4.97 7 9.03
1.97 4 6.03
3.97 6 8.03
0.97 3 5.03
5.97 8 10.03
4.97 7 9.03
-1.03 1 3.03
1.97 4 6.03
4.97 7 9.03
4.97 7 9.03
2.97 5 7.03
2.97 5 7.03
1.97 4 6.03
0.97 3 5.03
-0.03 2 4.03
5.97 8 10.03
0.97 3 5.03
2.97 5 7.03
3.97 6 8.03
3.97 6 8.03
#Item analysis
item.analysis <- 
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.15
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    (ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE))
    
    # Mark items that are potentially problematic
    item.analysis <- data.frame(item = seq(1:ctt.analysis$nItem),
                                r.pbis = ctt.analysis$pBis,
                                bis = ctt.analysis$bis,
                                item.mean = ctt.analysis$itemMean,
                                alpha.del = ctt.analysis$alphaIfDeleted)
    
    # code provided by Dr. Gordon Brooks
    item.analysis$check <- 
      ifelse(item.analysis$r.pbis < cvpb |
               item.analysis$item.mean < cvdl |
               item.analysis$item.mean > cvdu, "???", "")
    
    return(item.analysis)
  }

dump("item.analysis", file = "item.analysis.R")

knitr::kable(item.analysis(responses), 
             align = "c",
             caption = "Item Analysis")
Item Analysis

| item |  r.pbis   |    bis    | item.mean | alpha.del | check |
|:----:|:---------:|:---------:|:---------:|:---------:|:-----:|
|  1   | 0.3064193 | 0.4269808 | 0.8476190 | 0.6730051 |       |
|  2   | 0.4112586 | 0.4956531 | 0.6000000 | 0.6515996 |       |
|  3   | 0.3721480 | 0.4464529 | 0.5428571 | 0.6603436 |       |
|  4   | 0.3546408 | 0.4339390 | 0.5333333 | 0.6642496 |       |
|  5   | 0.4506073 | 0.5786764 | 0.8095238 | 0.6475775 |       |
|  6   | 0.3239787 | 0.4809361 | 0.2666667 | 0.6697127 |       |
|  7   | 0.3833020 | 0.4768857 | 0.4761905 | 0.6578604 |       |
|  8   | 0.3484520 | 0.4257797 | 0.7333333 | 0.6649574 |       |
|  9   | 0.3216245 | 0.3942851 | 0.6571429 | 0.6707615 |       |
#Item Difficulty

item.difficulty <-
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.15
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE)
    
    test_difficulty <- data.frame(item = 1:ctt.analysis$nItem , 
                                  difficulty = ctt.analysis$itemMean)
    
    plot(test_difficulty,
         main = "Test Item Difficulty",
         type = "p",
         pch = 1,
         cex = 2.8,
         col = "purple",
         ylab = "Item Mean (Difficulty)",
         xlab = "Item Number",
         ylim = c(0, 1),
         xlim = c(0, ctt.analysis$nItem))
    
    abline(h = cvdl, col = "tomato")
    abline(h = cvdu, col = "tomato")
    
    abline(h = .3, col = "dodgerblue")
    abline(h = .7, col = "dodgerblue")
    
    text(diff(range(test_difficulty[, 1]))/2, 0.7, 
         "maximum information range", 
         col = "dodger blue", 
         pos = 3)
    
    text(diff(range(test_difficulty[, 1]))/2, cvdu, 
         "rule of thumb acceptable range", 
         col = "tomato", 
         pos = 3)
    
    
    outlier2 <- data.matrix(subset(cbind(test_difficulty[, 1], 
                                         test_difficulty[, 2]),
                                   subset = ((test_difficulty[, 2] > cvdl &
                                                test_difficulty[, 2] < .3) |
                                               (test_difficulty[, 2] < cvdu &
                                                  test_difficulty[, 2] > .7))))
    
    text(outlier2, paste("i", outlier2[,1], sep = ""), 
         col = "dodgerblue", 
         cex = .7)
    
    return(test_difficulty[order(test_difficulty$difficulty),])
  }

dump("item.difficulty", file = "item.difficulty.R")

knitr::kable(item.difficulty(responses), 
             align = "c",
             caption = "Difficulty")

Difficulty

|    | item | difficulty |
|:--:|:----:|:----------:|
| 6  |  6   | 0.2666667  |
| 7  |  7   | 0.4761905  |
| 4  |  4   | 0.5333333  |
| 3  |  3   | 0.5428571  |
| 2  |  2   | 0.6000000  |
| 9  |  9   | 0.6571429  |
| 8  |  8   | 0.7333333  |
| 5  |  5   | 0.8095238  |
| 1  |  1   | 0.8476190  |
#Item discrimination
item.discrimination <-
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.15
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE)
    
    item.discrimination <- data.frame(item = 1:ctt.analysis$nItem , 
                                      discrimination = ctt.analysis$pBis)
    
    plot(item.discrimination,
         type = "p",
         pch = 1,
         cex = 3,
         col = "purple",
         ylab = "Item-Total Correlation",
         xlab = "Item Number",
         ylim = c(0, 1),
         main = "Test Item Discriminations")
    
    abline(h = cvpb, col = "red")
    
    return(item.discrimination[order(item.discrimination$discrimination),])
  }

dump("item.discrimination", file = "item.discrimination.R")

knitr::kable(item.discrimination(responses),
             align = "c",
             caption = "Item Discrimination")

Item Discrimination

|    | item | discrimination |
|:--:|:----:|:--------------:|
| 1  |  1   |   0.3064193    |
| 9  |  9   |   0.3216245    |
| 6  |  6   |   0.3239787    |
| 8  |  8   |   0.3484520    |
| 4  |  4   |   0.3546408    |
| 3  |  3   |   0.3721480    |
| 7  |  7   |   0.3833020    |
| 2  |  2   |   0.4112586    |
| 5  |  5   |   0.4506073    |
#Item total correlation
test_item.total <-
  function(responses){
    # CRITICAL VALUES
    cvpb = 0.20
    cvdl = 0.15
    cvdu = 0.85
    
    require(CTT, warn.conflicts = FALSE, quietly = TRUE)
    ctt.analysis <- CTT::reliability(responses, itemal = TRUE, NA.Delete = TRUE)
    
    test_item.total <- data.frame(item = 1:ctt.analysis$nItem , 
                                  biserial = ctt.analysis$bis)
    
    plot(test_item.total,
         main = "Test Item-Total Correlation",
         type = "p",
         pch = 1,
         cex = 2.8,
         col = "purple",
         ylab = "Item-Total Correlation",
         xlab = "Item Number",
         ylim = c(0, 1),
         xlim = c(0, ctt.analysis$nItem))
    
    abline(h = cvpb, col = "red")
    
   
    return(test_item.total[order(test_item.total$biserial),])
  }

dump("test_item.total", file = "test_item.total.R")

knitr::kable(test_item.total(responses), 
             align = "c",
             caption = "Item Total Correlation")

Item Total Correlation

|    | item | biserial  |
|:--:|:----:|:---------:|
| 9  |  9   | 0.3942851 |
| 8  |  8   | 0.4257797 |
| 1  |  1   | 0.4269808 |
| 4  |  4   | 0.4339390 |
| 3  |  3   | 0.4464529 |
| 7  |  7   | 0.4768857 |
| 6  |  6   | 0.4809361 |
| 2  |  2   | 0.4956531 |
| 5  |  5   | 0.5786764 |