Question 1:
library(irtoys)
## Loading required package: sm
## Package 'sm', version 2.2-5.4: type help(sm) for summary information
## Loading required package: ltm
## Loading required package: MASS
##
## Attaching package: 'MASS'
## The following object is masked from 'package:sm':
##
## muscle
## Loading required package: msm
## Loading required package: polycor
library(mirt)
## Loading required package: stats4
## Loading required package: lattice
##
## Attaching package: 'mirt'
## The following object is masked from 'package:ltm':
##
## Science
a.par <- c(2.258,2.019,2.250,2.072,2.654,2.873,3.475,3.465,2.949)
b1.par <- c(-0.347,-0.144,0.615,0.317,0.269,-0.321,-0.289,-0.489,-0.547)
b2.par <- c(0.527,0.708,1.342,1.437,1.138,0.444,0.592,0.303,0.311)
b3.par <- c(1.515,1.560,1.959,1.986,1.940,1.452,1.622,1.210,1.409)
n.itens <- length(a.par)
#########################
### Data generation ###
#########################
set.seed(2345) # seed
nr <- 1000 # number of respondents
### generate the proficiencies
theta <- rnorm(nr,0,1)
resp <- matrix(0,nr,n.itens)
### generate the responses
mat.prob <- matrix(0,n.itens,4)
for (j in 1:nr) {
  ## logit terms for the cumulative probabilities (column 1 becomes P(X >= 1) = 1 below)
  mat.prob <- cbind(rep(0,n.itens),
                    exp(-a.par*(theta[j]-b1.par)),
                    exp(-a.par*(theta[j]-b2.par)),
                    exp(-a.par*(theta[j]-b3.par)))
  ## cumulative probabilities P(X >= k | theta_j), k = 1,...,4
  mat.prob <- 1/(1+mat.prob)
  ## category probabilities: successive differences of the cumulative probabilities
  mat.prob <- cbind(-t(apply(mat.prob,1,diff)),mat.prob[,4])
  for (i in 1:n.itens)
    resp[j,i] <- sample(4,1,replace=F,mat.prob[i,])
}
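A quick sanity check on the construction above (a sketch, not part of the original script): after the loop, mat.prob still holds the category probabilities of the last respondent processed, and each item's four probabilities should sum to 1.
## sanity check (sketch): category probabilities sum to 1 for every item
stopifnot(all(abs(rowSums(mat.prob) - 1) < 1e-10))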
### fit the graded response model via "mirt" to the simulated responses
write(t(resp),file="/Users/Gustavo/Documents/unb/mestrado/TRI/dados.mrg.txt",ncol=n.itens)
resp <- read.table(file="/Users/Gustavo/Documents/unb/mestrado/TRI/dados.mrg.txt")
mrg <- mirt(resp,1,itemtype=c('graded'))
##
Iteration: 1, Log-Lik: -9106.314, Max-Change: 1.73197
Iteration: 2, Log-Lik: -8426.656, Max-Change: 0.94471
Iteration: 3, Log-Lik: -8310.165, Max-Change: 0.47988
Iteration: 4, Log-Lik: -8256.210, Max-Change: 0.25387
Iteration: 5, Log-Lik: -8226.353, Max-Change: 0.18679
... (iterations 6-93 omitted; the log-likelihood stabilizes at -8172.5) ...
Iteration: 94, Log-Lik: -8172.548, Max-Change: 0.00009
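The verbose EM trace above can be suppressed when refitting; mirt exposes a verbose argument for this (a minor convenience, not used in the original call).
## refit quietly (sketch): same model, iteration log suppressed
mrg <- mirt(resp, 1, itemtype = 'graded', verbose = FALSE)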
prof.est <- fscores(mrg, full.scores=TRUE)
par.est <- coef(mrg,IRTpars=TRUE)
par.est
## $V1
## a b1 b2 b3
## par 2.131 -0.333 0.579 1.613
##
## $V2
## a b1 b2 b3
## par 1.995 -0.092 0.76 1.583
##
## $V3
## a b1 b2 b3
## par 2.286 0.676 1.372 1.942
##
## $V4
## a b1 b2 b3
## par 2.069 0.344 1.461 1.947
##
## $V5
## a b1 b2 b3
## par 2.612 0.296 1.234 1.922
##
## $V6
## a b1 b2 b3
## par 2.991 -0.242 0.481 1.469
##
## $V7
## a b1 b2 b3
## par 3.574 -0.245 0.588 1.645
##
## $V8
## a b1 b2 b3
## par 3.641 -0.423 0.293 1.216
##
## $V9
## a b1 b2 b3
## par 2.933 -0.512 0.314 1.475
##
## $GroupPars
## MEAN_1 COV_11
## par 0 1
Question 3: Would you point to items with categories that could be collapsed?
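One way to investigate this is to inspect the category response curves of the fitted model: categories whose curves are never the most probable over any range of theta are natural candidates for merging. A sketch using mirt's plotting functions applied to the mrg object fitted above:
## category response curves for all items (sketch)
plot(mrg, type = 'trace')
## single-item view, e.g. item 1
itemplot(mrg, 1, type = 'trace')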
Question 4: Plot the estimated proficiencies against the true proficiencies.
plot(theta,prof.est)
abline(0,1, col = "red")
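As a numeric complement to the scatterplot (a sketch; fscores returns a one-column matrix of EAP scores):
## agreement between true and estimated proficiencies (sketch)
cor(theta, prof.est[,1])
sqrt(mean((prof.est[,1] - theta)^2)) # RMSE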

Question 5: Identify the responses of the students with the minimum and the maximum estimated proficiency. In your opinion, what explains the many ties among the minimum estimated proficiency values?
resp[prof.est==min(prof.est),]
## V1 V2 V3 V4 V5 V6 V7 V8 V9
## 1 1 1 1 1 1 1 1 1 1
## 18 1 1 1 1 1 1 1 1 1
## 20 1 1 1 1 1 1 1 1 1
## 34 1 1 1 1 1 1 1 1 1
## 47 1 1 1 1 1 1 1 1 1
## ... (129 further rows omitted: all 134 respondents tied at the minimum answered category 1 on every item)
resp[prof.est==max(prof.est),]
## V1 V2 V3 V4 V5 V6 V7 V8 V9
## 213 4 4 4 4 4 4 4 4 4
## 236 4 4 4 4 4 4 4 4 4
## 267 4 4 4 4 4 4 4 4 4
## 375 4 4 4 4 4 4 4 4 4
## 474 4 4 4 4 4 4 4 4 4
## 666 4 4 4 4 4 4 4 4 4
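The ties occur because the estimated proficiency depends only on the observed response pattern: every respondent who chose category 1 on all nine items has the same likelihood and therefore receives the same (minimum) EAP score, just as the six respondents who chose category 4 everywhere share the maximum. A quick check (sketch):
## respondents with the all-minimum pattern vs. ties at the minimum score (sketch)
sum(apply(resp, 1, function(x) all(x == 1)))
sum(prof.est == min(prof.est))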
Question 6: Graphically compare the parameter estimates of the 9 items obtained from the simulated responses with the values originally supplied.
mat.est <- matrix(ncol = 4, nrow = 9)
for(i in 1:9) mat.est[i,1] <- par.est[[i]][1] ## a
for(i in 1:9) mat.est[i,2] <- par.est[[i]][2] ## b1
for(i in 1:9) mat.est[i,3] <- par.est[[i]][3] ## b2
for(i in 1:9) mat.est[i,4] <- par.est[[i]][4] ## b3
colnames(mat.est) <- c("a", "b1", "b2", "b3")
data.par <- cbind(a.par,b1.par,b2.par,b3.par)
colnames(data.par) <- c("a", "b1", "b2", "b3")
mat.est <- data.frame(mat.est)
data.par <- data.frame(data.par)
plot(data.par$a, mat.est$a, xlab = "Original 'a' parameter", ylab = "Estimated 'a'",
     main = "True vs. estimated 'a'")
abline(0,1, col = "red")

plot(data.par$b1, mat.est$b1, xlab = "Original 'b1' parameter", ylab = "Estimated 'b1'",
     main = "True vs. estimated 'b1'")
abline(0,1, col = "red")

plot(data.par$b2, mat.est$b2, xlab = "Original 'b2' parameter", ylab = "Estimated 'b2'",
     main = "True vs. estimated 'b2'")
abline(0,1, col = "red")

plot(data.par$b3, mat.est$b3, xlab = "Original 'b3' parameter", ylab = "Estimated 'b3'",
     main = "True vs. estimated 'b3'")
abline(0,1, col = "red")
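A tabular complement to the four scatterplots (sketch): item-by-item differences between the estimated and the true parameters.
## estimation error per item and parameter (sketch)
round(mat.est - data.par, 3)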

Question 7: Generate parameter values for 50 items, generate responses to these items for 1000 respondents, fit the graded response model to the data, and plot the estimated proficiencies against the true proficiencies. Compare this plot with the one obtained from the original 9 items.
## artificially generate a larger number of items
n.it <- 50 # number of simulated items
a.par.sim <- runif(n.it,0.5,3)
b.par.sim <- matrix(rnorm(3*n.it,0,1),n.it,3)
b.par.sim <- t(apply(b.par.sim,1,sort))
b1.par.sim <- b.par.sim[,1]
b2.par.sim <- b.par.sim[,2]
b3.par.sim <- b.par.sim[,3]
theta.sim <- rnorm(nr,0,1)
resp.sim <- matrix(0,nr,n.it)
### generate the responses
mat.prob.sim <- matrix(0,n.it,4)
for (j in 1:nr) {
  ## same construction as for the 9 original items
  mat.prob.sim <- cbind(rep(0,n.it),exp(-a.par.sim*(theta.sim[j]-b1.par.sim)),
                        exp(-a.par.sim*(theta.sim[j]-b2.par.sim)),exp(-a.par.sim*(theta.sim[j]-b3.par.sim)))
  mat.prob.sim <- 1/(1+mat.prob.sim)
  mat.prob.sim <- cbind(-t(apply(mat.prob.sim,1,diff)),mat.prob.sim[,4])
  for (i in 1:n.it)
    resp.sim[j,i] <- sample(4,1,replace=F,mat.prob.sim[i,])
}
### round-trip the simulated responses through disk, mirroring the procedure used for the 9 original items
write(t(resp.sim),file="/Users/Gustavo/Documents/unb/mestrado/dados.mrg.txt",ncol=n.it)
resp.sim <- read.table(file="/Users/Gustavo/Documents/unb/mestrado/dados.mrg.txt")
mrg.sim <- mirt(resp.sim,1,itemtype=c('graded'))
## Item re-scored so that all values are within a distance of 1
##
Iteration: 1, Log-Lik: -50116.631, Max-Change: 1.77786
Iteration: 2, Log-Lik: -49077.065, Max-Change: 0.45459
Iteration: 3, Log-Lik: -48800.185, Max-Change: 0.28616
Iteration: 4, Log-Lik: -48692.168, Max-Change: 0.28517
Iteration: 5, Log-Lik: -48670.122, Max-Change: 0.09794
... (iterations 6-41 omitted; the log-likelihood stabilizes at -48526) ...
Iteration: 42, Log-Lik: -48526.026, Max-Change: 0.00006
prof.est.sim <- fscores(mrg.sim, full.scores=TRUE)
par.est.sim <- coef(mrg.sim,IRTpars=TRUE)
plot(theta.sim,prof.est.sim)
abline(0,1, col= "red")
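To quantify the comparison requested in Question 7 (a sketch): the correlation between true and estimated proficiencies under 9 versus 50 items; with more items the estimates should track the true values more closely, so the second scatterplot sits tighter around the identity line.
## score recovery with 9 items vs. 50 items (sketch)
cor(theta, prof.est[,1]) # 9 items
cor(theta.sim, prof.est.sim[,1]) # 50 items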
