Load data

data <- readr::read_csv("evaluation-exp3-long.csv", col_names = TRUE) 
## Rows: 17920 Columns: 22
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (15): ID, ReadingCheck1, LookUp?, Gender, Education, Race, Race2, Commen...
## dbl  (7): Duration, ReadingCheck2, Age, Version, Correct, Error, Unsure
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
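
If the column-specification message is not wanted on re-knitting, readr's own suggestion can be followed, either by setting show_col_types = FALSE or by declaring the types up front. A minimal sketch (the named columns come from the specification printed above; the rest are still guessed):

# Optional: quiet the message by declaring column types explicitly
data <- readr::read_csv(
  "evaluation-exp3-long.csv",
  col_types = readr::cols(
    ID = readr::col_character(),
    Correct = readr::col_double(),
    Error = readr::col_double(),
    .default = readr::col_guess()
  )
)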

Incorrect lure and correct answer descriptive statistics (interest-only judgment, accuracy-only judgment, mixed interest judgment, mixed accuracy judgment) for the summaries in Table 3

# Descriptive table

summary1 <- data %>%
  group_by(ID,Study,Instructions,Validity,Difficulty) %>%
  summarise(
    mean_error = mean(Error,na.rm=TRUE),
    mean_correct = mean(Correct, na.rm=TRUE)
  ) %>%
  group_by(Study,Instructions,Validity, Difficulty) %>%
  summarise(
    mean_err = mean(mean_error),
    sd_err = sd(mean_error),
    se_error = sd(mean_error)/sqrt(n()),
    mean_corr = mean(mean_correct),
    sd_corr = sd(mean_correct),
    se_corr = sd(mean_correct)/sqrt(n())
  )
## `summarise()` has grouped output by 'ID', 'Study', 'Instructions', 'Validity'.
## You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Study', 'Instructions', 'Validity'. You
## can override using the `.groups` argument.
print(summary1)
## # A tibble: 16 × 10
## # Groups:   Study, Instructions, Validity [8]
##    Study    Instructions Validity  Difficulty mean_err sd_err se_error mean_corr
##    <chr>    <chr>        <chr>     <chr>         <dbl>  <dbl>    <dbl>     <dbl>
##  1 Mix      Accuracy     Accurate  Easy        0.00282 0.0167  0.00198     0.886
##  2 Mix      Accuracy     Accurate  Hard        0.0268  0.0506  0.00600     0.528
##  3 Mix      Accuracy     Inaccura… Easy        0.0704  0.107   0.0127      0.745
##  4 Mix      Accuracy     Inaccura… Hard        0.193   0.172   0.0204      0.324
##  5 Mix      Interest     Accurate  Easy        0.00704 0.0258  0.00306     0.861
##  6 Mix      Interest     Accurate  Hard        0.0352  0.0563  0.00668     0.475
##  7 Mix      Interest     Inaccura… Easy        0.0944  0.137   0.0163      0.724
##  8 Mix      Interest     Inaccura… Hard        0.194   0.180   0.0214      0.293
##  9 Separate Accuracy     Accurate  Easy        0.00592 0.0163  0.00187     0.853
## 10 Separate Accuracy     Accurate  Hard        0.0309  0.0374  0.00429     0.522
## 11 Separate Accuracy     Inaccura… Easy        0.0684  0.0647  0.00742     0.751
## 12 Separate Accuracy     Inaccura… Hard        0.178   0.122   0.0140      0.291
## 13 Separate Interest     Accurate  Easy        0.00455 0.0145  0.00165     0.821
## 14 Separate Interest     Accurate  Hard        0.0240  0.0340  0.00388     0.533
## 15 Separate Interest     Inaccura… Easy        0.158   0.156   0.0178      0.612
## 16 Separate Interest     Inaccura… Hard        0.213   0.151   0.0172      0.301
## # … with 2 more variables: sd_corr <dbl>, se_corr <dbl>
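
If a file version of these descriptives is useful when assembling Table 3, summary1 can be rounded and exported. A sketch (the output file name is illustrative):

# Optional: export the rounded descriptives behind Table 3 (file name is illustrative)
summary1 %>%
  dplyr::ungroup() %>%
  dplyr::mutate(dplyr::across(where(is.numeric), ~ round(.x, 3))) %>%
  readr::write_csv("Evaluation-Exp3-Table3-descriptives.csv")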

Incorrect lure and correct answer descriptive statistics (interest-only judgment, accuracy-only judgment, mixed judgment) following false information only, for the summaries reported in the text

# Collapse across interest and accuracy items in mixed judgment condition

data <- data %>%
  mutate(JudgmentCondition = ifelse(Condition == "Interest_Mixed" | Condition == "Accuracy_Mixed","Mixed Judgment",ifelse(Condition == "Interest_Separate","Interest-Only","Accuracy-Only")))
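
A quick cross-tabulation confirms that the recode maps each original Condition label onto the intended judgment group (this assumes Condition contains only the four levels referenced above):

# Sanity check: each Condition level should map to exactly one JudgmentCondition
data %>% dplyr::count(Condition, JudgmentCondition)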

# Descriptive table

summary2 <- data %>%
  filter(Validity == "Inaccurate") %>%
  group_by(ID,Difficulty,JudgmentCondition) %>%
  summarise(
    mean_error = mean(Error,na.rm=TRUE),
    mean_correct = mean(Correct, na.rm=TRUE)
  ) %>%
  group_by(Difficulty,JudgmentCondition) %>%
  summarise(
    mean_err = mean(mean_error),
    sd_err = sd(mean_error),
    se_error = sd(mean_error)/sqrt(n()),
    mean_corr = mean(mean_correct),
    sd_corr = sd(mean_correct),
    se_corr = sd(mean_correct)/sqrt(n())
  )
## `summarise()` has grouped output by 'ID', 'Difficulty'. You can override using
## the `.groups` argument.
## `summarise()` has grouped output by 'Difficulty'. You can override using the
## `.groups` argument.
print(summary2)
## # A tibble: 6 × 8
## # Groups:   Difficulty [2]
##   Difficulty JudgmentConditi… mean_err sd_err se_error mean_corr sd_corr se_corr
##   <chr>      <chr>               <dbl>  <dbl>    <dbl>     <dbl>   <dbl>   <dbl>
## 1 Easy       Accuracy-Only      0.0684 0.0647  0.00742     0.751   0.234  0.0268
## 2 Easy       Interest-Only      0.158  0.156   0.0178      0.612   0.272  0.0310
## 3 Easy       Mixed Judgment     0.0824 0.103   0.0122      0.735   0.259  0.0307
## 4 Hard       Accuracy-Only      0.178  0.122   0.0140      0.291   0.249  0.0285
## 5 Hard       Interest-Only      0.213  0.151   0.0172      0.301   0.257  0.0292
## 6 Hard       Mixed Judgment     0.194  0.158   0.0187      0.308   0.272  0.0323

Model to analyze incorrect lure responses

# Set referent interest-only judgment condition

data <- within(data, JudgmentCondition <- as.factor(JudgmentCondition)) 
data <- within(data, JudgmentCondition <- relevel(JudgmentCondition, ref = "Interest-Only"))
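
The releveling can be verified before fitting; the first level printed is the reference category used by glmer's default treatment contrasts:

# Confirm Interest-Only is the reference (first) level
levels(data$JudgmentCondition)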

# Incorrect lure model

modelincorrect <- data %>%
  filter(Validity == "Inaccurate") %>%
  dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",1,0)) %>%
  glmer(Error ~ JudgmentCondition*Difficulty + (1 + Difficulty | ID) + (1 | Item), ., family = binomial, control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))

summary(modelincorrect)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Error ~ JudgmentCondition * Difficulty + (1 + Difficulty | ID) +  
##     (1 | Item)
##    Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   6535.7   6606.7  -3257.8   6515.7     8950 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.9678 -0.4041 -0.2576 -0.1464  7.0909 
## 
## Random effects:
##  Groups Name        Variance Std.Dev. Corr 
##  ID     (Intercept) 0.8564   0.9254        
##         Difficulty  0.7922   0.8900   -0.35
##  Item   (Intercept) 0.8091   0.8995        
## Number of obs: 8960, groups:  ID, 224; Item, 80
## 
## Fixed effects:
##                                            Estimate Std. Error z value Pr(>|z|)
## (Intercept)                                 -1.7056     0.1929  -8.844  < 2e-16
## JudgmentConditionAccuracy-Only              -0.2381     0.1819  -1.309 0.190678
## JudgmentConditionMixed Judgment             -0.1839     0.1855  -0.991 0.321702
## Difficulty                                  -0.5228     0.2546  -2.053 0.040035
## JudgmentConditionAccuracy-Only:Difficulty   -0.7603     0.2227  -3.414 0.000640
## JudgmentConditionMixed Judgment:Difficulty  -0.7540     0.2272  -3.319 0.000902
##                                               
## (Intercept)                                ***
## JudgmentConditionAccuracy-Only                
## JudgmentConditionMixed Judgment               
## Difficulty                                 *  
## JudgmentConditionAccuracy-Only:Difficulty  ***
## JudgmentConditionMixed Judgment:Difficulty ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) JdCA-O JdgCMJ Dffclt JCA-O:
## JdgmntCnA-O -0.461                            
## JdgmntCndMJ -0.451  0.481                     
## Difficulty  -0.603  0.188  0.182              
## JdgmnCA-O:D  0.202 -0.442 -0.210 -0.380       
## JdgmntCMJ:D  0.197 -0.210 -0.441 -0.365  0.437
tab_model(modelincorrect)
Dependent variable: Error

Predictors                                        Odds Ratios   CI            p
(Intercept)                                       0.18          0.12 – 0.27   <0.001
JudgmentCondition [Accuracy-Only]                 0.79          0.55 – 1.13   0.191
JudgmentCondition [Mixed Judgment]                0.83          0.58 – 1.20   0.322
Difficulty                                        0.59          0.36 – 0.98   0.040
JudgmentCondition [Accuracy-Only] * Difficulty    0.47          0.30 – 0.72   0.001
JudgmentCondition [Mixed Judgment] * Difficulty   0.47          0.30 – 0.73   0.001

Random Effects
σ2                                                3.29
τ00 ID                                            0.86
τ00 Item                                          0.81
τ11 ID.Difficulty                                 0.79
ρ01 ID                                            -0.35
ICC                                               0.35
N ID                                              224
N Item                                            80
Observations                                      8960
Marginal R2 / Conditional R2                      0.068 / 0.395
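
The odds ratios in the table above are the exponentiated fixed effects, so they can be reproduced directly from the fitted model. A sketch using Wald intervals (which may differ slightly from tab_model's default CI method):

# Odds ratios and Wald 95% CIs for the fixed effects, as a check on the table above
exp(lme4::fixef(modelincorrect))
exp(confint(modelincorrect, parm = "beta_", method = "Wald"))
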
# test of simple effects

emmeans(modelincorrect, ~ JudgmentCondition * Difficulty) %>%
pairs(., simple = "JudgmentCondition", reverse = TRUE)
## Difficulty = 0:
##  contrast                          estimate    SE  df z.ratio p.value
##  (Accuracy-Only) - (Interest-Only)  -0.2381 0.182 Inf  -1.309  0.3903
##  Mixed Judgment - (Interest-Only)   -0.1839 0.186 Inf  -0.991  0.5826
##  Mixed Judgment - (Accuracy-Only)    0.0542 0.187 Inf   0.289  0.9549
## 
## Difficulty = 1:
##  contrast                          estimate    SE  df z.ratio p.value
##  (Accuracy-Only) - (Interest-Only)  -0.9984 0.216 Inf  -4.611  <.0001
##  Mixed Judgment - (Interest-Only)   -0.9379 0.221 Inf  -4.242  0.0001
##  Mixed Judgment - (Accuracy-Only)    0.0605 0.232 Inf   0.261  0.9632
## 
## Results are given on the log odds ratio (not the response) scale. 
## P value adjustment: tukey method for comparing a family of 3 estimates
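
The same simple-effects contrasts can be reported as odds ratios rather than log odds by back-transforming the emmeans output:

# Same contrasts, back-transformed to the odds-ratio scale
emmeans(modelincorrect, ~ JudgmentCondition * Difficulty) %>%
  pairs(simple = "JudgmentCondition", reverse = TRUE) %>%
  summary(type = "response")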

Model to analyze correct answer responses

# Correct answer model

modelcorrect <- data %>%
  filter(Validity == "Inaccurate") %>%
  dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",1,0)) %>%
  glmer(Correct ~ JudgmentCondition*Difficulty + (1 + Difficulty | ID) + (1 | Item), ., family = binomial, control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))

summary(modelcorrect)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Correct ~ JudgmentCondition * Difficulty + (1 + Difficulty |  
##     ID) + (1 | Item)
##    Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   8807.7   8878.7  -4393.8   8787.7     8950 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -6.1236 -0.5111 -0.0783  0.5137  4.8020 
## 
## Random effects:
##  Groups Name        Variance Std.Dev. Corr 
##  ID     (Intercept) 2.6339   1.623         
##         Difficulty  1.7873   1.337    -0.49
##  Item   (Intercept) 0.3982   0.631         
## Number of obs: 8960, groups:  ID, 224; Item, 80
## 
## Fixed effects:
##                                            Estimate Std. Error z value Pr(>|z|)
## (Intercept)                                -1.33265    0.22467  -5.932    3e-09
## JudgmentConditionAccuracy-Only             -0.01772    0.28375  -0.062 0.950212
## JudgmentConditionMixed Judgment             0.07530    0.28898   0.261 0.794436
## Difficulty                                  2.01443    0.23241   8.668  < 2e-16
## JudgmentConditionAccuracy-Only:Difficulty   0.87207    0.26261   3.321 0.000898
## JudgmentConditionMixed Judgment:Difficulty  0.73659    0.26813   2.747 0.006012
##                                               
## (Intercept)                                ***
## JudgmentConditionAccuracy-Only                
## JudgmentConditionMixed Judgment               
## Difficulty                                 ***
## JudgmentConditionAccuracy-Only:Difficulty  ***
## JudgmentConditionMixed Judgment:Difficulty ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) JdCA-O JdgCMJ Dffclt JCA-O:
## JdgmntCnA-O -0.628                            
## JdgmntCndMJ -0.617  0.488                     
## Difficulty  -0.578  0.300  0.295              
## JdgmnCA-O:D  0.335 -0.533 -0.260 -0.549       
## JdgmntCMJ:D  0.329 -0.260 -0.533 -0.538  0.477
tab_model(modelcorrect)
Dependent variable: Correct

Predictors                                        Odds Ratios   CI             p
(Intercept)                                       0.26          0.17 – 0.41    <0.001
JudgmentCondition [Accuracy-Only]                 0.98          0.56 – 1.71    0.950
JudgmentCondition [Mixed Judgment]                1.08          0.61 – 1.90    0.794
Difficulty                                        7.50          4.75 – 11.82   <0.001
JudgmentCondition [Accuracy-Only] * Difficulty    2.39          1.43 – 4.00    0.001
JudgmentCondition [Mixed Judgment] * Difficulty   2.09          1.23 – 3.53    0.006

Random Effects
σ2                                                3.29
τ00 ID                                            2.63
τ00 Item                                          0.40
τ11 ID.Difficulty                                 1.79
ρ01 ID                                            -0.49
ICC                                               0.46
N ID                                              224
N Item                                            80
Observations                                      8960
Marginal R2 / Conditional R2                      0.216 / 0.581
# test of simple effects

emmeans(modelcorrect, ~ JudgmentCondition * Difficulty) %>%
pairs(., simple = "JudgmentCondition", reverse = TRUE)
## Difficulty = 0:
##  contrast                          estimate    SE  df z.ratio p.value
##  (Accuracy-Only) - (Interest-Only)  -0.0177 0.284 Inf  -0.062  0.9979
##  Mixed Judgment - (Interest-Only)    0.0753 0.289 Inf   0.261  0.9633
##  Mixed Judgment - (Accuracy-Only)    0.0930 0.290 Inf   0.321  0.9448
## 
## Difficulty = 1:
##  contrast                          estimate    SE  df z.ratio p.value
##  (Accuracy-Only) - (Interest-Only)   0.8544 0.265 Inf   3.229  0.0036
##  Mixed Judgment - (Interest-Only)    0.8119 0.270 Inf   3.008  0.0074
##  Mixed Judgment - (Accuracy-Only)   -0.0425 0.273 Inf  -0.155  0.9868
## 
## Results are given on the log odds ratio (not the response) scale. 
## P value adjustment: tukey method for comparing a family of 3 estimates
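
Because both models include random slopes for Difficulty, it is worth confirming that neither fit is singular and that the optimizer raised no convergence messages. A quick check with lme4's built-in diagnostics:

# Diagnostics: singular fits and optimizer messages (NULL means none were raised)
lme4::isSingular(modelincorrect)
lme4::isSingular(modelcorrect)
modelincorrect@optinfo$conv$lme4$messages
modelcorrect@optinfo$conv$lme4$messages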

Incorrect lure responses by accuracy-only, interest-only, and mixed judgment (Figure 5)

# summary2 is already aggregated, so plot the condition means directly with geom_col
Evaluation_Exp3_Figure5 <- summary2 %>%
  ggplot(.) + aes(x = reorder(Difficulty, -mean_err), y = mean_err, fill = reorder(JudgmentCondition, -mean_err)) +
      geom_col(position = "dodge") +
      xlab("Item Difficulty") + ylab("Proportion Lure Reported") + labs(fill = "Judgment Group") +
      theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
      geom_errorbar(aes(ymin = mean_err - se_error, ymax = mean_err + se_error), position = position_dodge(width = 0.9), width = .1) +
      geom_text(aes(label = round(mean_err, digits = 2)), position = position_dodge(width = .9), vjust = -3) +
      scale_fill_manual(values = c("#f0f0f0", "#bdbdbd", "#636363")) + ylim(0, .25)

print(Evaluation_Exp3_Figure5)

ggsave(Evaluation_Exp3_Figure5, file = "Evaluation-Exp3-Error.jpeg", width = 8, height = 4)

Correct answer responses by accuracy-only, interest-only, and mixed judgment (Figure 6)

# summary2 is already aggregated, so plot the condition means directly with geom_col
Evaluation_Exp3_Figure6 <- summary2 %>%
  ggplot(.) + aes(x = reorder(Difficulty, -mean_err), y = mean_corr, fill = reorder(JudgmentCondition, -mean_err)) +
      geom_col(position = "dodge") +
      xlab("Item Difficulty") + ylab("Proportion Correct Answer Reported") + labs(fill = "Judgment Group") +
      theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
      geom_errorbar(aes(ymin = mean_corr - se_corr, ymax = mean_corr + se_corr), position = position_dodge(width = 0.9), width = .1) +
      geom_text(aes(label = round(mean_corr, digits = 2)), position = position_dodge(width = .9), vjust = -1.7) +
      scale_fill_manual(values = c("#f0f0f0", "#bdbdbd", "#636363"))

print(Evaluation_Exp3_Figure6)

ggsave(Evaluation_Exp3_Figure6, file = "Evaluation-Exp3-Correct.jpeg", width = 8, height = 4)

Model outputs using post-hoc exploratory orthogonal contrasts (recommended by Reviewer 3)

# incorrect lure responses

modelincorrectcontrasts <- data %>%
  filter(Validity == "Inaccurate") %>%
  dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",-.5,.5)) %>%
  dplyr::mutate(Contrast1 = ifelse(JudgmentCondition == "Interest-Only",2,-1)) %>%
  dplyr::mutate(Contrast2 = ifelse(JudgmentCondition == "Accuracy-Only",-1,ifelse(JudgmentCondition == "Mixed Judgment",1,0))) %>%
  glmer(Error ~ Contrast1*Difficulty + Contrast2*Difficulty + (1 + Difficulty | ID) + (1 | Item), ., family = binomial, control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))

summary(modelincorrectcontrasts)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Error ~ Contrast1 * Difficulty + Contrast2 * Difficulty + (1 +  
##     Difficulty | ID) + (1 | Item)
##    Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   6535.7   6606.7  -3257.8   6515.7     8950 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.9678 -0.4041 -0.2576 -0.1464  7.0909 
## 
## Random effects:
##  Groups Name        Variance Std.Dev. Corr 
##  ID     (Intercept) 0.7675   0.8761        
##         Difficulty  0.7921   0.8900   -0.14
##  Item   (Intercept) 0.8091   0.8995        
## Number of obs: 8960, groups:  ID, 224; Item, 80
## 
## Fixed effects:
##                       Estimate Std. Error z value Pr(>|z|)    
## (Intercept)          -2.360059   0.125841 -18.754  < 2e-16 ***
## Contrast1             0.196514   0.047860   4.106 4.03e-05 ***
## Difficulty            1.027573   0.229207   4.483 7.35e-06 ***
## Contrast2             0.028684   0.086912   0.330    0.741    
## Contrast1:Difficulty -0.252381   0.063565  -3.970 7.17e-05 ***
## Difficulty:Contrast2 -0.003162   0.119344  -0.026    0.979    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Cntrs1 Dffclt Cntrs2 Cnt1:D
## Contrast1   -0.050                            
## Difficulty  -0.077  0.034                     
## Contrast2    0.022 -0.023 -0.006              
## Cntrst1:Dff  0.047 -0.172 -0.066  0.005       
## Dffclty:Cn2 -0.009  0.005  0.020 -0.226 -0.022
tab_model(modelincorrectcontrasts)
Dependent variable: Error

Predictors                      Odds Ratios   CI            p
(Intercept)                     0.09          0.07 – 0.12   <0.001
Contrast1                       1.22          1.11 – 1.34   <0.001
Difficulty                      2.79          1.78 – 4.38   <0.001
Contrast2                       1.03          0.87 – 1.22   0.741
Contrast1 * Difficulty          0.78          0.69 – 0.88   <0.001
Difficulty * Contrast2          1.00          0.79 – 1.26   0.979

Random Effects
σ2                              3.29
τ00 ID                          0.77
τ00 Item                        0.81
τ11 ID.Difficulty               0.79
ρ01 ID                          -0.14
ICC                             0.35
N ID                            224
N Item                          80
Observations                    8960
Marginal R2 / Conditional R2    0.068 / 0.395
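
The two planned comparisons above are orthogonal by construction; this can be checked from the contrast codes used in the incorrect lure model (rows ordered Interest-Only, Accuracy-Only, Mixed Judgment; the sign flips used in the correct answer model below do not affect orthogonality):

# Contrast codes, one row per judgment condition
contrast_codes <- cbind(
  Contrast1 = c(2, -1, -1),   # Interest-Only vs. the other two conditions
  Contrast2 = c(0, -1, 1)     # Accuracy-Only vs. Mixed Judgment
)
# Zero off-diagonal entries confirm the two contrasts are orthogonal
crossprod(contrast_codes)
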
# correct answer responses

modelcorrectcontrasts <- data %>%
  filter(Validity == "Inaccurate") %>%
  dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",.5,-.5)) %>%
  dplyr::mutate(Contrast1 = ifelse(JudgmentCondition == "Interest-Only",-2,1)) %>%
  dplyr::mutate(Contrast2 = ifelse(JudgmentCondition == "Accuracy-Only",1,ifelse(JudgmentCondition == "Mixed Judgment",-1,0))) %>%
  glmer(Correct ~ Contrast1*Difficulty + Contrast2*Difficulty + (1 + Difficulty | ID) + (1 | Item), ., family = binomial, control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))

summary(modelcorrectcontrasts)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: Correct ~ Contrast1 * Difficulty + Contrast2 * Difficulty + (1 +  
##     Difficulty | ID) + (1 | Item)
##    Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
## 
##      AIC      BIC   logLik deviance df.resid 
##   8807.7   8878.7  -4393.8   8787.7     8950 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -6.1236 -0.5111 -0.0784  0.5137  4.8019 
## 
## Random effects:
##  Groups Name        Variance Std.Dev. Corr 
##  ID     (Intercept) 2.0106   1.418         
##         Difficulty  1.7872   1.337    -0.09
##  Item   (Intercept) 0.3981   0.631         
## Number of obs: 8960, groups:  ID, 224; Item, 80
## 
## Fixed effects:
##                      Estimate Std. Error z value Pr(>|z|)    
## (Intercept)          -0.03812    0.12296  -0.310 0.756574    
## Contrast1             0.14365    0.06984   2.057 0.039703 *  
## Difficulty            2.55061    0.18014  14.159  < 2e-16 ***
## Contrast2            -0.01263    0.12335  -0.102 0.918420    
## Contrast1:Difficulty  0.26811    0.07602   3.527 0.000421 ***
## Difficulty:Contrast2  0.06774    0.13569   0.499 0.617620    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) Cntrs1 Dffclt Cntrs2 Cnt1:D
## Contrast1    0.024                            
## Difficulty  -0.043  0.008                     
## Contrast2   -0.024 -0.021  0.000              
## Cntrst1:Dff  0.011 -0.086  0.029  0.001       
## Dffclty:Cn2  0.000  0.001 -0.020 -0.070 -0.024
tab_model(modelcorrectcontrasts)
Dependent variable: Correct

Predictors                      Odds Ratios   CI             p
(Intercept)                     0.96          0.76 – 1.22    0.757
Contrast1                       1.15          1.01 – 1.32    0.040
Difficulty                      12.81         9.00 – 18.24   <0.001
Contrast2                       0.99          0.78 – 1.26    0.918
Contrast1 * Difficulty          1.31          1.13 – 1.52    <0.001
Difficulty * Contrast2          1.07          0.82 – 1.40    0.618

Random Effects
σ2                              3.29
τ00 ID                          2.01
τ00 Item                        0.40
τ11 ID.Difficulty               1.79
ρ01 ID                          -0.09
ICC                             0.46
N ID                            224
N Item                          80
Observations                    8960
Marginal R2 / Conditional R2    0.216 / 0.581