Load data
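The source does not show a setup chunk, so the packages below are assumed from the functions called later (dplyr/ggplot2 verbs, glmer(), tab_model(), emmeans()); a minimal sketch of the likely requirements:

library(dplyr)    # group_by(), summarise(), mutate(), filter(), %>%
library(ggplot2)  # figures
library(lme4)     # glmer() mixed-effects models
library(sjPlot)   # tab_model() regression tables
library(emmeans)  # simple-effects contrasts
# readr is called via readr:: below and only needs to be installed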
data <- readr::read_csv("metacog-exp2-longdata.csv", col_names = TRUE)
## Parsed with column specification:
## cols(
## .default = col_character(),
## Progress = col_double(),
## `Duration (in seconds)` = col_double(),
## `%False` = col_double(),
## ParticipantNumber = col_double(),
## Version = col_double(),
## Age = col_double(),
## Row = col_double(),
## Correct = col_double(),
## Error = col_double(),
## Unsure = col_double()
## )
## See spec(...) for full column specifications.
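On re-runs the parsing message can be silenced by passing the printed spec back explicitly; a sketch based on the columns listed above:

data <- readr::read_csv(
  "metacog-exp2-longdata.csv",
  col_types = readr::cols(
    .default = readr::col_character(),
    Progress = readr::col_double(),
    `Duration (in seconds)` = readr::col_double(),
    `%False` = readr::col_double(),
    ParticipantNumber = readr::col_double(),
    Version = readr::col_double(),
    Age = readr::col_double(),
    Row = readr::col_double(),
    Correct = readr::col_double(),
    Error = readr::col_double(),
    Unsure = readr::col_double()
  )
)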
Descriptive statistics for incorrect lure and correct answer responses (summary table)
summary_table_exp2 <- data %>%
group_by(ID,Validity,Difficulty,Instructions) %>%
summarise(
mean_error = mean(Error, na.rm = TRUE),
mean_correct = mean(Correct, na.rm = TRUE)
) %>%
group_by(Instructions,Validity,Difficulty) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
)
## `summarise()` has grouped output by 'ID', 'Validity', 'Difficulty'. You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Instructions', 'Validity'. You can override using the `.groups` argument.
print(summary_table_exp2)
## # A tibble: 8 x 9
## # Groups: Instructions, Validity [4]
## Instructions Validity Difficulty mean_err sd_err se_error mean_corr sd_corr
## <chr> <chr> <chr> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 Control Accurate Easy 0.00962 0.0243 0.00275 0.876 0.181
## 2 Control Accurate Hard 0.0359 0.0321 0.00364 0.35 0.204
## 3 Control Inaccurate Easy 0.180 0.206 0.0233 0.645 0.266
## 4 Control Inaccurate Hard 0.240 0.183 0.0208 0.137 0.147
## 5 Disclose Accurate Easy 0.00380 0.0156 0.00175 0.915 0.147
## 6 Disclose Accurate Hard 0.0335 0.0364 0.00410 0.358 0.236
## 7 Disclose Inaccurate Easy 0.0981 0.0969 0.0109 0.760 0.202
## 8 Disclose Inaccurate Hard 0.218 0.156 0.0175 0.176 0.161
## # … with 1 more variable: se_corr <dbl>
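For the manuscript table the same summary can be rendered directly; a minimal sketch assuming knitr is available in the rendering toolchain:

knitr::kable(summary_table_exp2, digits = 3)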
Models to analyze incorrect lure responses
modelerror1 <- data %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",-.5,.5)) %>%
dplyr::mutate(Validity = ifelse(Validity == "Accurate",-.5,.5)) %>%
glmer(Error ~ Validity * Difficulty + (1 + Difficulty + Validity | ID) + (1 | Item),
      data = ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5)))
## boundary (singular) fit: see ?isSingular
summary(modelerror1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Error ~ Validity * Difficulty + (1 + Difficulty + Validity |
## ID) + (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 6010.4 6092.2 -2994.2 5988.4 12549
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.0774 -0.2698 -0.1411 -0.0610 20.1368
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 0.4443 0.6665
## Difficulty 0.8782 0.9371 -0.31
## Validity 1.2028 1.0967 0.92 0.08
## Item (Intercept) 1.1176 1.0572
## Number of obs: 12560, groups: ID, 157; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.4877 0.1494 -23.338 < 2e-16 ***
## Validity 2.6017 0.1574 16.531 < 2e-16 ***
## Difficulty 1.1947 0.2889 4.135 3.56e-05 ***
## Validity:Difficulty -0.9055 0.2742 -3.302 0.00096 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Valdty Dffclt
## Validity -0.114
## Difficulty -0.162 0.231
## Vldty:Dffcl 0.252 -0.499 -0.388
## convergence code: 0
## boundary (singular) fit: see ?isSingular
tab_model(modelerror1)
Response: Error

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 0.03 | 0.02 – 0.04 | <0.001 |
| Validity | 13.49 | 9.91 – 18.36 | <0.001 |
| Difficulty | 3.30 | 1.87 – 5.82 | <0.001 |
| Validity * Difficulty | 0.40 | 0.24 – 0.69 | 0.001 |
| Random Effects | | | |
| σ² | 3.29 | | |
| τ₀₀ ID | 0.44 | | |
| τ₀₀ Item | 1.12 | | |
| τ₁₁ ID.Difficulty | 0.88 | | |
| τ₁₁ ID.Validity | 1.20 | | |
| ρ₀₁ ID.Difficulty | -0.31 | | |
| ρ₀₁ ID.Validity | 0.92 | | |
| N ID | 157 | | |
| N Item | 80 | | |
| Observations | 12560 | | |
| Marginal R² / Conditional R² | 0.390 / NA | | |
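The singular-fit message for modelerror1 indicates that the estimated random-effects covariance sits on the boundary (the ID-level correlation between the intercept and Validity is 0.92, and the conditional R² is reported as NA). One way to probe this, assuming the modelerror1 object above, is to confirm singularity and refit with the random-effect correlations dropped via the double-bar syntax; this is a sketch (the name modelerror1_nocorr is hypothetical), not the model reported in the output:

isSingular(modelerror1)  # TRUE when a variance or correlation is at the boundary

modelerror1_nocorr <- data %>%
  dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy", -.5, .5)) %>%
  dplyr::mutate(Validity = ifelse(Validity == "Accurate", -.5, .5)) %>%
  glmer(Error ~ Validity * Difficulty + (1 + Difficulty + Validity || ID) + (1 | Item),
        data = ., family = binomial,
        control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5)))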
modelerror2 <- data %>%
dplyr::filter(Validity == "Inaccurate") %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",-.5,.5)) %>%
dplyr::mutate(Instructions = ifelse(Instructions == "Control",.5,-.5)) %>%
glmer(Error ~ Difficulty * Instructions + (1 + Difficulty | ID) + (1 | Item),
      data = ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5)))
summary(modelerror2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Error ~ Difficulty * Instructions + (1 + Difficulty | ID) + (1 |
## Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 5009.3 5063.3 -2496.7 4993.3 6272
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.2561 -0.4253 -0.2517 -0.1266 7.1422
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 1.2992 1.1398
## Difficulty 0.8646 0.9298 -0.21
## Item (Intercept) 0.9766 0.9882
## Number of obs: 6280, groups: ID, 157; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.1609 0.1528 -14.138 < 2e-16 ***
## Difficulty 0.8062 0.2547 3.165 0.00155 **
## Instructions 0.3700 0.2014 1.837 0.06615 .
## Difficulty:Instructions -0.5646 0.2213 -2.551 0.01074 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Dffclt Instrc
## Difficulty -0.069
## Instructins -0.005 0.016
## Dffclty:Ins 0.021 -0.018 -0.204
tab_model(modelerror2)
Response: Error

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 0.12 | 0.09 – 0.16 | <0.001 |
| Difficulty | 2.24 | 1.36 – 3.69 | 0.002 |
| Instructions | 1.45 | 0.98 – 2.15 | 0.066 |
| Difficulty * Instructions | 0.57 | 0.37 – 0.88 | 0.011 |
| Random Effects | | | |
| σ² | 3.29 | | |
| τ₀₀ ID | 1.30 | | |
| τ₀₀ Item | 0.98 | | |
| τ₁₁ ID.Difficulty | 0.86 | | |
| ρ₀₁ ID | -0.21 | | |
| ICC | 0.43 | | |
| N ID | 157 | | |
| N Item | 80 | | |
| Observations | 6280 | | |
| Marginal R² / Conditional R² | 0.036 / 0.452 | | |
# tests of simple effects
emmeans(modelerror1, ~ Validity * Difficulty) %>%
pairs(., simple = "Validity", reverse = TRUE)
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 3.05 0.255 Inf 11.970 <.0001
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 2.15 0.148 Inf 14.481 <.0001
##
## Results are given on the log odds ratio (not the response) scale.
emmeans(modelerror2, ~ Instructions * Difficulty) %>%
pairs(., simple = "Instructions", reverse = TRUE)
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 0.6523 0.249 Inf 2.622 0.0087
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 0.0878 0.209 Inf 0.420 0.6747
##
## Results are given on the log odds ratio (not the response) scale.
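The contrasts above are reported on the log-odds scale. If odds ratios are preferred for reporting, the same contrasts can be back-transformed; a sketch, with output not shown in the source:

emmeans(modelerror1, ~ Validity * Difficulty) %>%
  pairs(simple = "Validity", reverse = TRUE) %>%
  summary(type = "response")

emmeans(modelerror2, ~ Instructions * Difficulty) %>%
  pairs(simple = "Instructions", reverse = TRUE) %>%
  summary(type = "response")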
Graphs of incorrect lure response rates
Exp2_Figure1.1 <- data %>%
group_by(ID,Validity,Difficulty,Instructions) %>%
summarise(
mean_error = mean(Error, na.rm = TRUE),
mean_correct = mean(Correct, na.rm = TRUE)
) %>%
group_by(Validity,Difficulty,Instructions) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
mutate(Instructions = ifelse(Instructions == "Disclose","Disclosure","No Disclosure")) %>%
#filter(Validity == "Inaccurate") %>%
ggplot(.) + aes(x = Validity, y = mean_err, fill = reorder(Instructions, -mean_err)) +
geom_bar(stat = "summary", fun = "mean", position = "dodge") +
xlab("Accuracy of Information") + ylab("Proportion Lure Reported") + labs(fill = "Instruction Condition") +
theme_classic() + theme(legend.position = "bottom") +
geom_errorbar(aes(ymin = mean_err - se_error, ymax = mean_err + se_error),
              position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_err, digits = 2)),
          position = position_dodge(width = .9), vjust = -2.5) +
scale_fill_manual(values = c("#f0f0f0", "#636363")) +
facet_wrap(vars(Difficulty)) + ylim(0, .27)
## `summarise()` has grouped output by 'ID', 'Validity', 'Difficulty'. You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Validity', 'Difficulty'. You can override using the `.groups` argument.
Exp2_Figure1.1
ggsave(Exp2_Figure1.1, file="INFO-Exp2-Error1.jpeg", width=6, height=4)
# Figure 3 in manuscript
Exp2_Figure1.2 <- data %>%
filter(Validity == "Inaccurate") %>%
group_by(ID,Difficulty,Instructions) %>%
summarise(
mean_error = mean(Error, na.rm = TRUE),
mean_correct = mean(Correct, na.rm = TRUE)
) %>%
group_by(Difficulty,Instructions) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
mutate(Instructions = ifelse(Instructions == "Disclose","Disclosure","No Disclosure")) %>%
#filter(Validity == "Inaccurate") %>%
ggplot(.) + aes(x = reorder(Difficulty, mean_err), y = mean_err, fill = reorder(Instructions, -mean_err)) +
geom_bar(stat = "summary", fun = "mean", position = "dodge") +
xlab("Statement Difficulty") + ylab("Proportion Lure Reported") + labs(fill = "Instruction Condition") +
theme_classic() + theme(legend.position = "bottom") +
geom_errorbar(aes(ymin = mean_err - se_error, ymax = mean_err + se_error),
              position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_err, digits = 2)),
          position = position_dodge(width = .9), vjust = 4) +
scale_fill_manual(values = c("#f0f0f0", "#636363"))
## `summarise()` has grouped output by 'ID', 'Difficulty'. You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Difficulty'. You can override using the `.groups` argument.
Exp2_Figure1.2
ggsave(Exp2_Figure1.2, file="INFO-Exp2-Error2.jpeg", width=6, height=4)
Models to analyze correct answer responses
modelcorrect1 <- data %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",.5,-.5)) %>%
dplyr::mutate(Validity = ifelse(Validity == "Accurate",.5,-.5)) %>%
glmer(Correct ~ Validity * Difficulty + (1 + Difficulty + Validity | ID) + (1 | Item),
      data = ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5)))
summary(modelcorrect1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Correct ~ Validity * Difficulty + (1 + Difficulty + Validity |
## ID) + (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 10383.5 10465.3 -5180.7 10361.5 12549
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.9812 -0.4135 0.1028 0.4034 10.7860
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 1.3338 1.1549
## Difficulty 0.8812 0.9387 0.33
## Validity 0.7511 0.8666 0.08 0.08
## Item (Intercept) 0.8724 0.9340
## Number of obs: 12560, groups: ID, 157; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.22816 0.14339 1.591 0.1116
## Validity 1.62493 0.09104 17.848 <2e-16 ***
## Difficulty 3.72203 0.23228 16.024 <2e-16 ***
## Validity:Difficulty 0.27125 0.12589 2.155 0.0312 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Valdty Dffclt
## Validity 0.056
## Difficulty 0.078 0.094
## Vldty:Dffcl 0.099 0.089 0.033
tab_model(modelcorrect1)
Response: Correct

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 1.26 | 0.95 – 1.66 | 0.112 |
| Validity | 5.08 | 4.25 – 6.07 | <0.001 |
| Difficulty | 41.35 | 26.23 – 65.19 | <0.001 |
| Validity * Difficulty | 1.31 | 1.02 – 1.68 | 0.031 |
| Random Effects | | | |
| σ² | 3.29 | | |
| τ₀₀ ID | 1.33 | | |
| τ₀₀ Item | 0.87 | | |
| τ₁₁ ID.Difficulty | 0.88 | | |
| τ₁₁ ID.Validity | 0.75 | | |
| ρ₀₁ ID.Difficulty | 0.33 | | |
| ρ₀₁ ID.Validity | 0.08 | | |
| ICC | 0.44 | | |
| N ID | 157 | | |
| N Item | 80 | | |
| Observations | 12560 | | |
| Marginal R² / Conditional R² | 0.412 / 0.672 | | |
modelcorrect2 <- data %>%
dplyr::filter(Validity == "Inaccurate") %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",.5,-.5)) %>%
dplyr::mutate(Instructions = ifelse(Instructions == "Control",-.5,.5)) %>%
glmer(Correct ~ Difficulty * Instructions + (1 + Difficulty | ID) + (1 | Item),
      data = ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5)))
summary(modelcorrect2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Correct ~ Difficulty * Instructions + (1 + Difficulty | ID) +
## (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 5436.7 5490.7 -2710.4 5420.7 6272
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.8280 -0.4057 -0.1424 0.4388 7.8252
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 1.4282 1.1951
## Difficulty 0.6842 0.8271 0.12
## Item (Intercept) 0.8119 0.9010
## Number of obs: 6280, groups: ID, 157; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.6080 0.1470 -4.136 3.53e-05 ***
## Difficulty 3.6248 0.2334 15.528 < 2e-16 ***
## Instructions 0.5723 0.2063 2.774 0.00554 **
## Difficulty:Instructions 0.2813 0.2085 1.349 0.17720
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Dffclt Instrc
## Difficulty -0.036
## Instructins -0.012 0.021
## Dffclty:Ins 0.038 -0.019 0.010
tab_model(modelcorrect2)
Response: Correct

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 0.54 | 0.41 – 0.73 | <0.001 |
| Difficulty | 37.52 | 23.74 – 59.28 | <0.001 |
| Instructions | 1.77 | 1.18 – 2.66 | 0.006 |
| Difficulty * Instructions | 1.32 | 0.88 – 1.99 | 0.177 |
| Random Effects | | | |
| σ² | 3.29 | | |
| τ₀₀ ID | 1.43 | | |
| τ₀₀ Item | 0.81 | | |
| τ₁₁ ID.Difficulty | 0.68 | | |
| ρ₀₁ ID | 0.12 | | |
| ICC | 0.42 | | |
| N ID | 157 | | |
| N Item | 80 | | |
| Observations | 6280 | | |
| Marginal R² / Conditional R² | 0.372 / 0.637 | | |
# tests of simple effects
emmeans(modelcorrect1, ~ Validity * Difficulty) %>%
pairs(., simple = "Validity", reverse = TRUE)
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 1.49 0.106 Inf 14.054 <.0001
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 1.76 0.115 Inf 15.282 <.0001
##
## Results are given on the log odds ratio (not the response) scale.
emmeans(modelcorrect2, ~ Instructions * Difficulty) %>%
pairs(., simple = "Instructions", reverse = TRUE)
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 0.432 0.230 Inf 1.875 0.0608
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - -0.5 0.713 0.232 Inf 3.072 0.0021
##
## Results are given on the log odds ratio (not the response) scale.
Graphs of correct response rates
Exp2_Figure2.1 <- data %>%
group_by(ID,Validity,Difficulty,Instructions) %>%
summarise(
mean_error = mean(Error, na.rm = TRUE),
mean_correct = mean(Correct, na.rm = TRUE)
) %>%
group_by(Validity,Difficulty,Instructions) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
mutate(Instructions = ifelse(Instructions == "Disclose","Disclosure","No Disclosure")) %>%
ggplot(.) + aes(x = Validity, y = mean_corr, fill = reorder(Instructions, -mean_err)) +
geom_bar(stat = "summary", fun = "mean", position = "dodge") +
xlab("Accuracy of Information") + ylab("Proportion Correct Answer Reported") + labs(fill = "Instruction Condition") +
theme_classic() + theme(legend.position = "bottom") +
geom_errorbar(aes(ymin = mean_corr - se_corr, ymax = mean_corr + se_corr),
              position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_corr, digits = 2)),
          position = position_dodge(width = .9), vjust = 2.5) +
scale_fill_manual(values = c("#f0f0f0", "#636363")) +
facet_wrap(vars(Difficulty))
## `summarise()` has grouped output by 'ID', 'Validity', 'Difficulty'. You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Validity', 'Difficulty'. You can override using the `.groups` argument.
Exp2_Figure2.1
ggsave(Exp2_Figure2.1, file="INFO-Exp2-Correct1.jpeg", width=6, height=4)
# Figure 4 in manuscript
Exp2_Figure2.2 <- data %>%
filter(Validity == "Inaccurate") %>%
group_by(ID,Difficulty,Instructions) %>%
summarise(
mean_error = mean(Error, na.rm = TRUE),
mean_correct = mean(Correct, na.rm = TRUE)
) %>%
group_by(Difficulty,Instructions) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
mutate(Instructions = ifelse(Instructions == "Disclose","Disclosure","No Disclosure")) %>%
ggplot(.) + aes(x = reorder(Difficulty, mean_err), y = mean_corr, fill = reorder(Instructions, -mean_err)) +
geom_bar(stat = "summary", fun = "mean", position = "dodge") +
xlab("Statement Difficulty") + ylab("Proportion Correct Answer Reported") + labs(fill = "Instruction Condition") +
theme_classic() + theme(legend.position = "bottom") +
geom_errorbar(aes(ymin = mean_corr - se_corr, ymax = mean_corr + se_corr),
              position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_corr, digits = 2)),
          position = position_dodge(width = .9), vjust = 3) +
scale_fill_manual(values = c("#f0f0f0", "#636363"))
## `summarise()` has grouped output by 'ID', 'Difficulty'. You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Difficulty'. You can override using the `.groups` argument.
Exp2_Figure2.2
ggsave(Exp2_Figure2.2, file="INFO-Exp2-Correct2.jpeg", width=6, height=4)