Load data
data <- readr::read_csv("feedback-exp1-longdata.csv", col_names = TRUE)
## Rows: 18400 Columns: 13
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (6): LookUp, Feedback, Item, Response, Difficulty, Validity
## dbl (7): Duration (in seconds), ID, Version, Correct, Error, Unsure, Time
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
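The column types above were guessed by readr; if the specification message is unwanted, one hedged option (a sketch, using the column names from the spec printed above) is to suppress it or declare the types up front:

# Option A (sketch): suppress the specification message
data <- readr::read_csv("feedback-exp1-longdata.csv", show_col_types = FALSE)
# Option B (sketch): declare column types explicitly (names taken from the spec above)
data <- readr::read_csv(
  "feedback-exp1-longdata.csv",
  col_types = readr::cols(
    `Duration (in seconds)` = readr::col_double(),
    ID = readr::col_double(),
    Version = readr::col_double(),
    Correct = readr::col_double(),
    Error = readr::col_double(),
    Unsure = readr::col_double(),
    Time = readr::col_double(),
    .default = readr::col_character()
  )
)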
Descriptive statistics for incorrect lure and correct answer responses (summary table)
summary_table_exp1 <- data %>%
group_by(ID,Feedback,Validity,Difficulty,Time) %>%
summarise(
mean_error = mean(Error,na.rm=TRUE),
mean_correct = mean(Correct, na.rm=TRUE)
) %>%
group_by(Feedback,Time,Validity,Difficulty) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
)
## `summarise()` has grouped output by 'ID', 'Feedback', 'Validity', 'Difficulty'.
## You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Feedback', 'Time', 'Validity'. You can
## override using the `.groups` argument.
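The `summarise()` messages above are informational; a hedged alternative that silences them and returns an ungrouped result is to pass the `.groups` argument explicitly, for example:

# Sketch: same participant-level means, with explicit .groups handling
data %>%
  group_by(ID, Feedback, Validity, Difficulty, Time) %>%
  summarise(
    mean_error = mean(Error, na.rm = TRUE),
    mean_correct = mean(Correct, na.rm = TRUE),
    .groups = "drop"
  )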
print(summary_table_exp1)
## # A tibble: 24 × 10
## # Groups: Feedback, Time, Validity [12]
## Feedback Time Validity Difficulty mean_err sd_err se_error mean_corr sd_corr
## <chr> <dbl> <chr> <chr> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 Negative 1 Accurate Easy 0.0025 0.0157 0.00176 0.925 0.166
## 2 Negative 1 Accurate Hard 0.0275 0.0449 0.00502 0.466 0.253
## 3 Negative 1 Inaccur… Easy 0.300 0.306 0.0343 0.534 0.328
## 4 Negative 1 Inaccur… Hard 0.361 0.253 0.0283 0.144 0.145
## 5 Negative 2 Accurate Easy 0.0113 0.0318 0.00356 0.926 0.146
## 6 Negative 2 Accurate Hard 0.0375 0.0644 0.00720 0.446 0.266
## 7 Negative 2 Inaccur… Easy 0.164 0.227 0.0254 0.741 0.260
## 8 Negative 2 Inaccur… Hard 0.276 0.244 0.0273 0.202 0.192
## 9 No Feed… 1 Accurate Easy 0.00270 0.0163 0.00190 0.949 0.0667
## 10 No Feed… 1 Accurate Hard 0.0311 0.0466 0.00542 0.480 0.269
## # … with 14 more rows, and 1 more variable: se_corr <dbl>
Set the reference condition for Feedback
data <- within(data, Feedback <- as.factor(Feedback))
data <- within(data, Feedback <- relevel(Feedback, ref = "No Feedback"))
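A quick check (sketch) that "No Feedback" is now the reference level of the Feedback factor used in the models below:

levels(data$Feedback)
# Expected: "No Feedback" listed first, e.g. "No Feedback" "Negative" "Positive"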
Models to analyze incorrect lure responses
modelerror1 <- data %>%
dplyr::mutate(Validity = ifelse(Validity == "Accurate",-.5,.5)) %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",-.5,.5)) %>%
glmer(Error ~ Validity*Difficulty + (1 + Difficulty + Validity | ID) + (1 | Item),
      ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))
## boundary (singular) fit: see help('isSingular')
summary(modelerror1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Error ~ Validity * Difficulty + (1 + Difficulty + Validity |
## ID) + (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 10733.9 10819.9 -5355.9 10711.9 18389
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.142 -0.298 -0.127 -0.047 32.534
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 0.5819 0.7628
## Difficulty 0.8698 0.9327 -0.50
## Validity 1.0403 1.0199 0.98 -0.33
## Item (Intercept) 1.0399 1.0197
## Number of obs: 18400, groups: ID, 230; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.0437 0.1392 -21.862 < 2e-16 ***
## Validity 3.6656 0.1373 26.705 < 2e-16 ***
## Difficulty 1.2879 0.2690 4.788 1.69e-06 ***
## Validity:Difficulty -1.1058 0.2472 -4.473 7.72e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Valdty Dffclt
## Validity -0.156
## Difficulty -0.173 0.211
## Vldty:Dffcl 0.266 -0.552 -0.416
## optimizer (bobyqa) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
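The boundary (singular) fit reflects random-effects parameters estimated at or near the boundary (note the 0.98 correlation between the by-participant intercept and the Validity slope). A hedged follow-up, not the analysis reported here (the model name below is hypothetical), is to confirm singularity and refit with uncorrelated random slopes:

# Sketch: diagnose the singular fit and refit without random-effect correlations
isSingular(modelerror1)  # TRUE indicates a boundary (singular) fit
modelerror1_nocorr <- data %>%
  dplyr::mutate(Validity = ifelse(Validity == "Accurate", -.5, .5)) %>%
  dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy", -.5, .5)) %>%
  glmer(Error ~ Validity*Difficulty + (1 + Difficulty + Validity || ID) + (1 | Item),
        ., family = binomial,
        control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5)))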
tab_model(modelerror1)
Outcome: Error

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 0.05 | 0.04 – 0.06 | <0.001 |
| Validity | 39.08 | 29.86 – 51.14 | <0.001 |
| Difficulty | 3.63 | 2.14 – 6.14 | <0.001 |
| Validity * Difficulty | 0.33 | 0.20 – 0.54 | <0.001 |
| Random Effects | |||
| σ2 | 3.29 | ||
| τ00 ID | 0.58 | ||
| τ00 Item | 1.04 | ||
| τ11 ID.Difficulty | 0.87 | ||
| τ11 ID.Validity | 1.04 | ||
| ρ01 ID.Difficulty | -0.50 | ||
| ρ01 ID.Validity | 0.98 | ||
| N ID | 230 | ||
| N Item | 80 | ||
| Observations | 18400 | ||
| Marginal R2 / Conditional R2 | 0.539 / NA | ||
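tab_model() exponentiates the logit-scale coefficients, so the odds ratios above can be reproduced (sketch) directly from the fitted model; the NA conditional R2 likely reflects the singular fit noted above:

# Sketch: odds ratios and Wald confidence intervals from the fixed effects
exp(fixef(modelerror1))
exp(confint(modelerror1, parm = "beta_", method = "Wald"))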
modelerror2 <- data %>%
dplyr::filter(Validity == "Inaccurate") %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",-.5,.5)) %>%
dplyr::mutate(Time = ifelse(Time == "1",.5,-.5)) %>%
glmer(Error ~ Difficulty*Time*Feedback + (1 + Difficulty | ID) + (1 | Item),
      ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))
summary(modelerror2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Error ~ Difficulty * Time * Feedback + (1 + Difficulty | ID) +
## (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 9010.6 9124.6 -4489.3 8978.6 9183
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.8825 -0.5298 -0.2630 0.5659 8.9152
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 1.6897 1.2999
## Difficulty 0.9845 0.9922 -0.57
## Item (Intercept) 1.0693 1.0341
## Number of obs: 9199, groups: ID, 230; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.06608 0.19783 -5.389 7.09e-08 ***
## Difficulty 0.66794 0.27873 2.396 0.016558 *
## Time 0.31951 0.09401 3.399 0.000677 ***
## FeedbackNegative -0.48643 0.22299 -2.181 0.029153 *
## FeedbackPositive -0.15477 0.22463 -0.689 0.490823
## Difficulty:Time -0.43849 0.18780 -2.335 0.019547 *
## Difficulty:FeedbackNegative 0.07187 0.21640 0.332 0.739791
## Difficulty:FeedbackPositive 0.29686 0.21602 1.374 0.169379
## Time:FeedbackNegative 0.50820 0.13601 3.737 0.000187 ***
## Time:FeedbackPositive 0.38083 0.13470 2.827 0.004695 **
## Difficulty:Time:FeedbackNegative -0.13275 0.27128 -0.489 0.624592
## Difficulty:Time:FeedbackPositive -0.31949 0.26904 -1.187 0.235031
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Dffclt Time FdbckN FdbckP Dffc:T Dff:FN Dff:FP Tm:FdN
## Difficulty -0.202
## Time -0.014 0.017
## FeedbckNgtv -0.577 0.177 0.009
## FeedbckPstv -0.573 0.175 0.010 0.508
## Diffclty:Tm 0.012 -0.020 -0.095 -0.009 -0.009
## Dffclty:FdN 0.256 -0.382 -0.018 -0.450 -0.226 0.019
## Dffclty:FdP 0.257 -0.385 -0.019 -0.228 -0.456 0.021 0.497
## Tm:FdbckNgt 0.006 -0.009 -0.689 -0.032 -0.007 0.064 0.045 0.013
## Tm:FdbckPst 0.007 -0.009 -0.695 -0.007 -0.027 0.066 0.013 0.051 0.482
## Dffclt:T:FN -0.006 0.008 0.064 0.021 0.007 -0.690 -0.062 -0.015 -0.117
## Dffclt:T:FP -0.007 0.009 0.066 0.007 0.025 -0.696 -0.015 -0.056 -0.046
## Tm:FdP D:T:FN
## Difficulty
## Time
## FeedbckNgtv
## FeedbckPstv
## Diffclty:Tm
## Dffclty:FdN
## Dffclty:FdP
## Tm:FdbckNgt
## Tm:FdbckPst
## Dffclt:T:FN -0.046
## Dffclt:T:FP -0.136 0.483
tab_model(modelerror2)
Outcome: Error

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 0.34 | 0.23 – 0.51 | <0.001 |
| Difficulty | 1.95 | 1.13 – 3.37 | 0.017 |
| Time | 1.38 | 1.14 – 1.65 | 0.001 |
| Feedback [Negative] | 0.61 | 0.40 – 0.95 | 0.029 |
| Feedback [Positive] | 0.86 | 0.55 – 1.33 | 0.491 |
| Difficulty * Time | 0.65 | 0.45 – 0.93 | 0.020 |
| Difficulty * Feedback [Negative] | 1.07 | 0.70 – 1.64 | 0.740 |
| Difficulty * Feedback [Positive] | 1.35 | 0.88 – 2.05 | 0.169 |
| Time * Feedback [Negative] | 1.66 | 1.27 – 2.17 | <0.001 |
| Time * Feedback [Positive] | 1.46 | 1.12 – 1.91 | 0.005 |
| (Difficulty * Time) * Feedback [Negative] | 0.88 | 0.51 – 1.49 | 0.625 |
| (Difficulty * Time) * Feedback [Positive] | 0.73 | 0.43 – 1.23 | 0.235 |
| Random Effects | |||
| σ2 | 3.29 | ||
| τ00 ID | 1.69 | ||
| τ00 Item | 1.07 | ||
| τ11 ID.Difficulty | 0.98 | ||
| ρ01 ID | -0.57 | ||
| ICC | 0.48 | ||
| N ID | 230 | ||
| N Item | 80 | ||
| Observations | 9199 | ||
| Marginal R2 / Conditional R2 | 0.050 / 0.504 | ||
# tests of simple effects
emmeans(modelerror1, ~ Validity * Difficulty) %>%
pairs(., simple = "Validity", reverse = TRUE)
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 4.22 0.230 Inf 18.349 <.0001
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 3.11 0.124 Inf 25.091 <.0001
##
## Results are given on the log odds ratio (not the response) scale.
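These contrasts are on the log-odds-ratio scale; if odds ratios are preferred, a hedged variant back-transforms the same contrasts with type = "response" (the same applies to the emmeans calls below):

# Sketch: the same simple-effect contrasts, expressed as odds ratios
emmeans(modelerror1, ~ Validity * Difficulty) %>%
  pairs(simple = "Validity", reverse = TRUE) %>%
  summary(type = "response", infer = TRUE)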
emmeans(modelerror2, ~ Difficulty*Time) %>%
pairs(., simple = "Time", reverse = TRUE)
## NOTE: Results may be misleading due to involvement in interactions
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 0.910 0.0843 Inf 10.800 <.0001
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 0.321 0.0732 Inf 4.387 <.0001
##
## Results are averaged over the levels of: Feedback
## Results are given on the log odds ratio (not the response) scale.
emmeans(modelerror2, ~ Feedback*Time) %>%
pairs(., simple = "Feedback", reverse = FALSE)
## NOTE: Results may be misleading due to involvement in interactions
## Time = -0.5:
## contrast estimate SE df z.ratio p.value
## No Feedback - Negative 0.7405 0.235 Inf 3.148 0.0047
## No Feedback - Positive 0.3452 0.236 Inf 1.461 0.3098
## Negative - Positive -0.3953 0.235 Inf -1.680 0.2129
##
## Time = 0.5:
## contrast estimate SE df z.ratio p.value
## No Feedback - Negative 0.2323 0.231 Inf 1.006 0.5733
## No Feedback - Positive -0.0356 0.233 Inf -0.153 0.9872
## Negative - Positive -0.2680 0.229 Inf -1.168 0.4725
##
## Results are averaged over the levels of: Difficulty
## Results are given on the log odds ratio (not the response) scale.
## P value adjustment: tukey method for comparing a family of 3 estimates
emmeans(modelerror2, ~ Time*Feedback | Difficulty) %>%
contrast(interaction = "pairwise")
## Difficulty = -0.5:
## Time_pairwise Feedback_pairwise estimate SE df z.ratio p.value
## (-0.5) - 0.5 No Feedback - Negative 0.575 0.203 Inf 2.831 0.0046
## (-0.5) - 0.5 No Feedback - Positive 0.541 0.203 Inf 2.664 0.0077
## (-0.5) - 0.5 Negative - Positive -0.034 0.209 Inf -0.163 0.8709
##
## Difficulty = 0.5:
## Time_pairwise Feedback_pairwise estimate SE df z.ratio p.value
## (-0.5) - 0.5 No Feedback - Negative 0.442 0.181 Inf 2.447 0.0144
## (-0.5) - 0.5 No Feedback - Positive 0.221 0.177 Inf 1.250 0.2114
## (-0.5) - 0.5 Negative - Positive -0.221 0.179 Inf -1.234 0.2171
##
## Results are given on the log odds ratio (not the response) scale.
Graphs of incorrect lure response rates
# Figure 1 in paper
Exp1_Figure1.1 <- data %>%
group_by(ID,Feedback,Validity,Difficulty, Time) %>%
summarise(
mean_error = mean(Error,na.rm=TRUE),
mean_correct = mean(Correct, na.rm=TRUE)
) %>%
group_by(Feedback,Validity,Difficulty, Time) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
filter(Validity == "Inaccurate") %>%
ggplot(.) + aes(x = Feedback, y = mean_err, fill = reorder(Time, -mean_err)) +
geom_bar(stat = "summary", fun = mean, position = "dodge") +
xlab("Feedback") + ylab("Proportion Lure Reported") + labs(fill = "Time") +
theme_classic() + theme(legend.position="bottom") +
geom_errorbar(aes(ymin = mean_err - se_error, ymax = mean_err + se_error), position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_err, digits = 2)), position = position_dodge(width = .9), vjust = 3.4) +
scale_x_discrete(labels = c("Bad" = "Negative", "Control" = "No Feedback", "Good" = "Positive")) +
scale_fill_manual(values = c("#f0f0f0", "#636363"), name = "Answers Pre- or Post-Feedback", labels = c("Pre-Feedback", "Post-Feedback")) +
facet_wrap(vars(Difficulty))
## `summarise()` has grouped output by 'ID', 'Feedback', 'Validity', 'Difficulty'.
## You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Feedback', 'Validity', 'Difficulty'. You
## can override using the `.groups` argument.
Exp1_Figure1.1
ggsave(Exp1_Figure1.1, file="INFO-Exp1-Error.jpeg", width=6, height=4)
# collapsing across difficulty
Exp1_Figure1.2 <- data %>%
group_by(ID,Feedback,Validity,Time) %>%
summarise(
mean_error = mean(Error,na.rm=TRUE),
mean_correct = mean(Correct, na.rm=TRUE)
) %>%
group_by(Feedback,Validity,Time) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
filter(Validity == "Inaccurate") %>%
ggplot(.) + aes(x = reorder(Feedback, -mean_err), y = mean_err, fill = reorder(Time, -mean_err)) +
geom_bar(stat = "summary", fun = mean, position = "dodge") +
xlab("Feedback") + ylab("Proportion Lure Reported") + labs(fill = "Time") +
theme_classic() + theme(legend.position="bottom") +
geom_errorbar(aes(ymin = mean_err - se_error, ymax = mean_err + se_error), position = position_dodge(width = .9), width = .1) +
geom_text(aes(label = round(mean_err, digits = 2)), position = position_dodge(width = .9), vjust = 3.4) +
scale_x_discrete(labels = c("Bad" = "Negative", "Control" = "No Feedback", "Good" = "Positive")) +
scale_fill_manual(values = c("#f0f0f0", "#636363"), name = "Answers Pre- or Post-Feedback", labels = c("Pre-Feedback", "Post-Feedback"))
## `summarise()` has grouped output by 'ID', 'Feedback', 'Validity'. You can
## override using the `.groups` argument.
## `summarise()` has grouped output by 'Feedback', 'Validity'. You can override
## using the `.groups` argument.
Exp1_Figure1.2
ggsave(Exp1_Figure1.2, file="INFO-Exp1-Error2.jpeg", width=6, height=4)
Models to analyze correct answer responses
modelcorrect1 <- data %>%
dplyr::mutate(Validity = ifelse(Validity == "Accurate",.5,-.5)) %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",.5,-.5)) %>%
glmer(Correct ~ Difficulty*Validity + (1 + Difficulty + Validity | ID) + (1 | Item),
      ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))
summary(modelcorrect1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Correct ~ Difficulty * Validity + (1 + Difficulty + Validity |
## ID) + (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 15531.7 15617.7 -7754.8 15509.7 18389
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -13.0449 -0.4337 0.1029 0.4192 8.3209
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 1.2507 1.1184
## Difficulty 0.5613 0.7492 0.29
## Validity 1.5172 1.2318 -0.11 -0.19
## Item (Intercept) 0.7005 0.8370
## Number of obs: 18400, groups: ID, 230; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.43263 0.12267 3.527 0.000421 ***
## Difficulty 3.24433 0.20174 16.082 < 2e-16 ***
## Validity 2.48677 0.09903 25.112 < 2e-16 ***
## Difficulty:Validity 0.81688 0.11457 7.130 1e-12 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Dffclt Valdty
## Difficulty 0.066
## Validity -0.010 0.049
## Dffclty:Vld 0.136 0.091 0.189
tab_model(modelcorrect1)
Outcome: Correct

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 1.54 | 1.21 – 1.96 | <0.001 |
| Difficulty | 25.64 | 17.27 – 38.08 | <0.001 |
| Validity | 12.02 | 9.90 – 14.60 | <0.001 |
| Difficulty * Validity | 2.26 | 1.81 – 2.83 | <0.001 |
| Random Effects | |||
| σ2 | 3.29 | ||
| τ00 ID | 1.25 | ||
| τ00 Item | 0.70 | ||
| τ11 ID.Difficulty | 0.56 | ||
| τ11 ID.Validity | 1.52 | ||
| ρ01 ID.Difficulty | 0.29 | ||
| ρ01 ID.Validity | -0.11 | ||
| ICC | 0.43 | ||
| N ID | 230 | ||
| N Item | 80 | ||
| Observations | 18400 | ||
| Marginal R2 / Conditional R2 | 0.423 / 0.670 | ||
modelcorrect2 <- data %>%
dplyr::filter(Validity == "Inaccurate") %>%
dplyr::mutate(Difficulty = ifelse(Difficulty == "Easy",.5,-.5)) %>%
dplyr::mutate(Time = ifelse(Time == "1",-.5,.5)) %>%
glmer(Correct ~ Difficulty*Feedback*Time + (1 + Difficulty | ID) + (1 | Item),
      ., family = binomial,
      control = glmerControl(optimizer = "bobyqa", optCtrl=list(maxfun=2e5)))
summary(modelcorrect2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Correct ~ Difficulty * Feedback * Time + (1 + Difficulty | ID) +
## (1 | Item)
## Data: .
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e+05))
##
## AIC BIC logLik deviance df.resid
## 7985.2 8099.2 -3976.6 7953.2 9183
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -7.4214 -0.4180 -0.1710 0.4216 7.1642
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## ID (Intercept) 2.0671 1.4377
## Difficulty 1.2684 1.1262 0.35
## Item (Intercept) 0.9222 0.9603
## Number of obs: 9199, groups: ID, 230; Item, 80
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.9853 0.2085 -4.727 2.28e-06 ***
## Difficulty 2.9033 0.2809 10.334 < 2e-16 ***
## FeedbackNegative 0.2511 0.2451 1.025 0.305483
## FeedbackPositive 0.1946 0.2487 0.782 0.433940
## Time 0.3788 0.1086 3.488 0.000487 ***
## Difficulty:FeedbackNegative 0.1886 0.2418 0.780 0.435293
## Difficulty:FeedbackPositive 0.2667 0.2472 1.079 0.280601
## Difficulty:Time 1.0422 0.2170 4.802 1.57e-06 ***
## FeedbackNegative:Time 0.6048 0.1495 4.044 5.25e-05 ***
## FeedbackPositive:Time 0.3386 0.1525 2.220 0.026392 *
## Difficulty:FeedbackNegative:Time -0.3102 0.2988 -1.038 0.299097
## Difficulty:FeedbackPositive:Time 0.2833 0.3049 0.929 0.352702
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) Dffclt FdbckN FdbckP Time Dff:FN Dff:FP Dffc:T FdbN:T
## Difficulty 0.088
## FeedbckNgtv -0.615 -0.088
## FeedbckPstv -0.603 -0.088 0.514
## Time 0.012 -0.001 -0.007 -0.006
## Dffclty:FdN -0.120 -0.457 0.203 0.101 -0.003
## Dffclty:FdP -0.119 -0.442 0.101 0.201 -0.004 0.516
## Diffclty:Tm -0.003 0.023 0.001 0.001 -0.209 -0.021 -0.021
## FdbckNgtv:T -0.012 0.007 0.000 0.005 -0.728 0.047 0.003 0.158
## FdbckPstv:T -0.008 0.002 0.005 0.010 -0.705 0.003 0.031 0.147 0.513
## Dffclt:FN:T 0.008 -0.023 0.018 -0.001 0.158 0.011 0.016 -0.728 -0.196
## Dffclt:FP:T 0.004 -0.017 -0.001 0.008 0.147 0.015 0.033 -0.704 -0.107
## FdbP:T D:FN:T
## Difficulty
## FeedbckNgtv
## FeedbckPstv
## Time
## Dffclty:FdN
## Dffclty:FdP
## Diffclty:Tm
## FdbckNgtv:T
## FdbckPstv:T
## Dffclt:FN:T -0.107
## Dffclt:FP:T -0.165 0.513
tab_model(modelcorrect2)
Outcome: Correct

| Predictors | Odds Ratios | CI | p |
|---|---|---|---|
| (Intercept) | 0.37 | 0.25 – 0.56 | <0.001 |
| Difficulty | 18.23 | 10.51 – 31.62 | <0.001 |
| Feedback [Negative] | 1.29 | 0.80 – 2.08 | 0.305 |
| Feedback [Positive] | 1.21 | 0.75 – 1.98 | 0.434 |
| Time | 1.46 | 1.18 – 1.81 | <0.001 |
| Difficulty * Feedback [Negative] | 1.21 | 0.75 – 1.94 | 0.435 |
| Difficulty * Feedback [Positive] | 1.31 | 0.80 – 2.12 | 0.281 |
| Difficulty * Time | 2.84 | 1.85 – 4.34 | <0.001 |
| Feedback [Negative] * Time | 1.83 | 1.37 – 2.45 | <0.001 |
| Feedback [Positive] * Time | 1.40 | 1.04 – 1.89 | 0.026 |
| (Difficulty * Feedback [Negative]) * Time | 0.73 | 0.41 – 1.32 | 0.299 |
| (Difficulty * Feedback [Positive]) * Time | 1.33 | 0.73 – 2.41 | 0.353 |
| Random Effects | |||
| σ2 | 3.29 | ||
| τ00 ID | 2.07 | ||
| τ00 Item | 0.92 | ||
| τ11 ID.Difficulty | 1.27 | ||
| ρ01 ID | 0.35 | ||
| ICC | 0.50 | ||
| N ID | 230 | ||
| N Item | 80 | ||
| Observations | 9199 | ||
| Marginal R2 / Conditional R2 | 0.280 / 0.641 | ||
# tests of simple effects
emmeans(modelcorrect1, ~ Validity * Difficulty) %>%
pairs(., simple = "Validity", reverse = TRUE)
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 2.08 0.105 Inf 19.870 <.0001
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 2.90 0.123 Inf 23.456 <.0001
##
## Results are given on the log odds ratio (not the response) scale.
emmeans(modelcorrect2, ~ Difficulty*Time) %>%
pairs(., simple = "Time", reverse = TRUE)
## NOTE: Results may be misleading due to involvement in interactions
## Difficulty = -0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 0.177 0.0937 Inf 1.886 0.0593
##
## Difficulty = 0.5:
## contrast estimate SE df z.ratio p.value
## 0.5 - (-0.5) 1.210 0.0803 Inf 15.063 <.0001
##
## Results are averaged over the levels of: Feedback
## Results are given on the log odds ratio (not the response) scale.
emmeans(modelcorrect2, ~ Feedback*Time) %>%
pairs(., simple = "Feedback", reverse = TRUE)
## NOTE: Results may be misleading due to involvement in interactions
## Time = -0.5:
## contrast estimate SE df z.ratio p.value
## Negative - No Feedback -0.0513 0.256 Inf -0.200 0.9782
## Positive - No Feedback 0.0253 0.259 Inf 0.097 0.9948
## Positive - Negative 0.0765 0.255 Inf 0.301 0.9514
##
## Time = 0.5:
## contrast estimate SE df z.ratio p.value
## Negative - No Feedback 0.5535 0.256 Inf 2.160 0.0781
## Positive - No Feedback 0.3639 0.261 Inf 1.395 0.3434
## Positive - Negative -0.1896 0.255 Inf -0.745 0.7369
##
## Results are averaged over the levels of: Difficulty
## Results are given on the log odds ratio (not the response) scale.
## P value adjustment: tukey method for comparing a family of 3 estimates
emmeans(modelcorrect2, ~ Time*Feedback | Difficulty) %>%
contrast(interaction = "pairwise")
## Difficulty = -0.5:
## Time_pairwise Feedback_pairwise estimate SE df z.ratio p.value
## (-0.5) - 0.5 No Feedback - Negative 0.7599 0.231 Inf 3.287 0.0010
## (-0.5) - 0.5 No Feedback - Positive 0.1969 0.233 Inf 0.846 0.3974
## (-0.5) - 0.5 Negative - Positive -0.5629 0.226 Inf -2.491 0.0127
##
## Difficulty = 0.5:
## Time_pairwise Feedback_pairwise estimate SE df z.ratio p.value
## (-0.5) - 0.5 No Feedback - Negative 0.4497 0.189 Inf 2.373 0.0176
## (-0.5) - 0.5 No Feedback - Positive 0.4803 0.197 Inf 2.437 0.0148
## (-0.5) - 0.5 Negative - Positive 0.0306 0.194 Inf 0.158 0.8746
##
## Results are given on the log odds ratio (not the response) scale.
Graphs of correct answer response rates
# Figure 2 in paper
Exp1_Figure2 <- data %>%
group_by(ID,Feedback,Validity,Difficulty,Time) %>%
summarise(
mean_error = mean(Error,na.rm=TRUE),
mean_correct = mean(Correct, na.rm=TRUE)
) %>%
group_by(Feedback,Validity,Difficulty,Time) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
filter(Validity == "Inaccurate") %>%
ggplot(.) + aes(x = Feedback, y = mean_corr, fill = reorder(Time, -mean_err)) +
geom_bar(stat = "summary", fun = mean, position = "dodge") +
xlab("Feedback") + ylab("Proportion Correct Answer Reported") + labs(fill = "Time") +
theme_classic() + theme(legend.position="bottom") +
geom_errorbar(aes(ymin = mean_corr - se_corr, ymax = mean_corr + se_corr), position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_corr, digits = 2)), position = position_dodge(width = .9), vjust = 2.2) +
scale_x_discrete(labels = c("Bad" = "Negative", "Control" = "No Feedback", "Good" = "Positive")) +
scale_fill_manual(values = c("#f0f0f0", "#636363"), name = "Answers Pre- or Post-Feedback", labels = c("Pre-Feedback", "Post-Feedback")) +
facet_wrap(vars(Difficulty))
## `summarise()` has grouped output by 'ID', 'Feedback', 'Validity', 'Difficulty'.
## You can override using the `.groups` argument.
## `summarise()` has grouped output by 'Feedback', 'Validity', 'Difficulty'. You
## can override using the `.groups` argument.
Exp1_Figure2
ggsave(Exp1_Figure2, file="INFO-Exp1-Correct.jpeg", width=6, height=4)
# collapsing across difficulty
Exp1_Figure2.2 <- data %>%
group_by(ID,Feedback,Validity,Time) %>%
summarise(
mean_error = mean(Error,na.rm=TRUE),
mean_correct = mean(Correct, na.rm=TRUE)
) %>%
group_by(Feedback,Validity, Time) %>%
summarise(
mean_err = mean(mean_error),
sd_err = sd(mean_error),
se_error = sd(mean_error)/sqrt(n()),
mean_corr = mean(mean_correct),
sd_corr = sd(mean_correct),
se_corr = sd(mean_correct)/sqrt(n())
) %>%
filter(Validity == "Inaccurate") %>%
ggplot(.) + aes(x = reorder(Feedback, -mean_err), y = mean_corr, fill = reorder(Time, -mean_err)) +
geom_bar(stat = "summary", fun = mean, position = "dodge") +
xlab("Feedback") + ylab("Correct Answers Reported") + labs(fill = "Time") +
theme_classic() + theme(legend.position="bottom") +
geom_errorbar(aes(ymin = mean_corr - se_corr, ymax = mean_corr + se_corr), position = position_dodge(width = 0.9), width = .1) +
geom_text(aes(label = round(mean_corr, digits = 2)), position = position_dodge(width = .9), vjust = 3.2) +
scale_x_discrete(labels = c("Bad" = "Negative", "Control" = "No Feedback", "Good" = "Positive")) +
scale_fill_manual(values = c("#f0f0f0", "#636363"), name = "Answers Pre- or Post-Feedback", labels = c("Pre-Feedback", "Post-Feedback"))
## `summarise()` has grouped output by 'ID', 'Feedback', 'Validity'. You can
## override using the `.groups` argument.
## `summarise()` has grouped output by 'Feedback', 'Validity'. You can override
## using the `.groups` argument.
Exp1_Figure2.2
ggsave(Exp1_Figure2.2, file="INFO-Exp1-Correct2.jpeg", width=6, height=4)