Download a copy of the RLDM poster here:

https://bit.ly/wvillano_rldm22


Models and results

Learning from real-world prediction errors

Outcome: expectations become more accurate

# Packages used below (assumed): lmerTest for lmer() with Satterthwaite df/p-values,
# knitr for kable(), ggplot2 for plots, sjPlot for plot_model() and get_model_data()
library(lmerTest)
library(knitr)
library(ggplot2)
library(sjPlot)

# Accuracy = 100 minus the unsigned prediction error (higher = more accurate expectation)
df.retest$accuracy <- 100 - df.retest$unsigned_pe_1

mdlA <- lmer(accuracy ~ exam + (1 | cohort_class / pt_id), 
             data = df.retest)
kable(coef(summary(mdlA)))
                Estimate  Std. Error           df     t value  Pr(>|t|)
(Intercept)    87.115872   0.5542695     3.962007  157.172417         0
exam            1.064096   0.1255585  1962.788681    8.474899         0
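
Reading the fixed effects directly: accuracy improves by about one point per exam. A quick sketch of the implied trajectory (assuming exam is coded 1 through 4 here, as in the plot below):

# Implied accuracy trajectory from the fixed effects (a sketch; exam coding assumed 1-4)
b <- fixef(mdlA)
b[["(Intercept)"]] + b[["exam"]] * 1:4   # roughly 88.2, 89.2, 90.3, 91.4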
# Model-implied trend (fixed effect of exam) overlaid on the raw accuracy distributions
pred <- as.data.frame(get_model_data(mdlA, type = "pred"))
pred <- pred[which(pred$exam.x < 5), ]

ggplot(data = df.retest[which(df.retest$exam < 5), ],
       aes(x = exam_f, y = accuracy)) +
  geom_violin(size = 2, alpha = 0.35, fill = "skyblue3", color = NA) +
  geom_boxplot(width = 0.1, fill = "skyblue2", color = "skyblue4", size = 1, alpha = 0.5) +
  geom_ribbon(data = pred, inherit.aes = FALSE,
              aes(x = exam.x, ymin = exam.conf.low, ymax = exam.conf.high),
              fill = "skyblue4", alpha = 0.5) +
  geom_line(data = pred, inherit.aes = FALSE,
            aes(x = exam.x, y = exam.predicted),
            color = "skyblue4", size = 1) +
  xlab("Exam") +
  ylab("Accuracy") +
  ggtitle(NULL)

Process: prediction errors drive updates to expectations

mdlA <- lmer(next_update_1 ~ pe_1 + delta_grade + (1 | cohort_class / pt_id), 
             data = df.retest)
kable(coef(summary(mdlA)))
               Estimate  Std. Error           df     t value  Pr(>|t|)
(Intercept)  -0.3281408   0.3519797     1.210108  -0.9322719  0.499515
pe_1          0.7099652   0.0233570  1228.633727  30.3962810  0.000000
delta_grade   0.7295535   0.0176037   836.005548  41.4431114  0.000000
plot_model(mdlA, type = "pred", terms = "pe_1")
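
The pe_1 coefficient (~0.71) plays the role of an average learning rate in a delta-rule sense: the next expectation moves toward the received grade by about 71% of the PE. A minimal illustration with made-up numbers (not study data):

# Delta-rule reading of the pe_1 coefficient (illustrative values, not study data)
alpha <- 0.71                  # approximate fixed effect of pe_1 from the model above
expectation <- 75              # hypothetical predicted grade
grade <- 85                    # hypothetical received grade
pe <- grade - expectation      # prediction error = +10
expectation + alpha * pe       # updated expectation ~ 82.1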

Valence asymmetry

Positive PEs drive larger updates to expectations than negative PEs

md <- lmer(next_update_1 ~ unsigned_pe_1_flip * pe_1_sign_f_NAzero + delta_grade + 
             (1 | cohort_class / pt_id), 
           data = df.retest)
kable(coef(summary(md)))
                                          Estimate  Std. Error          df     t value   Pr(>|t|)
(Intercept)                             -1.6268363   0.7855602     9.09896  -2.0709251  0.0679262
unsigned_pe_1_flip                       0.5954569   0.0505578  1494.94575  11.7777358  0.0000000
pe_1_sign_f_NAzero1                     -0.4231694   0.9223949  1488.62315  -0.4587725  0.6464645
delta_grade                              0.7260725   0.0178186   920.04470  40.7479214  0.0000000
unsigned_pe_1_flip:pe_1_sign_f_NAzero1   0.2933645   0.0719261  1489.93036   4.0786948  0.0000477
# updating rates larger for positive PEs
plot_model(md, type = "pred", terms = c("unsigned_pe_1_flip","pe_1_sign_f_NAzero"))
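
The interaction can be unpacked into simple slopes. Assuming the reference level of pe_1_sign_f_NAzero codes negative PEs and level 1 codes positive PEs (consistent with the subgroup models below), a sketch:

# Simple slopes implied by the fixed effects (a sketch; level coding assumed as above)
b <- fixef(md)
slope_ref <- b[["unsigned_pe_1_flip"]]                                  # ~0.60
slope_pos <- slope_ref + b[["unsigned_pe_1_flip:pe_1_sign_f_NAzero1"]]  # ~0.89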

Updates following positive PEs

md_pos <- lmer(next_update_1 ~ unsigned_pe_1 + delta_grade + (1 | cohort_class / pt_id), 
               data = df.retest[which(df.retest$pe_1_sign_f_NAzero == "1"), ])
kable(coef(summary(md_pos)))
                 Estimate  Std. Error        df    t value   Pr(>|t|)
(Intercept)    -1.9985969   0.6557891  562.6910  -3.047621  0.0024149
unsigned_pe_1   0.9113961   0.0516356  768.9985  17.650530  0.0000000
delta_grade     0.8257130   0.0252274  692.5488  32.730754  0.0000000

Positive updating rate: 0.91

Updates following negative PEs

md_neg <- lmer(next_update_1 ~ unsigned_pe_1 + delta_grade + (1 | cohort_class / pt_id), 
               data = df.retest[which(df.retest$pe_1_sign_f_NAzero == "-1"), ])
kable(coef(summary(md_neg)))
                 Estimate  Std. Error          df     t value   Pr(>|t|)
(Intercept)    -1.7513690   0.9217399    3.916158   -1.900068  0.1317536
unsigned_pe_1  -0.5273256   0.0521641  726.404995  -10.108980  0.0000000
delta_grade     0.6361892   0.0247787  601.680753   25.674792  0.0000000

Negative updating rate: 0.53
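
Comparing the two subgroup slopes gives a rough sense of the asymmetry (a sketch using the fitted models above):

# Asymmetry in updating rates (a sketch)
rate_pos <- fixef(md_pos)[["unsigned_pe_1"]]        # ~0.91
rate_neg <- abs(fixef(md_neg)[["unsigned_pe_1"]])   # ~0.53
rate_pos / rate_neg                                 # ~1.7x larger updates after positive PEs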

Negative emotionality (NE) is linked to altered PE-driven learning

Grades do not vary as a function of NE

kable(coef(summary(lmer(grade ~ NE + (1 | cohort_class / pt_id),
             data = df.retest))))
               Estimate  Std. Error         df    t value   Pr(>|t|)
(Intercept)  73.7455278   1.7452368   15.07561  42.255314  0.0000000
NE           -0.1002286   0.0888389  812.90837  -1.128206  0.2595656

Nonetheless, elevated NE is associated with more pessimistic expectations

kable(coef(summary(lmer(pred_1 ~ NE + (1 | cohort_class / pt_id),
             data = df.retest))))
               Estimate  Std. Error          df    t value   Pr(>|t|)
(Intercept)  77.0891865   2.2012985    4.177816  35.019869  0.0000026
NE           -0.2370911   0.0783997  779.978333  -3.024132  0.0025752
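
For scale, the NE coefficient implies predictions about 2.4 points lower per 10 units of NE (whatever units the questionnaire uses; a quick arithmetic check):

# Implied pessimism per 10 units of NE (a sketch)
-0.2370911 * 10   # ~ -2.4 points on the predicted-grade scale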

Critically, elevated NE is also linked to less accurate expectations, possibly suggesting less effective PE learning

kable(coef(summary(lmer(unsigned_pe_1 ~ NE + exam + unsigned_lag_pe_1 + (1 | cohort_class / pt_id),
             data = df.retest))))
                    Estimate  Std. Error          df    t value   Pr(>|t|)
(Intercept)        8.7324784   1.0503624    19.27931   8.313776  0.0000001
NE                 0.1046017   0.0353553   286.16455   2.958588  0.0033487
exam              -0.6869939   0.2052983   940.65575  -3.346320  0.0008513
unsigned_lag_pe_1  0.0855260   0.0243932  1503.29177   3.506138  0.0004680

Elevated NE predicts hyperreactive expectation updating

md <- lmer(next_update_1 ~ pe_1 * NE + delta_grade + (1 + NE | cohort_class / pt_id), 
           data = df.retest)

kable(coef(summary(md)))
               Estimate  Std. Error           df    t value   Pr(>|t|)
(Intercept)   1.5895573   1.1848394     2.138622   1.341580  0.3044158
pe_1          0.5723749   0.0648463  1259.126214   8.826633  0.0000000
NE           -0.1280465   0.0688022     2.390498  -1.861083  0.1826401
delta_grade   0.7352116   0.0175184  1157.244291  41.968009  0.0000000
pe_1:NE       0.0098325   0.0037935  1142.636180   2.591897  0.0096666
plot_model(md, type = "pred", terms = c("pe_1", "NE")) +
  scale_color_viridis_d(end = 0.75) + 
  scale_fill_viridis_d(end = 0.75) + 
  ggtitle(NULL) + 
  xlab("PE") + 
  ylab("Next Update")

Hyperreactivity to PEs leads to overcorrection when PEs are small

Expectation accuracy at the next exam is plotted as a function of PE type: whether the PE was positive or negative in valence, and small or large in magnitude. For individuals with elevated NE, the ability to accurately predict future grades was impaired specifically after small PEs, consistent with overlearning from small PEs.

df.retest$`Accuracy (next exam)` <- 100 - df.retest$next_uPE_1

md <- lmer(`Accuracy (next exam)` ~ pe_1_sign * NE + unsigned_pe_1 * NE + exam + 
             (1 + NE | cohort_class / pt_id), 
           data = df.retest)
kable(coef(summary(md)))
                    Estimate  Std. Error          df     t value   Pr(>|t|)
(Intercept)       93.9022955   1.2869527     7.05660  72.9648358  0.0000000
pe_1_sign          0.1713014   0.5730834  1442.22422   0.2989118  0.7650504
NE                -0.2329530   0.0595015    46.66814  -3.9150753  0.0002928
unsigned_pe_1     -0.2408288   0.0721219  1430.21136  -3.3391905  0.0008618
exam               0.6425776   0.2048677  1161.50755   3.1365490  0.0017524
pe_1_sign:NE       0.0258244   0.0349658  1326.45427   0.7385615  0.4603040
NE:unsigned_pe_1   0.0105356   0.0042515  1299.27378   2.4780857  0.0133352
# Refit using PE size and sign terms (PE1_size, PE1_sign) so predictions can be plotted by PE type
md <- lmer(`Accuracy (next exam)` ~ PE1_size * NE + PE1_sign * NE + exam + 
             (1 + NE | cohort_class / pt_id), 
           data = df.retest)

plot_model(md, type = "pred", terms = c("PE1_size", "NE")) + 
  ggtitle(NULL)

plot_model(md, type = "pred", terms = c("PE1_sign", "NE")) + 
  ggtitle(NULL)