## Load execution-phase data
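## Packages assumed to be loaded earlier in the document (not shown here):
## readxl (read_excel), dplyr, ggplot2, glmmTMB, car (Anova),
## performance (r2), emmeans, sjPlot (plot_model); the common_theme
## object used below is also assumed to be defined earlier.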
setwd("~/Desktop/Motor Sequence Learning Fractal Scaling")
df <- read_excel("hurst_df.xlsx")
df$Block <- factor(df$Block, ordered = FALSE, levels = c('1','2','3','4','5','6','7','8','9','10'))
df$Trial <- as.numeric(df$Trial)
df$Subject <- factor(df$Subject)
df$trial_acc <- factor(df$trial_acc)
df_acc <- df %>%
dplyr::filter(trial_acc == "1")
str(df)
## tibble [10,560 × 12] (S3: tbl_df/tbl/data.frame)
## $ Subject : Factor w/ 22 levels "11","12","13",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ Block : Factor w/ 10 levels "1","2","3","4",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ Trial : num [1:10560] 1 2 3 4 5 6 7 8 9 10 ...
## $ Sequence : chr [1:10560] "1" "2" "1" "1" ...
## $ trial_mean_rt : num [1:10560] 1466 930 981 1210 1494 ...
## $ trial_total_rt: num [1:10560] 8796 5578 5884 7263 8965 ...
## $ trial_acc : Factor w/ 2 levels "0","1": 1 2 1 1 1 2 2 2 2 2 ...
## $ Logtrialmeanrt: num [1:10560] 7.29 6.83 6.89 7.1 7.31 ...
## $ Logtrialsumrt : num [1:10560] 9.08 8.63 8.68 8.89 9.1 ...
## $ Phase : chr [1:10560] "Learning" "Learning" "Learning" "Learning" ...
## $ H_prep_vm : num [1:10560] 0.817 0.995 0.987 0.84 0.797 ...
## $ H_exec_vm : num [1:10560] 0.994 0.993 0.991 0.996 0.993 ...
df$Block_Num <- as.numeric(as.character(df$Block))
# Subset for Learning and Test Phases
df_learning <- df %>% filter(Phase == "Learning")
df_test <- df %>% filter(Phase == "Test")
# Subset for Learning and Test Phases with only accurate Trials
df_learning_acc <- subset(df_acc, Block %in% c("1","2","3","4","5","6","7","8"))
df_test_acc <- subset(df_acc, Block %in% c("9", "10"))
df_learning_acc$Block <- factor(df_learning_acc$Block, levels = c('1','2','3','4','5','6','7','8'))
df_test_acc$Block <- factor(df_test_acc$Block, levels = c('9', '10'))
df_learning_acc$Subject <- factor(df_learning_acc$Subject)
df_test_acc$Subject <- factor(df_test_acc$Subject)
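## A more compact alternative (a sketch): droplevels() drops every unused
## factor level in one step. Note it would also drop the unused "0" level
## of trial_acc, which the explicit re-factoring above keeps.
df_learning_acc2 <- droplevels(subset(df_acc, Block %in% as.character(1:8)))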
str(df_learning_acc)
## tibble [7,606 × 12] (S3: tbl_df/tbl/data.frame)
## $ Subject : Factor w/ 22 levels "11","12","13",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ Block : Factor w/ 8 levels "1","2","3","4",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ Trial : num [1:7606] 2 6 7 8 9 10 12 13 14 15 ...
## $ Sequence : chr [1:7606] "2" "2" "2" "1" ...
## $ trial_mean_rt : num [1:7606] 930 1237 853 1031 837 ...
## $ trial_total_rt: num [1:7606] 5578 7420 5117 6188 5021 ...
## $ trial_acc : Factor w/ 2 levels "0","1": 2 2 2 2 2 2 2 2 2 2 ...
## $ Logtrialmeanrt: num [1:7606] 6.83 7.12 6.75 6.94 6.73 ...
## $ Logtrialsumrt : num [1:7606] 8.63 8.91 8.54 8.73 8.52 ...
## $ Phase : chr [1:7606] "Learning" "Learning" "Learning" "Learning" ...
## $ H_prep_vm : num [1:7606] 0.995 0.846 0.985 0.991 0.971 ...
## $ H_exec_vm : num [1:7606] 0.993 0.995 0.995 0.993 0.991 ...
str(df_test_acc)
## tibble [1,833 × 12] (S3: tbl_df/tbl/data.frame)
## $ Subject : Factor w/ 22 levels "11","12","13",..: 1 1 1 1 1 1 1 1 1 1 ...
## $ Block : Factor w/ 2 levels "9","10": 1 1 1 1 1 1 1 1 1 1 ...
## $ Trial : num [1:1833] 2 3 4 5 6 9 10 11 12 13 ...
## $ Sequence : chr [1:1833] "2" "1" "2" "2" ...
## $ trial_mean_rt : num [1:1833] 282 217 240 192 243 ...
## $ trial_total_rt: num [1:1833] 1689 1303 1437 1151 1458 ...
## $ trial_acc : Factor w/ 2 levels "0","1": 2 2 2 2 2 2 2 2 2 2 ...
## $ Logtrialmeanrt: num [1:1833] 5.64 5.38 5.48 5.26 5.49 ...
## $ Logtrialsumrt : num [1:1833] 7.43 7.17 7.27 7.05 7.28 ...
## $ Phase : chr [1:1833] "Test" "Test" "Test" "Test" ...
## $ H_prep_vm : num [1:1833] 0.979 0.994 0.715 0.956 0.993 ...
## $ H_exec_vm : num [1:1833] 0.972 0.904 0.975 0.969 0.981 ...
## ===============================================================
## 1) Learning phase model (Blocks 1–8) - Accuracy as predictor
## ===============================================================
mod_rtacc_learn <- glmmTMB(
Logtrialmeanrt ~ Block + trial_acc + (1 | Subject),
family = gaussian(),
data = df_learning
)
summary(mod_rtacc_learn)
## Family: gaussian ( identity )
## Formula: Logtrialmeanrt ~ Block + trial_acc + (1 | Subject)
## Data: df_learning
##
## AIC BIC logLik -2*log(L) df.resid
## 3871.3 3948.8 -1924.6 3849.3 8437
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.11881 0.3447
## Residual 0.09086 0.3014
## Number of obs: 8448, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.0909
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 6.96814 0.07459 93.42 <2e-16 ***
## Block2 -0.35766 0.01321 -27.08 <2e-16 ***
## Block3 -0.42580 0.01322 -32.21 <2e-16 ***
## Block4 -0.49019 0.01321 -37.12 <2e-16 ***
## Block5 -0.53660 0.01320 -40.65 <2e-16 ***
## Block6 -0.55208 0.01320 -41.83 <2e-16 ***
## Block7 -0.60478 0.01319 -45.84 <2e-16 ***
## Block8 -0.64505 0.01321 -48.85 <2e-16 ***
## trial_acc1 -0.58619 0.01123 -52.18 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova(mod_rtacc_learn)
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: Logtrialmeanrt
## Chisq Df Pr(>Chisq)
## Block 3350.2 7 < 2.2e-16 ***
## trial_acc 2722.7 1 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2(mod_rtacc_learn)
## # R2 for Mixed Models
##
## Conditional R2: 0.682
## Marginal R2: 0.266
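# The variance components above imply a subject-level ICC (quick check):
0.11881 / (0.11881 + 0.09086) # ≈ 0.567 of the random variance is between subjects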
# Predicted Log RT per Block, separately for each accuracy level
emm_rt_block_by_acc_learn <- emmeans(mod_rtacc_learn, ~ Block | trial_acc)
emm_rt_block_by_acc_learn
## trial_acc = 0:
## Block emmean SE df asymp.LCL asymp.UCL
## 1 6.97 0.0746 Inf 6.82 7.11
## 2 6.61 0.0748 Inf 6.46 6.76
## 3 6.54 0.0748 Inf 6.40 6.69
## 4 6.48 0.0748 Inf 6.33 6.62
## 5 6.43 0.0748 Inf 6.28 6.58
## 6 6.42 0.0748 Inf 6.27 6.56
## 7 6.36 0.0748 Inf 6.22 6.51
## 8 6.32 0.0748 Inf 6.18 6.47
##
## trial_acc = 1:
## Block emmean SE df asymp.LCL asymp.UCL
## 1 6.38 0.0741 Inf 6.24 6.53
## 2 6.02 0.0741 Inf 5.88 6.17
## 3 5.96 0.0741 Inf 5.81 6.10
## 4 5.89 0.0741 Inf 5.75 6.04
## 5 5.85 0.0741 Inf 5.70 5.99
## 6 5.83 0.0741 Inf 5.68 5.98
## 7 5.78 0.0741 Inf 5.63 5.92
## 8 5.74 0.0741 Inf 5.59 5.88
##
## Confidence level used: 0.95
# Pairwise block comparisons within each accuracy level
pairs(emm_rt_block_by_acc_learn)
## trial_acc = 0:
## contrast estimate SE df z.ratio p.value
## Block1 - Block2 0.3577 0.0132 Inf 27.083 <0.0001
## Block1 - Block3 0.4258 0.0132 Inf 32.209 <0.0001
## Block1 - Block4 0.4902 0.0132 Inf 37.119 <0.0001
## Block1 - Block5 0.5366 0.0132 Inf 40.648 <0.0001
## Block1 - Block6 0.5521 0.0132 Inf 41.832 <0.0001
## Block1 - Block7 0.6048 0.0132 Inf 45.841 <0.0001
## Block1 - Block8 0.6451 0.0132 Inf 48.845 <0.0001
## Block2 - Block3 0.0681 0.0131 Inf 5.194 <0.0001
## Block2 - Block4 0.1325 0.0131 Inf 10.103 <0.0001
## Block2 - Block5 0.1789 0.0131 Inf 13.641 <0.0001
## Block2 - Block6 0.1944 0.0131 Inf 14.820 <0.0001
## Block2 - Block7 0.2471 0.0131 Inf 18.837 <0.0001
## Block2 - Block8 0.2874 0.0131 Inf 21.908 <0.0001
## Block3 - Block4 0.0644 0.0131 Inf 4.909 <0.0001
## Block3 - Block5 0.1108 0.0131 Inf 8.446 <0.0001
## Block3 - Block6 0.1263 0.0131 Inf 9.625 <0.0001
## Block3 - Block7 0.1790 0.0131 Inf 13.642 <0.0001
## Block3 - Block8 0.2193 0.0131 Inf 16.713 <0.0001
## Block4 - Block5 0.0464 0.0131 Inf 3.538 0.0096
## Block4 - Block6 0.0619 0.0131 Inf 4.717 <0.0001
## Block4 - Block7 0.1146 0.0131 Inf 8.735 <0.0001
## Block4 - Block8 0.1549 0.0131 Inf 11.805 <0.0001
## Block5 - Block6 0.0155 0.0131 Inf 1.180 0.9378
## Block5 - Block7 0.0682 0.0131 Inf 5.198 <0.0001
## Block5 - Block8 0.1085 0.0131 Inf 8.267 <0.0001
## Block6 - Block7 0.0527 0.0131 Inf 4.018 0.0015
## Block6 - Block8 0.0930 0.0131 Inf 7.087 <0.0001
## Block7 - Block8 0.0403 0.0131 Inf 3.070 0.0446
##
## trial_acc = 1:
## contrast estimate SE df z.ratio p.value
## Block1 - Block2 0.3577 0.0132 Inf 27.083 <0.0001
## Block1 - Block3 0.4258 0.0132 Inf 32.209 <0.0001
## Block1 - Block4 0.4902 0.0132 Inf 37.119 <0.0001
## Block1 - Block5 0.5366 0.0132 Inf 40.648 <0.0001
## Block1 - Block6 0.5521 0.0132 Inf 41.832 <0.0001
## Block1 - Block7 0.6048 0.0132 Inf 45.841 <0.0001
## Block1 - Block8 0.6451 0.0132 Inf 48.845 <0.0001
## Block2 - Block3 0.0681 0.0131 Inf 5.194 <0.0001
## Block2 - Block4 0.1325 0.0131 Inf 10.103 <0.0001
## Block2 - Block5 0.1789 0.0131 Inf 13.641 <0.0001
## Block2 - Block6 0.1944 0.0131 Inf 14.820 <0.0001
## Block2 - Block7 0.2471 0.0131 Inf 18.837 <0.0001
## Block2 - Block8 0.2874 0.0131 Inf 21.908 <0.0001
## Block3 - Block4 0.0644 0.0131 Inf 4.909 <0.0001
## Block3 - Block5 0.1108 0.0131 Inf 8.446 <0.0001
## Block3 - Block6 0.1263 0.0131 Inf 9.625 <0.0001
## Block3 - Block7 0.1790 0.0131 Inf 13.642 <0.0001
## Block3 - Block8 0.2193 0.0131 Inf 16.713 <0.0001
## Block4 - Block5 0.0464 0.0131 Inf 3.538 0.0096
## Block4 - Block6 0.0619 0.0131 Inf 4.717 <0.0001
## Block4 - Block7 0.1146 0.0131 Inf 8.735 <0.0001
## Block4 - Block8 0.1549 0.0131 Inf 11.805 <0.0001
## Block5 - Block6 0.0155 0.0131 Inf 1.180 0.9378
## Block5 - Block7 0.0682 0.0131 Inf 5.198 <0.0001
## Block5 - Block8 0.1085 0.0131 Inf 8.267 <0.0001
## Block6 - Block7 0.0527 0.0131 Inf 4.018 0.0015
## Block6 - Block8 0.0930 0.0131 Inf 7.087 <0.0001
## Block7 - Block8 0.0403 0.0131 Inf 3.070 0.0446
##
## P value adjustment: tukey method for comparing a family of 8 estimates
# accuracy differences *within each block*
emm_acc_by_block_learn <- emmeans(mod_rtacc_learn, ~ trial_acc | Block)
emm_acc_by_block_learn
## Block = 1:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.97 0.0746 Inf 6.82 7.11
## 1 6.38 0.0741 Inf 6.24 6.53
##
## Block = 2:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.61 0.0748 Inf 6.46 6.76
## 1 6.02 0.0741 Inf 5.88 6.17
##
## Block = 3:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.54 0.0748 Inf 6.40 6.69
## 1 5.96 0.0741 Inf 5.81 6.10
##
## Block = 4:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.48 0.0748 Inf 6.33 6.62
## 1 5.89 0.0741 Inf 5.75 6.04
##
## Block = 5:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.43 0.0748 Inf 6.28 6.58
## 1 5.85 0.0741 Inf 5.70 5.99
##
## Block = 6:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.42 0.0748 Inf 6.27 6.56
## 1 5.83 0.0741 Inf 5.68 5.98
##
## Block = 7:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.36 0.0748 Inf 6.22 6.51
## 1 5.78 0.0741 Inf 5.63 5.92
##
## Block = 8:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.32 0.0748 Inf 6.18 6.47
## 1 5.74 0.0741 Inf 5.59 5.88
##
## Confidence level used: 0.95
pairs(emm_acc_by_block_learn)
## Block = 1:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 2:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 3:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 4:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 5:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 6:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 7:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
##
## Block = 8:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.586 0.0112 Inf 52.180 <0.0001
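## Note: the accuracy contrast is identical in every block because the
## model contains no Block × trial_acc interaction. A sketch of the model
## that would let the accuracy effect vary across blocks (not fitted here):
mod_rtacc_learn_int <- glmmTMB(
  Logtrialmeanrt ~ Block * trial_acc + (1 | Subject),
  family = gaussian(),
  data = df_learning
)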
## ===============================================================
## 2) Test phase model (Blocks 9–10) - Accuracy as predictor
## ===============================================================
mod_rtacc_test <- glmmTMB(
Logtrialmeanrt ~ Block + trial_acc + (1 | Subject),
family = gaussian(),
data = df_test
)
summary(mod_rtacc_test)
## Family: gaussian ( identity )
## Formula: Logtrialmeanrt ~ Block + trial_acc + (1 | Subject)
## Data: df_test
##
## AIC BIC logLik -2*log(L) df.resid
## 904.9 933.2 -447.5 894.9 2107
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.1465 0.3828
## Residual 0.0848 0.2912
## Number of obs: 2112, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.0848
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 6.21228 0.08386 74.08 <2e-16 ***
## Block10 0.17217 0.01271 13.55 <2e-16 ***
## trial_acc1 -0.50705 0.01915 -26.48 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova(mod_rtacc_test)
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: Logtrialmeanrt
## Chisq Df Pr(>Chisq)
## Block 183.59 1 < 2.2e-16 ***
## trial_acc 700.99 1 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2(mod_rtacc_test)
## # R2 for Mixed Models
##
## Conditional R2: 0.686
## Marginal R2: 0.144
# Predicted Log RT per Block, separately for each accuracy level
emm_rt_block_by_acc_test <- emmeans(mod_rtacc_test, ~ Block | trial_acc)
emm_rt_block_by_acc_test
## trial_acc = 0:
## Block emmean SE df asymp.LCL asymp.UCL
## 9 6.21 0.0839 Inf 6.05 6.38
## 10 6.38 0.0837 Inf 6.22 6.55
##
## trial_acc = 1:
## Block emmean SE df asymp.LCL asymp.UCL
## 9 5.71 0.0821 Inf 5.54 5.87
## 10 5.88 0.0822 Inf 5.72 6.04
##
## Confidence level used: 0.95
# Pairwise block comparisons within each accuracy level
pairs(emm_rt_block_by_acc_test)
## trial_acc = 0:
## contrast estimate SE df z.ratio p.value
## Block9 - Block10 -0.172 0.0127 Inf -13.549 <0.0001
##
## trial_acc = 1:
## contrast estimate SE df z.ratio p.value
## Block9 - Block10 -0.172 0.0127 Inf -13.549 <0.0001
# accuracy differences *within each block*
emm_acc_by_block_test <- emmeans(mod_rtacc_test, ~ trial_acc | Block)
emm_acc_by_block_test
## Block = 9:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.21 0.0839 Inf 6.05 6.38
## 1 5.71 0.0821 Inf 5.54 5.87
##
## Block = 10:
## trial_acc emmean SE df asymp.LCL asymp.UCL
## 0 6.38 0.0837 Inf 6.22 6.55
## 1 5.88 0.0822 Inf 5.72 6.04
##
## Confidence level used: 0.95
pairs(emm_acc_by_block_test)
## Block = 9:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.507 0.0192 Inf 26.476 <0.0001
##
## Block = 10:
## contrast estimate SE df z.ratio p.value
## trial_acc0 - trial_acc1 0.507 0.0192 Inf 26.476 <0.0001
# make accuracy a factor with readable labels
df <- df %>%
mutate(
trial_acc_fac = factor(
trial_acc,
levels = c(0, 1),
labels = c("Inaccurate", "Accurate")
)
)
p_rt <- ggplot(
df,
aes(
x = Block,
y = Logtrialmeanrt,
colour = trial_acc_fac,
fill = trial_acc_fac
)
) +
geom_boxplot(
position = position_dodge(width = 0.8),
alpha = 0.35,
outlier.shape = NA
) +
geom_jitter(
position = position_jitterdodge(
jitter.width = 0.15,
dodge.width = 0.8
),
alpha = 0.4,
size = 1
) +
geom_vline(
xintercept = 8.5,
linetype = "solid",
colour = "grey30",
linewidth = 1
) +
scale_colour_manual(
name = "Accuracy",
values = c("Inaccurate" = "#3C4F8CFF",
"Accurate" = "#67CC5CFF")
) +
scale_fill_manual(
name = "Accuracy",
values = c("Inaccurate" = "#3C4F8CFF",
"Accurate" = "#67CC5CFF")
) +
labs(
x = "Block",
y = "Log RT",
title = "Trial-wise Log RT across blocks"
) +
common_theme
p_rt

# Count accurate (1) vs inaccurate (0) trials
df_trials <- df %>%
distinct(Subject, Block, Trial, trial_acc)
df_trials %>%
count(trial_acc)
## # A tibble: 2 × 2
## trial_acc n
## <fct> <int>
## 1 0 1121
## 2 1 9439
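# Overall accuracy rate implied by the counts above
mean(df_trials$trial_acc == "1") # 9439 / 10560 ≈ 0.894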
##
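## The code for the four H ~ Block models summarised below is not echoed
## in this section; a minimal reconstruction consistent with the printed
## formulas (the mod_h* object names are assumptions, not from the script):
mod_hexec_learn <- glmmTMB(H_exec_vm ~ Block + (1 | Subject),
                           family = gaussian(), data = df_learning_acc)
mod_hprep_learn <- glmmTMB(H_prep_vm ~ Block + (1 | Subject),
                           family = gaussian(), data = df_learning_acc)
mod_hexec_test <- glmmTMB(H_exec_vm ~ Block + (1 | Subject),
                          family = gaussian(), data = df_test_acc)
mod_hprep_test <- glmmTMB(H_prep_vm ~ Block + (1 | Subject),
                          family = gaussian(), data = df_test_acc)
# summary(), Anova(), and r2() on each model reproduce the blocks below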
## =============================================================
## LEARNING PHASE – H_exec_vm ~ Block
## Model summary
## =============================================================
## Family: gaussian ( identity )
## Formula: H_exec_vm ~ Block + (1 | Subject)
## Data: df_learning_acc
##
## AIC BIC logLik -2*log(L) df.resid
## -42783.4 -42714.0 21401.7 -42803.4 7596
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.0002558 0.01599
## Residual 0.0002070 0.01439
## Number of obs: 7606, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.000207
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.9913411 0.0034466 287.63 < 2e-16 ***
## Block2 -0.0029996 0.0006814 -4.40 1.07e-05 ***
## Block3 -0.0057795 0.0006798 -8.50 < 2e-16 ***
## Block4 -0.0061641 0.0006814 -9.05 < 2e-16 ***
## Block5 -0.0058289 0.0006820 -8.55 < 2e-16 ***
## Block6 -0.0068351 0.0006826 -10.01 < 2e-16 ***
## Block7 -0.0076956 0.0006834 -11.26 < 2e-16 ***
## Block8 -0.0098482 0.0006814 -14.45 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## LEARNING PHASE – H_exec_vm ~ Block
## Type II/III ANOVA
## -------------------------------------------------------------
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: H_exec_vm
## Chisq Df Pr(>Chisq)
## Block 269.76 7 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## # R2 for Mixed Models
##
## Conditional R2: 0.560
## Marginal R2: 0.016
##
## =============================================================
## LEARNING PHASE – H_prep_vm ~ Block
## Model summary
## =============================================================
## Family: gaussian ( identity )
## Formula: H_prep_vm ~ Block + (1 | Subject)
## Data: df_learning_acc
##
## AIC BIC logLik -2*log(L) df.resid
## -9571.3 -9501.9 4795.6 -9591.3 7596
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.00551 0.07423
## Residual 0.01636 0.12792
## Number of obs: 7606, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.0164
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.824281 0.016441 50.14 < 2e-16 ***
## Block2 0.001372 0.006059 0.23 0.820891
## Block3 0.008767 0.006045 1.45 0.146958
## Block4 0.019094 0.006059 3.15 0.001625 **
## Block5 0.012003 0.006065 1.98 0.047790 *
## Block6 0.021472 0.006070 3.54 0.000404 ***
## Block7 0.036395 0.006077 5.99 2.11e-09 ***
## Block8 0.037566 0.006059 6.20 5.64e-10 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## LEARNING PHASE – H_prep_vm ~ Block
## Type II/III ANOVA
## -------------------------------------------------------------
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: H_prep_vm
## Chisq Df Pr(>Chisq)
## Block 83.051 7 3.283e-15 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## # R2 for Mixed Models
##
## Conditional R2: 0.258
## Marginal R2: 0.008
##
## =============================================================
## TEST PHASE – H_exec_vm ~ Block
## Model summary
## =============================================================
## Family: gaussian ( identity )
## Formula: H_exec_vm ~ Block + (1 | Subject)
## Data: df_test_acc
##
## AIC BIC logLik -2*log(L) df.resid
## -9957.1 -9935.0 4982.5 -9965.1 1829
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.0003976 0.01994
## Residual 0.0002403 0.01550
## Number of obs: 1833, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.00024
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.9812985 0.0042810 229.22 <2e-16 ***
## Block10 0.0008839 0.0007261 1.22 0.223
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## TEST PHASE – H_exec_vm ~ Block
## Type II/III ANOVA
## -------------------------------------------------------------
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: H_exec_vm
## Chisq Df Pr(>Chisq)
## Block 1.4821 1 0.2235
## # R2 for Mixed Models
##
## Conditional R2: 0.623
## Marginal R2: 0.000
##
## =============================================================
## TEST PHASE – H_prep_vm ~ Block
## Model summary
## =============================================================
## Family: gaussian ( identity )
## Formula: H_prep_vm ~ Block + (1 | Subject)
## Data: df_test_acc
##
## AIC BIC logLik -2*log(L) df.resid
## -2503.9 -2481.8 1255.9 -2511.9 1829
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.007384 0.08593
## Residual 0.014212 0.11921
## Number of obs: 1833, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.0142
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.848429 0.018728 45.3 <2e-16 ***
## Block10 0.006705 0.005583 1.2 0.23
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## TEST PHASE – H_prep_vm ~ Block
## Type II/III ANOVA
## -------------------------------------------------------------
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: H_prep_vm
## Chisq Df Pr(>Chisq)
## Block 1.4421 1 0.2298
## # R2 for Mixed Models
##
## Conditional R2: 0.342
## Marginal R2: 0.001
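## The contrast tables below match Tukey-adjusted pairwise comparisons
## with confidence intervals, presumably produced as follows (a sketch
## using the assumed object names from the reconstruction above):
summary(pairs(emmeans(mod_hexec_learn, ~ Block)), infer = TRUE)
summary(pairs(emmeans(mod_hprep_learn, ~ Block)), infer = TRUE)
summary(pairs(emmeans(mod_hexec_test, ~ Block)), infer = TRUE)
summary(pairs(emmeans(mod_hprep_test, ~ Block)), infer = TRUE)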

## -------------------------------------------------------------
## LEARNING PHASE – H_exec_vm: pairwise block contrasts
## -------------------------------------------------------------
## contrast estimate SE df asymp.LCL asymp.UCL z.ratio p.value
## Block1 - Block2 3.00e-03 0.000681 Inf 9.34e-04 0.00506 4.402 0.0003
## Block1 - Block3 5.78e-03 0.000680 Inf 3.72e-03 0.00784 8.502 <0.0001
## Block1 - Block4 6.16e-03 0.000681 Inf 4.10e-03 0.00823 9.046 <0.0001
## Block1 - Block5 5.83e-03 0.000682 Inf 3.76e-03 0.00790 8.546 <0.0001
## Block1 - Block6 6.84e-03 0.000683 Inf 4.77e-03 0.00890 10.013 <0.0001
## Block1 - Block7 7.70e-03 0.000683 Inf 5.62e-03 0.00977 11.261 <0.0001
## Block1 - Block8 9.85e-03 0.000681 Inf 7.78e-03 0.01191 14.453 <0.0001
## Block2 - Block3 2.78e-03 0.000651 Inf 8.05e-04 0.00475 4.267 0.0005
## Block2 - Block4 3.16e-03 0.000653 Inf 1.18e-03 0.00514 4.844 <0.0001
## Block2 - Block5 2.83e-03 0.000654 Inf 8.47e-04 0.00481 4.326 0.0004
## Block2 - Block6 3.84e-03 0.000654 Inf 1.85e-03 0.00582 5.860 <0.0001
## Block2 - Block7 4.70e-03 0.000655 Inf 2.71e-03 0.00668 7.167 <0.0001
## Block2 - Block8 6.85e-03 0.000653 Inf 4.87e-03 0.00883 10.483 <0.0001
## Block3 - Block4 3.85e-04 0.000652 Inf -1.59e-03 0.00236 0.590 0.9990
## Block3 - Block5 4.95e-05 0.000652 Inf -1.93e-03 0.00203 0.076 1.0000
## Block3 - Block6 1.06e-03 0.000653 Inf -9.23e-04 0.00303 1.617 0.7400
## Block3 - Block7 1.92e-03 0.000653 Inf -6.42e-05 0.00390 2.933 0.0662
## Block3 - Block8 4.07e-03 0.000652 Inf 2.09e-03 0.00604 6.245 <0.0001
## Block4 - Block5 -3.35e-04 0.000654 Inf -2.32e-03 0.00165 -0.513 0.9996
## Block4 - Block6 6.71e-04 0.000655 Inf -1.31e-03 0.00265 1.025 0.9708
## Block4 - Block7 1.53e-03 0.000655 Inf -4.55e-04 0.00352 2.337 0.2734
## Block4 - Block8 3.68e-03 0.000653 Inf 1.70e-03 0.00566 5.639 <0.0001
## Block5 - Block6 1.01e-03 0.000655 Inf -9.79e-04 0.00299 1.536 0.7881
## Block5 - Block7 1.87e-03 0.000656 Inf -1.21e-04 0.00385 2.846 0.0840
## Block5 - Block8 4.02e-03 0.000654 Inf 2.04e-03 0.00600 6.146 <0.0001
## Block6 - Block7 8.60e-04 0.000656 Inf -1.13e-03 0.00285 1.311 0.8951
## Block6 - Block8 3.01e-03 0.000655 Inf 1.03e-03 0.00500 4.604 0.0001
## Block7 - Block8 2.15e-03 0.000655 Inf 1.66e-04 0.00414 3.285 0.0228
##
## Confidence level used: 0.95
## Conf-level adjustment: tukey method for comparing a family of 8 estimates
## P value adjustment: tukey method for comparing a family of 8 estimates
## -------------------------------------------------------------
## LEARNING PHASE – H_prep_vm: pairwise block contrasts
## -------------------------------------------------------------
## contrast estimate SE df asymp.LCL asymp.UCL z.ratio p.value
## Block1 - Block2 -0.00137 0.00606 Inf -0.0197 0.016993 -0.226 1.0000
## Block1 - Block3 -0.00877 0.00604 Inf -0.0271 0.009554 -1.450 0.8335
## Block1 - Block4 -0.01909 0.00606 Inf -0.0375 -0.000730 -3.151 0.0348
## Block1 - Block5 -0.01200 0.00606 Inf -0.0304 0.006378 -1.979 0.4960
## Block1 - Block6 -0.02147 0.00607 Inf -0.0399 -0.003075 -3.538 0.0096
## Block1 - Block7 -0.03639 0.00608 Inf -0.0548 -0.017977 -5.989 <0.0001
## Block1 - Block8 -0.03757 0.00606 Inf -0.0559 -0.019202 -6.200 <0.0001
## Block2 - Block3 -0.00739 0.00579 Inf -0.0250 0.010163 -1.277 0.9077
## Block2 - Block4 -0.01772 0.00581 Inf -0.0353 -0.000114 -3.051 0.0472
## Block2 - Block5 -0.01063 0.00582 Inf -0.0283 0.006994 -1.828 0.6007
## Block2 - Block6 -0.02010 0.00582 Inf -0.0377 -0.002461 -3.454 0.0129
## Block2 - Block7 -0.03502 0.00583 Inf -0.0527 -0.017364 -6.011 <0.0001
## Block2 - Block8 -0.03619 0.00581 Inf -0.0538 -0.018586 -6.230 <0.0001
## Block3 - Block4 -0.01033 0.00579 Inf -0.0279 0.007233 -1.783 0.6321
## Block3 - Block5 -0.00324 0.00580 Inf -0.0208 0.014341 -0.558 0.9993
## Block3 - Block6 -0.01270 0.00580 Inf -0.0303 0.004885 -2.189 0.3582
## Block3 - Block7 -0.02763 0.00581 Inf -0.0452 -0.010019 -4.755 <0.0001
## Block3 - Block8 -0.02880 0.00579 Inf -0.0464 -0.011240 -4.971 <0.0001
## Block4 - Block5 0.00709 0.00582 Inf -0.0105 0.024717 1.219 0.9265
## Block4 - Block6 -0.00238 0.00582 Inf -0.0200 0.015262 -0.408 0.9999
## Block4 - Block7 -0.01730 0.00583 Inf -0.0350 0.000361 -2.969 0.0598
## Block4 - Block8 -0.01847 0.00581 Inf -0.0361 -0.000864 -3.180 0.0319
## Block5 - Block6 -0.00947 0.00583 Inf -0.0271 0.008188 -1.625 0.7351
## Block5 - Block7 -0.02439 0.00583 Inf -0.0421 -0.006714 -4.182 0.0008
## Block5 - Block8 -0.02556 0.00582 Inf -0.0432 -0.007937 -4.396 0.0003
## Block6 - Block7 -0.01492 0.00584 Inf -0.0326 0.002767 -2.557 0.1720
## Block6 - Block8 -0.01609 0.00582 Inf -0.0337 0.001545 -2.765 0.1038
## Block7 - Block8 -0.00117 0.00583 Inf -0.0188 0.016490 -0.201 1.0000
##
## Confidence level used: 0.95
## Conf-level adjustment: tukey method for comparing a family of 8 estimates
## P value adjustment: tukey method for comparing a family of 8 estimates
## -------------------------------------------------------------
## TEST PHASE – H_exec_vm: block contrast
## -------------------------------------------------------------
## contrast estimate SE df asymp.LCL asymp.UCL z.ratio p.value
## Block9 - Block10 -0.000884 0.000726 Inf -0.00231 0.000539 -1.217 0.2235
##
## Confidence level used: 0.95
## -------------------------------------------------------------
## TEST PHASE – H_prep_vm: block contrast
## -------------------------------------------------------------
## contrast estimate SE df asymp.LCL asymp.UCL z.ratio p.value
## Block9 - Block10 -0.0067 0.00558 Inf -0.0176 0.00424 -1.201 0.2298
##
## Confidence level used: 0.95
## ---------- EXECUTION H: across blocks ----------
p_exec <- ggplot(df_acc, aes(x = Block, y = H_exec_vm)) +
geom_boxplot(fill = "#67CC5CFF", alpha = 0.35, outlier.shape = NA) +
geom_jitter(width = 0.15, alpha = 0.4, size = 1, colour = "#67CC5CFF") +
scale_y_continuous(limits = c(0.8, 1.0)) +
geom_vline(xintercept = 8.5, linetype = "solid", colour = "grey30", linewidth = 1) +
labs(
x = "Block",
y = "Execution H",
title = "Execution H across blocks"
) +
common_theme
## ---------- PREPARATION H: across blocks ----------
p_prep <- ggplot(df_acc, aes(x = Block, y = H_prep_vm)) +
geom_boxplot(fill = "#3C4F8CFF", alpha = 0.35, outlier.shape = NA) +
geom_jitter(width = 0.15, alpha = 0.4, size = 1, colour = "#3C4F8CFF") +
geom_vline(xintercept = 8.5, linetype = "solid", colour = "grey30", linewidth = 1) +
labs(
x = "Block",
y = "Preparation H",
title = "Preparation H across blocks"
) +
common_theme
p_exec
## Warning: Removed 7 rows containing non-finite outside the scale range
## (`stat_boxplot()`).
## Warning: Removed 7 rows containing missing values or values outside the scale range
## (`geom_point()`).

p_prep

## ===============================================================
## 1) Learning phase model (Blocks 1–8) - Logtrialmeanrt as DV
## ===============================================================
model_learning <- glmmTMB(
Logtrialmeanrt ~ H_prep_vm * H_exec_vm * Block +
(1 | Subject),
data = df_learning_acc,
family = gaussian()
)
summary(model_learning)
## Family: gaussian ( identity )
## Formula:
## Logtrialmeanrt ~ H_prep_vm * H_exec_vm * Block + (1 | Subject)
## Data: df_learning_acc
##
## AIC BIC logLik -2*log(L) df.resid
## 1687.9 1923.7 -809.9 1619.9 7572
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.11729 0.3425
## Residual 0.07113 0.2667
## Number of obs: 7606, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.0711
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 25.525 4.611 5.536 3.10e-08 ***
## H_prep_vm -25.449 5.200 -4.894 9.86e-07 ***
## H_exec_vm -19.506 4.650 -4.195 2.73e-05 ***
## Block2 -19.482 5.387 -3.617 0.000298 ***
## Block3 -17.232 5.593 -3.081 0.002061 **
## Block4 -16.461 5.251 -3.135 0.001718 **
## Block5 -16.275 5.389 -3.020 0.002526 **
## Block6 -22.082 5.537 -3.988 6.67e-05 ***
## Block7 -22.889 5.314 -4.307 1.65e-05 ***
## Block8 -15.663 5.147 -3.043 0.002340 **
## H_prep_vm:H_exec_vm 25.902 5.245 4.938 7.88e-07 ***
## H_prep_vm:Block2 23.049 6.214 3.709 0.000208 ***
## H_prep_vm:Block3 20.660 6.316 3.271 0.001071 **
## H_prep_vm:Block4 19.651 5.992 3.279 0.001041 **
## H_prep_vm:Block5 20.795 6.096 3.411 0.000647 ***
## H_prep_vm:Block6 26.685 6.281 4.248 2.15e-05 ***
## H_prep_vm:Block7 27.454 5.988 4.585 4.55e-06 ***
## H_prep_vm:Block8 18.049 5.811 3.106 0.001898 **
## H_exec_vm:Block2 19.529 5.435 3.593 0.000327 ***
## H_exec_vm:Block3 17.342 5.644 3.072 0.002124 **
## H_exec_vm:Block4 16.386 5.300 3.091 0.001992 **
## H_exec_vm:Block5 16.106 5.440 2.960 0.003072 **
## H_exec_vm:Block6 21.906 5.591 3.918 8.93e-05 ***
## H_exec_vm:Block7 22.704 5.366 4.231 2.33e-05 ***
## H_exec_vm:Block8 15.229 5.196 2.931 0.003379 **
## H_prep_vm:H_exec_vm:Block2 -23.519 6.272 -3.750 0.000177 ***
## H_prep_vm:H_exec_vm:Block3 -21.286 6.376 -3.338 0.000843 ***
## H_prep_vm:H_exec_vm:Block4 -20.131 6.051 -3.327 0.000878 ***
## H_prep_vm:H_exec_vm:Block5 -21.238 6.156 -3.450 0.000561 ***
## H_prep_vm:H_exec_vm:Block6 -27.131 6.344 -4.277 1.90e-05 ***
## H_prep_vm:H_exec_vm:Block7 -27.959 6.049 -4.622 3.79e-06 ***
## H_prep_vm:H_exec_vm:Block8 -18.269 5.868 -3.113 0.001851 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova(model_learning)
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: Logtrialmeanrt
## Chisq Df Pr(>Chisq)
## H_prep_vm 0.3157 1 0.5741812
## H_exec_vm 59.5933 1 1.166e-14 ***
## Block 3220.1052 7 < 2.2e-16 ***
## H_prep_vm:H_exec_vm 14.6604 1 0.0001287 ***
## H_prep_vm:Block 38.1020 7 2.898e-06 ***
## H_exec_vm:Block 17.1039 7 0.0167384 *
## H_prep_vm:H_exec_vm:Block 25.9254 7 0.0005194 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2(model_learning)
## # R2 for Mixed Models
##
## Conditional R2: 0.682
## Marginal R2: 0.157
# LEARNING
p_learning <- plot_model(
model_learning,
type = "pred",
terms = c("H_exec_vm", "H_prep_vm", "Block"),
colors = c("#3C4F8CFF", "#21918CFF", "#FDE725FF")
) +
geom_line(linewidth = 1) +
labs(
x = "Execution H",
y = "Log RT",
title = "Predicted values of Log RT",
colour = "Prep H",
fill = "Prep H"
) +
common_theme
p_learning

## ===============================================================
## Post-hoc: H_prep × H_exec interaction per Block (learning phase)
## ===============================================================
## Idea:
## For each block, relevel Block so that this block is the reference,
## refit the SAME model, and read off the coefficient for
## H_prep_vm:H_exec_vm. That coefficient is the simple
## H_prep × H_exec interaction in that block.
## Then collect all block-wise estimates and apply Holm correction.
## ===============================================================
blocks_learning <- levels(df_learning_acc$Block)
interaction_results_learning <- lapply(blocks_learning, function(b) {
## Relevel Block so that block b is the reference level
df_tmp <- df_learning_acc %>%
mutate(Block_ref = relevel(Block, ref = b))
## Refit the same model structure with Block_ref
mod_b <- glmmTMB(
Logtrialmeanrt ~ H_prep_vm * H_exec_vm * Block_ref +
(1 | Subject),
data = df_tmp,
family = gaussian()
)
## Extract the simple interaction term in this reference block:
## coefficient for H_prep_vm:H_exec_vm
coefs <- summary(mod_b)$coefficients$cond
est <- coefs["H_prep_vm:H_exec_vm", "Estimate"]
se <- coefs["H_prep_vm:H_exec_vm", "Std. Error"]
z <- coefs["H_prep_vm:H_exec_vm", "z value"]
p <- coefs["H_prep_vm:H_exec_vm", "Pr(>|z|)"]
data.frame(
Block = b,
Estimate = est,
SE = se,
z_value = z,
p_value = p,
stringsAsFactors = FALSE
)
})
interaction_table_learning <- bind_rows(interaction_results_learning) %>%
mutate(
p_adj_holm = p.adjust(p_value, method = "holm")
)
interaction_table_learning
## Block Estimate SE z_value p_value p_adj_holm
## 1 1 25.902113 5.245137 4.9383097 7.880263e-07 6.304211e-06
## 2 2 2.383639 3.453266 0.6902563 4.900330e-01 1.000000e+00
## 3 3 4.615961 3.656176 1.2625107 2.067651e-01 8.270602e-01
## 4 4 5.770421 3.032639 1.9027720 5.707030e-02 3.424218e-01
## 5 5 4.663641 3.229812 1.4439358 1.487570e-01 7.437852e-01
## 6 6 -1.229573 3.572487 -0.3441784 7.307121e-01 1.000000e+00
## 7 7 -2.056944 3.036947 -0.6773064 4.982116e-01 1.000000e+00
## 8 8 7.632644 2.657739 2.8718565 4.080682e-03 2.856477e-02
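## An emmeans-based route to the same per-block simple interactions
## (a sketch): the H_prep_vm slope at H_exec_vm = 1 minus the slope at
## H_exec_vm = 0 equals the H_prep_vm:H_exec_vm coefficient in that block.
prep_trends <- emtrends(model_learning, ~ H_exec_vm | Block,
                        var = "H_prep_vm", at = list(H_exec_vm = c(0, 1)))
pairs(prep_trends, reverse = TRUE) # unadjusted z tests; apply Holm separately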
## ===============================================================
## 2) Test phase model (Blocks 9–10) - Logtrialmeanrt as DV
## ===============================================================
model_test <- glmmTMB(
Logtrialmeanrt ~ H_prep_vm * H_exec_vm * Block +
(1 | Subject),
family = gaussian(),
data = df_test_acc
)
summary(model_test)
## Family: gaussian ( identity )
## Formula:
## Logtrialmeanrt ~ H_prep_vm * H_exec_vm * Block + (1 | Subject)
## Data: df_test_acc
##
## AIC BIC logLik -2*log(L) df.resid
## 224.9 280.1 -102.5 204.9 1823
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 0.13018 0.3608
## Residual 0.06153 0.2481
## Number of obs: 1833, groups: Subject, 22
##
## Dispersion estimate for gaussian family (sigma^2): 0.0615
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 5.63935 3.33337 1.692 0.0907 .
## H_prep_vm -2.65272 3.57201 -0.743 0.4577
## H_exec_vm -0.09203 3.37772 -0.027 0.9783
## Block10 -6.90770 4.27426 -1.616 0.1061
## H_prep_vm:H_exec_vm 2.87813 3.62177 0.795 0.4268
## H_prep_vm:Block10 6.27672 4.66034 1.347 0.1780
## H_exec_vm:Block10 7.53499 4.32775 1.741 0.0817 .
## H_prep_vm:H_exec_vm:Block10 -6.74790 4.72071 -1.429 0.1529
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova(model_test)
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: Logtrialmeanrt
## Chisq Df Pr(>Chisq)
## H_prep_vm 0.0007 1 0.979249
## H_exec_vm 74.6506 1 < 2.2e-16 ***
## Block 289.5205 1 < 2.2e-16 ***
## H_prep_vm:H_exec_vm 0.1754 1 0.675315
## H_prep_vm:Block 22.8309 1 1.769e-06 ***
## H_exec_vm:Block 8.7220 1 0.003144 **
## H_prep_vm:H_exec_vm:Block 2.0433 1 0.152882
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2(model_test)
## # R2 for Mixed Models
##
## Conditional R2: 0.706
## Marginal R2: 0.085
# TEST
p_test <- plot_model(
model_test,
type = "pred",
terms = c("H_exec_vm", "H_prep_vm", "Block"),
colors = c("#3C4F8CFF", "#21918CFF", "#FDE725FF")
) +
geom_line(linewidth = 1) +
labs(
x = "Execution H",
y = "Log RT",
title = "Predicted values of Log RT",
colour = "Prep H",
fill = "Prep H"
) +
common_theme
p_test

## ===============================================================
## Post-hoc: H_prep × H_exec interaction per Block (test phase)
## ===============================================================
## Same approach as in the learning phase: relevel Block, refit the
## same model, read off the simple H_prep_vm:H_exec_vm coefficient,
## then Holm-correct across blocks.
## ===============================================================
blocks_test <- levels(df_test_acc$Block)
interaction_results_test <- lapply(blocks_test, function(b) {
## Relevel Block so that block b is the reference level
df_tmp <- df_test_acc %>%
mutate(Block_ref = relevel(Block, ref = b))
## Same model structure in the test phase
mod_b <- glmmTMB(
Logtrialmeanrt ~ H_prep_vm * H_exec_vm * Block_ref +
(1 | Subject),
data = df_tmp,
family = gaussian()
)
## Simple H_prep × H_exec interaction in block b
coefs <- summary(mod_b)$coefficients$cond
est <- coefs["H_prep_vm:H_exec_vm", "Estimate"]
se <- coefs["H_prep_vm:H_exec_vm", "Std. Error"]
z <- coefs["H_prep_vm:H_exec_vm", "z value"]
p <- coefs["H_prep_vm:H_exec_vm", "Pr(>|z|)"]
data.frame(
Block = b,
Estimate = est,
SE = se,
z_value = z,
p_value = p,
stringsAsFactors = FALSE
)
})
interaction_table_test <- bind_rows(interaction_results_test) %>%
mutate(
p_adj_holm = p.adjust(p_value, method = "holm")
)
interaction_table_test
## Block Estimate SE z_value p_value p_adj_holm
## 1 9 2.878128 3.621771 0.7946741 0.4268031 0.4310406
## 2 10 -3.869776 3.124496 -1.2385281 0.2155203 0.4310406
## ===============================================================
## STITCHED SURFACES: Blocks 1–8 from model_learning,
## Blocks 9–10 from model_test
## ===============================================================
## ---------------------------------------------------------------
## Hyperparameters
## ---------------------------------------------------------------
q_lower <- 0.025
q_upper <- 0.975
q_lower_pct <- q_lower * 100
q_upper_pct <- q_upper * 100
viridis_option <- "viridis"
viridis_begin <- 0
viridis_end <- 1
fast_color <- "light" # should fast (low) predicted RTs get the light end of viridis?
viridis_direction <- if (fast_color == "dark") 1L else -1L
## ---------------------------------------------------------------
## Block sets and labels
## ---------------------------------------------------------------
blocks_learning <- levels(df_learning_acc$Block)
blocks_test <- levels(df_test_acc$Block)
blocks_all <- c(blocks_learning, blocks_test)
block_labs_all <- setNames(
paste0("Block ", blocks_all),
blocks_all
)
# block_labs_all["10"] <- "Block 10 (unfamiliar)"
## ===============================================================
## A. WITHIN-BLOCK SURFACES (per-block H ranges)
## ===============================================================
## ---------------------------
## Learning within-block grids
## ---------------------------
grid_list_block_learning <- lapply(blocks_learning, function(b) {
df_b <- df_learning_acc %>% dplyr::filter(Block == b)
H_exec_seq_b <- seq(
quantile(df_b$H_exec_vm, q_lower, na.rm = TRUE),
quantile(df_b$H_exec_vm, q_upper, na.rm = TRUE),
length.out = 100
)
H_prep_seq_b <- seq(
quantile(df_b$H_prep_vm, q_lower, na.rm = TRUE),
quantile(df_b$H_prep_vm, q_upper, na.rm = TRUE),
length.out = 100
)
expand.grid(
H_exec_vm = H_exec_seq_b,
H_prep_vm = H_prep_seq_b,
Block = b,
KEEP.OUT.ATTRS = FALSE,
stringsAsFactors = FALSE
)
})
pred_grid_block_learning <- bind_rows(grid_list_block_learning)
pred_grid_block_learning$Block <- factor(
pred_grid_block_learning$Block,
levels = blocks_all
)
pred_grid_block_learning <- as.data.frame(pred_grid_block_learning)
pred_grid_block_learning$pred_logrt <- predict(
model_learning,
newdata = pred_grid_block_learning,
type = "response",
re.form = NA,
allow.new.levels = TRUE
)
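## (re.form = NA requests population-level predictions: the subject random
## intercepts are set to zero rather than predicted per subject)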
## ---------------------------
## Test within-block grids
## ---------------------------
grid_list_block_test <- lapply(blocks_test, function(b) {
df_b <- df_test_acc %>% dplyr::filter(Block == b)
H_exec_seq_b <- seq(
quantile(df_b$H_exec_vm, q_lower, na.rm = TRUE),
quantile(df_b$H_exec_vm, q_upper, na.rm = TRUE),
length.out = 100
)
H_prep_seq_b <- seq(
quantile(df_b$H_prep_vm, q_lower, na.rm = TRUE),
quantile(df_b$H_prep_vm, q_upper, na.rm = TRUE),
length.out = 100
)
expand.grid(
H_exec_vm = H_exec_seq_b,
H_prep_vm = H_prep_seq_b,
Block = b,
KEEP.OUT.ATTRS = FALSE,
stringsAsFactors = FALSE
)
})
pred_grid_block_test <- bind_rows(grid_list_block_test)
pred_grid_block_test$Block <- factor(
pred_grid_block_test$Block,
levels = blocks_all
)
pred_grid_block_test <- as.data.frame(pred_grid_block_test)
pred_grid_block_test$pred_logrt <- predict(
model_test,
newdata = pred_grid_block_test,
type = "response",
re.form = NA,
allow.new.levels = TRUE
)
## ---------------------------
## Combine within-block grids
## ---------------------------
pred_grid_block_all <- bind_rows(
pred_grid_block_learning,
pred_grid_block_test
)
## ===============================================================
## B. COMMON COLOR RANGE FOR MODEL SURFACES
## ===============================================================
rt_range_all <- range(pred_grid_block_all$pred_logrt, na.rm = TRUE)
## ===============================================================
## C. STITCHED MODEL HEATMAPS (WITHIN-BLOCK RANGES)
## ===============================================================
p_block_all <- ggplot() +
geom_tile(
data = pred_grid_block_all,
aes(
x = H_prep_vm,
y = H_exec_vm,
fill = pred_logrt
)
) +
facet_wrap(
~ Block,
ncol = 5,
labeller = labeller(Block = block_labs_all)
) +
scale_fill_viridis_c(
option = viridis_option,
begin = viridis_begin,
end = viridis_end,
direction = viridis_direction,
limits = rt_range_all,
name = "Predicted\nlog RT"
) +
labs(
title = "Predicted RT surfaces",
x = "Preparation H",
y = "Execution H"
) +
common_theme
print(p_block_all)

## ===============================================================
## Mosaic of prep/exec H quadrants by block
## - Area shows number of trials
## - Fill shows mean log RT
## ===============================================================
## --- Global bins across all 10 blocks ---------------------------
prep_breaks <- quantile(df_acc$H_prep_vm, probs = c(0, 0.5, 1), na.rm = TRUE)
exec_breaks <- quantile(df_acc$H_exec_vm, probs = c(0, 0.5, 1), na.rm = TRUE)
df_quad <- df_acc %>%
mutate(
prep_bin = cut(
H_prep_vm,
breaks = prep_breaks,
labels = c("🡻 prep H", " 🢁 prep H"),
include.lowest = TRUE
),
exec_bin = cut(
H_exec_vm,
breaks = exec_breaks,
labels = c("🢀 exec H", " 🢂 exec H"),
include.lowest = TRUE
)
) %>%
group_by(Block, prep_bin, exec_bin) %>%
summarise(
mean_rt = mean(Logtrialmeanrt, na.rm = TRUE),
n = n(),
.groups = "drop"
)
## --- Mosaic coordinates (area proportional to n) -----------------
prep_marg <- df_quad %>%
group_by(Block, prep_bin) %>%
summarise(n_prep = sum(n), .groups = "drop") %>%
group_by(Block) %>%
mutate(
total_n = sum(n_prep),
width = n_prep / total_n,
x_min = dplyr::lag(cumsum(width), default = 0),
x_max = cumsum(width)
) %>%
ungroup()
df_mosaic <- df_quad %>%
left_join(prep_marg, by = c("Block", "prep_bin")) %>%
group_by(Block, prep_bin) %>%
mutate(
height = n / n_prep,
y_min = dplyr::lag(cumsum(height), default = 0),
y_max = cumsum(height)
) %>%
ungroup()
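## Quick sanity check (a sketch): each rectangle's area times the block
## total should reproduce its cell count n
df_mosaic %>%
  mutate(area_n = (x_max - x_min) * (y_max - y_min) * total_n) %>%
  summarise(max_abs_err = max(abs(area_n - n))) # ~0 up to floating point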
## --- Axis label positions ----------------------------------------------
prep_labels <- prep_marg %>%
mutate(
x = (x_min + x_max) / 2,
y = -0.06
)
exec_marg2 <- df_mosaic %>%
group_by(Block, exec_bin) %>%
summarise(
y_min = min(y_min),
y_max = max(y_max),
.groups = "drop"
) %>%
mutate(
y = (y_min + y_max) / 2,
x = -0.06
)
## --- Facet layout info -----------------------------------------------
n_cols <- 5
n_blocks <- length(levels(df_acc$Block))
n_rows <- ceiling(n_blocks / n_cols)
block_levels <- levels(df_acc$Block)
bottom_row_blocks <- block_levels[((n_rows - 1) * n_cols + 1):n_blocks]
left_col_blocks <- block_levels[seq(1, n_blocks, by = n_cols)]
## --- Plot --------------------------------------------------------------
gg_mosaic <- ggplot() +
geom_rect(
data = df_mosaic,
aes(
xmin = x_min,
xmax = x_max,
ymin = y_min,
ymax = y_max,
fill = mean_rt
),
color = "grey20"
) +
geom_text(
data = prep_labels %>% dplyr::filter(Block %in% bottom_row_blocks),
aes(x = x, y = y, label = prep_bin),
inherit.aes = FALSE,
size = 3
) +
geom_text(
data = exec_marg2 %>% dplyr::filter(Block %in% left_col_blocks),
aes(x = x, y = y, label = exec_bin),
inherit.aes = FALSE,
angle = 90,
hjust = 0.5,
size = 3
) +
facet_wrap(
~ Block,
nrow = 2,
labeller = labeller(Block = block_labs_all)
) +
scale_fill_viridis_c(
option = "viridis",
direction = -1,
name = "Mean\nlog RT"
) +
coord_equal(clip = "off") +
labs(
x = NULL,
y = NULL,
title = "Prep/exec H quadrants over blocks") +
theme_minimal() +
theme(
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
strip.background = element_rect(fill = "grey90", color = NA),
strip.text = element_text(size = 12, face = "bold"),
plot.margin = margin(10, 10, 25, 25),
legend.position = "right",
legend.margin = margin(t = 2, r = 0, b = 0, l = 0)
)
gg_mosaic

##
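## The binary outcome highprep_lowexec and the models summarised below are
## not echoed in this section. A plausible reconstruction, assuming "high
## prep / low exec" means above the global median H_prep_vm and below the
## global median H_exec_vm (the same medians as the mosaic bins above);
## the variable definition and mod_hplx_* names are assumptions:
df_learning_acc <- df_learning_acc %>%
  mutate(highprep_lowexec = as.integer(
    H_prep_vm > prep_breaks[2] & H_exec_vm < exec_breaks[2]
  ))
df_test_acc <- df_test_acc %>%
  mutate(highprep_lowexec = as.integer(
    H_prep_vm > prep_breaks[2] & H_exec_vm < exec_breaks[2]
  ))
mod_hplx_learn <- glmmTMB(highprep_lowexec ~ Block + (1 | Subject),
                          family = binomial(), data = df_learning_acc)
mod_hplx_test <- glmmTMB(highprep_lowexec ~ Block + (1 | Subject),
                         family = binomial(), data = df_test_acc)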
## =============================================================
## LEARNING PHASE – highprep_lowexec ~ Block
## Model summary
## =============================================================
## Family: binomial ( logit )
## Formula: highprep_lowexec ~ Block + (1 | Subject)
## Data: df_learning_acc
##
## AIC BIC logLik -2*log(L) df.resid
## 7054.5 7116.9 -3518.3 7036.5 7597
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 2.502 1.582
## Number of obs: 7606, groups: Subject, 22
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.6441 0.3596 -7.353 1.94e-13 ***
## Block2 0.5626 0.1442 3.900 9.60e-05 ***
## Block3 1.0761 0.1393 7.726 1.11e-14 ***
## Block4 1.3583 0.1378 9.855 < 2e-16 ***
## Block5 1.1818 0.1390 8.500 < 2e-16 ***
## Block6 1.1215 0.1394 8.043 8.76e-16 ***
## Block7 1.4282 0.1380 10.347 < 2e-16 ***
## Block8 1.6068 0.1371 11.717 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## LEARNING PHASE – highprep_lowexec ~ Block
## Type II ANOVA (Wald chi-square)
## -------------------------------------------------------------
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: highprep_lowexec
## Chisq Df Pr(>Chisq)
## Block 199.75 7 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## LEARNING PHASE – highprep_lowexec ~ Block
## R2 (marginal / conditional)
## -------------------------------------------------------------
## # R2 for Mixed Models
##
## Conditional R2: 0.453
## Marginal R2: 0.037
##
## -------------------------------------------------------------
## LEARNING PHASE – Odds ratios by Block (reference = Block 1)
## -------------------------------------------------------------
## Block 1 (reference): OR = 1.000 (reference level)
## Block 2: OR = 1.755; 95% CI [1.323, 2.329]
## Block 3: OR = 2.933; 95% CI [2.232, 3.854]
## Block 4: OR = 3.890; 95% CI [2.969, 5.096]
## Block 5: OR = 3.260; 95% CI [2.483, 4.282]
## Block 6: OR = 3.069; 95% CI [2.335, 4.034]
## Block 7: OR = 4.171; 95% CI [3.182, 5.467]
## Block 8: OR = 4.987; 95% CI [3.812, 6.525]
##
## =============================================================
## TEST PHASE – highprep_lowexec ~ Block
## Model summary
## =============================================================
## Family: binomial ( logit )
## Formula: highprep_lowexec ~ Block + (1 | Subject)
## Data: df_test_acc
##
## AIC BIC logLik -2*log(L) df.resid
## 1884.8 1901.4 -939.4 1878.8 1830
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Subject (Intercept) 1.964 1.401
## Number of obs: 1833, groups: Subject, 22
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.05362 0.31262 -3.370 0.000751 ***
## Block10 -0.02608 0.11541 -0.226 0.821239
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## -------------------------------------------------------------
## TEST PHASE – highprep_lowexec ~ Block
## Type II ANOVA (Wald chi-square)
## -------------------------------------------------------------
## Analysis of Deviance Table (Type II Wald chisquare tests)
##
## Response: highprep_lowexec
## Chisq Df Pr(>Chisq)
## Block 0.0511 1 0.8212
##
## -------------------------------------------------------------
## TEST PHASE – highprep_lowexec ~ Block
## R2 (marginal / conditional)
## -------------------------------------------------------------
## # R2 for Mixed Models
##
## Conditional R2: 0.374
## Marginal R2: 0.000
##
## -------------------------------------------------------------
## TEST PHASE – Odds ratios by Block (reference = Block 9)
## -------------------------------------------------------------
## Block 9 (reference): OR = 1.000 (reference level)
## Block 10: OR = 0.974; 95% CI [0.777, 1.222]
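## The odds ratios above are exponentiated fixed effects, e.g.
## exp(0.5626) ≈ 1.755 for Block 2, and the contrast tables below match
## Tukey-adjusted emmeans contrasts back-transformed to the odds-ratio
## scale (a sketch with the assumed object names from above):
exp(fixef(mod_hplx_learn)$cond)
summary(pairs(emmeans(mod_hplx_learn, ~ Block)), infer = TRUE, type = "response")
summary(pairs(emmeans(mod_hplx_test, ~ Block)), infer = TRUE, type = "response")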

## -------------------------------------------------------------
## LEARNING PHASE – highprep_lowexec: pairwise block odds ratios
## -------------------------------------------------------------
## contrast odds.ratio SE df asymp.LCL asymp.UCL null z.ratio p.value
## Block1 / Block2 0.570 0.0822 Inf 0.368 0.882 1 -3.900 0.0024
## Block1 / Block3 0.341 0.0475 Inf 0.224 0.520 1 -7.726 <0.0001
## Block1 / Block4 0.257 0.0354 Inf 0.169 0.390 1 -9.855 <0.0001
## Block1 / Block5 0.307 0.0426 Inf 0.201 0.467 1 -8.500 <0.0001
## Block1 / Block6 0.326 0.0454 Inf 0.213 0.497 1 -8.043 <0.0001
## Block1 / Block7 0.240 0.0331 Inf 0.158 0.364 1 -10.347 <0.0001
## Block1 / Block8 0.201 0.0275 Inf 0.132 0.304 1 -11.717 <0.0001
## Block2 / Block3 0.598 0.0725 Inf 0.414 0.864 1 -4.239 0.0006
## Block2 / Block4 0.451 0.0539 Inf 0.314 0.648 1 -6.662 <0.0001
## Block2 / Block5 0.538 0.0651 Inf 0.373 0.777 1 -5.123 <0.0001
## Block2 / Block6 0.572 0.0694 Inf 0.396 0.826 1 -4.606 0.0001
## Block2 / Block7 0.421 0.0503 Inf 0.293 0.605 1 -7.236 <0.0001
## Block2 / Block8 0.352 0.0417 Inf 0.246 0.504 1 -8.808 <0.0001
## Block3 / Block4 0.754 0.0852 Inf 0.535 1.062 1 -2.497 0.1967
## Block3 / Block5 0.900 0.1030 Inf 0.636 1.273 1 -0.923 0.9840
## Block3 / Block6 0.956 0.1100 Inf 0.674 1.354 1 -0.394 0.9999
## Block3 / Block7 0.703 0.0796 Inf 0.499 0.991 1 -3.112 0.0393
## Block3 / Block8 0.588 0.0659 Inf 0.419 0.826 1 -4.739 <0.0001
## Block4 / Block5 1.193 0.1340 Inf 0.848 1.678 1 1.566 0.7706
## Block4 / Block6 1.267 0.1430 Inf 0.899 1.786 1 2.092 0.4200
## Block4 / Block7 0.933 0.1040 Inf 0.666 1.306 1 -0.628 0.9985
## Block4 / Block8 0.780 0.0858 Inf 0.559 1.088 1 -2.260 0.3160
## Block5 / Block6 1.062 0.1220 Inf 0.750 1.504 1 0.526 0.9995
## Block5 / Block7 0.782 0.0882 Inf 0.555 1.100 1 -2.184 0.3615
## Block5 / Block8 0.654 0.0730 Inf 0.466 0.917 1 -3.808 0.0035
## Block6 / Block7 0.736 0.0834 Inf 0.522 1.038 1 -2.706 0.1207
## Block6 / Block8 0.616 0.0690 Inf 0.438 0.865 1 -4.327 0.0004
## Block7 / Block8 0.836 0.0921 Inf 0.599 1.168 1 -1.623 0.7365
##
## Confidence level used: 0.95
## Conf-level adjustment: tukey method for comparing a family of 8 estimates
## Intervals are back-transformed from the log odds ratio scale
## P value adjustment: tukey method for comparing a family of 8 estimates
## Tests are performed on the log odds ratio scale
## -------------------------------------------------------------
## TEST PHASE – highprep_lowexec: block odds ratio
## -------------------------------------------------------------
## contrast odds.ratio SE df asymp.LCL asymp.UCL null z.ratio p.value
## Block9 / Block10 1.03 0.118 Inf 0.819 1.29 1 0.226 0.8212
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log odds ratio scale
## Tests are performed on the log odds ratio scale