This code accompanies “An introduction to mixed effects modeling”

Preliminaries

Install packages if they aren’t already installed

if (!("lme4" %in% installed.packages())) install.packages("lme4")
if (!("tidyverse" %in% installed.packages())) install.packages("tidyverse")
if (!("afex" %in% installed.packages())) install.packages("afex")

Load packages

library(lme4)
## Loading required package: Matrix
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.2 ──
## ✔ ggplot2 3.3.6     ✔ purrr   0.3.4
## ✔ tibble  3.1.7     ✔ dplyr   1.0.9
## ✔ tidyr   1.2.0     ✔ stringr 1.4.0
## ✔ readr   2.1.2     ✔ forcats 0.5.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ tidyr::expand() masks Matrix::expand()
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ✖ tidyr::pack()   masks Matrix::pack()
## ✖ tidyr::unpack() masks Matrix::unpack()
library(afex)
## ************
## Welcome to afex. For support visit: http://afex.singmann.science/
## - Functions for ANOVAs: aov_car(), aov_ez(), and aov_4()
## - Methods for calculating p-values with mixed(): 'S', 'KR', 'LRT', and 'PB'
## - 'afex_aov' and 'mixed' objects can be passed to emmeans() for follow-up tests
## - NEWS: emmeans() for ANOVA models now uses model = 'multivariate' as default.
## - Get and set global package options with: afex_options()
## - Set orthogonal sum-to-zero contrasts globally: set_sum_contrasts()
## - For example analyses see: browseVignettes("afex")
## ************
## 
## Attaching package: 'afex'
## 
## The following object is masked from 'package:lme4':
## 
##     lmer

Load data, and name that object “rt_data”

rt_data <- read_csv("rt_dummy_data.csv")
## Rows: 21679 Columns: 4
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (2): modality, stim
## dbl (2): PID, RT
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.

View the first six rows of the data frame

head(rt_data)
## # A tibble: 6 × 4
##     PID    RT modality   stim 
##   <dbl> <dbl> <chr>      <chr>
## 1   301  1024 Audio-only gown 
## 2   301   838 Audio-only might
## 3   301  1060 Audio-only fern 
## 4   301   882 Audio-only vane 
## 5   301   971 Audio-only pup  
## 6   301  1064 Audio-only rise

Create an aggregated version that would be used in an ANOVA, and view the first six rows of that data frame

ANOVA_data <- rt_data %>% 
  group_by(PID, modality) %>% 
  dplyr::summarise(RT = mean(RT))
## `summarise()` has grouped output by 'PID'. You can override using the `.groups`
## argument.
head(ANOVA_data)
## # A tibble: 6 × 3
## # Groups:   PID [3]
##     PID modality       RT
##   <dbl> <chr>       <dbl>
## 1   301 Audio-only  1027.
## 2   301 Audiovisual 1002.
## 3   302 Audio-only  1047.
## 4   302 Audiovisual 1043.
## 5   303 Audio-only   883.
## 6   303 Audiovisual  938.
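
For reference only (this comparison isn't part of the paper's analysis): the aggregated data frame is what a repeated-measures ANOVA would operate on. With afex already loaded, such an ANOVA could be run as sketched below, whereas the mixed models in the rest of this script work from the trial-level data.

# Hypothetical comparison, not in the original analysis: a repeated-measures
# ANOVA on the aggregated data (one mean RT per participant per modality)
ANOVA_comparison <- aov_ez(id = "PID", dv = "RT", data = ANOVA_data, within = "modality")
ANOVA_comparison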

Testing for an effect of modality on response time

Dummy code modality so that audio-only is the reference level

rt_data$modality <- ifelse(rt_data$modality == "Audio-only", 0, 1)
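
An equivalent alternative, sketched below and left commented out so it doesn't interfere with the numeric coding used in the rest of this script, is to keep modality as a factor and set the reference level explicitly; lmer() would then build the same 0/1 dummy code internally.

# Alternative (not used in this walkthrough): keep modality as a factor with
# "Audio-only" as the reference level instead of recoding it to 0/1
# rt_data$modality <- relevel(factor(rt_data$modality), ref = "Audio-only")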

Build a full model

rt_full.mod <- lmer(RT ~ 1 + modality + 
                      (1 + modality|PID) + (1 + modality|stim), 
                    data = rt_data)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00882975 (tol = 0.002, component 1)

This model failed to converge. The first thing we’ll do is try the all_fit() function from the afex package to look for an optimizer that works.

all_fit(rt_full.mod)
## bobyqa. : [OK]
## Nelder_Mead. : [OK]
## optimx.nlminb :
## Loading required namespace: optimx
## [ERROR]
## optimx.L-BFGS-B :
## Loading required namespace: optimx
## [ERROR]
## nloptwrap.NLOPT_LN_NELDERMEAD :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00882975 (tol = 0.002, component 1)
## [OK]
## nloptwrap.NLOPT_LN_BOBYQA :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00882975 (tol = 0.002, component 1)
## [OK]
## nmkbw. : [ERROR]
## $bobyqa.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.72   0.16 
##  PID      (Intercept) 168.98        
##           modality     87.81   -0.17
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## 
## $Nelder_Mead.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.72   0.16 
##  PID      (Intercept) 168.98        
##           modality     87.81   -0.17
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## 
## $optimx.nlminb
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $`optimx.L-BFGS-B`
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $nloptwrap.NLOPT_LN_NELDERMEAD
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.73   0.16 
##  PID      (Intercept) 169.08        
##           modality     87.80   -0.16
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nloptwrap.NLOPT_LN_BOBYQA
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.73   0.16 
##  PID      (Intercept) 169.08        
##           modality     87.80   -0.16
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nmkbw.
## <packageNotFoundError in loadNamespace(x): there is no package called 'dfoptim'>

The bobyqa optimizer converged without warnings, so we'll refit the full model with it.

rt_full.mod <- lmer(RT ~ 1 + modality + 
                      (1 + modality|PID) + (1 + modality|stim), 
                    data = rt_data, 
                    control = lmerControl(optimizer = "bobyqa"))
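
Before moving on, we can confirm that the refit is healthy with a quick diagnostic (not part of the original script): isSingular() from lme4 returns FALSE when no variance components or correlations were estimated at the boundary.

# Quick check that the bobyqa refit is not a singular fit
isSingular(rt_full.mod)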

Run the all_fit() function from the afex package again for demonstration purposes

all_fit(rt_full.mod)
## bobyqa. : [OK]
## Nelder_Mead. : [OK]
## optimx.nlminb :
## Loading required namespace: optimx
## [ERROR]
## optimx.L-BFGS-B :
## Loading required namespace: optimx
## [ERROR]
## nloptwrap.NLOPT_LN_NELDERMEAD :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00882975 (tol = 0.002, component 1)
## [OK]
## nloptwrap.NLOPT_LN_BOBYQA :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00882975 (tol = 0.002, component 1)
## [OK]
## nmkbw. : [ERROR]
## $bobyqa.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.72   0.16 
##  PID      (Intercept) 168.98        
##           modality     87.81   -0.17
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## 
## $Nelder_Mead.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.72   0.16 
##  PID      (Intercept) 168.98        
##           modality     87.81   -0.17
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## 
## $optimx.nlminb
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $`optimx.L-BFGS-B`
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $nloptwrap.NLOPT_LN_NELDERMEAD
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.73   0.16 
##  PID      (Intercept) 169.08        
##           modality     87.80   -0.16
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nloptwrap.NLOPT_LN_BOBYQA
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## REML criterion at convergence: 302385.7
## Random effects:
##  Groups   Name        Std.Dev. Corr 
##  stim     (Intercept)  17.43        
##           modality     14.73   0.16 
##  PID      (Intercept) 169.08        
##           modality     87.80   -0.16
##  Residual             255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
## (Intercept)     modality  
##     1044.14        83.18  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nmkbw.
## <packageNotFoundError in loadNamespace(x): there is no package called 'dfoptim'>

Build a reduced model that doesn’t contain the fixed effect of modality but is otherwise identical to the full model (including the random effects structure and the control parameter)

rt_reduced.mod <- lmer(RT ~ 1 + 
                         (1 + modality|stim) + (1 + modality|PID), 
                       data = rt_data, 
                       control = lmerControl(optimizer = "bobyqa"))

Test for an effect of modality via a likelihood ratio test

anova(rt_reduced.mod, rt_full.mod)
## refitting model(s) with ML (instead of REML)
## Data: rt_data
## Models:
## rt_reduced.mod: RT ~ 1 + (1 + modality | stim) + (1 + modality | PID)
## rt_full.mod: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##                npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)    
## rt_reduced.mod    8 302449 302513 -151217   302433                         
## rt_full.mod       9 302419 302491 -151200   302401 32.385  1  1.264e-08 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Use the mixed() function from the afex package for demonstration purposes (this appears in the Likelihood Ratio Tests portion of the paper)

mixed(RT ~ 1 + modality + 
         (1 + modality|PID) + (1 + modality|stim), 
       data = rt_data, 
       control = lmerControl(optimizer = "bobyqa"), 
       method = 'LRT')
## Contrasts set to contr.sum for the following variables: stim
## Numerical variables NOT centered on 0: modality
## If in interactions, interpretation of lower order (e.g., main) effects difficult.
## REML argument to lmer() set to FALSE for method = 'PB' or 'LRT'
## Mixed Model Anova Table (Type 3 tests, LRT-method)
## 
## Model: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
## Data: rt_data
## Df full model: 9
##     Effect df     Chisq p.value
## 1 modality  1 32.39 ***   <.001
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1

View summary output

summary(rt_full.mod) 
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: RT ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: rt_data
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 302385.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3646 -0.6964 -0.0140  0.5886  5.0003 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  stim     (Intercept)   303.9   17.43        
##           modality      216.7   14.72   0.16 
##  PID      (Intercept) 28552.7  168.98        
##           modality     7709.8   87.81   -0.17
##  Residual             65258.8  255.46        
## Number of obs: 21679, groups:  stim, 543; PID, 53
## 
## Fixed effects:
##             Estimate Std. Error      df t value Pr(>|t|)    
## (Intercept)  1044.14      23.36   52.14  44.704  < 2e-16 ***
## modality       83.18      12.58   52.09   6.615 2.02e-08 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##          (Intr)
## modality -0.178

The parameter estimate for the effect of modality is 83.18, which means that in these dummy data participants are, on average, about 83 ms slower in the audiovisual condition than in the audio-only condition.
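
To make that concrete, here is a quick illustration (not in the original script) of the condition means implied by the fixed effects, using the 0/1 dummy coding from above.

# Implied condition means (modality: 0 = audio-only, 1 = audiovisual)
fixef(rt_full.mod)["(Intercept)"]                                   # ~1044 ms, audio-only
fixef(rt_full.mod)["(Intercept)"] + fixef(rt_full.mod)["modality"]  # ~1127 ms, audiovisual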

Run the coef() function to examine individual participant and item intercept and slope estimates

coef(rt_full.mod)
## $stim
##        (Intercept) modality
## babe      1038.921 82.11525
## back      1050.914 86.52633
## bad       1041.122 81.12265
## bag       1042.896 86.40611
## bake      1039.395 81.75294
## balk      1042.560 84.17731
## ball      1035.114 80.50257
## bane      1055.741 83.15404
## bang      1048.652 85.51500
## bar       1042.990 82.57040
## base      1043.169 81.00293
## bash      1036.544 79.31509
## batch     1047.446 84.84039
## bath      1039.152 81.59491
## bathe     1043.922 77.52511
## bead      1037.029 78.75312
## beak      1042.654 84.36907
## beam      1040.067 79.28121
## bear      1040.514 83.76448
## beat      1049.312 84.73013
## beg       1044.947 85.99861
## bib       1048.505 83.32993
## bide      1046.706 84.50432
## big       1044.807 84.21708
## bin       1045.735 83.48755
## birth     1022.948 72.74009
## bit       1046.026 84.01293
## boat      1039.346 79.28314
## bog       1041.345 80.92927
## bomb      1045.924 85.51503
## booth     1039.411 80.25990
## booze     1051.868 86.61126
## botch     1040.909 78.78903
## bud       1034.365 79.13436
## budge     1044.947 84.51834
## bun       1034.580 79.54539
## burp      1033.455 75.26892
## bus       1033.021 79.51772
## bush      1035.837 74.66124
## but       1039.495 80.07000
## cab       1040.445 83.75969
## cage      1038.917 78.57022
## call      1049.019 85.14597
## calm      1047.082 87.14911
## cape      1040.686 78.54302
## cash      1042.069 86.83045
## catch     1036.478 79.55030
## caught    1046.641 86.60325
## cave      1054.375 88.71947
## cease     1045.759 86.50905
## cell      1050.966 92.27475
## chair     1029.443 73.05485
## chalk     1044.948 83.93338
## chase     1059.059 88.57697
## cheap     1052.476 87.50213
## cheat     1049.722 90.15119
## cheek     1035.924 79.15045
## chef      1056.613 91.35626
## chewed    1042.900 80.91123
## chin      1040.826 84.85173
## chirp     1033.836 76.85194
## choke     1054.485 90.25233
## choose    1037.296 79.01742
## chug      1042.780 80.73133
## chum      1047.075 86.13044
## cite      1034.088 80.05831
## coat      1053.603 91.02679
## cone      1038.833 81.21503
## cope      1057.129 90.66427
## core      1048.035 83.86178
## cot       1051.651 87.05227
## couch     1040.402 80.76851
## cough     1045.715 78.56355
## cub       1030.915 82.09385
## cuff      1046.868 86.57884
## cup       1037.053 80.95723
## curl      1033.687 75.00306
## curt      1034.330 77.60751
## curve     1051.558 89.37794
## czar      1046.183 82.42060
## dab       1046.619 84.25420
## dash      1037.596 79.10411
## date      1036.570 83.03302
## dawn      1037.905 76.16511
## days      1038.708 81.21411
## dear      1045.196 83.98793
## death     1040.987 80.77035
## deck      1049.092 87.14968
## dice      1035.631 80.64583
## did       1030.882 76.37739
## died      1042.946 80.63656
## dig       1039.994 81.75879
## dim       1040.517 85.43444
## dime      1036.267 75.99743
## dip       1041.740 80.13669
## dire      1048.370 82.92101
## ditch     1058.635 90.70637
## dive      1033.222 79.65593
## dock      1035.795 81.31116
## dodge     1048.735 84.84137
## does      1039.454 82.17609
## dog       1039.891 84.28073
## door      1033.471 77.61836
## dot       1031.332 76.88093
## doubt     1040.535 82.81449
## doug      1035.189 81.62779
## douse     1044.627 86.85954
## dove      1045.114 83.05945
## doze      1039.404 81.31144
## duke      1034.318 77.57753
## dull      1053.447 87.50609
## face      1034.552 76.66392
## fad       1053.157 89.38939
## fake      1047.266 86.25891
## fame      1042.216 76.66416
## fang      1056.606 87.39887
## fate      1042.708 84.33719
## fawn      1050.121 86.16227
## fed       1050.624 86.57670
## fern      1053.202 87.63098
## fetch     1026.848 74.19910
## fief      1038.397 80.43292
## fight     1038.246 82.56983
## fill      1048.568 83.73571
## fin       1055.797 86.71913
## fire      1037.484 78.56708
## fit       1044.405 83.27910
## five      1050.794 84.80885
## fizz      1043.524 81.92916
## foal      1046.643 83.33899
## foam      1038.740 78.93388
## fog       1032.641 76.77793
## foil      1045.298 82.55930
## folk      1041.681 80.78194
## fought    1048.857 84.01323
## foul      1044.304 82.80382
## fudge     1037.576 81.84447
## fuzz      1046.588 85.08889
## gab       1055.214 87.85646
## gag       1040.243 83.27574
## gain      1050.870 85.24608
## gal       1048.022 88.26248
## gall      1045.231 82.68144
## game      1039.056 78.09724
## gang      1048.991 83.11004
## gave      1047.072 84.53769
## gawk      1040.297 82.07803
## gear      1048.268 84.41217
## gel       1040.727 84.26044
## gem       1036.839 77.17611
## get       1038.837 80.07487
## ghoul     1047.729 85.36132
## gig       1049.111 85.43300
## give      1043.300 84.10935
## gnat      1039.070 86.00452
## gnome     1051.898 86.95735
## goal      1047.900 88.15667
## gob       1041.923 80.02934
## gone      1049.454 86.30116
## gong      1042.177 82.48141
## goof      1043.616 82.18384
## goon      1047.555 80.33831
## goose     1043.661 89.58199
## gore      1043.550 87.56224
## got       1028.070 72.92424
## gown      1032.794 77.64282
## guide     1053.576 87.74413
## guile     1049.651 86.22690
## gum       1042.929 81.95104
## hack      1033.212 72.79547
## had       1056.474 89.79899
## hair      1035.562 75.88216
## half      1055.069 90.36747
## hall      1045.000 78.93650
## hash      1045.853 89.40282
## hat       1038.921 79.46537
## hate      1029.835 76.25338
## have      1052.226 89.29449
## head      1053.048 87.21673
## hear      1040.926 79.32764
## heat      1044.058 83.03276
## heed      1044.397 81.81062
## height    1039.468 80.97631
## hem       1047.760 83.68875
## hid       1030.819 77.66557
## hike      1046.925 88.09901
## hill      1045.766 86.79691
## hip       1027.933 75.13222
## hire      1043.358 84.57245
## hiss      1042.363 84.64489
## hit       1040.295 76.04814
## hitch     1044.371 81.72504
## hive      1043.310 83.95397
## hog       1050.315 84.20895
## home      1049.890 87.47965
## hone      1049.453 84.30097
## hoof      1038.460 80.14801
## hook      1029.895 76.23803
## hose      1042.307 82.44220
## house     1041.671 85.98882
## huff      1052.831 84.78244
## hug       1031.841 80.62349
## hull      1044.746 85.12876
## hung      1050.495 89.46252
## hurt      1038.114 80.32461
## hush      1032.224 80.90924
## hut       1057.149 91.93563
## hutch     1041.535 84.58074
## jab       1048.368 82.74083
## jail      1044.345 80.30168
## jam       1045.090 87.85368
## jar       1046.065 82.50539
## jazz      1050.055 87.11875
## jeer      1043.386 90.41969
## jewel     1036.120 80.82960
## job       1051.172 90.10615
## jock      1039.002 74.91864
## jog       1046.207 86.46552
## jowl      1027.884 77.33074
## judge     1036.912 82.00594
## jug       1044.486 84.41410
## juice     1045.157 82.95521
## juke      1046.280 84.24497
## kale      1032.415 77.02195
## kid       1034.977 81.12644
## kill      1046.351 84.34469
## kin       1036.976 80.18415
## kite      1040.837 81.98895
## kneel     1040.972 78.96480
## knit      1058.487 90.19132
## known     1049.701 84.77758
## lab       1041.940 81.94608
## lace      1037.523 81.57207
## lad       1046.570 82.58010
## lamb      1050.551 84.52490
## latch     1035.295 77.17098
## late      1039.832 82.65896
## lawn      1041.522 83.11979
## leaf      1043.802 85.12635
## league    1059.531 92.30755
## leak      1038.067 77.24225
## leave     1055.274 87.47560
## ledge     1038.830 81.44122
## leg       1049.767 84.41874
## less      1046.770 80.75677
## let       1037.587 80.30931
## lied      1060.315 88.13043
## life      1047.393 87.24216
## like      1053.048 89.35595
## lime      1054.262 87.92189
## line      1042.631 80.72753
## load      1034.126 81.92030
## lob       1042.670 82.09141
## lobe      1048.628 84.05315
## lodge     1035.484 77.99530
## log       1046.951 81.64526
## lone      1040.065 82.17625
## loose     1052.169 86.28380
## loot      1042.175 81.71561
## lop       1049.378 88.01635
## lore      1038.699 78.73427
## loud      1042.114 85.30292
## louse     1055.125 90.88445
## lug       1055.800 88.50829
## luge      1057.174 91.53718
## mail      1044.915 85.50506
## maim      1042.724 82.31876
## mall      1048.419 88.54734
## man       1037.297 83.64781
## mass      1036.112 78.22460
## mat       1055.562 89.98436
## math      1049.406 85.80847
## mauve     1054.478 90.32598
## maze      1047.851 86.82605
## mead      1051.825 83.94737
## meal      1032.525 75.84281
## meek      1048.708 93.00321
## men       1031.343 81.29859
## mess      1044.152 82.58485
## might     1044.477 83.70256
## mill      1043.427 88.04731
## miss      1051.055 85.65202
## mitt      1046.520 82.73150
## mob       1046.300 81.09588
## mole      1048.742 87.84253
## moon      1043.335 84.76682
## moot      1046.066 82.33521
## mop       1058.143 89.48150
## mope      1047.581 84.08307
## moth      1043.878 83.01256
## mouse     1040.950 81.26745
## mouth     1043.925 81.07528
## mud       1043.979 82.25268
## muff      1033.502 76.85972
## mum       1051.619 84.76558
## myth      1051.944 87.61109
## nab       1052.703 83.27178
## name      1042.368 85.79230
## nape      1046.271 82.35662
## nash      1044.102 84.36214
## neat      1039.380 79.00126
## neck      1041.276 85.96640
## need      1053.045 84.24175
## nerve     1045.939 83.56609
## newt      1048.357 82.53320
## niece     1037.787 75.62424
## night     1052.631 82.61579
## nod       1063.382 88.83965
## none      1039.936 79.73737
## noon      1049.854 86.69280
## nose      1041.298 79.44295
## not       1048.859 84.43747
## notch     1035.649 81.58994
## null      1036.649 76.29599
## pair      1060.324 93.20108
## pan       1044.253 84.08790
## pass      1047.290 83.56250
## peace     1021.883 76.96571
## peach     1032.528 74.07104
## peak      1043.150 82.73249
## peat      1032.655 76.14689
## peep      1035.021 80.66524
## peeve     1049.469 86.97180
## pen       1045.176 85.58509
## perch     1046.296 87.01473
## perm      1042.549 84.15975
## pet       1035.778 79.20374
## phase     1037.388 81.19849
## phone     1040.226 76.50895
## pick      1048.104 87.14130
## pile      1039.829 84.99871
## pin       1035.327 77.83480
## ping      1038.957 81.51863
## pipe      1038.738 79.05159
## pitch     1049.536 82.15308
## pod       1049.224 82.11421
## poise     1045.467 85.12790
## pooch     1048.726 85.13140
## pool      1031.427 81.95096
## pop       1039.851 83.78211
## pose      1040.228 78.89911
## pouch     1048.508 86.99764
## pour      1020.649 75.59088
## pout      1044.446 85.35964
## puff      1046.550 84.63827
## pun       1046.204 83.41673
## pup       1042.187 82.84786
## putt      1051.320 86.30383
## rack      1052.194 88.42029
## rag       1049.513 85.34206
## raid      1045.075 85.21245
## rail      1043.475 80.06053
## rake      1048.632 85.15192
## ram       1046.203 85.17708
## ran       1033.117 77.91643
## rang      1037.375 82.99776
## rap       1036.655 74.45891
## rash      1062.557 90.51961
## rate      1025.637 75.05728
## rear      1043.327 81.29069
## red       1050.696 82.86366
## ref       1047.366 83.69481
## rev       1041.169 81.84955
## rib       1048.010 89.44298
## rich      1037.605 76.67936
## ridge     1060.859 89.01744
## right     1050.706 91.67927
## ring      1053.756 90.31851
## rise      1044.146 85.83991
## roach     1045.981 82.95098
## rogue     1038.807 80.01359
## roof      1047.225 83.69863
## rook      1041.931 84.79144
## room      1056.964 96.29326
## root      1037.153 75.93269
## rose      1048.279 87.77441
## rot       1038.003 78.05587
## rouge     1047.959 81.11511
## rove      1042.736 82.53234
## rude      1048.663 85.51992
## rule      1045.350 87.68957
## rut       1030.108 76.84240
## sack      1041.282 78.54576
## safe      1047.540 86.21771
## sage      1039.689 82.04671
## sail      1044.078 83.95967
## same      1045.230 85.12617
## sane      1043.497 79.31361
## sang      1043.812 80.94913
## sap       1047.028 84.43259
## sash      1054.596 91.60748
## sauce     1037.759 83.45035
## save      1054.777 88.22357
## seem      1033.716 77.48940
## seen      1035.783 75.03895
## sees      1049.030 85.99759
## serve     1047.696 83.73232
## set       1048.900 87.96613
## sewed     1053.234 86.18326
## shack     1042.167 84.73921
## shade     1046.022 86.01457
## sham      1055.725 89.49449
## shave     1054.706 87.20277
## shear     1031.092 77.33975
## sheath    1052.023 82.78807
## shed      1040.827 84.25497
## sheep     1051.120 85.38788
## sheet     1038.318 79.86358
## shell     1046.058 83.42332
## shim      1053.477 91.12430
## shin      1047.999 83.94248
## shine     1058.401 88.40566
## ship      1058.944 86.36536
## shook     1050.533 86.07391
## shoot     1057.926 87.96600
## shop      1040.877 80.48167
## shore     1041.212 80.23305
## shot      1053.560 85.04700
## shout     1045.631 82.74679
## shown     1047.444 86.41161
## shun      1036.757 82.00083
## shut      1036.340 80.71008
## sick      1043.321 80.00546
## side      1054.130 86.85353
## siege     1053.585 82.58631
## sieve     1049.320 85.29019
## sing      1043.519 77.59419
## sip       1037.098 76.41060
## sit       1055.054 91.27708
## soar      1045.964 83.13866
## sob       1049.354 89.09950
## sock      1041.142 79.43838
## sod       1059.771 90.60538
## soil      1048.732 80.73971
## some      1051.421 89.68947
## song      1053.748 87.10223
## soothe    1048.824 87.14043
## south     1043.370 84.77213
## sown      1055.497 87.34008
## sub       1048.541 83.47899
## such      1041.860 81.79825
## tag       1039.211 80.82305
## tail      1041.578 81.15354
## take      1047.383 83.86043
## talk      1043.455 80.17261
## tall      1045.471 86.83300
## tap       1043.258 80.37819
## tar       1035.998 78.06461
## taught    1043.637 82.89166
## teach     1032.380 76.94184
## tease     1033.791 75.30193
## tech      1039.392 81.65812
## teethe    1069.812 93.19195
## tell      1035.733 78.58388
## ten       1043.146 86.01577
## than      1034.098 81.12558
## thatch    1049.705 84.13229
## their     1039.479 82.88027
## them      1049.661 83.35414
## theme     1038.007 78.09565
## these     1048.204 84.25592
## thin      1046.338 83.67442
## thine     1053.483 91.28770
## those     1037.470 81.00046
## thug      1056.629 88.59633
## thus      1048.983 86.75590
## tide      1055.038 88.95372
## ties      1050.464 84.31061
## tight     1058.002 84.35917
## time      1042.171 79.42640
## tin       1036.582 82.24138
## tip       1046.729 87.79710
## toad      1047.643 84.62190
## toil      1050.046 89.78474
## toll      1040.978 81.53412
## tone      1027.634 76.98448
## top       1050.038 90.07496
## tote      1031.957 74.84664
## touch     1041.758 79.51831
## tough     1048.554 86.93232
## tout      1044.900 86.62065
## town      1048.702 81.11439
## tub       1038.111 77.01845
## tube      1044.586 76.69797
## use       1041.596 83.12448
## vague     1047.785 84.44377
## van       1053.602 85.54442
## vane      1051.523 90.12715
## vase      1042.641 82.29999
## veal      1039.174 79.67358
## verge     1043.832 80.22393
## verse     1046.594 84.56837
## vet       1039.852 80.92729
## vice      1038.963 82.34244
## vile      1044.623 82.69472
## vine      1038.305 80.89253
## vogue     1044.954 77.68250
## voice     1047.961 84.33089
## wade      1054.396 88.92320
## wage      1049.496 92.53761
## walk      1043.360 79.97573
## war       1043.699 83.18415
## watch     1030.853 77.33584
## wave      1043.158 81.72408
## web       1046.780 84.93749
## wed       1050.873 85.64578
## wedge     1049.558 86.09358
## weed      1048.798 84.90527
## week      1042.000 74.80525
## weep      1042.335 81.80561
## well      1041.712 81.23101
## wet       1041.312 79.81604
## whale     1035.809 78.57477
## whole     1051.191 88.42447
## wick      1052.483 85.12260
## wife      1043.994 81.43960
## will      1049.199 84.49693
## wine      1045.220 86.53623
## wipe      1038.198 79.55268
## wire      1034.326 78.78435
## woke      1045.807 85.73519
## womb      1047.489 80.21599
## won       1043.131 86.43082
## wool      1035.060 78.73348
## work      1038.318 75.58131
## wort      1052.710 84.56143
## worth     1040.409 82.85569
## wove      1052.240 87.98745
## wrath     1041.035 80.92772
## wren      1044.410 81.95176
## writhe    1042.264 81.78373
## wrong     1055.399 85.58990
## wrote     1042.828 80.63858
## yak       1044.860 81.99303
## yam       1048.245 90.16355
## yawn      1039.379 81.14485
## year      1037.308 82.97047
## yen       1044.379 83.33265
## yolk      1032.187 81.66656
## young     1032.830 78.03492
## your      1052.890 90.65928
## zeal      1042.742 77.45110
## zing      1045.024 83.55765
## 
## $PID
##     (Intercept)   modality
## 301   1024.0669 -16.936448
## 302   1044.1377   1.842595
## 303    882.8306  57.789322
## 304   1232.7545 -27.919823
## 306   1042.3421  33.886500
## 307   1111.3631  -9.939634
## 308   1250.7674  71.164797
## 309    795.2446  15.481902
## 310   1176.1359 104.151747
## 311   1012.9321  29.956285
## 312   1109.8745 153.103896
## 313   1114.2739  29.601099
## 314   1169.1517 -73.242636
## 315    877.9772  17.015005
## 316   1419.5222 -37.700574
## 318    945.4723  58.220394
## 319   1017.4086  98.276008
## 320    987.4941 101.341100
## 321   1025.1270 139.711286
## 324   1031.3305 136.247281
## 325    826.6336  34.599836
## 326   1048.6754  40.359191
## 328   1236.8999 128.530575
## 329   1042.3467  10.735126
## 331   1406.8491 160.773361
## 333   1644.5199  56.375038
## 334    943.7441  93.903851
## 335   1171.5855  64.055483
## 337    872.3788 169.983223
## 338    806.7055 121.221939
## 340   1179.8487 104.695843
## 341    867.6691 123.864781
## 342   1253.5632  30.102318
## 343    987.7640 208.128226
## 344   1027.5597  96.902635
## 346    895.7248  36.422787
## 348    755.2919 188.633630
## 349    940.6337  54.966491
## 350   1073.1133 302.721626
## 351   1120.9817 214.077528
## 352    796.4338  78.590336
## 353   1103.8173 155.151493
## 354   1074.6542 154.892068
## 355   1192.9343  89.264181
## 356    907.2052 397.764474
## 357    910.5673  81.928200
## 358    963.0962  33.338227
## 359   1087.6474 -39.273840
## 360   1070.4021  26.771316
## 361    982.1810 131.300533
## 362    953.3700  56.194588
## 363    920.7194  43.641059
## 364   1003.5099  75.844906
## 
## attr(,"class")
## [1] "coef.mer"

Testing for an interaction between modality and SNR

Load the data and name it "rt_data_interaction". Note that it's the same as the original data frame, but with an extra column containing SNR (signal-to-noise ratio). We could have been working with this data frame the whole time, but carrying an extra variable that we're not using can be confusing, so I waited to introduce it until now.

rt_data_interaction <- read_csv("rt_dummy_data_interaction.csv")
## Rows: 21679 Columns: 5
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (3): SNR, modality, stim
## dbl (2): PID, RT
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.

Dummy code modality and SNR so that audio-only and easy are the reference levels

rt_data_interaction$modality <- ifelse(rt_data_interaction$modality == "Audio-only", 0, 1)
rt_data_interaction$SNR <- ifelse(rt_data_interaction$SNR == "Easy", 0, 1)

Build the full model, which includes all by-participant and by-item random effects except a random slope for the modality-by-SNR interaction. I left that term out because, in my experience, random effects structures this complex almost always run into estimation issues with this kind of data, so we would end up simplifying anyway. I also want to avoid an overly complex random effects structure because that can limit power (see Matuschek et al., 2017).

rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (1 + modality + SNR|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00694594 (tol = 0.002, component 1)

This model failed to converge, indicating that there are estimation problems. We'll try the all_fit() function from the afex package to see if another optimizer will work.

all_fit(rt_int.mod)
## bobyqa. :
## boundary (singular) fit: see help('isSingular')
## [OK]
## Nelder_Mead. :
## boundary (singular) fit: see help('isSingular')
## Warning: Model failed to converge with 1 negative eigenvalue: -4.9e+02
## [OK]
## optimx.nlminb :
## Loading required namespace: optimx
## [ERROR]
## optimx.L-BFGS-B :
## Loading required namespace: optimx
## [ERROR]
## nloptwrap.NLOPT_LN_NELDERMEAD :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00694594 (tol = 0.002, component 1)
## [OK]
## nloptwrap.NLOPT_LN_BOBYQA :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00694594 (tol = 0.002, component 1)
## [OK]
## nmkbw. : [ERROR]
## $bobyqa.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality + SNR |  
##     stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301137.3
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)  18.184             
##           modality      6.729   1.00      
##           SNR           3.401  -1.00 -1.00
##  PID      (Intercept) 159.765             
##           modality     89.722  -0.03      
##           SNR         101.747   0.02 -0.47
##  Residual             247.491             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.52         92.36        -29.56  
## optimizer (bobyqa) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $Nelder_Mead.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality + SNR |  
##     stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301152.4
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)   0.000             
##           modality     18.122   NaN       
##           SNR           9.198   NaN  0.96 
##  PID      (Intercept) 186.856             
##           modality    101.671  -0.32      
##           SNR         109.862   0.27 -0.56
##  Residual             247.651             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.80         98.49         92.34        -29.52  
## optimizer (Nelder_Mead) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $optimx.nlminb
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $`optimx.L-BFGS-B`
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $nloptwrap.NLOPT_LN_NELDERMEAD
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality + SNR |  
##     stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301137.3
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)  18.202             
##           modality      6.728   1.00      
##           SNR           3.422  -1.00 -1.00
##  PID      (Intercept) 159.689             
##           modality     89.682  -0.03      
##           SNR         101.712   0.02 -0.47
##  Residual             247.491             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.52         92.36        -29.56  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nloptwrap.NLOPT_LN_BOBYQA
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality + SNR |  
##     stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301137.3
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)  18.202             
##           modality      6.728   1.00      
##           SNR           3.422  -1.00 -1.00
##  PID      (Intercept) 159.689             
##           modality     89.682  -0.03      
##           SNR         101.712   0.02 -0.47
##  Residual             247.491             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.52         92.36        -29.56  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nmkbw.
## <packageNotFoundError in loadNamespace(x): there is no package called 'dfoptim'>

All of these optimizers still show estimation problems (singular fits or convergence warnings), and the trouble seems to be coming from the item random effects: the by-item correlations are estimated at the ±1.00 boundary and the by-item slopes contribute very little to the total variance under every optimizer. Given that the fixed-effect and random-effect estimates are nearly identical across optimizers, we'll try removing the by-item random slope for modality or for SNR and test each reduced model against the full model via a likelihood ratio test to see whether those slopes can be dropped (refit = FALSE because we are testing random effects, not fixed effects).

rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (1 + modality + SNR|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00694594 (tol = 0.002, component 1)
rt_int_no_modality_stim.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (1 + SNR|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction)
## boundary (singular) fit: see help('isSingular')
rt_int_no_SNR_stim.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (1 + modality|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction)
## boundary (singular) fit: see help('isSingular')
anova(rt_int_no_modality_stim.mod, rt_int.mod, refit = FALSE)
## Data: rt_data_interaction
## Models:
## rt_int_no_modality_stim.mod: RT ~ 1 + modality + SNR + modality:SNR + (1 + SNR | stim) + (1 + modality + SNR | PID)
## rt_int.mod: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality + SNR | stim) + (1 + modality + SNR | PID)
##                             npar    AIC    BIC  logLik deviance  Chisq Df
## rt_int_no_modality_stim.mod   14 301166 301278 -150569   301138          
## rt_int.mod                    17 301171 301307 -150569   301137 0.9413  3
##                             Pr(>Chisq)
## rt_int_no_modality_stim.mod           
## rt_int.mod                      0.8154
anova(rt_int_no_SNR_stim.mod, rt_int.mod, refit = FALSE)
## Data: rt_data_interaction
## Models:
## rt_int_no_SNR_stim.mod: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality | stim) + (1 + modality + SNR | PID)
## rt_int.mod: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality + SNR | stim) + (1 + modality + SNR | PID)
##                        npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)
## rt_int_no_SNR_stim.mod   14 301166 301277 -150569   301138                     
## rt_int.mod               17 301171 301307 -150569   301137 0.2683  3     0.9659

The model with both by-item random slopes doesn't fit significantly better than either reduced model, so we'll start by removing the random slope that contributes less to the total variance across all of the previous fits (the by-item random slope for SNR).

rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (1 + modality|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction)
## boundary (singular) fit: see help('isSingular')

This one produced a singular fit (we already knew that would happen because we built the same model above), so let's try all_fit() again.

all_fit(rt_int.mod)
## bobyqa. :
## boundary (singular) fit: see help('isSingular')
## [OK]
## Nelder_Mead. :
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## unable to evaluate scaled gradient
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
## Warning: Model failed to converge with 1 negative eigenvalue: -4.1e+03
## [OK]
## optimx.nlminb :
## Loading required namespace: optimx
## [ERROR]
## optimx.L-BFGS-B :
## Loading required namespace: optimx
## [ERROR]
## nloptwrap.NLOPT_LN_NELDERMEAD :
## boundary (singular) fit: see help('isSingular')
## [OK]
## nloptwrap.NLOPT_LN_BOBYQA :
## boundary (singular) fit: see help('isSingular')
## [OK]
## nmkbw. : [ERROR]
## $bobyqa.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality | stim) +  
##     (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301137.6
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)  16.414             
##           modality      6.928  1.00       
##  PID      (Intercept) 159.747             
##           modality     89.694  -0.03      
##           SNR         101.742   0.02 -0.47
##  Residual             247.498             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.52         92.36        -29.55  
## optimizer (bobyqa) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $Nelder_Mead.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality | stim) +  
##     (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301167.5
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)   3.174             
##           modality     26.456  -0.74      
##  PID      (Intercept) 251.305             
##           modality     89.545  -0.06      
##           SNR         116.118  -0.21 -0.45
##  Residual             247.522             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.79         98.48         92.29        -29.48  
## optimizer (Nelder_Mead) convergence code: 0 (OK) ; 0 optimizer warnings; 2 lme4 warnings 
## 
## $optimx.nlminb
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $`optimx.L-BFGS-B`
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $nloptwrap.NLOPT_LN_NELDERMEAD
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality | stim) +  
##     (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301137.6
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)  16.416             
##           modality      6.925  1.00       
##  PID      (Intercept) 159.747             
##           modality     89.694  -0.03      
##           SNR         101.748   0.02 -0.47
##  Residual             247.498             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.52         92.36        -29.55  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nloptwrap.NLOPT_LN_BOBYQA
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (1 + modality | stim) +  
##     (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301137.6
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     (Intercept)  16.416             
##           modality      6.925  1.00       
##  PID      (Intercept) 159.747             
##           modality     89.694  -0.03      
##           SNR         101.748   0.02 -0.47
##  Residual             247.498             
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.52         92.36        -29.55  
## optimizer (nloptwrap) convergence code: 0 (OK) ; 0 optimizer warnings; 1 lme4 warnings 
## 
## $nmkbw.
## <packageNotFoundError in loadNamespace(x): there is no package called 'dfoptim'>

The Nelder-Mead optimizer might work, so we’ll try that one

rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (1 + modality|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction,
                   control = lmerControl(optimizer = 'Nelder_Mead'))
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## unable to evaluate scaled gradient
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
## Warning: Model failed to converge with 1 negative eigenvalue: -4.1e+03

That led to convergence issues too. Since all of these optimizers run into estimation problems, we'll try removing the correlation between the by-stimulus random intercept and the by-stimulus random slope for modality (this is OK here because we aren't actually interested in that correlation).

rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (0 + modality|stim) + (1|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction)
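
As an aside, lme4's double-bar syntax is shorthand for the same uncorrelated structure; because modality is coded numerically here, the commented formula below should be equivalent to the model we just fit (shown for reference only, not run in this script).

# Equivalent shorthand: "||" drops the correlation between the by-stimulus
# intercept and the by-stimulus modality slope
# rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
#                      (1 + modality || stim) + (1 + modality + SNR|PID),
#                    data = rt_data_interaction)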

This model can still run into convergence warnings, so we'll try all_fit() again

all_fit(rt_int.mod)
## bobyqa. : [OK]
## Nelder_Mead. : [OK]
## optimx.nlminb :
## Loading required namespace: optimx
## [ERROR]
## optimx.L-BFGS-B :
## Loading required namespace: optimx
## [ERROR]
## nloptwrap.NLOPT_LN_NELDERMEAD : [OK]
## nloptwrap.NLOPT_LN_BOBYQA : [OK]
## nmkbw. : [ERROR]
## $bobyqa.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (0 + modality | stim) +  
##     (1 | stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301138.2
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     modality     12.03              
##  stim.1   (Intercept)  18.88              
##  PID      (Intercept) 159.76              
##           modality     89.69   -0.03      
##           SNR         101.76    0.02 -0.47
##  Residual             247.46              
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.51         92.34        -29.53  
## 
## $Nelder_Mead.
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (0 + modality | stim) +  
##     (1 | stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301138.2
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     modality     12.03              
##  stim.1   (Intercept)  18.88              
##  PID      (Intercept) 159.76              
##           modality     89.69   -0.03      
##           SNR         101.76    0.02 -0.47
##  Residual             247.46              
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.51         92.34        -29.53  
## 
## $optimx.nlminb
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $`optimx.L-BFGS-B`
## <simpleError in getOptfun(optimizer): "optimx" package must be installed order to use "optimizer=\"optimx\"">
## 
## $nloptwrap.NLOPT_LN_NELDERMEAD
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (0 + modality | stim) +  
##     (1 | stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301138.2
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     modality     12.03              
##  stim.1   (Intercept)  18.88              
##  PID      (Intercept) 159.76              
##           modality     89.70   -0.03      
##           SNR         101.76    0.02 -0.47
##  Residual             247.46              
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.51         92.34        -29.53  
## 
## $nloptwrap.NLOPT_LN_BOBYQA
## Linear mixed model fit by REML ['lmerModLmerTest']
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (0 + modality | stim) +  
##     (1 | stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## REML criterion at convergence: 301138.2
## Random effects:
##  Groups   Name        Std.Dev. Corr       
##  stim     modality     12.03              
##  stim.1   (Intercept)  18.88              
##  PID      (Intercept) 159.76              
##           modality     89.70   -0.03      
##           SNR         101.76    0.02 -0.47
##  Residual             247.46              
## Number of obs: 21679, groups:  stim, 543; PID, 53
## Fixed Effects:
##  (Intercept)      modality           SNR  modality:SNR  
##       998.82         98.51         92.34        -29.53  
## 
## $nmkbw.
## <packageNotFoundError in loadNamespace(x): there is no package called 'dfoptim'>

The bobyqa optimizer (among others) now fits without warnings, so we'll use it

rt_int.mod <- lmer(RT ~ 1 + modality + SNR + modality:SNR +
                     (0 + modality|stim) + (1|stim) + (1 + modality + SNR|PID), 
                   data = rt_data_interaction,
                   control = lmerControl(optimizer = 'bobyqa'))

Looks like that converged, but let’s examine the random effects output to make sure estimation went smoothly.

summary(rt_int.mod)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: RT ~ 1 + modality + SNR + modality:SNR + (0 + modality | stim) +  
##     (1 | stim) + (1 + modality + SNR | PID)
##    Data: rt_data_interaction
## Control: lmerControl(optimizer = "bobyqa")
## 
## REML criterion at convergence: 301138.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5354 -0.6949 -0.0045  0.5972  4.8706 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr       
##  stim     modality      144.7   12.03              
##  stim.1   (Intercept)   356.3   18.88              
##  PID      (Intercept) 25522.7  159.76              
##           modality     8044.7   89.69   -0.03      
##           SNR         10355.6  101.76    0.02 -0.47
##  Residual             61234.2  247.46              
## Number of obs: 21679, groups:  stim, 543; PID, 53
## 
## Fixed effects:
##               Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)    998.824     22.214    52.729  44.964  < 2e-16 ***
## modality        98.510     13.199    59.065   7.464 4.41e-10 ***
## SNR             92.339     14.790    58.004   6.243 5.39e-08 ***
## modality:SNR   -29.532      6.755 21298.850  -4.372 1.24e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) modlty SNR   
## modality    -0.063              
## SNR         -0.014 -0.354       
## modalty:SNR  0.074 -0.247 -0.232

Looks ok! We’ll stick with this one.
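
To unpack what the dummy-coded interaction means, here is a quick illustration (not in the original script): the modality coefficient is the modality effect at the easy SNR, and adding the interaction term gives the modality effect at the hard SNR.

# Simple effects of modality implied by the fixed effects
# (modality: 0 = audio-only, 1 = audiovisual; SNR: 0 = easy, 1 = hard)
b <- fixef(rt_int.mod)
b["modality"]                      # modality effect at the easy SNR, ~99 ms
b["modality"] + b["modality:SNR"]  # modality effect at the hard SNR, ~69 ms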

Testing for an effect of modality on intelligibility (binomial)

Load data and name it acc_data

acc_data <- read_csv("acc_dummy_data.csv")
## Rows: 28807 Columns: 4
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (2): modality, stim
## dbl (2): PID, acc
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.

Dummy code modality with audio-only as the reference level

acc_data$modality <- ifelse(acc_data$modality == "Audio-only", 0, 1)

Make PID and stim factors

acc_data$PID <- as.factor(acc_data$PID)
acc_data$stim <- as.factor(acc_data$stim)

Build a full model

acc_full.mod <- glmer(acc ~ 1 + modality + 
                        (1 + modality|PID) + (1 + modality|stim), 
                      data = acc_data, 
                      family = binomial)

Check random effects output

summary(acc_full.mod)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: acc ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##    Data: acc_data
## 
##      AIC      BIC   logLik deviance df.resid 
##  27988.6  28054.8 -13986.3  27972.6    28799 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -6.2346  0.1411  0.3436  0.5606  2.0937 
## 
## Random effects:
##  Groups Name        Variance Std.Dev. Corr 
##  stim   (Intercept) 0.72084  0.8490        
##         modality    0.46664  0.6831   -0.06
##  PID    (Intercept) 0.04346  0.2085        
##         modality    0.04902  0.2214   -0.15
## Number of obs: 28807, groups:  stim, 543; PID, 53
## 
## Fixed effects:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.77987    0.05042   15.47   <2e-16 ***
## modality     1.43093    0.05734   24.95   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##          (Intr)
## modality -0.201
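Because this is a logit model, the fixed-effect estimates are on the log-odds scale. Exponentiating them gives odds ratios, which are often easier to interpret; a quick sketch:

# Convert log-odds estimates to odds ratios
exp(fixef(acc_full.mod))

For the modality estimate above, exp(1.43) ≈ 4.2, i.e., the odds of a correct response are roughly four times higher in the audiovisual condition than in the audio-only (reference) condition.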

Build a reduced model lacking the fixed effect for modality

acc_reduced.mod <- glmer(acc ~ 1 + 
                           (1 + modality|PID) + (1 + modality|stim), 
                         data = acc_data, 
                         family = binomial)

Conduct a likelihood ratio test to see if the effect of modality (audio-only versus audiovisual) is significant

anova(acc_reduced.mod, acc_full.mod)
## Data: acc_data
## Models:
## acc_reduced.mod: acc ~ 1 + (1 + modality | PID) + (1 + modality | stim)
## acc_full.mod: acc ~ 1 + modality + (1 + modality | PID) + (1 + modality | stim)
##                 npar   AIC   BIC logLik deviance  Chisq Df Pr(>Chisq)    
## acc_reduced.mod    7 28147 28205 -14067    28133                         
## acc_full.mod       8 27989 28055 -13986    27973 160.78  1  < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
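An alternative to building the full and reduced models by hand is the mixed() function from afex (loaded above), which can fit both models and run the likelihood ratio test in one call for a binomial GLMM. A sketch, assuming the same data and random-effects structure (acc_LRT is just an illustrative name; fitting both models this way can take a while):

acc_LRT <- mixed(acc ~ modality + (1 + modality|PID) + (1 + modality|stim), 
                 data = acc_data, 
                 family = binomial, 
                 method = "LRT")
acc_LRT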

Fixed-effects only, random intercepts, and random slopes plots

Load data

figuredata <- read_csv("figure_data.csv")
## Rows: 16 Columns: 3
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## dbl (3): PID, xvar, yvar
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.

Make PID a factor

figuredata$PID <- as.factor(figuredata$PID)

Fixed-effects only regression plot

Build regression model and view the summary output to look at the residuals

ols.mod <- lm(yvar ~ xvar, data = figuredata)

summary(ols.mod)
## 
## Call:
## lm(formula = yvar ~ xvar, data = figuredata)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -721.36 -233.80    3.13  361.59  561.08 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)   
## (Intercept)   582.92     180.88   3.223  0.00614 **
## xvar          119.84      30.64   3.911  0.00157 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 410.2 on 14 degrees of freedom
## Multiple R-squared:  0.5221, Adjusted R-squared:  0.4879 
## F-statistic: 15.29 on 1 and 14 DF,  p-value: 0.001568
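The vertical segments in the plot below run from each observed value to the corresponding fitted value from ols.mod; the residuals are the lengths of those segments. A quick way to see the three quantities side by side:

# Observed values, fitted values, and residuals from the fixed-effects-only model
head(data.frame(observed = figuredata$yvar, 
                fitted = fitted(ols.mod), 
                residual = resid(ols.mod)))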

Build a fixed effects only plot

ggplot(figuredata, aes(x = xvar, y = yvar)) + 
  stat_smooth(method = lm, se = FALSE, linetype = "solid", 
              color = "black", size = .6) +
  geom_point(aes(shape = PID), size = 3.25, color = "grey70") +
  scale_shape_manual(values = c(15, 16, 17, 18)) + 
  geom_segment(aes(x = xvar, xend = xvar, 
                   y = yvar, yend = fitted(ols.mod)), 
               color = "grey70") +
  scale_y_continuous(expand = c(0, 0), breaks = c(0, 750, 1500, 2250, 3000), 
                     limits = c(0, 2600)) +
  scale_x_continuous(expand = c(0, 0), breaks = c(0, 2, 4, 6, 8, 10), 
                     limits = c(-0.5, 10.5)) +
  theme(panel.background = element_blank(),         
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(colour = "black", fill = NA),
        legend.position = "none",
        axis.text = element_text(size = 14),
        axis.title = element_text(size = 14)) +
  labs(x = "Word Difficulty", y = "Response Time") 
## `geom_smooth()` using formula 'y ~ x'

Save the figure

ggsave("fixed_effects_plot.png", units = "in", width = 9, height = 6, dpi = 300)
## `geom_smooth()` using formula 'y ~ x'

Random intercepts plot

Build the model with random intercepts and view the summary output to look at the residuals

random_intercepts.mod <- lmer(yvar ~ 1 + xvar + (1|PID), data = figuredata)

summary(random_intercepts.mod)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: yvar ~ 1 + xvar + (1 | PID)
##    Data: figuredata
## 
## REML criterion at convergence: 210.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.6361 -0.6381  0.3516  0.6020  1.3016 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  PID      (Intercept) 108442   329.3   
##  Residual              75324   274.5   
## Number of obs: 16, groups:  PID, 4
## 
## Fixed effects:
##             Estimate Std. Error      df t value Pr(>|t|)    
## (Intercept)  576.362    204.356   5.033   2.820 0.036817 *  
## xvar         121.185     20.507  11.001   5.909 0.000102 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##      (Intr)
## xvar -0.488

Extract the fixed effects estimates for the intercept and slope

model_intercept <- as.numeric(fixef(random_intercepts.mod)[1])
model_slope <- as.numeric(fixef(random_intercepts.mod)[2])

Extract the individual participant intercepts from this model and add them to the data frame

figuredata$intercepts <- rep(coef(random_intercepts.mod)$PID[,1], each = 4)
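Note that rep(..., each = 4) relies on the rows being grouped by PID in sets of four, in the same order as the rows of coef(random_intercepts.mod)$PID. An order-independent alternative is to look up each row's intercept by PID (intercepts_check is just an illustrative name):

# Look up each row's participant-specific intercept by PID instead of relying on row order
pid_coefs <- coef(random_intercepts.mod)$PID
figuredata$intercepts_check <- unname(pid_coefs[as.character(figuredata$PID), "(Intercept)"])

# Should be TRUE if the row-order assumption holds
all.equal(figuredata$intercepts, figuredata$intercepts_check)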

Build random intercepts plot

ggplot(figuredata, aes(x = xvar, y = yvar)) + 
  geom_abline(slope = model_slope, intercept = model_intercept, 
              linetype = "solid", color = "black", size = 1) +
  geom_abline(mapping = aes(slope = model_slope, intercept = intercepts), 
              linetype = "dashed", color = "grey70", size = .4) + 
  geom_point(aes(shape = PID), size = 3.25, color = "grey70") + 
  scale_shape_manual(values = c(15, 16, 17, 18)) + 
  geom_segment(aes(x = xvar, xend = xvar, 
                   y = yvar, yend = fitted(random_intercepts.mod)),
               color = "grey70") +
  scale_y_continuous(expand = c(0, 0), breaks = c(0, 500, 1000, 1500, 2000, 2500), 
                     limits = c(0, 2600)) +
  scale_x_continuous(expand = c(0, 0), breaks = c(0, 2, 4, 6, 8, 10), 
                     limits = c(-0.5, 10.5)) +
  theme(panel.background = element_blank(),         
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(colour = "black", fill = NA),
        legend.position = "none",
        axis.text = element_text(size = 14), 
        axis.title = element_text(size = 14)) +
  labs(x = "Word Difficulty", y = "Response Time") 

Save the figure

ggsave("random_intercepts.png", units = "in", width = 9, height = 6, dpi = 300)

Random intercepts and slopes plot

Build the model with random intercepts and slopes and view the summary output to look at the residuals

random_slopes.mod <- lmer(yvar ~ 1 + xvar + (1 + xvar|PID), data = figuredata)

summary(random_slopes.mod)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: yvar ~ 1 + xvar + (1 + xvar | PID)
##    Data: figuredata
## 
## REML criterion at convergence: 191.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -1.0716 -0.4797 -0.1726  0.6937  1.0952 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev. Corr 
##  PID      (Intercept) 61192    247.37        
##           xvar         5854     76.51   -0.40
##  Residual              5638     75.08        
## Number of obs: 16, groups:  PID, 4
## 
## Fixed effects:
##             Estimate Std. Error      df t value Pr(>|t|)  
## (Intercept)  561.105    128.055   2.990   4.382   0.0222 *
## xvar         124.636     38.667   2.997   3.223   0.0485 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##      (Intr)
## xvar -0.415

Extract the individual participant intercepts and slopes from this model and add them to the data frame

figuredata$intercepts2 <- rep(coef(random_slopes.mod)$PID[,1], each = 4)
figuredata$slopes <- rep(coef(random_slopes.mod)$PID[,2], each = 4)
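The black line in the plot below reuses model_intercept and model_slope from the random-intercepts model. If you instead wanted that line to reflect the fixed effects of the random intercepts and slopes model, they can be extracted in the same way and substituted into the first geom_abline() call (model_intercept2 and model_slope2 are illustrative names):

# Fixed-effect estimates from the random intercepts and slopes model
model_intercept2 <- as.numeric(fixef(random_slopes.mod)[1])
model_slope2 <- as.numeric(fixef(random_slopes.mod)[2])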

Build random intercepts and slopes plot

ggplot(figuredata, aes(x = xvar, y = yvar)) + 
  geom_abline(slope = model_slope, intercept = model_intercept, 
              linetype = "solid", color = "black", size = 1) + 
  geom_abline(mapping = aes(slope = slopes, intercept = intercepts2), 
              linetype = "dashed", color = "grey70", size = .4) +
  geom_point(aes(shape = PID), size = 3.25, color = "grey70") + 
  scale_shape_manual(values = c(15, 16, 17, 18)) + 
  geom_segment(aes(x = xvar, xend = xvar, 
                   y = yvar, yend = fitted(random_slopes.mod)), 
               color = "grey70") +
  scale_y_continuous(expand = c(0, 0), breaks = c(0, 750, 1500, 2250), 
                     limits = c(0, 2600)) +
  scale_x_continuous(expand = c(0, 0), breaks = c(0, 2, 4, 6, 8, 10), 
                     limits = c(-0.5, 10.5)) +
  theme(panel.background = element_blank(),         
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(colour = "black", fill = NA),
        legend.position = "none", 
        axis.text = element_text(size = 14),
        axis.title = element_text(size = 14)) +
  labs(x = "Word Difficulty", y = "Response Time") 

Save the figure

ggsave("random_slopes.png", units = "in", width = 9, height = 6, dpi = 300)