Load data
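
The chunk that reads in the data is not shown here. Below is a minimal sketch of what it could look like; the file name arousal_extended_wbaseline.csv is a placeholder, and the packages listed are the ones used by the code that follows.

library(dplyr)     # data wrangling / mean centering
library(lmerTest)  # lmer(); plain lme4 would work too
library(sjPlot)    # tab_model()
library(emmeans)   # emtrends(), emmip(), emmeans()

# Placeholder file name; point this at the actual data file
arousal.extended.wbaseline <- read.csv("arousal_extended_wbaseline.csv")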

Mean centering:

arousal.extended.wbaseline <- arousal.extended.wbaseline %>% 
  # Grand mean centering (GMC)
  mutate(baseline.gmc = baseline-mean(baseline, na.rm=TRUE)) %>%
  # Person mean centering (more generally, centering within cluster)
  group_by(participant) %>% 
  mutate(baseline.cm = mean(baseline, na.rm=TRUE),
         baseline.cwc = baseline-baseline.cm) %>%
  ungroup() %>%
  # Grand mean centering of the aggregated variable
  mutate(baseline.cmc = baseline.cm-mean(baseline.cm, na.rm=TRUE))
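
A quick sanity check (not part of the original code) that the centering behaved as intended: the person means of baseline.cwc should be numerically zero for every participant, and the grand means of baseline.gmc and baseline.cmc should be zero overall.

# Largest absolute person mean of the within-cluster centered baseline (should be ~0)
arousal.extended.wbaseline %>%
  group_by(participant) %>%
  summarise(mean.cwc = mean(baseline.cwc, na.rm = TRUE)) %>%
  summarise(max.abs.person.mean = max(abs(mean.cwc)))

# Grand means of the grand-mean centered variables (should be ~0)
arousal.extended.wbaseline %>%
  summarise(mean.gmc = mean(baseline.gmc, na.rm = TRUE),
            mean.cmc = mean(baseline.cmc, na.rm = TRUE))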

# Ensure the outcome and the predictor are numeric
# (strictly speaking, baseline needs to be numeric before the centering step above)
arousal.extended.wbaseline <- arousal.extended.wbaseline %>%
  mutate(rating = as.numeric(rating), baseline = as.numeric(baseline))
m1.nc <- lmer(rating ~ cueing*baseline + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # no centering
m1.gmc <- lmer(rating ~ cueing*baseline.gmc + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # grand mean centering
m1.cwc <- lmer(rating ~ cueing*baseline.cwc + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # centering within cluster
m1.cmc <- lmer(rating ~ cueing*baseline.cwc + cueing*baseline.cmc + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # including the person's mean 

tab_model(m1.nc, m1.gmc, m1.cwc, m1.cmc)

Fixed effects (response: rating in all four models); each cell shows Estimate [95% CI], p.

Predictors                     | m1.nc                        | m1.gmc                       | m1.cwc                       | m1.cmc
-------------------------------|------------------------------|------------------------------|------------------------------|-----------------------------
(Intercept)                    | 2.15 [1.78, 2.51], <0.001    | 3.23 [2.90, 3.55], <0.001    | 3.23 [2.84, 3.61], <0.001    | 3.23 [2.96, 3.50], <0.001
cueing [Uncued]                | -0.12 [-0.44, 0.21], 0.485   | 0.09 [-0.16, 0.35], 0.464    | 0.09 [-0.16, 0.34], 0.481    | 0.09 [-0.17, 0.34], 0.504
baseline                       | 0.35 [0.30, 0.41], <0.001    |                              |                              |
session                        | -0.12 [-0.18, -0.06], <0.001 | -0.12 [-0.18, -0.06], <0.001 | -0.12 [-0.18, -0.06], <0.001 | -0.12 [-0.18, -0.06], <0.001
cueing [Uncued] × baseline     | 0.07 [-0.00, 0.14], 0.050    |                              |                              |
cueing [Uncued] × session      | -0.03 [-0.11, 0.06], 0.537   | -0.03 [-0.11, 0.06], 0.537   | -0.02 [-0.11, 0.06], 0.564   | -0.02 [-0.10, 0.06], 0.601
baseline.gmc                   |                              | 0.35 [0.30, 0.41], <0.001    |                              |
cueing [Uncued] × baseline.gmc |                              | 0.07 [-0.00, 0.14], 0.050    |                              |
baseline.cwc                   |                              |                              | 0.33 [0.27, 0.39], <0.001    | 0.33 [0.27, 0.39], <0.001
cueing [Uncued] × baseline.cwc |                              |                              | 0.10 [0.02, 0.17], 0.010     | 0.10 [0.02, 0.17], 0.010
baseline.cmc                   |                              |                              |                              | 1.17 [0.81, 1.52], <0.001
cueing [Uncued] × baseline.cmc |                              |                              |                              | -0.08 [-0.25, 0.09], 0.349

Random effects and model fit:

                               | m1.nc         | m1.gmc        | m1.cwc        | m1.cmc
-------------------------------|---------------|---------------|---------------|--------------
σ²                             | 0.67          | 0.67          | 0.67          | 0.67
τ00 item                       | 0.14          | 0.14          | 0.14          | 0.14
τ00 participant                | 0.29          | 0.29          | 0.49          | 0.14
ICC                            | 0.39          | 0.39          | 0.49          | 0.30
N participants / items         | 18 / 48       | 18 / 48       | 18 / 48       | 18 / 48
Observations                   | 1565          | 1565          | 1565          | 1565
Marginal R² / Conditional R²   | 0.179 / 0.498 | 0.179 / 0.498 | 0.134 / 0.555 | 0.346 / 0.540
anova(m1.nc, m1.gmc, m1.cwc,m1.cmc)
## refitting model(s) with ML (instead of REML)
## Data: arousal.extended.wbaseline
## Models:
## m1.nc: rating ~ cueing * baseline + session * cueing + (1 | participant) + (1 | item)
## m1.gmc: rating ~ cueing * baseline.gmc + session * cueing + (1 | participant) + (1 | item)
## m1.cwc: rating ~ cueing * baseline.cwc + session * cueing + (1 | participant) + (1 | item)
## m1.cmc: rating ~ cueing * baseline.cwc + cueing * baseline.cmc + session * cueing + (1 | participant) + (1 | item)
##        npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)    
## m1.nc     9 3989.9 4038.1 -1986.0   3971.9                         
## m1.gmc    9 3989.9 4038.1 -1986.0   3971.9  0.000  0               
## m1.cwc    9 3996.5 4044.7 -1989.2   3978.5  0.000  0               
## m1.cmc   11 3976.9 4035.9 -1977.5   3954.9 23.529  2  7.777e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
model.emtrends <- emtrends(m1.cwc, pairwise ~ cueing, var = "baseline.cwc")
emmip(m1.cwc, cueing ~ baseline.cwc, cov.reduce = range)
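
model.emtrends is stored above but never printed; the two lines below (added here) would display the estimated slope of baseline.cwc within each cueing condition and the pairwise difference between those slopes.

model.emtrends$emtrends   # slope of baseline.cwc per cueing condition
model.emtrends$contrasts  # Cued vs. Uncued difference in slopes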

Model comparison:

# With the original (uncentered) baseline predictor.
# Drop rows with missing baseline first, so that all models below are fit on
# exactly the same observations; otherwise anova() cannot compare them.
arousal.extended.wbaseline <- arousal.extended.wbaseline[!is.na(arousal.extended.wbaseline$baseline), ]

m1 <- lmer(rating ~ session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline)
m2 <- lmer(rating ~ session*cueing + baseline + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline)
m3.nc <- lmer(rating ~ cueing*baseline + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # no centering

anova(m1,m2,m3.nc)
## refitting model(s) with ML (instead of REML)
## Data: arousal.extended.wbaseline
## Models:
## m1: rating ~ session * cueing + (1 | participant) + (1 | item)
## m2: rating ~ session * cueing + baseline + (1 | participant) + (1 | item)
## m3.nc: rating ~ cueing * baseline + session * cueing + (1 | participant) + (1 | item)
##       npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1       7 4260.8 4298.3 -2123.4   4246.8                           
## m2       8 3991.7 4034.6 -1987.9   3975.7 271.0912  1    < 2e-16 ***
## m3.nc    9 3989.9 4038.1 -1986.0   3971.9   3.8387  1    0.05008 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Now the same comparison with the within-cluster centered baseline. Note that
# these models are only comparable if baseline is already within-participant
# mean centered in m2, so that m2.cwc is nested in m3.cwc.

m1 <- lmer(rating ~ session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline)
m2.cwc <- lmer(rating ~ session*cueing + baseline.cwc + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline)
m3.cwc <- lmer(rating ~ cueing*baseline.cwc + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # centering within cluster

anova(m1,m2.cwc,m3.cwc)
## refitting model(s) with ML (instead of REML)
## Data: arousal.extended.wbaseline
## Models:
## m1: rating ~ session * cueing + (1 | participant) + (1 | item)
## m2.cwc: rating ~ session * cueing + baseline.cwc + (1 | participant) + (1 | item)
## m3.cwc: rating ~ cueing * baseline.cwc + session * cueing + (1 | participant) + (1 | item)
##        npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1        7 4260.8 4298.3 -2123.4   4246.8                           
## m2.cwc    8 4001.1 4043.9 -1992.5   3985.1 261.7520  1    < 2e-16 ***
## m3.cwc    9 3996.5 4044.7 -1989.2   3978.5   6.5984  1    0.01021 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Out of curiosity: does the cmc model, which splits baseline into a "trait"
# (between-person) and a "state" (within-person) component, outperform the cwc model? Yes!

m1 <- lmer(rating ~ session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline)
m2.cwc <- lmer(rating ~ session*cueing + baseline.cwc + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline)
m3.cwc <- lmer(rating ~ cueing*baseline.cwc + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # centering within cluster
m4.cmc <- lmer(rating ~ cueing*baseline.cwc + cueing*baseline.cmc + session*cueing + (1 | participant)  + (1 | item) , data=arousal.extended.wbaseline) # including the person's mean 

anova(m1,m2.cwc,m3.cwc,m4.cmc)
## refitting model(s) with ML (instead of REML)
## Data: arousal.extended.wbaseline
## Models:
## m1: rating ~ session * cueing + (1 | participant) + (1 | item)
## m2.cwc: rating ~ session * cueing + baseline.cwc + (1 | participant) + (1 | item)
## m3.cwc: rating ~ cueing * baseline.cwc + session * cueing + (1 | participant) + (1 | item)
## m4.cmc: rating ~ cueing * baseline.cwc + cueing * baseline.cmc + session * cueing + (1 | participant) + (1 | item)
##        npar    AIC    BIC  logLik deviance    Chisq Df Pr(>Chisq)    
## m1        7 4260.8 4298.3 -2123.4   4246.8                           
## m2.cwc    8 4001.1 4043.9 -1992.5   3985.1 261.7520  1  < 2.2e-16 ***
## m3.cwc    9 3996.5 4044.7 -1989.2   3978.5   6.5984  1    0.01021 *  
## m4.cmc   11 3976.9 4035.9 -1977.5   3954.9  23.5287  2  7.777e-06 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Baseline-specific post-hoc contrasts

# With baseline-specific post-hoc contrasts, the full picture seems to be the
# following: predicted post-sleep ratings decrease for cued items that were
# rated 1 or 2 units above the participant's own average baseline rating
# ("highly arousing" items), whereas cueing increases predicted ratings for
# items rated 2 units below the participant's own average baseline rating.

tmp <- emmeans(m3.cwc, ~ cueing * baseline.cwc, at=list(baseline.cwc=-2:2))
pairs(tmp, by ="baseline.cwc")
## baseline.cwc = -2:
##  contrast      estimate     SE   df t.ratio p.value
##  Cued - Uncued   0.1777 0.0880 1518   2.020  0.0436
## 
## baseline.cwc = -1:
##  contrast      estimate     SE   df t.ratio p.value
##  Cued - Uncued   0.0798 0.0577 1519   1.384  0.1667
## 
## baseline.cwc =  0:
##  contrast      estimate     SE   df t.ratio p.value
##  Cued - Uncued  -0.0181 0.0426 1515  -0.425  0.6708
## 
## baseline.cwc =  1:
##  contrast      estimate     SE   df t.ratio p.value
##  Cued - Uncued  -0.1160 0.0567 1511  -2.045  0.0410
## 
## baseline.cwc =  2:
##  contrast      estimate     SE   df t.ratio p.value
##  Cued - Uncued  -0.2140 0.0868 1511  -2.465  0.0138
## 
## Results are averaged over the levels of: session 
## Degrees-of-freedom method: kenward-roger
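
The five contrasts above are not corrected for multiple comparisons. A possible follow-up, added here as a sketch rather than part of the original analysis, is to adjust across the five baseline.cwc values, e.g. with a Holm correction:

# Re-summarise the same contrasts, defining the family across all five
# baseline.cwc values (adjust = "mvt" would use a multivariate-t adjustment)
summary(pairs(tmp, by = "baseline.cwc"), by = NULL, adjust = "holm")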