We plot model predictions to better understand the coefficients. All models are limited to two-way interactions. Here, I interpret only the coefficients related to inspection time, since the results for the other coefficients do not change when inspection times are added to the models.
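
The ^2 operator in the model formulas is what enforces this restriction: it expands a sum of predictors into all main effects plus all pairwise interactions, with no higher-order terms. A quick check with placeholder variables a, b, and c:

# (a + b + c)^2 expands to main effects plus two-way interactions only
attr(terms(y ~ (a + b + c)^2), "term.labels")
# "a"   "b"   "c"   "a:b" "a:c" "b:c"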

Experiment 1

m1_e1_inspect <- glmer(correct ~ (condition + trialType + 
                                    log2(numPicN) + 
                                    log2(intervalNum + 1) +
                                    log2(inspection_time_exposure_sec))^2 + 
                         (trialType | subid), 
                       # offset anchors the baseline at chance (1/numPicN) on the logit scale
                       offset = logit(1/numPicN), 
                       control = glmerControl(optimizer = "bobyqa"),
                       family = binomial, 
                       nAGQ = 0,  # faster but cruder than the default Laplace approximation
                       data = df_e1_filt)
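
A minimal sketch of the prediction plots referenced above, assuming the ggeffects package (the terms plotted here are one illustrative choice, not the only one):

# Sketch: predicted accuracy by inspection time, condition, and trial type
library(ggeffects)
preds_e1 <- ggpredict(m1_e1_inspect,
                      terms = c("inspection_time_exposure_sec",
                                "condition", "trialType"))
plot(preds_e1)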

broom::tidy(m1_e1_inspect) %>% 
  filter(group == "fixed") %>% 
  mutate_at(.vars = c("estimate", "std.error", "statistic", "p.value"), 
            .funs = round, digits = 3) %>% 
  mutate(p.value = ifelse(p.value == 0, "< .001", p.value)) %>% 
  knitr::kable()
| term | estimate | std.error | statistic | p.value | group |
|:---|---:|---:|---:|:---|:---|
| (Intercept) | 2.893 | 0.341 | 8.494 | < .001 | fixed |
| conditionSocial | 0.115 | 0.269 | 0.428 | 0.669 | fixed |
| trialTypeSwitch | -1.454 | 0.253 | -5.757 | < .001 | fixed |
| log2(numPicN) | 0.053 | 0.135 | 0.394 | 0.693 | fixed |
| log2(intervalNum + 1) | -0.471 | 0.113 | -4.154 | < .001 | fixed |
| log2(inspection_time_exposure_sec) | 0.204 | 0.148 | 1.376 | 0.169 | fixed |
| conditionSocial:trialTypeSwitch | -1.020 | 0.130 | -7.862 | < .001 | fixed |
| conditionSocial:log2(numPicN) | 0.359 | 0.098 | 3.675 | < .001 | fixed |
| conditionSocial:log2(intervalNum + 1) | 0.095 | 0.059 | 1.612 | 0.107 | fixed |
| conditionSocial:log2(inspection_time_exposure_sec) | -0.173 | 0.068 | -2.551 | 0.011 | fixed |
| trialTypeSwitch:log2(numPicN) | -0.619 | 0.093 | -6.669 | < .001 | fixed |
| trialTypeSwitch:log2(intervalNum + 1) | 0.519 | 0.055 | 9.386 | < .001 | fixed |
| trialTypeSwitch:log2(inspection_time_exposure_sec) | 0.094 | 0.069 | 1.358 | 0.174 | fixed |
| log2(numPicN):log2(intervalNum + 1) | -0.053 | 0.042 | -1.262 | 0.207 | fixed |
| log2(numPicN):log2(inspection_time_exposure_sec) | 0.049 | 0.052 | 0.945 | 0.345 | fixed |
| log2(intervalNum + 1):log2(inspection_time_exposure_sec) | 0.017 | 0.031 | 0.541 | 0.589 | fixed |

Is there an effect of gaze on same trials in E1?

Pairwise comparison collapsing across interval and number of referents

lsm_obj_e1 <- lsmeans(m1_e1_inspect, ~ condition | trialType)
e1_pairs <- pairs(lsm_obj_e1, adjust = "bon")
summary(e1_pairs) %>% knitr::kable(digits = 3)
| contrast | trialType | estimate | SE | df | z.ratio | p.value |
|:---|:---|---:|---:|---:|---:|---:|
| No-Social - Social | Same | -0.839 | 0.112 | NA | -7.521 | 0.000 |
| No-Social - Social | Switch | 0.180 | 0.087 | NA | 2.083 | 0.037 |
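
Aside: the lsmeans package has since been superseded by emmeans; emmeans::emmeans(m1_e1_inspect, ~ condition | trialType) is the drop-in replacement for the call above.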

KM thoughts: There appears to be more evidence for a boost to same trials with gaze than I thought. It looks like the boost is more likely to show up at higher intervals and higher numbers of referents, but it is also significant in the simple pairwise comparison of same/switch trials across the gaze conditions. My best guess is that the higher interval and referent conditions taxed attention and memory more in the no-gaze condition (participants felt they had to spread attention across the referents), whereas in the gaze condition, participants focused more attention on their selection, which led to better performance.

Experiment 2

m2_inspect_e2 <- glmer(correct ~ (condition_trial + 
                                    trialType +
                                    log2(intervalNum + 1) +
                                    log2(inspection_time_exposure_sec))^2 +
                         (trialType | subid), 
                       control = glmerControl(optimizer = "bobyqa"),
                       family = binomial,
                       nAGQ = 0,
                       data = filter(df_expt2_analysis, trial_category == "test"))

broom::tidy(m2_inspect_e2) %>% 
  filter(group == "fixed") %>% 
  mutate_at(.vars = c("estimate", "std.error", "statistic", "p.value"), 
            .funs = round, digits = 3) %>% 
  mutate(estimate = round(estimate, 2),
         p.value = ifelse(p.value == 0, "< .001", p.value)) %>%  
  knitr::kable()
| term | estimate | std.error | statistic | p.value | group |
|:---|---:|---:|---:|:---|:---|
| (Intercept) | 2.41 | 0.289 | 8.338 | < .001 | fixed |
| condition_trialsocial | 0.13 | 0.230 | 0.585 | 0.559 | fixed |
| trialTypeSwitch | -3.12 | 0.256 | -12.214 | < .001 | fixed |
| log2(intervalNum + 1) | -0.88 | 0.138 | -6.342 | < .001 | fixed |
| log2(inspection_time_exposure_sec) | 0.15 | 0.134 | 1.138 | 0.255 | fixed |
| condition_trialsocial:trialTypeSwitch | -0.54 | 0.167 | -3.210 | 0.001 | fixed |
| condition_trialsocial:log2(intervalNum + 1) | 0.16 | 0.086 | 1.849 | 0.064 | fixed |
| condition_trialsocial:log2(inspection_time_exposure_sec) | -0.14 | 0.101 | -1.366 | 0.172 | fixed |
| trialTypeSwitch:log2(intervalNum + 1) | 0.77 | 0.096 | 8.000 | < .001 | fixed |
| trialTypeSwitch:log2(inspection_time_exposure_sec) | 0.21 | 0.107 | 1.961 | 0.05 | fixed |
| log2(intervalNum + 1):log2(inspection_time_exposure_sec) | 0.04 | 0.058 | 0.773 | 0.44 | fixed |

Pairwise comparisons E2

lsm_obj_e2 <- lsmeans(m2_inspect_e2, ~ condition_trial | trialType)
e2_pairs <- pairs(lsm_obj_e2, adjust = "bon")
summary(e2_pairs) %>% knitr::kable(digits = 3)
| contrast | trialType | estimate | SE | df | z.ratio | p.value |
|:---|:---|---:|---:|---:|---:|---:|
| no-social - social | Same | -0.124 | 0.139 | NA | -0.887 | 0.375 |
| no-social - social | Switch | 0.412 | 0.106 | NA | 3.874 | 0.000 |

No difference between the gaze and no-gaze conditions on same trials, perhaps because we did not test at high enough attention and memory demands.

Experiment 3

# link each test trial to the inspection time on its exposure trial
df_analysis_e3 <- df_expt3 %>% 
  filter(trial_category == "exposure", block == "test", 
         include_good_rt == "include") %>% 
  select(subid, itemNum, inspection_time_exposure = rt) %>% 
  mutate(inspection_time_exposure_sec = inspection_time_exposure / 1000) %>%  # ms -> sec
  left_join(filter(df_expt3, trial_category == "test", block == "test", 
                   include_good_rt == "include"), 
            by = c("subid", "itemNum")) 
# does inspection time affect test trial performance?
m_inspect_e3 <- glmer(correct ~ (log2(inspection_time_exposure_sec) + 
                                   trialType + 
                                   reliability)^2 + 
                        (trialType | subid),
                      control = glmerControl(optimizer = "bobyqa"), 
                      nAGQ = 0,
                      family = binomial,
                      data = df_analysis_e3)

# get betas and p-values
broom::tidy(m_inspect_e3) %>% 
  filter(group == "fixed") %>% 
  mutate_at(.vars = c("estimate", "std.error", "statistic", "p.value"), 
            .funs = round, digits = 3) %>% 
  mutate(estimate = round(estimate, 2),
         p.value = ifelse(p.value == 0, "< .001", p.value)) %>% 
  knitr::kable()
| term | estimate | std.error | statistic | p.value | group |
|:---|---:|---:|---:|:---|:---|
| (Intercept) | 2.01 | 0.195 | 10.306 | < .001 | fixed |
| log2(inspection_time_exposure_sec) | 0.31 | 0.095 | 3.309 | 0.001 | fixed |
| trialTypeSwitch | -2.75 | 0.202 | -13.636 | < .001 | fixed |
| reliability | 0.50 | 0.303 | 1.658 | 0.097 | fixed |
| log2(inspection_time_exposure_sec):trialTypeSwitch | 0.03 | 0.089 | 0.337 | 0.736 | fixed |
| log2(inspection_time_exposure_sec):reliability | -0.20 | 0.110 | -1.835 | 0.067 | fixed |
| trialTypeSwitch:reliability | -0.58 | 0.294 | -1.974 | 0.048 | fixed |
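
Given the marginal inspection-time-by-reliability interaction, a natural follow-up in the spirit of the pairwise comparisons above would compare the inspection-time slope across reliability levels. A minimal sketch using emmeans::emtrends; the reliability values passed to the at argument are placeholders, since the coding of reliability in df_analysis_e3 is not shown here:

# Sketch: slope of accuracy on inspection time at each reliability level
# (reliability values in `at` are hypothetical; substitute the observed levels)
emmeans::emtrends(m_inspect_e3, ~ reliability | trialType,
                  var = "inspection_time_exposure_sec",
                  at = list(reliability = c(0, 1)))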

Things to address