df.response <- read.csv("../../data/children/PROCESSED_DATA/response.csv")
# unique(df.response$PID)
df.response %>%
filter(trial == 1 & task == "cswl") %>%
group_by(condition) %>%
count() %>%
knitr::kable()
| condition | n |
|---|---|
| B-2O | 31 |
| B-3O | 31 |
| IGNORANCE | 31 |
df.response %>%
filter(trial == 1 & task == "cswl") %>%
group_by(condition) %>%
summarise(mean_age = mean(age_years_cont),
sd_age = sd(age_years_cont),
min_age = min(age_years_cont),
max_age = max(age_years_cont)) %>%
knitr::kable()
| condition | mean_age | sd_age | min_age | max_age |
|---|---|---|---|---|
| B-2O | 5.926911 | 0.6273751 | 5.016438 | 6.958904 |
| B-3O | 6.039328 | 0.5283934 | 5.010959 | 6.942466 |
| IGNORANCE | 5.916217 | 0.6123296 | 5.032877 | 6.928767 |
Kids are succeeding in the Baseline 2-Object and Ignorance conditions (and there does not seem to be a difference between those two), but not really succeeding in Baseline 3-Object.
ggplot(df.response %>%
filter(task == "cswl" & block %in% c(1, 2)) %>%
mutate(condition = factor(condition, levels = c('B-3O', 'IGNORANCE', 'B-2O'))) %>%
group_by(PID, age_years, condition) %>%
summarise(mean_correct_resp = mean(correct_resp), .groups = "drop"),
aes(x = condition, y = mean_correct_resp, fill = condition)) +
geom_violin() +
stat_summary(fun.data = "mean_cl_boot",
geom = "pointrange") +
geom_jitter(alpha = 0.5,
height = 0) +
geom_hline(yintercept = 1/3, linetype = 'dashed') +
ylim(0, 1) +
#facet_grid(~age_years) +
labs(y = "Prop. Selecting Target Object") +
guides(fill="none")
ggplot(df.response %>%
filter(task == "cswl" & block %in% c(1, 2)) %>%
group_by(PID, condition, block) %>%
summarise(mean_correct_resp = mean(correct_resp), .groups = "drop"),
aes(x = condition, y = mean_correct_resp)) +
geom_violin() +
stat_summary(fun.data = "mean_cl_boot",
geom = "pointrange") +
geom_jitter(alpha = 0.5,
height = 0) +
geom_hline(yintercept = 1/3, linetype = 'dashed') +
ylim(0, 1) +
facet_grid(~block) +
labs(title = "Proportion selecting target object by testing block.
No apparent difference between blocks.")
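The "no difference between blocks" claim above is eyeballed from the plot. A minimal exploratory sketch of a formal check (not preregistered; fit.block is a name introduced here):

fit.block <- glmer(correct_resp ~ block + (1|PID) + (1|target_object_code),
                   data = df.response %>%
                     filter(task == 'cswl' & block %in% c("1", "2")),
                   family = binomial(link = 'logit'))
summary(fit.block)
# exploratory: a non-significant block coefficient would back up the
# "no difference between blocks" reading of the plot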
Kids with higher Theory of Mind scores also perform qualitatively better in the Ignorance condition (note: not significant in the regression). There might be an age effect in Baseline 2-Object and Ignorance, but not in Baseline 3-Object.
ggplot(df.response %>%
filter(condition == "IGNORANCE") %>%
filter(task == "cswl" & block %in% c(1, 2)) %>%
group_by(PID, condition, total_tom_score) %>%
summarise(mean_correct_resp = mean(correct_resp), .groups = "drop"),
aes(x = total_tom_score, y = mean_correct_resp, color = condition)) +
geom_smooth(method = "lm", formula = y ~ x) +
geom_jitter(alpha = 0.5,
height = 0) +
geom_hline(yintercept = 1/3, linetype = 'dashed') +
ylim(0, 1)
ggplot(df.response %>%
filter(task == "cswl" & block %in% c(1, 2)) %>%
group_by(PID, condition, age_months) %>%
summarise(mean_correct_resp = mean(correct_resp), .groups = "drop"),
aes(x = age_months, y = mean_correct_resp, color = condition, fill = condition)) +
geom_smooth(method = "lm", formula = y ~ x) +
geom_jitter(alpha = 0.5,
height = 0) +
geom_hline(yintercept = 1/3, linetype = 'dashed') +
ylim(0, 1)
ggplot(df.response %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
group_by(PID, condition) %>%
summarise(mean_chose_unknown = mean(chose_unknown), .groups = "drop"),
aes(x = condition, y = mean_chose_unknown)) +
stat_summary(fun.data = "mean_cl_boot",
geom = "pointrange") +
geom_jitter(alpha = 0.5,
height = 0) +
#facet_grid(~block) +
geom_hline(yintercept = 1/3, linetype = 'dashed') +
ylim(0, 1) +
labs(title = "Proportion selecting the 'unknown' object.
In the IGNORANCE condition these are the objects that Ella doesn't know.
In the Baseline conditions these are just one of two distractors.")
Right now, the memory check is coded 0-4 based on whether children identify each of the 4 items that Ella does not know the name of. But we might need to calculate some sort of d' instead, because some kids chose more than 4 items.
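For reference, a minimal sketch of what a d' score could look like. This assumes a hypothetical long-format frame df.mem_check_long with one row per memory-check item, a 0/1 selected column (child flagged the item as one Ella doesn't know) and a 0/1 is_unknown column (item actually unknown); these names don't exist in the current data, so this is an illustration rather than the preregistered scoring:

# Hypothetical d' sketch: hit rate on truly unknown items vs.
# false-alarm rate on known items, with a log-linear (+0.5) correction
# so rates of 0 or 1 stay finite. All frame/column names are assumed.
df.dprime <- df.mem_check_long %>%
  group_by(PID) %>%
  summarise(hit_adj = (sum(selected[is_unknown == 1]) + 0.5) /
                      (sum(is_unknown == 1) + 1),
            fa_adj = (sum(selected[is_unknown == 0]) + 0.5) /
                     (sum(is_unknown == 0) + 1),
            dprime = qnorm(hit_adj) - qnorm(fa_adj),
            .groups = "drop")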
Kids who remember all 4 unknown items are also better at not choosing them during test.
ggplot(df.response %>%
filter(block == "mem_check") %>%
group_by(PID) %>%
summarise(sum_memory_check = sum(correct_mem_check)),
aes(x = sum_memory_check)) +
geom_bar() +
labs(title = "Histogram of scores in the memory check. min score = 0, max score = 4")
df.mem_check_perf <- df.response %>%
filter(block == "mem_check") %>%
group_by(PID) %>%
summarise(sum_memory_check = sum(correct_mem_check))
ggplot(df.response %>%
left_join(., df.mem_check_perf, by = "PID") %>%
filter(task == "cswl" & block %in% c("1", "2"),
condition == "IGNORANCE") %>%
group_by(PID, sum_memory_check) %>%
summarise(mean_chose_unknown = mean(chose_unknown), .groups = "drop"),
aes(x = as.factor(sum_memory_check), y = mean_chose_unknown)) +
stat_summary(fun.data = "mean_cl_boot",
geom = "pointrange") +
geom_jitter(alpha = 0.5,
height = 0) +
geom_hline(yintercept = 1/3, linetype = 'dashed') +
ylim(0, 1) +
labs(title = "Proportion selecting the 'unknown' object in the IGNORANCE condition,
by score in the memory check section.
Lower performance in memory check --> more likely to choose the object
Ella said she doesn't know the name of")
## Warning: Removed 1 row containing missing values or values outside the scale range
## (`geom_segment()`).
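To put a number on the memory-check relationship described above, an exploratory sketch (not preregistered; fit.mem is a name introduced here):

fit.mem <- glmer(chose_unknown ~ scale(sum_memory_check) + (1|PID) + (1|target_object_code),
                 data = df.response %>%
                   left_join(df.mem_check_perf, by = "PID") %>%
                   filter(task == 'cswl' & block %in% c("1", "2") &
                            condition == "IGNORANCE"),
                 family = binomial(link = 'logit'))
summary(fit.mem)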
Random structure: by-participant and by-target-object random intercepts.
Question: this is what we preregistered; not sure whether we also need a by-label random intercept.
Model 1: choice of target object (0/1) ~ age + (1|participant) + (1|object) + offset(qlogis(1/3))
No effect of age, and in general kids succeed. But this looks a little suspicious in the B-3O condition (we probably wouldn't see success there if we didn't account for the random effect of item?).
Some convergence issues; we might need to simplify the random effects (see the sketch after the B-3O fit below).
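A quick sanity check on the offset logic before fitting: qlogis(1/3) is the log-odds of chance with three response options, so an intercept of 0 corresponds to chance and a positive intercept means above-chance responding.

qlogis(1/3)                # log-odds of chance: log((1/3)/(2/3)) = -0.693
plogis(0 + qlogis(1/3))    # an intercept of 0 maps back to p = 1/3 (chance)
plogis(0.61 + qlogis(1/3)) # e.g. the overall intercept below (~0.61) -> p ~ 0.48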
fit.target <- glmer(correct_resp ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
summary(fit.target)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correct_resp ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(task == "cswl" & block %in% c("1", "2")) %>%
## mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 1974.4 1995.6 -983.2 1966.4 1484
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.9299 -0.8042 -0.5655 0.8871 1.9149
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.61831 0.7863
## target_object_code (Intercept) 0.04854 0.2203
## Number of obs: 1488, groups: PID, 93; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.6077 0.1180 5.150 2.61e-07 ***
## age_zscored 0.1013 0.1005 1.008 0.314
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored -0.012
fit.target_b2o <- glmer(correct_resp ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(condition == "B-2O") %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
summary(fit.target_b2o)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correct_resp ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(condition == "B-2O") %>% filter(task ==
## "cswl" & block %in% c("1", "2")) %>% mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 641.8 658.6 -316.9 633.8 492
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.2600 -0.7657 0.4421 0.7406 1.8028
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.89325 0.9451
## target_object_code (Intercept) 0.08312 0.2883
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.8941 0.2152 4.155 3.25e-05 ***
## age_zscored 0.2083 0.1902 1.095 0.273
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored 0.049
fit.target_ignorance <- glmer(correct_resp ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(condition == "IGNORANCE") %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
summary(fit.target_ignorance)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correct_resp ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(condition == "IGNORANCE") %>% filter(task ==
## "cswl" & block %in% c("1", "2")) %>% mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 655.7 672.5 -323.8 647.7 492
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.9956 -0.8096 0.4105 0.8104 1.8876
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.7133 0.8446
## target_object_code (Intercept) 0.1377 0.3710
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.7828 0.2112 3.706 0.00021 ***
## age_zscored 0.2081 0.1781 1.168 0.24284
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored 0.049
fit.target_b3o <- glmer(correct_resp ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(condition == "B-3O") %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
## boundary (singular) fit: see help('isSingular')
summary(fit.target_b3o)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correct_resp ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(condition == "B-3O") %>% filter(task ==
## "cswl" & block %in% c("1", "2")) %>% mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 667.5 684.3 -329.8 659.5 492
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.8378 -0.8010 -0.7405 1.2233 1.3967
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 4.530e-02 2.128e-01
## target_object_code (Intercept) 1.445e-10 1.202e-05
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.22030 0.10221 2.155 0.0311 *
## age_zscored -0.06103 0.11446 -0.533 0.5939
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored -0.160
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
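Given the singular fit above (the by-object variance is essentially zero), one option is to drop that term and check that the fixed effects barely move. A sketch, not the preregistered model:

fit.target_b3o_simple <- glmer(correct_resp ~ age_zscored + (1|PID) + offset(qlogis(offset)),
                               data = df.response %>%
                                 filter(condition == "B-3O") %>%
                                 filter(task == 'cswl' & block %in% c("1", "2")) %>%
                                 mutate(offset = 1/3),
                               family = binomial(link = 'logit'))
summary(fit.target_b3o_simple)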
Model 2: choice of unknown object (0/1) ~ age + (1|participant) + (1|object) + offset(qlogis(1/3))
No effect of age overall, but there is an apparent effect of age in the IGNORANCE condition (treat with caution: that model fails to converge, see the warnings below). Kids succeed overall, but not in the B-3O condition.
Some convergence issues; we might need to simplify the random effects (see the allFit sketch after the IGNORANCE fit).
fit.unknown <- glmer(chose_unknown ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
summary(fit.unknown)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: chose_unknown ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(task == "cswl" & block %in% c("1", "2")) %>%
## mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 1653.7 1674.9 -822.8 1645.7 1484
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.0462 -0.6181 -0.4421 1.0341 2.7720
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.56084 0.7489
## target_object_code (Intercept) 0.01474 0.1214
## Number of obs: 1488, groups: PID, 93; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.5131 0.1103 -4.652 3.29e-06 ***
## age_zscored -0.1344 0.1015 -1.324 0.185
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored 0.002
fit.unknown_b2o <- glmer(chose_unknown ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(condition == "B-2O") %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
summary(fit.unknown_b2o)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: chose_unknown ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(condition == "B-2O") %>% filter(task ==
## "cswl" & block %in% c("1", "2")) %>% mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 515.4 532.2 -253.7 507.4 492
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.9734 -0.5703 -0.4061 -0.2848 3.0481
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.55227 0.7431
## target_object_code (Intercept) 0.04167 0.2041
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.7438 0.1964 -3.788 0.000152 ***
## age_zscored -0.3071 0.1717 -1.788 0.073738 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored 0.123
fit.unknown_ignorance <- glmer(chose_unknown ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(condition == "IGNORANCE") %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.0538092 (tol = 0.002, component 1)
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, : Model is nearly unidentifiable: very large eigenvalue
## - Rescale variables?
summary(fit.unknown_ignorance)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: chose_unknown ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(condition == "IGNORANCE") %>% filter(task ==
## "cswl" & block %in% c("1", "2")) %>% mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 471.2 488.0 -231.6 463.2 492
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.1401 -0.5521 -0.2095 -0.2012 3.4160
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 2.078e+00 1.4413906
## target_object_code (Intercept) 2.065e-08 0.0001437
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.146068 0.001314 -871.91 <2e-16 ***
## age_zscored -0.088272 0.001314 -67.19 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored 0.000
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## Model failed to converge with max|grad| = 0.0538092 (tol = 0.002, component 1)
## Model is nearly unidentifiable: very large eigenvalue
## - Rescale variables?
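One way to triage the convergence warnings above is lme4's allFit(): refit with every available optimizer and check whether the fixed effects agree. A sketch:

aa <- allFit(fit.unknown_ignorance)
summary(aa)$fixef # near-identical rows across optimizers suggest the warning is benign
summary(aa)$msgs  # which optimizers still complain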
fit.unknown_b3o <- glmer(chose_unknown ~ age_zscored + (1|PID) + (1|target_object_code) + offset(qlogis(offset)),
data = df.response %>%
filter(condition == "B-3O") %>%
filter(task == 'cswl' & block %in% c("1", "2")) %>%
mutate(offset = 1/3),
family = binomial(link = 'logit'))
## boundary (singular) fit: see help('isSingular')
summary(fit.unknown_b3o)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: chose_unknown ~ age_zscored + (1 | PID) + (1 | target_object_code) +
## offset(qlogis(offset))
## Data: df.response %>% filter(condition == "B-3O") %>% filter(task ==
## "cswl" & block %in% c("1", "2")) %>% mutate(offset = 1/3)
##
## AIC BIC logLik deviance df.resid
## 637.5 654.4 -314.8 629.5 492
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -0.8168 -0.7052 -0.6633 1.3099 1.6537
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.00000 0.0000
## target_object_code (Intercept) 0.05347 0.2312
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.01435 0.12004 -0.120 0.905
## age_zscored -0.06329 0.10929 -0.579 0.563
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored -0.123
## optimizer (Nelder_Mead) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
Model 3: add condition. Choice of target object (0/1) ~ condition (Baseline 2-Object / Baseline 3-Object / Ignorance) + age + (1|participant) + (1|object)
There is a significant effect of condition (reference level: Ignorance). There is a significant difference between the Baseline 3-Object and Ignorance conditions, and no significant difference between Baseline 2-Object and Ignorance (per the preregistration, pairwise comparisons are unadjusted).
fit.pres_baseline <- glmer(correct_resp ~ age_zscored + (1|PID) + (1|target_object_code),
data = df.response %>%
mutate(condition = factor(condition,
levels = c("IGNORANCE", "B-3O", "B-2O"))) %>%
filter(task == 'cswl' & block %in% c("1", "2")),
family = binomial(link = 'logit'))
summary(fit.pres_baseline)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correct_resp ~ age_zscored + (1 | PID) + (1 | target_object_code)
## Data:
## df.response %>% mutate(condition = factor(condition, levels = c("IGNORANCE",
## "B-3O", "B-2O"))) %>% filter(task == "cswl" & block %in% c("1", "2"))
##
## AIC BIC logLik deviance df.resid
## 1974.4 1995.6 -983.2 1966.4 1484
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.9299 -0.8042 -0.5655 0.8871 1.9149
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.61831 0.7863
## target_object_code (Intercept) 0.04854 0.2203
## Number of obs: 1488, groups: PID, 93; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.08541 0.11801 -0.724 0.469
## age_zscored 0.10126 0.10048 1.008 0.314
##
## Correlation of Fixed Effects:
## (Intr)
## age_zscored -0.012
fit.pres <- glmer(correct_resp ~ condition + age_zscored + (1|PID) + (1|target_object_code),
data = df.response %>%
mutate(condition = factor(condition,
levels = c("IGNORANCE", "B-3O", "B-2O"))) %>%
filter(task == 'cswl' & block %in% c("1", "2")),
family = binomial(link = 'logit'))
summary(fit.pres)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula:
## correct_resp ~ condition + age_zscored + (1 | PID) + (1 | target_object_code)
## Data:
## df.response %>% mutate(condition = factor(condition, levels = c("IGNORANCE",
## "B-3O", "B-2O"))) %>% filter(task == "cswl" & block %in% c("1", "2"))
##
## AIC BIC logLik deviance df.resid
## 1967.8 1999.7 -977.9 1955.8 1482
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.9885 -0.8024 -0.5734 0.9172 2.0418
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.51895 0.7204
## target_object_code (Intercept) 0.05024 0.2241
## Number of obs: 1488, groups: PID, 93; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.07891 0.17437 0.453 0.65087
## conditionB-3O -0.60056 0.22934 -2.619 0.00883 **
## conditionB-2O 0.11065 0.22880 0.484 0.62866
## age_zscored 0.13068 0.09523 1.372 0.17000
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cnB-3O cnB-2O
## conditnB-3O -0.656
## conditnB-2O -0.653 0.497
## age_zscored 0.032 -0.087 -0.001
anova(fit.pres_baseline, fit.pres)
## Data: df.response %>% mutate(condition = factor(condition, levels = c("IGNORANCE", ...
## Models:
## fit.pres_baseline: correct_resp ~ age_zscored + (1 | PID) + (1 | target_object_code)
## fit.pres: correct_resp ~ condition + age_zscored + (1 | PID) + (1 | target_object_code)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## fit.pres_baseline 4 1974.4 1995.6 -983.21 1966.4
## fit.pres 6 1967.8 1999.7 -977.92 1955.8 10.585 2 0.005029 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova(fit.pres, type = 3)
## Warning in printHypothesis(L, rhs, names(b)): one or more coefficients in the hypothesis include
## arithmetic operators in their names;
## the printed representation of the hypothesis will be omitted
## Analysis of Deviance Table (Type III Wald chisquare tests)
##
## Response: correct_resp
## Chisq Df Pr(>Chisq)
## (Intercept) 0.2048 1 0.650872
## condition 11.0867 2 0.003913 **
## age_zscored 1.8829 1 0.170004
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
emmeans(fit.pres,
specs = pairwise ~ "condition",
adjust = "none")
## $emmeans
## condition emmean SE df asymp.LCL asymp.UCL
## IGNORANCE 0.0819 0.174 Inf -0.260 0.424
## B-3O -0.5187 0.175 Inf -0.861 -0.177
## B-2O 0.1925 0.175 Inf -0.151 0.536
##
## Results are given on the logit (not the response) scale.
## Confidence level used: 0.95
##
## $contrasts
## contrast estimate SE df z.ratio p.value
## IGNORANCE - (B-3O) 0.601 0.229 Inf 2.619 0.0088
## IGNORANCE - (B-2O) -0.111 0.229 Inf -0.484 0.6287
## (B-3O) - (B-2O) -0.711 0.230 Inf -3.095 0.0020
##
## Results are given on the log odds ratio (not the response) scale.
There is no condition-by-age interaction.
fit.pres_int <- glmer(correct_resp ~ condition * age_zscored + (1|PID) + (1|target_object_code),
data = df.response %>%
mutate(condition = factor(condition,
levels = c("IGNORANCE", "B-3O", "B-2O"))) %>%
filter(task == 'cswl' & block %in% c("1", "2")),
family = binomial(link = 'logit'))
summary(fit.pres_int)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula:
## correct_resp ~ condition * age_zscored + (1 | PID) + (1 | target_object_code)
## Data:
## df.response %>% mutate(condition = factor(condition, levels = c("IGNORANCE",
## "B-3O", "B-2O"))) %>% filter(task == "cswl" & block %in% c("1", "2"))
##
## AIC BIC logLik deviance df.resid
## 1970.2 2012.6 -977.1 1954.2 1480
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.0154 -0.8055 -0.5510 0.9244 2.0316
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.5061 0.7114
## target_object_code (Intercept) 0.0502 0.2241
## Number of obs: 1488, groups: PID, 93; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.08290 0.17334 0.478 0.6325
## conditionB-3O -0.57444 0.22839 -2.515 0.0119 *
## conditionB-2O 0.11002 0.22733 0.484 0.6284
## age_zscored 0.20714 0.15772 1.313 0.1891
## conditionB-3O:age_zscored -0.27709 0.24119 -1.149 0.2506
## conditionB-2O:age_zscored -0.00646 0.22092 -0.029 0.9767
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cnB-3O cnB-2O ag_zsc cB-3O:
## conditnB-3O -0.652
## conditnB-2O -0.653 0.496
## age_zscored 0.052 -0.040 -0.039
## cndtnB-3O:_ -0.034 -0.062 0.026 -0.654
## cndtnB-2O:_ -0.037 0.028 0.054 -0.714 0.467
anova(fit.pres_int, fit.pres, type = 3)
## Data: df.response %>% mutate(condition = factor(condition, levels = c("IGNORANCE", ...
## Models:
## fit.pres: correct_resp ~ condition + age_zscored + (1 | PID) + (1 | target_object_code)
## fit.pres_int: correct_resp ~ condition * age_zscored + (1 | PID) + (1 | target_object_code)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## fit.pres 6 1967.8 1999.7 -977.92 1955.8
## fit.pres_int 8 1970.2 2012.6 -977.10 1954.2 1.6369 2 0.4411
There is no effect of ToM in the regression.
fit.tom <- glmer(correct_resp ~ tom_zscored + age_zscored + (1|PID) + (1|target_object_code),
data = df.response %>%
filter(condition == "IGNORANCE") %>%
filter(task == 'cswl' & block %in% c("1", "2")),
family = binomial(link = 'logit'))
summary(fit.tom)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula:
## correct_resp ~ tom_zscored + age_zscored + (1 | PID) + (1 | target_object_code)
## Data: df.response %>% filter(condition == "IGNORANCE") %>% filter(task ==
## "cswl" & block %in% c("1", "2"))
##
## AIC BIC logLik deviance df.resid
## 657.2 678.2 -323.6 647.2 491
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.0272 -0.8053 0.4051 0.8153 1.9605
##
## Random effects:
## Groups Name Variance Std.Dev.
## PID (Intercept) 0.6995 0.8364
## target_object_code (Intercept) 0.1367 0.3697
## Number of obs: 496, groups: PID, 31; target_object_code, 12
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.07335 0.21119 0.347 0.728
## tom_zscored 0.13447 0.19312 0.696 0.486
## age_zscored 0.15543 0.19218 0.809 0.419
##
## Correlation of Fixed Effects:
## (Intr) tm_zsc
## tom_zscored -0.108
## age_zscored 0.086 -0.390
Anova(fit.tom, type = 3)
## Analysis of Deviance Table (Type III Wald chisquare tests)
##
## Response: correct_resp
## Chisq Df Pr(>Chisq)
## (Intercept) 0.1206 1 0.7283
## tom_zscored 0.4848 1 0.4863
## age_zscored 0.6541 1 0.4186
Most children score 6 or more on the scale.
df.tom <- df.response %>%
filter(!is.na(total_tom_score)) %>%
select("PID", "age_years", "age_years_cont", contains("tom")) %>%
unique()
ggplot(df.tom,
aes(x = total_tom_score)) +
geom_histogram(bins = 7)
ggplot(df.tom,
aes(x = age_years_cont,
y = total_tom_score)) +
geom_smooth() +
geom_point() +
ylim(c(1, 7))
## `geom_smooth()` using method = 'loess' and formula = 'y ~ x'
Looking at each task by age, it seems like 5-year-olds' performance in the diverse_belief and diverse_desire tasks (the easiest ones, based on the literature) is already not at ceiling. So we should be able to get some variation in younger kids just by limiting the scale to its first 5 items (see the sketch after the reshaping code below).
df.tom_by_task <- df.tom %>%
pivot_longer(cols = starts_with("tom_") & !ends_with("zscored"),
names_to = "tom_task",
names_pattern = "tom_(.*)",
values_to = "tom_score") %>%
mutate(tom_task = factor(tom_task,
levels = c('diverse_desire', 'diverse_belief',
'knowledge_access',
'contents_falsebelief', 'explicit_falsebelief',
'belief_emotion', 'real_apparent_emotion')))
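A minimal sketch of that 5-item subscore, assuming each task is scored 0/1 (the df.tom_short frame and the tom_score_5 name are introduced here for illustration):

# Hypothetical 5-item ToM subscore: drop the two hardest tasks and sum
# the rest (assumes one row per PID x task in df.tom_by_task).
df.tom_short <- df.tom_by_task %>%
  filter(!tom_task %in% c("belief_emotion", "real_apparent_emotion")) %>%
  group_by(PID) %>%
  summarise(tom_score_5 = sum(tom_score), .groups = "drop")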
ggplot(df.tom_by_task,
aes(x = tom_task,
y = tom_score,
fill = as.factor(age_years))) +
stat_summary(fun = "mean",
geom = "bar",
position = position_dodge(0.9)) +
stat_summary(fun.data = "mean_cl_boot",
geom = "pointrange",
position = position_dodge(0.9)) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1))
session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.4.1 (2024-06-14)
## os macOS Sonoma 14.5
## system aarch64, darwin20
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz America/Los_Angeles
## date 2025-08-08
## pandoc 3.7.0.2 @ /opt/homebrew/bin/ (via rmarkdown)
## quarto 1.5.57 @ /usr/local/bin/quarto
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package * version date (UTC) lib source
## abind 1.4-5 2016-07-21 [1] CRAN (R 4.4.0)
## backports 1.5.0 2024-05-23 [1] CRAN (R 4.4.0)
## base64enc 0.1-3 2015-07-28 [1] CRAN (R 4.4.0)
## boot 1.3-30 2024-02-26 [1] CRAN (R 4.4.1)
## bslib 0.9.0 2025-01-30 [1] CRAN (R 4.4.1)
## cachem 1.1.0 2024-05-16 [1] CRAN (R 4.4.0)
## car * 3.1-2 2023-03-30 [1] CRAN (R 4.4.0)
## carData * 3.0-5 2022-01-06 [1] CRAN (R 4.4.0)
## checkmate 2.3.2 2024-07-29 [1] CRAN (R 4.4.0)
## cli 3.6.5 2025-04-23 [1] CRAN (R 4.4.1)
## cluster 2.1.6 2023-12-01 [1] CRAN (R 4.4.1)
## coda 0.19-4.1 2024-01-31 [1] CRAN (R 4.4.0)
## colorspace 2.1-1 2024-07-26 [1] CRAN (R 4.4.0)
## data.table 1.17.2 2025-05-12 [1] CRAN (R 4.4.1)
## digest 0.6.37 2024-08-19 [1] CRAN (R 4.4.1)
## dplyr * 1.1.4 2023-11-17 [1] CRAN (R 4.4.0)
## emmeans * 1.10.3 2024-07-01 [1] CRAN (R 4.4.0)
## estimability 1.5.1 2024-05-12 [1] CRAN (R 4.4.0)
## evaluate 1.0.3 2025-01-10 [1] CRAN (R 4.4.1)
## farver 2.1.2 2024-05-13 [1] CRAN (R 4.4.0)
## fastmap 1.2.0 2024-05-15 [1] CRAN (R 4.4.0)
## forcats * 1.0.0 2023-01-29 [1] CRAN (R 4.4.0)
## foreign 0.8-86 2023-11-28 [1] CRAN (R 4.4.1)
## Formula 1.2-5 2023-02-24 [1] CRAN (R 4.4.0)
## generics 0.1.4 2025-05-09 [1] CRAN (R 4.4.1)
## ggplot2 * 3.5.2 2025-04-09 [1] CRAN (R 4.4.1)
## glue 1.8.0 2024-09-30 [1] CRAN (R 4.4.1)
## gridExtra 2.3 2017-09-09 [1] CRAN (R 4.4.0)
## gtable 0.3.6 2024-10-25 [1] CRAN (R 4.4.1)
## Hmisc 5.2-3 2025-03-16 [1] CRAN (R 4.4.1)
## hms 1.1.3 2023-03-21 [1] CRAN (R 4.4.0)
## htmlTable 2.4.3 2024-07-21 [1] CRAN (R 4.4.0)
## htmltools 0.5.8.1 2024-04-04 [1] CRAN (R 4.4.0)
## htmlwidgets 1.6.4 2023-12-06 [1] CRAN (R 4.4.0)
## jquerylib 0.1.4 2021-04-26 [1] CRAN (R 4.4.0)
## jsonlite 2.0.0 2025-03-27 [1] CRAN (R 4.4.1)
## knitr 1.50 2025-03-16 [1] CRAN (R 4.4.1)
## labeling 0.4.3 2023-08-29 [1] CRAN (R 4.4.0)
## lattice 0.22-6 2024-03-20 [1] CRAN (R 4.4.1)
## lifecycle 1.0.4 2023-11-07 [1] CRAN (R 4.4.0)
## lme4 * 1.1-35.5 2024-07-03 [1] CRAN (R 4.4.0)
## lubridate * 1.9.3 2023-09-27 [1] CRAN (R 4.4.0)
## magrittr 2.0.3 2022-03-30 [1] CRAN (R 4.4.0)
## MASS 7.3-60.2 2024-04-26 [1] CRAN (R 4.4.1)
## Matrix * 1.7-0 2024-04-26 [1] CRAN (R 4.4.1)
## mgcv 1.9-1 2023-12-21 [1] CRAN (R 4.4.1)
## minqa 1.2.7 2024-05-20 [1] CRAN (R 4.4.0)
## mvtnorm 1.2-5 2024-05-21 [1] CRAN (R 4.4.0)
## nlme 3.1-164 2023-11-27 [1] CRAN (R 4.4.1)
## nloptr 2.1.1 2024-06-25 [1] CRAN (R 4.4.0)
## nnet 7.3-19 2023-05-03 [1] CRAN (R 4.4.1)
## pillar 1.10.2 2025-04-05 [1] CRAN (R 4.4.1)
## pkgconfig 2.0.3 2019-09-22 [1] CRAN (R 4.4.0)
## purrr * 1.0.2 2023-08-10 [1] CRAN (R 4.4.0)
## R6 2.6.1 2025-02-15 [1] CRAN (R 4.4.1)
## RColorBrewer 1.1-3 2022-04-03 [1] CRAN (R 4.4.0)
## Rcpp 1.0.13 2024-07-17 [1] CRAN (R 4.4.0)
## readr * 2.1.5 2024-01-10 [1] CRAN (R 4.4.0)
## rlang 1.1.6 2025-04-11 [1] CRAN (R 4.4.1)
## rmarkdown 2.29 2024-11-04 [1] CRAN (R 4.4.1)
## rpart 4.1.23 2023-12-05 [1] CRAN (R 4.4.1)
## rstudioapi 0.17.1 2024-10-22 [1] CRAN (R 4.4.1)
## sass 0.4.10 2025-04-11 [1] CRAN (R 4.4.1)
## scales 1.4.0 2025-04-24 [1] CRAN (R 4.4.1)
## sessioninfo * 1.2.3 2025-02-05 [1] CRAN (R 4.4.1)
## stringi 1.8.7 2025-03-27 [1] CRAN (R 4.4.1)
## stringr * 1.5.1 2023-11-14 [1] CRAN (R 4.4.0)
## tibble * 3.2.1 2023-03-20 [1] CRAN (R 4.4.0)
## tidyr * 1.3.1 2024-01-24 [1] CRAN (R 4.4.0)
## tidyselect 1.2.1 2024-03-11 [1] CRAN (R 4.4.0)
## tidyverse * 2.0.0 2023-02-22 [1] CRAN (R 4.4.0)
## timechange 0.3.0 2024-01-18 [1] CRAN (R 4.4.0)
## tzdb 0.4.0 2023-05-12 [1] CRAN (R 4.4.0)
## vctrs 0.6.5 2023-12-01 [1] CRAN (R 4.4.0)
## withr 3.0.2 2024-10-28 [1] CRAN (R 4.4.1)
## xfun 0.52 2025-04-02 [1] CRAN (R 4.4.1)
## xtable 1.8-4 2019-04-21 [1] CRAN (R 4.4.0)
## yaml 2.3.10 2024-07-26 [1] CRAN (R 4.4.0)
##
## [1] /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/library
## * ── Packages attached to the search path.
##
## ──────────────────────────────────────────────────────────────────────────────