library(tidyverse)
library(janitor)
library(cowplot)
library(here)
library(readxl)
library(Matrix)
library(lme4)
library(lmerTest)
library(TOSTER)
library(wesanderson)
library(gghalves)
library(car)
library(multilevelTools)
library(JWileymisc)
library(broom.mixed)
library(papaja)
library(effectsize)
library(ggimage)
library(sessioninfo)
library(broom)
library(performance)
#devtools::install_github("jmgirard/agreement")
source("compute_iccs.R")
source("common.R")
theme_set(theme_cowplot())
knitr::opts_chunk$set(cache = FALSE, warning = FALSE, message = FALSE)
#set paths
data_file_path <- here::here("..","..","data","processed_data","CATegories_exp2_processed_data_with_exclusion_info.csv")
useable_trial_summary_path <- here::here("..","..","data","processed_data","CATegories_exp2_useable_trial_summary.csv")
summarize_subj_trials_path <- here::here("..","..","data","processed_data","CATegories_exp2_trial_summary_data.csv")
participant_data_path <- here::here("..","..","data","processed_data","CATegories_exp2_processed_participant_data_anonymized.csv")
resampled_data_file_path <- here::here("..","..","data","processed_data","CATegories_exp2_processed_data_resampled.csv")
#load data
d <- read_csv(data_file_path)
participant_total_useable_trials <- read_csv(useable_trial_summary_path)
summarize_subj_trials <- read_csv(summarize_subj_trials_path)
participant_data <- read_csv(participant_data_path)
d_resampled <- read_csv(resampled_data_file_path) #note that this file gets created in cluster_permutation_analysis.Rmd
For a trial to be included, participants must contribute at least 50% looking during both windows of interest used to compute baseline-corrected proportion target looking: the critical window (300-2800 ms relative to target word onset) and the baseline window (-2000 to 0 ms relative to target word onset). See 2_process_exclusions.Rmd for detailed processing steps.
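For concreteness, a minimal sketch of how such trial-level usability flags could be computed is shown below (the actual implementation lives in 2_process_exclusions.Rmd; the input format and the column looking_at_aoi are illustrative assumptions, not the project's real variable names).
#Illustrative sketch only -- see 2_process_exclusions.Rmd for the actual pipeline.
#Assumes a long data frame of gaze samples with columns sub_num, trial_number,
#time_normalized_corrected (ms), and looking_at_aoi (TRUE if gaze is on either image).
flag_useable_trials <- function(gaze_samples) {
  gaze_samples %>%
    mutate(window = case_when(
      time_normalized_corrected >= -2000 & time_normalized_corrected < 0 ~ "baseline",
      time_normalized_corrected >= 300 & time_normalized_corrected <= 2800 ~ "critical",
      TRUE ~ NA_character_
    )) %>%
    filter(!is.na(window)) %>%
    group_by(sub_num, trial_number, window) %>%
    summarize(prop_looking = mean(looking_at_aoi), .groups = "drop") %>%
    pivot_wider(names_from = window, values_from = prop_looking) %>%
    mutate(
      useable_baseline_window = as.integer(baseline >= 0.5),
      useable_critical_window = as.integer(critical >= 0.5),
      useable_window = as.integer(useable_baseline_window == 1 & useable_critical_window == 1)
    )
}
Baseline-corrected proportion target looking is then, presumably, the critical-window proportion target looking minus the baseline-window proportion (mean_target_looking_critical - mean_target_looking_baseline).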
Among the trials contributed by the 141 participants (excluding 2 participants whose data were filtered out earlier in processing, either because they did not provide any useable trials or because of parent interference throughout the session), 81.1% contained sufficient looking to meet our trial-level inclusion criteria (at least 50% looking during both the baseline window and the critical window). After additional trial-level exclusions (due to technical errors or frame rate issues), 78.3% of trials were retained. 84 of the 141 participants contributed valid data on at least half of the experimental trials (all participants: M = 25.9; included participants only: M = 36.7).
participant_data_summarized <- participant_data %>%
summarize(
N=length(unique(sub_num)),
num_sessions = sum(session<3),
session_1_N = sum(session==1),
session_2_N = sum(session==2)
)
participant_data_summarized %>%
knitr::kable()
| N | num_sessions | session_1_N | session_2_N |
|---|---|---|---|
| 143 | 255 | 143 | 112 |
participant_data_summarized <- participant_data %>%
distinct(sub_num,session_1_data,session_2_data) %>%
left_join(participant_total_useable_trials)
#participants without session 2 data who were subsequently excluded
sum(filter(participant_data_summarized,session_2_data=="N")$exclude_participant)
## [1] 27
#participants with session 2 data who were subsequently excluded
sum(filter(participant_data_summarized,session_2_data=="Y")$exclude_participant)
## [1] 32
#summarize subj info
subj_info_multisession <- d %>%
distinct(sub_num, age,age_mo,child_gender,trial_order) %>%
mutate(
age_mo_c = age_mo - mean(age_mo),
age_c = age - mean(age)
)
subj_info <- d %>%
distinct(sub_num,child_gender) %>%
summarize(
N = n(),
N_female = sum(child_gender=="f")
)
overall_subj_info <- subj_info_multisession %>%
summarize(
N = length(unique(sub_num)),
sessions = n(),
mean_age = mean(age_mo),
min_age = min(age),
max_age = max(age),
sd_age = sd(age_mo)
) %>%
left_join(subj_info)
overall_subj_info %>%
knitr::kable()
| N | sessions | mean_age | min_age | max_age | sd_age | N_female |
|---|---|---|---|---|---|---|
| 141 | 248 | 15.73065 | 410 | 718 | 1.613515 | 70 |
# subjects with usable trial data only
subj_info_multisession_usable_trials <- d %>%
filter(exclude_participant==0) %>%
distinct(sub_num, age,age_mo,child_gender,trial_order) %>%
mutate(
age_mo_c = age_mo - mean(age_mo),
age_c = age - mean(age)
)
subj_info_usable_trials <- d %>%
filter(exclude_participant==0) %>%
filter(useable_window==1) %>%
distinct(sub_num,child_gender) %>%
summarize(
N = n(),
N_female = sum(child_gender=="f")
)
overall_subj_info_usable_trials <- subj_info_multisession_usable_trials %>%
summarize(
N = length(unique(sub_num)),
sessions = n(),
mean_age = mean(age_mo),
mean_age_days=mean(age),
min_age = min(age),
max_age = max(age),
sd_age = sd(age_mo)
) %>%
left_join(subj_info_usable_trials)
overall_subj_info_usable_trials %>%
knitr::kable()
| N | sessions | mean_age | mean_age_days | min_age | max_age | sd_age | N_female |
|---|---|---|---|---|---|---|---|
| 84 | 164 | 15.70122 | 477.8963 | 410 | 574 | 1.490131 | 51 |
Next, we summarize demographic information for the final sample (after removing excluded participants).
demographics_summary <- d %>%
filter(exclude_participant == 0) %>%
distinct(sub_num,demographic_us_race_ethnicity_identification,demographic_education_level,demographic_annual_income,demographic_country,demographic_state,demographic_density)
multiple_categories_list <- c("White, Middle Eastern or North African","White, Hispanic, Latino, or Spanish origin","White, Black or African American, Middle Eastern or North African","White, Black or African American","White, Asian, Middle Eastern or North African","White, Asian","Hispanic, Latino, or Spanish origin, Black or African American","Hispanic, Latino, or Spanish origin, Asian","Black or African American, Asian","Asian, Middle Eastern or North African")
race_ethnicity <- demographics_summary %>%
group_by(demographic_us_race_ethnicity_identification)%>%
summarize(
N = n()
) %>%
ungroup() %>%
mutate(percent=N/sum(N),
multiple_categories=ifelse(demographic_us_race_ethnicity_identification %in% multiple_categories_list,1,0))
#percent reporting multiple categories
sum(filter(race_ethnicity,multiple_categories==1)$N)/sum(race_ethnicity$N)
## [1] 0.2261905
#quick visualization
ggplot(demographics_summary,aes(y=demographic_us_race_ethnicity_identification)) +
geom_bar()
income <- demographics_summary %>%
group_by(demographic_annual_income)%>%
summarize(
N = n()
)
income %>%
knitr::kable()
| demographic_annual_income | N |
|---|---|
| 100000 | 6 |
| 110000 | 1 |
| 120000 | 5 |
| 130000 | 4 |
| 140000 | 1 |
| 15000 | 1 |
| 150000 | 11 |
| 160000 | 1 |
| 170000 | 1 |
| 180000 | 1 |
| 190000 | 1 |
| 20000 | 1 |
| 30000 | 1 |
| 40000 | 4 |
| 50000 | 7 |
| 60000 | 5 |
| 70000 | 4 |
| 80000 | 5 |
| 90000 | 3 |
| >200000 | 19 |
| NA | 2 |
#quick visualization
ggplot(demographics_summary,aes(demographic_annual_income)) +
geom_bar()+
theme(axis.text.x = element_text(angle = 90))
education <- demographics_summary %>%
group_by(demographic_education_level)%>%
summarize(
N = n()
)
education %>%
knitr::kable()
| demographic_education_level | N |
|---|---|
| assoc | 1 |
| bach | 31 |
| col | 2 |
| grad | 3 |
| hs | 1 |
| prof | 46 |
states <- demographics_summary %>%
group_by(demographic_state)%>%
summarize(
N = n()
)
ggplot(demographics_summary,aes(demographic_state)) +
geom_bar()+
theme(axis.text.x = element_text(angle = 90))
population_density <- demographics_summary %>%
group_by(demographic_density) %>%
summarize(
N = n()
)
population_density %>%
knitr::kable()
| demographic_density | N |
|---|---|
| rural | 4 |
| suburban | 57 |
| urban | 22 |
| NA | 1 |
Here, we summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking.
# save key columns
key_cols_summarized_trial_data <- c(
"sub_num","session", "age","age_mo","days_between_sessions", "child_gender", "trial_order","trial_number","condition","target_image","distractor_image","target_category","distractor_category","target_typicality_z","distractor_typicality_z","target_parent_typicality_rating","distractor_parent_typicality_rating","target_parent_typicality_rating_z","distractor_parent_typicality_rating_z","target_parent_typicality_by_category_z","distractor_parent_typicality_by_category_z","mean_target_looking_critical","mean_target_looking_baseline","corrected_target_looking","exclude_participant","age_exclusion","trial_exclusion","trial_exclusion_reason","exclude_technical_issue","exclude_frame_rate","useable_window","useable_critical_window","useable_baseline_window","useable_window_short","total_trials_short","exclude_participant_insufficient_data_short","mean_target_looking_critical_short","corrected_target_looking_short")
#extract summarized trial-level accuracy (see 2_process_exclusions.Rmd for details on how summarized columns are computed)
trial_corrected_accuracy_all <- d %>%
select(all_of(key_cols_summarized_trial_data)) %>%
distinct()
trial_corrected_accuracy <- trial_corrected_accuracy_all %>%
filter(exclude_participant==0) %>%
filter(trial_exclusion==0)
# summarize average accuracy
avg_corrected_target_looking <- trial_corrected_accuracy %>%
group_by(sub_num) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
critical_window_ci = qt(0.975, N-1)*sd(mean_target_looking_critical,na.rm=T)/sqrt(N),
critical_window_lower_ci=average_critical_window_looking-critical_window_ci,
critical_window_upper_ci=average_critical_window_looking+critical_window_ci)
#baseline-corrected target looking summarized overall
overall_corrected_target_looking <- avg_corrected_target_looking %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci)
overall_corrected_target_looking %>%
knitr::kable()
| N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|
| 84 | 0.072012 | 0.0173402 | 0.0546718 | 0.0893522 |
We summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking, split by typicality condition.
# summarize average accuracy within participant
avg_corrected_target_looking_by_typicality <- trial_corrected_accuracy %>%
group_by(sub_num, condition) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
se=sd(corrected_target_looking,na.rm=T)/sqrt(N),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
lower_se=average_corrected_target_looking-se,
upper_se=average_corrected_target_looking+se,
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
critical_window_ci = qt(0.975, N-1)*sd(mean_target_looking_critical,na.rm=T)/sqrt(N),
critical_window_lower_ci=average_critical_window_looking-critical_window_ci,
critical_window_upper_ci=average_critical_window_looking+critical_window_ci)
#baseline-corrected target looking summarized overall
overall_corrected_target_looking_by_typicality <- avg_corrected_target_looking_by_typicality %>%
group_by(condition) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci)
overall_corrected_target_looking_by_typicality %>%
knitr::kable()
| condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 84 | 0.0650543 | 0.0200254 | 0.0450288 | 0.0850797 |
| typical | 84 | 0.0804637 | 0.0221078 | 0.0583559 | 0.1025714 |
We summarize each participant’s average accuracy during the critical window and average baseline-corrected proportion target looking, split by typicality condition and target label.
# summarize average accuracy within participant
avg_corrected_target_looking_by_category <- trial_corrected_accuracy %>%
group_by(sub_num, condition,target_category) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
critical_window_ci = qt(0.975, N-1)*sd(mean_target_looking_critical,na.rm=T)/sqrt(N),
critical_window_lower_ci=average_critical_window_looking-critical_window_ci,
critical_window_upper_ci=average_critical_window_looking+critical_window_ci)
To investigate whether there was an effect of typicality on infants’ word recognition, we fit a linear mixed-effects model predicting average baseline-corrected proportion target looking from typicality condition (centered), including a by-participant random intercept and a by-participant random slope for typicality condition. Because the number of observations in this model equals the number of random effects, lmer throws an error by default; we overrode this check (see the code comments below). Fixed-effect estimates are identical with or without the random slope for typicality condition, so we retained the slope for consistency with the Stage 1 analysis plan.
avg_corrected_target_looking_by_typicality <- avg_corrected_target_looking_by_typicality %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
m_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1+typicality_condition_c|sub_num),data=avg_corrected_target_looking_by_typicality,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
#Note that we override lmer's check that the number of observations exceeds the number of random effects,
#which lmer enforces because it otherwise cannot separate residual variance from random-effect variance.
#However, the fixed-effect estimates are for all intents and purposes identical whether the random slope
#for typicality is retained or removed (see the commented-out model below).
#We therefore proceed with the (numerically identical) model retaining the random slope, for consistency with the Stage 1 plan.
#The same rationale applies to the subsequent participant-level models below.
#m_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1|sub_num),data=avg_corrected_target_looking_by_typicality)
summary(m_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +
## (1 + typicality_condition_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -305
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.19855 -0.40450 0.06351 0.40240 1.93330
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004316 0.06570
## typicality_condition_c 0.004443 0.06665 0.21
## Residual 0.004020 0.06340
## Number of obs: 168, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.072759 0.008678 83.000021 8.384 1.11e-12 ***
## typicality_condition_c 0.015409 0.012190 82.999873 1.264 0.21
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.105
## optimizer (nloptwrap) convergence code: 0 (OK)
## Model is nearly unidentifiable: large eigenvalue ratio
## - Rescale variables?
confint(m_1_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.055750155 0.08976778
## typicality_condition_c -0.008482829 0.03930159
Infants successfully recognized the target words (Model Intercept: \(\hat{\beta} = 0.07\), 95% CI \([0.06, 0.09]\), \(t(83) = 8.38\), \(p < .001\)).
Next, we plot the overall average baseline-corrected proportion target looking for each condition (in black). Individual points represent individual subjects; lines connect each subject’s responses across conditions. Error bars represent 95% CIs.
#Overall baseline-corrected proportion target looking by condition
pal <- wes_palette("Rushmore1", n=5)
set.seed(1)
jitterer <- position_jitter(width = .05,seed=1)
overall_typicality_plot <- ggplot(avg_corrected_target_looking_by_typicality,aes(x=condition,y=average_corrected_target_looking, fill=condition))+
geom_half_violin(data=filter(avg_corrected_target_looking_by_typicality, condition=="atypical"),position = position_nudge(x = -.1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="l")+
geom_half_violin(data=filter(avg_corrected_target_looking_by_typicality, condition=="typical"),position = position_nudge(x = .1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="r")+
geom_path(aes(group=sub_num),color="black",fill=NA,alpha=0.15,size=0.75,position=jitterer)+
geom_point(aes(color=condition,group=sub_num), size = 2.5, alpha=0.15,position=jitterer)+
geom_point(data=overall_corrected_target_looking_by_typicality,aes(y=corrected_target_looking),color="black",size=5)+
geom_line(data=overall_corrected_target_looking_by_typicality,aes(y=corrected_target_looking,group=1),color="black",size=3)+
geom_errorbar(data=overall_corrected_target_looking_by_typicality,aes(y=corrected_target_looking,ymin=lower_ci,ymax=upper_ci),width=0,size=1.2,color="black")+
#geom_boxplot(outlier.shape = NA, alpha = .5, width = .1, colour = "black")+
#scale_colour_brewer(palette = "Dark2")+
#scale_fill_brewer(palette = "Dark2")+
geom_hline(yintercept=0,linetype="dashed")+
scale_colour_manual(values=pal[c(3,4)])+
scale_fill_manual(values=pal[c(3,4)])+
theme(legend.position="none")+
xlab("Typicality Condition")+
ylab("Baseline-Corrected\nProportion Target Looking")+
theme(axis.title.x = element_text(face="bold", size=20),
axis.text.x = element_text(size=14),
axis.title.y = element_text(face="bold", size=20),
axis.text.y = element_text(size=16),
strip.text.x = element_text(size = 16,face="bold"))
overall_typicality_plot
ggsave(here::here("..","figures","baseline_corrected_accuracy_overall.png"),width=7,height=6,bg = "white")
overall_condition_summary <- avg_corrected_target_looking_by_typicality %>%
ungroup() %>%
group_by(sub_num) %>%
summarize(
condition_diff = average_corrected_target_looking[condition=="typical"]-average_corrected_target_looking[condition=="atypical"]
) %>%
ungroup() %>%
summarize(
N=n(),
diff = mean(condition_diff),
sd = sd(condition_diff)
)
tost_results <- tsum_TOST(m1=overall_condition_summary$diff,sd1=overall_condition_summary$sd,n1=overall_condition_summary$N,eqb=0.25, eqbound_type = "SMD")
#quick sanity check
#t-test in tost is equivalent to regular old paired t-test
# AND equivalent to lmer estimate of typicality effect
t.test(
avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="typical"],
avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="atypical"],
paired=T)
#effect size
typicality_effect_cohens_d <- cohens_d(avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="typical"],
avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="atypical"],
paired=T)
There was no significant effect of typicality, \(\hat{\beta} = 0.02\), 95% CI \([-0.01, 0.04]\), \(t(83.00) = 1.26\), \(p = .210\), Cohen’s d = 0.14, 95% CI [-0.08, 0.35]. The equivalence test was non-significant, t(83) = -1.027, p = .154. We therefore could not reject the null hypothesis that the absolute effect size was at least as large as d = 0.25.
## Typical word recognition
# recentering the model on the typical condition to make the intercept interpretable
m_1_1_3_typ <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1+ typicality_condition_typ|sub_num),data=avg_corrected_target_looking_by_typicality,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_typ)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +
## (1 + typicality_condition_typ | sub_num)
## Data: avg_corrected_target_looking_by_typicality
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -305
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.49953 -0.45988 0.07221 0.45749 2.19800
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.005182 0.07199
## typicality_condition_typ 0.002091 0.04573 0.60
## Residual 0.005196 0.07208
## Number of obs: 168, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.08046 0.01112 82.99955 7.239 2.08e-10 ***
## typicality_condition_typ 0.01541 0.01219 83.00059 1.264 0.21
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.630
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_3_typ,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.058678143 0.10224917
## typicality_condition_typ -0.008482714 0.03930147
#effect size
typical_cohens_d <- cohens_d(avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="typical"])
## Atypical word recognition
# recentering the model on the atypical condition to make the intercept interpretable
m_1_1_3_atyp <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1+ typicality_condition_atyp|sub_num),data=avg_corrected_target_looking_by_typicality,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_atyp)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +
## (1 + typicality_condition_atyp | sub_num)
## Data: avg_corrected_target_looking_by_typicality
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -305
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.35170 -0.43268 0.06794 0.43044 2.06798
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003916 0.06258
## typicality_condition_atyp 0.003284 0.05730 -0.20
## Residual 0.004599 0.06782
## Number of obs: 168, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.06505 0.01007 83.00005 6.461 6.69e-09 ***
## typicality_condition_atyp 0.01541 0.01219 82.99930 1.264 0.21
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.515
confint(m_1_1_3_atyp,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.045320783 0.08478777
## typicality_condition_atyp -0.008482889 0.03930165
#effect size
atypical_cohens_d <-cohens_d(avg_corrected_target_looking_by_typicality$average_corrected_target_looking[avg_corrected_target_looking_by_typicality$condition=="atypical"])
Infants robustly recognized the target words for both typical (Model: \(\hat{\beta} = 0.08\), 95% CI \([0.06, 0.10]\), \(t(83.00) = 7.24\), \(p < .001\); Cohen’s d = 0.79, 95% CI [0.54,1.03]; Mean baseline-corrected looking: M=8%, 95% CI [5.8%, 10.3%]) and atypical exemplars (Model: \(\hat{\beta} = 0.07\), 95% CI \([0.05, 0.08]\), \(t(83) = 6.46\), \(p < .001\); Cohen’s d = 0.7, 95% CI [0.46,0.94]; Mean baseline-corrected looking: M=6.5%, 95% CI [4.5%, 8.5%]).
trial_corrected_accuracy <- trial_corrected_accuracy %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
#the model with a by-participant random slope for typicality condition yields a singular fit, so we removed the random slope
#however, the singular model yields virtually identical results to the model
#without the random slope for typicality condition
# m_1_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c +
# (1 + typicality_condition_c|sub_num) +
# (1|target_category),
# data=trial_corrected_accuracy)
m_1_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c +
(1 | sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2262.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.15492 -0.63130 -0.02562 0.65630 2.78234
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0031427 0.05606
## target_category (Intercept) 0.0002028 0.01424
## Residual 0.1190987 0.34511
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.366e-02 1.129e-02 5.932e+00 6.524 0.000648 ***
## typicality_condition_c 1.147e-02 1.244e-02 3.014e+03 0.922 0.356604
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.001
confint(m_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.05152909 0.09578319
## typicality_condition_c -0.01291181 0.03585025
#Centering on typical condition
m_1_2_typ <- lmer(corrected_target_looking ~ 1 + typicality_condition_typ +
(1 |sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_1_2_typ)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_typ + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2262.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.15492 -0.63130 -0.02562 0.65630 2.78234
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0031427 0.05606
## target_category (Intercept) 0.0002028 0.01424
## Residual 0.1190987 0.34511
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.939e-02 1.290e-02 1.010e+01 6.156 0.000103 ***
## typicality_condition_typ 1.147e-02 1.244e-02 3.014e+03 0.922 0.356604
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.483
confint(m_1_2_typ,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.05411442 0.10466709
## typicality_condition_typ -0.01291181 0.03585025
#Centering on atypical condition
m_1_2_atyp <- lmer(corrected_target_looking ~ 1 + typicality_condition_atyp +
(1 |sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_1_2_atyp)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_atyp + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2262.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.15492 -0.63130 -0.02562 0.65630 2.78234
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0031427 0.05606
## target_category (Intercept) 0.0002028 0.01424
## Residual 0.1190987 0.34511
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.792e-02 1.288e-02 1.006e+01 5.272 0.000355 ***
## typicality_condition_atyp 1.147e-02 1.244e-02 3.014e+03 0.922 0.356604
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.482
confint(m_1_2_atyp,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.04267204 0.09317103
## typicality_condition_atyp -0.01291181 0.03585025
The model with the maximal random effects structure yielded a singular fit that was only remedied by removing the by-participant random slope for typicality condition. However, the (singular) model including the typicality random slope yielded virtually identical results to the converging model including random intercepts for participant and target word only. As in the average participant-level analysis, infants’ overall recognition of target words was significant in the trial-level model (\(\hat{\beta} = 0.07\), 95% CI \([0.05, 0.10]\), \(t(5.93) = 6.52\), \(p < .001\)) and there was no significant effect of typicality (\(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.04]\), \(t(3014.08) = 0.92\), \(p = .357\)). Word recognition was robust both for typical (\(\hat{\beta} = 0.08\), 95% CI \([0.05, 0.10]\), \(t(10.10) = 6.16\), \(p < .001\)) and atypical exemplars (\(\hat{\beta} = 0.07\), 95% CI \([0.04, 0.09]\), \(t(10.06) = 5.27\), \(p < .001\)).
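As a sanity check, one can compare the fixed effects of the singular maximal model to those of the pruned model; a sketch (commented out, mirroring the commented-out maximal model above) follows.
#illustrative sanity check: refit the (singular) maximal model and compare fixed effects
# m_1_2_max <- lmer(corrected_target_looking ~ 1 + typicality_condition_c +
#                     (1 + typicality_condition_c|sub_num) +
#                     (1|target_category),
#                   data=trial_corrected_accuracy)
# performance::check_singularity(m_1_2_max) #TRUE for the maximal model
# cbind(maximal = fixef(m_1_2_max), pruned = fixef(m_1_2)) #fixed effects virtually identical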
The cluster-based permutation analysis is executed in cluster_permutation_analysis.Rmd.
In the cluster-based permutation analysis, we found one cluster of adjacent time bins, spanning 0-200 ms, with |t| > 2 (in the direction of higher accuracy for typical exemplars compared to atypical exemplars). However, this cluster did not reach significance in the permutation test, p = .41.
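For reference, a minimal sketch of the general cluster-based permutation logic is given below (a paired t-test per time bin, clusters of adjacent bins with |t| > 2, and a null distribution built by flipping condition labels within participants). The registered implementation is in cluster_permutation_analysis.Rmd; the input format, column names, and number of permutations here are illustrative assumptions, not the registered settings.
#Illustrative sketch only -- see cluster_permutation_analysis.Rmd for the registered analysis.
#Assumes `subj_cond_time`: one row per sub_num x condition x time bin, with columns
#sub_num, condition ("typical"/"atypical"), time_bin, and mean_accuracy (uses dplyr/tidyr, loaded above).
cluster_mass <- function(t_values, threshold = 2) {
  #sum t within each run of adjacent bins with |t| > threshold; return the largest cluster mass (by absolute value)
  above <- abs(t_values) > threshold
  if (!any(above)) return(0)
  runs <- rle(above)
  ends <- cumsum(runs$lengths)
  starts <- ends - runs$lengths + 1
  masses <- mapply(function(s, e, keep) if (keep) sum(t_values[s:e]) else 0,
                   starts, ends, runs$values)
  masses[which.max(abs(masses))]
}
cluster_permutation_test <- function(subj_cond_time, n_perm = 1000, threshold = 2) {
  #one row per subject, one column per time bin, holding the typical - atypical difference
  diffs <- subj_cond_time %>%
    pivot_wider(names_from = condition, values_from = mean_accuracy) %>%
    mutate(diff = typical - atypical) %>%
    select(sub_num, time_bin, diff) %>%
    pivot_wider(names_from = time_bin, values_from = diff) %>%
    select(-sub_num) %>%
    as.matrix()
  t_per_bin <- function(m) apply(m, 2, function(x) t.test(x)$statistic) #one-sample t on differences = paired t
  observed <- cluster_mass(t_per_bin(diffs), threshold)
  null_masses <- replicate(n_perm, {
    flips <- sample(c(-1, 1), nrow(diffs), replace = TRUE) #flip each subject's condition labels
    cluster_mass(t_per_bin(diffs * flips), threshold)
  })
  mean(abs(null_masses) >= abs(observed)) #two-sided permutation p-value
}
A subject-by-condition-by-time-bin summary such as summarize_subj_condition (computed below) could serve as the input, after renaming its columns to match.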
Next, we plot the data. First, we summarize the data in two steps: (1) averaging looking within each subject at each time point, then (2) averaging across subjects at each time point.
#summarizing within subject for each time point
summarize_subj <- d_resampled %>%
filter(exclude_participant==0) %>%
filter(useable_window==1) %>%
group_by(sub_num, child_gender, time_normalized_corrected) %>%
summarize(N=n(),
mean_age=mean(age),
mean_age_mo=mean(age_mo),
non_na_n = sum(!is.na(accuracy_transformed)),
mean_accuracy=mean(accuracy_transformed,na.rm=TRUE),
ci=qt(0.975, non_na_n-1)*sd(accuracy_transformed,na.rm=T)/sqrt(non_na_n),
lower_ci=mean_accuracy-ci,
upper_ci=mean_accuracy+ci) %>%
ungroup()
#summarizing across subjects for each time point
summarize_across_subj <- summarize_subj %>%
group_by(time_normalized_corrected) %>%
dplyr::summarize(n=n(),
accuracy=mean(mean_accuracy,na.rm=TRUE),
sd_accuracy=sd(mean_accuracy,na.rm=TRUE),
se_accuracy=sd_accuracy/sqrt(n))
ggplot(summarize_across_subj,aes(time_normalized_corrected,accuracy))+
xlim(-2000,4000)+
geom_smooth(method="gam")+
geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
geom_point()+
geom_vline(xintercept=0,size=1.5)+
geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
geom_vline(xintercept=300,linetype="dotted")+
ylim(0.35,0.65)+
xlab("Time (normalized to target word onset) in ms")+
ylab("Proportion Target Looking")
ggsave(here::here("..","figures","overall_accuracy.png"),bg = "white")
summarize_across_subj_by_age <- summarize_subj %>%
mutate(age_group=cut_number(mean_age_mo,n=4)) %>%
group_by(age_group,time_normalized_corrected) %>%
dplyr::summarize(n=n(),
accuracy=mean(mean_accuracy,na.rm=TRUE),
sd_accuracy=sd(mean_accuracy,na.rm=TRUE),
se_accuracy=sd_accuracy/sqrt(n))
ggplot(summarize_across_subj_by_age,aes(time_normalized_corrected,accuracy))+
xlim(-2000,4000)+
geom_smooth(method="gam")+
geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
geom_point()+
geom_vline(xintercept=0,size=1.5)+
geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
geom_vline(xintercept=300,linetype="dotted")+
facet_wrap(~age_group)+
xlab("Time (normalized to target word onset) in ms")+
ylab("Proportion Target Looking")
ggsave(here::here("..","figures","overall_accuracy_by_age.png"),width=12, height=9,bg = "white")
summarize_subj_condition <- d_resampled %>%
filter(exclude_participant==0) %>%
filter(useable_window==1) %>%
group_by(sub_num, child_gender, condition, time_normalized_corrected) %>%
summarize(
mean_age=mean(age),
mean_age_mo=mean(age_mo),
mean_accuracy=mean(accuracy_transformed,na.rm=TRUE))
summarize_across_subj_cond <- summarize_subj_condition %>%
group_by(condition,time_normalized_corrected) %>%
summarize(n=n(),
accuracy=mean(mean_accuracy,na.rm=TRUE),
sd_accuracy=sd(mean_accuracy,na.rm=TRUE),
se_accuracy=sd_accuracy/sqrt(n))
#plot
#timecourse plot
pal <- wes_palette("Rushmore1", n=5)
timecourse_plot <- ggplot(summarize_across_subj_cond,aes(time_normalized_corrected,accuracy,color=condition))+
geom_rect(data = data.frame(xmin = 300,
xmax = 2800,
ymin = -Inf,
ymax = Inf),
aes(x=NULL, y=NULL,xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,color=NULL),
fill = "grey", alpha = 0.2)+
geom_rect(data = data.frame(xmin = -2000,
xmax = 0,
ymin = -Inf,
ymax = Inf),
aes(x=NULL, y=NULL,xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax,color=NULL),
fill = "grey", alpha = 0.2)+
geom_errorbar(aes(ymin=accuracy-se_accuracy,ymax=accuracy+se_accuracy),width=0)+
geom_point(alpha=0.5)+
geom_smooth(data=summarize_subj_condition,aes(y=mean_accuracy),method="gam")+
geom_vline(xintercept=0,size=1.5)+
geom_hline(yintercept=0.5,size=1.2,linetype="dashed")+
geom_vline(xintercept=300,linetype="dotted")+
geom_vline(xintercept=2800,linetype="dotted")+
geom_vline(xintercept=-2000,linetype="dotted")+
geom_vline(xintercept=0,linetype="dotted")+
theme(legend.position = c(0.8,0.15))+
annotate("text",label="Critical Window",x=1550,y=0.9,size=6)+
annotate("text",label="Baseline Window",x=-1000,y=0.9,size=6)+
ylim(0,1)+
#xlim(-2000,4000)+
scale_x_continuous(breaks=seq(-2000,4000,1000),limits=c(-2000,4000))+
scale_colour_manual(values=pal[c(3,4)])+
ylab("Proportion Target Looking")+
xlab("Time (centered on target word onset, in ms)")+
theme(
strip.background = element_rect(size=1, colour = "black"),
strip.text = element_text(size=16,face="bold"),
axis.title=element_text(size=20,face="bold"),
axis.text = element_text(size=14),
legend.text=element_text(size=18),
legend.title=element_text(size=18))
timecourse_plot
ggsave(here::here("..","figures","typicality_accuracy.png"),width=10,height=6,bg = "white")
#join in average age-centered variables
trial_corrected_accuracy <- trial_corrected_accuracy %>%
left_join(subj_info_multisession)
#fit main model
# the random slope for typicality condition causes a singular boundary fit
# pruning other random effects (including covariances) did not resolve the singularity,
# so we removed the random slope for typicality.
# Note that the effects for the (singular fit) model including the random slope for typicality are
# equivalent to those of the model without the random slope.
m_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c * age_mo_c +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c * age_mo_c +
## (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2268.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1736 -0.6358 -0.0231 0.6607 2.8111
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0024581 0.04958
## target_category (Intercept) 0.0002099 0.01449
## Residual 0.1191179 0.34513
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.324e-02 1.100e-02 5.178e+00 6.657 0.00101
## typicality_condition_c 1.131e-02 1.245e-02 3.015e+03 0.909 0.36366
## age_mo_c 1.852e-02 5.567e-03 8.856e+01 3.327 0.00128
## typicality_condition_c:age_mo_c 4.366e-03 8.439e-03 3.015e+03 0.517 0.60496
##
## (Intercept) **
## typicality_condition_c
## age_mo_c **
## typicality_condition_c:age_mo_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ ag_m_c
## typclty_cn_ 0.001
## age_mo_c -0.017 0.003
## typclt__:__ 0.003 -0.035 0.002
confint(m_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.051678144 0.09480310
## typicality_condition_c -0.013086269 0.03570251
## age_mo_c 0.007611899 0.02943539
## typicality_condition_c:age_mo_c -0.012174882 0.02090698
#save interim model object
m_2_tidy <- m_2 %>%
summarize_mixed_effects_model()
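summarize_mixed_effects_model() is defined in the sourced project scripts (common.R / compute_iccs.R) and is not reproduced here. As a rough illustration only (an assumption, not the project's actual definition), such a tidying helper plausibly wraps broom.mixed::tidy() with Wald confidence intervals:
#hypothetical sketch of a tidying helper; the actual function is defined in the sourced scripts
tidy_lmer_sketch <- function(model) {
  broom.mixed::tidy(model, effects = "fixed", conf.int = TRUE, conf.method = "Wald")
}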
In the trial-level linear mixed-effects model including age, typicality condition, and their interaction, we found a significant effect of age (\(\hat{\beta} = 0.02\), 95% CI \([0.01, 0.03]\), \(t(88.56) = 3.33\), \(p = .001\)), suggesting that word recognition accuracy increased with age overall. There was no significant interaction between age and typicality (\(\hat{\beta} = 0.00\), 95% CI \([-0.01, 0.02]\), \(t(3014.86) = 0.52\), \(p = .605\)), meaning that we found no evidence that the effect of typicality changed with age.
ggplot(avg_corrected_target_looking,aes(mean_age_mo,average_corrected_target_looking))+
geom_pointrange(aes(ymin=lower_ci,ymax=upper_ci),
position=position_jitter(width=0.1),
size=1.5) +
geom_hline(yintercept=0,linetype="dashed")+
geom_smooth(method="lm")+
xlab("Age (in months)")+
ylab("Baseline-Corrected Proportion Target Looking")+
ylim(-0.55,0.55)+
scale_x_continuous(breaks=seq(12,18,1))
ggsave(here::here("..","figures","age_relationship_baseline_corrected_accuracy.png"),width=7,height=6,bg = "white")
pal <- wes_palette("Rushmore1", n=5)
age_by_typicality_plot <- ggplot(avg_corrected_target_looking_by_typicality,aes(mean_age_mo,average_corrected_target_looking,color=condition,group=condition))+
geom_pointrange(aes(ymin=lower_se,ymax=upper_se),
position=position_jitter(width=0.2),
size=1) +
geom_hline(yintercept=0,linetype="dashed")+
geom_smooth(method="lm",color="black",size=1.3)+
xlab("Age (in months)")+
ylab("Baseline-Corrected\nProportion Target Looking")+
#ylim(-0.55,0.5)+
scale_colour_manual(values=pal[c(3,4)])+
scale_x_continuous(breaks=seq(12,18,1))+
facet_wrap(~condition)+
theme(
strip.background = element_rect(size=1, colour = "black"),
strip.text = element_text(size=16,face="bold"),
axis.title=element_text(size=20,face="bold"),
axis.text = element_text(size=14))+
theme(legend.position="none")
ggsave(here::here("..","figures","age_relationship_baseline_corrected_accuracy_typicality.png"),width=9,height=6,bg = "white")
age_by_typicality_plot
#combine key plots into one main figure
library(patchwork)
(overall_typicality_plot + age_by_typicality_plot) / timecourse_plot +
plot_annotation(tag_levels = 'A')+
theme(plot.tag = element_text(size = 18))
ggsave(here::here("..","figures","main_figure.png"),width=12,height=12,bg = "white")
In Aim 3, we tested whether individual differences in word recognition or in the typicality effect were predicted by differences in infants’ experience with each exemplar.
#subject details for aim 3 analysis (how many participants have survey data)
aim3_subject_info <- trial_corrected_accuracy %>%
filter(!is.na(target_parent_typicality_rating_z)) %>%
ungroup()%>%
summarize(
N = length(unique(sub_num)),
mean_age = mean(age_c),
sd_age = sd(age_c)
)
aim3_subject_info%>%
knitr::kable()
| N | mean_age | sd_age |
|---|---|---|
| 74 | -0.7274035 | 45.04816 |
#fit main model
# as before, the random slope for typicality (here, parent-rated typicality) causes a singular boundary fit
# pruning other random effects (including covariances) did not resolve the singularity,
# so we removed the random slope for target_parent_typicality_rating_z.
# Note that the effects for the (singular fit) model including the random slope are
# equivalent to those of the model without the random slope.
# In other words, omitting the random slope does not substantively change the main estimates or the pattern of results.
# m_3 <- lmer(corrected_target_looking ~ 1 + target_parent_typicality_rating_z + age_mo_c + (1+ target_parent_typicality_rating_z|sub_num) + (1|target_category), trial_corrected_accuracy)
m_3 <- lmer(corrected_target_looking ~ 1 + target_parent_typicality_rating_z + age_mo_c + (1|sub_num) + (1|target_category), trial_corrected_accuracy)
summary(m_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + target_parent_typicality_rating_z +
## age_mo_c + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2004.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.13146 -0.62555 -0.03277 0.65545 2.78542
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0026490 0.05147
## target_category (Intercept) 0.0003016 0.01737
## Residual 0.1199959 0.34640
## Number of obs: 2699, groups: sub_num, 74; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 7.355e-02 1.253e-02 5.043e+00 5.869
## target_parent_typicality_rating_z 3.840e-03 6.883e-03 2.255e+03 0.558
## age_mo_c 1.499e-02 6.070e-03 6.695e+01 2.469
## Pr(>|t|)
## (Intercept) 0.00198 **
## target_parent_typicality_rating_z 0.57703
## age_mo_c 0.01611 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) tr____
## trgt_prn___ 0.008
## age_mo_c 0.027 0.013
confint(m_3,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.048986101 0.09810614
## target_parent_typicality_rating_z -0.009651290 0.01733032
## age_mo_c 0.003090034 0.02688578
m_3_tidy <- m_3 %>%
summarize_mixed_effects_model()
Caregiver report of exemplar typicality did not significantly predict infants’ baseline-corrected word recognition accuracy (\(\hat{\beta} = 0.00\), 95% CI \([-0.01, 0.02]\), \(t(2254.94) = 0.56\), \(p = .577\)). Controlling for parent-reported typicality, age remained a significant predictor of infants’ word recognition (\(\hat{\beta} = 0.01\), 95% CI \([0.00, 0.03]\), \(t(66.95) = 2.47\), \(p = .016\)).
#quick visualization of the (non-)effect for each category
#by category
ggplot(trial_corrected_accuracy,aes(target_parent_typicality_rating_z,corrected_target_looking))+
geom_point(alpha=0.1)+
geom_smooth(method = "lm")+
facet_wrap(~target_category)
We also conducted a series of robustness analyses to probe the degree to which any results hinged on key analytic decisions.
We conducted the same main analyses as those described above (with the exception of the cluster-based permutation analysis described in analysis section 1.3), using an alternative, shorter critical window of 300-1800 ms (e.g., Fernald et al., 2008).
First, we summarize participants’ looking behavior as before, using the new critical window.
# apply exclusions based on the new critical window
trial_corrected_accuracy_short_window <- trial_corrected_accuracy_all %>%
filter(exclude_participant_insufficient_data_short==0) %>%
#only include trials that are useable based on looking in the 300-1800ms window
filter(useable_window_short==1) %>%
#ignore trial exclusions due to "insufficient looking" based on 300-2800ms window
filter(is.na(trial_exclusion_reason) | trial_exclusion_reason=="insufficient looking")
# summarize average accuracy within participant, split by typicality
avg_corrected_target_looking_by_typicality_short_window <- trial_corrected_accuracy_short_window %>%
group_by(sub_num, condition) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking_short,na.rm=TRUE),
se=sd(corrected_target_looking_short,na.rm=T)/sqrt(N),
ci=qt(0.975, N-1)*sd(corrected_target_looking_short,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
lower_se=average_corrected_target_looking-se,
upper_se=average_corrected_target_looking+se)
#baseline-corrected target looking summarized overall
overall_corrected_target_looking_by_typicality_short_window <- avg_corrected_target_looking_by_typicality_short_window %>%
group_by(condition) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci)
overall_corrected_target_looking_by_typicality_short_window %>%
knitr::kable()
| condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 82 | 0.0607253 | 0.0222173 | 0.0385080 | 0.0829426 |
| typical | 82 | 0.0805892 | 0.0227677 | 0.0578215 | 0.1033570 |
#1.1. Participant-level analysis of the typicality effect
avg_corrected_target_looking_by_typicality_short_window <- avg_corrected_target_looking_by_typicality_short_window %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
m_4_1_1_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1+ typicality_condition_c|sub_num),data=avg_corrected_target_looking_by_typicality_short_window,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_4_1_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +
## (1 + typicality_condition_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality_short_window
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -274.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.2088 -0.4208 0.1123 0.4960 1.5413
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003886 0.06234
## typicality_condition_c 0.005990 0.07739 0.05
## Residual 0.005097 0.07139
## Number of obs: 164, groups: sub_num, 82
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.070657 0.008858 80.999530 7.976 8.37e-12 ***
## typicality_condition_c 0.019864 0.014048 81.000333 1.414 0.161
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.025
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_4_1_1_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.053294920 0.08801960
## typicality_condition_c -0.007670339 0.04739821
#effect size
typicality_effect_short_window_cohens_d <- cohens_d(
filter(avg_corrected_target_looking_by_typicality_short_window,condition=="typical")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_short_window,condition=="atypical")$average_corrected_target_looking,
paired=T)
There was no significant effect of typicality in the average participant-level analysis, \(\hat{\beta} = 0.02\), 95% CI \([-0.01, 0.05]\), \(t(81.00) = 1.41\), \(p = .161\), Cohen’s d = 0.16, 95% CI [-0.06, 0.37].
# run equivalence test
overall_condition_summary_short_window <- avg_corrected_target_looking_by_typicality_short_window %>%
group_by(sub_num) %>%
summarize(
condition_diff_alternative = average_corrected_target_looking[condition=="typical"]-average_corrected_target_looking[condition=="atypical"]
) %>%
ungroup() %>%
summarize(
N=n(),
diff = mean(condition_diff_alternative),
sd = sd(condition_diff_alternative)
)
tost_results_short_window <- tsum_TOST(m1=overall_condition_summary_short_window$diff,sd1=overall_condition_summary_short_window$sd,n1=overall_condition_summary_short_window$N,eqb=0.25, eqbound_type = "SMD")
The equivalence test was non-significant, t(81) = -0.850, p = .199, as in the main analysis.
#word recognition for typical items
m_1_1_3_typ_short_window <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1+ typicality_condition_typ|sub_num),data=avg_corrected_target_looking_by_typicality_short_window,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_typ_short_window)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +
## (1 + typicality_condition_typ | sub_num)
## Data: avg_corrected_target_looking_by_typicality_short_window
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -274.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3156 -0.4411 0.1178 0.5200 1.6158
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.005136 0.07166
## typicality_condition_typ 0.004980 0.07057 0.54
## Residual 0.005601 0.07484
## Number of obs: 164, groups: sub_num, 82
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.08059 0.01144 81.00004 7.043 5.58e-10 ***
## typicality_condition_typ 0.01986 0.01405 81.00028 1.414 0.161
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.633
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_3_typ_short_window,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.058161596 0.10301686
## typicality_condition_typ -0.007670371 0.04739824
#effect size
typical_cohens_d_short_window <- cohens_d(avg_corrected_target_looking_by_typicality_short_window$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_short_window$condition=="typical"])
#word recognition for atypical items
m_1_1_3_atyp_short_window <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1+ typicality_condition_atyp|sub_num),data=avg_corrected_target_looking_by_typicality_short_window,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_atyp_short_window)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +
## (1 + typicality_condition_atyp | sub_num)
## Data: avg_corrected_target_looking_by_typicality_short_window
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -274.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.2792 -0.4342 0.1159 0.5118 1.5905
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004797 0.06926
## typicality_condition_atyp 0.005329 0.07300 -0.48
## Residual 0.005427 0.07367
## Number of obs: 164, groups: sub_num, 82
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.06073 0.01117 80.99987 5.438 5.56e-07 ***
## typicality_condition_atyp 0.01986 0.01405 80.99968 1.414 0.161
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.609
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_3_atyp_short_window,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.038839907 0.08261068
## typicality_condition_atyp -0.007670419 0.04739829
#effect size
atypical_cohens_d_short_window <- cohens_d(avg_corrected_target_looking_by_typicality_short_window$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_short_window$condition=="atypical"])
Infants showed robust recognition of both typical (\(\hat{\beta} = 0.08\), 95% CI \([0.06, 0.10]\), \(t(81) = 7.04\), \(p < .001\); Cohen’s d = 0.78, 95% CI [0.53, 1.02]) and atypical targets (\(\hat{\beta} = 0.06\), 95% CI \([0.04, 0.08]\), \(t(81.00) = 5.44\), \(p < .001\); Cohen’s d = 0.60, 95% CI [0.36, 0.83]).
# 1.2.
# trial-level analysis of the typicality effect
trial_corrected_accuracy_short_window <- trial_corrected_accuracy_short_window %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
m_4_1_1_2 <- lmer(corrected_target_looking_short ~ 1 + typicality_condition_c +
(1 |sub_num) +
(1|target_category),
data=trial_corrected_accuracy_short_window)
summary(m_4_1_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking_short ~ 1 + typicality_condition_c +
## (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_short_window
##
## REML criterion at convergence: 3174.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.72705 -0.69103 0.01208 0.74305 2.42063
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0022511 0.04745
## target_category (Intercept) 0.0004006 0.02002
## Residual 0.1622463 0.40280
## Number of obs: 3066, groups: sub_num, 82; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.217e-02 1.346e-02 4.135e+00 5.362 0.00531 **
## typicality_condition_c 1.510e-02 1.456e-02 2.996e+03 1.037 0.29961
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.000
confint(m_4_1_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.04578524 0.09854802
## typicality_condition_c -0.01343104 0.04363985
We again found no significant effect of typicality (\(\hat{\beta} = 0.02\), 95% CI \([-0.01, 0.04]\), \(t(2996.23) = 1.04\), \(p = .300\)).
# Aim 2: Typicality by Age interaction
trial_corrected_accuracy_short_window <- trial_corrected_accuracy_short_window %>%
left_join(subj_info_multisession)
m_4_1_2 <- lmer(corrected_target_looking_short ~ 1 + typicality_condition_c * age_mo_c +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy_short_window)
summary(m_4_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking_short ~ 1 + typicality_condition_c *
## age_mo_c + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_short_window
##
## REML criterion at convergence: 3180.3
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.73503 -0.69424 0.00448 0.74206 2.45879
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0016077 0.04010
## target_category (Intercept) 0.0004124 0.02031
## Residual 0.1622557 0.40281
## Number of obs: 3066, groups: sub_num, 82; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.172e-02 1.327e-02 3.775e+00 5.403 0.00669
## typicality_condition_c 1.492e-02 1.457e-02 2.997e+03 1.024 0.30577
## age_mo_c 1.853e-02 5.831e-03 8.518e+01 3.178 0.00207
## typicality_condition_c:age_mo_c 6.154e-03 9.971e-03 2.995e+03 0.617 0.53716
##
## (Intercept) **
## typicality_condition_c
## age_mo_c **
## typicality_condition_c:age_mo_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ ag_m_c
## typclty_cn_ 0.000
## age_mo_c -0.016 0.003
## typclt__:__ 0.003 -0.033 0.000
confint(m_4_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.045700747 0.09772995
## typicality_condition_c -0.013628291 0.04346818
## age_mo_c 0.007100369 0.02995773
## typicality_condition_c:age_mo_c -0.013389289 0.02569764
Age remained a significant predictor of accuracy, \(\hat{\beta} = 0.02\), 95% CI \([0.01, 0.03]\), \(t(85.18) = 3.18\), \(p = .002\). However, we found no evidence of a typicality by age interaction (\(t(2994.91) = 0.62\), \(p = .537\)).
#Aim 3
#parent typicality ratings as a predictor
m_4_1_3 <- lmer(corrected_target_looking_short ~ 1 + target_parent_typicality_rating_z + age_mo_c + (1|sub_num) + (1|target_category), trial_corrected_accuracy_short_window)
summary(m_4_1_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## corrected_target_looking_short ~ 1 + target_parent_typicality_rating_z +
## age_mo_c + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_short_window
##
## REML criterion at convergence: 2799.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.72410 -0.69864 0.01098 0.73981 2.48807
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.001527 0.03907
## target_category (Intercept) 0.000610 0.02470
## Residual 0.162857 0.40356
## Number of obs: 2692, groups: sub_num, 72; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 7.446e-02 1.533e-02 3.602e+00 4.858
## target_parent_typicality_rating_z 9.397e-03 8.048e-03 2.389e+03 1.168
## age_mo_c 1.708e-02 6.147e-03 6.816e+01 2.778
## Pr(>|t|)
## (Intercept) 0.01076 *
## target_parent_typicality_rating_z 0.24307
## age_mo_c 0.00706 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) tr____
## trgt_prn___ 0.005
## age_mo_c 0.011 0.010
m_4_1_3_tidy <- m_4_1_3 %>%
summarize_mixed_effects_model()
Caregiver report of exemplar typicality did not significantly predict infants’ baseline-corrected word recognition accuracy (\(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.03]\), \(t(2388.88) = 1.17\), \(p = .243\)). After controlling for the effect of parental report of typicality, age remained a significant predictor of infants’ word recognition (\(\hat{\beta} = 0.02\), 95% CI \([0.01, 0.03]\), \(t(68.16) = 2.78\), \(p = .007\)).
We were unable to conduct a robustness analysis excluding words marked as unknown by caregivers, because the MCDI was not administered during data collection.
To assess the consistency of typicality effects across multiple indices of word recognition, we fit the two main models from Aim 1 (sections 1.1 and 1.2 in the Analysis Plan), testing for a typicality effect at the participant level and at the trial level, using a second dependent measure: reaction time.
Reaction times are computed in 3_compute_rt.Rmd.
Here, we load in the trial-by-trial reaction time data and join it with other trial properties.
#load RT data
rt_path <- here::here("..","..","data","processed_data","CATegories_exp2_RT_by_trial.csv")
d_rt <- read_csv(rt_path)
#get some other useful metadata and combine with the RT data
d_trial_level <- d %>%
distinct(sub_num,session, trial_number,condition, age, age_mo, target_image, child_gender,target_category,exclude_technical_issue,exclude_frame_rate)
d_rt<- d_rt %>%
left_join(d_trial_level)
#participants must contribute at least 4 typical and 4 atypical RT trials to be included in the analysis
d_rt_subj_summary <- d_rt %>%
#we only care about distractor-to-target shifts (not e.g. T-D, target-distractor shifts)
filter(shift_type == "D-T")%>%
#only include RTs within the critical window
filter(rt>=300) %>%
filter(rt<=2800) %>%
#only include participants from the final sample
filter(exclude_participant==0) %>%
#exclude any trials with technical issues or frame rate issues
filter(exclude_frame_rate==0) %>%
filter(exclude_technical_issue==0) %>%
group_by(sub_num, condition) %>%
summarize(
trials = n()
) %>%
pivot_wider(names_from=condition,names_prefix="useable_rt_trials_",values_from=trials) %>%
# only include participants with at least 4 reaction time trials of each type
mutate(
sufficient_rt_trials=case_when(
is.na(useable_rt_trials_typical) ~ 0,
is.na(useable_rt_trials_atypical) ~ 0,
useable_rt_trials_typical>=4 & useable_rt_trials_atypical>=4 ~ 1,
TRUE ~ 0)
)
#add exclusionary criteria to DF
d_rt <- d_rt %>%
left_join(d_rt_subj_summary)
#apply exclusions and store the final RT dataset to use in analyses
d_rt_final <- d_rt %>%
filter(shift_type == "D-T")%>%
#only include RTs within the critical window
filter(rt>=300) %>%
filter(rt<=2800) %>%
#only include participants from the final sample
filter(exclude_participant==0) %>%
#exclude any trials with technical issues or frame rate issues
filter(exclude_frame_rate==0) %>%
filter(exclude_technical_issue==0) %>%
filter(sufficient_rt_trials==1)
hist(d_rt_final$rt)
#log-transform rt
d_rt_final <- d_rt_final %>%
mutate(
log_rt = log(rt),
log_shift_start_rt=log(shift_start_rt)
)
The data are right-skewed, which is common for RTs. We therefore use log-transformed RTs in the subsequent models to account for this skew.
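As a quick visual check (illustrative only, not part of the preregistered pipeline), the raw and log-transformed RT distributions from d_rt_final can be compared side by side:
#compare raw vs. log-transformed RT distributions (illustrative only)
par(mfrow = c(1, 2))
hist(d_rt_final$rt, main = "Raw RT", xlab = "RT (ms)")
hist(d_rt_final$log_rt, main = "Log RT", xlab = "log(RT)")
par(mfrow = c(1, 1))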
#summarize reaction time by participant and condition
avg_subj_rt <- d_rt_final %>%
group_by(sub_num, child_gender,condition) %>%
summarize(N=n(),
average_rt=mean(rt,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(rt,na.rm=T)/sqrt(N),
lower_ci=average_rt-ci,
upper_ci=average_rt+ci,
average_log_rt=mean(log_rt,na.rm=TRUE),
log_rt_ci=qt(0.975, N-1)*sd(log_rt,na.rm=T)/sqrt(N),
lower_log_rt_ci=average_log_rt-log_rt_ci,
upper_log_rt_ci=average_log_rt+log_rt_ci) %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
)
)
#overall reaction times
overall_rt <- avg_subj_rt %>%
group_by(condition) %>%
summarize(N=n(),
avg_rt=mean(average_rt,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_rt,na.rm=T)/sqrt(N),
lower_ci=avg_rt-ci,
upper_ci=avg_rt+ci)
overall_rt %>%
knitr::kable()
| condition | N | avg_rt | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 70 | 917.8128 | 64.37932 | 853.4335 | 982.1921 |
| typical | 70 | 881.3043 | 52.31590 | 828.9884 | 933.6202 |
# participant-level model testing the typicality effect
m_4_3_1 <- lmer(average_log_rt ~ 1 + typicality_condition_c + (1|sub_num),data=avg_subj_rt)
summary(m_4_3_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_log_rt ~ 1 + typicality_condition_c + (1 | sub_num)
## Data: avg_subj_rt
##
## REML criterion at convergence: -1.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.78625 -0.55526 -0.02807 0.50575 2.55587
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.02179 0.1476
## Residual 0.03696 0.1923
## Number of obs: 140, groups: sub_num, 70
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.65438 0.02399 69.00000 277.428 <2e-16 ***
## typicality_condition_c -0.03571 0.03250 69.00000 -1.099 0.276
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.000
confint(m_4_3_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sigma NA NA
## (Intercept) 6.60737115 6.70139438
## typicality_condition_c -0.09940132 0.02798935
There was no significant effect of typicality on infants’ log reaction time at the participant level (\(\hat{\beta} = -0.04\), 95% CI \([-0.10, 0.03]\), \(t(69) = -1.10\), \(p = .276\)).
d_rt_final <- d_rt_final %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
)
)
m_4_3_2 <- lmer(log_rt ~ 1 + typicality_condition_c +
(1|sub_num) +
(1|target_category),
data=d_rt_final)
summary(m_4_3_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## log_rt ~ 1 + typicality_condition_c + (1 | sub_num) + (1 | target_category)
## Data: d_rt_final
##
## REML criterion at convergence: 1617.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.16300 -0.68130 -0.07444 0.62074 2.52892
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.018391 0.13561
## target_category (Intercept) 0.003048 0.05521
## Residual 0.256901 0.50685
## Number of obs: 1051, groups: sub_num, 70; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.64363 0.03574 4.72533 185.881 2.58e-10 ***
## typicality_condition_c -0.03107 0.03164 1010.47490 -0.982 0.326
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.004
confint(m_4_3_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 6.5735742 6.71367708
## typicality_condition_c -0.0930796 0.03093668
There was no significant effect of typicality on infants’ log reaction time at the trial level (\(\hat{\beta} = -0.03\), 95% CI \([-0.09, 0.03]\), \(t(1010.47) = -0.98\), \(p = .326\)).
Given the multi-session structure of our data collection procedure, we also tested whether the results held within each test session by repeating the analyses above while including an interaction with test session (session 1 vs. session 2).
# summarize participants' overall average accuracy by session
avg_corrected_target_looking_by_session <- trial_corrected_accuracy %>%
group_by(sub_num,session,age,age_mo,days_between_sessions) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci)
# summarize average accuracy within participant, split by session and typicality
avg_corrected_target_looking_by_typicality_session <- trial_corrected_accuracy %>%
group_by(sub_num,session,age,age_mo,days_between_sessions, condition) %>%
summarize(N=n(),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
se=sd(corrected_target_looking,na.rm=T)/sqrt(N),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
lower_se=average_corrected_target_looking-se,
upper_se=average_corrected_target_looking+se,
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
critical_window_ci = qt(0.975, N-1)*sd(mean_target_looking_critical,na.rm=T)/sqrt(N),
critical_window_lower_ci=average_critical_window_looking-critical_window_ci,
critical_window_upper_ci=average_critical_window_looking+critical_window_ci)
avg_corrected_target_looking_by_typicality_session <- avg_corrected_target_looking_by_typicality_session %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
session_c = session - 1.5,
)
m_4_4_1 <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c*session_c + (1+session_c|sub_num),data=avg_corrected_target_looking_by_typicality_session)
summary(m_4_4_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c *
## session_c + (1 + session_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality_session
##
## REML criterion at convergence: -377
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.3492 -0.6440 0.0004 0.5732 2.8515
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004043 0.06359
## session_c 0.003318 0.05760 -0.67
## Residual 0.013545 0.11638
## Number of obs: 327, groups: sub_num, 84
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 0.070604 0.009479 78.851089 7.448
## typicality_condition_c 0.018873 0.012881 147.179841 1.465
## session_c 0.010308 0.014368 77.047592 0.717
## typicality_condition_c:session_c -0.045313 0.025762 147.179841 -1.759
## Pr(>|t|)
## (Intercept) 1.04e-10 ***
## typicality_condition_c 0.1450
## session_c 0.4753
## typicality_condition_c:session_c 0.0807 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ sssn_c
## typclty_cn_ 0.003
## session_c -0.197 -0.003
## typclty__:_ -0.003 0.020 0.003
confint(m_4_4_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.052024665 0.089183462
## typicality_condition_c -0.006372639 0.044118988
## session_c -0.017852724 0.038468200
## typicality_condition_c:session_c -0.095805028 0.005178225
Focusing on the participant-level model, there was no significant interaction between typicality and test session (\(\hat{\beta} = -0.05\), 95% CI \([-0.10, 0.01]\), \(t(147.18) = -1.76\), \(p = .081\)) and the effect of typicality remained non-significant (\(\hat{\beta} = 0.02\), 95% CI \([-0.01, 0.04]\), \(t(147.18) = 1.47\), \(p = .145\)).
#baseline-corrected target looking summarized overall
overall_corrected_target_looking_by_typicality_session <- avg_corrected_target_looking_by_typicality_session %>%
group_by(session,condition) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci)
overall_corrected_target_looking_by_typicality_session %>%
knitr::kable()
| session | condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|---|
| 1 | atypical | 84 | 0.0446853 | 0.0323194 | 0.0123658 | 0.0770047 |
| 1 | typical | 83 | 0.0894682 | 0.0302022 | 0.0592660 | 0.1196704 |
| 2 | atypical | 80 | 0.0772740 | 0.0274706 | 0.0498034 | 0.1047445 |
| 2 | typical | 80 | 0.0734904 | 0.0283799 | 0.0451105 | 0.1018703 |
trial_corrected_accuracy <- trial_corrected_accuracy %>%
mutate(
session_c = session - 1.5
)
m_4_4_2 <- lmer(corrected_target_looking ~ 1 + typicality_condition_c*session_c +
(1+session_c |sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_4_4_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c * session_c +
## (1 + session_c | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2273.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1781 -0.6264 -0.0213 0.6616 2.8020
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 3.143e-03 0.0560609
## session_c 9.696e-07 0.0009847 -1.00
## target_category (Intercept) 2.014e-04 0.0141929
## Residual 1.191e-01 0.3451142
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 7.368e-02 1.128e-02 5.946e+00 6.535
## typicality_condition_c 1.183e-02 1.244e-02 3.012e+03 0.950
## session_c 2.398e-03 1.258e-02 3.073e+03 0.191
## typicality_condition_c:session_c -3.383e-02 2.487e-02 3.006e+03 -1.360
## Pr(>|t|)
## (Intercept) 0.000637 ***
## typicality_condition_c 0.341975
## session_c 0.848876
## typicality_condition_c:session_c 0.173836
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ sssn_c
## typclty_cn_ 0.001
## session_c -0.016 -0.007
## typclty__:_ -0.004 -0.022 0.004
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
confint(m_4_4_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.05158176 0.09578048
## typicality_condition_c -0.01256189 0.03621429
## session_c -0.02226126 0.02705646
## typicality_condition_c:session_c -0.08258366 0.01491515
Similarly, we found no evidence for an interaction between typicality and test session in the trial-level model (\(\hat{\beta} = -0.03\), 95% CI \([-0.08, 0.01]\), \(t(3005.87) = -1.36\), \(p = .174\)).
Images were classified as typical vs. atypical based on an adult norming study. In this analysis, we investigated whether treating typicality as a continuous measure, using the norming ratings as a predictor, would provide more power to detect a typicality effect.
#trial-level model
#we first considered a model with a by-participant random slope for typicality, but this model yielded a singular fit. The results were qualitatively similar when including the random slope (marginal p-value for typicality)
m_5_1_1 <- lmer(corrected_target_looking ~ 1 + target_typicality_z +
(1|sub_num)+
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_1_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + target_typicality_z + (1 | sub_num) +
## (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2261.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1717 -0.6353 -0.0247 0.6642 2.7780
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0031531 0.05615
## target_category (Intercept) 0.0002215 0.01488
## Residual 0.1189918 0.34495
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.375e-02 1.150e-02 5.781e+00 6.413 0.000785 ***
## target_typicality_z 1.157e-02 6.369e-03 2.842e+03 1.816 0.069477 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## trgt_typcl_ 0.005
confint(m_5_1_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.0512077661 0.09628143
## target_typicality_z -0.0009169908 0.02404949
There was no significant effect of target typicality when analyzed continuously in a trial-level model (\(\hat{\beta} = 0.01\), 95% CI \([0.00, 0.02]\), \(t(2842.34) = 1.82\), \(p = .069\)).
We include a plot of the continuous typicality effect. Descriptively, there may be a decrement in looking for items at the extreme end of the typicality scale, with otherwise fairly similar target looking.
ggplot(trial_corrected_accuracy,aes(target_typicality_z,corrected_target_looking))+
geom_point(aes(color=condition))+
geom_hline(yintercept=0, linetype="dashed")+
geom_smooth()
m_5_1_2 <- lmer(corrected_target_looking ~ 1 + target_typicality_z * distractor_typicality_z+
(1+target_typicality_z : distractor_typicality_z|sub_num)+
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_1_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## corrected_target_looking ~ 1 + target_typicality_z * distractor_typicality_z +
## (1 + target_typicality_z:distractor_typicality_z | sub_num) +
## (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 2265.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1987 -0.6346 -0.0279 0.6631 2.8227
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0046687 0.06833
## target_typicality_z:distractor_typicality_z 0.0019488 0.04415
## target_category (Intercept) 0.0002635 0.01623
## Residual 0.1181687 0.34376
## Corr
##
## -0.57
##
##
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df
## (Intercept) 1.025e-01 1.652e-02 1.634e+01
## target_typicality_z 5.560e-03 1.284e-02 1.896e+03
## distractor_typicality_z 2.204e-03 1.282e-02 1.910e+03
## target_typicality_z:distractor_typicality_z -3.427e-02 1.359e-02 6.659e+01
## t value Pr(>|t|)
## (Intercept) 6.203 1.15e-05 ***
## target_typicality_z 0.433 0.6650
## distractor_typicality_z 0.172 0.8635
## target_typicality_z:distractor_typicality_z -2.521 0.0141 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trgt__ dstr__
## trgt_typcl_ 0.003
## dstrctr_ty_ -0.081 -0.862
## trgt_ty_:__ -0.691 0.003 0.111
confint(m_5_1_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.07011322 0.134883634
## target_typicality_z -0.01960237 0.030722700
## distractor_typicality_z -0.02292264 0.027331222
## target_typicality_z:distractor_typicality_z -0.06091430 -0.007631645
We also explored including distractor typicality (z-scored) and its interaction with target typicality in the model. We found a significant interaction between distractor and target typicality, \(\hat{\beta} = -0.03\), 95% CI \([-0.06, -0.01]\), \(t(66.59) = -2.52\), \(p = .014\). The interaction appeared to be driven primarily by a stronger effect of target typicality when the distractor was more atypical than when it was more typical.
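One way to probe this pattern further (not part of the planned analyses) would be to estimate the simple slope of target typicality at low vs. high values of distractor typicality, for example via the emmeans package (assuming it is installed); a minimal sketch:
#illustrative only: simple slope of target typicality at -1 SD vs. +1 SD of distractor typicality
library(emmeans)
emtrends(m_5_1_2, ~ distractor_typicality_z, var = "target_typicality_z",
         at = list(distractor_typicality_z = c(-1, 1)))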
#figure explaining interaction
p1 <- ggplot(trial_corrected_accuracy,aes(target_typicality_z,corrected_target_looking))+
geom_point(aes(color=condition),alpha=0.2)+
geom_hline(yintercept=0, linetype="dashed")+
geom_smooth(method="lm",color="black")+
facet_wrap(~condition,scales = "free")+
xlab("Target Typicality (z-scored adult ratings)")+
scale_color_manual(values=pal[c(3,4)])+
theme(legend.position="none")
p2 <- ggplot(trial_corrected_accuracy,aes(distractor_typicality_z,corrected_target_looking ))+
geom_point(aes(color=condition),alpha=0.2)+
geom_hline(yintercept=0, linetype="dashed")+
geom_smooth(method="lm",color="black")+
facet_wrap(~condition,scales = "free")+
scale_color_manual(values=pal[c(3,4)])+
xlab("Distractor Typicality (z-scored adult ratings)")+
theme(legend.position="none")
p1+p2
#more complex random effects structures yielded a singular fit
m_5_1_3 <- lmer(corrected_target_looking ~ target_parent_typicality_rating_z*distractor_parent_typicality_rating_z+ (1|sub_num) + (1|target_category), trial_corrected_accuracy)
summary(m_5_1_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ target_parent_typicality_rating_z *
## distractor_parent_typicality_rating_z + (1 | sub_num) + (1 |
## target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 1991.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.14869 -0.62769 -0.02533 0.65859 2.77724
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0030465 0.05519
## target_category (Intercept) 0.0002714 0.01648
## Residual 0.1193545 0.34548
## Number of obs: 2687, groups: sub_num, 73; target_category, 4
##
## Fixed effects:
## Estimate
## (Intercept) 8.120e-02
## target_parent_typicality_rating_z -1.303e-03
## distractor_parent_typicality_rating_z 8.234e-03
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z -1.491e-02
## Std. Error
## (Intercept) 1.294e-02
## target_parent_typicality_rating_z 7.764e-03
## distractor_parent_typicality_rating_z 7.660e-03
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z 8.121e-03
## df
## (Intercept) 6.446e+00
## target_parent_typicality_rating_z 1.667e+03
## distractor_parent_typicality_rating_z 2.353e+03
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z 2.554e+03
## t value
## (Intercept) 6.275
## target_parent_typicality_rating_z -0.168
## distractor_parent_typicality_rating_z 1.075
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z -1.836
## Pr(>|t|)
## (Intercept) 0.000576
## target_parent_typicality_rating_z 0.866739
## distractor_parent_typicality_rating_z 0.282509
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z 0.066508
##
## (Intercept) ***
## target_parent_typicality_rating_z
## distractor_parent_typicality_rating_z
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) tr____ ds____
## trgt_prn___ -0.005
## dstrctr____ -0.009 -0.461
## tr____:____ -0.271 0.046 0.028
confint(m_5_1_3,method="Wald")
## 2.5 %
## .sig01 NA
## .sig02 NA
## .sigma NA
## (Intercept) 0.05583514
## target_parent_typicality_rating_z -0.01652050
## distractor_parent_typicality_rating_z -0.00677946
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z -0.03082676
## 97.5 %
## .sig01 NA
## .sig02 NA
## .sigma NA
## (Intercept) 0.106559196
## target_parent_typicality_rating_z 0.013914418
## distractor_parent_typicality_rating_z 0.023248182
## target_parent_typicality_rating_z:distractor_parent_typicality_rating_z 0.001008613
We also investigated whether caregiver-reported exemplar typicality for both the target and the distractor (as well as their interaction) predicted baseline-corrected proportion target looking, and found no significant interaction (\(\hat{\beta} = -0.01\), 95% CI \([-0.03, 0.00]\), \(t(2554.28) = -1.84\), \(p = .067\)) and no evidence of significant main effects.
Next, we investigate category- (target word) and item-level (target image) variation in proportion target looking.
# summarize average accuracy within participant (by word alone)
avg_corrected_target_looking_by_word <- trial_corrected_accuracy %>%
group_by(sub_num, target_category) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
critical_window_ci = qt(0.975, N-1)*sd(mean_target_looking_critical,na.rm=T)/sqrt(N),
critical_window_lower_ci=average_critical_window_looking-critical_window_ci,
critical_window_upper_ci=average_critical_window_looking+critical_window_ci,
average_baseline_window_looking=mean(mean_target_looking_baseline,na.rm=TRUE),
baseline_window_ci = qt(0.975, N-1)*sd(mean_target_looking_baseline,na.rm=T)/sqrt(N),
baseline_window_lower_ci=average_baseline_window_looking-baseline_window_ci,
baseline_window_upper_ci=average_baseline_window_looking+baseline_window_ci)
# summarize average accuracy within participant (by word split by typicality)
avg_corrected_target_looking_by_typicality_word <- trial_corrected_accuracy %>%
group_by(sub_num, condition,target_category) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
average_baseline_window_looking=mean(mean_target_looking_baseline,na.rm=TRUE))
#overall summarized looking
#by word alone
overall_target_looking_by_word <- avg_corrected_target_looking_by_word %>%
group_by(target_category) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci,
target_looking_critical_window=mean(average_critical_window_looking,na.rm=TRUE),
ci_critical_window=qt(0.975, N-1)*sd(average_critical_window_looking,na.rm=T)/sqrt(N),
lower_ci_critical_window=target_looking_critical_window-ci_critical_window,
upper_ci_critical_window=target_looking_critical_window+ci_critical_window,
target_looking_baseline_window=mean(average_baseline_window_looking,na.rm=TRUE),
ci_baseline_window=qt(0.975, N-1)*sd(average_baseline_window_looking,na.rm=T)/sqrt(N),
lower_ci_baseline_window=target_looking_baseline_window-ci_baseline_window,
upper_ci_baseline_window=target_looking_baseline_window+ci_baseline_window
)
#by word split by typicality
overall_target_looking_by_typicality_word <- avg_corrected_target_looking_by_typicality_word %>%
group_by(condition,target_category) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci,
target_looking_critical_window=mean(average_critical_window_looking,na.rm=TRUE),
ci_critical_window=qt(0.975, N-1)*sd(average_critical_window_looking,na.rm=T)/sqrt(N),
lower_ci_critical_window=target_looking_critical_window-ci_critical_window,
upper_ci_critical_window=target_looking_critical_window+ci_critical_window,
target_looking_baseline_window=mean(average_baseline_window_looking,na.rm=TRUE),
ci_baseline_window=qt(0.975, N-1)*sd(average_baseline_window_looking,na.rm=T)/sqrt(N),
lower_ci_baseline_window=target_looking_baseline_window-ci_baseline_window,
upper_ci_baseline_window=target_looking_baseline_window+ci_baseline_window
)
Overall, participants showed robust recognition of all four words in both typicality conditions.
overall_target_looking_by_word %>%
select(target_category:upper_ci) %>%
knitr::kable()
| target_category | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| bird | 84 | 0.0819635 | 0.0299145 | 0.0520490 | 0.1118781 |
| cat | 84 | 0.0862806 | 0.0288659 | 0.0574147 | 0.1151466 |
| dog | 84 | 0.0475030 | 0.0253042 | 0.0221988 | 0.0728072 |
| fish | 84 | 0.0662666 | 0.0308036 | 0.0354630 | 0.0970702 |
overall_target_looking_by_typicality_word %>%
select(target_category:upper_ci) %>%
knitr::kable()
| condition | target_category | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|---|
| atypical | bird | 84 | 0.0911260 | 0.0432014 | 0.0479245 | 0.1343274 |
| atypical | cat | 84 | 0.0751083 | 0.0384676 | 0.0366407 | 0.1135759 |
| atypical | dog | 84 | 0.0251985 | 0.0393336 | -0.0141351 | 0.0645320 |
| atypical | fish | 84 | 0.0675099 | 0.0429791 | 0.0245308 | 0.1104890 |
| typical | bird | 84 | 0.0745592 | 0.0360839 | 0.0384753 | 0.1106431 |
| typical | cat | 84 | 0.1026123 | 0.0399858 | 0.0626265 | 0.1425981 |
| typical | dog | 84 | 0.0733940 | 0.0358556 | 0.0375384 | 0.1092495 |
| typical | fish | 84 | 0.0635989 | 0.0411800 | 0.0224190 | 0.1047789 |
#checking the typicality effect for bird
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="bird")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="bird")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "bird")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "bird")$average_corrected_target_looking
## t = -0.66186, df = 83, p-value = 0.5099
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.06635163 0.03321813
## sample estimates:
## mean difference
## -0.01656675
#checking the typicality effect for cat
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="cat")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="cat")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "cat")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "cat")$average_corrected_target_looking
## t = 1.0734, df = 83, p-value = 0.2862
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.02345916 0.07846713
## sample estimates:
## mean difference
## 0.02750398
#checking the typicality effect for dog
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="dog")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="dog")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "dog")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "dog")$average_corrected_target_looking
## t = 1.7661, df = 83, p-value = 0.08106
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.006082161 0.102473211
## sample estimates:
## mean difference
## 0.04819553
#checking the typicality effect for fish
t.test(
filter(avg_corrected_target_looking_by_typicality_word,condition=="typical"&target_category=="fish")$average_corrected_target_looking,
filter(avg_corrected_target_looking_by_typicality_word,condition=="atypical"&target_category=="fish")$average_corrected_target_looking,
paired=TRUE
)
##
## Paired t-test
##
## data: filter(avg_corrected_target_looking_by_typicality_word, condition == "typical" & target_category == "fish")$average_corrected_target_looking and filter(avg_corrected_target_looking_by_typicality_word, condition == "atypical" & target_category == "fish")$average_corrected_target_looking
## t = -0.1334, df = 83, p-value = 0.8942
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -0.06222364 0.05440177
## sample estimates:
## mean difference
## -0.003910936
pal <- wes_palette("Rushmore1", n=5)
set.seed(1)
jitterer <- position_jitter(width = .05,seed=1)
p3 <- ggplot(avg_corrected_target_looking_by_typicality_word,aes(x=condition,y=average_corrected_target_looking, fill=condition))+
geom_half_violin(data=filter(avg_corrected_target_looking_by_typicality_word, condition=="atypical"),position = position_nudge(x = -.1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="l")+
geom_half_violin(data=filter(avg_corrected_target_looking_by_typicality_word, condition=="typical"),position = position_nudge(x = .1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="r")+
geom_path(aes(group=sub_num),color="black",fill=NA,alpha=0.05,size=0.75,position=jitterer)+
geom_point(aes(color=condition,group=sub_num), size = 2.5, alpha=0.05,position=jitterer)+
geom_point(data=overall_target_looking_by_typicality_word,aes(y=corrected_target_looking),color="black",size=1.8)+
geom_line(data=overall_target_looking_by_typicality_word,aes(y=corrected_target_looking,group=1),color="black",size=1.5)+
geom_errorbar(data=overall_target_looking_by_typicality_word,aes(y=corrected_target_looking,ymin=lower_ci,ymax=upper_ci),width=0,color="black")+
#geom_boxplot(outlier.shape = NA, alpha = .5, width = .1, colour = "black")+
#scale_colour_brewer(palette = "Dark2")+
#scale_fill_brewer(palette = "Dark2")+
geom_hline(yintercept=0,linetype="dashed")+
scale_colour_manual(values=pal[c(3,4)])+
scale_fill_manual(values=pal[c(3,4)])+
facet_wrap(.~target_category)+
theme(legend.position="none")+
xlab("Typicality Condition")+
ylab("Baseline-Corrected\nProportion Target Looking")+
theme(axis.title.x = element_text(face="bold", size=20),
axis.text.x = element_text(size=16),
axis.title.y = element_text(face="bold", size=20),
axis.text.y = element_text(size=16),
strip.text.x = element_text(size = 16,face="bold"))
ggsave(here::here("..","figures","baseline_corrected_accuracy_by_category.png"),width=7,height=6,bg = "white")
# summarize average accuracy within participant
avg_corrected_target_looking_by_image <- trial_corrected_accuracy %>%
group_by(sub_num, condition,target_category,target_image,target_typicality_z) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
average_critical_window_looking=mean(mean_target_looking_critical,na.rm=TRUE),
average_baseline_window_looking=mean(mean_target_looking_baseline,na.rm=TRUE))
#baseline-corrected target looking summarized overall
overall_target_looking_by_image <- avg_corrected_target_looking_by_image %>%
group_by(condition,target_category,target_image,target_typicality_z) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci,
target_looking_critical_window=mean(average_critical_window_looking,na.rm=TRUE),
ci_critical_window=qt(0.975, N-1)*sd(average_critical_window_looking,na.rm=T)/sqrt(N),
lower_ci_critical_window=target_looking_critical_window-ci_critical_window,
upper_ci_critical_window=target_looking_critical_window+ci_critical_window,
target_looking_baseline_window=mean(average_baseline_window_looking,na.rm=TRUE),
ci_baseline_window=qt(0.975, N-1)*sd(average_baseline_window_looking,na.rm=T)/sqrt(N),
lower_ci_baseline_window=target_looking_baseline_window-ci_baseline_window,
upper_ci_baseline_window=target_looking_baseline_window+ci_baseline_window
) %>%
rename(target_image_name=target_image) %>%
mutate(target_image=str_replace(target_image_name,"_600x600",""),
target_image_path=here("images",paste(target_image_name,".png",sep="")))
overall_target_looking_by_image %>%
ungroup() %>%
relocate(target_image) %>%
select(-target_image_path,-target_image_name) %>%
knitr::kable()
| target_image | condition | target_category | target_typicality_z | N | corrected_target_looking | ci | lower_ci | upper_ci | target_looking_critical_window | ci_critical_window | lower_ci_critical_window | upper_ci_critical_window | target_looking_baseline_window | ci_baseline_window | lower_ci_baseline_window | upper_ci_baseline_window |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| heron | atypical | bird | -0.2912701 | 82 | 0.1336470 | 0.0687414 | 0.0649057 | 0.2023884 | 0.5533569 | 0.0556233 | 0.4977336 | 0.6089803 | 0.4197099 | 0.0407751 | 0.3789348 | 0.4604849 |
| kingfisher | atypical | bird | -0.1281589 | 79 | 0.0411643 | 0.0715193 | -0.0303550 | 0.1126836 | 0.4865531 | 0.0624677 | 0.4240854 | 0.5490208 | 0.4453888 | 0.0422521 | 0.4031367 | 0.4876409 |
| kookaburra | atypical | bird | -1.0835249 | 78 | 0.1024738 | 0.0584785 | 0.0439954 | 0.1609523 | 0.5498303 | 0.0490565 | 0.5007739 | 0.5988868 | 0.4473565 | 0.0411067 | 0.4062499 | 0.4884632 |
| cornishrex | atypical | cat | -1.1767313 | 80 | 0.0786173 | 0.0679227 | 0.0106946 | 0.1465400 | 0.5411904 | 0.0587587 | 0.4824317 | 0.5999491 | 0.4625731 | 0.0395486 | 0.4230245 | 0.5021217 |
| oriental | atypical | cat | -1.1301281 | 80 | 0.1554697 | 0.0687158 | 0.0867539 | 0.2241855 | 0.6587850 | 0.0530770 | 0.6057080 | 0.7118620 | 0.5033153 | 0.0472142 | 0.4561011 | 0.5505296 |
| sphynx | atypical | cat | -1.8058748 | 83 | 0.0042218 | 0.0681615 | -0.0639397 | 0.0723833 | 0.6178601 | 0.0498591 | 0.5680010 | 0.6677192 | 0.6136383 | 0.0431810 | 0.5704573 | 0.6568193 |
| afghanhound | atypical | dog | -1.0136201 | 79 | 0.0617888 | 0.0633279 | -0.0015390 | 0.1251167 | 0.6046304 | 0.0519757 | 0.5526547 | 0.6566061 | 0.5428416 | 0.0419047 | 0.5009369 | 0.5847462 |
| bassethound | atypical | dog | -0.5708895 | 81 | 0.0439038 | 0.0748536 | -0.0309498 | 0.1187574 | 0.6521356 | 0.0541309 | 0.5980047 | 0.7062665 | 0.6082318 | 0.0481543 | 0.5600775 | 0.6563861 |
| sheepdog | atypical | dog | -1.4563507 | 82 | -0.0222387 | 0.0621909 | -0.0844295 | 0.0399522 | 0.4799202 | 0.0506011 | 0.4293191 | 0.5305212 | 0.5021588 | 0.0420160 | 0.4601428 | 0.5441749 |
| betafish | atypical | fish | -0.5475879 | 83 | 0.0891884 | 0.0647377 | 0.0244507 | 0.1539262 | 0.5334215 | 0.0581915 | 0.4752300 | 0.5916130 | 0.4442331 | 0.0395320 | 0.4047011 | 0.4837651 |
| lionfish | atypical | fish | -0.9670168 | 80 | 0.0043154 | 0.0691845 | -0.0648691 | 0.0734998 | 0.5216443 | 0.0574138 | 0.4642305 | 0.5790581 | 0.5173290 | 0.0403106 | 0.4770184 | 0.5576396 |
| sturgeon | atypical | fish | -0.7573023 | 81 | 0.1330115 | 0.0743575 | 0.0586540 | 0.2073690 | 0.4875952 | 0.0614066 | 0.4261886 | 0.5490019 | 0.3545838 | 0.0477587 | 0.3068250 | 0.4023425 |
| cardinal | typical | bird | 0.6873975 | 82 | 0.0773277 | 0.0586860 | 0.0186416 | 0.1360137 | 0.5538145 | 0.0469000 | 0.5069145 | 0.6007144 | 0.4764868 | 0.0420795 | 0.4344073 | 0.5185663 |
| robin | typical | bird | 0.7340007 | 81 | 0.1104396 | 0.0719358 | 0.0385038 | 0.1823754 | 0.5652489 | 0.0533763 | 0.5118725 | 0.6186252 | 0.4548093 | 0.0435451 | 0.4112642 | 0.4983544 |
| sparrow | typical | bird | 1.0369217 | 83 | 0.0334298 | 0.0492813 | -0.0158515 | 0.0827112 | 0.5101554 | 0.0434339 | 0.4667215 | 0.5535893 | 0.4767255 | 0.0383011 | 0.4384245 | 0.5150266 |
| arabianmau | typical | cat | 0.8272072 | 82 | 0.0651690 | 0.0524962 | 0.0126728 | 0.1176652 | 0.5950371 | 0.0509703 | 0.5440668 | 0.6460074 | 0.5298681 | 0.0350749 | 0.4947931 | 0.5649430 |
| chartreux | typical | cat | 0.6407943 | 81 | 0.1568510 | 0.0728699 | 0.0839812 | 0.2297209 | 0.5975451 | 0.0573779 | 0.5401672 | 0.6549230 | 0.4406941 | 0.0509444 | 0.3897496 | 0.4916385 |
| tabby | typical | cat | 1.3631442 | 81 | 0.0870967 | 0.0710616 | 0.0160351 | 0.1581583 | 0.6242526 | 0.0623083 | 0.5619443 | 0.6865608 | 0.5371559 | 0.0420838 | 0.4950721 | 0.5792397 |
| beagle | typical | dog | 1.1534297 | 79 | 0.1037907 | 0.0659255 | 0.0378653 | 0.1697162 | 0.6298141 | 0.0522568 | 0.5775574 | 0.6820709 | 0.5260234 | 0.0395162 | 0.4865072 | 0.5655396 |
| germanshepherd | typical | dog | 1.1767313 | 82 | 0.0859428 | 0.0648708 | 0.0210719 | 0.1508136 | 0.5713628 | 0.0480559 | 0.5233069 | 0.6194188 | 0.4854201 | 0.0407560 | 0.4446640 | 0.5261761 |
| goldenretriever | typical | dog | 0.9670168 | 83 | 0.0325998 | 0.0576596 | -0.0250598 | 0.0902594 | 0.5494472 | 0.0464993 | 0.5029479 | 0.5959465 | 0.5168474 | 0.0412220 | 0.4756254 | 0.5580694 |
| bass | typical | fish | 0.7806040 | 80 | 0.0714446 | 0.0704467 | 0.0009979 | 0.1418913 | 0.5231518 | 0.0608834 | 0.4622683 | 0.5840352 | 0.4517072 | 0.0365771 | 0.4151300 | 0.4882843 |
| bluegill | typical | fish | 0.8039056 | 81 | 0.0705323 | 0.0644594 | 0.0060729 | 0.1349917 | 0.5662295 | 0.0534655 | 0.5127640 | 0.6196950 | 0.4956972 | 0.0432685 | 0.4524287 | 0.5389657 |
| clownfish | typical | fish | 0.7573023 | 80 | 0.0456986 | 0.0622121 | -0.0165136 | 0.1079107 | 0.5340094 | 0.0495836 | 0.4844258 | 0.5835930 | 0.4883109 | 0.0448740 | 0.4434368 | 0.5331849 |
#clean names for individual images for plot
overall_target_looking_by_image <- overall_target_looking_by_image %>%
mutate(
target_image_clean = case_when(
target_image == "oriental" ~ "Oriental cat",
target_image == "chartreux" ~ "Chartreux cat",
target_image == "betafish" ~ "Betta fish",
target_image == "tabby" ~ "Tabby cat",
target_image == "germanshepherd" ~ "German Shepherd",
target_image == "cornishrex" ~ "Cornish Rex",
target_image == "arabianmau" ~ "Arabian Mau",
target_image == "bassethound" ~ "Basset Hound",
target_image == "goldenretriever" ~ "Golden Retriever",
target_image == "sphynx" ~ "Sphynx cat",
TRUE ~ str_to_title(target_image)
)
)
ggplot(overall_target_looking_by_image,aes(reorder(target_image_clean,corrected_target_looking),corrected_target_looking))+
geom_image(aes(y=corrected_target_looking+0.1,image=target_image_path),size=.1)+
geom_hline(yintercept=0,linetype="dashed")+
geom_errorbar(aes(ymin=lower_ci,ymax=upper_ci,color=condition),width=0)+
geom_point(aes(color=condition),size=3)+
xlab("Target Image")+
ylab("Baseline-Corrected\nProportion Target Looking")+
theme(axis.title.x = element_text(face="bold", size=20),
axis.text.x = element_text(size=16,angle=90,vjust=0.5,hjust=1),
axis.title.y = element_text(face="bold", size=20),
axis.text.y = element_text(size=16),
strip.text.x = element_text(size = 16,face="bold"),
legend.position=c(0.8,0.15)
)+
scale_color_manual(values=pal[c(3,4)])
ggsave(here::here("..","figures","baseline_corrected_accuracy_by_word.png"),width=9,height=6,bg = "white")
df_for_icc <- trial_corrected_accuracy %>%
#some light renaming to use Peekbank icc function
mutate(
administration_id = sub_num
) %>%
unite("trial_id", administration_id, trial_number,remove=F) %>%
mutate(
target_label=target_image
)
#ICC for participants
icc_participants <- df_for_icc %>%
get_icc(object = "administration",column="corrected_target_looking",type_icc="consistency")
icc_participants_baseline <- df_for_icc %>%
get_icc(object = "administration",column="mean_target_looking_baseline",type_icc="consistency")
icc_participants_critical <- df_for_icc %>%
get_icc(object = "administration",column="mean_target_looking_critical",type_icc="consistency")
#ICC for stimuli
icc_stimuli <- df_for_icc %>%
get_icc(object = "stimulus",column="corrected_target_looking",type_icc="consistency")
icc_stimuli_baseline <- df_for_icc %>%
get_icc(object = "stimulus",column="mean_target_looking_baseline",type_icc="consistency")
icc_stimuli_critical <- df_for_icc %>%
get_icc(object = "stimulus",column="mean_target_looking_critical",type_icc="consistency")
In S10.2.2., we further investigated the measurement reliability of infants’ word recognition (Byers-Heinlein et al., 2022) by estimating the intraclass correlation coefficient (ICC) of baseline-corrected proportion target looking across all trials, based on a mean-rating, consistency, 2-way random-effects model (model 2A). We estimated low-to-moderate ICC values for both by-participant (ICC = 0.39) and by-item (a given target image; ICC = 0.47) consistency.
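For reference, assuming the ICC follows the standard McGraw and Wong average-measures, consistency formulation (which we take model 2A to correspond to), it is computed from the mean squares of a two-way decomposition:

\[
\mathrm{ICC}(C,k) = \frac{MS_{R} - MS_{E}}{MS_{R}},
\]

where \(MS_R\) is the between-object (participant or stimulus image) mean square, \(MS_E\) is the residual mean square, and \(k\) is the number of trials contributing to each object’s mean. Values near 0 indicate that trial-to-trial noise dominates stable differences between participants (or items); values near 1 indicate a highly consistent rank ordering.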
Participants were required to contribute at least 24 valid trials in order to be included in the final sample. Here, we explore the impact of this criterion on whether we observe a typicality effect by re-estimating the effect across a range of looser and stricter trial-based exclusion criteria.
min_trials_required_list = c(4,8,12,16,20,24,28,32,36,40,44,48)
# set up summarized dataset to use
subj_typ_data <- trial_corrected_accuracy_all %>%
#apply exclusion criteria to trials
filter(exclude_frame_rate==0) %>%
filter(exclude_technical_issue==0) %>%
filter(useable_window==1) %>%
filter(age_exclusion==0) %>%
group_by(sub_num,condition) %>%
summarize(
N=n(),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE)
) %>%
ungroup() %>%
group_by(sub_num) %>%
mutate(total_trials=sum(N,na.rm=TRUE)) %>%
pivot_wider(names_from=condition,values_from=c(N,average_corrected_target_looking)) %>%
ungroup()
#add and apply min trial exclusion criteria,
#creating nested dataframes for each minimum trial exclusion criterion
#then test the typicality effect within each dataset and store the results
typ_effect_min_valid_trials <- expand_grid(
subj_typ_data,
min_valid_trials_req = min_trials_required_list
) %>%
#apply min trial exclusion criterion
group_by(min_valid_trials_req) %>%
filter(total_trials>=min_valid_trials_req) %>%
mutate(
N=length(sub_num)
) %>%
group_by(min_valid_trials_req,N) %>%
#nest the data
nest() %>%
mutate(t_test = map(data, ~ t.test(.x$average_corrected_target_looking_typical, .x$average_corrected_target_looking_atypical,paired = T)),
result = map(t_test, tidy)
) %>%
unnest(result)
# typ_effect_min_valid_trials %>%
# knitr::kable()
## plot the result
ggplot(typ_effect_min_valid_trials, aes(min_valid_trials_req,estimate))+
geom_errorbar(aes(ymin=conf.low,ymax=conf.high),width=0)+
geom_point(size=3)+
geom_hline(yintercept=0,linetype="dashed")+
geom_text(aes(label=N),nudge_y = 0.07)+
ylab("Estimated Participant-Level Typicality Effect")+
xlab("Minimum Number of Trials Required for Inclusion")+
scale_x_continuous(breaks=min_trials_required_list)
ggsave(here::here("..","figures","typicality_effect_min_trials_for_inclusion.png"),width=9,height=6,bg = "white")
In our Stage 1 manuscript, we preregistered a sample of N=80. Our current results report data from all participants who contributed valid data, leading to a final N of 84 participants. Here, we remove the data from the final 4 participants who contributed data on Lookit and re-run the main analyses from Aim 1 (1.1 and 1.2), to ensure that the decision to include all participants did not change the outcome of the study.
CAT_343, CAT_344, CAT_345, and CAT_346 were the final 4 participants contributing data on Lookit.
subj_to_remove <- c("CAT_343","CAT_344","CAT_345","CAT_346")
trial_corrected_accuracy_red <- trial_corrected_accuracy_all %>%
filter(exclude_participant==0) %>%
filter(trial_exclusion==0) %>%
filter(!(sub_num %in% subj_to_remove))
# summarize by-participant and typicality
avg_corrected_target_looking_by_typicality_red <- trial_corrected_accuracy_red %>%
group_by(sub_num, condition) %>%
summarize(N=n(),
mean_age = mean(age),
mean_age_mo = mean(age_mo),
average_corrected_target_looking=mean(corrected_target_looking,na.rm=TRUE),
se=sd(corrected_target_looking,na.rm=T)/sqrt(N),
ci=qt(0.975, N-1)*sd(corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=average_corrected_target_looking-ci,
upper_ci=average_corrected_target_looking+ci,
lower_se=average_corrected_target_looking-se,
upper_se=average_corrected_target_looking+se)
#baseline-corrected target looking summarized overall
overall_corrected_target_looking_by_typicality_red <- avg_corrected_target_looking_by_typicality_red %>%
group_by(condition) %>%
summarize(N=n(),
corrected_target_looking=mean(average_corrected_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_corrected_target_looking,na.rm=T)/sqrt(N),
lower_ci=corrected_target_looking-ci,
upper_ci=corrected_target_looking+ci)
overall_corrected_target_looking_by_typicality_red %>%
knitr::kable()
| condition | N | corrected_target_looking | ci | lower_ci | upper_ci |
|---|---|---|---|---|---|
| atypical | 80 | 0.0607143 | 0.0201484 | 0.0405660 | 0.0808627 |
| typical | 80 | 0.0775846 | 0.0218312 | 0.0557533 | 0.0994158 |
avg_corrected_target_looking_by_typicality_red <- avg_corrected_target_looking_by_typicality_red %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
m_1_1_red <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_c + (1+ typicality_condition_c|sub_num),data=avg_corrected_target_looking_by_typicality_red,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_c +
## (1 + typicality_condition_c | sub_num)
## Data: avg_corrected_target_looking_by_typicality_red
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -296
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.19796 -0.42206 0.06897 0.39230 1.95368
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003699 0.06082
## typicality_condition_c 0.004567 0.06758 0.17
## Residual 0.004070 0.06380
## Number of obs: 160, groups: sub_num, 80
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.069149 0.008466 78.999851 8.168 4.1e-12 ***
## typicality_condition_c 0.016870 0.012603 79.000025 1.339 0.185
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.084
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.052556409 0.08574249
## typicality_condition_c -0.007830941 0.04157135
There remained no significant effect of typicality in the participant-level analysis, \(\hat{\beta} = 0.02\), 95% CI \([-0.01, 0.04]\), \(t(79) = 1.34\), \(p = .185\).
## Typical word recognition
# recentering the model on the typical condition to make the intercept interpretable
m_1_1_3_typ_red <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_typ + (1+ typicality_condition_typ|sub_num),data=avg_corrected_target_looking_by_typicality_red,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_typ_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_typ +
## (1 + typicality_condition_typ | sub_num)
## Data: avg_corrected_target_looking_by_typicality_red
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -296
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.42058 -0.46480 0.07596 0.43204 2.15156
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.004688 0.06847
## typicality_condition_typ 0.002834 0.05324 0.58
## Residual 0.004936 0.07026
## Number of obs: 160, groups: sub_num, 80
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.07758 0.01097 78.99956 7.074 5.4e-10 ***
## typicality_condition_typ 0.01687 0.01260 78.99986 1.339 0.185
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.639
## optimizer (nloptwrap) convergence code: 0 (OK)
## unable to evaluate scaled gradient
## Model failed to converge: degenerate Hessian with 1 negative eigenvalues
confint(m_1_1_3_typ_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.05608766 0.09908144
## typicality_condition_typ -0.00783094 0.04157135
#effect size
cohens_d(avg_corrected_target_looking_by_typicality_red$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_red$condition=="typical"])
## Cohen's d | 95% CI
## ------------------------
## 0.79 | [0.54, 1.04]
## Atypical word recognition
# recentering the model on the atypical condition to make the intercept interpretable
m_1_1_3_atyp_red <- lmer(average_corrected_target_looking ~ 1 + typicality_condition_atyp + (1+ typicality_condition_atyp|sub_num),data=avg_corrected_target_looking_by_typicality_red,control=lmerControl(check.nobs.vs.nlev= "ignore",check.nobs.vs.nRE= "ignore"))
summary(m_1_1_3_atyp_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: average_corrected_target_looking ~ 1 + typicality_condition_atyp +
## (1 + typicality_condition_atyp | sub_num)
## Data: avg_corrected_target_looking_by_typicality_red
## Control:
## lmerControl(check.nobs.vs.nlev = "ignore", check.nobs.vs.nRE = "ignore")
##
## REML criterion at convergence: -296
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.35893 -0.45297 0.07402 0.42103 2.09674
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.003509 0.05924
## typicality_condition_atyp 0.003331 0.05771 -0.28
## Residual 0.004688 0.06847
## Number of obs: 160, groups: sub_num, 80
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 0.06071 0.01012 78.99713 5.998 5.69e-08 ***
## typicality_condition_atyp 0.01687 0.01260 78.99018 1.339 0.185
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ -0.553
## optimizer (nloptwrap) convergence code: 0 (OK)
## Model is nearly unidentifiable: large eigenvalue ratio
## - Rescale variables?
confint(m_1_1_3_atyp_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sigma NA NA
## (Intercept) 0.040874649 0.08055405
## typicality_condition_atyp -0.007830908 0.04157132
#effect size
cohens_d(avg_corrected_target_looking_by_typicality_red$average_corrected_target_looking[avg_corrected_target_looking_by_typicality_red$condition=="atypical"])
## Cohen's d | 95% CI
## ------------------------
## 0.67 | [0.43, 0.91]
Infants recognized both typical and atypical exemplars, with similar effect sizes.
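As a complementary check on the claim of similar effect sizes, one can also quantify the within-participant typicality difference directly; below is a minimal sketch (not a preregistered analysis) using the same participant-level summaries as the models above.
#sketch: paired (within-participant) effect size of the typical - atypical difference
typicality_diffs <- avg_corrected_target_looking_by_typicality_red %>%
  select(sub_num, condition, average_corrected_target_looking) %>%
  pivot_wider(names_from = condition, values_from = average_corrected_target_looking) %>%
  mutate(diff = typical - atypical)
#one-sample Cohen's d of the paired differences against zero
cohens_d(typicality_diffs$diff)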
trial_corrected_accuracy_red <- trial_corrected_accuracy_red %>%
mutate(
typicality_condition_c = case_when(
condition == "atypical" ~ -0.5,
condition == "typical" ~ 0.5,
TRUE ~ NA_real_
),
typicality_condition_typ = case_when(
condition == "atypical" ~ -1,
condition == "typical" ~ 0,
TRUE ~ NA_real_
),
typicality_condition_atyp = case_when(
condition == "atypical" ~ 0,
condition == "typical" ~ 1,
TRUE ~ NA_real_
),
)
#a model including a by-participant random slope for typicality condition had a singular fit but yields basically identical results
m_1_2_red <- lmer(corrected_target_looking ~ 1 + typicality_condition_c +
(1 | sub_num) +
(1|target_category),
data=trial_corrected_accuracy_red)
summary(m_1_2_red)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: corrected_target_looking ~ 1 + typicality_condition_c + (1 |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy_red
##
## REML criterion at convergence: 2165.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.12871 -0.62986 -0.03006 0.65800 2.76258
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0025466 0.05046
## target_category (Intercept) 0.0001313 0.01146
## Residual 0.1202984 0.34684
## Number of obs: 2924, groups: sub_num, 80; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 7.024e-02 1.032e-02 5.969e+00 6.806 0.000504 ***
## typicality_condition_c 1.358e-02 1.284e-02 2.856e+03 1.058 0.290246
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## typclty_cn_ 0.000
confint(m_1_2_red,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.05001304 0.09046888
## typicality_condition_c -0.01158511 0.03875135
The trial-level model also yields no significant typicality effect, \(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.04]\), \(t(2856.47) = 1.06\), \(p = .290\).
We also considered alternative ways of specifying a regression model that incorporates target looking during both the baseline window and the critical window: predicting target looking during the critical window while controlling for baseline looking (5.6.1.), and predicting target looking from a model including the interaction between typicality and trial window (critical window vs. baseline window) (5.6.2.).
#model with a by-participant random slope for typicality condition yields a singular fit
#however, the model itself yields very similar results.
m_5_6_1 <- lmer(mean_target_looking_critical ~ 1 + typicality_condition_c + mean_target_looking_baseline +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_6_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## mean_target_looking_critical ~ 1 + typicality_condition_c + mean_target_looking_baseline +
## (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 914.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.40807 -0.62977 0.04601 0.70981 1.97695
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0024950 0.04995
## target_category (Intercept) 0.0009999 0.03162
## Residual 0.0764645 0.27652
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 5.182e-01 2.052e-02 6.997e+00 25.253 3.92e-08
## typicality_condition_c 1.290e-02 9.969e-03 3.010e+03 1.294 0.196
## mean_target_looking_baseline 9.146e-02 2.201e-02 3.037e+03 4.155 3.34e-05
##
## (Intercept) ***
## typicality_condition_c
## mean_target_looking_baseline ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__
## typclty_cn_ 0.002
## mn_trgt_lk_ -0.525 -0.004
confint(m_5_6_1,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.477963914 0.55839772
## typicality_condition_c -0.006643998 0.03243501
## mean_target_looking_baseline 0.048316434 0.13460452
We fit a linear mixed-effects model predicting proportion target looking during the critical window from typicality condition while controlling for proportion target looking during the baseline window. We included by-participant and by-word random intercepts. As in the main analyses, we found no significant typicality effect, \(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.03]\), \(t(3009.89) = 1.29\), \(p = .196\).
#model with the by-participant random slope for target_typicality_z yields a singular fit (though qualitatively equivalent results)
m_5_6_1_continuous <- lmer(mean_target_looking_critical ~ 1 + target_typicality_z + mean_target_looking_baseline +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_6_1_continuous)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## mean_target_looking_critical ~ 1 + target_typicality_z + mean_target_looking_baseline +
## (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 915.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.41858 -0.62784 0.04423 0.70344 1.98124
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.002498 0.04998
## target_category (Intercept) 0.001071 0.03273
## Residual 0.076429 0.27646
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 5.180e-01 2.095e-02 6.693e+00 24.723 7.90e-08
## target_typicality_z 8.605e-03 5.124e-03 3.011e+03 1.679 0.0932
## mean_target_looking_baseline 9.204e-02 2.201e-02 3.039e+03 4.181 2.98e-05
##
## (Intercept) ***
## target_typicality_z .
## mean_target_looking_baseline ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trgt__
## trgt_typcl_ -0.006
## mn_trgt_lk_ -0.514 0.015
confint(m_5_6_1_continuous,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.476896001 0.55902100
## target_typicality_z -0.001437492 0.01864668
## mean_target_looking_baseline 0.048892796 0.13517660
Similarly, there was no significant effect when treating typicality as a continuous predictor (based on z-scored adult ratings), \(\hat{\beta} = 0.01\), 95% CI \([0.00, 0.02]\), \(t(3010.79) = 1.68\), \(p = .093\).
m_5_6_1_age_int <- lmer(mean_target_looking_critical ~ 1 + typicality_condition_c*age_mo_c + mean_target_looking_baseline +
(1|sub_num) +
(1|target_category),
data=trial_corrected_accuracy)
summary(m_5_6_1_age_int)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## mean_target_looking_critical ~ 1 + typicality_condition_c * age_mo_c +
## mean_target_looking_baseline + (1 | sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
##
## REML criterion at convergence: 918.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.43095 -0.62132 0.04606 0.69385 2.01864
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.001887 0.04344
## target_category (Intercept) 0.001011 0.03179
## Residual 0.076469 0.27653
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 5.180e-01 2.040e-02 6.713e+00 25.387 6.39e-08
## typicality_condition_c 1.275e-02 9.974e-03 3.011e+03 1.279 0.201154
## age_mo_c 1.731e-02 4.640e-03 8.724e+01 3.731 0.000339
## mean_target_looking_baseline 9.119e-02 2.200e-02 3.041e+03 4.145 3.50e-05
## typicality_condition_c:age_mo_c 4.671e-03 6.763e-03 3.011e+03 0.691 0.489821
##
## (Intercept) ***
## typicality_condition_c
## age_mo_c ***
## mean_target_looking_baseline ***
## typicality_condition_c:age_mo_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ ag_m_c mn_t__
## typclty_cn_ 0.002
## age_mo_c -0.010 0.003
## mn_trgt_lk_ -0.528 -0.004 0.006
## typclt__:__ 0.002 -0.035 0.002 -0.001
confint(m_5_6_1_age_int,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sigma NA NA
## (Intercept) 0.477999275 0.55798110
## typicality_condition_c -0.006796469 0.03230111
## age_mo_c 0.008216234 0.02640510
## mean_target_looking_baseline 0.048065511 0.13430643
## typicality_condition_c:age_mo_c -0.008583667 0.01792521
Following the analytic strategy for Aim 2, we also fit a linear mixed-effects model predicting proportion target looking during the critical window from age, typicality condition, and their interaction (all centered), while also controlling for proportion target looking during the baseline window. The model included by-participant and by-item random intercepts. We found a significant effect of age (\(\hat{\beta} = 0.02\), 95% CI \([0.01, 0.03]\), \(t(87.24) = 3.73\), \(p < .001\)) and of looking during the baseline window (\(\hat{\beta} = 0.09\), 95% CI \([0.05, 0.13]\), \(t(3041.49) = 4.14\), \(p < .001\)). However, there was no significant interaction between age and typicality (\(\hat{\beta} = 0.00\), 95% CI \([-0.01, 0.02]\), \(t(3011.43) = 0.69\), \(p = .490\)).
m_5_6_1_continuous_td <- lmer(mean_target_looking_critical ~ 1 + target_typicality_z * distractor_typicality_z + mean_target_looking_baseline +
(1+target_typicality_z : distractor_typicality_z|sub_num) +
(1|target_category),
data=trial_corrected_accuracy,control=lmerControl(optimizer="bobyqa"))
summary(m_5_6_1_continuous_td)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## mean_target_looking_critical ~ 1 + target_typicality_z * distractor_typicality_z +
## mean_target_looking_baseline + (1 + target_typicality_z:distractor_typicality_z |
## sub_num) + (1 | target_category)
## Data: trial_corrected_accuracy
## Control: lmerControl(optimizer = "bobyqa")
##
## REML criterion at convergence: 899.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.46820 -0.62454 0.04141 0.71024 2.11417
##
## Random effects:
## Groups Name Variance Std.Dev.
## sub_num (Intercept) 0.0036335 0.06028
## target_typicality_z:distractor_typicality_z 0.0006615 0.02572
## target_category (Intercept) 0.0012362 0.03516
## Residual 0.0755362 0.27484
## Corr
##
## -0.61
##
##
## Number of obs: 3086, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df
## (Intercept) 0.54995 0.02378 8.27618
## target_typicality_z -0.02842 0.01041 2903.23367
## distractor_typicality_z 0.03778 0.01039 2907.84852
## mean_target_looking_baseline 0.08481 0.02198 3027.67824
## target_typicality_z:distractor_typicality_z -0.03431 0.01060 64.42116
## t value Pr(>|t|)
## (Intercept) 23.128 8.19e-09 ***
## target_typicality_z -2.731 0.006350 **
## distractor_typicality_z 3.635 0.000282 ***
## mean_target_looking_baseline 3.858 0.000117 ***
## target_typicality_z:distractor_typicality_z -3.236 0.001914 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trgt__ dstr__ mn_t__
## trgt_typcl_ -0.027
## dstrctr_ty_ -0.014 -0.866
## mn_trgt_lk_ -0.451 0.083 -0.087
## trgt_ty_:__ -0.390 -0.020 0.135 -0.004
confint(m_5_6_1_continuous_td,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.50334128 0.596551800
## target_typicality_z -0.04881197 -0.008024322
## distractor_typicality_z 0.01741192 0.058148376
## mean_target_looking_baseline 0.04172028 0.127893020
## target_typicality_z:distractor_typicality_z -0.05509629 -0.013533334
check_collinearity(m_5_6_1_continuous_td)
## # Check for Multicollinearity
##
## Low Correlation
##
## Term VIF VIF 95% CI Increased SE
## target_typicality_z 4.16 [3.91, 4.42] 2.04
## distractor_typicality_z 4.23 [3.98, 4.51] 2.06
## mean_target_looking_baseline 1.01 [1.00, 1.73] 1.00
## target_typicality_z:distractor_typicality_z 1.06 [1.03, 1.12] 1.03
## Tolerance Tolerance 95% CI
## 0.24 [0.23, 0.26]
## 0.24 [0.22, 0.25]
## 0.99 [0.58, 1.00]
## 0.94 [0.90, 0.97]
#there is some concerning multicollinearity for the main effects of target and distractor typicality
Finally, we investigated the influence of both target and distractor typicality using z-scored adult typicality ratings. We fit a linear mixed-effects model predicting proportion target looking during the critical window from target typicality ratings (z-scored), distractor typicality ratings (z-scored), and their interaction, while also controlling for proportion target looking during the baseline window. The model included by-participant and by-item random intercepts, as well as a by-participant random slope for the interaction between target and distractor typicality (more complex random effects structures yielded singular fits, albeit with comparable results). There was a significant interaction between target and distractor typicality, \(\hat{\beta} = -0.03\), 95% CI \([-0.06, -0.01]\), \(t(64.42) = -3.24\), \(p = .002\). Proportion target looking during the critical window increased as distractor typicality increased (\(\hat{\beta} = 0.04\), 95% CI \([0.02, 0.06]\), \(t(2907.85) = 3.64\), \(p < .001\)), and this effect was larger when the target was more atypical (see Figure below).
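One way to unpack this interaction (a sketch, not part of the planned analyses) is to estimate the simple slope of distractor typicality at representative values of target typicality (here ±1 SD, since the ratings are z-scored), for example with emmeans; baseline looking is held at its observed mean.
#sketch: simple slope of distractor typicality at representative target typicality values
emmeans::emtrends(m_5_6_1_continuous_td,
                  ~ target_typicality_z,
                  var = "distractor_typicality_z",
                  at = list(target_typicality_z = c(-1, 0, 1)))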
# figure explaining the interaction
p1 <- ggplot(trial_corrected_accuracy,aes(target_typicality_z,mean_target_looking_baseline))+
geom_point(aes(color=condition),alpha=0.2)+
geom_hline(yintercept=0.5, linetype="dashed")+
geom_smooth(method="lm",color="black")+
facet_wrap(~condition,scales = "free")+
scale_color_manual(values=pal[c(3,4)])+
xlab("Target Typicality (z-scored adult ratings)")+
ylab("Mean Prop Target Looking\nBaseline Window")+
theme(legend.position="none")
p2 <- ggplot(trial_corrected_accuracy,aes(distractor_typicality_z,mean_target_looking_baseline))+
geom_point(aes(color=condition),alpha=0.2)+
geom_hline(yintercept=0.5, linetype="dashed")+
geom_smooth(method="lm",color="black")+
facet_wrap(~condition,scales = "free")+
scale_color_manual(values=pal[c(3,4)])+
xlab("Distractor Typicality (z-scored adult ratings)")+
ylab("Mean Prop Target Looking\nBaseline Window")+
theme(legend.position="none")
p3 <- ggplot(trial_corrected_accuracy,aes(target_typicality_z,mean_target_looking_critical))+
geom_point(aes(color=condition),alpha=0.2)+
geom_hline(yintercept=0.5, linetype="dashed")+
geom_smooth(method="lm",color="black")+
facet_wrap(~condition,scales = "free")+
scale_color_manual(values=pal[c(3,4)])+
xlab("Target Typicality (z-scored adult ratings)")+
ylab("Mean Prop Target Looking\nCritical Window")+
theme(legend.position="none")
p4 <- ggplot(trial_corrected_accuracy,aes(distractor_typicality_z,mean_target_looking_critical))+
geom_point(aes(color=condition),alpha=0.2)+
geom_hline(yintercept=0.5, linetype="dashed")+
geom_smooth(method="lm",color="black")+
facet_wrap(~condition,scales = "free")+
scale_color_manual(values=pal[c(3,4)])+
xlab("Distractor Typicality (z-scored adult ratings)")+
ylab("Mean Prop Target Looking\nCritical Window")+
theme(legend.position="none")
(p1+p2)/(p3+p4)+
plot_annotation(tag_levels = 'A')+
theme(plot.tag = element_text(size = 18))
ggsave(here::here("..","figures","interaction_continuous_distractor_typicality_window.png"),width=10.5,height=9,bg = "white")
#pivot the dataset longer to incorporate trial window as a predictor
trial_corrected_accuracy_long_window <- trial_corrected_accuracy %>%
ungroup() %>%
#pivot longer
pivot_longer(cols=c(mean_target_looking_baseline,mean_target_looking_critical),names_to = "trial_window",values_to = "prop_target_looking") %>%
#clean up trial window
mutate(trial_window=str_remove(trial_window,"mean_target_looking_")) %>%
mutate(trial_window_c = case_when(
trial_window=="critical" ~ 0.5,
trial_window=="baseline" ~ -0.5))
# fit a linear mixed-effects model predicting proportion target looking from the interaction of trial window and typicality
#the model had singular fits when including additional random slopes for typicality condition and/or trial window (though results were qualitatively equivalent)
m_5_6_2 <- lmer(prop_target_looking ~ 1 + typicality_condition_c*trial_window_c +
(1|sub_num) +
(1+trial_window_c|target_category),
data=trial_corrected_accuracy_long_window)
summary(m_5_6_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: prop_target_looking ~ 1 + typicality_condition_c * trial_window_c +
## (1 | sub_num) + (1 + trial_window_c | target_category)
## Data: trial_corrected_accuracy_long_window
##
## REML criterion at convergence: 725.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.41988 -0.60843 0.01828 0.64687 2.17264
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.0002083 0.01443
## target_category (Intercept) 0.0013635 0.03693
## trial_window_c 0.0001874 0.01369 -0.39
## Residual 0.0652269 0.25540
## Number of obs: 6172, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 5.266e-01 1.881e-02 3.043e+00 27.990
## typicality_condition_c 7.558e-03 6.506e-03 6.118e+03 1.162
## trial_window_c 7.502e-02 9.441e-03 2.995e+00 7.947
## typicality_condition_c:trial_window_c 1.093e-02 1.300e-02 6.082e+03 0.841
## Pr(>|t|)
## (Intercept) 9.01e-05 ***
## typicality_condition_c 0.24541
## trial_window_c 0.00418 **
## typicality_condition_c:trial_window_c 0.40051
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ trl_w_
## typclty_cn_ 0.000
## tril_wndw_c -0.276 0.000
## typclt__:__ 0.000 0.000 0.001
confint(m_5_6_2,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.48976857 0.56352276
## typicality_condition_c -0.00519343 0.02030849
## trial_window_c 0.05652038 0.09352862
## typicality_condition_c:trial_window_c -0.01455489 0.03642298
We fit a linear mixed-effects model predicting proportion target looking from trial window (baseline vs. critical; centered), typicality condition (centered), and their interaction. We included by-participant and by-word random intercepts, as well as a by-word random slope for trial window (more complex random effects structures yielded singular fits). There was a significant effect of trial window, \(\hat{\beta} = 0.08\), 95% CI \([0.06, 0.09]\), \(t(2.99) = 7.95\), \(p = .004\), indicating successful word recognition (i.e., proportion target looking increased in the critical window relative to the baseline window). However, as in the main analyses, we found no significant trial window by typicality interaction, \(\hat{\beta} = 0.01\), 95% CI \([-0.01, 0.04]\), \(t(6082.30) = 0.84\), \(p = .401\).
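To see the word recognition effect within each condition, one could also contrast the two trial windows at each level of the (centered) typicality code, e.g. with emmeans; this is a sketch, and the figure below shows the same pattern in the raw data.
#sketch: critical vs. baseline contrast within each typicality condition (-0.5 = atypical, 0.5 = typical)
emm_window <- emmeans::emmeans(m_5_6_2,
                               ~ trial_window_c | typicality_condition_c,
                               at = list(trial_window_c = c(-0.5, 0.5),
                                         typicality_condition_c = c(-0.5, 0.5)))
emmeans::contrast(emm_window, method = "pairwise")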
# Figure
# summarize average accuracy within participant
subj_corrected_target_looking_by_window <- trial_corrected_accuracy_long_window %>%
group_by(sub_num, condition,trial_window) %>%
summarize(N=n(),
average_prop_target_looking=mean(prop_target_looking,na.rm=TRUE)
)
#then summarize across participants
avg_corrected_target_looking_by_window <- subj_corrected_target_looking_by_window %>%
group_by(condition,trial_window) %>%
summarize(N=n(),
mean_prop_target_looking=mean(average_prop_target_looking,na.rm=TRUE),
ci=qt(0.975, N-1)*sd(average_prop_target_looking,na.rm=T)/sqrt(N),
lower_ci=mean_prop_target_looking-ci,
upper_ci=mean_prop_target_looking+ci
)
#Create a plot
set.seed(1)
jitterer <- position_jitter(width = .05,seed=1)
overall_typicality_trial_window_plot <- ggplot(subj_corrected_target_looking_by_window,aes(x=trial_window,y=average_prop_target_looking, fill=trial_window))+
geom_half_violin(data=filter(subj_corrected_target_looking_by_window, trial_window=="baseline"),position = position_nudge(x = -.1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="l")+
geom_half_violin(data=filter(subj_corrected_target_looking_by_window, trial_window=="critical"),position = position_nudge(x = .1, y = 0), width=1,trim = FALSE, alpha = .8,color=NA,side="r")+
geom_path(aes(group=sub_num),color="black",fill=NA,alpha=0.15,size=0.75,position=jitterer)+
geom_point(aes(color=trial_window,group=sub_num), size = 1.5, alpha=0.15,position=jitterer)+
geom_point(data=avg_corrected_target_looking_by_window,aes(y=mean_prop_target_looking),color="black",size=3)+
geom_line(data=avg_corrected_target_looking_by_window,aes(y=mean_prop_target_looking,group=1),color="black",size=1.5)+
geom_errorbar(data=avg_corrected_target_looking_by_window,aes(y=mean_prop_target_looking,ymin=lower_ci,ymax=upper_ci),width=0,size=1.2,color="black")+
geom_hline(yintercept=0.5,linetype="dashed")+
theme(legend.position="none")+
xlab("Trial Window")+
ylab("Proportion Target Looking")+
theme(axis.title.x = element_text(face="bold", size=20),
axis.text.x = element_text(size=14),
axis.title.y = element_text(face="bold", size=20),
axis.text.y = element_text(size=16),
strip.text.x = element_text(size = 16,face="bold"))+
facet_wrap(~condition)
overall_typicality_trial_window_plot
ggsave(here::here("..","figures","trial_window_prop_looking_by_typicality.png"),width=7,height=6,bg = "white")
m_5_6_2_continuous <- lmer(prop_target_looking ~ 1 + target_typicality_z*trial_window_c +
(1|sub_num) +
(1+trial_window_c|target_category),
data=trial_corrected_accuracy_long_window)
summary(m_5_6_2_continuous)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: prop_target_looking ~ 1 + target_typicality_z * trial_window_c +
## (1 | sub_num) + (1 + trial_window_c | target_category)
## Data: trial_corrected_accuracy_long_window
##
## REML criterion at convergence: 726.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.42934 -0.60480 0.01884 0.64418 2.16465
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.0002091 0.01446
## target_category (Intercept) 0.0013842 0.03721
## trial_window_c 0.0002041 0.01428 -0.28
## Residual 0.0652092 0.25536
## Number of obs: 6172, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df t value
## (Intercept) 5.267e-01 1.895e-02 3.041e+00 27.789
## target_typicality_z 2.639e-03 3.345e-03 6.120e+03 0.789
## trial_window_c 7.511e-02 9.658e-03 2.995e+00 7.777
## target_typicality_z:trial_window_c 1.108e-02 6.657e-03 5.129e+03 1.665
## Pr(>|t|)
## (Intercept) 9.26e-05 ***
## target_typicality_z 0.43020
## trial_window_c 0.00445 **
## target_typicality_z:trial_window_c 0.09606 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trgt__ trl_w_
## trgt_typcl_ 0.001
## tril_wndw_c -0.201 0.000
## trgt_ty_:__ 0.000 -0.001 0.006
confint(m_5_6_2_continuous,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.489514289 0.563806072
## target_typicality_z -0.003917715 0.009196184
## trial_window_c 0.056182331 0.094042430
## target_typicality_z:trial_window_c -0.001966444 0.024126558
Similarly, there was no significant interaction between trial window and typicality when treating typicality as a continuous predictor (based on z-scored adult ratings), \(\hat{\beta} = 0.01\), 95% CI \([0.00, 0.02]\), \(t(5129.00) = 1.66\), \(p = .096\).
m_5_6_2_age_int <- lmer(prop_target_looking ~ 1 + typicality_condition_c*trial_window_c*age_mo_c +
(1|sub_num) +
(1+trial_window_c|target_category),
data=trial_corrected_accuracy_long_window)
summary(m_5_6_2_age_int)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: prop_target_looking ~ 1 + typicality_condition_c * trial_window_c *
## age_mo_c + (1 | sub_num) + (1 + trial_window_c | target_category)
## Data: trial_corrected_accuracy_long_window
##
## REML criterion at convergence: 732
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.58319 -0.60302 0.01998 0.64816 2.16090
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 8.646e-05 0.009298
## target_category (Intercept) 1.371e-03 0.037028
## trial_window_c 1.995e-04 0.014125 -0.35
## Residual 6.506e-02 0.255068
## Number of obs: 6172, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate Std. Error df
## (Intercept) 5.264e-01 1.883e-02 3.015e+00
## typicality_condition_c 7.576e-03 6.500e-03 6.118e+03
## trial_window_c 7.411e-02 9.597e-03 2.999e+00
## age_mo_c 7.900e-03 2.308e-03 8.321e+01
## typicality_condition_c:trial_window_c 1.098e-02 1.300e-02 6.079e+03
## typicality_condition_c:age_mo_c 2.552e-03 4.407e-03 6.118e+03
## trial_window_c:age_mo_c 1.856e-02 4.406e-03 6.078e+03
## typicality_condition_c:trial_window_c:age_mo_c 4.406e-03 8.812e-03 6.077e+03
## t value Pr(>|t|)
## (Intercept) 27.964 9.68e-05 ***
## typicality_condition_c 1.166 0.243832
## trial_window_c 7.722 0.004520 **
## age_mo_c 3.423 0.000965 ***
## typicality_condition_c:trial_window_c 0.845 0.398212
## typicality_condition_c:age_mo_c 0.579 0.562534
## trial_window_c:age_mo_c 4.212 2.57e-05 ***
## typicality_condition_c:trial_window_c:age_mo_c 0.500 0.617097
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) typc__ trl_w_ ag_m_c typclty_cndtn_c:t__
## typclty_cn_ 0.000
## tril_wndw_c -0.253 0.000
## age_mo_c -0.006 0.005 0.000
## typclty_cndtn_c:t__ 0.000 0.000 0.001 0.000
## typclty_cndtn_c:g__ 0.001 -0.034 0.000 0.002 0.000
## trl_wnd_:__ 0.000 0.000 -0.023 0.000 0.005
## typ__:__:__ 0.000 0.000 0.003 0.000 -0.034
## typclty_cndtn_c:g__ tr__:__
## typclty_cn_
## tril_wndw_c
## age_mo_c
## typclty_cndtn_c:t__
## typclty_cndtn_c:g__
## trl_wnd_:__ 0.000
## typ__:__:__ 0.000 0.002
confint(m_5_6_2_age_int,method="Wald")
## 2.5 % 97.5 %
## .sig01 NA NA
## .sig02 NA NA
## .sig03 NA NA
## .sig04 NA NA
## .sigma NA NA
## (Intercept) 0.489529817 0.56332395
## typicality_condition_c -0.005163418 0.02031552
## trial_window_c 0.055299431 0.09291779
## age_mo_c 0.003376168 0.01242474
## typicality_condition_c:trial_window_c -0.014491637 0.03645173
## typicality_condition_c:age_mo_c -0.006085328 0.01118957
## trial_window_c:age_mo_c 0.009922319 0.02719462
## typicality_condition_c:trial_window_c:age_mo_c -0.012865403 0.02167741
There was also no three-way interaction between age, trial window, and typicality, \(\hat{\beta} = 0.00\), 95% CI \([-0.01, 0.02]\), \(t(6077.38) = 0.50\), \(p = .617\). Note that, consistent with alternative analytic strategies, there was a significant age by trial window interaction, suggesting that word recognition improved with age, \(\hat{\beta} = 0.02\), 95% CI \([0.01, 0.03]\), \(t(6078.41) = 4.21\), \(p < .001\).
Below is a quick and dirty plot depicting the age effect.
#quick plot of the age effect
ggplot(trial_corrected_accuracy_long_window, aes(age_mo,prop_target_looking,color=trial_window,linetype=condition))+
geom_hline(yintercept=0.5,linetype="dashed")+
geom_smooth(method="loess")
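As a complement to the plot, one could probe the age by trial window interaction by estimating the window effect (critical minus baseline, given the ±0.5 coding) at a few representative centered ages; a sketch with emmeans, where the example values of ±3 months are arbitrary.
#sketch: window effect at representative (centered) ages; -3/0/+3 months are arbitrary example values
emmeans::emtrends(m_5_6_2_age_int,
                  ~ age_mo_c,
                  var = "trial_window_c",
                  at = list(age_mo_c = c(-3, 0, 3)))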
m_5_6_2_continuous_td <- lmer(prop_target_looking ~ 1 + target_typicality_z*distractor_typicality_z*trial_window_c +
(1|sub_num) +
(1+trial_window_c|target_category),
data=trial_corrected_accuracy_long_window)
summary(m_5_6_2_continuous_td)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula:
## prop_target_looking ~ 1 + target_typicality_z * distractor_typicality_z *
## trial_window_c + (1 | sub_num) + (1 + trial_window_c | target_category)
## Data: trial_corrected_accuracy_long_window
##
## REML criterion at convergence: 701
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.56191 -0.60783 0.02477 0.64758 2.31574
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## sub_num (Intercept) 0.0002184 0.01478
## target_category (Intercept) 0.0013310 0.03648
## trial_window_c 0.0002408 0.01552 0.04
## Residual 0.0646516 0.25427
## Number of obs: 6172, groups: sub_num, 84; target_category, 4
##
## Fixed effects:
## Estimate
## (Intercept) 5.409e-01
## target_typicality_z -3.497e-02
## distractor_typicality_z 4.061e-02
## trial_window_c 1.051e-01
## target_typicality_z:distractor_typicality_z -1.734e-02
## target_typicality_z:trial_window_c 4.801e-03
## distractor_typicality_z:trial_window_c 2.787e-03
## target_typicality_z:distractor_typicality_z:trial_window_c -3.587e-02
## Std. Error
## (Intercept) 1.941e-02
## target_typicality_z 6.787e-03
## distractor_typicality_z 6.777e-03
## trial_window_c 1.482e-02
## target_typicality_z:distractor_typicality_z 6.626e-03
## target_typicality_z:trial_window_c 1.339e-02
## distractor_typicality_z:trial_window_c 1.338e-02
## target_typicality_z:distractor_typicality_z:trial_window_c 1.295e-02
## df t value
## (Intercept) 3.578e+00 27.872
## target_typicality_z 6.077e+03 -5.153
## distractor_typicality_z 6.073e+03 5.992
## trial_window_c 1.206e+01 7.093
## target_typicality_z:distractor_typicality_z 6.005e+03 -2.618
## target_typicality_z:trial_window_c 2.481e+03 0.358
## distractor_typicality_z:trial_window_c 2.515e+03 0.208
## target_typicality_z:distractor_typicality_z:trial_window_c 1.159e+03 -2.769
## Pr(>|t|)
## (Intercept) 2.58e-05 ***
## target_typicality_z 2.65e-07 ***
## distractor_typicality_z 2.19e-09 ***
## trial_window_c 1.23e-05 ***
## target_typicality_z:distractor_typicality_z 0.00887 **
## target_typicality_z:trial_window_c 0.72006
## distractor_typicality_z:trial_window_c 0.83498
## target_typicality_z:distractor_typicality_z:trial_window_c 0.00571 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) trgt__ dstr__ trl_w_ trgt_typclty_z:d__
## trgt_typcl_ 0.009
## dstrctr_ty_ -0.043 -0.865
## tril_wndw_c 0.017 0.000 0.000
## trgt_typclty_z:d__ -0.286 -0.023 0.143 0.000
## trgt_typclty_z:t__ 0.000 0.000 0.000 0.001 0.000
## dstrct__:__ 0.000 0.000 0.000 -0.090 0.000
## trg__:__:__ 0.000 0.000 0.000 -0.731 0.000
## trgt_typclty_z:t__ d__:__
## trgt_typcl_
## dstrctr_ty_
## tril_wndw_c
## trgt_typclty_z:d__
## trgt_typclty_z:t__
## dstrct__:__ -0.862
## trg__:__:__ 0.008 0.116
confint(m_5_6_2_continuous_td,method="Wald")
## 2.5 %
## .sig01 NA
## .sig02 NA
## .sig03 NA
## .sig04 NA
## .sigma NA
## (Intercept) 0.50286423
## target_typicality_z -0.04827622
## distractor_typicality_z 0.02732843
## trial_window_c 0.07604565
## target_typicality_z:distractor_typicality_z -0.03033187
## target_typicality_z:trial_window_c -0.02145040
## distractor_typicality_z:trial_window_c -0.02343037
## target_typicality_z:distractor_typicality_z:trial_window_c -0.06126074
## 97.5 %
## .sig01 NA
## .sig02 NA
## .sig03 NA
## .sig04 NA
## .sigma NA
## (Intercept) 0.578935321
## target_typicality_z -0.021669854
## distractor_typicality_z 0.053894778
## trial_window_c 0.134117759
## target_typicality_z:distractor_typicality_z -0.004359042
## target_typicality_z:trial_window_c 0.031051603
## distractor_typicality_z:trial_window_c 0.029003946
## target_typicality_z:distractor_typicality_z:trial_window_c -0.010482963
To explore the influence of both target and distractor typicality (analogously to the analyses in 5.3.), we fit a model predicting proportion target looking from continuous target image typicality (adult norms, z-scored), continuous distractor image typicality (adult norms, z-scored), trial window (centered), and their interactions. The model included by-participant and by-word random intercepts, as well as a by-word random slope for trial window. There was a significant three-way interaction between target typicality, distractor typicality, and trial window, \(\hat{\beta} = -0.04\), 95% CI \([-0.06, -0.01]\), \(t(1158.74) = -2.77\), \(p = .006\). There was also a significant overall effect of distractor typicality on target looking, \(\hat{\beta} = 0.04\), 95% CI \([0.03, 0.05]\), \(t(6072.94) = 5.99\), \(p < .001\), and this effect was stronger when the target was more atypical, \(\hat{\beta} = -0.02\), 95% CI \([-0.03, 0.00]\), \(t(6005.27) = -2.62\), \(p = .009\). The three-way interaction indicates that this target-by-distractor typicality interaction was stronger in the critical window than in the baseline window: target looking increased with distractor typicality, and this increase was most pronounced in the critical window when the target was atypical.
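To make the three-way pattern concrete (again a sketch, not part of the planned analyses), one could estimate the simple slope of distractor typicality at ±1 SD of target typicality separately for each trial window:
#sketch: distractor typicality slope by target typicality (±1 SD) and trial window (-0.5 = baseline, 0.5 = critical)
emmeans::emtrends(m_5_6_2_continuous_td,
                  ~ target_typicality_z | trial_window_c,
                  var = "distractor_typicality_z",
                  at = list(target_typicality_z = c(-1, 1),
                            trial_window_c = c(-0.5, 0.5)))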
We also conducted an exploratory analysis investigating the degree to which the visual similarity between the target image and the distractor image affected infants’ word recognition. We predicted that recognizing the target should be more difficult (i.e., lower proportion target looking during the critical window) the more visually similar the distractor image was to the target image. To estimate visual similarity for each target-distractor image pair, we used DreamSim (https://dreamsim-nights.github.io/; Fu et al., 2023) to generate predictions of perceptual distance; in this metric, lower distance indicates higher perceptual similarity. We then used this distance metric as a predictor in a linear mixed-effects model predicting trial-by-trial proportion target looking during the critical window. See the analysis script categories_dreamsim_distances.R for further details.
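For orientation, here is a minimal sketch of what such a model could look like, assuming a hypothetical trial-level column dreamsim_distance holding the DreamSim perceptual distance for each target-distractor pair (the actual analysis lives in categories_dreamsim_distances.R).
#sketch only: dreamsim_distance is a hypothetical column name; see categories_dreamsim_distances.R for the real analysis
m_dreamsim_sketch <- lmer(mean_target_looking_critical ~ 1 + dreamsim_distance + mean_target_looking_baseline +
                            (1 | sub_num) +
                            (1 | target_category),
                          data = trial_corrected_accuracy)
summary(m_dreamsim_sketch)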
sessionInfo()
## R version 4.3.2 (2023-10-31)
## Platform: aarch64-apple-darwin20 (64-bit)
## Running under: macOS Sonoma 14.4.1
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/lib/libRlapack.dylib; LAPACK version 3.11.0
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## time zone: America/New_York
## tzcode source: internal
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] patchwork_1.2.0 agreement_0.0.0.9003 performance_0.10.9
## [4] broom_1.0.5 sessioninfo_1.2.2 ggimage_0.3.3
## [7] effectsize_0.8.6 papaja_0.1.2 tinylabels_0.2.4
## [10] broom.mixed_0.2.9.4 JWileymisc_1.4.1 multilevelTools_0.1.1
## [13] car_3.1-2 carData_3.0-5 gghalves_0.1.4
## [16] wesanderson_0.3.7 TOSTER_0.8.0 lmerTest_3.1-3
## [19] lme4_1.1-35.1 Matrix_1.6-5 readxl_1.4.3
## [22] here_1.0.1 cowplot_1.1.3 janitor_2.2.0
## [25] lubridate_1.9.3 forcats_1.0.0 stringr_1.5.1
## [28] dplyr_1.1.4 purrr_1.0.2 readr_2.1.5
## [31] tidyr_1.3.1 tibble_3.2.1 ggplot2_3.5.0
## [34] tidyverse_2.0.0
##
## loaded via a namespace (and not attached):
## [1] splines_4.3.2 polspline_1.1.24 ggplotify_0.1.2
## [4] cellranger_1.1.0 datawizard_0.9.1 rpart_4.1.21
## [7] lifecycle_1.0.4 rstatix_0.7.2 rprojroot_2.0.4
## [10] vroom_1.6.5 globals_0.16.2 lattice_0.21-9
## [13] MASS_7.3-60 insight_0.19.8 ggdist_3.3.1
## [16] backports_1.4.1 magrittr_2.0.3 Hmisc_5.1-1
## [19] sass_0.4.8 rmarkdown_2.25 jquerylib_0.1.4
## [22] yaml_2.3.8 minqa_1.2.6 multcomp_1.4-25
## [25] abind_1.4-5 quadprog_1.5-8 yulab.utils_0.1.4
## [28] nnet_7.3-19 TH.data_1.1-2 sandwich_3.1-0
## [31] listenv_0.9.1 MatrixModels_0.5-3 parallelly_1.37.0
## [34] codetools_0.2-19 tidyselect_1.2.0 shape_1.4.6
## [37] farver_2.1.1 stats4_4.3.2 base64enc_0.1-3
## [40] jsonlite_1.8.8 fst_0.9.8 mitml_0.4-5
## [43] Formula_1.2-5 survival_3.5-7 iterators_1.0.14
## [46] emmeans_1.10.0 systemfonts_1.0.5 foreach_1.5.2
## [49] tools_4.3.2 ragg_1.2.7 Rcpp_1.0.12
## [52] glue_1.7.0 mnormt_2.1.1 gridExtra_2.3
## [55] pan_1.9 xfun_0.42 mgcv_1.9-0
## [58] distributional_0.4.0 ggthemes_5.1.0 withr_3.0.0
## [61] numDeriv_2016.8-1.1 fastmap_1.1.1 boot_1.3-28.1
## [64] fansi_1.0.6 SparseM_1.81 digest_0.6.34
## [67] timechange_0.3.0 R6_2.5.1 gridGraphics_0.5-1
## [70] estimability_1.5 textshaping_0.3.7 mice_3.16.0
## [73] colorspace_2.1-0 utf8_1.2.4 generics_0.1.3
## [76] data.table_1.15.0 robustbase_0.99-2 htmlwidgets_1.6.4
## [79] parameters_0.21.5 pkgconfig_2.0.3 gtable_0.3.4
## [82] furrr_0.3.1 htmltools_0.5.7 lavaan_0.6-17
## [85] multcompView_0.1-10 scales_1.3.0 snakecase_0.11.1
## [88] ggfun_0.1.4 knitr_1.45 rstudioapi_0.15.0
## [91] tzdb_0.4.0 coda_0.19-4.1 checkmate_2.3.1
## [94] nlme_3.1-163 nloptr_2.0.3 cachem_1.0.8
## [97] zoo_1.8-12 parallel_4.3.2 foreign_0.8-85
## [100] pillar_1.9.0 grid_4.3.2 vctrs_0.6.5
## [103] VGAM_1.1-10 ggpubr_0.6.0 jomo_2.7-6
## [106] xtable_1.8-4 cluster_2.1.4 htmlTable_2.4.2
## [109] evaluate_0.23 pbivnorm_0.6.0 extraoperators_0.3.0
## [112] magick_2.8.3 mvtnorm_1.2-4 cli_3.6.2
## [115] compiler_4.3.2 crayon_1.5.2 rlang_1.1.3
## [118] rms_6.7-1 ggsignif_0.6.4 labeling_0.4.3
## [121] fs_1.6.3 stringi_1.8.3 psych_2.4.1
## [124] assertthat_0.2.1 munsell_0.5.0 glmnet_4.1-8
## [127] bayestestR_0.13.2 quantreg_5.97 fstcore_0.9.18
## [130] hms_1.1.3 bit64_4.0.5 future_1.33.1
## [133] highr_0.10 memoise_2.0.1 bslib_0.6.1
## [136] bit_4.0.5 DEoptimR_1.1-3