Warnings and messages are suppressed in all code chunks to keep the output readable. This file contains the analyses of the test trials; for the analyses of the practice trials, please see this link
Load the libraries used throughout these analyses.
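The setup chunk itself is not echoed here. A sketch of the packages this file appears to rely on, inferred from the calls below (the exact set loaded in the original script is an assumption):
library(tidyverse)     # dplyr, tidyr, ggplot2, stringr verbs used throughout
library(eyetrackingR)  # make_eyetrackingr_data(), subset_by_window(), clean_by_trackloss()
library(brms)          # Bayesian beta regressions, inv_logit_scaled()
library(tidybayes)     # geom_pointinterval()
library(wesanderson)   # wes_palette() colour scales
library(patchwork)     # the p1 | p3 plot layouts
library(car)           # logit() -- an assumption; boot::logit() would also work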
Open the data files.
Drop participants with hearing loss or other developmental disabilities.
full_2 <- filter(full_2, !ParticipantName %in% c("BI8sFIfv", "rqqYhcLZ", "9oXjRYVw", "opeMB1zY"))
How many missing windows are there per participant?
table(Part_To_Drop$n_miss)
##
## 0 1 2 3 4
## 81 13 9 4 6
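Part_To_Drop is created in a chunk that is not echoed in this file. A hypothetical reconstruction of the kind of computation it performs, counting per participant how many of the four trial-by-window combinations exceed the 66% trackloss threshold (the window boundaries and grouping here are assumptions):
# Hypothetical reconstruction; the actual chunk is not shown above.
Part_To_Drop <- full_2 %>%
  mutate(Win = case_when(TrialTime >= 19000 & TrialTime < 27000 ~ 1,
                         TrialTime >= 32000 & TrialTime < 40000 ~ 2)) %>%
  filter(!is.na(Win)) %>%
  group_by(ParticipantName, MediaName, Win) %>%
  summarise(prop_loss = mean(TrackLoss), .groups = "drop") %>%
  group_by(ParticipantName) %>%
  summarise(n_miss = sum(prop_loss > .66), .groups = "drop")
full_2 <- left_join(full_2, Part_To_Drop, by = "ParticipantName")  # so subset() below can see n_miss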
Drop participants with more than 66% trackloss in two or more windows.
full_2 <- subset(full_2, n_miss < 2)
n_distinct(full_2$ParticipantName)
## [1] 92
Convert the dataset to eyetrackingR format and add variables for Window and Time Bin. Also, flag problematic trials.
SBdata <- make_eyetrackingr_data(full_2,
participant_column = "ParticipantName",
trial_column = "MediaName",
time_column = "TrialTime",
aoi_columns = c('AOI_Target','AOI_Distracter'),
trackloss_column = "TrackLoss",
treat_non_aoi_looks_as_missing = TRUE
)
## Converting Participants to proper type.
## Converting Trial to proper type.
## Converting Trackloss to proper type.
## Warning: `as_data_frame()` was deprecated in tibble 2.0.0.
## Please use `as_tibble()` instead.
## The signature and semantics have changed, see `?as_tibble`.
SB_early <- subset_by_window(SBdata,
window_start_time=19000, window_end_time=27000)
## Avg. window length in new data will be 8000
SB_early$Window = 1
SB_late <- subset_by_window(SBdata,
window_start_time=32000, window_end_time=40000)
## Avg. window length in new data will be 8000
SB_late$Window = 2
SB_full <- rbind(SB_early, SB_late)
# Assign each sample to a 2-second time bin within its 8-second window
# (subset_by_window re-zeroes TrialTime to 0-8000 ms by default).
SB_full$TimeBin <- rep(0, nrow(SB_full))
for(i in 1:nrow(SB_full)){
if(SB_full$TrialTime[i] >= 2000 & SB_full$TrialTime[i] < 4000) SB_full$TimeBin[i] = 1
if(SB_full$TrialTime[i] >= 4000 & SB_full$TrialTime[i] < 6000) SB_full$TimeBin[i] = 2
if(SB_full$TrialTime[i] >= 6000 & SB_full$TrialTime[i] < 8000) SB_full$TimeBin[i] = 3
}
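For reference, a vectorized equivalent of this loop (a sketch; unlike the loop, a sample at exactly 8000 ms would land in bin 3 here rather than bin 0):
# Integer-divide the re-zeroed time into 2000 ms bins, capped at bin 3.
SB_full$TimeBin <- pmin(SB_full$TrialTime %/% 2000, 3)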
SB_full$problem_window <- ifelse(SB_full$MediaName=="Seq4_Trial1.wmv" & SB_full$Trial==1 & SB_full$Window==1, yes=1, no=0)
SB_full<- subset(SB_full, problem_window==0) # Removed window with error.
trackloss_analysis(SB_full)
Drop windows with greater than 66% missing data.
SB_full_clean <- clean_by_trackloss(SB_full, trial_prop_thresh=.66)
## Performing Trackloss Analysis...
## Will exclude trials whose trackloss proportion is greater than : 0.66
## ...removed 3 trials.
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
group_by(MediaName) %>%
count()
## Analyzing AOI_Target...
## Warning: `group_by_()` was deprecated in dplyr 0.7.0.
## Please use `group_by()` instead.
## See vignette('programming') for more help
## Warning: `summarise_()` was deprecated in dplyr 0.7.0.
## Please use `summarise()` instead.
## Analyzing AOI_Distracter...
Another 3 trials with more than 66% trackloss were removed.
First model, intercepts only: does the overall proportion of looks to the target differ from chance? Because the beta model uses a logit link, an intercept of 0 corresponds to a proportion of .5, i.e., chance.
m1 <-SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName), family=Beta, data=.,
file = "models/m1")
summary(m1)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName)
## Data: . (Number of observations: 181)
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup samples = 4000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 17)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.19 0.08 0.04 0.37 1.00 759 591
##
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.08 0.06 0.00 0.21 1.00 1498 1815
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.02 0.07 -0.12 0.15 1.00 2293 2316
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 10.93 1.22 8.73 13.49 1.00 3248 2630
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
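Converting the population-level intercept back to the probability scale as a quick check:
inv_logit_scaled(0.02)  # ~0.505, essentially chance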
hypothesis(m1, "Intercept > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Intercept) > 0 0.02 0.07 -0.1 0.13 1.56 0.61
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
Note to self (and any curious reader): MediaName has 17 levels, not 16. There were originally 16 distinct videos, but one was edited halfway through the experiment because of a problem with the video (see the description of the IMPL task in the paper). Only the first half of that edited trial was removed. I have kept separate grouping factors for data from the fixed version and the remaining data from the original version.
I’m going to plot the distribution of random intercepts by media item, converted to the probability scale, to see whether any video attracts a particularly high or low proportion of looks to the target.
(p_random <- posterior_samples(m1) %>%
select(starts_with("r_MediaName") & ends_with("Intercept]")) %>%
mutate(sample = 1:4000) %>%
pivot_longer(-sample,
names_to="Trial",
values_to="Y") %>%
mutate(
Trial = str_sub(Trial, 13, 27),
Prop = inv_logit_scaled(Y + .05)
) %>%
ggplot(aes(x=Trial, y=Prop)) + geom_boxplot() +
geom_hline(yintercept=.5))
Nothing jumps out: there’s certainly variability between videos, but everything falls pretty much between .45 and .55.
Because this effect does not differ from 0, I decided to control for the proportion of looks to the target image when it was the distracter. In this case the intercept should indicate how much more participants looked, relative to chance, when the action was the target vs. when it was the distracter.
Looks_dis <- SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns="Distractor") %>%
filter(AOI=="AOI_Distracter") %>%
group_by(Distractor) %>%
mutate(
Dis_Prop = mean(Prop),
Logit.s = logit(Dis_Prop)
) %>%
slice(1) %>%
select(Distractor, Dis_Prop, Logit.s) %>%
rename(Target=Distractor)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
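logit() is not a base R function; it presumably comes from one of the loaded packages (car and boot both provide one -- an assumption, since the setup chunk is not echoed). The base R equivalent would be:
Logit.s = qlogis(Dis_Prop)  # identical to car::logit()/boot::logit() for values strictly inside (0, 1)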
Here is the model controlling for this effect.
m1b <-SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Target", "Logit.s")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ Logit.s + (1 + Logit.s|| ParticipantName) + (1 || MediaName), family=Beta, data=.,
file = "models/m1b")
summary(m1b)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ Logit.s + (1 + Logit.s || ParticipantName) + (1 || MediaName)
## Data: . (Number of observations: 181)
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup samples = 4000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 17)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.16 0.08 0.02 0.33 1.00 922 1091
##
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.08 0.06 0.00 0.22 1.00 1721 2478
## sd(Logit.s) 0.77 0.49 0.04 1.78 1.00 1297 2276
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.05 0.07 -0.08 0.18 1.00 5003 3184
## Logit.s 0.82 0.43 -0.03 1.70 1.00 5096 3203
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 11.55 1.43 9.03 14.63 1.00 2657 2754
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m1b, "Intercept > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Intercept) > 0 0.05 0.07 -0.06 0.16 3.91 0.8
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
This doesn’t seem to make a huge difference, but I will include it in subsequent models at the request of a reviewer.
In order to calculate Bayes factors comparing null and alternative models, we need more specific priors.
int_prior1 <- prior(normal(.4, .15), class="Intercept")
int_prior2 <- prior(normal(0, .05), class="Intercept")
int_prior3 <- prior(exponential(1), class="Intercept")
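As a quick numeric check, the prior means map onto the probability scale as follows:
inv_logit_scaled(c(0, .4))  # null centered on .50, alternative 1 centered on ~.60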
To understand these priors, I will convert them to the probability scale and plot them below, with Xs marking sample means from Gertner, Fisher, and Eisengart (2006).
Comparing the null to alternative hypothesis 1.
theme_set(theme_minimal())
Existing_data <- tibble(
P = c(.56, .56, .70, .64),
L = c("Study 1", "Study 2", "Study 3", "Study 4")
)
(p1 <- tibble(
null = rnorm(10000, 0, .05),
alt = rnorm(10000, .4, .15)
) %>%
pivot_longer(everything(),
names_to = "Hypothesis",
values_to = "Mu") %>%
mutate(
Prop = inv_logit_scaled(Mu)
) %>%
ggplot(aes(x=Prop, fill=Hypothesis, group=Hypothesis)) + geom_density(alpha=.50) +
ylab("Density") +
scale_fill_manual(expression(italic(Hypothesis)), values = wes_palette("GrandBudapest1", type="discrete")[c(4, 2)], labels=c("Alternative", "Null")) +
annotate("text", x=.56, y=.0, label = "X") +
annotate("text", x=.57, y=.0, label = "X") +
annotate("text", x=.64, y=.0, label = "X") +
annotate("text", x=.70, y=.0, label = "X") +
annotate("text", x=.5, y=33, label = "Normal(0, .05)", colour=wes_palette("GrandBudapest1")[2]) +
annotate("text", x=.63, y=10, label = "Normal(.4, .15)", colour=wes_palette("GrandBudapest1")[4]) +
theme(legend.position = "none")
)
Comparing the null to alternative hypothesis 2.
theme_set(theme_minimal())
(p2 <- tibble(
null = rnorm(10000, 0, .05),
alt = rexp(10000, 1)
) %>%
pivot_longer(everything(),
names_to = "Hypothesis",
values_to = "Mu") %>%
mutate(
Prop = inv_logit_scaled(Mu)
) %>%
ggplot(aes(x=Prop, fill=Hypothesis, group=Hypothesis)) + geom_density(alpha=.50) +
ylab("Density") +
scale_fill_manual(values = wes_palette("GrandBudapest1", type="discrete")[c(4, 2)], labels=c("Alternative", "Null")) +
annotate("text", x=.56, y=.0, label = "X") +
annotate("text", x=.57, y=.0, label = "X") +
annotate("text", x=.64, y=.0, label = "X") +
annotate("text", x=.70, y=.0, label = "X") +
annotate("text", x=.53, y=33, label = "Normal(0, .05)", colour=wes_palette("GrandBudapest1")[2]) +
annotate("text", x=.63, y=7, label = "Exponential(1)", colour=wes_palette("GrandBudapest1")[4]) +
theme(legend.position = "none")
)
Fit the models with these priors. Bayes factors require many posterior samples, so I have increased the number of iterations. We also need to save all parameters (save_all_pars=TRUE) for the bridge sampling. I have not saved these model files in the GitHub repository, as they are too large to upload.
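(In more recent versions of brms, save_all_pars = TRUE is deprecated in favor of the equivalent save_pars = save_pars(all = TRUE); a sketch of the updated call:)
brm(Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName),
    prior = int_prior1, iter = 40000, cores = 4,
    save_pars = save_pars(all = TRUE),  # needed for bridge-sampling Bayes factors
    family = "beta", data = .)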
m_alt <-SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName), int_prior1, iter=40000, cores=4, save_all_pars=TRUE, family="beta", data=.,
file="models/m_alt")
summary(m_alt)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName)
## Data: . (Number of observations: 181)
## Samples: 4 chains, each with iter = 40000; warmup = 20000; thin = 1;
## total post-warmup samples = 80000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 17)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.20 0.09 0.04 0.39 1.00 18305 17400
##
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.08 0.06 0.00 0.21 1.00 30606 38437
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.08 0.07 -0.04 0.22 1.00 45640 41963
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 10.95 1.22 8.72 13.49 1.00 63595 53009
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
m_null <-SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName), int_prior2, iter=40000, cores=4, save_all_pars=TRUE, family="beta", data=.,
file="models/m_null")
I’ve run the bayes_factor() function several times per comparison, because the bridge-sampling estimate can be unstable if the number of iterations is too small.
#bayes_factor(m_alt, m_null)
#bayes_factor(m_alt, m_null)
#bayes_factor(m_alt, m_null)
#bayes_factor(m_alt, m_null)
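A minimal sketch of how those repeated calls can be collected to gauge stability (the wrapper is mine, not from the original script):
# Repeat the bridge-sampling estimate and inspect its spread;
# a stable estimate should vary little across runs.
bf_runs <- replicate(4, bayes_factor(m_alt, m_null)$bf)
summary(bf_runs)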
Second alternative hypothesis.
m_alt2 <-SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | ParticipantName) + (1 | MediaName), int_prior3, iter=40000, cores=4, save_all_pars=TRUE, family="beta", data=.,
file="models/m_alt2")
More Bayes factors.
#bayes_factor(m_alt2, m_null)
#bayes_factor(m_alt2, m_null)
#bayes_factor(m_alt2, m_null)
#bayes_factor(m_alt2, m_null)
Get the mean of our sample for the plots.
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
summarise(
Mean = mean(Prop)
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
Plot the posterior distributions for the null and each alternative hypothesis.
alter <- posterior_samples(m_alt) %>%
mutate(
Alt = inv_logit_scaled(b_Intercept)
) %>%
select(Alt)
null <- posterior_samples(m_null) %>%
mutate(
Null = inv_logit_scaled(b_Intercept)
) %>%
select(Null)
(p3 <- bind_cols(alter, null) %>%
pivot_longer(everything(),
names_to = "Hypothesis",
values_to = "Prop") %>%
ggplot(aes(x=Prop, fill=Hypothesis, group=Hypothesis)) + geom_density(alpha=.50) +
ylab("Density") +
scale_fill_manual(values = wes_palette("GrandBudapest1", type="discrete")[c(4, 2)], labels=c("Alternative", "Null")) +
annotate("text", x=.51, y=.0, label = "O") +
annotate("text", x=.56, y=.0, label = "X") +
annotate("text", x=.57, y=.0, label = "X") +
annotate("text", x=.64, y=.0, label = "X") +
annotate("text", x=.70, y=.0, label = "X") +
theme(legend.position = "none")
)
alter <- posterior_samples(m_alt2) %>%
mutate(
Alt = inv_logit_scaled(b_Intercept)
) %>%
select(Alt)
null <- posterior_samples(m_null) %>%
mutate(
Null = inv_logit_scaled(b_Intercept)
) %>%
select(Null)
(p4 <- bind_cols(alter, null) %>%
pivot_longer(everything(),
names_to = "Hypothesis",
values_to = "Prop") %>%
ggplot(aes(x=Prop, fill=Hypothesis, group=Hypothesis)) + geom_density(alpha=.50) +
ylab("Density") +
scale_fill_manual(values = wes_palette("GrandBudapest1", type="discrete")[c(4, 2)], labels=c("Alternative", "Null")) +
annotate("text", x=.51, y=.0, label = "O") +
annotate("text", x=.56, y=.0, label = "X") +
annotate("text", x=.57, y=.0, label = "X") +
annotate("text", x=.64, y=.0, label = "X") +
annotate("text", x=.70, y=.0, label = "X") +
theme(legend.position = "none")
)
Priors and posteriors for each Bayes factor comparison.
p1 | p3
p2 | p4
Evidence mostly supports the null. Next, test whether looks to the target vary by Window and Trial (each coded ±.5), still controlling for Logit.s.
m2 <- SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Trial", "Window", "Logit.s")) %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = (SamplesInAOI/SamplesTotal*(SamplesTotal-1) + (1/2))/SamplesTotal # correction of 0s and 1s
) %>%
filter(AOI=="AOI_Target") %>%
brm(Prop_no0 ~ 1 + Window1.s*Trial1.s + Logit.s + (1 + Window1.s*Trial1.s + Logit.s || ParticipantName) + (1+ Window1.s*Trial1.s || MediaName), family="beta", data=.,
file="models/m2")
summary(m2)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop_no0 ~ 1 + Window1.s * Trial1.s + Logit.s + (1 + Window1.s * Trial1.s + Logit.s || ParticipantName) + (1 + Window1.s * Trial1.s || MediaName)
## Data: . (Number of observations: 356)
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup samples = 4000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 17)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.13 0.09 0.01 0.34 1.01 945
## sd(Window1.s) 0.19 0.15 0.01 0.57 1.00 928
## sd(Trial1.s) 0.26 0.19 0.01 0.71 1.00 647
## sd(Window1.s:Trial1.s) 0.40 0.29 0.01 1.10 1.00 971
## Tail_ESS
## sd(Intercept) 1372
## sd(Window1.s) 1631
## sd(Trial1.s) 1402
## sd(Window1.s:Trial1.s) 1486
##
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.34 0.07 0.19 0.46 1.00 453
## sd(Window1.s) 0.89 0.13 0.65 1.14 1.00 606
## sd(Trial1.s) 0.86 0.14 0.58 1.12 1.00 488
## sd(Logit.s) 0.71 0.50 0.04 1.84 1.01 666
## sd(Window1.s:Trial1.s) 1.57 0.26 1.04 2.07 1.00 665
## Tail_ESS
## sd(Intercept) 590
## sd(Window1.s) 988
## sd(Trial1.s) 642
## sd(Logit.s) 1091
## sd(Window1.s:Trial1.s) 1300
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.07 0.08 -0.08 0.22 1.00 2052 2220
## Window1.s -0.21 0.14 -0.49 0.09 1.00 1874 2594
## Trial1.s -0.03 0.16 -0.35 0.29 1.00 1890 2359
## Logit.s 0.86 0.56 -0.22 2.03 1.00 1756 2063
## Window1.s:Trial1.s 0.10 0.28 -0.43 0.65 1.00 1835 2066
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 9.67 2.27 6.01 14.90 1.00 272 465
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Look at the posterior predictive distribution.
pp_check(m2)
## Using 10 posterior samples for ppc type 'dens_overlay' by default.
Looks reasonable, though certainly far from perfect.
Plot model-implied means and credible intervals for each condition, next to the raw means and confidence intervals.
new_data <- expand_grid(Window1.s=c(-.5, .5), Trial1.s=c(-.5, .5), Logit.s=0)
post <- as_tibble(fitted(m2, new_data, scale="response", probs=c(.025, .975), re_formula=NA)) %>%
bind_cols(new_data, .) %>%
mutate(
Data = "Estimated"
)
raw_data <- SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial")) %>%
filter(AOI=="AOI_Target") %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
group_by(Window1.s, Trial1.s, ParticipantName) %>%
mutate(
M = mean(Prop)
) %>%
slice(1) %>%
ungroup(ParticipantName) %>%
summarise(
Estimate = mean(M),
Q2.5 = Estimate - 1.97*(sd(M)/sqrt(92)),
Q97.5 = Estimate + 1.97*(sd(M)/sqrt(92))
) %>%
mutate(
Data = "Raw"
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## `summarise()` has grouped output by 'Window1.s'. You can override using the `.groups` argument.
post <- bind_rows(post, raw_data)
p1 <- post %>%
mutate(
Window = ifelse(Window1.s == .5, yes= "Window 1", no = "Window 2"),
Trial = ifelse(Trial1.s == .5, yes= "Trial 1", no = "Trial 2"),
Condition = paste(Trial, Window, Data, sep = ", ")
) %>%
ggplot(aes(y = Condition, x = Estimate, xmin=Q2.5, xmax=Q97.5, colour=Data)) +
geom_pointinterval(stat="identity") +
geom_vline(xintercept = .50)
ggsave(last_plot(), dpi=500, path = NULL, scale = 1, width = 20, height = 10, units="cm", filename="figures/Figure5.png")
Three-way interaction between Window, Trial, and TimeBin.
m5 <- SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "TimeBin", "Trial", "Logit.s")) %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal,
Time_factor = as.factor(TimeBin)
) %>%
filter(AOI=="AOI_Target") %>%
brm(Prop_no0 ~ 1 + Window1.s*Trial1.s*as.factor(TimeBin) + Logit.s + (1 + Window1.s*Trial1.s*as.factor(TimeBin) + Logit.s || ParticipantName) + (1 + Window1.s*Trial1.s*as.factor(TimeBin) || MediaName), family="beta", iter=4000, data=.,
file="models/m5")
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## Warning: Rows containing NAs were excluded from the model.
## Compiling Stan program...
## Start sampling
##
## (Stan sampling progress for the 4 chains of model '895234fbe3f64bb4e6aa385aa9285f84'
## omitted; each chain ran 4000 iterations (2000 warmup) and finished in ~477-491 seconds.)
summary(m5)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop_no0 ~ 1 + Window1.s * Trial1.s * as.factor(TimeBin) + Logit.s + (1 + Window1.s * Trial1.s * as.factor(TimeBin) + Logit.s || ParticipantName) + (1 + Window1.s * Trial1.s * as.factor(TimeBin) || MediaName)
## Data: . (Number of observations: 1404)
## Samples: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup samples = 8000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 17)
## Estimate Est.Error l-95% CI u-95% CI
## sd(Intercept) 0.06 0.05 0.00 0.19
## sd(Window1.s) 0.13 0.11 0.01 0.40
## sd(Trial1.s) 0.13 0.10 0.00 0.38
## sd(as.factorTimeBin1) 0.17 0.12 0.01 0.45
## sd(as.factorTimeBin2) 0.20 0.13 0.01 0.51
## sd(as.factorTimeBin3) 0.17 0.12 0.01 0.45
## sd(Window1.s:Trial1.s) 0.26 0.20 0.01 0.75
## sd(Window1.s:as.factorTimeBin1) 0.27 0.21 0.01 0.78
## sd(Window1.s:as.factorTimeBin2) 0.28 0.21 0.01 0.76
## sd(Window1.s:as.factorTimeBin3) 0.24 0.19 0.01 0.69
## sd(Trial1.s:as.factorTimeBin1) 0.35 0.24 0.01 0.92
## sd(Trial1.s:as.factorTimeBin2) 0.39 0.27 0.02 1.02
## sd(Trial1.s:as.factorTimeBin3) 0.33 0.24 0.02 0.88
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 0.52 0.40 0.02 1.50
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 0.56 0.41 0.02 1.50
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 0.46 0.36 0.02 1.32
## Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.00 3170 3211
## sd(Window1.s) 1.00 2855 3136
## sd(Trial1.s) 1.00 2850 2333
## sd(as.factorTimeBin1) 1.00 1945 2816
## sd(as.factorTimeBin2) 1.00 1931 3196
## sd(as.factorTimeBin3) 1.00 2365 3221
## sd(Window1.s:Trial1.s) 1.00 3072 3040
## sd(Window1.s:as.factorTimeBin1) 1.00 2557 2994
## sd(Window1.s:as.factorTimeBin2) 1.00 2613 3613
## sd(Window1.s:as.factorTimeBin3) 1.00 2979 2943
## sd(Trial1.s:as.factorTimeBin1) 1.00 2312 2393
## sd(Trial1.s:as.factorTimeBin2) 1.00 1787 3046
## sd(Trial1.s:as.factorTimeBin3) 1.00 2341 2679
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 1.00 2672 3438
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 1.00 2686 3234
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 1.00 3125 2830
##
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI
## sd(Intercept) 0.07 0.05 0.00 0.17
## sd(Window1.s) 0.18 0.12 0.01 0.44
## sd(Trial1.s) 0.19 0.12 0.01 0.45
## sd(as.factorTimeBin1) 0.12 0.09 0.00 0.32
## sd(as.factorTimeBin2) 0.11 0.08 0.01 0.29
## sd(as.factorTimeBin3) 0.17 0.11 0.01 0.41
## sd(Logit.s) 0.43 0.31 0.02 1.13
## sd(Window1.s:Trial1.s) 0.45 0.26 0.02 0.97
## sd(Window1.s:as.factorTimeBin1) 0.18 0.13 0.01 0.50
## sd(Window1.s:as.factorTimeBin2) 0.26 0.19 0.01 0.69
## sd(Window1.s:as.factorTimeBin3) 0.25 0.18 0.01 0.65
## sd(Trial1.s:as.factorTimeBin1) 0.35 0.23 0.02 0.84
## sd(Trial1.s:as.factorTimeBin2) 0.34 0.22 0.02 0.81
## sd(Trial1.s:as.factorTimeBin3) 0.52 0.27 0.04 1.04
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 0.56 0.39 0.02 1.44
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 0.56 0.39 0.03 1.44
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 0.42 0.31 0.02 1.15
## Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.00 2310 3326
## sd(Window1.s) 1.00 1442 2358
## sd(Trial1.s) 1.00 1407 2268
## sd(as.factorTimeBin1) 1.00 2777 2946
## sd(as.factorTimeBin2) 1.00 2787 2925
## sd(as.factorTimeBin3) 1.00 1633 2543
## sd(Logit.s) 1.00 2290 3155
## sd(Window1.s:Trial1.s) 1.00 1374 2004
## sd(Window1.s:as.factorTimeBin1) 1.00 3478 3043
## sd(Window1.s:as.factorTimeBin2) 1.00 2304 3127
## sd(Window1.s:as.factorTimeBin3) 1.00 2962 3959
## sd(Trial1.s:as.factorTimeBin1) 1.00 1737 2546
## sd(Trial1.s:as.factorTimeBin2) 1.00 1889 3027
## sd(Trial1.s:as.factorTimeBin3) 1.00 1170 1887
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 1.00 2325 2668
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 1.00 2333 3043
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 1.00 2902 3490
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept 0.00 0.08 -0.15 0.16 1.00
## Window1.s 0.02 0.15 -0.27 0.32 1.00
## Trial1.s -0.31 0.16 -0.61 -0.01 1.00
## as.factorTimeBin1 0.08 0.13 -0.17 0.33 1.00
## as.factorTimeBin2 0.08 0.13 -0.18 0.34 1.00
## as.factorTimeBin3 0.02 0.12 -0.22 0.26 1.00
## Logit.s 0.23 0.39 -0.55 0.97 1.00
## Window1.s:Trial1.s -0.00 0.31 -0.61 0.60 1.00
## Window1.s:as.factorTimeBin1 -0.11 0.23 -0.55 0.34 1.00
## Window1.s:as.factorTimeBin2 -0.20 0.23 -0.65 0.26 1.00
## Window1.s:as.factorTimeBin3 -0.18 0.23 -0.65 0.26 1.00
## Trial1.s:as.factorTimeBin1 0.23 0.25 -0.26 0.73 1.00
## Trial1.s:as.factorTimeBin2 0.44 0.26 -0.10 0.95 1.00
## Trial1.s:as.factorTimeBin3 0.31 0.25 -0.18 0.81 1.00
## Window1.s:Trial1.s:as.factorTimeBin1 0.34 0.47 -0.56 1.28 1.00
## Window1.s:Trial1.s:as.factorTimeBin2 0.11 0.47 -0.84 1.03 1.00
## Window1.s:Trial1.s:as.factorTimeBin3 -0.06 0.45 -0.95 0.82 1.00
## Bulk_ESS Tail_ESS
## Intercept 7269 6176
## Window1.s 3466 4040
## Trial1.s 4540 5606
## as.factorTimeBin1 4787 4760
## as.factorTimeBin2 4361 4584
## as.factorTimeBin3 5054 5685
## Logit.s 4418 5064
## Window1.s:Trial1.s 3415 4839
## Window1.s:as.factorTimeBin1 3892 4874
## Window1.s:as.factorTimeBin2 3674 5107
## Window1.s:as.factorTimeBin3 3589 4535
## Trial1.s:as.factorTimeBin1 4285 5236
## Trial1.s:as.factorTimeBin2 4041 4873
## Trial1.s:as.factorTimeBin3 4389 5230
## Window1.s:Trial1.s:as.factorTimeBin1 4030 5017
## Window1.s:Trial1.s:as.factorTimeBin2 3754 4725
## Window1.s:Trial1.s:as.factorTimeBin3 4130 4938
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 0.83 0.03 0.77 0.89 1.00 2470 4185
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Posterior predictive distribution
pp_check(m5)
## Using 10 posterior samples for ppc type 'dens_overlay' by default.
Admittedly, this doesn’t look great. I think that because the time bins are now relatively narrow (2,000 ms), the data are distributed trimodally, with modes near 0, .5, and 1.
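A quick way to see this in the raw data (a sketch reusing the same window-data pipeline as above):
SB_full_clean %>%
  make_time_window_data(summarize_by=c("ParticipantName", "MediaName"),
                        predictor_columns=c("Window", "Trial", "TimeBin")) %>%
  filter(AOI=="AOI_Target") %>%
  ggplot(aes(x=Prop)) + geom_histogram(bins=40)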
new_data <- expand_grid(Window1.s=c(-.5, .5), Trial1.s = c(-.5, .5), TimeBin=c(0, 1, 2, 3), Logit.s=0)
post <- as_tibble(fitted(m5, new_data, scale="response", probs=c(.025, .975), re_formula=NA)) %>%
bind_cols(new_data, .) %>%
mutate(
Data = "Estimated"
)
raw_data <- SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin")) %>%
filter(AOI=="AOI_Target") %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(SamplesTotal > 0) %>%
group_by(Window1.s, Trial1.s, ParticipantName, TimeBin) %>%
mutate(
M = mean(Prop_no0, na.rm=TRUE)
) %>%
slice(1) %>%
ungroup(ParticipantName) %>%
summarise(
Estimate = mean(M),
Est.Error = sd(M)/sqrt(92),
Q2.5 = Estimate - 1.97*(sd(M)/sqrt(92)),
Q97.5 = Estimate + 1.97*(sd(M)/sqrt(92))
) %>%
mutate(
Data = "Raw"
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## `summarise()` has grouped output by 'Window1.s', 'Trial1.s'. You can override using the `.groups` argument.
post <- bind_rows(post, raw_data)
p2 <- post %>%
mutate(
Window = ifelse(Window1.s == .5, yes= "Window 1", no = "Window 2"),
Trial = ifelse(Trial1.s == .5, yes= "Trial 1", no = "Trial 2"),
Time_Bin = ifelse(TimeBin==0, "0-2s",
ifelse(TimeBin==1, "2-4s",
ifelse(TimeBin==2, "4-6s", "6-8s"))),
Condition = paste(Window, Time_Bin, Data, sep = ", ")
) %>%
ggplot(aes(y = Condition, x = Estimate, xmin=Q2.5, xmax=Q97.5, colour=Data)) + facet_wrap(~Trial) +
geom_pointinterval(stat="identity") +
geom_vline(xintercept = .50)
I don’t see much evidence that participants differed from chance at any of the time points.
ggsave(last_plot(), dpi=500, path = NULL, scale = 1, width = 20, height = 10, units="cm", filename="figures/Figure6.png")
Let’s try a simpler random-effects structure to see whether that’s causing the uncertainty in the estimates.
m5b <- SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin", "Logit.s")) %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal,
Time_factor = as.factor(TimeBin)
) %>%
filter(AOI=="AOI_Target") %>%
brm(Prop_no0 ~ 1 + Window1.s*Trial1.s*as.factor(TimeBin) + Logit.s + (1 + Window1.s + Trial1.s + as.factor(TimeBin) + Logit.s || ParticipantName) + (1 + Window1.s + Trial1.s + as.factor(TimeBin) + Logit.s || MediaName), family="beta", iter=4000, data=.,
file="models/m5b")
summary(m5b)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop_no0 ~ 1 + Window1.s * Trial1.s * as.factor(TimeBin) + Logit.s + (1 + Window1.s + Trial1.s + as.factor(TimeBin) + Logit.s || ParticipantName) + (1 + Window1.s + Trial1.s + as.factor(TimeBin) + Logit.s || MediaName)
## Data: . (Number of observations: 1404)
## Samples: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup samples = 8000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 17)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.07 0.06 0.00 0.22 1.00 2973
## sd(Window1.s) 0.14 0.10 0.01 0.38 1.00 2961
## sd(Trial1.s) 0.15 0.12 0.01 0.43 1.00 3190
## sd(as.factorTimeBin1) 0.17 0.12 0.01 0.43 1.00 2534
## sd(as.factorTimeBin2) 0.22 0.13 0.02 0.51 1.00 2030
## sd(as.factorTimeBin3) 0.17 0.11 0.01 0.42 1.00 2486
## sd(Logit.s) 0.45 0.37 0.02 1.38 1.00 3894
## Tail_ESS
## sd(Intercept) 4283
## sd(Window1.s) 3462
## sd(Trial1.s) 3933
## sd(as.factorTimeBin1) 3365
## sd(as.factorTimeBin2) 2959
## sd(as.factorTimeBin3) 3770
## sd(Logit.s) 4252
##
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.06 0.05 0.00 0.17 1.00 3240
## sd(Window1.s) 0.18 0.12 0.01 0.43 1.00 2369
## sd(Trial1.s) 0.22 0.13 0.01 0.48 1.00 2268
## sd(as.factorTimeBin1) 0.11 0.08 0.00 0.31 1.00 2909
## sd(as.factorTimeBin2) 0.10 0.08 0.00 0.28 1.00 3806
## sd(as.factorTimeBin3) 0.16 0.10 0.01 0.39 1.00 2448
## sd(Logit.s) 0.43 0.31 0.02 1.14 1.00 2923
## Tail_ESS
## sd(Intercept) 3396
## sd(Window1.s) 3591
## sd(Trial1.s) 3267
## sd(as.factorTimeBin1) 3694
## sd(as.factorTimeBin2) 3755
## sd(as.factorTimeBin3) 3499
## sd(Logit.s) 3124
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept 0.00 0.08 -0.16 0.17 1.00
## Window1.s 0.02 0.15 -0.27 0.31 1.00
## Trial1.s -0.30 0.17 -0.63 0.02 1.00
## as.factorTimeBin1 0.08 0.12 -0.15 0.30 1.00
## as.factorTimeBin2 0.08 0.12 -0.15 0.31 1.00
## as.factorTimeBin3 0.02 0.11 -0.20 0.25 1.00
## Logit.s 0.38 0.43 -0.48 1.23 1.00
## Window1.s:Trial1.s -0.01 0.30 -0.59 0.59 1.00
## Window1.s:as.factorTimeBin1 -0.11 0.20 -0.51 0.27 1.00
## Window1.s:as.factorTimeBin2 -0.21 0.20 -0.60 0.18 1.00
## Window1.s:as.factorTimeBin3 -0.17 0.20 -0.57 0.22 1.00
## Trial1.s:as.factorTimeBin1 0.23 0.23 -0.22 0.67 1.00
## Trial1.s:as.factorTimeBin2 0.42 0.24 -0.06 0.88 1.00
## Trial1.s:as.factorTimeBin3 0.30 0.23 -0.15 0.75 1.00
## Window1.s:Trial1.s:as.factorTimeBin1 0.29 0.41 -0.53 1.08 1.00
## Window1.s:Trial1.s:as.factorTimeBin2 0.08 0.40 -0.71 0.87 1.00
## Window1.s:Trial1.s:as.factorTimeBin3 -0.04 0.41 -0.84 0.75 1.00
## Bulk_ESS Tail_ESS
## Intercept 8615 5598
## Window1.s 4357 5518
## Trial1.s 4790 5474
## as.factorTimeBin1 7090 5586
## as.factorTimeBin2 6923 5570
## as.factorTimeBin3 8151 5973
## Logit.s 6010 5479
## Window1.s:Trial1.s 4346 5779
## Window1.s:as.factorTimeBin1 6124 6080
## Window1.s:as.factorTimeBin2 6078 5966
## Window1.s:as.factorTimeBin3 6086 5312
## Trial1.s:as.factorTimeBin1 6233 5937
## Trial1.s:as.factorTimeBin2 5255 5575
## Trial1.s:as.factorTimeBin3 5940 6081
## Window1.s:Trial1.s:as.factorTimeBin1 6020 6401
## Window1.s:Trial1.s:as.factorTimeBin2 5603 6674
## Window1.s:Trial1.s:as.factorTimeBin3 5473 5636
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 0.78 0.02 0.73 0.83 1.00 6916 5802
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
new_data <- expand_grid(Window1.s=c(-.5, .5), Trial1.s = c(-.5, .5), TimeBin=c(0, 1, 2, 3), Logit.s = 0)
post <- as_tibble(fitted(m5b, new_data, scale="response", probs=c(.025, .975), re_formula=NA)) %>%
bind_cols(new_data, .) %>%
mutate(
Data = "Estimated"
)
raw_data <- SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin")) %>%
filter(AOI=="AOI_Target") %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(SamplesTotal > 0) %>%
group_by(Window1.s, Trial1.s, ParticipantName, TimeBin) %>%
mutate(
M = mean(Prop_no0, na.rm=TRUE)
) %>%
slice(1) %>%
ungroup(ParticipantName) %>%
summarise(
Estimate = mean(M),
Est.Error = sd(M)/sqrt(92),
Q2.5 = Estimate - 1.97*(sd(M)/sqrt(92)),
Q97.5 = Estimate + 1.97*(sd(M)/sqrt(92))
) %>%
mutate(
Data = "Raw"
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## `summarise()` has grouped output by 'Window1.s', 'Trial1.s'. You can override using the `.groups` argument.
post <- bind_rows(post, raw_data)
post %>%
mutate(
Window = ifelse(Window1.s == .5, yes= "Window 1", no = "Window 2"),
Trial = ifelse(Trial1.s == .5, yes= "Trial 1", no = "Trial 2"),
Time_Bin = ifelse(TimeBin==0, "0-2s",
ifelse(TimeBin==1, "2-4s",
ifelse(TimeBin==2, "4-6s", "6-8s"))),
Condition = paste(Window, Time_Bin, Data, sep = ", ")
) %>%
ggplot(aes(y = Condition, x = Estimate, xmin=Q2.5, xmax=Q97.5, colour=Data)) + facet_wrap(~Trial) +
geom_pointinterval(stat="identity") +
geom_vline(xintercept = .50)
In an earlier run-through of this analysis, I found that one of the actions attracted more looks both when it was a target and when it was a distracter, as can be seen below:
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "Target"), aois="AOI_Target") %>%
select(ParticipantName, Target, Prop) %>%
ggplot(aes(x=Target, y=Prop)) + geom_boxplot() + geom_point()
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "Distractor"), aois="AOI_Target") %>%
select(ParticipantName, Distractor, Prop) %>%
ggplot(aes(x=Distractor, y=Prop)) + geom_boxplot() + geom_point()
Given that the push action seems to attract a lot more attention, both as a target and as a distracter, I’m going to re-fit the models from above with the trials in which push was the target or the distracter removed.
m1_nopush <-SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | MediaName), family="beta", data=.,
file= "models/m1_nopush")
summary(m1_nopush)
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ 1 + (1 | MediaName)
## Data: . (Number of observations: 90)
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup samples = 4000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 9)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.22 0.14 0.01 0.53 1.00 827 1366
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.03 0.11 -0.20 0.25 1.00 1056 943
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 9.80 1.44 7.20 12.93 1.00 3173 2308
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m1_nopush, "Intercept > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Intercept) > 0 0.03 0.11 -0.16 0.2 1.7 0.63
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
mnull_nopush <- SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | MediaName), prior= int_prior2, iter=10000, family="beta", save_all_pars=TRUE, data=.,
file= "models/m_null_nopush ")
malt_nopush <- SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | MediaName), prior= int_prior1, iter=10000, family="beta", save_all_pars=TRUE, data=.,
file= "models/m_alt_nopush ")
malt2_nopush <- SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName")) %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + (1 | MediaName), prior= int_prior3, iter=10000, family="beta", save_all_pars=TRUE, data=.,
file= "models/m_alt2_nopush ")
bayes_factor(malt_nopush, mnull_nopush)
## Warning: 313 of the 10000 log_prob() evaluations on the posterior draws produced
## -Inf/Inf.
## Warning: 360 of the 10000 log_prob() evaluations on the proposal draws produced
## -Inf/Inf.
## Iteration: 1
## Iteration: 2
## Iteration: 3
## Iteration: 4
## Iteration: 5
## Iteration: 6
## Iteration: 7
## Iteration: 8
## Warning: 313 of the 10000 log_prob() evaluations on the posterior draws produced
## -Inf/Inf.
## Warning: 354 of the 10000 log_prob() evaluations on the proposal draws produced
## -Inf/Inf.
## Iteration: 1
## Iteration: 2
## Iteration: 3
## Iteration: 4
## Iteration: 5
## Iteration: 6
## Iteration: 7
## Iteration: 8
## Estimated Bayes factor in favor of malt_nopush over mnull_nopush: 0.07818
#bayes_factor(malt_nopush, mnull_nopush)
#bayes_factor(malt_nopush, mnull_nopush)
bayes_factor(malt2_nopush, mnull_nopush)
## Warning: 373 of the 10000 log_prob() evaluations on the posterior draws produced
## -Inf/Inf.
## Warning: 1257 of the 10000 log_prob() evaluations on the proposal draws produced
## -Inf/Inf.
## Iteration: 1
## Iteration: 2
## Iteration: 3
## Iteration: 4
## Iteration: 5
## Iteration: 6
## Iteration: 7
## Warning: 313 of the 10000 log_prob() evaluations on the posterior draws produced
## -Inf/Inf.
## Warning: 378 of the 10000 log_prob() evaluations on the proposal draws produced
## -Inf/Inf.
## Iteration: 1
## Iteration: 2
## Iteration: 3
## Iteration: 4
## Iteration: 5
## Iteration: 6
## Iteration: 7
## Estimated Bayes factor in favor of malt2_nopush over mnull_nopush: 0.17367
#bayes_factor(malt2_nopush, mnull_nopush)
#bayes_factor(malt2_nopush, mnull_nopush)
Both Bayes factors favor the null: the data are roughly 13 times more likely under the null than under the first alternative (1/0.078), and roughly 6 times more likely than under the second (1/0.174).
m2_nopush <-SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns = "Logit.s") %>%
filter(AOI=="AOI_Target") %>%
data.frame() %>%
brm(Prop ~ 1 + Logit.s+ (1 || MediaName), family="beta", data=.,
file= "models/m2_nopush")
m3_nopush <- SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "Logit.s")) %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(AOI=="AOI_Target") %>%
brm(Prop_no0 ~ 1 + Window1.s*Trial1.s + Logit.s + (1 + Window1.s*Trial1.s + Logit.s || ParticipantName) + (1 + Window1.s*Trial1.s || MediaName), data=., iter=4000, family="beta", control=list(adapt_delta=.95, max_treedepth=20),
file="models/m3_nopush")
summary(m3_nopush)
## Warning: There were 7 divergent transitions after warmup. Increasing adapt_delta
## above 0.95 may help. See http://mc-stan.org/misc/warnings.html#divergent-
## transitions-after-warmup
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop_no0 ~ 1 + Window1.s * Trial1.s + Logit.s + (1 + Window1.s * Trial1.s + Logit.s || ParticipantName) + (1 + Window1.s * Trial1.s || MediaName)
## Data: . (Number of observations: 174)
## Samples: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup samples = 8000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 9)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.19 0.17 0.01 0.63 1.00 2420
## sd(Window1.s) 0.29 0.26 0.01 0.94 1.00 3168
## sd(Trial1.s) 0.37 0.32 0.01 1.22 1.00 2718
## sd(Window1.s:Trial1.s) 0.57 0.50 0.02 1.80 1.00 3183
## Tail_ESS
## sd(Intercept) 3689
## sd(Window1.s) 4173
## sd(Trial1.s) 3203
## sd(Window1.s:Trial1.s) 3768
##
## ~ParticipantName (Number of levels: 90)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.34 0.18 0.02 0.67 1.01 551
## sd(Window1.s) 0.77 0.37 0.07 1.38 1.01 390
## sd(Trial1.s) 0.69 0.38 0.04 1.37 1.01 432
## sd(Logit.s) 1.61 1.02 0.08 3.68 1.00 1230
## sd(Window1.s:Trial1.s) 1.33 0.74 0.08 2.64 1.01 382
## Tail_ESS
## sd(Intercept) 1741
## sd(Window1.s) 1560
## sd(Trial1.s) 1100
## sd(Logit.s) 2555
## sd(Window1.s:Trial1.s) 922
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.44 0.43 -0.43 1.31 1.00 3788 3097
## Window1.s -0.06 0.24 -0.52 0.44 1.00 4105 3201
## Trial1.s -0.16 0.28 -0.78 0.38 1.00 4335 2866
## Logit.s 2.84 3.15 -3.25 9.45 1.00 4207 3611
## Window1.s:Trial1.s 0.40 0.50 -0.53 1.40 1.00 4727 3239
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 12.66 5.26 6.10 26.42 1.01 432 791
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
new_data <- expand_grid(Window1.s=c(-.5, .5), Trial1.s = c(-.5, .5), Logit.s=0)
post <- as_tibble(fitted(m3_nopush, new_data, scale="response", probs=c(.025, .975), re_formula=NA)) %>%
bind_cols(new_data, .) %>%
mutate(
Data = "Estimated"
)
raw_data <- SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial")) %>%
filter(AOI=="AOI_Target") %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(SamplesTotal > 0) %>%
group_by(Window1.s, Trial1.s, ParticipantName) %>%
mutate(
M = mean(Prop_no0, na.rm=TRUE)
) %>%
slice(1) %>%
ungroup(ParticipantName) %>%
summarise(
Estimate = mean(M),
Est.Error = sd(M)/sqrt(92),
Q2.5 = Estimate - 1.97*(sd(M)/sqrt(92)),
Q97.5 = Estimate + 1.97*(sd(M)/sqrt(92))
) %>%
mutate(
Data = "Raw"
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## `summarise()` has grouped output by 'Window1.s'. You can override using the `.groups` argument.
post <- bind_rows(post, raw_data)
post %>%
mutate(
Window = ifelse(Window1.s == .5, yes= "Window 1", no = "Window 2"),
Trial = ifelse(Trial1.s == .5, yes= "Trial 1", no = "Trial 2"),
Condition = paste(Trial, Window, Data, sep = ", ")
) %>%
ggplot(aes(y = Condition, x = Estimate, xmin=Q2.5, xmax=Q97.5, colour=Data)) +
geom_pointinterval(stat="identity") +
geom_vline(xintercept = .50)
ggsave(last_plot(), dpi=500, path = NULL, scale = 1, width = 20, height = 10, units="cm", filename="figures/Figure7.png")
m4_nopush <- SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin", "Logit.s")) %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(AOI=="AOI_Target") %>%
brm(Prop_no0 ~ 1 + Window1.s*Trial1.s*as.factor(TimeBin) + Logit.s + (1 + Window1.s*Trial1.s*as.factor(TimeBin) + Logit.s|| ParticipantName) + (1 + Window1.s*Trial1.s*as.factor(TimeBin) || MediaName), family="beta", data=., iter=4000, control=list(adapt_delta=.95, max_treedepth=20),
file="models/m4_nopush")
summary(m4_nopush)
## Warning: There were 5 divergent transitions after warmup. Increasing adapt_delta
## above 0.95 may help. See http://mc-stan.org/misc/warnings.html#divergent-
## transitions-after-warmup
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop_no0 ~ 1 + Window1.s * Trial1.s * as.factor(TimeBin) + Logit.s + (1 + Window1.s * Trial1.s * as.factor(TimeBin) + Logit.s || ParticipantName) + (1 + Window1.s * Trial1.s * as.factor(TimeBin) || MediaName)
## Data: . (Number of observations: 690)
## Samples: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup samples = 8000
##
## Group-Level Effects:
## ~MediaName (Number of levels: 9)
## Estimate Est.Error l-95% CI u-95% CI
## sd(Intercept) 0.14 0.14 0.00 0.50
## sd(Window1.s) 0.28 0.25 0.01 0.94
## sd(Trial1.s) 0.27 0.26 0.01 0.98
## sd(as.factorTimeBin1) 0.24 0.21 0.01 0.76
## sd(as.factorTimeBin2) 0.34 0.27 0.02 0.99
## sd(as.factorTimeBin3) 0.25 0.22 0.01 0.81
## sd(Window1.s:Trial1.s) 0.54 0.47 0.02 1.69
## sd(Window1.s:as.factorTimeBin1) 0.51 0.42 0.02 1.55
## sd(Window1.s:as.factorTimeBin2) 0.41 0.35 0.02 1.29
## sd(Window1.s:as.factorTimeBin3) 0.47 0.40 0.02 1.50
## sd(Trial1.s:as.factorTimeBin1) 0.47 0.38 0.02 1.42
## sd(Trial1.s:as.factorTimeBin2) 0.63 0.51 0.03 1.86
## sd(Trial1.s:as.factorTimeBin3) 0.48 0.40 0.02 1.49
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 0.93 0.76 0.03 2.79
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 0.76 0.63 0.03 2.31
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 0.84 0.69 0.03 2.58
## Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.00 3273 3220
## sd(Window1.s) 1.00 3455 4205
## sd(Trial1.s) 1.00 2873 3593
## sd(as.factorTimeBin1) 1.00 3473 3453
## sd(as.factorTimeBin2) 1.00 2835 3792
## sd(as.factorTimeBin3) 1.00 3408 4339
## sd(Window1.s:Trial1.s) 1.00 3857 4414
## sd(Window1.s:as.factorTimeBin1) 1.00 3424 4227
## sd(Window1.s:as.factorTimeBin2) 1.00 3600 3987
## sd(Window1.s:as.factorTimeBin3) 1.00 3509 3644
## sd(Trial1.s:as.factorTimeBin1) 1.00 3488 3467
## sd(Trial1.s:as.factorTimeBin2) 1.00 2598 3908
## sd(Trial1.s:as.factorTimeBin3) 1.00 3192 3898
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 1.00 3210 3553
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 1.00 3565 3442
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 1.00 3583 4058
##
## ~ParticipantName (Number of levels: 90)
## Estimate Est.Error l-95% CI u-95% CI
## sd(Intercept) 0.10 0.07 0.00 0.26
## sd(Window1.s) 0.25 0.17 0.01 0.63
## sd(Trial1.s) 0.19 0.14 0.01 0.52
## sd(as.factorTimeBin1) 0.20 0.14 0.01 0.52
## sd(as.factorTimeBin2) 0.24 0.16 0.01 0.59
## sd(as.factorTimeBin3) 0.40 0.22 0.02 0.82
## sd(Logit.s) 0.66 0.50 0.03 1.83
## sd(Window1.s:Trial1.s) 0.49 0.34 0.03 1.25
## sd(Window1.s:as.factorTimeBin1) 0.41 0.29 0.01 1.06
## sd(Window1.s:as.factorTimeBin2) 0.29 0.22 0.01 0.83
## sd(Window1.s:as.factorTimeBin3) 0.30 0.23 0.01 0.84
## sd(Trial1.s:as.factorTimeBin1) 0.39 0.28 0.02 1.05
## sd(Trial1.s:as.factorTimeBin2) 0.45 0.32 0.02 1.16
## sd(Trial1.s:as.factorTimeBin3) 0.76 0.44 0.04 1.61
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 0.79 0.55 0.04 2.03
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 0.57 0.43 0.03 1.58
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 0.58 0.44 0.02 1.61
## Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.00 2673 3382
## sd(Window1.s) 1.00 1955 3141
## sd(Trial1.s) 1.00 2608 3302
## sd(as.factorTimeBin1) 1.00 3006 3798
## sd(as.factorTimeBin2) 1.00 2184 3025
## sd(as.factorTimeBin3) 1.00 785 1899
## sd(Logit.s) 1.00 3017 3746
## sd(Window1.s:Trial1.s) 1.00 2215 3478
## sd(Window1.s:as.factorTimeBin1) 1.00 2480 3511
## sd(Window1.s:as.factorTimeBin2) 1.00 3809 4007
## sd(Window1.s:as.factorTimeBin3) 1.00 3528 3523
## sd(Trial1.s:as.factorTimeBin1) 1.00 1983 2826
## sd(Trial1.s:as.factorTimeBin2) 1.00 1976 3199
## sd(Trial1.s:as.factorTimeBin3) 1.00 950 1941
## sd(Window1.s:Trial1.s:as.factorTimeBin1) 1.00 2641 3316
## sd(Window1.s:Trial1.s:as.factorTimeBin2) 1.00 3563 3444
## sd(Window1.s:Trial1.s:as.factorTimeBin3) 1.00 3663 3627
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat
## Intercept 0.19 0.34 -0.50 0.86 1.00
## Window1.s 0.09 0.27 -0.45 0.62 1.00
## Trial1.s -0.47 0.28 -1.02 0.07 1.00
## as.factorTimeBin1 0.11 0.21 -0.30 0.53 1.00
## as.factorTimeBin2 0.21 0.25 -0.28 0.71 1.00
## as.factorTimeBin3 0.06 0.22 -0.38 0.50 1.00
## Logit.s 1.66 2.49 -3.20 6.65 1.00
## Window1.s:Trial1.s 0.05 0.55 -1.02 1.17 1.00
## Window1.s:as.factorTimeBin1 -0.22 0.43 -1.07 0.65 1.00
## Window1.s:as.factorTimeBin2 -0.06 0.39 -0.81 0.71 1.00
## Window1.s:as.factorTimeBin3 -0.07 0.41 -0.85 0.74 1.00
## Trial1.s:as.factorTimeBin1 0.37 0.42 -0.48 1.16 1.00
## Trial1.s:as.factorTimeBin2 0.53 0.51 -0.50 1.50 1.00
## Trial1.s:as.factorTimeBin3 0.42 0.43 -0.44 1.25 1.00
## Window1.s:Trial1.s:as.factorTimeBin1 0.61 0.84 -1.01 2.38 1.00
## Window1.s:Trial1.s:as.factorTimeBin2 0.36 0.78 -1.14 1.91 1.00
## Window1.s:Trial1.s:as.factorTimeBin3 0.14 0.82 -1.53 1.71 1.00
## Bulk_ESS Tail_ESS
## Intercept 4926 3792
## Window1.s 5253 5049
## Trial1.s 4971 4234
## as.factorTimeBin1 4824 4712
## as.factorTimeBin2 4175 3822
## as.factorTimeBin3 5455 4544
## Logit.s 5138 4315
## Window1.s:Trial1.s 5198 4475
## Window1.s:as.factorTimeBin1 4642 4741
## Window1.s:as.factorTimeBin2 5146 4615
## Window1.s:as.factorTimeBin3 5138 4863
## Trial1.s:as.factorTimeBin1 5199 4860
## Trial1.s:as.factorTimeBin2 4388 4161
## Trial1.s:as.factorTimeBin3 4994 4387
## Window1.s:Trial1.s:as.factorTimeBin1 5562 4624
## Window1.s:Trial1.s:as.factorTimeBin2 5176 4298
## Window1.s:Trial1.s:as.factorTimeBin3 5455 4689
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 0.91 0.05 0.81 1.03 1.00 3181 5358
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Population-level fitted values for each Window x Trial x TimeBin cell
# (re_formula = NA drops the group-level effects)
new_data <- expand_grid(Window1.s=c(-.5, .5), Trial1.s = c(-.5, .5), TimeBin=c(0, 1, 2, 3), Logit.s=0)
post <- as_tibble(fitted(m4_nopush, new_data, scale="response", probs=c(.025, .975), re_formula=NA)) %>%
bind_cols(new_data, .) %>%
mutate(
Data = "Estimated"
)
raw_data <- SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin")) %>%
filter(AOI=="AOI_Target") %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(SamplesTotal > 0) %>%
group_by(Window1.s, Trial1.s, ParticipantName, TimeBin) %>%
mutate(
M = mean(Prop_no0, na.rm=TRUE)
) %>%
slice(1) %>%
ungroup(ParticipantName) %>%
summarise(
Estimate = mean(M),
Est.Error = sd(M)/sqrt(92),
Q2.5 = Estimate - 1.97*(sd(M)/sqrt(92)),
Q97.5 = Estimate + 1.97*(sd(M)/sqrt(92))
) %>%
mutate(
Data = "Raw"
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## `summarise()` has grouped output by 'Window1.s', 'Trial1.s'. You can override using the `.groups` argument.
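A note on the Prop_no0 line in the chunk above: the Beta likelihood excludes exact 0s and 1s, so the observed proportion $y = \text{SamplesInAOI}/\text{SamplesTotal}$ is compressed slightly toward $1/2$ (the transformation suggested by Smithson & Verkuilen, 2006), with $n = \text{SamplesTotal}$:

$$y' = \frac{y\,(n-1) + \tfrac{1}{2}}{n}$$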
post <- bind_rows(post, raw_data)
post %>%
mutate(
Window = ifelse(Window1.s == .5, yes= "Window 1", no = "Window 2"),
Trial = ifelse(Trial1.s == .5, yes= "Trial 1", no = "Trial 2"),
Time_Bin = ifelse(TimeBin==0, "0-2s",
ifelse(TimeBin==1, "2-4s",
ifelse(TimeBin==2, "4-6s", "6-8s"))),
Condition = paste(Window, Time_Bin, Data, sep = ", ")
) %>%
ggplot(aes(y = Condition, x = Estimate, xmin=Q2.5, xmax=Q97.5, colour=Data)) + facet_wrap(~Trial) +
geom_pointinterval(stat="identity") +
geom_vline(xintercept = .50)
ggsave(last_plot(), dpi=500, path = NULL, scale = 1, width = 20, height = 10, units="cm", filename="figures/Figure8.png")
m4_nopushb <- SB_full_clean %>%
left_join(., Looks_dis, by="Target") %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin", "Logit.s")) %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(AOI=="AOI_Target") %>%
brm(Prop_no0 ~ 1 + Window1.s*Trial1.s*as.factor(TimeBin) + Logit.s + (1 + Window1.s + Trial1.s + as.factor(TimeBin)+ Logit.s || ParticipantName) + (1 + Window1.s+ Trial1.s + as.factor(TimeBin) || MediaName), family="beta", data=., iter = 4000, control=list(adapt_delta=.95, max_treedepth=20), file="models/m4_nopushb")
new_data <- expand_grid(Window1.s=c(-.5, .5), Trial1.s = c(-.5, .5), TimeBin=c(0, 1, 2, 3), Logit.s=0)
post <- as_tibble(fitted(m4_nopushb, new_data, scale="response", probs=c(.025, .975), re_formula=NA)) %>%
bind_cols(new_data, .) %>%
mutate(
Data = "Estimated"
)
raw_data <- SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "MediaName"), predictor_columns=c("Window", "Trial", "TimeBin")) %>%
filter(AOI=="AOI_Target") %>%
mutate(
Window1.s = ifelse(Window==1, yes=.5, no = -.5),
Trial1.s = ifelse(Trial==1, yes=.5, no=-.5),
Prop_no0 = ((SamplesInAOI/SamplesTotal)*(SamplesTotal-1) + (1/2))/SamplesTotal
) %>%
filter(SamplesTotal > 0) %>%
group_by(Window1.s, Trial1.s, ParticipantName, TimeBin) %>%
mutate(
M = mean(Prop_no0, na.rm=TRUE)
) %>%
slice(1) %>%
ungroup(ParticipantName) %>%
summarise(
Estimate = mean(M),
Est.Error = sd(M)/sqrt(91),
Q2.5 = Estimate - 1.97*(sd(M)/sqrt(91)),
Q97.5 = Estimate + 1.97*(sd(M)/sqrt(91))
) %>%
mutate(
Data = "Raw"
)
## Analyzing AOI_Target...
## Analyzing AOI_Distracter...
## `summarise()` has grouped output by 'Window1.s', 'Trial1.s'. You can override using the `.groups` argument.
post <- bind_rows(post, raw_data)
post %>%
mutate(
Window = ifelse(Window1.s == .5, yes= "Window 1", no = "Window 2"),
Trial = ifelse(Trial1.s == .5, yes= "Trial 1", no = "Trial 2"),
Time_Bin = ifelse(TimeBin==0, "0-2s",
ifelse(TimeBin==1, "2-4s",
ifelse(TimeBin==2, "4-6s", "6-8s"))),
Condition = paste(Window, Time_Bin, Data, sep = ", ")
) %>%
ggplot(aes(y = Condition, x = Estimate, xmin=Q2.5, xmax=Q97.5, colour=Data)) + facet_wrap(~Trial) +
geom_pointinterval(stat="identity") +
geom_vline(xintercept = .50)
Next we’ll start looking at the models of individual differences. Ideally we’d be able to include each trial, or each window, as a separate observation for each participant. However, one thing that looked strange in the models above was how small the random intercept variance was.
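To get a feel for that scale, here’s a quick back-of-the-envelope check (a sketch, plugging in the posterior means for the Intercept and sd(Intercept) from the summary above): a participant two SDs above or below the grand mean barely moves on the proportion scale.
plogis(0.19 + c(-2, 2) * 0.10) # roughly 0.50 and 0.60
Let’s take a look at the correlation between each Trial-by-Window cell for each participant.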
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "Trial", "Window"), aois="AOI_Target") %>%
select(ParticipantName, Trial, Window, Prop) %>%
mutate(
Window = ifelse(Window==1, yes = "Window1", no ="Window2"),
Trial = ifelse(Trial==1, yes="Trial1", no="Trial2")
) %>%
pivot_wider(id_cols=ParticipantName, names_from=c(Trial, Window), values_from=Prop) %>%
select(starts_with("Trial")) %>%
ggpairs()
## Warning: Removed 9 rows containing non-finite values (stat_density).
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 9 rows containing missing values
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 9 rows containing missing values
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 9 rows containing missing values
## Warning: Removed 9 rows containing missing values (geom_point).
## Warning: Removed 3 rows containing non-finite values (stat_density).
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 3 rows containing missing values
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 3 rows containing missing values
## Warning: Removed 9 rows containing missing values (geom_point).
## Warning: Removed 3 rows containing missing values (geom_point).
## Warning: Removed 9 rows containing missing values (geom_point).
## Warning: Removed 3 rows containing missing values (geom_point).
Let’s try collapsing across windows.
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "Trial"), aois="AOI_Target") %>%
mutate(
Trial = ifelse(Trial==1, yes="Trial 1", no="Trial 2")
) %>%
select(ParticipantName, Trial, Prop) %>%
pivot_wider(id_cols=ParticipantName, names_from=Trial, values_from=Prop) %>%
select(starts_with("Trial")) %>%
ggpairs()
## Warning: Removed 3 rows containing non-finite values (stat_density).
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 3 rows containing missing values
## Warning: Removed 3 rows containing missing values (geom_point).
Trials are negatively correlated.
SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName", "Window"), aois="AOI_Target") %>%
mutate(
Window = ifelse(Window==1, yes="Window 1", no="Window 2")
) %>%
select(ParticipantName, Window, Prop) %>%
pivot_wider(id_cols=ParticipantName, names_from=Window, values_from=Prop) %>%
select(starts_with("Window")) %>%
ggpairs()
SB_full_clean %>%
filter(Target != "Push" & Distractor != "Push") %>%
make_time_window_data(summarize_by=c("ParticipantName", "Window"), aois="AOI_Target") %>%
mutate(
Window = ifelse(Window==1, yes="Window 1", no="Window 2")
) %>%
select(ParticipantName, Window, Prop) %>%
pivot_wider(id_cols=ParticipantName, names_from=Window, values_from=Prop) %>%
select(starts_with("Window")) %>%
ggpairs()
## Warning: Removed 6 rows containing non-finite values (stat_density).
## Warning in ggally_statistic(data = data, mapping = mapping, na.rm = na.rm, :
## Removed 6 rows containing missing values
## Warning: Removed 6 rows containing missing values (geom_point).
Given the relatively low correlation between trials, I think it’s going to be really hard to identify the mixture model. I’m therefore going to include only the average proportion of looks to target.
Data set
df_mixture <- SB_full_clean %>%
make_time_window_data(summarize_by=c("ParticipantName"), aois="AOI_Target")
prior_mixed= c(
prior(normal(0, 1), class=sd),
prior(gamma(3.5, .5), class=phi)
)
m_mixed <- brm(
  Prop ~ 1 + (1 | ParticipantName), prior=prior_mixed, control=list(adapt_delta=.9, max_treedepth=20), iter=40000, chains=8, family="beta", data= df_mixture, save_pars = save_pars(all = TRUE), file="models/m_mixed")
summary(m_mixed)
## Warning: There were 16 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See http://mc-stan.org/misc/
## warnings.html#divergent-transitions-after-warmup
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ 1 + (1 | ParticipantName)
## Data: df_mixture (Number of observations: 92)
## Samples: 8 chains, each with iter = 40000; warmup = 20000; thin = 1;
## total post-warmup samples = 160000
##
## Group-Level Effects:
## ~ParticipantName (Number of levels: 92)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.08 0.06 0.00 0.22 1.00 49768 75001
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.04 0.04 -0.05 0.13 1.00 212637 118761
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 22.70 3.50 16.61 30.32 1.00 113756 83347
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
The random-intercept SD and phi are correlated, which is what I expected; however, they are certainly not perfectly correlated, suggesting that they’re not simply trading off against each other.
library(ggpubr)
posterior_samples(m_mixed) %>%
select(sd_ParticipantName__Intercept, phi) %>%
ggplot(aes(x=sd_ParticipantName__Intercept, y = phi)) +
geom_point() + stat_cor(method = "pearson", label.x = .1, label.y = 40)
Visualize prior on phi
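First, a quick numerical summary of the gamma(3.5, 0.5) prior (a sketch; R’s qgamma uses the same shape/rate parameterisation as the Stan/brms prior):
3.5 / 0.5                                      # prior mean of phi = 7
sqrt(3.5 / 0.5^2)                              # prior sd, about 3.74
qgamma(c(.025, .975), shape = 3.5, rate = 0.5) # central 95% interval, roughly 1.7 to 16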
temp <- expression("Prior Density of" ~ phi)
(p_gamma <- tibble(
  Phi = seq(1, 40, length=200)
) %>%
  mutate(
    density = dgamma(Phi, 3.5, .5)
  ) %>%
  ggplot(aes(x=Phi, ymin=0, ymax=density)) +
  geom_ribbon(aes(x=Phi)) +
  labs(title = temp)) # use the phi expression defined above as the title
(phis <-tibble(
Phi = rep(c(rep(2, 100), rep(7,100), rep(12,100), rep(17, 100), rep(22, 100)), 3),
mu = c(rep(.25, 500), rep(.5, 500), rep(.75, 500))
) %>%
mutate(
Prob = rbeta(1500, mu*Phi, (1-mu)*Phi)
) %>%
ggplot(aes(x=Prob)) +
geom_histogram() + facet_grid(Phi~mu) +
labs(title="Simulated data at varying values of mu and phi")
)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
p_gamma + phis +
plot_layout(widths = c(1, 2))
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Mixture distribution: discrete individual differences.
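For reference, brms parameterises each Beta component by its mean $\mu_k$ (logit link) and precision $\phi_k$, i.e. shape parameters $\mu_k\phi_k$ and $(1-\mu_k)\phi_k$. The two-component mixture fit below therefore models each participant’s overall proportion $y_i$ as

$$p(y_i) = \theta_1\,\mathrm{Beta}\big(y_i \mid \mu_1\phi_1,\ (1-\mu_1)\phi_1\big) + \theta_2\,\mathrm{Beta}\big(y_i \mid \mu_2\phi_2,\ (1-\mu_2)\phi_2\big), \qquad \theta_1 + \theta_2 = 1,$$

with the components ordered so that $\mu_1 < \mu_2$; $\theta_2$ is then the estimated share of participants in the higher-looking component.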
priors_mixture <- c(
prior(normal(0, .05), class=Intercept, dpar=mu1),
prior(normal(.4, .15), class=Intercept, dpar=mu2),
prior(dirichlet(2), class=theta),
prior(gamma(3.5, .5), class=phi1),
prior(gamma(3.5, .5), class=phi2)
)
mix = mixture(Beta(link_phi="log"), # define mixture distribution
Beta(link_phi="log")
)
## Setting order = 'mu' for mixtures of the same family.
m_mixture <- brm(
  bf(Prop ~ 1), control=list(adapt_delta=.9, max_treedepth=20), prior =priors_mixture, iter=40000, chains=8, family=mix, data= df_mixture, save_pars = save_pars(all = TRUE), file="models/m_mixture")
pp_check(m_mixture)
## Using 10 posterior samples for ppc type 'dens_overlay' by default.
pp_check(m_mixed)
## Using 10 posterior samples for ppc type 'dens_overlay' by default.
summary(m_mixture)
## Family: mixture(beta, beta)
## Links: mu1 = logit; phi1 = identity; mu2 = logit; phi2 = identity; theta1 = identity; theta2 = identity
## Formula: Prop ~ 1
## Data: df_mixture (Number of observations: 92)
## Samples: 8 chains, each with iter = 40000; warmup = 20000; thin = 1;
## total post-warmup samples = 160000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## mu1_Intercept 0.01 0.04 -0.07 0.08 1.00 78845 83160
## mu2_Intercept 0.33 0.15 0.06 0.62 1.00 44294 33563
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi1 23.10 4.70 12.32 32.14 1.00 55437 23616
## phi2 11.41 5.34 3.50 24.21 1.00 45850 44810
## theta1 0.83 0.18 0.19 0.98 1.00 40195 20850
## theta2 0.17 0.18 0.02 0.81 1.00 40195 20850
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
loo(m_mixed, m_mixture)
## Output of model 'm_mixed':
##
## Computed from 160000 by 92 log-likelihood matrix
##
## Estimate SE
## elpd_loo 82.5 5.7
## p_loo 5.8 1.0
## looic -165.1 11.3
## ------
## Monte Carlo SE of elpd_loo is 0.0.
##
## Pareto k diagnostic values:
## Count Pct. Min. n_eff
## (-Inf, 0.5] (good) 91 98.9% 52094
## (0.5, 0.7] (ok) 1 1.1% 26233
## (0.7, 1] (bad) 0 0.0% <NA>
## (1, Inf) (very bad) 0 0.0% <NA>
##
## All Pareto k estimates are ok (k < 0.7).
## See help('pareto-k-diagnostic') for details.
##
## Output of model 'm_mixture':
##
## Computed from 160000 by 92 log-likelihood matrix
##
## Estimate SE
## elpd_loo 82.3 5.3
## p_loo 1.3 0.2
## looic -164.6 10.5
## ------
## Monte Carlo SE of elpd_loo is 0.0.
##
## All Pareto k estimates are good (k < 0.5).
## See help('pareto-k-diagnostic') for details.
##
## Model comparisons:
## elpd_diff se_diff
## m_mixed 0.0 0.0
## m_mixture -0.2 0.6
model_weights(m_mixed, m_mixture)
## Warning: Some Pareto k diagnostic values are slightly high. See help('pareto-k-diagnostic') for details.
## m_mixed m_mixture
## 0.994195283 0.005804717
df_mixture2 <- left_join(df_mixture, mean_rt_short, by="ParticipantName") %>%
mutate(
RTz = (RT24 - mean(RT24, na.rm=TRUE))/sd(RT24, na.rm=TRUE)
)
priors_mixture <- c(
prior(normal(0, .05), class=Intercept, dpar=mu1),
prior(normal(.4, .15), class=Intercept, dpar=mu2),
prior(normal(0, .5), class=b, dpar=mu2),
prior(dirichlet(2), class=theta),
prior(gamma(3.5, .5), class = phi1),
prior(gamma(3.5, .5), class= phi2)
)
mix = mixture(Beta, Beta)
## Setting order = 'mu' for mixtures of the same family.
m_mixture_LWL <- brm(
bf(Prop ~ 1,
mu1 ~ 1,
mu2 ~ 1 + RTz),
control=list(adapt_delta=.9, max_treedepth=20),
prior =priors_mixture, family=mix,
data= df_mixture2,
file="models/m_mixture_LWL")
summary(m_mixture_LWL)
## Family: mixture(beta, beta)
## Links: mu1 = logit; phi1 = identity; mu2 = logit; phi2 = identity; theta1 = identity; theta2 = identity
## Formula: Prop ~ 1
## mu1 ~ 1
## mu2 ~ 1 + RTz
## Data: df_mixture2 (Number of observations: 88)
## Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup samples = 4000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## mu1_Intercept 0.00 0.04 -0.07 0.07 1.00 2239 2237
## mu2_Intercept 0.34 0.14 0.07 0.64 1.00 1757 1037
## mu2_RTz 0.13 0.30 -0.59 0.69 1.00 2430 1286
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi1 23.39 4.16 15.86 32.17 1.00 2281 1899
## phi2 10.70 4.94 3.26 22.18 1.00 1907 2120
## theta1 0.87 0.12 0.55 0.99 1.00 1263 1362
## theta2 0.13 0.12 0.01 0.45 1.00 1263 1362
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
prior_mixed= c(
prior(normal(0, 1), class=sd),
prior(gamma(3.5, .5), class=phi))
m_mixed_LWL <- brm(
Prop ~ 1 + RTz + (1 | ParticipantName), prior=prior_mixed, control=list(adapt_delta=.9, max_treedepth=20), iter=20000, chains=8, family="beta", data= df_mixture2,
file="models/m_mixed_LWL")
summary(m_mixed_LWL)
## Warning: There were 4 divergent transitions after warmup. Increasing adapt_delta
## above 0.9 may help. See http://mc-stan.org/misc/warnings.html#divergent-
## transitions-after-warmup
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ 1 + RTz + (1 | ParticipantName)
## Data: df_mixture2 (Number of observations: 88)
## Samples: 8 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 80000
##
## Group-Level Effects:
## ~ParticipantName (Number of levels: 88)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.08 0.06 0.00 0.22 1.00 25689 37394
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.03 0.05 -0.06 0.12 1.00 112960 60992
## RTz 0.06 0.05 -0.03 0.15 1.00 111875 58089
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 22.51 3.54 16.32 30.20 1.00 64930 43917
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
df_mixture <- full_join(df_mixture, CDI_byverb_short, by="ParticipantName")
priors_mixture2 <- c(
prior(normal(0, .05), class=Intercept, dpar=mu1),
prior(normal(.4, .15), class=Intercept, dpar=mu2),
prior(gamma(3.5, .5), class = phi1),
prior(gamma(3.5, .5), class= phi2),
prior(normal(0, .5), class = b, dpar=theta2)
)
df_mixture$verbs_prop <- df_mixture$total_verbs/max(df_mixture$total_verbs, na.rm=TRUE) # scale to [0, 1] so the slope spans the full vocabulary range
m_mixture_Vocab <- brm(
bf(Prop ~ 1,
theta2 ~ verbs_prop),
control=list(adapt_delta=.9, max_treedepth=20),
prior=priors_mixture2,
family=mix,
sample_prior=T,
chains=8,
iter=5000,
data= df_mixture,
file = "models/m_mixture_Vocab")
summary(m_mixture_Vocab)
## Family: mixture(beta, beta)
## Links: mu1 = logit; phi1 = identity; mu2 = logit; phi2 = identity; theta1 = identity; theta2 = identity
## Formula: Prop ~ 1
## theta2 ~ verbs_prop
## Data: df_mixture (Number of observations: 88)
## Samples: 8 chains, each with iter = 5000; warmup = 2500; thin = 1;
## total post-warmup samples = 20000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## mu1_Intercept 0.02 0.04 -0.06 0.09 1.00 11222 10587
## mu2_Intercept 0.36 0.15 0.07 0.66 1.00 4380 2737
## theta2_Intercept -2.70 1.91 -6.18 2.68 1.00 4002 1373
## theta2_verbs_prop -0.06 0.50 -1.05 0.91 1.00 13866 13116
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi1 22.16 4.74 8.89 30.90 1.00 4590 1496
## phi2 9.93 5.26 2.59 23.34 1.00 4534 2804
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
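A note on the scale of theta2_Intercept (my reading of brms’s mixture parameterisation, so treat this as a sketch): when theta is given a formula, the mixing weights are modelled via a softmax with the first component as reference, which with two components reduces to a logistic. The implied weight of the second component at verbs_prop = 0 would then be roughly:
plogis(-2.70) # assumes a softmax/logit link for theta with component 1 as reference; ~ 0.06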
prior_mixed= c(
prior(normal(0, 1), class=sd),
prior(gamma(3.5, .5), class=phi))
m_mixed_Vocab <- brm(
Prop ~ 1 + verbs_prop + (1 | ParticipantName), prior=prior_mixed, control=list(adapt_delta=.9, max_treedepth=20), iter=20000, chains=8, family="beta", data= df_mixture,
file="models/m_mixed_Vocab")
summary(m_mixed_Vocab)
## Warning: There were 6 divergent transitions after warmup. Increasing adapt_delta
## above 0.9 may help. See http://mc-stan.org/misc/warnings.html#divergent-
## transitions-after-warmup
## Family: beta
## Links: mu = logit; phi = identity
## Formula: Prop ~ 1 + verbs_prop + (1 | ParticipantName)
## Data: df_mixture (Number of observations: 88)
## Samples: 8 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 80000
##
## Group-Level Effects:
## ~ParticipantName (Number of levels: 88)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.08 0.06 0.00 0.22 1.00 24417 34737
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 0.14 0.09 -0.04 0.33 1.00 100871 57182
## verbs_prop -0.17 0.16 -0.48 0.13 1.00 99876 58760
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## phi 22.52 3.56 16.37 30.28 1.00 56928 41812
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m_mixed_Vocab, "verbs_prop > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (verbs_prop) > 0 -0.17 0.16 -0.43 0.08 0.15 0.13
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
sessionInfo()
## R version 4.1.0 (2021-05-18)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 19043)
##
## Matrix products: default
##
## locale:
## [1] LC_COLLATE=Dutch_Netherlands.1252 LC_CTYPE=Dutch_Netherlands.1252
## [3] LC_MONETARY=Dutch_Netherlands.1252 LC_NUMERIC=C
## [5] LC_TIME=Dutch_Netherlands.1252
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] ggpubr_0.4.0 tidybayes_3.0.0 wesanderson_0.3.6 GGally_2.1.2
## [5] patchwork_1.1.1 forcats_0.5.1 stringr_1.4.0 purrr_0.3.4
## [9] readr_2.0.0 tidyr_1.1.3 tibble_3.1.3 tidyverse_1.3.1
## [13] brms_2.15.0 Rcpp_1.0.7 psych_2.1.6 ggplot2_3.3.5
## [17] eyetrackingR_0.1.8 dplyr_1.0.7
##
## loaded via a namespace (and not attached):
## [1] readxl_1.3.1 backports_1.2.1 plyr_1.8.6
## [4] igraph_1.2.6 lazyeval_0.2.2 svUnit_1.0.6
## [7] splines_4.1.0 crosstalk_1.1.1 rstantools_2.1.1
## [10] inline_0.3.19 digest_0.6.27 htmltools_0.5.1.1
## [13] rsconnect_0.8.24 fansi_0.5.0 checkmate_2.0.0
## [16] magrittr_2.0.1 openxlsx_4.2.4 tzdb_0.1.2
## [19] modelr_0.1.8 RcppParallel_5.1.4 matrixStats_0.60.0
## [22] vroom_1.5.3 xts_0.12.1 prettyunits_1.1.1
## [25] colorspace_2.0-2 rvest_1.0.1 ggdist_3.0.0
## [28] haven_2.4.3 xfun_0.24 callr_3.7.0
## [31] crayon_1.4.1 jsonlite_1.7.2 lme4_1.1-27.1
## [34] zoo_1.8-9 glue_1.4.2 gtable_0.3.0
## [37] emmeans_1.6.2-1 V8_3.4.2 distributional_0.2.2
## [40] car_3.0-11 pkgbuild_1.2.0 rstan_2.21.2
## [43] abind_1.4-5 scales_1.1.1 mvtnorm_1.1-2
## [46] DBI_1.1.1 rstatix_0.7.0 miniUI_0.1.1.1
## [49] xtable_1.8-4 tmvnsim_1.0-2 foreign_0.8-81
## [52] bit_4.0.4 stats4_4.1.0 StanHeaders_2.21.0-7
## [55] DT_0.18 htmlwidgets_1.5.3 httr_1.4.2
## [58] threejs_0.3.3 arrayhelpers_1.1-0 RColorBrewer_1.1-2
## [61] posterior_1.0.1 ellipsis_0.3.2 farver_2.1.0
## [64] reshape_0.8.8 pkgconfig_2.0.3 loo_2.4.1
## [67] sass_0.4.0 dbplyr_2.1.1 utf8_1.2.2
## [70] labeling_0.4.2 tidyselect_1.1.1 rlang_0.4.11
## [73] reshape2_1.4.4 later_1.2.0 munsell_0.5.0
## [76] cellranger_1.1.0 tools_4.1.0 cli_3.0.1
## [79] generics_0.1.0 broom_0.7.9 ggridges_0.5.3
## [82] evaluate_0.14 fastmap_1.1.0 yaml_2.2.1
## [85] bit64_4.0.5 processx_3.5.2 knitr_1.33
## [88] fs_1.5.0 zip_2.2.0 nlme_3.1-152
## [91] mime_0.11 projpred_2.0.2 xml2_1.3.2
## [94] compiler_4.1.0 bayesplot_1.8.1 shinythemes_1.2.0
## [97] rstudioapi_0.13 curl_4.3.2 gamm4_0.2-6
## [100] ggsignif_0.6.2 reprex_2.0.1 bslib_0.2.5.1
## [103] stringi_1.7.3 highr_0.9 ps_1.6.0
## [106] Brobdingnag_1.2-6 lattice_0.20-44 Matrix_1.3-3
## [109] nloptr_1.2.2.2 markdown_1.1 tensorA_0.36.2
## [112] shinyjs_2.0.0 vctrs_0.3.8 pillar_1.6.2
## [115] lifecycle_1.0.0 jquerylib_0.1.4 bridgesampling_1.1-2
## [118] estimability_1.3 data.table_1.14.0 httpuv_1.6.1
## [121] R6_2.5.0 promises_1.2.0.1 rio_0.5.27
## [124] gridExtra_2.3 codetools_0.2-18 boot_1.3-28
## [127] colourpicker_1.1.0 MASS_7.3-54 gtools_3.9.2
## [130] assertthat_0.2.1 withr_2.4.2 shinystan_2.5.0
## [133] mnormt_2.0.2 mgcv_1.8-35 parallel_4.1.0
## [136] hms_1.1.0 grid_4.1.0 coda_0.19-4
## [139] minqa_1.2.4 rmarkdown_2.9 carData_3.0-4
## [142] shiny_1.6.0 lubridate_1.7.10 base64enc_0.1-3
## [145] dygraphs_1.1.1.6