library(peekbankr)
library(lme4)
## Loading required package: Matrix
library(tictoc)
library(langcog)
##
## Attaching package: 'langcog'
## The following object is masked from 'package:base':
##
## scale
library(here)
## here() starts at /Users/mcfrank/Projects/peekbank/peekbank-paper
library(broom)
library(tidyverse)
knitr::opts_chunk$set(cache = FALSE, warning = FALSE, message = FALSE)
Our goal is to make developmental curves using the growth-modeling approach advocated by Mirman (2014).
load(file = "data/aoi_data_joined.Rds")
First, take only the familiar-word data.
fam_data <- aoi_data_joined %>%
  filter(age > 12, age <= 60,
         stimulus_novelty == "familiar") %>%
  mutate(age_binned = cut(age, seq(0, 60, 12)))
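As a quick sanity check (ours, not part of the original pipeline), the binning should give four populated 12-month bins; the (0,12] level exists but stays empty because of the age > 12 filter.

# Rows per age bin; (0,12] should be 0 given the filter above.
table(fam_data$age_binned)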
Check that the general curves look good.
means <- fam_data %>%
  group_by(t_norm, dataset_name, age_binned, stimulus_novelty) %>%
  summarise(n = sum(aoi %in% c("target", "distractor"), na.rm = TRUE),
            p = sum(aoi == "target", na.rm = TRUE),
            prop_looking = p / n,
            ci_lower = binom::binom.confint(p, n, method = "bayes")$lower,
            ci_upper = binom::binom.confint(p, n, method = "bayes")$upper)
ggplot(means,
       aes(x = t_norm, y = prop_looking)) +
  geom_line(aes(col = dataset_name)) +
  geom_ribbon(aes(ymin = ci_lower, ymax = ci_upper,
                  fill = dataset_name), alpha = .5) +
  facet_grid(age_binned ~ stimulus_novelty) +
  geom_hline(yintercept = .5, lty = 2) +
  geom_vline(xintercept = 0, lty = 2) +
  ylab("Proportion Target Looking") +
  xlab("Time (msec)") +
  theme_classic() +
  scale_color_solarized() +
  scale_fill_solarized()
While the curves look good, the words are very different across datasets, so the curves don’t line up perfectly. We need models to deal with this situation.
We’re going to use the empirical logit modeling approach from Mirman (2014), where we analyze
\[\text{elogit}(y, N) = \log\left(\frac{y + .5}{N - y + .5}\right)\]
as a linear variable, binned by time. In practice the histogram doesn’t look remotely normal, but the transform lets us use linear mixed models rather than logistic models, and logistic models currently won’t fit with this much data…
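For intuition, here is a quick illustrative sketch (not part of the analysis) of why the +.5 correction matters: unlike the raw log odds, the empirical logit stays finite at the boundary counts of a bin.

# Illustrative only: empirical logit for every possible target count in a
# hypothetical 10-sample bin. The +.5 correction keeps the transform finite
# at y = 0 and y = N, where log(y / (N - y)) would be -Inf / Inf.
n_bin <- 10
y_count <- 0:n_bin
log((y_count + .5) / (n_bin - y_count + .5))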
BIN_INTERVAL <- 100
T_RANGE <- c(0, 1500)

ms_timecourse <- fam_data %>%
  filter(aoi %in% c("target", "distractor"),
         # dataset_name %in% c("perry_cowpig","mahr_coartic"),
         t_norm >= T_RANGE[1],
         t_norm <= T_RANGE[2]) %>%
  # bin time into 100 ms windows, labeled by their midpoints
  mutate(t_window =
           as.numeric(as.character(
             cut(t_norm,
                 breaks = seq(T_RANGE[1], T_RANGE[2], BIN_INTERVAL),
                 labels = seq(T_RANGE[1] + BIN_INTERVAL / 2,
                              T_RANGE[2] - BIN_INTERVAL / 2,
                              BIN_INTERVAL)))),
         age_centered = age - 36) %>%
  group_by(dataset_id, administration_id, trial_id, t_window,
           age, age_centered, age_binned, english_stimulus_label) %>%
  # prop_target is rounded to 0/1 so it can feed the binomial glmer below;
  # wts is the variance of the empirical logit (Mirman, 2014), used as
  # inverse weights in the linear models
  summarise(prop_target = round(mean(aoi == "target")),
            num_target = sum(aoi == "target"),
            N = length(aoi),
            elogit = log((num_target + .5) / (N - num_target + .5)),
            wts = 1 / (num_target + .5) + 1 / (N - num_target + .5)) %>%
  filter(!is.na(t_window))
hist(ms_timecourse$prop_target)
hist(ms_timecourse$elogit)
Let’s visualize the data going into our model, just for kicks. We can see why this is tricky.
ggplot(ms_timecourse,
       aes(x = t_window, y = elogit, col = age_binned)) +
  stat_summary(fun.data = mean_se, geom = "pointrange") +
  stat_summary(geom = "line") +
  xlab("Time (ms)") +
  ylab("Target Looking (Empirical Logit)")
Now we make our orthogonal polynomials, with code straight from Mirman (2014). We make more than we need so we can try out different degrees.
POLY_DEG <- 5
ops <- poly(unique(ms_timecourse$t_window), POLY_DEG)
ops_tibble <- tibble(ot1 = ops[, 1],
                     ot2 = ops[, 2],
                     ot3 = ops[, 3],
                     ot4 = ops[, 4],
                     ot5 = ops[, 5],
                     t_window = unique(ms_timecourse$t_window))
ms_timecourse <- left_join(ms_timecourse, ops_tibble, by = "t_window")
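As a quick sanity check (ours, not Mirman’s): the columns returned by poly() are orthonormal, so their cross-product should be numerically the identity matrix.

# Off-diagonals should all be ~0 and the diagonal ~1.
round(crossprod(ops), 10)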
We explore a glmer over the discretized proportions. This fits slowly and has convergence issues even when we prune to the most minimal random-effect structure. In particular, I wasn’t able to get any random slopes in there, which seems like a deal-breaker. So let’s skip this.
mod_prop <- glmer(prop_target ~ (ot1 + ot2 + ot3) * age_centered +
                    (1 | dataset_id) +
                    (1 | english_stimulus_label),
                  family = "binomial",
                  data = ms_timecourse)
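For the record, one thing worth trying before abandoning the binomial model (an untested sketch on our part) is swapping optimizers and raising the iteration cap via glmerControl:

# Untested convergence workaround: bobyqa with a higher function-evaluation
# cap. No guarantee it helps at this data size.
mod_prop_bobyqa <- glmer(prop_target ~ (ot1 + ot2 + ot3) * age_centered +
                           (1 | dataset_id) +
                           (1 | english_stimulus_label),
                         family = "binomial",
                         control = glmerControl(optimizer = "bobyqa",
                                                optCtrl = list(maxfun = 1e5)),
                         data = ms_timecourse)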
The elogit is quite similar in distribution… I’m not sure we can legitimately use a linear link, but we adopt it for now.
tic()
elogit <- lmer(elogit ~ (ot1 + ot2 + ot3) * age_centered +
                 (1 | administration_id) +
                 (1 | dataset_id) +
                 (1 | english_stimulus_label),
               weights = 1 / wts,
               data = ms_timecourse)
toc()
## 10.782 sec elapsed
Let’s look at the model.
summary(elogit)
## Linear mixed model fit by REML ['lmerMod']
## Formula: elogit ~ (ot1 + ot2 + ot3) * age_centered + (1 | administration_id) +
## (1 | dataset_id) + (1 | english_stimulus_label)
## Data: ms_timecourse
## Weights: 1/wts
##
## REML criterion at convergence: 1249536
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3049 -1.0838 0.4512 0.8241 2.0543
##
## Random effects:
## Groups Name Variance Std.Dev.
## administration_id (Intercept) 0.153608 0.39193
## english_stimulus_label (Intercept) 0.242539 0.49248
## dataset_id (Intercept) 0.009124 0.09552
## Residual 1.728908 1.31488
## Number of obs: 298074, groups:
## administration_id, 1241; english_stimulus_label, 151; dataset_id, 10
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 0.5306870 0.0557820 9.514
## ot1 1.8682077 0.0167483 111.546
## ot2 -0.1068669 0.0166876 -6.404
## ot3 -0.4154466 0.0166914 -24.890
## age_centered 0.0184593 0.0012408 14.878
## ot1:age_centered 0.0263506 0.0011017 23.918
## ot2:age_centered 0.0006773 0.0010985 0.617
## ot3:age_centered -0.0038702 0.0010984 -3.523
##
## Correlation of Fixed Effects:
## (Intr) ot1 ot2 ot3 ag_cnt ot1:g_ ot2:g_
## ot1 -0.001
## ot2 0.001 -0.003
## ot3 0.000 0.008 -0.002
## age_centerd 0.148 0.000 0.004 0.000
## ot1:g_cntrd -0.001 0.558 -0.003 0.006 0.001
## ot2:g_cntrd 0.001 -0.003 0.555 -0.003 0.004 0.000
## ot3:g_cntrd 0.000 0.006 -0.003 0.556 0.000 0.007 0.001
Now let’s look at model fits. I want to get this back into proportion space, but I confess that I haven’t managed it (a possible route is sketched after the plot).
elogit_data <- ms_timecourse
elogit_data$fit <- fitted(elogit)

ggplot(elogit_data,
       aes(x = t_window, y = elogit, col = age_binned)) +
  stat_summary(fun.data = mean_se, geom = "pointrange") +
  stat_summary(aes(y = fit), fun = mean, geom = "line") +
  geom_hline(yintercept = 0, col = "black", lty = 2) +
  # ylim(0, 1) +
  xlab("Time (ms)") +
  ylab("Target Looking (Empirical Logit)")
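One possible route back to proportion space (a sketch of ours, not something validated here): invert the transform bin by bin using each bin’s N. From the definition above, exp(elogit) = (y + .5) / (N - y + .5), which we can solve for y and divide by N; for large N this approaches plogis(elogit).

# Sketch: approximate inverse of the empirical logit, carrying each bin's N.
inv_elogit <- function(e, N) {
  y <- ((N + .5) * exp(e) - .5) / (1 + exp(e))
  y / N
}
# e.g., inv_elogit(0, 20) is .5
elogit_data$fit_prop <- inv_elogit(elogit_data$fit, elogit_data$N)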
Plot model predictions from fixed effects.
newdata <- left_join(expand_grid(t_window = seq(50, 1450, 100),
                                 age_centered = c(18, 30, 42, 54) - 36),
                     ops_tibble, by = "t_window")
newdata$pred <- predict(elogit, newdata = newdata, re.form = NA)

ggplot(newdata,
       aes(x = t_window, y = pred, col = factor(age_centered + 36))) +
  geom_line()
Performance is slow here so we can’t really add too much more in the way of bells and whistles, but I think we need them! So let’s try Julia.
library(jglmm)

# machine-specific Julia location
options(JULIA_HOME = "/Applications/Julia-1.5.app/Contents/Resources/julia/bin")
jglmm_setup()

# convert numeric IDs to character so they're treated as grouping factors
ms_timecourse$administration_id <- as.character(ms_timecourse$administration_id)
ms_timecourse$dataset_id <- as.character(ms_timecourse$dataset_id)

tic()
elogit_jl <- jglmm(elogit ~ (ot1 + ot2 + ot3) * age_centered +
                     (1 | administration_id) +
                     (1 | dataset_id) +
                     (1 | english_stimulus_label),
                   family = "normal",
                   weights = 1 / ms_timecourse$wts,
                   data = ms_timecourse)
toc()
## 28.018 sec elapsed
Surprisingly, dataset ID has very low variance. That suggests that maybe we don’t need to model it as carefully? Or perhaps it’s because a simple intercept for dataset doesn’t tell you much, and some random slopes would be good?
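To put a number on “very low” (computed by hand from the variance components in the printout below): dataset_id accounts for well under 1% of the total variance.

# Variance components copied from the model printout; dataset_id is ~0.4%
# of the total.
vc <- c(administration = 0.1535, stimulus = 0.2414,
        dataset = 0.0083, residual = 1.7289)
round(vc / sum(vc), 3)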
elogit_jl$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + (1 | administration_id) + (1 | dataset_id) + (1 | english_stimulus_label)
## logLik -2 logLik AIC AICc BIC
## -624732.2656 1249464.5312 1249488.5312 1249488.5322 1249615.7923
##
## Variance components:
## Column Variance Std.Dev.
## administration_id (Intercept) 0.1534844 0.3917709
## english_stimulus_label (Intercept) 0.2414288 0.4913540
## dataset_id (Intercept) 0.0083321 0.0912806
## Residual 1.7288737 1.3148664
## Number of obs: 298074; levels of grouping factors: 1241, 151, 10
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.530602 0.0549623 9.65 <1e-21
## ot1 1.86821 0.0167481 111.55 <1e-99
## ot2 -0.106867 0.0166874 -6.40 <1e-9
## ot3 -0.415449 0.0166912 -24.89 <1e-99
## age_centered 0.0184224 0.00123564 14.91 <1e-49
## ot1 & age_centered 0.0263506 0.00110168 23.92 <1e-99
## ot2 & age_centered 0.000677277 0.00109847 0.62 0.5375
## ot3 & age_centered -0.00387026 0.00109841 -3.52 0.0004
## ──────────────────────────────────────────────────────────────
The coefficients of this model match the lme4 fit above almost exactly.
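A quick eyeball check of that claim (assuming jglmm’s tidy() returns just the fixed effects in model order, which the summaries above suggest):

# Fixed effects from lme4 and Julia side by side.
cbind(lme4 = fixef(elogit), julia = tidy(elogit_jl)$estimate)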
elogit_jl_data <- augment(elogit_jl)

ggplot(elogit_jl_data,
       aes(x = t_window, y = elogit, col = age_binned)) +
  stat_summary(fun.data = mean_se, geom = "pointrange") +
  stat_summary(aes(y = .fitted), fun = mean, geom = "line") +
  geom_hline(yintercept = 0, col = "black", lty = 2) +
  # ylim(0, 1) +
  xlab("Time (ms)") +
  ylab("Target Looking (Empirical Logit)")
Now we have a workflow that lets us fit a bigger class of models faster!
tic()
elogit_jl_4 <- jglmm(elogit ~ (ot1 + ot2 + ot3 + ot4) * age_centered +
                       (1 | administration_id) +
                       (1 | dataset_id) +
                       (1 | english_stimulus_label),
                     family = "normal",
                     weights = 1 / ms_timecourse$wts,
                     data = ms_timecourse)
toc()
## 11.112 sec elapsed
elogit_jl_4_data <- augment(elogit_jl_4)

ggplot(elogit_jl_4_data,
       aes(x = t_window, y = elogit, col = age_binned)) +
  stat_summary(fun.data = mean_se, geom = "pointrange") +
  stat_summary(aes(y = .fitted), fun = mean, geom = "line") +
  geom_hline(yintercept = 0, col = "black", lty = 2) +
  # ylim(0, 1) +
  xlab("Time (ms)") +
  ylab("Target Looking (Empirical Logit)")
tic()
elogit_jl_5 <- jglmm(elogit ~ (ot1 + ot2 + ot3 + ot4 + ot5) * age_centered +
                       (1 | administration_id) +
                       (1 | dataset_id) +
                       (1 | english_stimulus_label),
                     family = "normal",
                     weights = 1 / ms_timecourse$wts,
                     data = ms_timecourse)
toc()
## 20.34 sec elapsed
elogit_jl_5_data <- augment(elogit_jl_5)

ggplot(elogit_jl_5_data,
       aes(x = t_window, y = elogit, col = age_binned)) +
  stat_summary(fun.data = mean_se, geom = "pointrange") +
  stat_summary(aes(y = .fitted), fun = mean, geom = "line") +
  geom_hline(yintercept = 0, col = "black", lty = 2) +
  # ylim(0, 1) +
  xlab("Time (ms)") +
  ylab("Target Looking (Empirical Logit)")
elogit_jl$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + (1 | administration_id) + (1 | dataset_id) + (1 | english_stimulus_label)
## logLik -2 logLik AIC AICc BIC
## -624732.2656 1249464.5312 1249488.5312 1249488.5322 1249615.7923
##
## Variance components:
## Column Variance Std.Dev.
## administration_id (Intercept) 0.1534844 0.3917709
## english_stimulus_label (Intercept) 0.2414288 0.4913540
## dataset_id (Intercept) 0.0083321 0.0912806
## Residual 1.7288737 1.3148664
## Number of obs: 298074; levels of grouping factors: 1241, 151, 10
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.530602 0.0549623 9.65 <1e-21
## ot1 1.86821 0.0167481 111.55 <1e-99
## ot2 -0.106867 0.0166874 -6.40 <1e-9
## ot3 -0.415449 0.0166912 -24.89 <1e-99
## age_centered 0.0184224 0.00123564 14.91 <1e-49
## ot1 & age_centered 0.0263506 0.00110168 23.92 <1e-99
## ot2 & age_centered 0.000677277 0.00109847 0.62 0.5375
## ot3 & age_centered -0.00387026 0.00109841 -3.52 0.0004
## ──────────────────────────────────────────────────────────────
elogit_jl_4$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + ot4 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + ot4 & age_centered + (1 | administration_id) + (1 | dataset_id) + (1 | english_stimulus_label)
## logLik -2 logLik AIC AICc BIC
## -624725.6651 1249451.3302 1249479.3302 1249479.3316 1249627.8016
##
## Variance components:
## Column Variance Std.Dev.
## administration_id (Intercept) 0.1534825 0.3917685
## english_stimulus_label (Intercept) 0.2414130 0.4913379
## dataset_id (Intercept) 0.0083170 0.0911974
## Residual 1.7287970 1.3148372
## Number of obs: 298074; levels of grouping factors: 1241, 151, 10
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.530593 0.0549467 9.66 <1e-21
## ot1 1.86817 0.0167477 111.55 <1e-99
## ot2 -0.106421 0.016688 -6.38 <1e-9
## ot3 -0.415543 0.016691 -24.90 <1e-99
## ot4 0.0522724 0.0166799 3.13 0.0017
## age_centered 0.0184144 0.00123554 14.90 <1e-49
## ot1 & age_centered 0.0263524 0.00110166 23.92 <1e-99
## ot2 & age_centered 0.000697329 0.00109849 0.63 0.5256
## ot3 & age_centered -0.00388726 0.00109839 -3.54 0.0004
## ot4 & age_centered 0.000231514 0.00109798 0.21 0.8330
## ──────────────────────────────────────────────────────────────
elogit_jl_5$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + ot4 + ot5 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + ot4 & age_centered + ot5 & age_centered + (1 | administration_id) + (1 | dataset_id) + (1 | english_stimulus_label)
## logLik -2 logLik AIC AICc BIC
## -624711.6159 1249423.2319 1249455.2319 1249455.2337 1249624.9134
##
## Variance components:
## Column Variance Std.Dev.
## administration_id (Intercept) 0.1534911 0.3917794
## english_stimulus_label (Intercept) 0.2413880 0.4913125
## dataset_id (Intercept) 0.0083150 0.0911864
## Residual 1.7286332 1.3147749
## Number of obs: 298074; levels of grouping factors: 1241, 151, 10
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.530626 0.0549432 9.66 <1e-21
## ot1 1.86813 0.0167469 111.55 <1e-99
## ot2 -0.106541 0.0166873 -6.38 <1e-9
## ot3 -0.41484 0.0166911 -24.85 <1e-99
## ot4 0.0521837 0.0166792 3.13 0.0018
## ot5 0.0812289 0.0166662 4.87 <1e-5
## age_centered 0.0184124 0.00123553 14.90 <1e-49
## ot1 & age_centered 0.0263465 0.00110161 23.92 <1e-99
## ot2 & age_centered 0.000693069 0.00109844 0.63 0.5281
## ot3 & age_centered -0.00385405 0.00109839 -3.51 0.0005
## ot4 & age_centered 0.000212135 0.00109793 0.19 0.8468
## ot5 & age_centered 0.0010634 0.00109711 0.97 0.3324
## ──────────────────────────────────────────────────────────────
In conclusion, even 5th-order polynomials don’t completely capture all the weird stuff going on here. BIC actually increases for the higher-order models, though, so let’s not retain them. Interestingly, we only see interactions of age with ot1 and ot3.
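Tabulating the BICs copied from the printouts above makes the comparison concrete:

# BIC by polynomial degree (values from the model printouts above).
tribble(~degree, ~BIC,
        3, 1249615.79,
        4, 1249627.80,
        5, 1249624.91)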
Now let’s try adding some more random effects. Dataset age × time slopes feel important here. I tried adding random effects under dataset but totally struck out.
Following https://github.com/JuliaStats/MixedModels.jl/issues/127, I tried adding dataset_id as a fixed effect instead; that works but is gross. We would have to use some kind of coding so that the other effects are evaluated at the average dataset rather than dummy coding, as sketched below…
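Here is a minimal sketch of that deviation-coding idea in R (not run; ms_coded is just an illustrative copy): with sum contrasts, the intercept and the other fixed effects are evaluated at the unweighted average dataset rather than at a reference level.

# Sum (deviation) contrasts for dataset_id, on a copy so we don't disturb
# the character coding jglmm expects above.
ms_coded <- ms_timecourse
ms_coded$dataset_id <- factor(ms_coded$dataset_id)
contrasts(ms_coded$dataset_id) <- contr.sum(nlevels(ms_coded$dataset_id))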
After discussion, it seems like we can have random slopes, but only for one grouping factor at a time.
tic()
elogit_jl_re_stim <- jglmm(elogit ~ (ot1 + ot2 + ot3) * age_centered +
                             # (1 | administration_id) +
                             # (1 | dataset_id) +
                             (age_centered | english_stimulus_label),
                           family = "normal",
                           weights = 1 / ms_timecourse$wts,
                           data = ms_timecourse)
toc()
## 3.111 sec elapsed
tic()
elogit_jl_re_dataset <- jglmm(elogit ~ (ot1 + ot2 + ot3) * age_centered +
                                # (1 | administration_id) +
                                (age_centered | dataset_id),
                                # (age_centered | english_stimulus_label),
                              family = "normal",
                              weights = 1 / ms_timecourse$wts,
                              data = ms_timecourse)
toc()
## 1.714 sec elapsed
elogit_jl_re_stim$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + (1 + age_centered | english_stimulus_label)
## logLik -2 logLik AIC AICc BIC
## -627680.3734 1255360.7467 1255384.7467 1255384.7478 1255512.0079
##
## Variance components:
## Column Variance Std.Dev. Corr.
## english_stimulus_label (Intercept) 0.4273777 0.6537413
## age_centered 0.0035975 0.0599792 -0.41
## Residual 1.7755328 1.3324912
## Number of obs: 298074; levels of grouping factors: 151
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.55132 0.0612856 9.00 <1e-18
## ot1 1.86757 0.0169522 110.17 <1e-99
## ot2 -0.107724 0.0169008 -6.37 <1e-9
## ot3 -0.418204 0.0169087 -24.73 <1e-99
## age_centered 0.0111316 0.00529354 2.10 0.0355
## ot1 & age_centered 0.0262924 0.00111528 23.57 <1e-99
## ot2 & age_centered 0.000495612 0.00111259 0.45 0.6560
## ot3 & age_centered -0.00391094 0.00111277 -3.51 0.0004
## ──────────────────────────────────────────────────────────────
elogit_jl_re_dataset$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + (1 + age_centered | dataset_id)
## logLik -2 logLik AIC AICc BIC
## -630295.9296 1260591.8592 1260615.8592 1260615.8602 1260743.1203
##
## Variance components:
## Column Variance Std.Dev. Corr.
## dataset_id (Intercept) 0.7468486 0.8642040
## age_centered 0.0019211 0.0438309 +0.97
## Residual 1.8124722 1.3462809
## Number of obs: 298074; levels of grouping factors: 10
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.778145 0.276611 2.81 0.0049
## ot1 1.86622 0.0171251 108.98 <1e-99
## ot2 -0.1068 0.0170744 -6.25 <1e-9
## ot3 -0.416576 0.0170827 -24.39 <1e-99
## age_centered 0.0206815 0.0141355 1.46 0.1434
## ot1 & age_centered 0.0261372 0.00112666 23.20 <1e-99
## ot2 & age_centered 0.000541018 0.00112403 0.48 0.6303
## ot3 & age_centered -0.00385865 0.00112423 -3.43 0.0006
## ──────────────────────────────────────────────────────────────
Comparing BICs, we see that the item random effect is important. So let’s try to add more random slopes. In the end, though, these aren’t nearly as important as the extra random intercepts.
tic()
elogit_explore <- jglmm(elogit ~ (ot1 + ot2 + ot3) * age_centered +
                          # (1 | administration_id) +
                          # (1 | dataset_id) +
                          (age_centered * ot1 | english_stimulus_label),
                        family = "normal",
                        weights = 1 / ms_timecourse$wts,
                        data = ms_timecourse)
toc()
## 4.161 sec elapsed
elogit_explore$model
## Julia Object of type LinearMixedModel{Float64}.
## Linear mixed model fit by maximum likelihood
## elogit ~ 1 + ot1 + ot2 + ot3 + age_centered + ot1 & age_centered + ot2 & age_centered + ot3 & age_centered + (1 + age_centered + ot1 + age_centered & ot1 | english_stimulus_label)
## logLik -2 logLik AIC AICc BIC
## -626199.1724 1252398.3448 1252436.3448 1252436.3473 1252637.8416
##
## Variance components:
## Column Variance Std.Dev. Corr.
## english_stimulus_label (Intercept) 0.4493604 0.6703435
## age_centered 0.0037961 0.0616129 -0.43
## ot1 6.0745625 2.4646627 -0.24 +0.28
## age_centered & ot1 0.0630380 0.2510737 +0.24 -0.24 -0.69
## Residual 1.7521526 1.3236890
## Number of obs: 298074; levels of grouping factors: 151
##
## Fixed-effects parameters:
## ──────────────────────────────────────────────────────────────
## Coef. Std. Error z Pr(>|z|)
## ──────────────────────────────────────────────────────────────
## (Intercept) 0.544603 0.0625778 8.70 <1e-17
## ot1 1.97418 0.221975 8.89 <1e-18
## ot2 -0.107535 0.0167919 -6.40 <1e-9
## ot3 -0.419279 0.0167983 -24.96 <1e-99
## age_centered 0.0124103 0.00541733 2.29 0.0220
## ot1 & age_centered -0.0104212 0.021635 -0.48 0.6300
## ot2 & age_centered 0.000459846 0.00110542 0.42 0.6774
## ot3 & age_centered -0.00395697 0.0011055 -3.58 0.0003
## ──────────────────────────────────────────────────────────────
In the end, the simpler model with multiple random intercepts wins out over the others.
elogit_jl_data <- augment(elogit_jl)

ggplot(elogit_jl_data,
       aes(x = t_window, y = elogit, col = age_binned)) +
  stat_summary(fun.data = mean_se, geom = "pointrange") +
  stat_summary(aes(y = .fitted), fun = mean, geom = "line") +
  geom_hline(yintercept = 0, col = "black", lty = 2) +
  xlab("Time (ms)") +
  ylab("Target Looking (Empirical Logit)") +
  scale_color_solarized(name = "Age Group") +
  theme_mikabr()
Finally, we use the helper functions in misc/jglmm_helper.R to get predictions out of the Julia model and plot fixed-effect developmental curves.

library(JuliaCall)
library(glue)
source(here("misc/jglmm_helper.R"))

train_ranefs <- jglmm_ranef(elogit_jl, "dataset_id")
train_coefs <- tidy(elogit_jl)
# generic signature of the helper:
# jglmm_predict(train_coefs, train_ranefs, formula, testing_data, group)
newdata$pred <- jglmm_predict(elogit_jl, newdata = newdata)
ggplot(newdata,
       aes(x = t_window, y = pred, col = factor(age_centered + 36))) +
  geom_line()