Longitudinal Analysis: 42, 48, and 54 months

library(brms)
library(tidyverse)
library(readxl)
library(rmarkdown)
library(tidybayes)
library(plotrix)
library(dplyr)
library(GGally)
library(ggpubr)
library(patchwork)
library(sjPlot)
library(lubridate)
library(zoo)
library(broom.mixed)

1 Data Preparation

Open the prepped files – these come from DOD_42mo.RMD, DOD_48mo.RMD and DOD_54mo.RMD. In those files, I removed missing data, created a variable indicating whether each participant produced at least 1 DOD, and calculated the number of missing trials per participant. I then saved those datasets so I don’t have to re-run those processing steps.
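For reference, here is a minimal sketch of those prep steps (the real code lives in the .RMD files above; the raw-data object name and the exact coding of Prod_DOD are assumptions). The analysis below simply reads in the saved files.

# Sketch only – 'raw_42' and the precise coding of Prod_DOD are assumptions
DOD_42mo_prepped_sketch <- raw_42 %>%
  group_by(Blind_ID) %>%
  mutate(
    N_missing = sum(is.na(RESPONSE.CODE)),                        # missing trials per child
    Prod_DOD  = as.numeric(any(RESPONSE.CODE == 1, na.rm = TRUE)) # produced at least 1 DOD
  ) %>%
  ungroup() %>%
  filter(!is.na(RESPONSE.CODE))                                    # drop missing trials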

DOD_42mo_prepped <- read_excel("DOD_42mo_prepped.xlsx") %>%
  mutate(COVID.Zoom.session = NA) # column only exists in the 54-month file; add as NA so the files can be combined below
DOD_48mo_prepped <- read_excel("DOD_48mo_prepped.xlsx") %>%
  mutate(COVID.Zoom.session = NA) 
DOD_54mo_prepped <- read_excel("DOD_54mo_prepped.xlsx")

Add indicator variables for time (continuous and effects-coded), create a participant-by-session identifier, and select the relevant variables.

DOD_42mo<- DOD_42mo_prepped %>%
  mutate(
    Time = -1, 
    Time_c = "42",
    Eff1 = -.5,
    Eff2 = -.5,
    PartSession = paste(Blind_ID, Time, sep="")
  ) %>%
  dplyr::select("Blind_ID", "Time", "Eff1", "Eff2", "Time_c", "PartSession", "List", "Item", "Prime.verb", "Target.verb", "Prime.Type", "Verb.match", "RESPONSE.CODE", "Prime", "Overlap", "bias", "biasmatch", "biasmismatchc", "Trial_Problem", "COVID.Zoom.session")

DOD_48mo<- DOD_48mo_prepped %>%
  mutate(
    Time = 0, 
    Time_c = "48",
    Eff1 = .5,
    Eff2 = 0,
    PartSession = paste(Blind_ID, Time, sep="")
  ) %>%
  dplyr::select("Blind_ID", "Time", "Eff1", "Eff2", "Time_c", "PartSession",  "List", "Item", "Prime.verb", "Target.verb", "Prime.Type", "Verb.match", "RESPONSE.CODE", "Prime", "Overlap", "bias", "biasmatch", "biasmismatchc", "Trial_Problem", "COVID.Zoom.session")

DOD_54mo<- DOD_54mo_prepped %>%
  mutate(
    Time = 1, 
    Time_c = "54",
    Eff1 = 0, 
    Eff2 = .5,
    PartSession = paste(Blind_ID, Time, sep="")
  ) %>%
  dplyr::select("Blind_ID", "Time", "Eff1", "Eff2", "Time_c", "PartSession", "List", "Item", "Prime.verb", "Target.verb", "Prime.Type", "Verb.match", "RESPONSE.CODE", "Prime", "Overlap", "bias", "biasmatch", "biasmismatchc", "Trial_Problem", "COVID.Zoom.session")

Attach the Prod_DOD variable from the first session – this lets us later model the trajectory of only those children who produced at least one DOD at the first session (Section 2.3).

Combined = bind_rows(DOD_42mo, DOD_48mo, DOD_54mo)


Combined <- DOD_42mo_prepped %>%
  group_by(Blind_ID) %>%
  dplyr::select("Blind_ID", "Prod_DOD") %>%
  slice(1) %>%
  full_join(Combined, ., by = "Blind_ID")
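As a quick check (not part of the original output), the number of children who produced at least one DOD at the first session can be counted directly:

Combined %>%
  distinct(Blind_ID, Prod_DOD) %>%          # one row per child
  count(Produced_DOD_42 = Prod_DOD > 0)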

Plot effects by time point – to see whether the priming effect and the lexical boost seem to increase with age.

(p1 <- Combined %>% 
  group_by(Prime, Overlap, Time_c) %>%
  summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE), 
            ci_u = 1.92*(std.error(RESPONSE.CODE, na.rm=TRUE))) %>%
  
ggplot(aes(y=mean, x=Overlap, fill=Prime, ymin=mean, ymax=mean+ci_u)) +
  geom_bar(stat="identity",position="dodge") +
 geom_errorbar(width=.4,
                 position=position_dodge(.9)) +
  facet_wrap(~Time_c) +
  ylim(0, .4) + 
  theme_minimal() + 
  labs(y="Raw Mean") + 
  theme(legend.position = "none")
)
## `summarise()` has grouped output by 'Prime', 'Overlap'. You can override using
## the `.groups` argument.

2 Abstract Priming/Lexical Boost

2.1 Compare two random effects structures

Truncated Gaussian with an SD of 2 on the random-effect SDs. This keeps roughly 95% of the prior probability below 4; a random-effect SD greater than that is a bit wild on the logit scale.

logit_prior = prior(normal(0,2), class=sd)
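A quick check on that claim (a sanity check added here, not part of the original analysis): brms truncates this prior at zero for SD parameters, and the 95th percentile of a normal(0, 2) truncated at zero equals the untruncated distribution’s 97.5th percentile.

qnorm(.975, mean = 0, sd = 2) # ~3.92, so roughly 95% of the prior mass is below 4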

Model with nested random effects: session (Time_c) nested within participant (Blind_ID).

mlong0 <- brm(RESPONSE.CODE ~ 1 + (1 | Blind_ID/Time_c) + (1 | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          cores=4, 
          data=Combined, 
          seed=12345, 
          file="LONG/output/mlong0.rds")
mlong0 <- add_criterion(mlong0, "loo")
summary(mlong0, prob=.9)
## Warning: There were 1 divergent transitions after warmup. Increasing adapt_delta
## above 0.8 may help. See http://mc-stan.org/misc/warnings.html#divergent-
## transitions-after-warmup
##  Family: bernoulli 
##   Links: mu = logit 
## Formula: RESPONSE.CODE ~ 1 + (1 | Blind_ID/Time_c) + (1 | Target.verb) 
##    Data: Combined (Number of observations: 2865) 
##   Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
##          total post-warmup draws = 8000
## 
## Group-Level Effects: 
## ~Blind_ID (Number of levels: 100) 
##               Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.80      0.25     0.33     1.18 1.00      774      856
## 
## ~Blind_ID:Time_c (Number of levels: 261) 
##               Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     1.53      0.16     1.28     1.81 1.00     2131     3600
## 
## ~Target.verb (Number of levels: 6) 
##               Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     1.43      0.51     0.81     2.40 1.00     3685     5277
## 
## Population-Level Effects: 
##           Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS Tail_ESS
## Intercept    -2.90      0.63    -3.87    -1.87 1.00     3267     3913
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).

Random effects by participant, with no additional dependence between trials in a given session.

mlong0b <- brm(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          cores=4, 
          data=Combined, 
          seed=12345, 
          file="LONG/output/mlong0b.rds")
mlong0b <- add_criterion(mlong0b, "loo")
loo(mlong0, mlong0b)
## Output of model 'mlong0':
## 
## Computed from 8000 by 2865 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -841.0 31.3
## p_loo       147.4  7.2
## looic      1681.9 62.5
## ------
## Monte Carlo SE of elpd_loo is 0.2.
## 
## Pareto k diagnostic values:
##                          Count Pct.    Min. n_eff
## (-Inf, 0.5]   (good)     2851  99.5%   1918      
##  (0.5, 0.7]   (ok)         14   0.5%   1608      
##    (0.7, 1]   (bad)         0   0.0%   <NA>      
##    (1, Inf)   (very bad)    0   0.0%   <NA>      
## 
## All Pareto k estimates are ok (k < 0.7).
## See help('pareto-k-diagnostic') for details.
## 
## Output of model 'mlong0b':
## 
## Computed from 8000 by 2865 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -935.4 31.9
## p_loo        69.4  3.2
## looic      1870.7 63.8
## ------
## Monte Carlo SE of elpd_loo is 0.1.
## 
## All Pareto k estimates are good (k < 0.5).
## See help('pareto-k-diagnostic') for details.
## 
## Model comparisons:
##         elpd_diff se_diff
## mlong0    0.0       0.0  
## mlong0b -94.4      13.9
model_weights(mlong0, mlong0b)
## Warning: Some Pareto k diagnostic values are slightly high. See help('pareto-k-diagnostic') for details.
##     mlong0    mlong0b 
## 0.96416204 0.03583796

The model with within-session random effects (mlong0) fits much better – the elpd difference of 94.4 is about 6.8 times its standard error – so let’s go with that structure.

2.2 Full data

2.2.1 Model time with continuous predictor (centered at 48 months)

The hypothesized model was a linear decrease over time. However, given the time-point-specific models, this seems unlikely, so I fit this model and a model that treats time categorically (effects coding, Section 2.2.2), and compare their fit in Section 2.2.3.

mlong1 <- brm(RESPONSE.CODE ~ 1 + Time*Prime.Type*Overlap + 
            (1 +Prime.Type*Overlap  | Blind_ID/Time_c) + 
            (Time*Prime.Type*Overlap - 1 - Prime.Type*Overlap - Prime - Overlap | Blind_ID ) +
            (1 + Time*Prime.Type*Overlap   | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          cores=4,
          seed=12345,
          data=Combined, 
          file="LONG/output/mlong1.rds")
mlong1 <- add_criterion(mlong1, "loo")

There are a lot of random effects, so for readability I’m only returning the fixed effects (just get rid of $fixed to see everything). See the R Markdown file for the random effects.

summary(mlong1, prob=.9)$fixed
##                                  Estimate Est.Error   l-90% CI   u-90% CI
## Intercept                      -2.9574452 0.8581916 -4.2893150 -1.4912575
## Time                           -0.2578154 0.2884928 -0.7096679  0.1953325
## Prime.Type                      0.5401787 0.3385431  0.0143038  1.0865327
## OverlapOverlap                 -1.0804949 0.4402094 -1.7975419 -0.3933186
## Time:Prime.Type                 0.2444492 0.3835186 -0.3912053  0.8371745
## Time:OverlapOverlap             0.2222308 0.5225489 -0.5680081  1.0895593
## Prime.Type:OverlapOverlap       1.4057313 0.6571364  0.3817918  2.5009545
## Time:Prime.Type:OverlapOverlap -0.4170724 0.6807013 -1.5168999  0.6921657
##                                     Rhat Bulk_ESS Tail_ESS
## Intercept                      1.0011330 1808.528 2964.537
## Time                           1.0002473 3751.575 3491.544
## Prime.Type                     1.0009692 4786.192 4777.876
## OverlapOverlap                 1.0007144 3315.530 4389.785
## Time:Prime.Type                0.9999948 5725.508 3915.488
## Time:OverlapOverlap            1.0012429 4290.605 4446.727
## Prime.Type:OverlapOverlap      1.0015246 4493.258 4394.902
## Time:Prime.Type:OverlapOverlap 1.0001293 5265.871 4632.519

Test a directional hypothesis for each coefficient (the proportion of the posterior distribution in the hypothesized direction).

hypothesis(mlong1, "Time > 0")
## Hypothesis Tests for class b:
##   Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
## 1 (Time) > 0    -0.26      0.29    -0.71      0.2       0.19      0.16     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong1, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0     0.54      0.34     0.01     1.09      20.51      0.95
##   Star
## 1    *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong1, "Time:Prime.Type < 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type) < 0     0.24      0.38    -0.39     0.84       0.31
##   Post.Prob Star
## 1      0.24     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong1, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0     1.41      0.66     0.38      2.5      67.97
##   Post.Prob Star
## 1      0.99    *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong1, "Time:Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type:... > 0    -0.42      0.68    -1.52     0.69       0.32
##   Post.Prob Star
## 1      0.24     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.

There is an effect of prime type and a lexical boost (Prime.Type:Overlap), and nothing else – no interactions with time.

Plot Pareto’s k (a measure of the influence of each data point; values > .7 are problematic).

Combined %>%
  mutate(
    k = mlong1$criteria$loo$diagnostics$pareto_k
  ) %>%
  ggplot(aes(x=k)) + geom_histogram() + geom_vline(xintercept=.7)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

A few problematic observations, but the distribution is pretty continuous and there are no huge outliers (the model is re-fit without them in the sensitivity analyses).
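To see which observations cross that threshold (a follow-up check sketched here; it is not part of the original output):

Combined %>%
  mutate(k = mlong1$criteria$loo$diagnostics$pareto_k) %>%
  filter(k >= .7) %>%
  dplyr::select(Blind_ID, Time_c, Target.verb, Prime, Overlap, k)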

Get the abstract priming effect and lexical boost at each time point. I got the predicted value (on the logit scale) for each cell of the Time x Prime.Type x Overlap design, calculated the abstract priming effect and the lexical boost at each time point, then computed the proportion of posterior samples in the predicted direction.

df <- tibble( # Design matrix (minus intercept which is automatic)
  Overlap = factor(rep(c(0, 0, 1, 1), 3), labels = c("No Overlap", "Overlap")), 
  Prime.Type = rep(c(-.5, .5, -.5, .5), 3), 
  Time = c(-1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1)
)

ps_means <- posterior_linpred(mlong1, newdata=df, re_formula=NA) %>% # posterior samples of linear predictor. 
  t() %>%
  as_tibble() %>%
  bind_cols(df, .) %>%
  pivot_longer(starts_with("V"), names_to="sample", values_to="linpred") %>%
  mutate(
    Time_c = ifelse(Time == -1, yes="42", no = 
                      ifelse(Time==0, yes="48", no = "54")),
    Prime.Type = ifelse(Prime.Type == .5, yes="DOD", no = "POD"), 
    Overlap = ifelse(Overlap=="Overlap", yes="Ov", no = "No")
  )
## Warning: The `x` argument of `as_tibble.matrix()` must have unique column names if `.name_repair` is omitted as of tibble 2.0.0.
## Using compatibility `.name_repair`.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was generated.
ps_means %>%
  dplyr::select(-"Time") %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="linpred") %>%
  summarise(
    Priming_42 = sum(No_DOD_42 > No_POD_42)/8000,
    LB_42 = sum((No_DOD_42 - No_POD_42) < (Ov_DOD_42 - Ov_POD_42))/8000, 
    Priming_48 = sum(No_DOD_48 > No_POD_48)/8000,
    LB_48 = sum((No_DOD_48 - No_POD_48) < (Ov_DOD_48 - Ov_POD_48))/8000, 
    Priming_54 = sum(No_DOD_54 > No_POD_54)/8000,
    LB_54 = sum((No_DOD_54 - No_POD_54) < (Ov_DOD_54 - Ov_POD_54))/8000
  )
## # A tibble: 1 x 6
##   Priming_42 LB_42 Priming_48 LB_48 Priming_54 LB_54
##        <dbl> <dbl>      <dbl> <dbl>      <dbl> <dbl>
## 1      0.735 0.974      0.954 0.953      0.942 0.870

The lexical boost is evident from the earliest time point; there is no evidence of abstract priming until 48 months. Same pattern as the time-point-specific models.

Get effect sizes (risk ratios: the predicted probability of a DOD after a DOD prime divided by the predicted probability after a POD prime) for priming with and without lexical overlap, along with the proportion of the posterior for which each ratio is greater than 1.

ps_means <- ps_means %>%
    mutate(
    prob = inv_logit_scaled(linpred)
  ) %>%
  dplyr::select(-c("linpred", "Time")) %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="prob") %>%
  mutate(
    Abstract_42 = No_DOD_42/No_POD_42, 
    LB_42 = (Ov_DOD_42/Ov_POD_42), 
    Abstract_48 = No_DOD_48/No_POD_48,
    LB_48 = (Ov_DOD_48/Ov_POD_48),
    Abstract_54 = No_DOD_54/No_POD_54, 
    LB_54 = (Ov_DOD_54/Ov_POD_54)
  ) %>%
  dplyr::select("sample", starts_with(c("Abs", "LB"))) %>%
  pivot_longer(
    starts_with(c("Abs", "LB")), 
    names_to = c("Effect", "Time"), 
    names_sep = "_", 
    values_to = "RR"
  ) %>%
  mutate(
    Effect = ifelse(Effect=="Abstract", yes= "Abstract Priming", no = "Lexically Boosted Priming")
  ) 



# Risk Ratio of priming effect and lexically specified priming effect
ps_means %>%
  dplyr::select(-"sample") %>%
  group_by(Time, Effect) %>%
  median_qi(.width=.90)
## # A tibble: 6 x 8
##   Time  Effect                       RR .lower .upper .width .point .interval
##   <chr> <chr>                     <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 42    Abstract Priming           1.29  0.628   2.85    0.9 median qi       
## 2 42    Lexically Boosted Priming  7.45  1.75   37.5     0.9 median qi       
## 3 48    Abstract Priming           1.65  1.01    2.78    0.9 median qi       
## 4 48    Lexically Boosted Priming  6.46  2.44   19.5     0.9 median qi       
## 5 54    Abstract Priming           2.10  0.955   4.68    0.9 median qi       
## 6 54    Lexically Boosted Priming  5.46  1.35   24.8     0.9 median qi
ps_means %>% 
  mutate(
    Prop1 = ifelse(RR > 1, yes=1, no=0)
  ) %>%
  group_by(Time, Effect) %>%
  summarise(
    Prop = sum(Prop1)  / (sum(Prop1) + sum(Prop1==0))
  )
## `summarise()` has grouped output by 'Time'. You can override using the
## `.groups` argument.
## # A tibble: 6 x 3
## # Groups:   Time [3]
##   Time  Effect                     Prop
##   <chr> <chr>                     <dbl>
## 1 42    Abstract Priming          0.735
## 2 42    Lexically Boosted Priming 0.985
## 3 48    Abstract Priming          0.954
## 4 48    Lexically Boosted Priming 0.997
## 5 54    Abstract Priming          0.942
## 6 54    Lexically Boosted Priming 0.974

Priming with lexical overlap is present at 42 months (but no abstract priming).

Plot predicted values vs data

p1 <- p1 + theme(axis.title.x = element_blank()) 
p2 <- fitted(mlong1, re_formula=NA, scale="response") %>%
  cbind(Combined) %>%
  dplyr::select(Prime, Overlap, Time_c, Estimate, Est.Error) %>%
  ggplot(aes(y=Estimate, x=Overlap, fill=Prime )) +
  geom_bar(stat="identity",position="dodge") + 
  geom_errorbar(aes(ymin=Estimate, ymax=Estimate+Est.Error), width=.2,
                 position=position_dodge(.9)) + 
  facet_wrap(~Time_c) +
  theme_minimal() + 
  ylim(0, .4) + 
  labs(y="Predicted Probability") + 
  theme(legend.position = "none", 
        strip.text = element_blank())

p1/p2 

ggsave("plots/Fig4.png", last_plot(), dpi=500)
## Saving 7 x 5 in image

2.2.2 Effects coded

A problem with the linear model is that it makes a fairly strict assumption about the trajectory (that the change from 42 to 48 months equals the change from 48 to 54 months). Here I instead treat time categorically, using the two centered contrast codes (Eff1, Eff2) created in the data preparation above.
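For reference, these are the time codes the model uses (copied from the mutate() calls in the data preparation; both columns sum to zero across the three sessions):

tibble(
  Time_c = c("42", "48", "54"),
  Eff1   = c(-.5,  .5,   0),
  Eff2   = c(-.5,   0,  .5)
)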

mlong_eff <- brm(RESPONSE.CODE ~ 1 + (Eff1 + Eff2)*Prime.Type*Overlap + 
            (1 +Prime.Type*Overlap  | Blind_ID/Time_c) + 
            ((Eff1 + Eff2)*Prime.Type*Overlap - 1 - Prime.Type*Overlap - Prime - Overlap | Blind_ID ) +
            (1 + (Eff1 + Eff2)*Prime.Type*Overlap   | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          cores=4,
          seed=12345,
          data=Combined, 
          file="LONG/output/mlong_eff.rds")
mlong_eff <- add_criterion(mlong_eff, "loo")
summary(mlong_eff, prob=.9)$fixed
##                                  Estimate Est.Error    l-90% CI   u-90% CI
## Intercept                      -3.0320327 0.8319876 -4.34659136 -1.6465584
## Eff1                            0.5041162 0.5252200 -0.34476872  1.3510313
## Eff2                           -0.6193470 0.5884230 -1.57810644  0.3283375
## Prime.Type                      0.5885157 0.3447755  0.04603027  1.1624867
## OverlapOverlap                 -1.1336761 0.4500352 -1.87456896 -0.4225843
## Eff1:Prime.Type                -0.3275815 0.6908901 -1.45550465  0.7925713
## Eff2:Prime.Type                 0.6753457 0.8060354 -0.63371796  1.9582077
## Eff1:OverlapOverlap            -0.7423833 0.6117880 -1.73513108  0.2647022
## Eff2:OverlapOverlap             0.8449336 0.9160483 -0.58615606  2.4252293
## Prime.Type:OverlapOverlap       1.4260238 0.6395229  0.43369292  2.4955682
## Eff1:Prime.Type:OverlapOverlap  1.2005337 1.1112286 -0.62989796  3.0347812
## Eff2:Prime.Type:OverlapOverlap -1.5527558 1.3007521 -3.68146350  0.5371852
##                                    Rhat Bulk_ESS Tail_ESS
## Intercept                      1.001593 2034.677 3326.208
## Eff1                           1.000373 4376.357 4843.281
## Eff2                           1.000431 3381.309 4547.565
## Prime.Type                     1.000666 4514.640 4529.961
## OverlapOverlap                 1.001360 3277.694 4088.470
## Eff1:Prime.Type                1.001179 6101.814 4804.455
## Eff2:Prime.Type                1.000418 5740.219 4850.476
## Eff1:OverlapOverlap            1.000070 6111.833 5310.660
## Eff2:OverlapOverlap            1.000797 4795.115 4279.295
## Prime.Type:OverlapOverlap      1.000984 4747.517 4448.788
## Eff1:Prime.Type:OverlapOverlap 1.000460 5968.120 6018.510
## Eff2:Prime.Type:OverlapOverlap 1.000929 5136.886 5148.756
hypothesis(mlong_eff, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0     0.59      0.34     0.05     1.16      25.94      0.96
##   Star
## 1    *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong_eff, "Eff1:Prime.Type < 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff1:Prime.Type) < 0    -0.33      0.69    -1.46     0.79       2.21
##   Post.Prob Star
## 1      0.69     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong_eff, "Eff2:Prime.Type < 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff2:Prime.Type) < 0     0.68      0.81    -0.63     1.96       0.24
##   Post.Prob Star
## 1      0.19     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong_eff, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0     1.43      0.64     0.43      2.5      78.21
##   Post.Prob Star
## 1      0.99    *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong_eff, "Eff1:Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff1:Prime.Type:... > 0      1.2      1.11    -0.63     3.03       6.41
##   Post.Prob Star
## 1      0.86     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong_eff, "Eff2:Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff2:Prime.Type:... > 0    -1.55       1.3    -3.68     0.54       0.12
##   Post.Prob Star
## 1      0.11     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
Combined %>%
  mutate(
    k = mlong_eff$criteria$loo$diagnostics$pareto_k
  ) %>%
  ggplot(aes(x=k)) + geom_histogram() + geom_vline(xintercept=.7)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

Again, not great, but will re-fit without problematic values in sensitivity analyses.

Get time-point specific abstract priming effects and lexical boosts

df <- tibble( # Design matrix (minus intercept which is automatic)
  Overlap = factor(rep(c(0, 0, 1, 1), 3), labels = c("No Overlap", "Overlap")), 
  Prime.Type = rep(c(-.5, .5, -.5, .5), 3), 
  Eff1 = c(-.5, -.5, -.5, -.5, .5, .5, .5, .5, 0, 0, 0, 0), 
  Eff2 = c(-.5, -.5, -.5, -.5, 0, 0, 0, 0, .5, .5, .5, .5)
)

ps_means <- posterior_linpred(mlong_eff, newdata=df, re_formula=NA) %>% # posterior samples of linear predictor. 
  t() %>%
  as_tibble() %>%
  bind_cols(df, .) %>%
  pivot_longer(starts_with("V"), names_to="sample", values_to="linpred") %>%
  mutate(
    Time_c = ifelse(Eff1 == -.5, yes="42", no = 
                      ifelse(Eff1==.5, yes="48", no = "54")),
    Prime.Type = ifelse(Prime.Type == .5, yes="DOD", no = "POD"), 
    Overlap = ifelse(Overlap=="Overlap", yes="Ov", no = "No")
  )

ps_means %>%
  dplyr::select(-c("Eff1", "Eff2")) %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="linpred") %>%
  summarise(
    Priming_42 = sum(No_DOD_42 > No_POD_42)/8000,
    LB_42 = sum((No_DOD_42 - No_POD_42) < (Ov_DOD_42 - Ov_POD_42))/8000, 
    Priming_48 = sum(No_DOD_48 > No_POD_48)/8000,
    LB_48 = sum((No_DOD_48 - No_POD_48) < (Ov_DOD_48 - Ov_POD_48))/8000, 
    Priming_54 = sum(No_DOD_54 > No_POD_54)/8000,
    LB_54 = sum((No_DOD_54 - No_POD_54) < (Ov_DOD_54 - Ov_POD_54))/8000
  )
## # A tibble: 1 x 6
##   Priming_42 LB_42 Priming_48 LB_48 Priming_54 LB_54
##        <dbl> <dbl>      <dbl> <dbl>      <dbl> <dbl>
## 1      0.773 0.952      0.829 0.941      0.956 0.771

Effect sizes

ps_means <- ps_means %>%
    mutate(
    prob = inv_logit_scaled(linpred)
  ) %>%
  dplyr::select(-c("linpred", "Eff1", "Eff2")) %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="prob") %>%
  mutate(
    Abstract_42 = No_DOD_42/No_POD_42, 
    LB_42 = (Ov_DOD_42/Ov_POD_42), 
    Abstract_48 = No_DOD_48/No_POD_48,
    LB_48 = (Ov_DOD_48/Ov_POD_48),
    Abstract_54 = No_DOD_54/No_POD_54, 
    LB_54 = (Ov_DOD_54/Ov_POD_54)
  ) %>%
  dplyr::select("sample", starts_with(c("Abs", "LB"))) %>%
  pivot_longer(
    starts_with(c("Abs", "LB")), 
    names_to = c("Effect", "Time"), 
    names_sep = "_", 
    values_to = "RR"
  ) %>%
  mutate(
    Effect = ifelse(Effect=="Abstract", yes= "Abstract Priming", no = "Lexically Boosted Priming")
  ) 



# Risk Ratio of priming effect and lexically specified priming effect
ps_means %>%
  dplyr::select(-"sample") %>%
  group_by(Time, Effect) %>%
  median_qi(.width=.90)
## # A tibble: 6 x 8
##   Time  Effect                       RR .lower .upper .width .point .interval
##   <chr> <chr>                     <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 42    Abstract Priming           1.46  0.626   3.60    0.9 median qi       
## 2 42    Lexically Boosted Priming  6.90  1.58   33.9     0.9 median qi       
## 3 48    Abstract Priming           1.46  0.745   3.03    0.9 median qi       
## 4 48    Lexically Boosted Priming 10.6   3.00   42.0     0.9 median qi       
## 5 54    Abstract Priming           2.41  1.04    5.66    0.9 median qi       
## 6 54    Lexically Boosted Priming  4.52  1.17   19.0     0.9 median qi
ps_means %>% 
  mutate(
    Prop1 = ifelse(RR > 1, yes=1, no=0)
  ) %>%
  group_by(Time, Effect) %>%
  summarise(
    Prop = sum(Prop1)  / (sum(Prop1) + sum(Prop1==0))
  )
## `summarise()` has grouped output by 'Time'. You can override using the
## `.groups` argument.
## # A tibble: 6 x 3
## # Groups:   Time [3]
##   Time  Effect                     Prop
##   <chr> <chr>                     <dbl>
## 1 42    Abstract Priming          0.773
## 2 42    Lexically Boosted Priming 0.983
## 3 48    Abstract Priming          0.829
## 4 48    Lexically Boosted Priming 0.998
## 5 54    Abstract Priming          0.956
## 6 54    Lexically Boosted Priming 0.966
p1 <- p1 + theme(axis.title.x = element_blank()) 
p2 <- fitted(mlong_eff, re_formula=NA, scale="response") %>%
  cbind(Combined) %>%
  dplyr::select(Prime, Overlap, Time_c, Estimate, Est.Error) %>%
  ggplot(aes(y=Estimate, x=Overlap, fill=Prime )) +
  geom_bar(stat="identity",position="dodge") + 
  geom_errorbar(aes(ymin=Estimate, ymax=Estimate+Est.Error), width=.2,
                 position=position_dodge(.9)) + 
  facet_wrap(~Time_c) +
  theme_minimal() + 
  ylim(0, .4) + 
  labs(y="Predicted Probability") + 
  theme(legend.position = "none", 
        strip.text = element_blank())

p1/p2 

ggsave("plots/Fig4.png", last_plot(), dpi=500)
## Saving 7 x 5 in image

2.2.3 Compare fit

Compare the fit of the linear and effects-coded models.

loo(mlong1, mlong_eff)
## Output of model 'mlong1':
## 
## Computed from 8000 by 2865 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -796.9 31.8
## p_loo       244.4 11.9
## looic      1593.9 63.6
## ------
## Monte Carlo SE of elpd_loo is NA.
## 
## Pareto k diagnostic values:
##                          Count Pct.    Min. n_eff
## (-Inf, 0.5]   (good)     2719  94.9%   744       
##  (0.5, 0.7]   (ok)        139   4.9%   347       
##    (0.7, 1]   (bad)         7   0.2%   204       
##    (1, Inf)   (very bad)    0   0.0%   <NA>      
## See help('pareto-k-diagnostic') for details.
## 
## Output of model 'mlong_eff':
## 
## Computed from 8000 by 2865 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -804.1 32.2
## p_loo       262.8 12.7
## looic      1608.2 64.5
## ------
## Monte Carlo SE of elpd_loo is NA.
## 
## Pareto k diagnostic values:
##                          Count Pct.    Min. n_eff
## (-Inf, 0.5]   (good)     2673  93.3%   588       
##  (0.5, 0.7]   (ok)        185   6.5%   258       
##    (0.7, 1]   (bad)         7   0.2%   91        
##    (1, Inf)   (very bad)    0   0.0%   <NA>      
## See help('pareto-k-diagnostic') for details.
## 
## Model comparisons:
##           elpd_diff se_diff
## mlong1     0.0       0.0   
## mlong_eff -7.2       2.8
model_weights(mlong1, mlong_eff)
## Warning: Some Pareto k diagnostic values are too high. See help('pareto-k-diagnostic') for details.

## Warning: Some Pareto k diagnostic values are too high. See help('pareto-k-diagnostic') for details.
##       mlong1    mlong_eff 
## 9.999966e-01 3.392806e-06

Note to self: The linear model fits better, but is this because of all the additional random effects in the effects-coded model? These are difficult to estimate and are penalized by loo (note the higher p_loo for the effects-coded model: 262.8 vs. 244.4). Think more about this.

I’ve also run some sensitivity analyses, which are available in the R Markdown file but not in the html files. These include: (a) a model excluding 54-month sessions that took place on Zoom, (b) a model excluding the improperly administered trial (see Data at earlier time points), (c) a model with uncorrelated random effects, and (d) a model without observations for which Pareto’s k was greater than or equal to .7. There were no substantive differences between the conclusions from these models and our main analyses.

More sensitivity analyses.

2.3 Only kids who produced a DOD at the first time point

Include only participants who produced a DOD at the first time point.

2.3.1 Linear Time

Combined2 <- filter(Combined, Prod_DOD > 0)

mlong2 <- brm(RESPONSE.CODE ~ 1 + Time*Prime.Type*Overlap + 
            (1 +Prime.Type*Overlap  | Blind_ID/Time_c) + 
            (Time*Prime.Type*Overlap - 1 - Prime.Type*Overlap - Prime - Overlap | Blind_ID ) +
            (1 + Time*Prime.Type*Overlap   | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          seed=12345,
          cores=4, 
          data=Combined2, 
          file="LONG/output/mlong2.rds")


mlong2 <- add_criterion(mlong2, "loo")
summary(mlong2, prob=.9)$fixed
##                                   Estimate Est.Error   l-90% CI   u-90% CI
## Intercept                      -2.17612198 0.8705429 -3.5224547 -0.6813513
## Time                           -0.70306089 0.3522917 -1.2348869 -0.1547181
## Prime.Type                      0.49722978 0.4101942 -0.1335619  1.1557297
## OverlapOverlap                 -1.33933073 0.5709300 -2.3399356 -0.5258952
## Time:Prime.Type                -0.01576225 0.4465421 -0.7430019  0.6688684
## Time:OverlapOverlap            -0.05863135 0.6170099 -1.0367148  0.9454130
## Prime.Type:OverlapOverlap       2.05077896 0.9709553  0.5641492  3.6803147
## Time:Prime.Type:OverlapOverlap -0.14704733 0.7353176 -1.3535531  1.0367822
##                                    Rhat Bulk_ESS Tail_ESS
## Intercept                      1.003551 2167.967 3156.505
## Time                           1.000725 4891.409 4606.733
## Prime.Type                     1.000207 5730.596 5158.320
## OverlapOverlap                 1.000746 2973.638 3965.391
## Time:Prime.Type                1.000743 6371.289 5200.600
## Time:OverlapOverlap            1.000472 4481.346 4731.637
## Prime.Type:OverlapOverlap      1.000250 5250.187 5289.036
## Time:Prime.Type:OverlapOverlap 1.000133 6643.235 5531.245
hypothesis(mlong2, "Time > 0")
## Hypothesis Tests for class b:
##   Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob Star
## 1 (Time) > 0     -0.7      0.35    -1.23    -0.15       0.02      0.02     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong2, "Time:Prime.Type < 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type) < 0    -0.02      0.45    -0.74     0.67          1
##   Post.Prob Star
## 1       0.5     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong2, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0     2.05      0.97     0.56     3.68      71.07
##   Post.Prob Star
## 1      0.99    *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong2, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0      0.5      0.41    -0.13     1.16       9.83      0.91
##   Star
## 1     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong2, "Time:Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type:... > 0    -0.15      0.74    -1.35     1.04       0.72
##   Post.Prob Star
## 1      0.42     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
df <- tibble( # Design matrix (minus intercept which is automatic)
  Overlap = factor(rep(c(0, 0, 1, 1), 3), labels = c("No Overlap", "Overlap")), 
  Prime.Type = rep(c(-.5, .5, -.5, .5), 3), 
  Time = c(-1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1)
)

ps_means <- posterior_linpred(mlong2, newdata=df, re_formula=NA) %>% # posterior samples of linear predictor. 
  t() %>%
  as_tibble() %>%
  bind_cols(df, .) %>%
  pivot_longer(starts_with("V"), names_to="sample", values_to="linpred") %>%
  mutate(
    Time_c = ifelse(Time == -1, yes="42", no = 
                      ifelse(Time==0, yes="48", no = "54")),
    Prime.Type = ifelse(Prime.Type == .5, yes="DOD", no = "POD"), 
    Overlap = ifelse(Overlap=="Overlap", yes="Ov", no = "No")
  )
ps_means %>%
  dplyr::select(-"Time") %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="linpred") %>%
  summarise(
    Priming_42 = sum(No_DOD_42 > No_POD_42)/8000,
    LB_42 = sum((No_DOD_42 - No_POD_42) < (Ov_DOD_42 - Ov_POD_42))/8000, 
    Priming_48 = sum(No_DOD_48 > No_POD_48)/8000,
    LB_48 = sum((No_DOD_48 - No_POD_48) < (Ov_DOD_48 - Ov_POD_48))/8000, 
    Priming_54 = sum(No_DOD_54 > No_POD_54)/8000,
    LB_54 = sum((No_DOD_54 - No_POD_54) < (Ov_DOD_54 - Ov_POD_54))/8000
  )
## # A tibble: 1 x 6
##   Priming_42 LB_42 Priming_48 LB_48 Priming_54 LB_54
##        <dbl> <dbl>      <dbl> <dbl>      <dbl> <dbl>
## 1      0.827 0.977      0.908 0.978      0.787 0.937

Effect sizes

ps_means <- ps_means %>%
    mutate(
    prob = inv_logit_scaled(linpred)
  ) %>%
  dplyr::select(-c("linpred", "Time")) %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="prob") %>%
  mutate(
    Abstract_42 = No_DOD_42/No_POD_42, 
    LB_42 = (Ov_DOD_42/Ov_POD_42), 
    Abstract_48 = No_DOD_48/No_POD_48,
    LB_48 = (Ov_DOD_48/Ov_POD_48),
    Abstract_54 = No_DOD_54/No_POD_54, 
    LB_54 = (Ov_DOD_54/Ov_POD_54)
  ) %>%
  dplyr::select("sample", starts_with(c("Abs", "LB"))) %>%
  pivot_longer(
    starts_with(c("Abs", "LB")), 
    names_to = c("Effect", "Time"), 
    names_sep = "_", 
    values_to = "RR"
  ) %>%
  mutate(
    Effect = ifelse(Effect=="Abstract", yes= "Abstract Priming", no = "Lexically Boosted Priming")
  ) 



# Risk Ratio of priming effect and lexically specified priming effect
ps_means %>%
  dplyr::select(-"sample") %>%
  group_by(Time, Effect) %>%
  median_qi(.width=.90)
## # A tibble: 6 x 8
##   Time  Effect                       RR .lower .upper .width .point .interval
##   <chr> <chr>                     <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 42    Abstract Priming           1.46  0.743   3.28    0.9 median qi       
## 2 42    Lexically Boosted Priming 10.3   2.14   74.3     0.9 median qi       
## 3 48    Abstract Priming           1.52  0.895   2.82    0.9 median qi       
## 4 48    Lexically Boosted Priming 10.6   2.68   55.6     0.9 median qi       
## 5 54    Abstract Priming           1.56  0.612   4.00    0.9 median qi       
## 6 54    Lexically Boosted Priming  9.80  1.42   84.1     0.9 median qi
ps_means %>% 
  mutate(
    Prop1 = ifelse(RR > 1, yes=1, no=0)
  ) %>%
  group_by(Time, Effect) %>%
  summarise(
    Prop = sum(Prop1)  / (sum(Prop1) + sum(Prop1==0))
  )
## `summarise()` has grouped output by 'Time'. You can override using the
## `.groups` argument.
## # A tibble: 6 x 3
## # Groups:   Time [3]
##   Time  Effect                     Prop
##   <chr> <chr>                     <dbl>
## 1 42    Abstract Priming          0.827
## 2 42    Lexically Boosted Priming 0.991
## 3 48    Abstract Priming          0.908
## 4 48    Lexically Boosted Priming 0.994
## 5 54    Abstract Priming          0.787
## 6 54    Lexically Boosted Priming 0.972

2.3.2 Effects-Coded Time

mlong2_eff <- brm(RESPONSE.CODE ~ 1 + (Eff1 + Eff2)*Prime.Type*Overlap + 
            (1 +Prime.Type*Overlap  | Blind_ID/Time_c) + 
            ((Eff1 + Eff2)*Prime.Type*Overlap - 1 - Prime.Type*Overlap - Prime - Overlap | Blind_ID ) +
            (1 + (Eff1 + Eff2)*Prime.Type*Overlap   | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          cores=4,
          seed=12345,
          data=Combined2, 
          file="LONG/output/mlong2_eff.rds")
mlong2_eff <- add_criterion(mlong2_eff, "loo")
summary(mlong2_eff, prob=.9)$fixed
##                                   Estimate Est.Error   l-90% CI   u-90% CI
## Intercept                      -2.24131911 0.8811301 -3.6364492 -0.7501565
## Eff1                           -0.72288977 0.6171738 -1.7552701  0.2757026
## Eff2                           -0.94743521 0.6779183 -2.0397929  0.1503424
## Prime.Type                      0.53449595 0.4228378 -0.1208374  1.2485179
## OverlapOverlap                 -1.38846077 0.5714446 -2.3896383 -0.5707166
## Eff1:Prime.Type                 0.11256468 0.9103507 -1.3537639  1.5886266
## Eff2:Prime.Type                -0.07918747 0.9407769 -1.6577450  1.3999254
## Eff1:OverlapOverlap            -0.37447067 0.8774771 -1.8179333  1.0512761
## Eff2:OverlapOverlap             0.06290260 1.1626571 -1.8129392  1.9604784
## Prime.Type:OverlapOverlap       2.11215293 0.9551391  0.6546843  3.7299320
## Eff1:Prime.Type:OverlapOverlap  0.14115403 1.4896158 -2.3172245  2.5665762
## Eff2:Prime.Type:OverlapOverlap -0.37476682 1.5731535 -2.9121919  2.2109773
##                                     Rhat Bulk_ESS Tail_ESS
## Intercept                      1.0002001 2035.961 3154.056
## Eff1                           1.0003772 4824.084 4973.987
## Eff2                           1.0004877 4553.034 5435.486
## Prime.Type                     1.0001727 5015.579 3918.038
## OverlapOverlap                 1.0010570 3181.999 3414.604
## Eff1:Prime.Type                1.0005488 6769.396 6043.791
## Eff2:Prime.Type                1.0000785 6500.970 5788.929
## Eff1:OverlapOverlap            1.0005195 5421.070 5691.682
## Eff2:OverlapOverlap            1.0003053 5041.715 5330.975
## Prime.Type:OverlapOverlap      0.9999607 4480.256 4860.323
## Eff1:Prime.Type:OverlapOverlap 1.0007910 6031.858 5859.283
## Eff2:Prime.Type:OverlapOverlap 1.0007336 5866.879 5830.628

Get time-point specific abstract priming effects and lexical boosts

df <- tibble( # Design matrix (minus intercept which is automatic)
  Overlap = factor(rep(c(0, 0, 1, 1), 3), labels = c("No Overlap", "Overlap")), 
  Prime.Type = rep(c(-.5, .5, -.5, .5), 3), 
  Eff1 = c(-.5, -.5, -.5, -.5, .5, .5, .5, .5, 0, 0, 0, 0), 
  Eff2 = c(-.5, -.5, -.5, -.5, 0, 0, 0, 0, .5, .5, .5, .5)
)

ps_means <- posterior_linpred(mlong2_eff, newdata=df, re_formula=NA) %>% # posterior samples of linear predictor. 
  t() %>%
  as_tibble() %>%
  bind_cols(df, .) %>%
  pivot_longer(starts_with("V"), names_to="sample", values_to="linpred") %>%
  mutate(
    Time_c = ifelse(Eff1 == -.5, yes="42", no = 
                      ifelse(Eff1==.5, yes="48", no = "54")),
    Prime.Type = ifelse(Prime.Type == .5, yes="DOD", no = "POD"), 
    Overlap = ifelse(Overlap=="Overlap", yes="Ov", no = "No")
  )

ps_means %>%
  dplyr::select(-c("Eff1", "Eff2")) %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="linpred") %>%
  summarise(
    Priming_42 = sum(No_DOD_42 > No_POD_42)/8000,
    LB_42 = sum((No_DOD_42 - No_POD_42) < (Ov_DOD_42 - Ov_POD_42))/8000, 
    Priming_48 = sum(No_DOD_48 > No_POD_48)/8000,
    LB_48 = sum((No_DOD_48 - No_POD_48) < (Ov_DOD_48 - Ov_POD_48))/8000, 
    Priming_54 = sum(No_DOD_54 > No_POD_54)/8000,
    LB_54 = sum((No_DOD_54 - No_POD_54) < (Ov_DOD_54 - Ov_POD_54))/8000
  )
## # A tibble: 1 x 6
##   Priming_42 LB_42 Priming_48 LB_48 Priming_54 LB_54
##        <dbl> <dbl>      <dbl> <dbl>      <dbl> <dbl>
## 1      0.811 0.976      0.828 0.957      0.782 0.941
ps_means <- ps_means %>%
    mutate(
    prob = inv_logit_scaled(linpred)
  ) %>%
  dplyr::select(-c("linpred", "Eff1", "Eff2")) %>%
  pivot_wider(names_from=c("Overlap", "Prime.Type", "Time_c"), values_from="prob") %>%
  mutate(
    Abstract_42 = No_DOD_42/No_POD_42, 
    LB_42 = (Ov_DOD_42/Ov_POD_42), 
    Abstract_48 = No_DOD_48/No_POD_48,
    LB_48 = (Ov_DOD_48/Ov_POD_48),
    Abstract_54 = No_DOD_54/No_POD_54, 
    LB_54 = (Ov_DOD_54/Ov_POD_54)
  ) %>%
  dplyr::select("sample", starts_with(c("Abs", "LB"))) %>%
  pivot_longer(
    starts_with(c("Abs", "LB")), 
    names_to = c("Effect", "Time"), 
    names_sep = "_", 
    values_to = "RR"
  ) %>%
  mutate(
    Effect = ifelse(Effect=="Abstract", yes= "Abstract Priming", no = "Lexically Boosted Priming")
  ) 



# Risk Ratio of priming effect and lexically specified priming effect
ps_means %>%
  dplyr::select(-"sample") %>%
  group_by(Time, Effect) %>%
  median_qi(.width=.90)
## # A tibble: 6 x 8
##   Time  Effect                       RR .lower .upper .width .point .interval
##   <chr> <chr>                     <dbl>  <dbl>  <dbl>  <dbl> <chr>  <chr>    
## 1 42    Abstract Priming           1.46  0.700   3.47    0.9 median qi       
## 2 42    Lexically Boosted Priming 10.5   2.07   68.5     0.9 median qi       
## 3 48    Abstract Priming           1.68  0.692   4.51    0.9 median qi       
## 4 48    Lexically Boosted Priming 14.2   2.16  111.      0.9 median qi       
## 5 54    Abstract Priming           1.56  0.592   4.29    0.9 median qi       
## 6 54    Lexically Boosted Priming  9.71  1.56   80.6     0.9 median qi
ps_means %>% 
  mutate(
    Prop1 = ifelse(RR > 1, yes=1, no=0)
  ) %>%
  group_by(Time, Effect) %>%
  summarise(
    Prop = sum(Prop1)  / (sum(Prop1) + sum(Prop1==0))
  )
## `summarise()` has grouped output by 'Time'. You can override using the
## `.groups` argument.
## # A tibble: 6 x 3
## # Groups:   Time [3]
##   Time  Effect                     Prop
##   <chr> <chr>                     <dbl>
## 1 42    Abstract Priming          0.811
## 2 42    Lexically Boosted Priming 0.990
## 3 48    Abstract Priming          0.828
## 4 48    Lexically Boosted Priming 0.988
## 5 54    Abstract Priming          0.782
## 6 54    Lexically Boosted Priming 0.978

More sensitivity analyses.

3 Prime Surprisal Effect

3.1 Full Data

3.1.1 Linear model

mlong5 <- brm(RESPONSE.CODE ~ 1 + Time*Prime.Type*biasmatch + 
            (1 +Prime.Type*biasmatch | Blind_ID/Time_c) + 
            (Time*Prime.Type*biasmatch - 1 - Prime.Type*biasmatch - Prime - biasmatch | Blind_ID ) +
            (1 + Time*Prime.Type*biasmatch   | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          seed=12345,
          cores=4, 
          data=Combined %>% filter(Overlap=="No Overlap"), 
          file="LONG/output/mlong5.rds")
mlong5 <- add_criterion(mlong5, "loo")
summary(mlong5, prob=.9)$fixed
##                                      Estimate Est.Error   l-90% CI     u-90% CI
## Intercept                         -2.87693141 0.9078392 -4.3393020 -1.352438709
## Time                              -0.43969129 0.3717122 -1.0354355  0.152886780
## Prime.Type                         0.66949889 0.5703991 -0.2177710  1.583609055
## biasmatchmismatch                 -0.85926846 0.5633064 -1.8308043 -0.002710462
## Time:Prime.Type                   -0.11371366 0.5477411 -1.0124482  0.764112531
## Time:biasmatchmismatch             0.05896644 0.4890238 -0.7428996  0.849101272
## Prime.Type:biasmatchmismatch       0.36371541 0.9111311 -1.0343519  1.916922463
## Time:Prime.Type:biasmatchmismatch  0.80902131 0.8334959 -0.5436896  2.200634274
##                                        Rhat Bulk_ESS Tail_ESS
## Intercept                         1.0015136 2417.901 3804.936
## Time                              0.9998355 4132.455 4546.900
## Prime.Type                        1.0004696 4507.906 4704.519
## biasmatchmismatch                 1.0021901 3343.536 4498.529
## Time:Prime.Type                   1.0004840 6345.606 5329.129
## Time:biasmatchmismatch            1.0002072 5659.359 5533.645
## Prime.Type:biasmatchmismatch      1.0016729 4725.311 4499.662
## Time:Prime.Type:biasmatchmismatch 1.0003489 5891.982 5725.964
hypothesis(mlong5, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0     0.67      0.57    -0.22     1.58       8.62       0.9
##   Star
## 1     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5, "Time:Prime.Type< 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type) < 0    -0.11      0.55    -1.01     0.76        1.4
##   Post.Prob Star
## 1      0.58     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5, "Prime.Type:biasmatchmismatch  > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0     0.36      0.91    -1.03     1.92       1.91
##   Post.Prob Star
## 1      0.66     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5, "Time:Prime.Type:biasmatchmismatch < 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type:... < 0     0.81      0.83    -0.54      2.2        0.2
##   Post.Prob Star
## 1      0.17     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.

Test the prime surprisal effect at each time point – whether priming is larger when the prime verb’s bias mismatches the prime structure than when it matches.

df <- tibble( # Design matrix (minus intercept which is automatic)
  biasmatch = factor(rep(c(0, 0, 1, 1), 3), labels = c("match", "mismatch")), 
  Prime.Type = rep(c(-.5, .5, -.5, .5), 3), 
  Time = c(-1, -1, -1, -1, 0, 0, 0, 0, 1, 1, 1, 1)
)

ps_means <- posterior_linpred(mlong5, newdata=df, re_formula=NA) %>% # posterior samples of linear predictor. 
  t() %>%
  as_tibble() %>%
  bind_cols(df, .) %>%
  pivot_longer(starts_with("V"), names_to="sample", values_to="linpred") %>%
  mutate(
    Time_c = ifelse(Time == -1, yes="42", no = 
                      ifelse(Time==0, yes="48", no = "54")),
    Prime.Type = ifelse(Prime.Type == .5, yes="DOD", no = "POD"), 
    biasmatch = ifelse(biasmatch=="mismatch", yes="Mis", no = "Mat")
  )
ps_means %>%
  dplyr::select(-"Time") %>%
  pivot_wider(names_from=c("biasmatch", "Prime.Type", "Time_c"), values_from="linpred") %>%
  summarise(
    Surprisal_42 = sum((Mis_DOD_42 - Mis_POD_42) > (Mat_DOD_42 - Mat_POD_42))/8000, 
    Surprisal_48 = sum((Mis_DOD_48 - Mis_POD_48) > (Mat_DOD_48 - Mat_POD_48))/8000, 
    Surprisal_54 = sum((Mis_DOD_54 - Mis_POD_54) > (Mat_DOD_54 - Mat_POD_54))/8000
  )
## # A tibble: 1 x 3
##   Surprisal_42 Surprisal_48 Surprisal_54
##          <dbl>        <dbl>        <dbl>
## 1        0.336        0.656        0.828

3.1.2 Effects Coded

mlong5_eff <- brm(RESPONSE.CODE ~ 1 + (Eff1 + Eff2)*Prime.Type*biasmatch + 
            (1 +Prime.Type*biasmatch | Blind_ID/Time_c) + 
            ((Eff1 + Eff2)*Prime.Type*biasmatch - 1 - Prime.Type*biasmatch - Prime - biasmatch | Blind_ID ) +
            (1 + (Eff1 + Eff2)*Prime.Type*biasmatch   | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          seed=12345,
          cores=4, 
          data=Combined %>% filter(Overlap=="No Overlap"), 
          file="LONG/output/mlong5_eff.rds")
mlong5_eff <- add_criterion(mlong5_eff, "loo")
summary(mlong5_eff, prob=.9)$fixed
##                                     Estimate Est.Error   l-90% CI   u-90% CI
## Intercept                         -3.0259226 0.9248673 -4.4908434 -1.5071033
## Eff1                               0.4033430 0.7071933 -0.7774285  1.5396159
## Eff2                              -0.7856503 0.7808589 -2.0489589  0.4617057
## Prime.Type                         0.7386018 0.5965228 -0.2027755  1.6968751
## biasmatchmismatch                 -0.9959671 0.5409850 -1.9077662 -0.1705713
## Eff1:Prime.Type                   -0.8518952 1.0848460 -2.6329267  0.9172106
## Eff2:Prime.Type                    0.2453111 1.1822723 -1.7044258  2.1502222
## Eff1:biasmatchmismatch             0.3324336 1.0453831 -1.3812081  2.0455296
## Eff2:biasmatchmismatch             0.2459385 1.1077742 -1.6002582  2.0353782
## Prime.Type:biasmatchmismatch       0.3545749 0.9431540 -1.1167968  1.9402935
## Eff1:Prime.Type:biasmatchmismatch  1.0177109 1.7051867 -1.7467696  3.8282937
## Eff2:Prime.Type:biasmatchmismatch  0.9288921 1.8938521 -2.1989192  4.0055426
##                                       Rhat Bulk_ESS Tail_ESS
## Intercept                         1.001589 2091.407 3128.147
## Eff1                              1.000792 4921.220 5817.821
## Eff2                              1.000187 3805.516 4477.990
## Prime.Type                        1.000299 4244.821 4380.101
## biasmatchmismatch                 1.000064 3130.298 4582.803
## Eff1:Prime.Type                   1.000069 5640.752 5868.115
## Eff2:Prime.Type                   1.000362 5625.839 5830.737
## Eff1:biasmatchmismatch            1.000079 4969.504 5872.721
## Eff2:biasmatchmismatch            1.000939 4257.912 5518.129
## Prime.Type:biasmatchmismatch      1.000547 4738.705 4915.931
## Eff1:Prime.Type:biasmatchmismatch 1.000392 5766.289 5955.276
## Eff2:Prime.Type:biasmatchmismatch 1.000992 5519.602 5274.128
hypothesis(mlong5_eff, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0     0.74       0.6     -0.2      1.7       9.64      0.91
##   Star
## 1     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_eff, "Eff1:Prime.Type< 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff1:Prime.Type) < 0    -0.85      1.08    -2.63     0.92       3.69
##   Post.Prob Star
## 1      0.79     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_eff, "Eff2:Prime.Type< 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff2:Prime.Type) < 0     0.25      1.18     -1.7     2.15        0.7
##   Post.Prob Star
## 1      0.41     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_eff, "Prime.Type:biasmatchmismatch  > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0     0.35      0.94    -1.12     1.94       1.83
##   Post.Prob Star
## 1      0.65     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_eff, "Eff1:Prime.Type:biasmatchmismatch < 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff1:Prime.Type:... < 0     1.02      1.71    -1.75     3.83       0.39
##   Post.Prob Star
## 1      0.28     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_eff, "Eff2:Prime.Type:biasmatchmismatch < 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff2:Prime.Type:... < 0     0.93      1.89     -2.2     4.01       0.45
##   Post.Prob Star
## 1      0.31     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.

Get the posterior probability of a positive prime surprisal effect (i.e., a larger priming effect after bias-mismatching primes than after bias-matching primes) at each time point.

df <- tibble( # Design matrix (minus intercept which is automatic)
  biasmatch = factor(rep(c(0, 0, 1, 1), 3), labels = c("match", "mismatch")), 
  Prime.Type = rep(c(-.5, .5, -.5, .5), 3), 
  Eff1= c(-.5, -.5, -.5, -.5, .5, .5, .5, .5, 0, 0, 0, 0), 
  Eff2 = c(-.5, -.5, -.5, -.5, 0, 0, 0, 0, .5, .5, .5, .5)
)

ps_means <- posterior_linpred(mlong5_eff, newdata=df, re_formula=NA) %>% # posterior samples of linear predictor (population-level effects only)
  t() %>%                      # rows = design cells, columns = draws (V1, V2, ...)
  as_tibble() %>%
  bind_cols(df, .) %>%
  pivot_longer(starts_with("V"), names_to="sample", values_to="linpred") %>%
  mutate(
    # recover readable condition labels from the effect/contrast codes
    Time_c = ifelse(Eff1 == -.5, yes="42", no = 
                      ifelse(Eff1==.5, yes="48", no = "54")),
    Prime.Type = ifelse(Prime.Type == .5, yes="DOD", no = "POD"), 
    biasmatch = ifelse(biasmatch=="mismatch", yes="Mis", no = "Mat")
  )

ps_means %>%
  dplyr::select(-"Eff1", -"Eff2") %>%
  pivot_wider(names_from=c("biasmatch", "Prime.Type", "Time_c"), values_from="linpred") %>%
  summarise(
    # proportion of the 8000 post-warmup draws (4 chains x 2000) in which the
    # priming effect is larger after bias-mismatching than bias-matching primes
    Surprisal_42 = sum((Mis_DOD_42 - Mis_POD_42) > (Mat_DOD_42 - Mat_POD_42))/8000, 
    Surprisal_48 = sum((Mis_DOD_48 - Mis_POD_48) > (Mat_DOD_48 - Mat_POD_48))/8000, 
    Surprisal_54 = sum((Mis_DOD_54 - Mis_POD_54) > (Mat_DOD_54 - Mat_POD_54))/8000
  )
## # A tibble: 1 x 3
##   Surprisal_42 Surprisal_48 Surprisal_54
##          <dbl>        <dbl>        <dbl>
## 1        0.326        0.766         0.73

I am not sure the 48-month value is consistent with the time-specific analyses; double-check that this code is correct (one possible cross-check is sketched below).
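
As a cross-check, the same per-time-point surprisal probabilities can be read off the fixed effects directly: with the effect coding above (42 months: Eff1 = Eff2 = -.5; 48 months: Eff1 = .5, Eff2 = 0; 54 months: Eff1 = 0, Eff2 = .5), the surprisal contrast at a given time point is just the Prime.Type:biasmatch interaction evaluated at that time point's codes. The sketch below passes these combinations to hypothesis(); its Post.Prob column should reproduce the three proportions above if the posterior_linpred() code is right.

# Cross-check sketch: surprisal effect at each time point expressed as a
# combination of fixed effects (Post.Prob = posterior probability that the
# combination is greater than zero).
hypothesis(mlong5_eff, c(
  "Prime.Type:biasmatchmismatch - 0.5*Eff1:Prime.Type:biasmatchmismatch - 0.5*Eff2:Prime.Type:biasmatchmismatch > 0", # 42 months
  "Prime.Type:biasmatchmismatch + 0.5*Eff1:Prime.Type:biasmatchmismatch > 0",                                         # 48 months
  "Prime.Type:biasmatchmismatch + 0.5*Eff2:Prime.Type:biasmatchmismatch > 0"                                          # 54 months
))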

Try a model with a simplified by-item random-effects structure. I think it is important to keep at least the by-verb random intercepts because of the imbalance in the prime type by bias match interaction across verbs (see Appendix B), so this is the simplest structure I think is justifiable.

mlong5_simp <- brm(RESPONSE.CODE ~ 1 + (Eff1 + Eff2)*Prime.Type*biasmatch + 
            (1 + Prime.Type*biasmatch | Blind_ID/Time_c) + 
            ((Eff1 + Eff2)*Prime.Type*biasmatch - 1 - Prime.Type*biasmatch - Prime - biasmatch | Blind_ID) +
            (1 | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          seed=12345,
          cores=4, 
          data=Combined %>% filter(Overlap=="No Overlap"), 
          file="LONG/output/mlong5_simp.rds")
mlong5_simp <- add_criterion(mlong5_simp, "loo")
hypothesis(mlong5_simp, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0      0.7      0.41     0.01     1.36      20.45      0.95
##   Star
## 1    *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_simp, "Eff1:Prime.Type< 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff1:Prime.Type) < 0    -0.59      0.88    -2.03     0.86          3
##   Post.Prob Star
## 1      0.75     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_simp, "Eff2:Prime.Type< 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff2:Prime.Type) < 0     0.18      0.96     -1.4     1.74       0.74
##   Post.Prob Star
## 1      0.42     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_simp, "Prime.Type:biasmatchmismatch  > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0      0.2      0.71    -0.92     1.38       1.52
##   Post.Prob Star
## 1       0.6     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_simp, "Eff1:Prime.Type:biasmatchmismatch < 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff1:Prime.Type:... < 0     0.62       1.4    -1.68     2.92        0.5
##   Post.Prob Star
## 1      0.33     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong5_simp, "Eff2:Prime.Type:biasmatchmismatch < 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Eff2:Prime.Type:... < 0     1.23      1.49    -1.18     3.71       0.25
##   Post.Prob Star
## 1       0.2     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
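
Both random-effects structures now have the loo criterion attached (assuming add_criterion(..., "loo") was also run on mlong5_eff earlier), so a quick comparison of expected out-of-sample fit could be run as sketched below; loo_compare() lists the preferred model first.

# Sketch: compare the full and simplified random-effects structures on
# approximate leave-one-out predictive performance.
loo_compare(mlong5_eff, mlong5_simp, criterion = "loo")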

3.2 Only DOD Producers

Still need to get conditional effects for these models; a possible sketch using conditional_effects() is included at the end of the linear model section below, but this will be done properly later.

3.2.1 Linear

mlong6 <- brm(RESPONSE.CODE ~ 1 + Time*Prime.Type*biasmatch + 
            (1 + Prime.Type*biasmatch | Blind_ID/Time_c) + 
            (Time*Prime.Type*biasmatch - 1 - Prime.Type*biasmatch - Prime - biasmatch | Blind_ID) +
            (1 + Time*Prime.Type*biasmatch | Target.verb), 
          family="bernoulli", 
          prior=logit_prior, 
          iter=4000, 
          cores=4, 
          seed=12345,
          data=Combined2 %>% filter(Overlap=="No Overlap"), 
          file="LONG/output/mlong6.rds")
mlong6 <- add_criterion(mlong6, "loo")
summary(mlong6, prob=.9)$fixed
##                                       Estimate Est.Error   l-90% CI
## Intercept                         -2.068695613 0.9112198 -3.5389782
## Time                              -0.797353912 0.4557525 -1.5228359
## Prime.Type                         0.759515398 0.6140199 -0.2443512
## biasmatchmismatch                 -0.848168351 0.5575665 -1.7611483
## Time:Prime.Type                   -0.363872719 0.6918768 -1.4949244
## Time:biasmatchmismatch            -0.002902969 0.5665712 -0.9331608
## Prime.Type:biasmatchmismatch       0.025054812 1.0186403 -1.5658947
## Time:Prime.Type:biasmatchmismatch  0.622991122 1.0056815 -0.9763109
##                                        u-90% CI      Rhat Bulk_ESS Tail_ESS
## Intercept                         -0.5773305460 1.0005622 3128.252 4827.867
## Time                              -0.0757218237 1.0002745 6166.325 5184.220
## Prime.Type                         1.7556452724 0.9999766 6983.833 5477.774
## biasmatchmismatch                 -0.0003590273 1.0001269 5662.214 5529.282
## Time:Prime.Type                    0.7230924617 0.9998699 7772.776 5548.856
## Time:biasmatchmismatch             0.8937143605 1.0002270 7240.335 6197.565
## Prime.Type:biasmatchmismatch       1.7140039297 1.0000248 6352.018 5614.915
## Time:Prime.Type:biasmatchmismatch  2.2615067040 1.0003336 8538.992 5688.062
hypothesis(mlong6, "Prime.Type > 0")
## Hypothesis Tests for class b:
##         Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0     0.76      0.61    -0.24     1.76       9.42       0.9
##   Star
## 1     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong6, "Time:Prime.Type< 0")
## Hypothesis Tests for class b:
##              Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type) < 0    -0.36      0.69    -1.49     0.72       2.45
##   Post.Prob Star
## 1      0.71     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong6, "Prime.Type:biasmatchmismatch  > 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0     0.03      1.02    -1.57     1.71       1.02
##   Post.Prob Star
## 1       0.5     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(mlong6, "Time:Prime.Type:biasmatchmismatch < 0")
## Hypothesis Tests for class b:
##                 Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Time:Prime.Type:... < 0     0.62      1.01    -0.98     2.26       0.35
##   Post.Prob Star
## 1      0.26     
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
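
One possible way to get the outstanding conditional effects for this model (a sketch only, assuming the biasmatch factor levels are "match" and "mismatch"): conditional_effects() can plot the Time-by-Prime.Type interaction separately for each bias condition via the conditions argument, with int_conditions pinning Prime.Type to its two coded values.

# Sketch: population-level conditional effects for the DOD-producer model.
cond <- data.frame(biasmatch = c("match", "mismatch"))  # assumed factor levels
ce6 <- conditional_effects(mlong6,
                           effects = "Time:Prime.Type",
                           int_conditions = list(Prime.Type = c(-0.5, 0.5)),  # POD vs. DOD prime
                           conditions = cond,
                           re_formula = NA)  # exclude random effects
plot(ce6, ask = FALSE)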

3.2.2 Effects coded

(p2 <- Combined2 %>% 
  filter(Overlap=="No Overlap") %>% 
  group_by(Prime, biasmatch, Time_c) %>%
  summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE), 
            sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
  
ggplot(aes(y=mean, x=biasmatch, fill=Prime)) +
  geom_bar(stat="identity",position="dodge") +
  geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,   # error bars show +1 SD of the raw responses
                 position=position_dodge(.9)) +
  facet_wrap(~Time_c) +
  ylim(0, .85) + 
  theme_minimal() + 
  labs(y="Raw Mean") + 
  theme(legend.position = "none", 
        axis.title.x = element_blank())
)
## `summarise()` has grouped output by 'Prime', 'biasmatch'. You can override
## using the `.groups` argument.

p5 <- fitted(mlong5, re_formula=NA, scale="response") %>%   # population-level fitted probabilities (random effects excluded)
  cbind(Combined %>% filter(Overlap=="No Overlap")) %>%
  dplyr::select(Prime, biasmatch, Time_c, Estimate, Est.Error) %>%
  ggplot(aes(y=Estimate, x=biasmatch, fill=Prime)) +
  geom_bar(stat="identity",position="dodge") + 
  geom_errorbar(aes(ymin=Estimate, ymax=Estimate+Est.Error), width=.2,   # error bars show +1 posterior SD
                 position=position_dodge(.9)) + 
  facet_wrap(~Time_c) +
  theme_minimal() + 
  ylim(0, .4) + 
  labs(y="Predicted Probability") + 
  theme(legend.position = "none", 
        strip.text = element_blank())
p2/p5

ggsave("plots/Fig5.png", last_plot(), dpi=500)
## Saving 7 x 5 in image
tab_model(mlong1, mlong2, mlong5, mlong6, 
          show.ci=.90, show.re.var=FALSE, show.r2=FALSE, show.icc=FALSE, 
          transform=NULL, collapse.ci=TRUE, show.p=TRUE, ci.hyphen = " : ", 
          dv.labels=c("Model 1", "Model 2", "Model 3", "Model 4"), 
          string.stat="", file="Table_Long",
           pred.labels=c(
            "Intercept", 
            "Time Point.c",
            "Prime (DOD)", 
            "Overlap",
            "Time Point.c * Prime", 
            "Time Point.c * Overlap", 
            "Prime * Overlap", 
            "Time.c * Prime * Overlap", 
            "Bias Mismatch", 
            "Time * Bias Mismatch ", 
            "Prime * Bias Mismatch", 
            "Time * Prime * Bias Mismatch"
          ))
## Warning: Following potential variables could not be found in the data: Prime

## Warning: Following potential variables could not be found in the data: Prime

## Warning: Following potential variables could not be found in the data: Prime

## Warning: Following potential variables could not be found in the data: Prime
Log-odds estimates with 90% CIs (Models 1-4 correspond to mlong1, mlong2, mlong5, and mlong6):

| Predictors | Model 1 | Model 2 | Model 3 | Model 4 |
|---|---|---|---|---|
| Intercept | -3.00 (-4.29 : -1.49) | -2.22 (-3.52 : -0.68) | -2.88 (-4.34 : -1.35) | -2.07 (-3.54 : -0.58) |
| Time Point.c | -0.25 (-0.71 : 0.20) | -0.70 (-1.23 : -0.15) | -0.44 (-1.04 : 0.15) | -0.80 (-1.52 : -0.08) |
| Prime (DOD) | 0.54 (0.01 : 1.09) | 0.49 (-0.13 : 1.16) | 0.66 (-0.22 : 1.58) | 0.77 (-0.24 : 1.76) |
| Overlap | -1.07 (-1.80 : -0.39) | -1.28 (-2.34 : -0.53) | | |
| Time Point.c * Prime | 0.25 (-0.39 : 0.84) | -0.00 (-0.74 : 0.67) | -0.11 (-1.01 : 0.76) | -0.35 (-1.49 : 0.72) |
| Time Point.c * Overlap | 0.21 (-0.57 : 1.09) | -0.07 (-1.04 : 0.95) | | |
| Prime * Overlap | 1.38 (0.38 : 2.50) | 2.01 (0.56 : 3.68) | | |
| Time.c * Prime * Overlap | -0.42 (-1.52 : 0.69) | -0.14 (-1.35 : 1.04) | | |
| Bias Mismatch | | | -0.82 (-1.83 : -0.00) | -0.83 (-1.76 : -0.00) |
| Time * Bias Mismatch | | | 0.06 (-0.74 : 0.85) | 0.01 (-0.93 : 0.89) |
| Prime * Bias Mismatch | | | 0.33 (-1.03 : 1.92) | 0.01 (-1.57 : 1.71) |
| Time * Prime * Bias Mismatch | | | 0.81 (-0.54 : 2.20) | 0.61 (-0.98 : 2.26) |
| N (Time_c) | 3 | 3 | 3 | 3 |
| N (Blind_ID) | 100 | 46 | 100 | 46 |
| N (Target.verb) | 6 | 6 | 6 | 6 |
| Observations | 2865 | 1479 | 1355 | 696 |