library(brms)
library(readxl)
library(writexl)
library(DHARMa)
library(rmarkdown)
library(tidybayes)
library(ggplot2)
library(tidyr)
library(dplyr)
library(GGally)
library(ggpubr)
library(patchwork)
library(sjPlot)
library(lubridate)
library(zoo)
library(readODS)
DOD_48mo <- read_ods("DOD_48mo_blind.ods")
N_total <- nrow(DOD_48mo) # sanity check
DOD_42mo <- read_ods("DOD_42mo_prepped.ods") %>%
group_by(Blind_ID) %>%
slice(1) %>%
dplyr::select("Blind_ID", "Prod_DOD") %>%
rename("DOD_42" = "Prod_DOD") %>%
ungroup()
Create mismatch variables. I’m also creating some character versions of many factors to make plotting a bit easier later.
DOD_48mo <- DOD_48mo %>%
mutate(
Prime = ifelse(Prime.Type==.5, yes="DOD", no="POD"), # Prime type, character version
Overlap = ifelse(Verb.match==.5, yes="Overlap", no="No Overlap") # overlap
)
DOD_48mo$bias <-"DOD" # set as DOD bias
DOD_48mo$bias[DOD_48mo$Prime.verb=="passed"] = "POD" # Change POD-biased prime verbs.
DOD_48mo$bias[DOD_48mo$Prime.verb=="sent"] = "POD"
DOD_48mo$bias[DOD_48mo$Prime.verb=="threw"] = "POD"
DOD_48mo$biasmatch <-"match" # set to match
DOD_48mo$biasmatch[DOD_48mo$bias=="POD" & DOD_48mo$Prime == "DOD"] = "mismatch" # Change to mismatch.
DOD_48mo$biasmatch[DOD_48mo$bias=="DOD" & DOD_48mo$Prime == "POD"] = "mismatch"
table(DOD_48mo$biasmatch)
##
## match mismatch
## 541 539
DOD_48mo$biasmismatchc <- ifelse(DOD_48mo$biasmatch == "mismatch", yes=.5, no=-.5)
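A quick cross-tabulation (a sketch; not part of the original pipeline) confirms that only the intended prime verbs are coded as POD-biased:
with(DOD_48mo, table(Prime.verb, bias)) # passed, sent, and threw should fall only in the POD column; all other prime verbs in DOD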
There is one type of trial coded as an admin error that I don’t want to remove for the time being: trials 12r on lists 6 and 8.
DOD_48mo <- DOD_48mo %>%
mutate(
Trial_Drop = ifelse(is.na(RESPONSE.CODE), yes=1, no=0),
Trial_Problem = ifelse(List==6 & Item == "12r", yes = 1, no =ifelse(
List==8 & Item == "12r", yes = 1, no =0
))
)
nrow(filter(DOD_48mo, Trial_Problem==1)) == (nrow(filter(DOD_48mo, List==6))/12) + (nrow(filter(DOD_48mo, List==8))/12)
## [1] TRUE
N_miss <- DOD_48mo %>%
filter(is.na(RESPONSE.CODE)) %>%
nrow()
See how many missing values we have by participant.
DOD_48mo %>%
group_by(Blind_ID) %>%
mutate(
N_Missing = sum(is.na(RESPONSE.CODE))
) %>%
slice(1) %>%
select("Blind_ID", "N_Missing") %>%
group_by(N_Missing) %>%
count()
## # A tibble: 6 × 2
## # Groups: N_Missing [6]
## N_Missing n
## <int> <int>
## 1 0 54
## 2 1 21
## 3 2 10
## 4 3 2
## 5 4 2
## 6 6 1
DOD_48mo <- DOD_48mo %>%
group_by(Blind_ID) %>%
mutate(
N_Missing = sum(is.na(RESPONSE.CODE))
) %>%
slice(1) %>%
select("Blind_ID", "N_Missing") %>%
mutate(
Drop = ifelse(N_Missing >= 10, 1, 0),
MightDrop = ifelse(N_Missing > 6, 1, 0)
) %>%
full_join(DOD_48mo, .)
## Joining with `by = join_by(Blind_ID)`
N_drop = DOD_48mo %>%
filter(Drop == 1 & !is.na(RESPONSE.CODE)) %>%
count()
And by item.
DOD_48mo %>%
group_by(Target.verb) %>%
mutate(
N_Missing = sum(is.na(RESPONSE.CODE))
) %>%
slice(1) %>%
select("Target.verb", "N_Missing") %>%
group_by(Target.verb)
## # A tibble: 7 × 2
## # Groups: Target.verb [7]
## Target.verb N_Missing
## <chr> <int>
## 1 brought 16
## 2 gave 2
## 3 passed 6
## 4 put 1
## 5 sent 7
## 6 showed 11
## 7 threw 18
All non-target verbs have been removed. It doesn’t make sense to estimate random effects for a level of a random factor with only a single observation.
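A quick way to check this (a sketch, not in the original script) is to count observations per target verb:
DOD_48mo %>% count(Target.verb) # every remaining verb should contribute far more than a single observation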
And let’s just check the proportion of missing trials across the various experimental conditions.
DOD_48mo %>%
mutate(
Missing = ifelse(is.na(RESPONSE.CODE), yes=1, no = 0)
) %>%
group_by(Prime, Overlap) %>%
summarise(
mean = mean(Missing),
sd = sd(Missing)
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
## # A tibble: 4 × 4
## # Groups: Prime [2]
## Prime Overlap mean sd
## <chr> <chr> <dbl> <dbl>
## 1 DOD No Overlap 0.0734 0.261
## 2 DOD Overlap 0.0643 0.246
## 3 POD No Overlap 0.0309 0.173
## 4 POD Overlap 0.0567 0.232
DOD_48mo %>%
mutate(
Missing = ifelse(is.na(RESPONSE.CODE), yes=1, no = 0)
) %>%
group_by(Prime, biasmatch) %>%
summarise(
mean = mean(Missing),
sd = sd(Missing)
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
## # A tibble: 4 × 4
## # Groups: Prime [2]
## Prime biasmatch mean sd
## <chr> <chr> <dbl> <dbl>
## 1 DOD match 0.0519 0.222
## 2 DOD mismatch 0.0855 0.280
## 3 POD match 0.0480 0.214
## 4 POD mismatch 0.0407 0.198
The differences in missingness between conditions don’t look drastic. To sum up, we have much less missing data than at 42 months, which is to be expected. In particular, no participant is missing more than 6 trials.
DOD_48mo <- filter(DOD_48mo, !is.na(RESPONSE.CODE) & Drop==0) # 1019
nrow(DOD_48mo) == N_total - N_miss - N_drop
## n
## [1,] TRUE
DOD_48mo %>%
summarise(
min = min(Session.Age),
max = max(Session.Age)
)
## min max
## 1 48 : 11 49 : 4
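Session.Age is stored as a “months : days” string, so the min() and max() above compare it lexicographically rather than numerically. A sketch of a more robust range check, assuming that format (this helper is not part of the original script):
age_parts <- strsplit(DOD_48mo$Session.Age, " : ")
age_in_months <- sapply(age_parts, function(x) as.numeric(x[1]) + as.numeric(x[2]) / 30.44) # approximate days per month
range(age_in_months)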
(p1 <- DOD_48mo %>%
group_by(Prime, Overlap) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
ylim(0, .6) +
theme_minimal() +
labs(y="Raw Mean") +
theme(legend.position = "none")
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
DOD_48mo %>%
group_by(Prime, Overlap, Target.verb) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
facet_wrap(~Target.verb)
## `summarise()` has grouped output by 'Prime', 'Overlap'. You can override using
## the `.groups` argument.
Looks good: the difference in DOD responses between the DOD and POD prime conditions is bigger when there is lexical overlap for all verbs, except possibly brought, for which the difference looks similar (proportionally, though not additively).
And let’s look at the pattern of responses when we only consider participants who produced at least one DOD (at either 42 or 48 months).
DOD_48mo <- DOD_48mo %>%
group_by(Blind_ID) %>%
mutate(
Prod_DOD = sum(RESPONSE.CODE)
) %>%
dplyr::select(Blind_ID, Prod_DOD) %>%
slice(1) %>%
ungroup() %>%
full_join(DOD_48mo, ., by="Blind_ID") %>%
left_join(., DOD_42mo, by="Blind_ID")
writexl::write_xlsx(DOD_48mo, "DOD_48mo_prepped.xlsx")
DOD_48mo2 <- filter(DOD_48mo, !(DOD_42==0 & Prod_DOD==0))
DOD_48mo2 %>%
group_by(Blind_ID) %>%
slice(1) %>%
ungroup() %>%
count()
## # A tibble: 1 × 1
## n
## <int>
## 1 66
(p1b <- DOD_48mo2 %>%
group_by(Prime, Overlap) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
ylim(0, .9) +
theme_minimal() +
labs(y="Raw Mean") +
theme(legend.position = "none")
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
DOD_48mo2 %>%
group_by(Prime, Overlap, Target.verb) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
facet_wrap(~Target.verb)
## `summarise()` has grouped output by 'Prime', 'Overlap'. You can override using
## the `.groups` argument.
Maybe a bit different for brought, but we’re working with less data here and the overlap condition appears to be at floor.
Plot the proportion of DOD responses for each cell of the two-way interaction between bias match and prime type.
(p1c <- DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
group_by(Prime, biasmatch) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=biasmatch, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
ylim(0, .7) +
theme_minimal() +
labs(y="Raw Mean")
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
group_by(Prime, biasmatch, Target.verb) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=biasmatch, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
facet_wrap(~Target.verb)
## `summarise()` has grouped output by 'Prime', 'biasmatch'. You can override
## using the `.groups` argument.
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
group_by(Target.verb, Prime, biasmatch) %>%
count()
## # A tibble: 21 × 4
## # Groups: Target.verb, Prime, biasmatch [21]
## Target.verb Prime biasmatch n
## <chr> <chr> <chr> <int>
## 1 brought DOD match 21
## 2 brought DOD mismatch 16
## 3 brought POD match 20
## 4 brought POD mismatch 19
## 5 gave DOD match 2
## 6 gave DOD mismatch 43
## 7 gave POD match 46
## 8 passed DOD match 11
## 9 passed DOD mismatch 21
## 10 passed POD match 24
## # ℹ 11 more rows
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
filter(Target.verb=="gave" & Prime=="DOD" & biasmatch == "match")
## Blind_ID Gender Session.Age List Item Prime.verb Target.verb Prime.Type
## 1 XqCEb62p 1 48 : 19 3 2 showed gave 0.5
## 2 6BFe6U46 1 48 : 18 3 2 showed gave 0.5
## Verb.match RESPONSE.CODE Prime Overlap bias biasmatch biasmismatchc
## 1 -0.5 1 DOD No Overlap DOD match -0.5
## 2 -0.5 0 DOD No Overlap DOD match -0.5
## Trial_Drop Trial_Problem N_Missing Drop MightDrop Prod_DOD DOD_42
## 1 0 0 0 0 0 2 0
## 2 0 0 0 0 0 0 0
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
filter(Target.verb=="gave" & Prime=="POD" & biasmatch == "mismatch")
## [1] Blind_ID Gender Session.Age List Item
## [6] Prime.verb Target.verb Prime.Type Verb.match RESPONSE.CODE
## [11] Prime Overlap bias biasmatch biasmismatchc
## [16] Trial_Drop Trial_Problem N_Missing Drop MightDrop
## [21] Prod_DOD DOD_42
## <0 rows> (or 0-length row.names)
logit_prior <- prior(normal(0, 2), class = sd) # weakly informative prior on the group-level SDs (logit scale)
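To see which parameter classes this prior covers and which keep the brms defaults, the full prior structure of, say, the variance-components model can be listed (a sketch; this step isn’t shown in the original script):
get_prior(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb),
          family = bernoulli(), data = DOD_48mo) # every parameter class with its default prior; the sd class is the one overridden above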
First let’s consider whether we should include random effects for the interaction between ID and Item. In principle this is possible, but I think it will greatly complicate the model. So I’ll fit two variance-components models with the two random-effects specifications and compare them to one another.
m48mo0<- brm(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb), family="bernoulli", seed=12345, prior=logit_prior, iter=4000, cores=4, data=DOD_48mo, file="48mo/output/m48mo0.rds")
m48mo0b <- brm(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb) + (1 | Blind_ID:Target.verb), seed=12345, family="bernoulli", prior=logit_prior, iter=4000, cores=4, data=DOD_48mo, file="48mo/output/m48mo0b.rds")
Add LOO criteria to both models so they can be compared with loo_compare.
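The comparison itself isn’t shown in this output; a minimal sketch of that step (assumed reconstruction, using the same model objects):
m48mo0 <- add_criterion(m48mo0, "loo")   # PSIS-LOO for the simpler variance-components model
m48mo0b <- add_criterion(m48mo0b, "loo") # and for the model with the ID-by-verb interaction term
loo_compare(m48mo0, m48mo0b)             # a small |elpd_diff| relative to its SE means little practical difference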
A slight preference for the more complex one, but to stay consistent, I’ll stick with the simpler model.
Here’s the model of the lexical boost.
m48mo1 <- brm(RESPONSE.CODE ~ 1 + Prime.Type*Overlap + (1 + Prime.Type*Overlap | Blind_ID) + (1 + Prime.Type*Overlap | Target.verb), prior=logit_prior, family="bernoulli", seed=12345, cores=4, control=list(adapt_delta=.9), iter=4000, data=DOD_48mo, file="48mo/output/m48mo1.rds")
m48mo1<- add_criterion(m48mo1, "loo")
Chains are good.
#plot(m48mo1, N=4)
summary(m48mo1, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * Overlap + (1 + Prime.Type * Overlap | Blind_ID) + (1 + Prime.Type * Overlap | Target.verb)
## Data: DOD_48mo (Number of observations: 1019)
## Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup draws = 8000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 90)
## Estimate Est.Error l-90% CI
## sd(Intercept) 2.14 0.35 1.61
## sd(Prime.Type) 0.48 0.35 0.04
## sd(OverlapOverlap) 0.85 0.49 0.10
## sd(Prime.Type:OverlapOverlap) 0.79 0.57 0.07
## cor(Intercept,Prime.Type) 0.01 0.44 -0.71
## cor(Intercept,OverlapOverlap) 0.17 0.37 -0.48
## cor(Prime.Type,OverlapOverlap) 0.02 0.44 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.12 0.43 -0.77
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.02 0.44 -0.74
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.03 0.44 -0.71
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.73 1.00 4203 5860
## sd(Prime.Type) 1.16 1.00 4710 5626
## sd(OverlapOverlap) 1.69 1.00 1999 3507
## sd(Prime.Type:OverlapOverlap) 1.87 1.00 3972 5002
## cor(Intercept,Prime.Type) 0.73 1.00 14907 5997
## cor(Intercept,OverlapOverlap) 0.75 1.00 11203 6133
## cor(Prime.Type,OverlapOverlap) 0.73 1.00 3310 5031
## cor(Intercept,Prime.Type:OverlapOverlap) 0.64 1.00 12939 5957
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.71 1.00 7917 6665
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.74 1.00 7908 6811
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.63 0.61 0.87
## sd(Prime.Type) 0.45 0.40 0.03
## sd(OverlapOverlap) 0.48 0.42 0.04
## sd(Prime.Type:OverlapOverlap) 0.91 0.69 0.07
## cor(Intercept,Prime.Type) -0.08 0.45 -0.78
## cor(Intercept,OverlapOverlap) -0.03 0.44 -0.72
## cor(Prime.Type,OverlapOverlap) 0.03 0.45 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.20 0.42 -0.81
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.03 0.45 -0.76
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.00 0.45 -0.72
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.80 1.00 5319 5788
## sd(Prime.Type) 1.25 1.00 5468 5674
## sd(OverlapOverlap) 1.29 1.00 6032 6211
## sd(Prime.Type:OverlapOverlap) 2.24 1.00 5353 5058
## cor(Intercept,Prime.Type) 0.68 1.00 13987 5414
## cor(Intercept,OverlapOverlap) 0.69 1.00 15304 5922
## cor(Prime.Type,OverlapOverlap) 0.76 1.00 8401 6754
## cor(Intercept,Prime.Type:OverlapOverlap) 0.55 1.00 14107 6180
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.71 1.00 8150 6492
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.72 1.00 7416 6996
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -2.68 0.79 -3.93 -1.36 1.00 4123
## Prime.Type 0.40 0.45 -0.31 1.13 1.00 9691
## OverlapOverlap -1.43 0.55 -2.40 -0.62 1.00 6195
## Prime.Type:OverlapOverlap 2.23 0.91 0.86 3.81 1.00 8344
## Tail_ESS
## Intercept 5186
## Prime.Type 6321
## OverlapOverlap 5235
## Prime.Type:OverlapOverlap 5624
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m48mo1, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 0.4 0.45 -0.31 1.13 4.71 0.82
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo1, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0 2.23 0.91 0.86 3.81 257.06
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo1, "Intercept + OverlapOverlap + .5*Prime.Type + .5*Prime.Type:OverlapOverlap >
Intercept + OverlapOverlap - .5*Prime.Type - .5*Prime.Type:OverlapOverlap ")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Intercept+Overla... > 0 2.63 0.91 1.29 4.19 614.38
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
m48mo1.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo1)),
observedResponse = DOD_48mo$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo1)), 1, mean),
integerResponse=TRUE
)
plot(m48mo1.model.check) # standard plots
testDispersion(m48mo1.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.75463, p-value = 0.00225
## alternative hypothesis: two.sided
Underdispersed, no other issues.
A quick check that the posterior predictive distribution looks okay.
#pp_check(m48mo1)
Sensitivity analyses (without problem trials and without trials with high Pareto k values) are in the R Markdown file.
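A sketch of how those two sensitivity subsets could be built (assumed, not the original sensitivity-analysis code):
DOD_48mo_noprob <- filter(DOD_48mo, Trial_Problem == 0)     # drop the admin-error trials (12r on lists 6 and 8)
loo_m48mo1 <- loo(m48mo1)                                   # PSIS-LOO for the main model
high_k <- which(loo_m48mo1$diagnostics$pareto_k > 0.7)      # observations with high Pareto k
DOD_48mo_lowk <- if (length(high_k) > 0) DOD_48mo[-high_k, ] else DOD_48mo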
m48mo2 <- brm(RESPONSE.CODE ~ 1 + Prime.Type*Overlap + (1 + Prime.Type*Overlap | Blind_ID) + (1 + Prime.Type*Overlap | Target.verb), prior=logit_prior, family="bernoulli", cores=4, seed=12345, iter = 8000, data=DOD_48mo2, file="48mo/output/m48mo2.rds")
m48mo2<- add_criterion(m48mo2, "loo")
Chains look good.
#plot(m48mo2, N = 4)
summary(m48mo2, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * Overlap + (1 + Prime.Type * Overlap | Blind_ID) + (1 + Prime.Type * Overlap | Target.verb)
## Data: DOD_48mo2 (Number of observations: 747)
## Draws: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 66)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.59 0.30 1.13
## sd(Prime.Type) 0.46 0.34 0.04
## sd(OverlapOverlap) 0.90 0.49 0.14
## sd(Prime.Type:OverlapOverlap) 0.80 0.58 0.07
## cor(Intercept,Prime.Type) -0.02 0.43 -0.72
## cor(Intercept,OverlapOverlap) 0.20 0.36 -0.43
## cor(Prime.Type,OverlapOverlap) -0.01 0.44 -0.73
## cor(Intercept,Prime.Type:OverlapOverlap) -0.17 0.42 -0.79
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.03 0.45 -0.75
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.01 0.44 -0.71
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.10 1.00 5992 9766
## sd(Prime.Type) 1.10 1.00 6938 8237
## sd(OverlapOverlap) 1.74 1.00 2851 5143
## sd(Prime.Type:OverlapOverlap) 1.88 1.00 5245 7374
## cor(Intercept,Prime.Type) 0.70 1.00 15411 10828
## cor(Intercept,OverlapOverlap) 0.76 1.00 8788 9018
## cor(Prime.Type,OverlapOverlap) 0.71 1.00 3734 7531
## cor(Intercept,Prime.Type:OverlapOverlap) 0.58 1.00 15053 10943
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.72 1.00 9327 12267
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.72 1.00 10912 12710
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.62 0.60 0.88
## sd(Prime.Type) 0.42 0.38 0.03
## sd(OverlapOverlap) 0.48 0.42 0.03
## sd(Prime.Type:OverlapOverlap) 0.93 0.70 0.09
## cor(Intercept,Prime.Type) -0.07 0.45 -0.77
## cor(Intercept,OverlapOverlap) -0.02 0.44 -0.74
## cor(Prime.Type,OverlapOverlap) 0.03 0.45 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.18 0.42 -0.80
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.03 0.45 -0.75
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.00 0.45 -0.73
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.75 1.00 6341 9746
## sd(Prime.Type) 1.15 1.00 8145 6239
## sd(OverlapOverlap) 1.28 1.00 8411 8311
## sd(Prime.Type:OverlapOverlap) 2.28 1.00 8511 8462
## cor(Intercept,Prime.Type) 0.68 1.00 18264 11771
## cor(Intercept,OverlapOverlap) 0.70 1.00 17527 11700
## cor(Prime.Type,OverlapOverlap) 0.76 1.00 13128 12894
## cor(Intercept,Prime.Type:OverlapOverlap) 0.58 1.00 15668 11641
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.72 1.00 12359 12204
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.73 1.00 11861 12887
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -1.77 0.75 -2.95 -0.53 1.00 4403
## Prime.Type 0.42 0.42 -0.24 1.10 1.00 11665
## OverlapOverlap -1.41 0.51 -2.28 -0.64 1.00 6989
## Prime.Type:OverlapOverlap 2.26 0.87 0.95 3.79 1.00 9194
## Tail_ESS
## Intercept 6238
## Prime.Type 9951
## OverlapOverlap 8067
## Prime.Type:OverlapOverlap 9595
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m48mo2, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 0.42 0.42 -0.24 1.1 5.83 0.85
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo2, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0 2.26 0.87 0.95 3.79 379.95
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo2, "Intercept + OverlapOverlap + .5*Prime.Type + .5*Prime.Type:OverlapOverlap >
Intercept + OverlapOverlap - .5*Prime.Type - .5*Prime.Type:OverlapOverlap ")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Intercept+Overla... > 0 2.69 0.84 1.43 4.17 1332.33
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
m48mo2.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo2)),
observedResponse = DOD_48mo2$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo2)), 1, mean),
integerResponse=TRUE
)
plot(m48mo2.model.check) # standard plots
testDispersion(m48mo2.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.76694, p-value = 0.002375
## alternative hypothesis: two.sided
tab_model(m48mo1, m48mo2, show.re.var=FALSE, show.r2=FALSE, show.icc=FALSE, transform=NULL, collapse.ci=TRUE, p.style="scientific", ci.hyphen = " : ", dv.labels=c("Model 1", "Model 2"), string.stat="",
pred.labels=c(
"Intercept",
"Prime (DOD)",
"Overlap",
"Prime * Overlap"
))
| Predictors | Model 1 (Log-Odds) | Model 2 (Log-Odds) |
|---|---|---|
| Intercept | -2.70 (-4.22 : -1.05) | -1.78 (-3.22 : -0.21) |
| Prime (DOD) | 0.39 (-0.48 : 1.32) | 0.42 (-0.39 : 1.26) |
| Overlap | -1.38 (-2.67 : -0.44) | -1.38 (-2.51 : -0.49) |
| Prime * Overlap | 2.18 (0.63 : 4.19) | 2.21 (0.70 : 4.16) |
| N (Blind_ID) | 90 | 66 |
| N (Target.verb) | 6 | 6 |
| Observations | 1019 | 747 |
DOD_48mo3 <- filter(DOD_48mo, Overlap=="No Overlap")
m48mo3 <- brm(RESPONSE.CODE ~ 1 + Prime.Type*biasmatch + (1 + Prime.Type*biasmatch | Blind_ID) + (1 + Prime.Type*biasmatch | Target.verb), seed=12345, prior=logit_prior, family="bernoulli", iter=8000, cores=4, file="48mo/output/m48mo3.rds", data=DOD_48mo3)
summary(m48mo3, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * biasmatch + (1 + Prime.Type * biasmatch | Blind_ID) + (1 + Prime.Type * biasmatch | Target.verb)
## Data: DOD_48mo3 (Number of observations: 491)
## Draws: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 90)
## Estimate Est.Error l-90% CI
## sd(Intercept) 2.48 0.56 1.64
## sd(Prime.Type) 0.61 0.48 0.05
## sd(biasmatchmismatch) 2.87 0.88 1.50
## sd(Prime.Type:biasmatchmismatch) 1.69 1.09 0.16
## cor(Intercept,Prime.Type) 0.10 0.45 -0.68
## cor(Intercept,biasmatchmismatch) -0.17 0.27 -0.59
## cor(Prime.Type,biasmatchmismatch) 0.04 0.43 -0.68
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.19 0.40 -0.53
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.09 0.46 -0.79
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.15 0.41 -0.77
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 3.45 1.00 5550
## sd(Prime.Type) 1.53 1.00 6301
## sd(biasmatchmismatch) 4.39 1.00 3108
## sd(Prime.Type:biasmatchmismatch) 3.66 1.00 3704
## cor(Intercept,Prime.Type) 0.80 1.00 12606
## cor(Intercept,biasmatchmismatch) 0.31 1.00 5872
## cor(Prime.Type,biasmatchmismatch) 0.72 1.00 1013
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.79 1.00 9753
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.68 1.00 6584
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.58 1.00 10104
## Tail_ESS
## sd(Intercept) 9197
## sd(Prime.Type) 7303
## sd(biasmatchmismatch) 3750
## sd(Prime.Type:biasmatchmismatch) 5682
## cor(Intercept,Prime.Type) 10402
## cor(Intercept,biasmatchmismatch) 7767
## cor(Prime.Type,biasmatchmismatch) 2568
## cor(Intercept,Prime.Type:biasmatchmismatch) 10229
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 10934
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 11182
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.76 0.68 0.88
## sd(Prime.Type) 0.66 0.57 0.05
## sd(biasmatchmismatch) 0.67 0.58 0.04
## sd(Prime.Type:biasmatchmismatch) 0.95 0.78 0.07
## cor(Intercept,Prime.Type) 0.09 0.44 -0.66
## cor(Intercept,biasmatchmismatch) 0.05 0.44 -0.69
## cor(Prime.Type,biasmatchmismatch) 0.01 0.45 -0.73
## cor(Intercept,Prime.Type:biasmatchmismatch) -0.06 0.44 -0.76
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.08 0.45 -0.78
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.02 0.46 -0.75
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 3.04 1.00 6848
## sd(Prime.Type) 1.76 1.00 8112
## sd(biasmatchmismatch) 1.80 1.00 8653
## sd(Prime.Type:biasmatchmismatch) 2.48 1.00 9634
## cor(Intercept,Prime.Type) 0.77 1.00 15048
## cor(Intercept,biasmatchmismatch) 0.76 1.00 16175
## cor(Prime.Type,biasmatchmismatch) 0.74 1.00 13237
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.69 1.00 17009
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.69 1.00 14069
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.73 1.00 12266
## Tail_ESS
## sd(Intercept) 9859
## sd(Prime.Type) 7351
## sd(biasmatchmismatch) 7044
## sd(Prime.Type:biasmatchmismatch) 8007
## cor(Intercept,Prime.Type) 10573
## cor(Intercept,biasmatchmismatch) 11511
## cor(Prime.Type,biasmatchmismatch) 11947
## cor(Intercept,Prime.Type:biasmatchmismatch) 11069
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 12397
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 12330
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -2.91 0.94 -4.48 -1.41 1.00 5019
## Prime.Type -0.02 0.75 -1.24 1.16 1.00 8079
## biasmatchmismatch -1.22 1.03 -2.98 0.30 1.00 5599
## Prime.Type:biasmatchmismatch 1.49 1.42 -0.70 3.94 1.00 7462
## Tail_ESS
## Intercept 7375
## Prime.Type 8422
## biasmatchmismatch 7245
## Prime.Type:biasmatchmismatch 8182
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
m48mo3 <- add_criterion(m48mo3, "loo")
Chains look okay.
#plot(m48mo3, N = 4)
hypothesis(m48mo3, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 -0.02 0.75 -1.24 1.16 1 0.5
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo3, "Prime.Type:biasmatchmismatch > 0") # Bias mismatch
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0 1.49 1.42 -0.7 3.94 6.55
## Post.Prob Star
## 1 0.87
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
m48mo3.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo3)),
observedResponse = DOD_48mo3$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo3)), 1, mean),
integerResponse=TRUE
)
plot(m48mo3.model.check) # standard plots
testDispersion(m48mo3.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.52833, p-value < 2.2e-16
## alternative hypothesis: two.sided
p3 <- fitted(m48mo3, re_formula=NA, scale="response") %>%
cbind(DOD_48mo3) %>%
dplyr::select(Prime, biasmatch, Estimate, Est.Error) %>%
ggplot(aes(y=Estimate, x=biasmatch, fill=Prime )) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=Estimate, ymax=Estimate+Est.Error), width=.2,
position=position_dodge(.9)) +
theme_minimal() +
ylim(0, .6) +
labs(y="Predicted Probability") +
theme(legend.position = "none")
(p1c | p3) + plot_layout(guides = "collect") & theme(legend.position = "right")
DOD_48mo4 <-
DOD_48mo2 %>%
filter(Overlap=="No Overlap")
m48mo4 <- brm(RESPONSE.CODE ~ 1 + Prime.Type*biasmatch + (1 + Prime.Type*biasmatch | Blind_ID) + (1 + Prime.Type*biasmatch | Target.verb), prior=logit_prior, family="bernoulli", iter=8000, seed=12345, cores=4, control=list(adapt_delta = .95), file="48mo/output/m48mo4.rds", data=DOD_48mo4) # increased adapt_delta because of divergent transitions
summary(m48mo4, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * biasmatch + (1 + Prime.Type * biasmatch | Blind_ID) + (1 + Prime.Type * biasmatch | Target.verb)
## Data: DOD_48mo4 (Number of observations: 358)
## Draws: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 66)
## Estimate Est.Error l-90% CI
## sd(Intercept) 2.02 0.53 1.22
## sd(Prime.Type) 0.61 0.48 0.05
## sd(biasmatchmismatch) 2.68 0.87 1.31
## sd(Prime.Type:biasmatchmismatch) 1.69 1.09 0.17
## cor(Intercept,Prime.Type) 0.09 0.45 -0.67
## cor(Intercept,biasmatchmismatch) -0.28 0.28 -0.68
## cor(Prime.Type,biasmatchmismatch) 0.05 0.44 -0.68
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.16 0.40 -0.54
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.10 0.45 -0.79
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.13 0.41 -0.76
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 2.94 1.00 6370
## sd(Prime.Type) 1.52 1.00 7968
## sd(biasmatchmismatch) 4.17 1.00 4410
## sd(Prime.Type:biasmatchmismatch) 3.70 1.00 4937
## cor(Intercept,Prime.Type) 0.79 1.00 21607
## cor(Intercept,biasmatchmismatch) 0.23 1.00 8425
## cor(Prime.Type,biasmatchmismatch) 0.75 1.00 1746
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.77 1.00 15691
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.69 1.00 9012
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.59 1.00 14883
## Tail_ESS
## sd(Intercept) 9668
## sd(Prime.Type) 9397
## sd(biasmatchmismatch) 4384
## sd(Prime.Type:biasmatchmismatch) 7886
## cor(Intercept,Prime.Type) 12268
## cor(Intercept,biasmatchmismatch) 9797
## cor(Prime.Type,biasmatchmismatch) 4099
## cor(Intercept,Prime.Type:biasmatchmismatch) 11635
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 11802
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 13178
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.73 0.67 0.88
## sd(Prime.Type) 0.61 0.52 0.04
## sd(biasmatchmismatch) 0.65 0.56 0.05
## sd(Prime.Type:biasmatchmismatch) 0.96 0.78 0.08
## cor(Intercept,Prime.Type) 0.07 0.44 -0.67
## cor(Intercept,biasmatchmismatch) 0.06 0.44 -0.69
## cor(Prime.Type,biasmatchmismatch) -0.00 0.45 -0.73
## cor(Intercept,Prime.Type:biasmatchmismatch) -0.06 0.44 -0.76
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.08 0.45 -0.79
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.02 0.45 -0.75
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 3.01 1.00 10351
## sd(Prime.Type) 1.61 1.00 11308
## sd(biasmatchmismatch) 1.75 1.00 12516
## sd(Prime.Type:biasmatchmismatch) 2.48 1.00 13163
## cor(Intercept,Prime.Type) 0.76 1.00 22924
## cor(Intercept,biasmatchmismatch) 0.76 1.00 24849
## cor(Prime.Type,biasmatchmismatch) 0.74 1.00 15852
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.68 1.00 25712
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.68 1.00 17731
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.73 1.00 14823
## Tail_ESS
## sd(Intercept) 12131
## sd(Prime.Type) 9909
## sd(biasmatchmismatch) 10852
## sd(Prime.Type:biasmatchmismatch) 10636
## cor(Intercept,Prime.Type) 12036
## cor(Intercept,biasmatchmismatch) 11846
## cor(Prime.Type,biasmatchmismatch) 12693
## cor(Intercept,Prime.Type:biasmatchmismatch) 12070
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 12613
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 13580
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -1.99 0.86 -3.41 -0.60 1.00 8348
## Prime.Type -0.02 0.71 -1.20 1.12 1.00 14449
## biasmatchmismatch -0.85 0.87 -2.36 0.46 1.00 11106
## Prime.Type:biasmatchmismatch 1.54 1.32 -0.50 3.78 1.00 13276
## Tail_ESS
## Intercept 9678
## Prime.Type 11084
## biasmatchmismatch 10684
## Prime.Type:biasmatchmismatch 11978
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
m48mo4 <- add_criterion(m48mo4, "loo")
m48mo4.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo4)),
observedResponse = DOD_48mo4$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo4)), 1, mean),
integerResponse=TRUE
)
plot(m48mo4.model.check) # standard plots
testDispersion(m48mo4.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.55439, p-value < 2.2e-16
## alternative hypothesis: two.sided
hypothesis(m48mo4, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 -0.02 0.71 -1.2 1.12 0.98 0.49
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo4, "Prime.Type:biasmatchmismatch > 0") # Bias mismatch
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0 1.54 1.32 -0.5 3.78 8.05
## Post.Prob Star
## 1 0.89
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
tab_model(m48mo1, m48mo2, m48mo3, m48mo4, show.ci=.90, show.re.var=FALSE, show.r2=FALSE, show.icc=FALSE, transform=NULL, collapse.ci=TRUE, p.style="scientific", ci.hyphen = " : ", dv.labels=c("Model 1", "Model 2", "Model 3", "Model 4"), bpe="mean", string.stat="", file="Table48",
pred.labels=c(
"Intercept",
"Prime (DOD)",
"Overlap",
"Prime * Overlap",
"Bias Mismatch (POD)",
"Prime * Bias Mismatch "
))
| Predictors | Model 1 (Log-Odds) | Model 2 (Log-Odds) | Model 3 (Log-Odds) | Model 4 (Log-Odds) |
|---|---|---|---|---|
| Intercept | -2.68 (-3.93 : -1.36) | -1.77 (-2.95 : -0.53) | -2.91 (-4.48 : -1.41) | -1.99 (-3.41 : -0.60) |
| Prime (DOD) | 0.40 (-0.31 : 1.13) | 0.42 (-0.24 : 1.10) | -0.02 (-1.24 : 1.16) | -0.02 (-1.20 : 1.12) |
| Overlap | -1.43 (-2.40 : -0.62) | -1.41 (-2.28 : -0.64) | | |
| Prime * Overlap | 2.23 (0.86 : 3.81) | 2.26 (0.95 : 3.79) | | |
| Bias Mismatch (POD) | | | -1.22 (-2.98 : 0.30) | -0.85 (-2.36 : 0.46) |
| Prime * Bias Mismatch | | | 1.49 (-0.70 : 3.94) | 1.54 (-0.50 : 3.78) |
| N (Blind_ID) | 90 | 66 | 90 | 66 |
| N (Target.verb) | 6 | 6 | 6 | 6 |
| Observations | 1019 | 747 | 491 | 358 |