This is the analysis of the 48-month data from the longitudinal study. The other analyses are available at the links below.
Longitudinal Analyses: 42 months, 48 months, 54 months, Longitudinal
library(brms)
library(readxl)
library(writexl)
library(DHARMa)
library(rmarkdown)
library(tidybayes)
library(ggplot2)
library(tidyr)
library(dplyr)
library(GGally)
library(ggpubr)
library(patchwork)
library(sjPlot)
library(lubridate)
library(zoo)
DOD_48mo <- read_xlsx("DOD_48mo_blind.xlsx")
N_total <- nrow(DOD_48mo) # sanity check
DOD_42mo <- read_xlsx("DOD_42mo_prepped.xlsx") %>%
group_by(Blind_ID) %>%
slice(1) %>%
dplyr::select("Blind_ID", "Prod_DOD") %>%
rename("DOD_42" = "Prod_DOD") %>%
ungroup()
Create mismatch variables. I’m also creating some character versions of many factors to make plotting a bit easier later.
DOD_48mo <- DOD_48mo %>%
mutate(
Prime = ifelse(Prime.Type==.5, yes="DOD", no="POD"), # Prime type, character version
Overlap = ifelse(Verb.match==.5, yes="Overlap", no="No Overlap") # overlap
)
DOD_48mo$bias <-"DOD" # set as DOD bias
DOD_48mo$bias[DOD_48mo$Prime.verb=="passed"] = "POD" # Change POD-biased prime verbs.
DOD_48mo$bias[DOD_48mo$Prime.verb=="sent"] = "POD"
DOD_48mo$bias[DOD_48mo$Prime.verb=="threw"] = "POD"
DOD_48mo$biasmatch <-"match" # set to match
DOD_48mo$biasmatch[DOD_48mo$bias=="POD" & DOD_48mo$Prime == "DOD"] = "mismatch" # Change to mismatch.
DOD_48mo$biasmatch[DOD_48mo$bias=="DOD" & DOD_48mo$Prime == "POD"] = "mismatch"
table(DOD_48mo$biasmatch)
##
## match mismatch
## 541 539
DOD_48mo$biasmismatchc <- ifelse(DOD_48mo$biasmatch == "mismatch", yes=.5, no=-.5)
There is one type of trial coded as an admin error that I don't want to remove for the time being: trials 12r on lists 6 and 8.
DOD_48mo <- DOD_48mo %>%
mutate(
Trial_Drop = ifelse(is.na(RESPONSE.CODE), yes=1, no=0),
Trial_Problem = ifelse(List==6 & Item == "12r", yes = 1, no =ifelse(
List==8 & Item == "12r", yes = 1, no =0
))
)
nrow(filter(DOD_48mo, Trial_Problem==1)) == (nrow(filter(DOD_48mo, List==6))/12) + (nrow(filter(DOD_48mo, List==8))/12)
## [1] TRUE
N_miss <- DOD_48mo %>%
filter(is.na(RESPONSE.CODE)) %>%
nrow()
DOD_48mo %>%
summarise(
Verb_Error = sum(Verb.error, na.rm=TRUE),
Admin_Error = sum(Admin.error, na.rm=TRUE),
Prompts.disallowed = sum(Prompts.disallowed, na.rm=TRUE),
Prep_Error = sum(Prep.error.bin, na.rm=TRUE),
Reversal_Error = sum(Reversal.error, na.rm=TRUE),
NonTargetResponse = sum(NonTarget.response, na.rm=TRUE),
No_Response = sum(No.response, na.rm=TRUE),
GavePOD = sum(Gave.POD, na.rm=TRUE)
)
## # A tibble: 1 x 8
## Verb_Error Admin_Error Prompts.disal~1 Prep_~2 Rever~3 NonTa~4 No_Re~5 GavePOD
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <int> <dbl>
## 1 4 10 3 39 1 54 0 6
## # ... with abbreviated variable names 1: Prompts.disallowed, 2: Prep_Error,
## # 3: Reversal_Error, 4: NonTargetResponse, 5: No_Response
54 non-target responses (this covers a lot of things: non-dative responses, preposition errors, etc.).
DOD_48mo %>%
filter(is.na(NonTarget.response) & is.na(RESPONSE.CODE)) %>%
summarise(
Verb_Error = sum(Verb.error, na.rm=TRUE),
Admin_Error = sum(Admin.error, na.rm=TRUE),
Prompts.disallowed = sum(Prompts.disallowed, na.rm=TRUE),
Prep_Error = sum(Prep.error.bin, na.rm=TRUE),
Reversal_Error = sum(Reversal.error, na.rm=TRUE),
NonTargetResponse = sum(NonTarget.response, na.rm=TRUE),
No_Response = sum(No.response, na.rm=TRUE),
GavePOD = sum(Gave.POD, na.rm=TRUE)
)
## # A tibble: 1 x 8
## Verb_Error Admin_Error Prompts.disal~1 Prep_~2 Rever~3 NonTa~4 No_Re~5 GavePOD
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <int> <dbl>
## 1 1 3 3 0 0 0 0 0
## # ... with abbreviated variable names 1: Prompts.disallowed, 2: Prep_Error,
## # 3: Reversal_Error, 4: NonTargetResponse, 5: No_Response
3 admin errors, 3 disallowed prompts, and 1 verb error. 54 non-target responses + 7 other errors = 61 errors in total.
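As a quick sanity check (just a sketch, assuming every non-target response is also coded as missing in RESPONSE.CODE), those counts should add up to the total number of missing trials computed above:
N_nontarget <- sum(DOD_48mo$NonTarget.response, na.rm=TRUE) # the 54 non-target responses
N_other <- DOD_48mo %>%
  filter(is.na(NonTarget.response) & is.na(RESPONSE.CODE)) %>%
  nrow() # the 7 other errors
(N_nontarget + N_other) == N_miss # should be TRUE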
DOD_48mo %>%
filter(is.na(NonTarget.response) & is.na(RESPONSE.CODE))
## # A tibble: 7 x 40
## Blind_ID Gender Session.~1 List Item Prime~2 Targe~3 Prime~4 Verb.~5 Verb.~6
## <chr> <dbl> <chr> <dbl> <chr> <chr> <chr> <dbl> <dbl> <dbl>
## 1 YwdLnSAD 1 48 : 21 1 3 threw threw -0.5 0.5 NA
## 2 HJ0iufnq 1 48 : 15 7 10 brought brought -0.5 0.5 NA
## 3 o0f6pCmo 0 48 : 15 4 4r showed showed 0.5 0.5 NA
## 4 o0f6pCmo 0 48 : 15 4 11r sent put 0.5 -0.5 1
## 5 a4x2pSkP 1 49 : 0 4 11r sent sent 0.5 0.5 NA
## 6 MVST3nlx 1 48 : 20 8 6r sent gave -0.5 -0.5 NA
## 7 k8FS5lbv 0 49 : 3 1 2 showed brought -0.5 -0.5 NA
## # ... with 30 more variables: Verb.error_systematic <dbl>, Admin.error <dbl>,
## # Repeat.prime <dbl>, Repeat.target.prompt <dbl>, Prime.interference <dbl>,
## # Missing.article <dbl>, Pronoun <dbl>, Prep.error <chr>,
## # Prep.error.bin <dbl>, Reversal.error <dbl>, Prompts.allowable <dbl>,
## # Prompts.disallowed <dbl>, NonTarget.response <dbl>, No.response <lgl>,
## # Gave.POD <dbl>, RESPONSE.CODE <dbl>, Prime.code <dbl>, Transcription <chr>,
## # Coded.response <chr>, Notes <chr>, X.2 <dbl>, X.1 <dbl>, X <dbl>, ...
Let's see how many missing values we have by participant.
DOD_48mo %>%
group_by(Blind_ID) %>%
mutate(
N_Missing = sum(is.na(RESPONSE.CODE))
) %>%
slice(1) %>%
select("Blind_ID", "N_Missing") %>%
group_by(N_Missing) %>%
count()
## # A tibble: 6 x 2
## # Groups: N_Missing [6]
## N_Missing n
## <int> <int>
## 1 0 54
## 2 1 21
## 3 2 10
## 4 3 2
## 5 4 2
## 6 6 1
DOD_48mo <- DOD_48mo %>%
group_by(Blind_ID) %>%
mutate(
N_Missing = sum(is.na(RESPONSE.CODE))
) %>%
slice(1) %>%
select("Blind_ID", "N_Missing") %>%
mutate(
Drop = ifelse(N_Missing >= 10, 1, 0),
MightDrop = ifelse(N_Missing > 6, 1, 0)
) %>%
full_join(DOD_48mo, .)
## Joining, by = "Blind_ID"
N_drop = DOD_48mo %>%
filter(Drop == 1 & !is.na(RESPONSE.CODE)) %>%
count()
And by item.
DOD_48mo %>%
group_by(Target.verb) %>%
mutate(
N_Missing = sum(is.na(RESPONSE.CODE))
) %>%
slice(1) %>%
select("Target.verb", "N_Missing") %>%
group_by(Target.verb)
## # A tibble: 7 x 2
## # Groups: Target.verb [7]
## Target.verb N_Missing
## <chr> <int>
## 1 brought 16
## 2 gave 2
## 3 passed 6
## 4 put 1
## 5 sent 7
## 6 showed 11
## 7 threw 18
All non-target verbs have been removed. It doesn’t make sense to estimate random effects for a level of a random factor with only a single observation.
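As a quick check (a sketch, assuming non-target verbs are exactly the ones whose trials have a missing RESPONSE.CODE), dropping the missing trials should leave only the six experimental target verbs:
DOD_48mo %>%
  filter(!is.na(RESPONSE.CODE)) %>% # drop trials with a missing response
  count(Target.verb) # should list only the six experimental verbs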
And let’s just check the proportion of missing trials across the various experimental conditions.
DOD_48mo %>%
mutate(
Missing = ifelse(is.na(RESPONSE.CODE), yes=1, no = 0)
) %>%
group_by(Prime, Overlap) %>%
summarise(
mean = mean(Missing),
sd = sd(Missing)
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
## # A tibble: 4 x 4
## # Groups: Prime [2]
## Prime Overlap mean sd
## <chr> <chr> <dbl> <dbl>
## 1 DOD No Overlap 0.0734 0.261
## 2 DOD Overlap 0.0643 0.246
## 3 POD No Overlap 0.0309 0.173
## 4 POD Overlap 0.0567 0.232
DOD_48mo %>%
mutate(
Missing = ifelse(is.na(RESPONSE.CODE), yes=1, no = 0)
) %>%
group_by(Prime, biasmatch) %>%
summarise(
mean = mean(Missing),
sd = sd(Missing)
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
## # A tibble: 4 x 4
## # Groups: Prime [2]
## Prime biasmatch mean sd
## <chr> <chr> <dbl> <dbl>
## 1 DOD match 0.0519 0.222
## 2 DOD mismatch 0.0855 0.280
## 3 POD match 0.0480 0.214
## 4 POD mismatch 0.0407 0.198
The differences in missingness between conditions don't look drastic. To sum up, we have much less missing data than at 42 months, which is to be expected. In particular, no participant is missing more than 6 trials.
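A quick confirmation of that last point, using the N_Missing and Drop variables created above (a sketch):
DOD_48mo %>%
  group_by(Blind_ID) %>%
  slice(1) %>%
  ungroup() %>%
  summarise(
    max_missing = max(N_Missing), # should be 6
    n_flagged = sum(Drop) # nobody reaches the drop threshold of 10 missing trials
  )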
DOD_48mo <- filter(DOD_48mo, !is.na(RESPONSE.CODE) & Drop==0) # 1019
nrow(DOD_48mo) == N_total - N_miss - N_drop
## n
## [1,] TRUE
DOD_48mo %>%
summarise(
min = min(Session.Age),
max = max(Session.Age)
)
## # A tibble: 1 x 2
## min max
## <chr> <chr>
## 1 48 : 11 49 : 4
(p1 <- DOD_48mo %>%
group_by(Prime, Overlap) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
ylim(0, .6) +
theme_minimal() +
labs(y="Raw Mean") +
theme(legend.position = "none")
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
DOD_48mo %>%
group_by(Prime, Overlap, Target.verb) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
facet_wrap(~Target.verb)
## `summarise()` has grouped output by 'Prime', 'Overlap'. You can override using
## the `.groups` argument.
Looks good: the difference in DOD responses between the DOD- and POD-prime conditions is bigger when there is lexical overlap for all verbs, except possibly brought, for which the difference looks similar (proportionally, though not additively).
And let's look at the pattern of responses when we only consider participants who produced at least one DOD (at either 42 or 48 months).
DOD_48mo <- DOD_48mo %>%
group_by(Blind_ID) %>%
mutate(
Prod_DOD = sum(RESPONSE.CODE)
) %>%
dplyr::select(Blind_ID, Prod_DOD) %>%
slice(1) %>%
ungroup() %>%
full_join(DOD_48mo, ., by="Blind_ID") %>%
left_join(., DOD_42mo, by="Blind_ID")
writexl::write_xlsx(DOD_48mo, "DOD_48mo_prepped.xlsx")
DOD_48mo2 <- filter(DOD_48mo, !(DOD_42==0 & Prod_DOD==0))
DOD_48mo2 %>%
group_by(Blind_ID) %>%
slice(1) %>%
ungroup() %>%
count()
## # A tibble: 1 x 1
## n
## <int>
## 1 66
(p1b <- DOD_48mo2 %>%
group_by(Prime, Overlap) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
ylim(0, .9) +
theme_minimal() +
labs(y="Raw Mean") +
theme(legend.position = "none")
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
DOD_48mo2 %>%
group_by(Prime, Overlap, Target.verb) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=Overlap, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
facet_wrap(~Target.verb)
## `summarise()` has grouped output by 'Prime', 'Overlap'. You can override using
## the `.groups` argument.
Maybe a bit different for brought, but we're working with less data here, and the overlap condition appears to be at floor.
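To put a number on the floor claim (just a sketch of the raw cell means for brought in this reduced dataset):
DOD_48mo2 %>%
  filter(Target.verb == "brought") %>%
  group_by(Prime, Overlap) %>%
  summarise(
    mean = mean(RESPONSE.CODE, na.rm=TRUE), # proportion of DOD responses
    n = n(),
    .groups = "drop"
  )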
Plot the proportion of DOD responses for each cell of the two-way interaction between biasmatch and prime type.
(p1c <- DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
group_by(Prime, biasmatch) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=biasmatch, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
ylim(0, .7) +
theme_minimal() +
labs(y="Raw Mean")
)
## `summarise()` has grouped output by 'Prime'. You can override using the
## `.groups` argument.
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
group_by(Prime, biasmatch, Target.verb) %>%
summarise(mean = mean(RESPONSE.CODE, na.rm=TRUE),
sd = sd(RESPONSE.CODE, na.rm=TRUE)) %>%
ggplot(aes(y=mean, x=biasmatch, fill=Prime)) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=mean, ymax=mean+sd), width=.2,
position=position_dodge(.9)) +
facet_wrap(~Target.verb)
## `summarise()` has grouped output by 'Prime', 'biasmatch'. You can override using
## the `.groups` argument.
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
group_by(Target.verb, Prime, biasmatch) %>%
count()
## # A tibble: 21 x 4
## # Groups: Target.verb, Prime, biasmatch [21]
## Target.verb Prime biasmatch n
## <chr> <chr> <chr> <int>
## 1 brought DOD match 21
## 2 brought DOD mismatch 16
## 3 brought POD match 20
## 4 brought POD mismatch 19
## 5 gave DOD match 2
## 6 gave DOD mismatch 43
## 7 gave POD match 46
## 8 passed DOD match 11
## 9 passed DOD mismatch 21
## 10 passed POD match 24
## # ... with 11 more rows
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
filter(Target.verb=="gave" & Prime=="DOD" & biasmatch == "match")
## # A tibble: 2 x 45
## Blind_ID Gender Session.~1 List Item Prime~2 Targe~3 Prime~4 Verb.~5 Verb.~6
## <chr> <dbl> <chr> <dbl> <chr> <chr> <chr> <dbl> <dbl> <dbl>
## 1 XqCEb62p 1 48 : 19 3 2 showed gave 0.5 -0.5 1
## 2 6BFe6U46 1 48 : 18 3 2 showed gave 0.5 -0.5 1
## # ... with 35 more variables: Verb.error_systematic <dbl>, Admin.error <dbl>,
## # Repeat.prime <dbl>, Repeat.target.prompt <dbl>, Prime.interference <dbl>,
## # Missing.article <dbl>, Pronoun <dbl>, Prep.error <chr>,
## # Prep.error.bin <dbl>, Reversal.error <dbl>, Prompts.allowable <dbl>,
## # Prompts.disallowed <dbl>, NonTarget.response <dbl>, No.response <lgl>,
## # Gave.POD <dbl>, RESPONSE.CODE <dbl>, Prime.code <dbl>, Transcription <chr>,
## # Coded.response <chr>, Notes <chr>, X.2 <dbl>, X.1 <dbl>, X <dbl>, ...
DOD_48mo %>%
filter(Overlap == "No Overlap") %>%
filter(Target.verb=="gave" & Prime=="POD" & biasmatch == "mismatch")
## # A tibble: 0 x 45
## # ... with 45 variables: Blind_ID <chr>, Gender <dbl>, Session.Age <chr>,
## # List <dbl>, Item <chr>, Prime.verb <chr>, Target.verb <chr>,
## # Prime.Type <dbl>, Verb.match <dbl>, Verb.error <dbl>,
## # Verb.error_systematic <dbl>, Admin.error <dbl>, Repeat.prime <dbl>,
## # Repeat.target.prompt <dbl>, Prime.interference <dbl>,
## # Missing.article <dbl>, Pronoun <dbl>, Prep.error <chr>,
## # Prep.error.bin <dbl>, Reversal.error <dbl>, Prompts.allowable <dbl>, ...
logit_prior = prior(normal(0, 2), class = sd) # normal(0, 2) prior on the group-level (random-effect) SDs
First, let's consider whether we should include random effects for the interaction between ID and item. In principle this is possible, but I think it would greatly complicate the model. So I'll fit two variance-component models with the two random-effects specifications and compare them to one another.
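Before fitting, it can help to see which parameter classes the prior above actually applies to. A minimal sketch using brms's get_prior(), which lists every parameter class in the variance-component model; the normal(0, 2) prior only replaces the defaults for class sd:
get_prior(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb),
  family = bernoulli(), data = DOD_48mo)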
m48mo0<- brm(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb), family="bernoulli", seed=12345, prior=logit_prior, iter=4000, cores=4, data=DOD_48mo, file="48mo/output/m48mo0.rds")
m48mo0b <- brm(RESPONSE.CODE ~ 1 + (1 | Blind_ID) + (1 | Target.verb) + (1 | Blind_ID:Target.verb), seed=12345, family="bernoulli", prior=logit_prior, iter=4000, cores=4, data=DOD_48mo, file="48mo/output/m48mo0b.rds")
Add the LOO criterion to both models for loo_compare.
m48mo0<- add_criterion(m48mo0, "loo")
m48mo0b<- add_criterion(m48mo0b, "loo")
loo1 <- loo(m48mo0b)
loo2 <- reloo(m48mo0b, loo1)
## 1 problematic observation(s) found.
## The model will be refit 1 times.
##
## Fitting model 1 out of 1 (leaving out observation 593)
## Start sampling
loo3 <- loo(m48mo0)
loo_compare(loo2, loo3)
## elpd_diff se_diff
## m48mo0b 0.0 0.0
## m48mo0 -0.1 1.1
A slight preference for the more complex one, but the difference is well within one standard error, so to stay consistent I'll stick with the simpler model.
Here’s the model of the lexical boost.
m48mo1 <- brm(RESPONSE.CODE ~ 1 + Prime.Type*Overlap + (1 + Prime.Type*Overlap | Blind_ID) + (1 + Prime.Type*Overlap | Target.verb), logit_prior,family="bernoulli", seed=12345, cores=4, control=list(adapt_delta=.9), iter=4000, data=DOD_48mo, file="48mo/output/m48mo1.rds")
m48mo1<- add_criterion(m48mo1, "loo")
Chains are good.
#plot(m48mo1, N=4)
summary(m48mo1, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * Overlap + (1 + Prime.Type * Overlap | Blind_ID) + (1 + Prime.Type * Overlap | Target.verb)
## Data: DOD_48mo (Number of observations: 1019)
## Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup draws = 8000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 90)
## Estimate Est.Error l-90% CI
## sd(Intercept) 2.14 0.35 1.61
## sd(Prime.Type) 0.48 0.35 0.04
## sd(OverlapOverlap) 0.85 0.49 0.10
## sd(Prime.Type:OverlapOverlap) 0.79 0.57 0.07
## cor(Intercept,Prime.Type) 0.01 0.44 -0.71
## cor(Intercept,OverlapOverlap) 0.17 0.37 -0.48
## cor(Prime.Type,OverlapOverlap) 0.02 0.44 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.12 0.43 -0.77
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.02 0.44 -0.74
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.03 0.44 -0.71
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.73 1.00 4203 5860
## sd(Prime.Type) 1.16 1.00 4710 5626
## sd(OverlapOverlap) 1.69 1.00 1999 3507
## sd(Prime.Type:OverlapOverlap) 1.87 1.00 3972 5002
## cor(Intercept,Prime.Type) 0.73 1.00 14907 5997
## cor(Intercept,OverlapOverlap) 0.75 1.00 11203 6133
## cor(Prime.Type,OverlapOverlap) 0.73 1.00 3310 5031
## cor(Intercept,Prime.Type:OverlapOverlap) 0.64 1.00 12939 5957
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.71 1.00 7917 6665
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.74 1.00 7908 6811
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.63 0.61 0.87
## sd(Prime.Type) 0.45 0.40 0.03
## sd(OverlapOverlap) 0.48 0.42 0.04
## sd(Prime.Type:OverlapOverlap) 0.91 0.69 0.07
## cor(Intercept,Prime.Type) -0.08 0.45 -0.78
## cor(Intercept,OverlapOverlap) -0.03 0.44 -0.72
## cor(Prime.Type,OverlapOverlap) 0.03 0.45 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.20 0.42 -0.81
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.03 0.45 -0.76
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.00 0.45 -0.72
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.80 1.00 5319 5788
## sd(Prime.Type) 1.25 1.00 5468 5674
## sd(OverlapOverlap) 1.29 1.00 6032 6211
## sd(Prime.Type:OverlapOverlap) 2.24 1.00 5353 5058
## cor(Intercept,Prime.Type) 0.68 1.00 13987 5414
## cor(Intercept,OverlapOverlap) 0.69 1.00 15304 5922
## cor(Prime.Type,OverlapOverlap) 0.76 1.00 8401 6754
## cor(Intercept,Prime.Type:OverlapOverlap) 0.55 1.00 14107 6180
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.71 1.00 8150 6492
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.72 1.00 7416 6996
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -2.68 0.79 -3.93 -1.36 1.00 4123
## Prime.Type 0.40 0.45 -0.31 1.13 1.00 9691
## OverlapOverlap -1.43 0.55 -2.40 -0.62 1.00 6195
## Prime.Type:OverlapOverlap 2.23 0.91 0.86 3.81 1.00 8344
## Tail_ESS
## Intercept 5186
## Prime.Type 6321
## OverlapOverlap 5235
## Prime.Type:OverlapOverlap 5624
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m48mo1, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 0.4 0.45 -0.31 1.13 4.71 0.82
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo1, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0 2.23 0.91 0.86 3.81 257.06
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo1, "Intercept + OverlapOverlap + .5*Prime.Type + .5*Prime.Type:OverlapOverlap >
Intercept + OverlapOverlap - .5*Prime.Type - .5*Prime.Type:OverlapOverlap ")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Intercept+Overla... > 0 2.63 0.91 1.29 4.19 614.38
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
m48mo1.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo1)),
observedResponse = DOD_48mo$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo1)), 1, mean),
integerResponse=TRUE
)
plot(m48mo1.model.check) # standard plots
testDispersion(m48mo1.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.75463, p-value = 0.00225
## alternative hypothesis: two.sided
Underdispersed, no other issues.
A quick check that the posterior predictive distribution looks okay.
#pp_check(m48mo1)
Sensitivity analyses (without problem trials, and without trials with high Pareto k values) are in the R Markdown file.
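For the Pareto-k part, one way to identify the relevant trials (a sketch, not the exact code from the R Markdown file) is via the loo package:
loo_m48mo1 <- loo(m48mo1)
high_k <- loo::pareto_k_ids(loo_m48mo1, threshold = 0.7) # row indices of DOD_48mo with high k
high_k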
m48mo2 <- brm(RESPONSE.CODE ~ 1 + Prime.Type*Overlap + (1 + Prime.Type*Overlap | Blind_ID) + (1 + Prime.Type*Overlap | Target.verb), logit_prior,family="bernoulli", cores=4, seed=12345, iter = 8000, data=DOD_48mo2, file="48mo/output/m48mo2.rds")
m48mo2<- add_criterion(m48mo2, "loo")
Chains look good.
#plot(m48mo2, N = 4)
summary(m48mo2, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * Overlap + (1 + Prime.Type * Overlap | Blind_ID) + (1 + Prime.Type * Overlap | Target.verb)
## Data: DOD_48mo2 (Number of observations: 747)
## Draws: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 66)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.59 0.30 1.13
## sd(Prime.Type) 0.46 0.34 0.04
## sd(OverlapOverlap) 0.90 0.49 0.14
## sd(Prime.Type:OverlapOverlap) 0.80 0.58 0.07
## cor(Intercept,Prime.Type) -0.02 0.43 -0.72
## cor(Intercept,OverlapOverlap) 0.20 0.36 -0.43
## cor(Prime.Type,OverlapOverlap) -0.01 0.44 -0.73
## cor(Intercept,Prime.Type:OverlapOverlap) -0.17 0.42 -0.79
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.03 0.45 -0.75
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.01 0.44 -0.71
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.10 1.00 5992 9766
## sd(Prime.Type) 1.10 1.00 6938 8237
## sd(OverlapOverlap) 1.74 1.00 2851 5143
## sd(Prime.Type:OverlapOverlap) 1.88 1.00 5245 7374
## cor(Intercept,Prime.Type) 0.70 1.00 15411 10828
## cor(Intercept,OverlapOverlap) 0.76 1.00 8788 9018
## cor(Prime.Type,OverlapOverlap) 0.71 1.00 3734 7531
## cor(Intercept,Prime.Type:OverlapOverlap) 0.58 1.00 15053 10943
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.72 1.00 9327 12267
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.72 1.00 10912 12710
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.62 0.60 0.88
## sd(Prime.Type) 0.42 0.38 0.03
## sd(OverlapOverlap) 0.48 0.42 0.03
## sd(Prime.Type:OverlapOverlap) 0.93 0.70 0.09
## cor(Intercept,Prime.Type) -0.07 0.45 -0.77
## cor(Intercept,OverlapOverlap) -0.02 0.44 -0.74
## cor(Prime.Type,OverlapOverlap) 0.03 0.45 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.18 0.42 -0.80
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.03 0.45 -0.75
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.00 0.45 -0.73
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.75 1.00 6341 9746
## sd(Prime.Type) 1.15 1.00 8145 6239
## sd(OverlapOverlap) 1.28 1.00 8411 8311
## sd(Prime.Type:OverlapOverlap) 2.28 1.00 8511 8462
## cor(Intercept,Prime.Type) 0.68 1.00 18264 11771
## cor(Intercept,OverlapOverlap) 0.70 1.00 17527 11700
## cor(Prime.Type,OverlapOverlap) 0.76 1.00 13128 12894
## cor(Intercept,Prime.Type:OverlapOverlap) 0.58 1.00 15668 11641
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.72 1.00 12359 12204
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.73 1.00 11861 12887
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -1.77 0.75 -2.95 -0.53 1.00 4403
## Prime.Type 0.42 0.42 -0.24 1.10 1.00 11665
## OverlapOverlap -1.41 0.51 -2.28 -0.64 1.00 6989
## Prime.Type:OverlapOverlap 2.26 0.87 0.95 3.79 1.00 9194
## Tail_ESS
## Intercept 6238
## Prime.Type 9951
## OverlapOverlap 8067
## Prime.Type:OverlapOverlap 9595
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m48mo2, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 0.42 0.42 -0.24 1.1 5.83 0.85
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo2, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0 2.26 0.87 0.95 3.79 379.95
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo2, "Intercept + OverlapOverlap + .5*Prime.Type + .5*Prime.Type:OverlapOverlap >
Intercept + OverlapOverlap - .5*Prime.Type - .5*Prime.Type:OverlapOverlap ")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Intercept+Overla... > 0 2.69 0.84 1.43 4.17 1332.33
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
m48mo2b <- DOD_48mo2 %>%
filter(Trial_Problem == 0) %>%
brm(RESPONSE.CODE ~ 1 + Prime.Type*Overlap +
(1 + Prime.Type*Overlap | Blind_ID) +
(1 + Prime.Type*Overlap | Target.verb),
family="bernoulli",
control=list(adapt_delta=.9),
cores=4,
data=.,
file="48mo/output/m48mo2b.rds")
summary(m48mo2b, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * Overlap + (1 + Prime.Type * Overlap | Blind_ID) + (1 + Prime.Type * Overlap | Target.verb)
## Data: . (Number of observations: 730)
## Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 4000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 66)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.56 0.29 1.11
## sd(Prime.Type) 0.42 0.32 0.03
## sd(OverlapOverlap) 0.93 0.51 0.12
## sd(Prime.Type:OverlapOverlap) 0.79 0.58 0.07
## cor(Intercept,Prime.Type) 0.02 0.44 -0.69
## cor(Intercept,OverlapOverlap) 0.16 0.35 -0.46
## cor(Prime.Type,OverlapOverlap) 0.04 0.43 -0.69
## cor(Intercept,Prime.Type:OverlapOverlap) -0.18 0.43 -0.80
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.07 0.45 -0.76
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.01 0.44 -0.71
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 2.06 1.00 1925 2720
## sd(Prime.Type) 1.04 1.00 1712 1756
## sd(OverlapOverlap) 1.81 1.00 699 1224
## sd(Prime.Type:OverlapOverlap) 1.87 1.00 1605 1987
## cor(Intercept,Prime.Type) 0.73 1.00 5377 3360
## cor(Intercept,OverlapOverlap) 0.72 1.00 2776 2291
## cor(Prime.Type,OverlapOverlap) 0.72 1.00 1085 1781
## cor(Intercept,Prime.Type:OverlapOverlap) 0.59 1.00 4494 2726
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.67 1.00 3171 3127
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.72 1.00 3544 3353
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.70 0.71 0.88
## sd(Prime.Type) 0.43 0.41 0.03
## sd(OverlapOverlap) 0.55 0.47 0.04
## sd(Prime.Type:OverlapOverlap) 0.91 0.74 0.07
## cor(Intercept,Prime.Type) -0.06 0.45 -0.78
## cor(Intercept,OverlapOverlap) -0.06 0.44 -0.74
## cor(Prime.Type,OverlapOverlap) 0.03 0.45 -0.71
## cor(Intercept,Prime.Type:OverlapOverlap) -0.15 0.42 -0.79
## cor(Prime.Type,Prime.Type:OverlapOverlap) -0.05 0.45 -0.75
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.01 0.46 -0.74
## u-90% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 3.06 1.00 1659 2074
## sd(Prime.Type) 1.21 1.00 2484 2061
## sd(OverlapOverlap) 1.45 1.00 2314 2064
## sd(Prime.Type:OverlapOverlap) 2.29 1.00 2416 2659
## cor(Intercept,Prime.Type) 0.69 1.00 6535 2967
## cor(Intercept,OverlapOverlap) 0.68 1.00 5324 2986
## cor(Prime.Type,OverlapOverlap) 0.76 1.00 3174 3056
## cor(Intercept,Prime.Type:OverlapOverlap) 0.59 1.00 4451 2951
## cor(Prime.Type,Prime.Type:OverlapOverlap) 0.70 1.00 3866 3624
## cor(OverlapOverlap,Prime.Type:OverlapOverlap) 0.75 1.00 3244 2722
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -1.69 0.81 -2.90 -0.28 1.00 1375
## Prime.Type 0.39 0.43 -0.30 1.09 1.00 3587
## OverlapOverlap -1.46 0.53 -2.37 -0.66 1.00 2239
## Prime.Type:OverlapOverlap 2.39 0.89 1.07 3.91 1.00 2519
## Tail_ESS
## Intercept 1422
## Prime.Type 2655
## OverlapOverlap 2580
## Prime.Type:OverlapOverlap 2235
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
hypothesis(m48mo2b, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 0.39 0.43 -0.3 1.09 4.95 0.83
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo2b, "Prime.Type:OverlapOverlap > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:Overl... > 0 2.39 0.89 1.07 3.91 249
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
# No k's above .7
m48mo2.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo2)),
observedResponse = DOD_48mo2$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo2)), 1, mean),
integerResponse=TRUE
)
plot(m48mo2.model.check) # standard plots
testDispersion(m48mo2.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.76694, p-value = 0.002375
## alternative hypothesis: two.sided
tab_model(m48mo1, m48mo2, show.re.var=FALSE, show.r2=FALSE, show.icc=FALSE, transform=NULL, collapse.ci=TRUE, p.style="scientific", ci.hyphen = " : ", dv.labels=c("Model 1", "Model 2"), string.stat="",
pred.labels=c(
"Intercept",
"Prime (DOD)",
"Overlap",
"Prime * Overlap"
))
| Predictors | Model 1 (Log-Odds) | Model 2 (Log-Odds) |
|---|---|---|
| Intercept | -2.70 (-4.22 : -1.05) | -1.78 (-3.22 : -0.21) |
| Prime (DOD) | 0.39 (-0.48 : 1.32) | 0.42 (-0.39 : 1.26) |
| Overlap | -1.38 (-2.67 : -0.44) | -1.38 (-2.51 : -0.49) |
| Prime * Overlap | 2.18 (0.63 : 4.19) | 2.21 (0.70 : 4.16) |
| N (Blind_ID) | 90 | 66 |
| N (Target.verb) | 6 | 6 |
| Observations | 1019 | 747 |
DOD_48mo3 <- filter(DOD_48mo, Overlap=="No Overlap")
m48mo3 <- brm(RESPONSE.CODE ~ 1 +Prime.Type*biasmatch + (1 + Prime.Type*biasmatch| Blind_ID) + (1 + Prime.Type*biasmatch| Target.verb), seed=12345, logit_prior, family="bernoulli", iter=8000, cores=4, file="48mo/output/m48mo3.rds", data=DOD_48mo3)
summary(m48mo3, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * biasmatch + (1 + Prime.Type * biasmatch | Blind_ID) + (1 + Prime.Type * biasmatch | Target.verb)
## Data: DOD_48mo3 (Number of observations: 491)
## Draws: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 90)
## Estimate Est.Error l-90% CI
## sd(Intercept) 2.48 0.56 1.64
## sd(Prime.Type) 0.61 0.48 0.05
## sd(biasmatchmismatch) 2.87 0.88 1.50
## sd(Prime.Type:biasmatchmismatch) 1.69 1.09 0.16
## cor(Intercept,Prime.Type) 0.10 0.45 -0.68
## cor(Intercept,biasmatchmismatch) -0.17 0.27 -0.59
## cor(Prime.Type,biasmatchmismatch) 0.04 0.43 -0.68
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.19 0.40 -0.53
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.09 0.46 -0.79
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.15 0.41 -0.77
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 3.45 1.00 5550
## sd(Prime.Type) 1.53 1.00 6301
## sd(biasmatchmismatch) 4.39 1.00 3108
## sd(Prime.Type:biasmatchmismatch) 3.66 1.00 3704
## cor(Intercept,Prime.Type) 0.80 1.00 12606
## cor(Intercept,biasmatchmismatch) 0.31 1.00 5872
## cor(Prime.Type,biasmatchmismatch) 0.72 1.00 1013
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.79 1.00 9753
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.68 1.00 6584
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.58 1.00 10104
## Tail_ESS
## sd(Intercept) 9197
## sd(Prime.Type) 7303
## sd(biasmatchmismatch) 3750
## sd(Prime.Type:biasmatchmismatch) 5682
## cor(Intercept,Prime.Type) 10402
## cor(Intercept,biasmatchmismatch) 7767
## cor(Prime.Type,biasmatchmismatch) 2568
## cor(Intercept,Prime.Type:biasmatchmismatch) 10229
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 10934
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 11182
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.76 0.68 0.88
## sd(Prime.Type) 0.66 0.57 0.05
## sd(biasmatchmismatch) 0.67 0.58 0.04
## sd(Prime.Type:biasmatchmismatch) 0.95 0.78 0.07
## cor(Intercept,Prime.Type) 0.09 0.44 -0.66
## cor(Intercept,biasmatchmismatch) 0.05 0.44 -0.69
## cor(Prime.Type,biasmatchmismatch) 0.01 0.45 -0.73
## cor(Intercept,Prime.Type:biasmatchmismatch) -0.06 0.44 -0.76
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.08 0.45 -0.78
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.02 0.46 -0.75
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 3.04 1.00 6848
## sd(Prime.Type) 1.76 1.00 8112
## sd(biasmatchmismatch) 1.80 1.00 8653
## sd(Prime.Type:biasmatchmismatch) 2.48 1.00 9634
## cor(Intercept,Prime.Type) 0.77 1.00 15048
## cor(Intercept,biasmatchmismatch) 0.76 1.00 16175
## cor(Prime.Type,biasmatchmismatch) 0.74 1.00 13237
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.69 1.00 17009
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.69 1.00 14069
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.73 1.00 12266
## Tail_ESS
## sd(Intercept) 9859
## sd(Prime.Type) 7351
## sd(biasmatchmismatch) 7044
## sd(Prime.Type:biasmatchmismatch) 8007
## cor(Intercept,Prime.Type) 10573
## cor(Intercept,biasmatchmismatch) 11511
## cor(Prime.Type,biasmatchmismatch) 11947
## cor(Intercept,Prime.Type:biasmatchmismatch) 11069
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 12397
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 12330
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -2.91 0.94 -4.48 -1.41 1.00 5019
## Prime.Type -0.02 0.75 -1.24 1.16 1.00 8079
## biasmatchmismatch -1.22 1.03 -2.98 0.30 1.00 5599
## Prime.Type:biasmatchmismatch 1.49 1.42 -0.70 3.94 1.00 7462
## Tail_ESS
## Intercept 7375
## Prime.Type 8422
## biasmatchmismatch 7245
## Prime.Type:biasmatchmismatch 8182
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
m48mo3 <- add_criterion(m48mo3, "loo")
Chains look okay.
#plot(m48mo3, N = 4)
hypothesis(m48mo3, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 -0.02 0.75 -1.24 1.16 1 0.5
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo3, "Prime.Type:biasmatchmismatch > 0") # Bias mismatch
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0 1.49 1.42 -0.7 3.94 6.55
## Post.Prob Star
## 1 0.87
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
m48mo3.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo3)),
observedResponse = DOD_48mo3$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo3)), 1, mean),
integerResponse=TRUE
)
plot(m48mo3.model.check) # standard plots
testDispersion(m48mo3.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.52833, p-value < 2.2e-16
## alternative hypothesis: two.sided
p3 <- fitted(m48mo3, re_formula=NA, scale="response") %>%
cbind(DOD_48mo3) %>%
dplyr::select(Prime, biasmatch, Estimate, Est.Error) %>%
ggplot(aes(y=Estimate, x=biasmatch, fill=Prime )) +
geom_bar(stat="identity",position="dodge") +
geom_errorbar(aes(ymin=Estimate, ymax=Estimate+Est.Error), width=.2,
position=position_dodge(.9)) +
theme_minimal() +
ylim(0, .6) +
labs(y="Predicted Probability") +
theme(legend.position = "none")
p1c | p3 + plot_layout(guides = "collect") & theme(legend.position = "right")
DOD_48mo4 <-
DOD_48mo2 %>%
filter(Overlap=="No Overlap")
m48mo4 <- brm(RESPONSE.CODE ~ 1 +Prime.Type*biasmatch + (1 + Prime.Type*biasmatch| Blind_ID) + (1 + Prime.Type*biasmatch| Target.verb), logit_prior, family="bernoulli", iter=8000, seed=12345, cores=4, control=list(adapt_delta = .95), file="48mo/output/m48mo4.rds", data=DOD_48mo4) # increased adapt_delta because of divergent transitions
summary(m48mo4, prob=.9)
## Family: bernoulli
## Links: mu = logit
## Formula: RESPONSE.CODE ~ 1 + Prime.Type * biasmatch + (1 + Prime.Type * biasmatch | Blind_ID) + (1 + Prime.Type * biasmatch | Target.verb)
## Data: DOD_48mo4 (Number of observations: 358)
## Draws: 4 chains, each with iter = 8000; warmup = 4000; thin = 1;
## total post-warmup draws = 16000
##
## Group-Level Effects:
## ~Blind_ID (Number of levels: 66)
## Estimate Est.Error l-90% CI
## sd(Intercept) 2.02 0.53 1.22
## sd(Prime.Type) 0.61 0.48 0.05
## sd(biasmatchmismatch) 2.68 0.87 1.31
## sd(Prime.Type:biasmatchmismatch) 1.69 1.09 0.17
## cor(Intercept,Prime.Type) 0.09 0.45 -0.67
## cor(Intercept,biasmatchmismatch) -0.28 0.28 -0.68
## cor(Prime.Type,biasmatchmismatch) 0.05 0.44 -0.68
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.16 0.40 -0.54
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.10 0.45 -0.79
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.13 0.41 -0.76
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 2.94 1.00 6370
## sd(Prime.Type) 1.52 1.00 7968
## sd(biasmatchmismatch) 4.17 1.00 4410
## sd(Prime.Type:biasmatchmismatch) 3.70 1.00 4937
## cor(Intercept,Prime.Type) 0.79 1.00 21607
## cor(Intercept,biasmatchmismatch) 0.23 1.00 8425
## cor(Prime.Type,biasmatchmismatch) 0.75 1.00 1746
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.77 1.00 15691
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.69 1.00 9012
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.59 1.00 14883
## Tail_ESS
## sd(Intercept) 9668
## sd(Prime.Type) 9397
## sd(biasmatchmismatch) 4384
## sd(Prime.Type:biasmatchmismatch) 7886
## cor(Intercept,Prime.Type) 12268
## cor(Intercept,biasmatchmismatch) 9797
## cor(Prime.Type,biasmatchmismatch) 4099
## cor(Intercept,Prime.Type:biasmatchmismatch) 11635
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 11802
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 13178
##
## ~Target.verb (Number of levels: 6)
## Estimate Est.Error l-90% CI
## sd(Intercept) 1.73 0.67 0.88
## sd(Prime.Type) 0.61 0.52 0.04
## sd(biasmatchmismatch) 0.65 0.56 0.05
## sd(Prime.Type:biasmatchmismatch) 0.96 0.78 0.08
## cor(Intercept,Prime.Type) 0.07 0.44 -0.67
## cor(Intercept,biasmatchmismatch) 0.06 0.44 -0.69
## cor(Prime.Type,biasmatchmismatch) -0.00 0.45 -0.73
## cor(Intercept,Prime.Type:biasmatchmismatch) -0.06 0.44 -0.76
## cor(Prime.Type,Prime.Type:biasmatchmismatch) -0.08 0.45 -0.79
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) -0.02 0.45 -0.75
## u-90% CI Rhat Bulk_ESS
## sd(Intercept) 3.01 1.00 10351
## sd(Prime.Type) 1.61 1.00 11308
## sd(biasmatchmismatch) 1.75 1.00 12516
## sd(Prime.Type:biasmatchmismatch) 2.48 1.00 13163
## cor(Intercept,Prime.Type) 0.76 1.00 22924
## cor(Intercept,biasmatchmismatch) 0.76 1.00 24849
## cor(Prime.Type,biasmatchmismatch) 0.74 1.00 15852
## cor(Intercept,Prime.Type:biasmatchmismatch) 0.68 1.00 25712
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 0.68 1.00 17731
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 0.73 1.00 14823
## Tail_ESS
## sd(Intercept) 12131
## sd(Prime.Type) 9909
## sd(biasmatchmismatch) 10852
## sd(Prime.Type:biasmatchmismatch) 10636
## cor(Intercept,Prime.Type) 12036
## cor(Intercept,biasmatchmismatch) 11846
## cor(Prime.Type,biasmatchmismatch) 12693
## cor(Intercept,Prime.Type:biasmatchmismatch) 12070
## cor(Prime.Type,Prime.Type:biasmatchmismatch) 12613
## cor(biasmatchmismatch,Prime.Type:biasmatchmismatch) 13580
##
## Population-Level Effects:
## Estimate Est.Error l-90% CI u-90% CI Rhat Bulk_ESS
## Intercept -1.99 0.86 -3.41 -0.60 1.00 8348
## Prime.Type -0.02 0.71 -1.20 1.12 1.00 14449
## biasmatchmismatch -0.85 0.87 -2.36 0.46 1.00 11106
## Prime.Type:biasmatchmismatch 1.54 1.32 -0.50 3.78 1.00 13276
## Tail_ESS
## Intercept 9678
## Prime.Type 11084
## biasmatchmismatch 10684
## Prime.Type:biasmatchmismatch 11978
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
m48mo4 <- add_criterion(m48mo4, "loo")
m48mo4.model.check <- createDHARMa(
simulatedResponse = t(posterior_predict(m48mo4)),
observedResponse = DOD_48mo4$RESPONSE.CODE,
fittedPredictedResponse = apply(t(posterior_epred(m48mo4)), 1, mean),
integerResponse=TRUE
)
plot(m48mo4.model.check) # standard plots
testDispersion(m48mo4.model.check)
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.55439, p-value < 2.2e-16
## alternative hypothesis: two.sided
hypothesis(m48mo4, "Prime.Type > 0")
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio Post.Prob
## 1 (Prime.Type) > 0 -0.02 0.71 -1.2 1.12 0.98 0.49
## Star
## 1
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(m48mo4, "Prime.Type:biasmatchmismatch > 0") # Bias mismatch
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (Prime.Type:biasm... > 0 1.54 1.32 -0.5 3.78 8.05
## Post.Prob Star
## 1 0.89
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
tab_model(m48mo1, m48mo2, m48mo3, m48mo4, show.ci=.90, show.re.var=FALSE, show.r2=FALSE, show.icc=FALSE, transform=NULL, collapse.ci=TRUE, p.style="scientific", ci.hyphen = " : ", dv.labels=c("Model 1", "Model 2", "Model 3", "Model 4"), bpe="mean", string.stat="", file="Table48",
pred.labels=c(
"Intercept",
"Prime (DOD)",
"Overlap",
"Prime * Overlap",
"Bias Mismatch (POD)",
"Prime * Bias Mismatch "
))
| Predictors | Model 1 (Log-Odds) | Model 2 (Log-Odds) | Model 3 (Log-Odds) | Model 4 (Log-Odds) |
|---|---|---|---|---|
| Intercept | -2.68 (-3.93 : -1.36) | -1.77 (-2.95 : -0.53) | -2.91 (-4.48 : -1.41) | -1.99 (-3.41 : -0.60) |
| Prime (DOD) | 0.40 (-0.31 : 1.13) | 0.42 (-0.24 : 1.10) | -0.02 (-1.24 : 1.16) | -0.02 (-1.20 : 1.12) |
| Overlap | -1.43 (-2.40 : -0.62) | -1.41 (-2.28 : -0.64) | | |
| Prime * Overlap | 2.23 (0.86 : 3.81) | 2.26 (0.95 : 3.79) | | |
| Bias Mismatch (POD) | | | -1.22 (-2.98 : 0.30) | -0.85 (-2.36 : 0.46) |
| Prime * Bias Mismatch | | | 1.49 (-0.70 : 3.94) | 1.54 (-0.50 : 3.78) |
| N (Blind_ID) | 90 | 66 | 90 | 66 |
| N (Target.verb) | 6 | 6 | 6 | 6 |
| Observations | 1019 | 747 | 491 | 358 |
sessionInfo()
## R version 4.1.0 (2021-05-18)
## Platform: x86_64-w64-mingw32/x64 (64-bit)
## Running under: Windows 10 x64 (build 19045)
##
## Matrix products: default
##
## locale:
## [1] LC_COLLATE=Dutch_Netherlands.1252 LC_CTYPE=Dutch_Netherlands.1252
## [3] LC_MONETARY=Dutch_Netherlands.1252 LC_NUMERIC=C
## [5] LC_TIME=Dutch_Netherlands.1252
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] zoo_1.8-9 lubridate_1.8.0 sjPlot_2.8.10 patchwork_1.1.1
## [5] ggpubr_0.4.0 GGally_2.1.2 dplyr_1.0.7 tidyr_1.2.0
## [9] ggplot2_3.3.5 tidybayes_3.0.2 rmarkdown_2.13 DHARMa_0.4.6
## [13] writexl_1.4.0 readxl_1.3.1 brms_2.17.0 Rcpp_1.0.8.3
##
## loaded via a namespace (and not attached):
## [1] backports_1.4.1 plyr_1.8.6 igraph_1.2.11
## [4] splines_4.1.0 svUnit_1.0.6 listenv_0.8.0
## [7] crosstalk_1.2.0 gap.datasets_0.0.5 TH.data_1.1-0
## [10] rstantools_2.1.1 inline_0.3.19 digest_0.6.29
## [13] foreach_1.5.2 htmltools_0.5.2 fansi_1.0.2
## [16] magrittr_2.0.1 checkmate_2.0.0 doParallel_1.0.17
## [19] globals_0.15.0 modelr_0.1.8 RcppParallel_5.1.5
## [22] matrixStats_0.61.0 xts_0.12.1 sandwich_3.0-1
## [25] prettyunits_1.1.1 colorspace_2.0-3 ggdist_3.1.1
## [28] xfun_0.30 callr_3.7.0 crayon_1.5.0
## [31] jsonlite_1.8.0 lme4_1.1-28 iterators_1.0.14
## [34] survival_3.3-1 glue_1.6.2 gtable_0.3.1
## [37] emmeans_1.7.2 sjstats_0.18.1 sjmisc_2.8.9
## [40] distributional_0.3.0 car_3.0-12 pkgbuild_1.3.1
## [43] rstan_2.21.3 abind_1.4-5 scales_1.2.1
## [46] mvtnorm_1.1-3 DBI_1.1.2 ggeffects_1.1.1
## [49] rstatix_0.7.0 miniUI_0.1.1.1 performance_0.8.0
## [52] xtable_1.8-4 diffobj_0.3.5 stats4_4.1.0
## [55] StanHeaders_2.21.0-7 DT_0.21 datawizard_0.3.0
## [58] htmlwidgets_1.5.4 threejs_0.3.3 arrayhelpers_1.1-0
## [61] RColorBrewer_1.1-3 posterior_1.2.1 ellipsis_0.3.2
## [64] pkgconfig_2.0.3 reshape_0.8.8 loo_2.5.0
## [67] farver_2.1.0 qgam_1.3.4 sass_0.4.0
## [70] utf8_1.2.2 labeling_0.4.2 effectsize_0.6.0.1
## [73] tidyselect_1.1.2 rlang_1.0.6 reshape2_1.4.4
## [76] later_1.3.0 munsell_0.5.0 cellranger_1.1.0
## [79] tools_4.1.0 cli_3.3.0 generics_0.1.3
## [82] sjlabelled_1.1.8 broom_0.7.12 ggridges_0.5.3
## [85] evaluate_0.15 stringr_1.4.0 fastmap_1.1.0
## [88] yaml_2.3.5 processx_3.5.2 knitr_1.37
## [91] purrr_0.3.4 future_1.26.1 nlme_3.1-155
## [94] mime_0.12 projpred_2.0.2 gap_1.3-1
## [97] compiler_4.1.0 bayesplot_1.9.0 shinythemes_1.2.0
## [100] rstudioapi_0.13 gamm4_0.2-6 ggsignif_0.6.3
## [103] tibble_3.1.6 bslib_0.3.1 stringi_1.7.6
## [106] highr_0.9 parameters_0.17.0 ps_1.6.0
## [109] Brobdingnag_1.2-8 lattice_0.20-45 Matrix_1.4-0
## [112] nloptr_2.0.0 markdown_1.1 shinyjs_2.1.0
## [115] tensorA_0.36.2 vctrs_0.3.8 pillar_1.8.1
## [118] lifecycle_1.0.3 jquerylib_0.1.4 bridgesampling_1.1-2
## [121] estimability_1.3 insight_0.16.0 httpuv_1.6.5
## [124] R6_2.5.1 promises_1.2.0.1 gridExtra_2.3
## [127] parallelly_1.30.0 codetools_0.2-18 boot_1.3-28
## [130] colourpicker_1.1.1 MASS_7.3-55 gtools_3.9.2
## [133] assertthat_0.2.1 withr_2.5.0 shinystan_2.6.0
## [136] multcomp_1.4-18 bayestestR_0.11.5 mgcv_1.8-39
## [139] parallel_4.1.0 grid_4.1.0 coda_0.19-4
## [142] minqa_1.2.4 carData_3.0-5 shiny_1.7.1
## [145] base64enc_0.1-3 dygraphs_1.1.1.6