Setup

Contrast: Curiosity[high]-Curiosity[low]

library(tidyverse)    # ggplot, dplyr, %>%, and friends
library(brms)         # Bayesian modeling through Stan
library(tidybayes)    # Manipulate Stan objects in a tidy way
## Warning: package 'tidybayes' was built under R version 4.2.3
## 
## Attaching package: 'tidybayes'
## The following objects are masked from 'package:brms':
## 
##     dstudent_t, pstudent_t, qstudent_t, rstudent_t
library(broom)        # Convert model objects to data frames
## Warning: package 'broom' was built under R version 4.2.3
library(broom.mixed)  # Convert brms model objects to data frames
## Warning: package 'broom.mixed' was built under R version 4.2.3
library(emmeans)      # Calculate marginal effects in even fancier ways
## Warning: package 'emmeans' was built under R version 4.2.3
library(patchwork)    # Combine ggplot objects
## Warning: package 'patchwork' was built under R version 4.2.3
library(ggokabeito)   # Neat accessible color palette
## Warning: package 'ggokabeito' was built under R version 4.2.3
library(gghalves)     # Special half geoms
## Warning: package 'gghalves' was built under R version 4.2.3
library(ggthemes)     # Extra ggplot themes
## Warning: package 'ggthemes' was built under R version 4.2.3
options(mc.cores = parallel::detectCores())  # Sample Stan chains in parallel
library(glue)         # String interpolation for model file paths

# m1 <- readRDS("models_trial/univariate_contrast-CuriosityGroup_condition-PresentationFace_desc-triallevelwSlope.rds")
m1 <- readRDS("models_trial/univariate_contrast-CuriosityGroup_condition-PresentationFace_desc-triallevel_nestedintercept.rds")
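
For reference, the saved model's summary below implies a fit of the following form. This is only a sketch and is not run here: glmtrial_df, target, and the adapt_delta value are assumptions reconstructed from the summary and its warning.

# Sketch of the nested-intercept fit (not run); glmtrial_df, target, and
# adapt_delta = 0.99 are assumptions reconstructed from the summary below.
# m1 <- brm(
#   value ~ 0 + CuriosityGroup + (1 | subject) + (1 | CuriosityGroup/roi),
#   data = glmtrial_df %>% filter(trial_type == target),
#   family = gaussian(),
#   chains = 4, iter = 2000, warmup = 500,
#   control = list(adapt_delta = 0.99)
# )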

summary(m1)
## Warning: There were 14 divergent transitions after warmup. Increasing
## adapt_delta above 0.99 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
##  Family: gaussian 
##   Links: mu = identity; sigma = identity 
## Formula: value ~ 0 + CuriosityGroup + (1 | subject) + (1 | CuriosityGroup/roi) 
##    Data: glmtrial_df %>% filter(trial_type == target) (Number of observations: 21852) 
##   Draws: 4 chains, each with iter = 2000; warmup = 500; thin = 1;
##          total post-warmup draws = 6000
## 
## Group-Level Effects: 
## ~CuriosityGroup (Number of levels: 2) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     2.78      3.09     0.11    10.37 1.00     1171     1097
## 
## ~CuriosityGroup:roi (Number of levels: 24) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.05      0.01     0.02     0.07 1.00     1693     2093
## 
## ~subject (Number of levels: 19) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.05      0.01     0.03     0.08 1.00     2125     2891
## 
## Population-Level Effects: 
##                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## CuriosityGrouphigh     0.01      3.73    -8.37     7.47 1.00     1516      824
## CuriosityGrouplow      0.10      4.28    -7.62     8.61 1.00     1297      874
## 
## Family Specific Parameters: 
##       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma     1.18      0.01     1.17     1.19 1.00    12166     4101
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
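
The 14 divergent transitions flagged above are few relative to 6,000 post-warmup draws, but they could be removed by refitting with a tighter sampler setting, as the warning suggests. A sketch (not run; 0.999 is an assumed value above the current setting):

# Optional refit with smaller step sizes to eliminate divergences (slow)
# m1 <- update(m1, control = list(adapt_delta = 0.999))
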
all_regions_ame <- m1 %>% 
  emmeans(~ CuriosityGroup + roi,
          at = list(roi = unique(m1$data$roi), subject = unique(m1$data$subject)),
          epred = TRUE, re_formula = NULL) %>% 
  contrast(method = "pairwise", by = "roi") %>% 
  gather_emmeans_draws()
## Loading required namespace: rstanarm
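
Before collapsing to a direction-only summary, it is worth looking at the contrast itself; tidybayes::median_qi() gives the posterior median and 95% interval of the high - low difference per ROI (a sketch using the all_regions_ame draws just created):

# Posterior median and 95% interval of the high - low contrast, per ROI
all_regions_ame %>%
  group_by(contrast, roi) %>%
  median_qi(.value)
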
# Calculate the proportion of the posterior distribution above 0 for each ROI
# (ggplot2 is already attached via tidyverse, so no extra library call is needed)

# Create the plot
all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prop_positive = mean(.value > 0)) %>%
  mutate(roi = reorder(roi, prop_positive)) %>%
  ggplot(aes(x = roi, y = prop_positive, fill = contrast)) +
  geom_col() +
  coord_flip() +
  geom_text(aes(label = round(prop_positive, 2)), hjust = -0.1) +
  labs(x = "ROI", y = "Proportion Positive", fill = "Contrast") +
  theme_minimal()
## `summarise()` has grouped output by 'contrast'. You can override using the
## `.groups` argument.
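
The repeated summarise() message is harmless; if desired, it can be silenced by dropping the grouping explicitly, e.g.:

# Same summary, with the grouping dropped to avoid the message
all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prop_positive = mean(.value > 0), .groups = "drop")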

Contrast: Face accuracy X Curiosity

# File-name parameters for the models loaded below
trialtype <- "PresentationFace"
outcome   <- "face"

# univariate_contrast-boldXCuriosityGroup_condition-PresentationFace_desc-face_triallevel_slope.rds
m1 <- readRDS(glue("models_trial/univariate_contrast-boldXCuriosityGroup_condition-{trialtype}_desc-{outcome}_triallevel_slope.rds"))
m2 <- readRDS(glue("models_trial/univariate_contrast-boldXCuriosityGroup_condition-{trialtype}_desc-{outcome}_triallevel_nestedintercept.rds"))
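
m1 and m2 share the same fixed effects but differ in how ROI variation is modeled; the two formulas (taken from the summaries below) make the difference explicit:

# m1 ("slope"): the curiosity-group effect is allowed to vary across ROIs
f_slope  <- bf(face_accuracy ~ CuriosityGroup * value + (1 | subject) + (1 + CuriosityGroup | roi))
# m2 ("nested intercept"): separate ROI intercepts nested within each group
f_nested <- bf(face_accuracy ~ CuriosityGroup * value + (1 | subject) + (1 | CuriosityGroup/roi))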

summary(m1)
##  Family: bernoulli 
##   Links: mu = logit 
## Formula: face_accuracy ~ CuriosityGroup * value + (1 | subject) + (1 + CuriosityGroup | roi) 
##    Data: glmtrial_df %>% filter(trial_type == target) (Number of observations: 21156) 
##   Draws: 4 chains, each with iter = 2000; warmup = 500; thin = 1;
##          total post-warmup draws = 6000
## 
## Group-Level Effects: 
## ~roi (Number of levels: 12) 
##                                  Estimate Est.Error l-95% CI u-95% CI Rhat
## sd(Intercept)                        0.02      0.01     0.00     0.05 1.00
## sd(CuriosityGrouplow)                0.02      0.02     0.00     0.07 1.00
## cor(Intercept,CuriosityGrouplow)    -0.16      0.58    -0.97     0.93 1.00
##                                  Bulk_ESS Tail_ESS
## sd(Intercept)                        4647     2754
## sd(CuriosityGrouplow)                4441     2786
## cor(Intercept,CuriosityGrouplow)     6554     4248
## 
## ~subject (Number of levels: 19) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.45      0.08     0.32     0.65 1.00     1113     2200
## 
## Population-Level Effects: 
##                         Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                  -0.19      0.11    -0.40     0.03 1.01      952
## CuriosityGrouplow          -0.16      0.03    -0.22    -0.10 1.00     7714
## value                       0.03      0.02    -0.01     0.06 1.00     5884
## CuriosityGrouplow:value    -0.03      0.02    -0.07     0.02 1.00     5679
##                         Tail_ESS
## Intercept                   1425
## CuriosityGrouplow           4410
## value                       4644
## CuriosityGrouplow:value     4338
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
summary(m2)
## Warning: There were 3 divergent transitions after warmup. Increasing
## adapt_delta above 0.99 may help. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
##  Family: bernoulli 
##   Links: mu = logit 
## Formula: face_accuracy ~ CuriosityGroup * value + (1 | subject) + (1 | CuriosityGroup/roi) 
##    Data: glmtrial_df %>% filter(trial_type == target) (Number of observations: 21156) 
##   Draws: 4 chains, each with iter = 2000; warmup = 500; thin = 1;
##          total post-warmup draws = 6000
## 
## Group-Level Effects: 
## ~CuriosityGroup (Number of levels: 2) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     2.18      2.15     0.07     7.67 1.00     2160     1880
## 
## ~CuriosityGroup:roi (Number of levels: 24) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.01      0.01     0.00     0.04 1.00     3732     2305
## 
## ~subject (Number of levels: 19) 
##               Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept)     0.45      0.09     0.32     0.66 1.00     1370     2406
## 
## Population-Level Effects: 
##                         Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                  -0.17      2.54    -5.48     5.42 1.00     1903
## CuriosityGrouplow          -0.11      4.52    -9.59     9.67 1.00     1739
## value                       0.03      0.02    -0.01     0.06 1.00     4697
## CuriosityGrouplow:value    -0.03      0.02    -0.07     0.02 1.00     4862
##                         Tail_ESS
## Intercept                   1787
## CuriosityGrouplow           1463
## value                       4408
## CuriosityGrouplow:value     4207
## 
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Prediction grid: both curiosity groups, a range of BOLD values, all ROIs and subjects
newdata <- expand_grid(CuriosityGroup = c("low", "high"),
                       value = seq(-1, 1, by = 0.1),
                       roi = unique(m1$data$roi),
                       subject = unique(m1$data$subject))
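
The grid above is not used further in this section; a sketch of one possible use, feeding it to tidybayes for full predicted accuracy curves over the BOLD range (ndraws kept small to stay manageable):

# Expected probability of a correct face response over the grid,
# including all random effects; subsample draws to keep the object small
pred_draws <- newdata %>%
  add_epred_draws(m1, re_formula = NULL, ndraws = 200)
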
# Compare out-of-sample fit of the two parameterizations (not run here; see the
# loo sketch below)
# loo(m1, m2)
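
If that comparison is needed, brms can cache the LOO criterion on each model and rank them; a sketch (not run, since computing LOO for ~21,000 Bernoulli observations is slow):

# m1 <- add_criterion(m1, "loo")
# m2 <- add_criterion(m2, "loo")
# loo_compare(m1, m2, criterion = "loo")
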
all_regions_ame <- m1 %>% 
  emmeans(~ CuriosityGroup*value + roi,
          at = list(roi = unique(m1$data$roi), subject = unique(m1$data$subject), value=0),
          epred = TRUE, re_formula = NULL) %>% 
  contrast(method = "pairwise", by = "roi") %>% 
  gather_emmeans_draws()


# Create the plot
all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prob = mean(plogis(.value))) %>%
  mutate(roi = reorder(roi, prob)) %>%
  ggplot(aes(x = roi, y = prob, fill = contrast)) +
  geom_col() +
  coord_flip() +
  geom_text(aes(label = round(prob, 2)), hjust = -0.1) +
  labs(x = "ROI", y = "Proportion Positive", fill = "Contrast") +
  theme_minimal()
## `summarise()` has grouped output by 'contrast'. You can override using the
## `.groups` argument.

all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prob = mean(plogis(.value))) %>%
  mutate(roi = reorder(roi, prob))
## `summarise()` has grouped output by 'contrast'. You can override using the
## `.groups` argument.
## # A tibble: 12 × 3
## # Groups:   contrast [1]
##    contrast                 roi              prob
##    <fct>                    <fct>           <dbl>
##  1 high value0 - low value0 Subiculum_left  0.509
##  2 high value0 - low value0 CA1_left        0.509
##  3 high value0 - low value0 CA23_left       0.509
##  4 high value0 - low value0 CA4_DG_left     0.509
##  5 high value0 - low value0 Subiculum_right 0.509
##  6 high value0 - low value0 CA1_right       0.509
##  7 high value0 - low value0 CA23_right      0.509
##  8 high value0 - low value0 CA4_DG_right    0.509
##  9 high value0 - low value0 SN_left         0.509
## 10 high value0 - low value0 SN_right        0.509
## 11 high value0 - low value0 VTA_left        0.509
## 12 high value0 - low value0 VTA_right       0.509
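
Since emmeans was called with epred = TRUE, the .value draws should already be differences in predicted accuracy on the probability scale, so the direction-only summary used for the first contrast applies here as well:

# Proportion of draws in which the high-curiosity group's predicted accuracy
# exceeds the low group's, per ROI
all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prop_positive = mean(.value > 0), .groups = "drop") %>%
  arrange(desc(prop_positive))
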
library(sjPlot)
## Warning: package 'sjPlot' was built under R version 4.2.3
plot_model(m1, type = "pred", terms = c("value[-2,2]","CuriosityGroup", "roi"), title = "Face accuracy X Curiosity")
## Note: uncertainty of error terms are not taken into account. You may
##   want to use `rstantools::posterior_predict()`.
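
As the note above says, plot_model() does not account for all sources of uncertainty; brms's own conditional_effects() is a useful cross-check, drawing the same value-by-group interaction directly from the posterior (a sketch on the random-slope model):

# Predicted probability of a correct face response across the BOLD range, by group
conditional_effects(m1, effects = "value:CuriosityGroup")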

# Repeat the same contrast and plogis() summary for the nested-intercept model (m2)
all_regions_ame <- m2 %>% 
  emmeans(~ CuriosityGroup*value + roi,
          at = list(roi = unique(m1$data$roi), subject = unique(m1$data$subject), value=0),
          epred = TRUE, re_formula = NULL) %>% 
  contrast(method = "pairwise", by = "roi") %>% 
  gather_emmeans_draws()

# Create the plot
all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prob = mean(plogis(.value))) %>%
  mutate(roi = reorder(roi, prob)) %>%
  ggplot(aes(x = roi, y = prob, fill = contrast)) +
  geom_col() +
  coord_flip() +
  geom_text(aes(label = round(prob, 2)), hjust = -0.1) +
  labs(x = "ROI", y = "Proportion Positive", fill = "Contrast") +
  theme_minimal()
## `summarise()` has grouped output by 'contrast'. You can override using the
## `.groups` argument.

all_regions_ame %>%
  group_by(contrast, roi) %>%
  summarise(prob = mean(plogis(.value))) %>%
  mutate(roi = reorder(roi, prob))
## `summarise()` has grouped output by 'contrast'. You can override using the
## `.groups` argument.
## # A tibble: 12 × 3
## # Groups:   contrast [1]
##    contrast                 roi              prob
##    <fct>                    <fct>           <dbl>
##  1 high value0 - low value0 Subiculum_left  0.509
##  2 high value0 - low value0 CA1_left        0.509
##  3 high value0 - low value0 CA23_left       0.509
##  4 high value0 - low value0 CA4_DG_left     0.509
##  5 high value0 - low value0 Subiculum_right 0.509
##  6 high value0 - low value0 CA1_right       0.509
##  7 high value0 - low value0 CA23_right      0.509
##  8 high value0 - low value0 CA4_DG_right    0.509
##  9 high value0 - low value0 SN_left         0.509
## 10 high value0 - low value0 SN_right        0.509
## 11 high value0 - low value0 VTA_left        0.509
## 12 high value0 - low value0 VTA_right       0.509
plot_model(m2, type = "pred", terms = c("value[-2,2]","CuriosityGroup", "roi"), title = "Face accuracy X Curiosity")
## Note: uncertainty of error terms are not taken into account. You may
##   want to use `rstantools::posterior_predict()`.

# plot_model(m2, type = "pred", terms = c("value[-2,2]","CuriosityGroup"), title = "Face accuracy X Curiosity")