Analysis

Demographics Section

Demographic table

Experiment_2_demographics$Gender <- as.factor(Experiment_2_demographics$Gender)
demo_table_j <- d2
table1(~ factor(Gender) + Age + Education + Ethnicity + Ethnic_Origin, data = demo_table_j)
Overall (N = 289)

Gender
  Female 124 (42.9%)
  Male 155 (53.6%)
  Gender Non-Binary 8 (2.8%)
  Prefer not to respond 2 (0.7%)
Age
  Mean (SD) 29.3 (9.84)
  Median [Min, Max] 26.0 [18.0, 78.0]
Education
  A-Levels or Equivalent 65 (22.5%)
  Doctoral Degree 4 (1.4%)
  GCSEs or Equivalent 18 (6.2%)
  Prefer not to respond 5 (1.7%)
  Primary School 5 (1.7%)
  University Post-Graduate Program 62 (21.5%)
  University Undergraduate Program 130 (45.0%)
Ethnicity
  African 51 (17.6%)
  Asian or Asian Scottish or Asian British 5 (1.7%)
  Mixed or Multi-ethnic 7 (2.4%)
  Other ethnicity 3 (1.0%)
  Prefer not to respond 1 (0.3%)
  White 222 (76.8%)
Ethnic Origin
  African 50 (17.3%)
  Asian 7 (2.4%)
  English 16 (5.5%)
  European 200 (69.2%)
  Latin American 6 (2.1%)
  Other 10 (3.5%)

Gender

Experiment_2_demographics$Gender <- as.factor(Experiment_2_demographics$Gender)
ggplot(d2, aes(x = Gender, fill = Gender)) +
  geom_bar() +
  labs(x = "Gender") +
  scale_x_discrete(labels = c("Female", "Male", "Gender \nNon-Binary", "Prefer not \nto respond"), guide = "prism_offset") +
  scale_y_continuous(breaks = seq(0, 160, 10), guide = "prism_offset") +
  theme(legend.position = "none")

Age

Experiment_2_demographics_Gender$Gender <- as.factor(Experiment_2_demographics_Gender$Gender)
d2 <- Experiment_2_demographics_Gender %>%
  mutate_at(vars(locfunc(Experiment_2_demographics_Gender, "Gender")), ~ as.factor(recode(., "1" = "Female", "2" = "Male")))
age_plot <- ggplot(d2, aes(x = Age, fill = Gender)) +
  geom_bar(data = subset(d2, Gender == "Female")) +
  geom_bar(data = subset(d2, Gender == "Male"), aes(y = after_stat(count) * (-1))) +
  scale_y_continuous(breaks = seq(-30, 30, 1), labels = abs(seq(-30, 30, 1))) +
  scale_x_continuous(breaks = seq(20, 80, 5)) +
  ylab("Number of Participants") +
  xlab("Age of Participants (In years)") +
  geom_hline(yintercept = 0) +
  coord_flip()
ggplotly(age_plot)

Ethnicity

Experiment_2_demographics$Ethnicity <- as.factor(Experiment_2_demographics$Ethnicity)
ggplot(Experiment_2_demographics, aes(x = Ethnicity, fill = Ethnicity)) +
  geom_bar() +
  scale_x_discrete(labels = c("White ", "Mixed \nor \nMulti-ethnic ", "Asian \nor \nAsian Scottish \nor \nAsian British", "African", "Caribbean \nor \nBlack", "Arab ", "Other ethnicity", "Prefer not \nto respond"), guide = "prism_offset") +
  scale_y_continuous(breaks = seq(0, 250, 20), guide = "prism_offset") +
  theme(legend.position = "none")

Ethnic Origin

Experiment_2_demographics$Ethnic_Origin <- as.factor(Experiment_2_demographics$Ethnic_Origin)
ggplot(Experiment_2_demographics, aes(x = Ethnic_Origin, fill = Ethnic_Origin)) +
  geom_bar() +
  scale_x_discrete(labels = c("Scottish", "English", "European", "Latin \nAmerican", "Asian", "Arab", "African", "Other", "Prefer not \nto respond"), guide = "prism_offset") +
  scale_y_continuous(breaks = seq(0, 250, 20), guide = "prism_offset") +
  theme(legend.position = "none")

Education

Experiment_2_demographics$Education <- as.factor(Experiment_2_demographics$Education)
ggplot(Experiment_2_demographics, aes(x = Education, fill = Education)) +
  geom_bar() +
  scale_x_discrete(labels = c("Primary School ", "GCSEs \nor \nEquivalent", "A-Levels \nor \nEquivalent", "University \nUndergraduate \nProgram", "University \nPost-Graduate \nProgram", "Doctoral \nDegree", "Prefer not \nto respond"), guide = "prism_offset") +
  scale_y_continuous(breaks = seq(0, 250, 20), guide = "prism_offset") +
  theme(legend.position = "none")

Main Analysis

General Correlation

correlation_df <- Experiment_2_demographics_Gender[, columns_names$x]

correlation_df$dominance_Sum_z <- Experiment_2_demographics$dominance_Sum_z
correlation_df$prestige_Sum_z <- Experiment_2_demographics$prestige_Sum_z
correlation_df$leadership_Sum_z <- Experiment_2_demographics$leadership_Sum_z
correlation_df$PNI_Sum <- as.numeric(scale(Experiment_2_demographics_Gender$PNI_Sum))

correlation_df <- correlation_df %>% rename(
  "Ethical Preference" = "ethicalPreference_z",
  "Financial Preference" = "financialPreference_z",
  "Health and Safety Preference" = "healthAndSafetyPreference_z",
  "Social Preference" = "socialPreference_z",
  "Recreational Preference" = "recreationalPreference_z",
  "Dominance" = "dominance_Sum",
  "Prestige" = "prestige_Sum",
  "Leadership" = "leadership_Sum",
  "UMS Affiliation" = "UMSAffiliationSum_z",
  "UMS Intimacy" = "UMSIntimacySum_z",
  "UMS Sum" = "UMSSum_z",
  "B-PNI" = "PNI_Sum",
  "Risk Perception" = "riskPerceptionSum_z",
  "Risk Benefit" = "riskBenefitSum_z",
  "Risk Sum" = "riskSum_z",
  "General Expected Benefits" = "generalExpectedBenefits_z",
  "General Risk Preference" = "generalRiskPreference_z"
)
corr_1 <- correlation(correlation_df, bayesian = TRUE, method = "auto")
saveRDS(corr_1, "corr_1.rds")
corr_1 <- readRDS("corr_1.rds")
print(summary(corr_1))
ggcorrplot(corr_1, type = "lower", lab = TRUE, insig = "blank", show.diag = TRUE, sig.level = 0.05) + scale_x_discrete(labels = 1:17) + theme(axis.text.x = element_text(angle = 0, hjust = .5))
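
The correlation plot replaces the variable names with the indices 1 to 17; a small key keeps the axis readable. A minimal sketch, assuming the indices follow the column order of correlation_df:

# Hypothetical index-to-variable key for the numeric axis labels.
corr_key <- data.frame(Index = seq_along(names(correlation_df)), Variable = names(correlation_df))
corr_key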

B-PNI distribution

## B-PNI distribution

B_PNI_1 <- ggplot(Experiment_2_demographics, aes(x = Age, y = PNI_Sum)) +
  geom_point(size = 0.7, alpha = 0.8, position = "jitter") +
  geom_smooth(method = "lm", se = FALSE, size = 2, alpha = 0.8)
ggplotly(B_PNI_1)
## Construct composite risk-preference scores (used in the brms models below)
riska <- lm(riskBenefitSum ~ riskSum, data = Experiment_2_demographics)
riskb <- lm(riskBenefitSum ~ riskPerceptionSum, data = Experiment_2_demographics)
Experiment_2_demographics$generalRiskPreference <- (Experiment_2_J_Analysis[, "riskBenefitSum"] * riska$coefficients[2]) + (Experiment_2_J_Analysis[, "riskPerceptionSum"] * riskb$coefficients[2])

Experiment_2_demographics_Gender$generalExpectedBenefits <- (Experiment_2_J_Analysis[, "riskBenefitSum"] * riska$coefficients[2])
Experiment_2_demographics_Gender$generalPerceievedRisk <- (Experiment_2_J_Analysis[, "riskPerceptionSum"] * riskb$coefficients[2])
Experiment_2_demographics_Gender$riskBenefitSum_z <- scale(Experiment_2_demographics_Gender$riskBenefitSum)
Experiment_2_demographics_Gender$generalRiskPreference_z <- scale(Experiment_2_demographics$generalRiskPreference)
Experiment_2_demographics_Gender$generalExpectedBenefits_z <- scale(Experiment_2_demographics$generalExpectedBenefits)
Experiment_2_demographics_Gender$generalPerceievedRisk_z <- scale(Experiment_2_demographics$generalPerceievedRisk)

Experiment_2_demographics$UMSSum_z <- scale(Experiment_2_demographics$UMSSum)
Experiment_2_demographics$UMSAffiliationSum_z <- scale(Experiment_2_demographics$UMSAffiliationSum)
Experiment_2_demographics$UMSIntimacySum_z <- scale(Experiment_2_demographics$UMSIntimacySum)


# Restrict the gender-coded models to the Female/Male categories (codes 3 and 6,
# the gender non-binary and prefer-not-to-respond responses; leaves N = 279).
Experiment_2_demographics_Gender <- Experiment_2_demographics[!(Experiment_2_demographics$Gender == "3" | Experiment_2_demographics$Gender == "6"), ]

m1 Interaction Model

m1_interaction <- brm(generalRiskPreference_z ~ dominance_Sum * Gender + prestige_Sum * Gender + leadership_Sum * Gender + PNI_Sum * Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, prior = prior_m1_interaction_gen, save_pars = save_pars(all = T), cores = parallel::detectCores(), backend = "cmdstanr")
saveRDS(m1_interaction, "m1_interaction.rds")
m1_interaction_no_pni <- brm(generalRiskPreference_z ~ dominance_Sum * Gender + prestige_Sum * Gender + leadership_Sum * Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, prior = prior_m1_interaction_gen_no_pni, save_pars = save_pars(all = T), cores = parallel::detectCores(), backend = "cmdstanr")
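
The prior objects referenced here and below (prior_m1_interaction_gen, prior_m1, prior_m2, and so on) are not defined in this document. A minimal sketch of how such priors could be declared with brms, assuming weakly informative normal(0, 1) priors on the regression coefficients; the priors actually used may differ:

# Hypothetical prior specification (illustrative only, not the original priors).
prior_m1_interaction_gen <- brms::set_prior("normal(0, 1)", class = "b")
prior_m1_interaction_gen_no_pni <- prior_m1_interaction_gen
prior_m1 <- prior_m1_interaction_gen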

# Model comparison (not shown here) favored the interaction model without PNI over the one with PNI
m1 <- brm(generalRiskPreference_z ~ dominance_Sum + prestige_Sum + leadership_Sum + PNI_Sum + Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, cores = parallel::detectCores(), backend = "cmdstanr", prior = prior_m1, save_pars = save_pars(all = T))
saveRDS(m1, "m1.rds")
m1_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(m1))


kable(m1_fixef[
  sign_match(m1_fixef[, 4]) == sign_match(m1_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Dominance 1.02 0.95 0.55 1.5
Prestige 1.36 0.95 0.91 1.82
Leadership -3.76 0.95 -3.84 -3.68
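
sign_match() is used throughout to keep only coefficients whose credible interval excludes zero, but it is not defined in this document. A minimal sketch of what such a helper could look like:

# Hypothetical helper: return the sign of each CI bound, so rows where the
# lower and upper bounds agree in sign (interval excludes zero) are retained.
sign_match <- function(x) sign(as.numeric(unlist(x)))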

Model Comparison

m1_comparison <- loo(m1, m1_interaction)
m1_comparison
m1_compari <- brms::waic(m1, m1_interaction)
m1_compari

m1_comparison_bfs <- bayesfactor_models(m1, m1_interaction, denominator = 2)
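
The Bayes factor object is assigned but never printed; displaying it makes the basis for the note below explicit.

# With denominator = 2 (m1_interaction), a BF < 1 for m1 indicates support for the interaction model.
m1_comparison_bfs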

# Bayes factors favor m1_interaction over m1

m2 Additive Model with DoPL and DOSPERT + PNI

m2 <- brm(mvbind(riskSum_z, riskBenefitSum_z, riskPerceptionSum_z) ~ dominance_Sum + prestige_Sum + leadership_Sum + PNI_Sum + Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, prior = prior_m2, save_pars = save_pars(all = T), cores = parallel::detectCores(), backend = "cmdstanr")
saveRDS(m2, "m2.rds")
m2_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(m2))


kable(m2_fixef[
  sign_match(m2_fixef[, 4]) == sign_match(m2_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Risk Benefit * Intercept 0.51 0.95 0.12 0.89
Risk Perception * Intercept 1.16 0.95 0.93 1.39
Risk * Dominance 0.27 0.95 0.14 0.39
Risk * Gender 0.27 0.95 0.05 0.48
Risk Benefit * Dominance 0.22 0.95 0.1 0.35
Risk Benefit * Age -0.02 0.95 -0.03 -0.01
Risk Perception * Dominance -0.27 0.95 -0.42 -0.13
Risk Perception * Leadership 0.15 0.95 0.02 0.29
Risk Perception * Gender -0.35 0.95 -0.56 -0.14
Risk Perception * Age -0.03 0.95 -0.04 -0.03
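
No posterior predictive checks are reported for the multivariate models; a minimal sketch for one m2 response (brms strips underscores from response names, so riskSum_z becomes riskSumz):

# Overlay posterior predictive draws on the observed standardized risk sums.
pp_check(m2, resp = "riskSumz", ndraws = 100)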

m2 Gender Interaction Model with DoPL and DOSPERT + PNI

m2_interaction_gender <- brm(mvbind(riskSum_z, riskBenefitSum_z, riskPerceptionSum_z) ~ dominance_Sum * Gender + prestige_Sum * Gender + leadership_Sum * Gender + PNI_Sum * Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, prior = prior_m2_interaction_gender, save_pars = save_pars(all = T), cores = parallel::detectCores(), backend = "cmdstanr")
saveRDS(m2_interaction_gender, "m2_interaction_gender.rds")
m2_interaction_gender_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(m2_interaction_gender))


kable(m2_interaction_gender_fixef[
  sign_match(m2_interaction_gender_fixef[, 4]) == sign_match(m2_interaction_gender_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Risk Benefit * Intercept 0.47 0.95 0.08 0.86
Risk Perception * Intercept 1.19 0.95 0.96 1.42
Risk * Dominance 0.25 0.95 0.04 0.45
Risk * Gender 0.27 0.95 0.05 0.49
Risk Benefit * Age -0.02 0.95 -0.03 -0.01
Risk Perception * Gender -0.36 0.95 -0.57 -0.15
Risk Perception * Age -0.03 0.95 -0.04 -0.03

Model Comparison

m2_comparison <- loo(m2, m2_interaction_gender)
m2_comparison
m2_compari <- brms::waic(m2, m2_interaction_gender)
bayes_factor(m2_interaction_gender, m2)

# Model comparison favors m2 over m2_interaction_gender

m3 Additive Model with DOSPERT, DoPL, and PNI

m3 <- brm(mvbind(ethicalPreference_z, financialPreference_z, healthAndSafetyPreference_z, recreationalPreference_z, socialPreference_z) ~ dominance_Sum + prestige_Sum + leadership_Sum + PNI_Sum + Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, prior = prior_m3, save_pars = save_pars(all = T), cores = parallel::detectCores(), backend = "cmdstanr")
saveRDS(m3, "m3.rds")
m3_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(m3))


kable(m3_fixef[
  sign_match(m3_fixef[, 4]) == sign_match(m3_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Ethical Preferencez * Intercept 0.42 0.95 0.06 0.79
Health and Safety Preferencez * Intercept 0.48 0.95 0.1 0.86
Recreational Preferencez * Intercept 0.71 0.95 0.33 1.09
Social Preferencez * Intercept 0.67 0.95 0.31 1.03
Ethical Preferencez * Dominance 0.31 0.95 0.19 0.44
Ethical Preferencez * Leadership -0.18 0.95 -0.3 -0.06
Ethical Preferencez * Gender 0.27 0.95 0.06 0.49
Ethical Preferencez * Age -0.02 0.95 -0.03 -0.01
Health and Safety Preferencez * Dominance 0.27 0.95 0.14 0.41
Health and Safety Preferencez * Prestige -0.26 0.95 -0.39 -0.13
Health and Safety Preferencez * Age -0.02 0.95 -0.03 -0.01
Recreational Preferencez * Dominance 0.15 0.95 0.02 0.28
Recreational Preferencez * Prestige -0.28 0.95 -0.4 -0.16
Recreational Preferencez * Leadership 0.17 0.95 0.05 0.3
Recreational Preferencez * Age -0.03 0.95 -0.04 -0.02
Social Preferencez * Leadership 0.27 0.95 0.14 0.39
Social Preferencez * PNI 0.17 0.95 0.04 0.3
Social Preferencez * Gender -0.44 0.95 -0.65 -0.22
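
The credible dominance and prestige effects above can be visualised with brms' conditional_effects(); a sketch for the dominance slope on ethical risk preference (again using the underscore-stripped response name):

# Model-implied relation between dominance and standardized ethical risk
# preference, with the other predictors held at reference values.
conditional_effects(m3, effects = "dominance_Sum", resp = "ethicalPreferencez")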

m3 Gender Interaction Model with DOSPERT, DoPL, and PNI

m3_interaction_gender <- brm(mvbind(ethicalPreference_z, financialPreference_z, healthAndSafetyPreference_z, recreationalPreference_z, socialPreference_z) ~ dominance_Sum * Gender + prestige_Sum * Gender + leadership_Sum * Gender + PNI_Sum * Gender + Age, data = Experiment_2_demographics_Gender, warmup = 1000, iter = 10000, prior = prior_Mediation_comparison_2, save_pars = save_pars(all = T), cores = parallel::detectCores(), backend = "cmdstanr")

saveRDS(m3_interaction_gender, "m3_interaction_gender.rds")
m3_interaction_gender_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(m3_interaction_gender))


kable(m3_interaction_gender_fixef[
  sign_match(m3_interaction_gender_fixef[, 4]) == sign_match(m3_interaction_gender_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Risk * Intercept -0.54 0.95 -1.02 -0.07
Risk * Risk Benefit 0.44 0.95 0.34 0.54
Risk * Risk Perception -0.28 0.95 -0.37 -0.19
Risk * Dominance 0.11 0.95 0.01 0.22
Risk Benefit * Dominance 0.26 0.95 0.13 0.38

Model Comparison

m3_comparison <- loo(m3, m3_interaction_gender)
m3_comparison
m3_compari <- brms::waic(m3, m3_interaction_gender)
bayes_factor(m3_interaction_gender, m3)

# Model comparison favors m3 over m3_interaction_gender

Mediation

Mediation Model attempt

mediation_model.1 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum)
mediation_model.2 <- bf(riskBenefitSum_z ~ riskSum_z + riskPerceptionSum_z + PNI_Sum)
mediation_model_1 <- brm(mediation_model.1 + mediation_model.2 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE))

mediation_model.3 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + dominance_Sum)
mediation_model.4 <- bf(riskBenefitSum_z ~ riskSum_z + riskPerceptionSum_z + dominance_Sum)
mediation_model_2 <- brm(mediation_model.3 + mediation_model.4 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE))

mediation_model.5 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + prestige_Sum)
mediation_model.6 <- bf(riskBenefitSum_z ~ riskSum_z + riskPerceptionSum_z + prestige_Sum)
mediation_model_3 <- brm(mediation_model.5 + mediation_model.6 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE))

mediation_model.7 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + leadership_Sum)
mediation_model.8 <- bf(riskBenefitSum_z ~ riskSum_z + riskPerceptionSum_z + leadership_Sum)
mediation_model_4 <- brm(mediation_model.7 + mediation_model.8 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE))
mediation_loo <- brms::loo(mediation_model_1, mediation_model_2, mediation_model_3, mediation_model_4)
mediation_comparison <- bayesfactor_models(mediation_model_1, mediation_model_2, mediation_model_3, mediation_model_4, denominator = mediation_model_4)
saveRDS(mediation_loo, "mediation_loo.rds")
saveRDS(mediation_comparison, "mediation_comparison.rds")
# Both LOO and the Bayes factors favor mediation_model_2, the model with dominance, as the strongest
mediation_loo
## Output of model 'mediation_model_1':
## 
## Computed from 36000 by 279 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -668.7 23.7
## p_loo        11.8  1.6
## looic      1337.4 47.5
## ------
## Monte Carlo SE of elpd_loo is 0.0.
## 
## All Pareto k estimates are good (k < 0.5).
## See help('pareto-k-diagnostic') for details.
## 
## Output of model 'mediation_model_2':
## 
## Computed from 36000 by 279 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -667.7 23.3
## p_loo        11.5  1.5
## looic      1335.5 46.6
## ------
## Monte Carlo SE of elpd_loo is 0.0.
## 
## All Pareto k estimates are good (k < 0.5).
## See help('pareto-k-diagnostic') for details.
## 
## Output of model 'mediation_model_3':
## 
## Computed from 36000 by 279 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -670.6 23.5
## p_loo        11.8  1.5
## looic      1341.2 47.0
## ------
## Monte Carlo SE of elpd_loo is 0.0.
## 
## All Pareto k estimates are good (k < 0.5).
## See help('pareto-k-diagnostic') for details.
## 
## Output of model 'mediation_model_4':
## 
## Computed from 36000 by 279 log-likelihood matrix
## 
##          Estimate   SE
## elpd_loo   -673.7 24.1
## p_loo        11.7  1.5
## looic      1347.4 48.1
## ------
## Monte Carlo SE of elpd_loo is 0.0.
## 
## All Pareto k estimates are good (k < 0.5).
## See help('pareto-k-diagnostic') for details.
## 
## Model comparisons:
##                   elpd_diff se_diff
## mediation_model_2  0.0       0.0   
## mediation_model_1 -1.0       3.1   
## mediation_model_3 -2.9       4.4   
## mediation_model_4 -6.0       3.0
mediation_comparison
## Bayes Factors for Model Comparison
## 
##     Model     BF
## [1]       143.56
## [2]       354.27
## [3]        21.71
## 
## * Against Denominator: [4]
## *   Bayes Factor Type: marginal likelihoods (bridgesampling)
model_1_blavaan <- "
riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum
riskBenefitSum_z ~ riskSum_z + riskPerceptionSum_z + PNI_Sum
"

fit <- sem(model_1_blavaan, data = mediation_dataset)
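
The structural model is fitted with sem() (presumably lavaan's, given the model syntax) but never inspected; a minimal follow-up:

# Inspect the path estimates; standardized coefficients are comparable to the z-scored brms models above.
summary(fit, standardized = TRUE, fit.measures = TRUE)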

m_model_1 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum)
m_model_2 <- bf(riskBenefitSum_z ~ PNI_Sum)

Mediation_comparison_1 <- brm(m_model_1 + m_model_2 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE))

summary(Mediation_comparison_1)
##  Family: MV(gaussian, gaussian) 
##   Links: mu = identity; sigma = identity
##          mu = identity; sigma = identity 
## Formula: riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum 
##          riskBenefitSum_z ~ PNI_Sum 
##    Data: mediation_dataset (Number of observations: 279) 
##   Draws: 4 chains, each with iter = 10000; warmup = 1000; thin = 1;
##          total post-warmup draws = 36000
## 
## Population-Level Effects: 
##                              Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## riskSumz_Intercept              -0.78      0.22    -1.21    -0.34 1.00    47709
## riskBenefitSumz_Intercept       -1.02      0.27    -1.55    -0.48 1.00    59884
## riskSumz_riskBenefitSum_z        0.46      0.05     0.36     0.56 1.00    53703
## riskSumz_riskPerceptionSum_z    -0.30      0.05    -0.39    -0.21 1.00    50848
## riskSumz_PNI_Sum                 0.01      0.00     0.00     0.01 1.00    47821
## riskBenefitSumz_PNI_Sum          0.01      0.00     0.01     0.02 1.00    59824
##                              Tail_ESS
## riskSumz_Intercept              30618
## riskBenefitSumz_Intercept       28413
## riskSumz_riskBenefitSum_z       28519
## riskSumz_riskPerceptionSum_z    27366
## riskSumz_PNI_Sum                29742
## riskBenefitSumz_PNI_Sum         28143
## 
## Family Specific Parameters: 
##                       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sigma_riskSumz            0.76      0.03     0.70     0.83 1.00    59469
## sigma_riskBenefitSumz     0.98      0.04     0.90     1.07 1.00    52622
##                       Tail_ESS
## sigma_riskSumz           26298
## sigma_riskBenefitSumz    26656
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
Mediation_comparison_1_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(Mediation_comparison_1))


kable(Mediation_comparison_1_fixef[
  sign_match(Mediation_comparison_1_fixef[, 4]) == sign_match(Mediation_comparison_1_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Risk * Intercept -0.78 0.95 -1.21 -0.34
Risk Benefit * Intercept -1.02 0.95 -1.55 -0.48
Risk * Risk Benefit 0.46 0.95 0.36 0.56
Risk * Risk Perception -0.3 0.95 -0.39 -0.21
Risk Benefit * PNI 0.01 0.95 0.01 0.02

m_model_3 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum + dominance_Sum)
m_model_4 <- bf(riskBenefitSum_z ~ PNI_Sum + dominance_Sum)

Mediation_comparison_2 <- brm(m_model_3 + m_model_4 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE))
saveRDS(Mediation_comparison_2, "Mediation_comparison_2.rds")
summary(Mediation_comparison_2)
##  Family: MV(gaussian, gaussian) 
##   Links: mu = identity; sigma = identity
##          mu = identity; sigma = identity 
## Formula: riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum + dominance_Sum 
##          riskBenefitSum_z ~ PNI_Sum + dominance_Sum 
##    Data: mediation_dataset (Number of observations: 279) 
##   Draws: 4 chains, each with iter = 10000; warmup = 1000; thin = 1;
##          total post-warmup draws = 36000
## 
## Population-Level Effects: 
##                               Estimate Est.Error l-95% CI u-95% CI Rhat
## riskSumz_Intercept               -0.54      0.24    -1.02    -0.07 1.00
## riskBenefitSumz_Intercept        -0.46      0.30    -1.05     0.13 1.00
## riskSumz_riskBenefitSum_z         0.44      0.05     0.34     0.54 1.00
## riskSumz_riskPerceptionSum_z     -0.28      0.05    -0.37    -0.19 1.00
## riskSumz_PNI_Sum                  0.01      0.00     0.00     0.01 1.00
## riskSumz_dominance_Sum            0.11      0.05     0.01     0.22 1.00
## riskBenefitSumz_PNI_Sum           0.00      0.00    -0.00     0.01 1.00
## riskBenefitSumz_dominance_Sum     0.26      0.06     0.13     0.38 1.00
##                               Bulk_ESS Tail_ESS
## riskSumz_Intercept               47447    32044
## riskBenefitSumz_Intercept        48655    30697
## riskSumz_riskBenefitSum_z        41395    28921
## riskSumz_riskPerceptionSum_z     43496    29097
## riskSumz_PNI_Sum                 47126    32355
## riskSumz_dominance_Sum           41436    28687
## riskBenefitSumz_PNI_Sum          48976    31087
## riskBenefitSumz_dominance_Sum    42356    29264
## 
## Family Specific Parameters: 
##                       Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sigma_riskSumz            0.76      0.03     0.70     0.83 1.00    48314
## sigma_riskBenefitSumz     0.96      0.04     0.88     1.04 1.00    46680
##                       Tail_ESS
## sigma_riskSumz           29040
## sigma_riskBenefitSumz    27251
## 
## Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
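
The summary above contains both paths needed for the indirect effect of dominance on risk preference via expected benefits; a sketch of quantifying it from the posterior draws, assuming brms' b_<response>_<predictor> parameter naming shown above:

# Indirect effect = (dominance -> risk benefit) x (risk benefit -> risk).
draws <- brms::as_draws_df(Mediation_comparison_2)
indirect <- draws$b_riskBenefitSumz_dominance_Sum * draws$b_riskSumz_riskBenefitSum_z
direct <- draws$b_riskSumz_dominance_Sum
quantile(indirect, probs = c(0.025, 0.5, 0.975))
quantile(indirect + direct, probs = c(0.025, 0.5, 0.975)) # total effect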

Mediation_comparison_2_fixef <- MutateHDI::mutate_each_hdi_no_save(brms::fixef(Mediation_comparison_2))


kable(Mediation_comparison_2_fixef[
  sign_match(Mediation_comparison_2_fixef[, 4]) == sign_match(Mediation_comparison_2_fixef[, 5]),
  c("Parameter", "Estimate", "CI", "CI Low", "CI High")
], format = "html", booktabs = T, escape = F, longtable = F, digits = 2) %>%
  kable_styling(full_width = T) %>%
  remove_column(1)
Parameter Estimate CI CI Low CI High
Risk * Intercept -0.54 0.95 -1.02 -0.07
Risk * Risk Benefit 0.44 0.95 0.34 0.54
Risk * Risk Perception -0.28 0.95 -0.37 -0.19
Risk * Dominance 0.11 0.95 0.01 0.22
Risk Benefit * Dominance 0.26 0.95 0.13 0.38

m_model_5 <- bf(riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum + dominance_Sum + leadership_Sum + prestige_Sum)
m_model_6 <- bf(riskBenefitSum_z ~ PNI_Sum + dominance_Sum + leadership_Sum + prestige_Sum)

Mediation_comparison_3 <- brm(m_model_5 + m_model_6 + set_rescor(FALSE), warmup = 1000, iter = 10000, data = mediation_dataset, backend = "cmdstanr", save_pars = save_pars(all = TRUE), cores = parallel::detectCores())
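
Mediation_comparison_3 adds all three DoPL facets but is not evaluated here; the LOO comparison used for the earlier mediation models could be reused, for example:

# Compare the PNI-only, PNI + dominance, and PNI + full-DoPL mediation models.
loo(Mediation_comparison_1, Mediation_comparison_2, Mediation_comparison_3)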
model_blavaan_test <- "
riskSum_z ~ riskBenefitSum_z + riskPerceptionSum_z + PNI_Sum + dominance_Sum
riskBenefitSum_z ~ PNI_Sum + dominance_Sum
"

fit2 <- sem(model_blavaan_test, data = mediation_dataset)
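
As with the first lavaan-style model, fit2 is created but not summarised; a minimal follow-up:

# Inspect the dominance mediation paths from this second SEM fit.
summary(fit2, standardized = TRUE, fit.measures = TRUE)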