VRCF_data_all
#data
load("C:/Users/aaron/Desktop/PhD/Projects/RL_threat/RL_threat_data_analysis/VRCF data/VRCF_data_all.RData")
load("C:/Users/aaron/Desktop/PhD/Projects/RL_threat/RL_threat_data_analysis/VRCF data/VRCF_compmodel_outputs.RData")#comp_modelKnit
#__________
#knit
setwd("C:/Users/aaron/Desktop/PhD/Projects/RL_threat/RL_threat_data_analysis/RL_threat analysis")Packages
#packages
library(psych)
library(plyr)
library(dplyr)
library(tidyr)# long format
library(Routliers)
library(mvnormtest)
library(effectsize)
library(pwr)
library(moments)#kurtosis
#plot
library(ggthemes)
library(ggplot2)
library(flexplot)
library(ggsignif)
#library(hrbrthemes)
library(viridis)
library(Cairo)
library(sjPlot)
library(sjmisc)
#LMM Analysis
library(lme4)
library(lmerTest)
library(nlme, lib.loc = "C:/Program Files/R/R-4.2.2/library")
#LMM model outputs
library(sjPlot)
library(sjmisc)
library(sjlabelled)
#Lmm posthoc testing and plots
library(pbkrtest)
library(emmeans)
#bootstrap CIs
library(confintr)
#Bayesian HDI analysis
library(rjags)
library(BEST)
library(coda)
library(posterior)
library(bayestestR)
#Hierarchical RW model fitting (needed for the cmdstanr models below)
library(cmdstanr)
#Bootstrapping betas of LMM
library(boot)
library(reshape2)
To investigate differences in subjective experience between the threat and non-threat conditions, we tested for differences in the post-task questionnaire across conditions. For the affect variables, we used an LMM to predict rating, with fixed factors for affective category (e.g., “unpredictable”), condition, and their interaction. Intercepts were allowed to vary as a random factor at the level of the individual.
A significant effect of condition (F(1, 2673) = 131.290, p < .001), and of its interaction with affective category (F(13, 2673) = 17.787, p < .001), was found on rating.
Next, post hoc comparisons of affective terms were computed. To adjust for multiple comparisons, a multivariate t (MVT) correction was applied. Results suggested that ratings that the VRCF was “frightening” (t.ratio(2673) = -11.04, p < .001), “creepy” (t.ratio(2673) = -8.52, p < .001), “surprising” (t.ratio(2673) = -7.71, p < .001), “interesting” (t.ratio(2673) = -4.31, p < .001), “engaging” (t.ratio(2673) = -6.13, p < .001), and “unpredictable” (t.ratio(2673) = -3.80, p = .002) were higher following the threat condition than the non-threat condition.
Conversely, ratings that the VRCF was “boring” (t.ratio(2673) = 5.37, p < .001) were higher following the non-threat condition. All other comparisons were p’s > .05 (see Supplementary Materials).
[Note. We had considerably more power here than in previous studies, so more categories have inevitably come out as significant across conditions.]
#data
VRCF_affect<-VRCF_data_all %>%
select(ID,Frightening_NT, Creepy_NT, Frustrating_NT, Unpredictable_NT, Engaging_NT, Confusing_NT, Enjoyable_NT, Boring_NT, Funny_NT, Amusing_NT, Sad_NT, Disgusting_NT, Interesting_NT, Surprising_NT,
Frightening_T, Creepy_T, Frustrating_T, Unpredictable_T, Engaging_T, Confusing_T, Enjoyable_T, Boring_T, Funny_T, Amusing_T, Sad_T, Disgusting_T, Interesting_T, Surprising_T
)
#_____
# Use gather() to reshape the data
VRCF_affect_long <- gather(
VRCF_affect,
key = "condition_Rating",
value = "score",
-ID
)
#_____
#Separate the 'Condition_Rating' column into two new columns: 'Affect' and 'Condition'
VRCF_affect_long <- separate(VRCF_affect_long, condition_Rating, into = c("affect", "condition"), sep = "_")
#_____
#set ID, affect, and condition as factors
VRCF_affect_long$ID<-as.factor(VRCF_affect_long$ID)
VRCF_affect_long$affect<-as.factor(VRCF_affect_long$affect)
VRCF_affect_long$condition<-as.factor(VRCF_affect_long$condition)
#model
affect_Lmm<-lmer(score ~ affect * condition + (1 | ID), data = VRCF_affect_long)
#results
summary(affect_Lmm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: score ~ affect * condition + (1 | ID)
## Data: VRCF_affect_long
##
## REML criterion at convergence: 25371.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.9256 -0.6709 -0.0882 0.5933 3.8373
##
## Random effects:
## Groups Name Variance Std.Dev.
## ID (Intercept) 104.4 10.22
## Residual 492.4 22.19
## Number of obs: 2800, groups: ID, 100
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 18.240 2.443 1518.358 7.467 1.38e-13
## affectBoring 16.250 3.138 2673.000 5.178 2.41e-07
## affectConfusing 11.420 3.138 2673.000 3.639 0.000279
## affectCreepy 1.960 3.138 2673.000 0.625 0.532303
## affectDisgusting -15.630 3.138 2673.000 -4.981 6.74e-07
## affectEngaging 21.330 3.138 2673.000 6.797 1.31e-11
## affectEnjoyable 21.440 3.138 2673.000 6.832 1.03e-11
## affectFrightening -8.670 3.138 2673.000 -2.763 0.005770
## affectFrustrating 24.250 3.138 2673.000 7.728 1.54e-14
## affectFunny -10.920 3.138 2673.000 -3.480 0.000510
## affectInteresting 18.200 3.138 2673.000 5.800 7.43e-09
## affectSad -15.080 3.138 2673.000 -4.805 1.63e-06
## affectSurprising 0.070 3.138 2673.000 0.022 0.982205
## affectUnpredictable 25.000 3.138 2673.000 7.967 2.39e-15
## conditionT 1.640 3.138 2673.000 0.523 0.601294
## affectBoring:conditionT -18.500 4.438 2673.000 -4.169 3.16e-05
## affectConfusing:conditionT 4.270 4.438 2673.000 0.962 0.336063
## affectCreepy:conditionT 25.090 4.438 2673.000 5.653 1.74e-08
## affectDisgusting:conditionT 3.430 4.438 2673.000 0.773 0.439667
## affectEngaging:conditionT 17.600 4.438 2673.000 3.966 7.51e-05
## affectEnjoyable:conditionT 1.310 4.438 2673.000 0.295 0.767880
## affectFrightening:conditionT 33.010 4.438 2673.000 7.438 1.37e-13
## affectFrustrating:conditionT 1.000 4.438 2673.000 0.225 0.821742
## affectFunny:conditionT -2.020 4.438 2673.000 -0.455 0.649031
## affectInteresting:conditionT 11.890 4.438 2673.000 2.679 0.007426
## affectSad:conditionT 1.660 4.438 2673.000 0.374 0.708402
## affectSurprising:conditionT 22.550 4.438 2673.000 5.081 4.01e-07
## affectUnpredictable:conditionT 10.290 4.438 2673.000 2.319 0.020491
##
## (Intercept) ***
## affectBoring ***
## affectConfusing ***
## affectCreepy
## affectDisgusting ***
## affectEngaging ***
## affectEnjoyable ***
## affectFrightening **
## affectFrustrating ***
## affectFunny ***
## affectInteresting ***
## affectSad ***
## affectSurprising
## affectUnpredictable ***
## conditionT
## affectBoring:conditionT ***
## affectConfusing:conditionT
## affectCreepy:conditionT ***
## affectDisgusting:conditionT
## affectEngaging:conditionT ***
## affectEnjoyable:conditionT
## affectFrightening:conditionT ***
## affectFrustrating:conditionT
## affectFunny:conditionT
## affectInteresting:conditionT **
## affectSad:conditionT
## affectSurprising:conditionT ***
## affectUnpredictable:conditionT *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 28 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
anova(affect_Lmm)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## affect 632445 48650 13 2673 98.802 < 2.2e-16 ***
## condition 64647 64647 1 2673 131.290 < 2.2e-16 ***
## affect:condition 113860 8758 13 2673 17.787 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Q-Q plot
qqnorm(resid(affect_Lmm)) #_________
#creating table results
tab_model(affect_Lmm)
Dependent variable: score

| Predictors | Estimates | CI | p |
|---|---|---|---|
| (Intercept) | 18.24 | 13.45 – 23.03 | <0.001 |
| affect [Boring] | 16.25 | 10.10 – 22.40 | <0.001 |
| affect [Confusing] | 11.42 | 5.27 – 17.57 | <0.001 |
| affect [Creepy] | 1.96 | -4.19 – 8.11 | 0.532 |
| affect [Disgusting] | -15.63 | -21.78 – -9.48 | <0.001 |
| affect [Engaging] | 21.33 | 15.18 – 27.48 | <0.001 |
| affect [Enjoyable] | 21.44 | 15.29 – 27.59 | <0.001 |
| affect [Frightening] | -8.67 | -14.82 – -2.52 | 0.006 |
| affect [Frustrating] | 24.25 | 18.10 – 30.40 | <0.001 |
| affect [Funny] | -10.92 | -17.07 – -4.77 | 0.001 |
| affect [Interesting] | 18.20 | 12.05 – 24.35 | <0.001 |
| affect [Sad] | -15.08 | -21.23 – -8.93 | <0.001 |
| affect [Surprising] | 0.07 | -6.08 – 6.22 | 0.982 |
| affect [Unpredictable] | 25.00 | 18.85 – 31.15 | <0.001 |
| condition [T] | 1.64 | -4.51 – 7.79 | 0.601 |
| affect [Boring] × condition [T] | -18.50 | -27.20 – -9.80 | <0.001 |
| affect [Confusing] × condition [T] | 4.27 | -4.43 – 12.97 | 0.336 |
| affect [Creepy] × condition [T] | 25.09 | 16.39 – 33.79 | <0.001 |
| affect [Disgusting] × condition [T] | 3.43 | -5.27 – 12.13 | 0.440 |
| affect [Engaging] × condition [T] | 17.60 | 8.90 – 26.30 | <0.001 |
| affect [Enjoyable] × condition [T] | 1.31 | -7.39 – 10.01 | 0.768 |
| affect [Frightening] × condition [T] | 33.01 | 24.31 – 41.71 | <0.001 |
| affect [Frustrating] × condition [T] | 1.00 | -7.70 – 9.70 | 0.822 |
| affect [Funny] × condition [T] | -2.02 | -10.72 – 6.68 | 0.649 |
| affect [Interesting] × condition [T] | 11.89 | 3.19 – 20.59 | 0.007 |
| affect [Sad] × condition [T] | 1.66 | -7.04 – 10.36 | 0.708 |
| affect [Surprising] × condition [T] | 22.55 | 13.85 – 31.25 | <0.001 |
| affect [Unpredictable] × condition [T] | 10.29 | 1.59 – 18.99 | 0.020 |
| Random Effects | | | |
| σ2 | 492.39 | | |
| τ00 ID | 104.35 | | |
| ICC | 0.17 | | |
| N ID | 100 | | |
| Observations | 2800 | | |
| Marginal R2 / Conditional R2 | 0.327 / 0.445 | | |
#_________
# contrast with MVT correction
em_interaction <-emmeans(affect_Lmm, ~ affect * condition)
test(pairs(em_interaction, by = "affect"), by = NULL, adjust = "mvt")# correction this one is reported## contrast affect estimate SE df t.ratio p.value
## NT - T Amusing -1.64 3.14 2673 -0.523 1.0000
## NT - T Boring 16.86 3.14 2673 5.373 <.0001
## NT - T Confusing -5.91 3.14 2673 -1.883 0.5775
## NT - T Creepy -26.73 3.14 2673 -8.518 <.0001
## NT - T Disgusting -5.07 3.14 2673 -1.616 0.7921
## NT - T Engaging -19.24 3.14 2673 -6.131 <.0001
## NT - T Enjoyable -2.95 3.14 2673 -0.940 0.9974
## NT - T Frightening -34.65 3.14 2673 -11.042 <.0001
## NT - T Frustrating -2.64 3.14 2673 -0.841 0.9992
## NT - T Funny 0.38 3.14 2673 0.121 1.0000
## NT - T Interesting -13.53 3.14 2673 -4.311 0.0002
## NT - T Sad -3.30 3.14 2673 -1.052 0.9922
## NT - T Surprising -24.19 3.14 2673 -7.708 <.0001
## NT - T Unpredictable -11.93 3.14 2673 -3.802 0.0021
##
## Degrees-of-freedom method: kenward-roger
## P value adjustment: mvt method for 14 tests
Overall task score, between conditions [dependent t-test, Bayesian posterior estimation]
There was no significant effect of condition on overall score.
There was no significant difference in overall score between the threatening (M = xx.xx, SD = xx.xx) and non-threatening (M = xx.xx, SD = xx.xx) conditions, t(99) = .68, p = .498.
However, when an LMM modeling overall task score with condition, order (e.g., threat condition first), and their interaction was run, a significant interaction effect was found.
To assess the effect of order on performance, we ran an LMM with condition, order (i.e., which condition participants completed first), and their interaction as fixed factors, and score as the dependent variable. Intercepts were allowed to vary as a random factor at the level of the individual.
Results demonstrate that neither the effect of condition (F(1, 98) = .50, p = .479) nor order (F(1, 98) = 1.77, p = .186) on score was significant. However, there was a significant effect of the condition × order interaction on score (F(1, 98) = 10.00, p = .002).
Further investigation of the significant condition × order interaction on score, using an MVT correction, suggested that performance in the threat condition was worse only when participants completed the threat condition first (t.ratio(156) = 2.72, p = .014).
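The condition means and SDs above are left as placeholders (xx.xx); a minimal sketch, using the psych package loaded above, for computing them from the wide data:
#descriptive statistics (M, SD) for overall score in each condition
describe(VRCF_data_all[, c("NT_score", "T_score")])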
#dependent t.test
t.test(VRCF_data_all$NT_score, VRCF_data_all$T_score, alternative = "two.sided", paired = TRUE)##
## Paired t-test
##
## data: VRCF_data_all$NT_score and VRCF_data_all$T_score
## t = 0.67978, df = 99, p-value = 0.4982
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -1.055403 2.155403
## sample estimates:
## mean difference
## 0.55
BEST
#data
y1<-VRCF_data_all$NT_score
y2<-VRCF_data_all$T_score
#running model without priors
BESToutNFC <- BESTmcmc(y1, y2, parallel=T)
#Look at the result:
BESToutNFC
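A minimal follow-up sketch, assuming the BEST and bayestestR packages loaded above, for summarizing the posterior difference in group means before plotting:
#posterior summary of the BEST fit (group means, their difference, effect size, HDIs)
summary(BESToutNFC)
#the difference in group means can also be taken directly from the posterior draws
muDiff <- BESToutNFC$mu1 - BESToutNFC$mu2
bayestestR::hdi(muDiff, ci = 0.89)  # 89% HDI of the posterior difference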
plot(BESToutNFC)
Overall task score, between conditions (filtered by order)
#filter the data where order_first is 1
filtered_data_threat_first <- VRCF_data_all %>% filter(order_first == 1)
# Perform a paired t-test on the filtered data
t.test(filtered_data_threat_first$NT_score, filtered_data_threat_first$T_score,
paired = TRUE,
alternative = "two.sided") # Change "two.sided" if needed##
## Paired t-test
##
## data: filtered_data_threat_first$NT_score and filtered_data_threat_first$T_score
## t = 2.8389, df = 49, p-value = 0.006572
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## 0.8763679 5.1236321
## sample estimates:
## mean difference
## 3
#___
#filter the data where order_first is 0
filtered_data_nonthreat_first <- VRCF_data_all %>% filter(order_first == 0)
# Perform a paired t-test on the filtered data
t.test(filtered_data_nonthreat_first$NT_score, filtered_data_nonthreat_first$T_score,
paired = TRUE,
alternative = "two.sided") ##
## Paired t-test
##
## data: filtered_data_nonthreat_first$NT_score and filtered_data_nonthreat_first$T_score
## t = -1.6771, df = 49, p-value = 0.09989
## alternative hypothesis: true mean difference is not equal to 0
## 95 percent confidence interval:
## -4.176654 0.376654
## sample estimates:
## mean difference
## -1.9
Overall task score, group and order (e.g., threat condition first) interaction [LMM]
#mixed model (accounting for order, e.g., doing the threat condition first)
#data
VRCF_score_overall<-VRCF_data_all %>%
select(ID,
order_first,
NT_score,
T_score
)
#_____
# Use gather() to reshape the data
VRCF_score_overall_long <- gather(
VRCF_score_overall,
key = "condition",
value = "score",
-ID,
-order_first
)
#_____
#set ID, order_first, and condition as factors
VRCF_score_overall_long$ID<-as.factor(VRCF_score_overall_long$ID)
VRCF_score_overall_long$order_first<-as.factor(VRCF_score_overall_long$order_first)
VRCF_score_overall_long$condition<-as.factor(VRCF_score_overall_long$condition)
#_____
#model
score_overall_Lmm<-lmer(score ~ condition * order_first + (1 | ID), data = VRCF_score_overall_long)
#results
summary(score_overall_Lmm)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: score ~ condition * order_first + (1 | ID)
## Data: VRCF_score_overall_long
##
## REML criterion at convergence: 1348.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.77635 -0.57352 -0.09123 0.51181 2.62350
##
## Random effects:
## Groups Name Variance Std.Dev.
## ID (Intercept) 31.23 5.588
## Residual 30.00 5.477
## Number of obs: 200, groups: ID, 100
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 53.900 1.107 155.542 48.707 < 2e-16 ***
## conditionT_score 1.900 1.095 98.000 1.734 0.08599 .
## order_first1 0.640 1.565 155.542 0.409 0.68314
## conditionT_score:order_first1 -4.900 1.549 98.000 -3.163 0.00208 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) cndtT_ ordr_1
## condtnT_scr -0.495
## order_frst1 -0.707 0.350
## cndtnT_s:_1 0.350 -0.707 -0.495
anova(score_overall_Lmm)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## condition 15.125 15.125 1 98 0.5041 0.479380
## order_first 53.153 53.153 1 98 1.7716 0.186269
## condition:order_first 300.125 300.125 1 98 10.0033 0.002081 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Q-Q plot
qqnorm(resid(score_overall_Lmm)) #_____
#creating table results
tab_model(score_overall_Lmm)
Dependent variable: score

| Predictors | Estimates | CI | p |
|---|---|---|---|
| (Intercept) | 53.90 | 51.72 – 56.08 | <0.001 |
| condition [T_score] | 1.90 | -0.26 – 4.06 | 0.084 |
| order first [1] | 0.64 | -2.45 – 3.73 | 0.683 |
| condition [T_score] × order first [1] | -4.90 | -7.96 – -1.84 | 0.002 |
| Random Effects | | | |
| σ2 | 30.00 | | |
| τ00 ID | 31.23 | | |
| ICC | 0.51 | | |
| N ID | 100 | | |
| Observations | 200 | | |
| Marginal R2 / Conditional R2 | 0.038 / 0.529 | | |
#_____
#Estimated marginal means
em_all <-emmeans (score_overall_Lmm, ~ condition | order_first)
em_all
## order_first = 0:
## condition emmean SE df lower.CL upper.CL
## NT_score 53.9 1.11 156 51.7 56.1
## T_score 55.8 1.11 156 53.6 58.0
##
## order_first = 1:
## condition emmean SE df lower.CL upper.CL
## NT_score 54.5 1.11 156 52.4 56.7
## T_score 51.5 1.11 156 49.4 53.7
##
## Degrees-of-freedom method: kenward-roger
## Confidence level used: 0.95
#_____
#plot
flexplot(score ~ condition + order_first, data = VRCF_score_overall_long, summary="mean_sd",raw.data = FALSE)#_________
#correct contrast used in study see:https://cran.r-project.org/web/packages/emmeans/vignettes/AQuickStart.html
em_interaction <-emmeans(score_overall_Lmm, ~ condition * order_first)
test(pairs(em_interaction, by = "condition"), by = NULL, adjust = "mvt")# correction this one is reported## contrast condition estimate SE df t.ratio p.value
## order_first0 - order_first1 NT_score -0.64 1.57 156 -0.409 0.8851
## order_first0 - order_first1 T_score 4.26 1.57 156 2.722 0.0138
##
## Degrees-of-freedom method: kenward-roger
## P value adjustment: mvt method for 2 tests
Bootstrapped estimates (betas) of the condition × order effect on score.
#bootstrapping estimates from the LMM
boot_fun <- function(model) {#define a function for bootstrapping
fixef(model)
}
#______
#perform bootstrapping of LMM model
boot_results <- bootMer(score_overall_Lmm, boot_fun, nsim = 1000, type = "parametric")# simulations = (nsim = 1000)
#______
#results
summary(boot_results$t)
#extract bootstrap estimates
boot_estimates <- boot_results$t
colnames(boot_estimates) <- names(fixef(score_overall_Lmm))
boot_estimates = data.frame(boot_estimates)
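Before plotting the histogram below, a minimal sketch of a 95% interval for the bootstrapped condition × order estimate (the column name assumes data.frame() has converted ":" to "."):
#95% percentile interval for the bootstrapped interaction estimate
quantile(boot_estimates$conditionT_score.order_first1, probs = c(0.025, 0.975), na.rm = TRUE)
#equivalently, from the bootMer object itself (index 4 = the interaction term in fixef order)
boot.ci(boot_results, type = "perc", index = 4)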
hist(boot_estimates$conditionT_score.order_first1)
To assess the effect of block on performance, we ran an LMM with block, condition, order (i.e., which condition participants completed first), and their interactions as fixed factors, and score as the dependent variable. Intercepts were allowed to vary as a random factor at the level of the individual.
Results demonstrate that the effect of the block × condition × order interaction on score was not significant (F(3, 686) = .74, p = .526). However, a significant effect of the condition × order interaction on score was again found (F(1, 686) = 10.88, p = .001).
The inclusion of an autocorrelation term did not significantly improve model fit.
#data
VRCF_score_overtime <-VRCF_data_all %>%
select(ID,
order_first,
NT_score_block_1,
NT_score_block_2,
NT_score_block_3,
NT_score_block_4,
T_score_block_1,
T_score_block_2,
T_score_block_3,
T_score_block_4,
)
#rename columns so a single "_" separates condition from block
names(VRCF_score_overtime)<- c("ID",
"order_first",
"NT_block1",
"NT_block2",
"NT_block3",
"NT_block4",
"T_block1",
"T_block2",
"T_block3",
"T_block4"
)
#use gather() to reshape the data
VRCF_score_overtime_long <- gather(
VRCF_score_overtime,
key = "block",
value = "score",
-ID,
-order_first
)
#Separate the 'block' column into two new columns: 'block' and 'condition'
VRCF_score_overtime_long <- separate(VRCF_score_overtime_long, block, into = c("condition", "block"), sep = "_")
#set ID, condition, block, and order_first as factors
VRCF_score_overtime_long$ID<-as.factor(VRCF_score_overtime_long$ID)
VRCF_score_overtime_long$condition<-as.factor(VRCF_score_overtime_long$condition)
VRCF_score_overtime_long$block<-as.factor(VRCF_score_overtime_long$block)
VRCF_score_overtime_long$order_first<-as.factor(VRCF_score_overtime_long$order_first)
#model
score_overtime_Lmm<-lmer(score ~ block * condition * order_first + (1 | ID), data = VRCF_score_overtime_long)
#results
anova(score_overtime_Lmm)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## block 181.254 60.418 3 686 8.7649 1.039e-05 ***
## condition 3.781 3.781 1 686 0.5486 0.45916
## order_first 12.212 12.212 1 98 1.7716 0.18627
## block:condition 12.644 4.215 3 686 0.6114 0.60775
## block:order_first 34.814 11.605 3 686 1.6835 0.16921
## condition:order_first 75.031 75.031 1 686 10.8849 0.00102 **
## block:condition:order_first 15.394 5.131 3 686 0.7444 0.52583
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Q-Q plot
qqnorm(resid(score_overtime_Lmm))
LMM model (with autocorrelation term, using nlme)
Model comparisons demonstrate that the inclusion of an autocorrelation term does not significantly improve model fit.
#model: specify the model with nlme::lme, including a corAR1() correlation argument for the autocorrelation structure
score_overtime_Lmm_autocor <- lme(score ~ block * condition * order_first, random = ~ 1 | ID, correlation = corAR1(form = ~ 1 | ID), data = VRCF_score_overtime_long)
#results
summary(score_overtime_Lmm_autocor)
## Linear mixed-effects model fit by REML
## Data: VRCF_score_overtime_long
## AIC BIC logLik
## 3957.37 4045.994 -1959.685
##
## Random effects:
## Formula: ~1 | ID
## (Intercept) Residual
## StdDev: 1.410348 2.632793
##
## Correlation Structure: AR(1)
## Formula: ~1 | ID
## Parameter estimate(s):
## Phi
## 0.02166325
## Fixed effects: score ~ block * condition * order_first
## Value Std.Error DF t-value p-value
## (Intercept) 13.68 0.4223903 686 32.38710 0.0000
## blockblock2 -0.54 0.5208239 686 -1.03682 0.3002
## blockblock3 -0.10 0.5264350 686 -0.18996 0.8494
## blockblock4 -0.18 0.5265559 686 -0.34184 0.7326
## conditionT 0.60 0.5265586 686 1.13947 0.2549
## order_first1 0.70 0.5973501 98 1.17184 0.2441
## blockblock2:conditionT -0.46 0.7365581 686 -0.62453 0.5325
## blockblock3:conditionT 0.14 0.7445789 686 0.18803 0.8509
## blockblock4:conditionT -0.18 0.7486846 686 -0.24042 0.8101
## blockblock2:order_first1 -0.92 0.7365562 686 -1.24906 0.2121
## blockblock3:order_first1 -0.12 0.7444916 686 -0.16118 0.8720
## blockblock4:order_first1 -1.12 0.7446626 686 -1.50404 0.1330
## conditionT:order_first1 -0.92 0.7446663 686 -1.23545 0.2171
## blockblock2:conditionT:order_first1 -0.24 1.0416504 686 -0.23040 0.8178
## blockblock3:conditionT:order_first1 -1.22 1.0529936 686 -1.15860 0.2470
## blockblock4:conditionT:order_first1 0.24 1.0587999 686 0.22667 0.8207
## Correlation:
## (Intr) blckb2 blckb3 blckb4 cndtnT ordr_1
## blockblock2 -0.617
## blockblock3 -0.623 0.505
## blockblock4 -0.623 0.495 0.511
## conditionT -0.623 0.495 0.500 0.511
## order_first1 -0.707 0.436 0.441 0.441 0.441
## blockblock2:conditionT 0.436 -0.707 -0.358 -0.357 -0.699 -0.308
## blockblock3:conditionT 0.441 -0.357 -0.707 -0.369 -0.707 -0.312
## blockblock4:conditionT 0.438 -0.348 -0.359 -0.711 -0.711 -0.310
## blockblock2:order_first1 0.436 -0.707 -0.357 -0.350 -0.350 -0.617
## blockblock3:order_first1 0.441 -0.357 -0.707 -0.361 -0.354 -0.623
## blockblock4:order_first1 0.441 -0.350 -0.361 -0.707 -0.361 -0.623
## conditionT:order_first1 0.441 -0.350 -0.354 -0.361 -0.707 -0.623
## blockblock2:conditionT:order_first1 -0.308 0.500 0.253 0.253 0.495 0.436
## blockblock3:conditionT:order_first1 -0.312 0.253 0.500 0.261 0.500 0.441
## blockblock4:conditionT:order_first1 -0.310 0.246 0.254 0.503 0.503 0.438
## blc2:T blc3:T blc4:T bl2:_1 bl3:_1 bl4:_1
## blockblock2
## blockblock3
## blockblock4
## conditionT
## order_first1
## blockblock2:conditionT
## blockblock3:conditionT 0.505
## blockblock4:conditionT 0.497 0.513
## blockblock2:order_first1 0.500 0.253 0.246
## blockblock3:order_first1 0.253 0.500 0.254 0.505
## blockblock4:order_first1 0.253 0.261 0.503 0.495 0.511
## conditionT:order_first1 0.495 0.500 0.503 0.495 0.500 0.511
## blockblock2:conditionT:order_first1 -0.707 -0.357 -0.352 -0.707 -0.358 -0.357
## blockblock3:conditionT:order_first1 -0.357 -0.707 -0.363 -0.357 -0.707 -0.369
## blockblock4:conditionT:order_first1 -0.352 -0.363 -0.707 -0.348 -0.359 -0.711
## cnT:_1 b2:T:_ b3:T:_
## blockblock2
## blockblock3
## blockblock4
## conditionT
## order_first1
## blockblock2:conditionT
## blockblock3:conditionT
## blockblock4:conditionT
## blockblock2:order_first1
## blockblock3:order_first1
## blockblock4:order_first1
## conditionT:order_first1
## blockblock2:conditionT:order_first1 -0.699
## blockblock3:conditionT:order_first1 -0.707 0.505
## blockblock4:conditionT:order_first1 -0.711 0.497 0.513
##
## Standardized Within-Group Residuals:
## Min Q1 Med Q3 Max
## -2.93165801 -0.67455520 0.03571571 0.60402926 2.73892038
##
## Number of Observations: 800
## Number of Groups: 100
anova(score_overtime_Lmm_autocor)
## numDF denDF F-value p-value
## (Intercept) 1 686 6296.839 <.0001
## block 3 686 9.000 <.0001
## condition 1 686 0.500 0.4797
## order_first 1 98 1.761 0.1876
## block:condition 3 686 0.619 0.6032
## block:order_first 3 686 1.689 0.1680
## condition:order_first 1 686 10.419 0.0013
## block:condition:order_first 3 686 0.757 0.5185
#Q-Q plot
qqnorm(resid(score_overtime_Lmm_autocor)) #__________
#fit the alternative model with lme (models must be fit in the same package to be compared)
score_overtime_Lme <- lme(score ~ block * condition * order_first, random = ~ 1 | ID, data = VRCF_score_overtime_long)
#results
summary(score_overtime_Lme)
## Linear mixed-effects model fit by REML
## Data: VRCF_score_overtime_long
## AIC BIC logLik
## 3955.599 4039.558 -1959.799
##
## Random effects:
## Formula: ~1 | ID
## (Intercept) Residual
## StdDev: 1.423985 2.625483
##
## Fixed effects: score ~ block * condition * order_first
## Value Std.Error DF t-value p-value
## (Intercept) 13.68 0.4223954 686 32.38672 0.0000
## blockblock2 -0.54 0.5250965 686 -1.02838 0.3041
## blockblock3 -0.10 0.5250965 686 -0.19044 0.8490
## blockblock4 -0.18 0.5250965 686 -0.34279 0.7319
## conditionT 0.60 0.5250965 686 1.14265 0.2536
## order_first1 0.70 0.5973573 98 1.17183 0.2441
## blockblock2:conditionT -0.46 0.7425986 686 -0.61945 0.5358
## blockblock3:conditionT 0.14 0.7425986 686 0.18853 0.8505
## blockblock4:conditionT -0.18 0.7425986 686 -0.24239 0.8085
## blockblock2:order_first1 -0.92 0.7425986 686 -1.23889 0.2158
## blockblock3:order_first1 -0.12 0.7425986 686 -0.16159 0.8717
## blockblock4:order_first1 -1.12 0.7425986 686 -1.50822 0.1320
## conditionT:order_first1 -0.92 0.7425986 686 -1.23889 0.2158
## blockblock2:conditionT:order_first1 -0.24 1.0501931 686 -0.22853 0.8193
## blockblock3:conditionT:order_first1 -1.22 1.0501931 686 -1.16169 0.2458
## blockblock4:conditionT:order_first1 0.24 1.0501931 686 0.22853 0.8193
## Correlation:
## (Intr) blckb2 blckb3 blckb4 cndtnT ordr_1
## blockblock2 -0.622
## blockblock3 -0.622 0.500
## blockblock4 -0.622 0.500 0.500
## conditionT -0.622 0.500 0.500 0.500
## order_first1 -0.707 0.440 0.440 0.440 0.440
## blockblock2:conditionT 0.440 -0.707 -0.354 -0.354 -0.707 -0.311
## blockblock3:conditionT 0.440 -0.354 -0.707 -0.354 -0.707 -0.311
## blockblock4:conditionT 0.440 -0.354 -0.354 -0.707 -0.707 -0.311
## blockblock2:order_first1 0.440 -0.707 -0.354 -0.354 -0.354 -0.622
## blockblock3:order_first1 0.440 -0.354 -0.707 -0.354 -0.354 -0.622
## blockblock4:order_first1 0.440 -0.354 -0.354 -0.707 -0.354 -0.622
## conditionT:order_first1 0.440 -0.354 -0.354 -0.354 -0.707 -0.622
## blockblock2:conditionT:order_first1 -0.311 0.500 0.250 0.250 0.500 0.440
## blockblock3:conditionT:order_first1 -0.311 0.250 0.500 0.250 0.500 0.440
## blockblock4:conditionT:order_first1 -0.311 0.250 0.250 0.500 0.500 0.440
## blc2:T blc3:T blc4:T bl2:_1 bl3:_1 bl4:_1
## blockblock2
## blockblock3
## blockblock4
## conditionT
## order_first1
## blockblock2:conditionT
## blockblock3:conditionT 0.500
## blockblock4:conditionT 0.500 0.500
## blockblock2:order_first1 0.500 0.250 0.250
## blockblock3:order_first1 0.250 0.500 0.250 0.500
## blockblock4:order_first1 0.250 0.250 0.500 0.500 0.500
## conditionT:order_first1 0.500 0.500 0.500 0.500 0.500 0.500
## blockblock2:conditionT:order_first1 -0.707 -0.354 -0.354 -0.707 -0.354 -0.354
## blockblock3:conditionT:order_first1 -0.354 -0.707 -0.354 -0.354 -0.707 -0.354
## blockblock4:conditionT:order_first1 -0.354 -0.354 -0.707 -0.354 -0.354 -0.707
## cnT:_1 b2:T:_ b3:T:_
## blockblock2
## blockblock3
## blockblock4
## conditionT
## order_first1
## blockblock2:conditionT
## blockblock3:conditionT
## blockblock4:conditionT
## blockblock2:order_first1
## blockblock3:order_first1
## blockblock4:order_first1
## conditionT:order_first1
## blockblock2:conditionT:order_first1 -0.707
## blockblock3:conditionT:order_first1 -0.707 0.500
## blockblock4:conditionT:order_first1 -0.707 0.500 0.500
##
## Standardized Within-Group Residuals:
## Min Q1 Med Q3 Max
## -2.9500641 -0.6714733 0.0384233 0.6012923 2.7492985
##
## Number of Observations: 800
## Number of Groups: 100
anova(score_overtime_Lme)
## numDF denDF F-value p-value
## (Intercept) 1 686 6294.741 <.0001
## block 3 686 8.765 <.0001
## condition 1 686 0.549 0.4592
## order_first 1 98 1.772 0.1863
## block:condition 3 686 0.611 0.6078
## block:order_first 3 686 1.683 0.1692
## condition:order_first 1 686 10.885 0.0010
## block:condition:order_first 3 686 0.744 0.5258
#compare models: to see if the autocorrelation term improves fit, compare the original model with the one including it using AIC (Akaike Information Criterion) or another appropriate model comparison metric
AIC(score_overtime_Lme, score_overtime_Lmm_autocor)
## df AIC
## score_overtime_Lme 18 3955.599
## score_overtime_Lmm_autocor 19 3957.370
#compare models using a likelihood ratio test (LRT)
anova(score_overtime_Lme,score_overtime_Lmm_autocor)
## Model df AIC BIC logLik Test
## score_overtime_Lme 1 18 3955.599 4039.558 -1959.799
## score_overtime_Lmm_autocor 2 19 3957.370 4045.994 -1959.685 1 vs 2
## L.Ratio p-value
## score_overtime_Lme
## score_overtime_Lmm_autocor 0.2283341 0.6328
Rescorla-Wagner [Learning rate, Inverse temperature]
In the model, alpha (learning rate) captures learning and adaptation, i.e., how participants update their beliefs; beta (inverse temperature) captures decision-making precision, i.e., how strongly they act on those beliefs.
Both parameters capture key individual differences: alpha (learning rate) shows variability in learning speed, and beta (inverse temperature) reflects differences in decision-making style (deterministic vs. exploratory).
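As a companion to the Stan code below, a minimal R sketch (with hypothetical alpha, beta, and reward probabilities, not fitted estimates) of the Rescorla-Wagner update and softmax choice rule the hierarchical model assumes:
#illustrative simulation of one participant under a Rescorla-Wagner + softmax model
simulate_rw <- function(n_trials, alpha, beta, reward_prob = c(0.8, 0.5, 0.2)) {
  Q <- rep(0, 3)                                  # expected value of each of the 3 options
  choices <- rewards <- integer(n_trials)
  for (t in 1:n_trials) {
    p <- exp(beta * Q) / sum(exp(beta * Q))       # softmax choice probabilities
    choices[t] <- sample(1:3, 1, prob = p)
    rewards[t] <- rbinom(1, 1, reward_prob[choices[t]])
    Q[choices[t]] <- Q[choices[t]] + alpha * (rewards[t] - Q[choices[t]])  # RW update
  }
  data.frame(trial = seq_len(n_trials), choice = choices, reward = rewards)
}
set.seed(1)
head(simulate_rw(n_trials = 60, alpha = 0.3, beta = 2))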
#fit using cmdstanr
stan_model_code <- "
data {
int<lower=1> P; // number of participants
array[P] int<lower=1> N; // number of trials per participant
array[sum(N)] int<lower=1,upper=3> choice; // stacked choices across all participants
array[sum(N)] int<lower=0,upper=1> reward; // stacked rewards across all participants
array[P] int<lower=1> start_idx; // starting index for each participant's trials
}
parameters {
// Individual-level parameters for each participant
vector<lower=0, upper=1>[P] alpha; // learning rate for each participant
vector<lower=0, upper=5>[P] beta; // inverse temperature for each participant
// Group-level (hyper-)parameters
real<lower=0, upper=1> mu_alpha; // group mean for alpha
real<lower=0> sigma_alpha; // group standard deviation for alpha
real<lower=0, upper=5> mu_beta; // group mean for beta
real<lower=0> sigma_beta; // group standard deviation for beta
}
model {
// Hyperpriors for group-level parameters
mu_alpha ~ beta(2, 2);
sigma_alpha ~ cauchy(0, 2);
mu_beta ~ normal(0, 2);
sigma_beta ~ cauchy(0, 2);
// Priors for individual-level parameters
alpha ~ normal(mu_alpha, sigma_alpha);
beta ~ normal(mu_beta, sigma_beta);
// Likelihood: loop over each participant and then each trial
for (p in 1:P) {
int idx = start_idx[p];
// Initialize Q-values (expected rewards) at 0 for the three choices
vector[3] Q = rep_vector(0, 3);
for (n in 1:N[p]) {
int i = idx + n - 1; // Calculate the corresponding index in the stacked data
vector[3] prob = softmax(beta[p] * Q); // Compute choice probabilities via softmax
target += log(prob[choice[i]]); // Add the log probability of the observed choice
// Update Q-value for the chosen action using the Rescorla-Wagner rule:
Q[choice[i]] = Q[choice[i]] + alpha[p] * (reward[i] - Q[choice[i]]);
}
}
}
"
#_________________________________________________________
# Write the Stan model code to a file
setwd("C:/Users/aaron/Desktop/PhD/Projects/RL_threat/RL_threat_data_analysis/RL_threat analysis")
writeLines(stan_model_code, con = "hierarchical_RW_cmdstanr.stan")
#____
# Compile the model
setwd("C:/Users/aaron/Desktop/PhD/Projects/RL_threat/RL_threat_data_analysis/RL_threat analysis")
model <- cmdstan_model("hierarchical_RW_cmdstanr.stan")
#____
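The data lists stan_data_T and stan_data_NT passed to model$sample() below are built outside this chunk (presumably from the loaded .RData objects); a minimal sketch, using a synthetic stand-in data frame with hypothetical columns ID, choice (1–3), and reward (0/1), of the structure the Stan data block expects:
#hypothetical sketch of the Stan data list; the real stan_data_T/NT are constructed elsewhere
trials_T_example <- data.frame(
  ID     = rep(1:2, each = 5),                    # 2 participants x 5 trials, rows sorted by participant
  choice = sample(1:3, 10, replace = TRUE),       # choice coded 1-3
  reward = rbinom(10, 1, 0.5)                     # reward coded 0/1
)
N_per <- as.integer(table(trials_T_example$ID))   # trials per participant (assumes sorted rows)
stan_data_example <- list(
  P         = length(N_per),                      # number of participants
  N         = N_per,                              # trials per participant
  choice    = trials_T_example$choice,            # stacked choices across participants
  reward    = trials_T_example$reward,            # stacked rewards across participants
  start_idx = as.integer(cumsum(c(1, head(N_per, -1))))  # first row index of each participant
)
str(stan_data_example)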
# Run the model for threat condition
fit_T <- model$sample(
data = stan_data_T,
iter_sampling = 4000, # Number of sampling iterations
iter_warmup = 1000, # Warmup iterations
chains = 4, # Number of chains
parallel_chains = 4 # Parallel computation
)
#_________________________________________________________
# Run the model for non threat condition
fit_NT <- model$sample(
data = stan_data_NT,
iter_sampling = 4000, # Number of sampling iterations
iter_warmup = 1000, # Warmup iterations
chains = 4, # Number of chains
parallel_chains = 4 # Parallel computation
)
Difference in learning rate across conditions: 92.46% probability that the difference between conditions is in the observed direction (probability of direction)
#learning rate
#visualize
draws1 <- as_draws_matrix(fit_T$draws("mu_alpha"))
draws2 <- as_draws_matrix(fit_NT$draws("mu_alpha"))
#convert to data frames for ggplot2
y1_df <- data.frame(value = as.numeric(draws1), group = "T")
y2_df <- data.frame(value = as.numeric(draws2), group = "NT")
#combine the data frames
combined_df <- rbind(y1_df, y2_df)
#Plot density curves using ggplot2
ggplot(combined_df, aes(x = value, fill = group)) +
geom_density(alpha = 0.5) + # Density plot with transparency
labs(title = "Posterior Distributions of learning rate",
x = "mu_alpha",
y = "Density") +
scale_fill_manual(values = c("T" = "red", "NT" = "gray")) + # Custom colors
theme_minimal()
HDI 89%
#HDI from model
#extract posterior samples for the parameters of interest (e.g., mu_alpha)
draws1 <- as.matrix(fit_T$draws("mu_alpha"))
draws2 <- as.matrix(fit_NT$draws("mu_alpha"))
# compute the difference between posterior samples
comparison <- draws1 - draws2
#convert the comparison to an MCMC object (required for HPDinterval)
comparison_mcmc <- as.mcmc(comparison)
#calculate the HPD interval (e.g., 89% interval)
hpd_result <- HPDinterval(comparison_mcmc, prob = 0.89)
print(hpd_result)
## lower upper
## var1 -0.009827 0.164674
## attr(,"Probability")
## [1] 0.89
#Visualize the distribution of differences
hist(comparison, breaks = 30, main = "Distribution of Differences",
xlab = "T - NT (mu_alpha)", col = "skyblue", border = "white")
abline(v = hpd_result, col = "red", lwd = 2, lty = 2) # Add HPD bounds
Calculate probability of direction
#compare posterior means: Probability mu_alpha (Group 1 > Group 2)
pd_diff_mu_alpha <- pd(draws1 - draws2 )
print(pd_diff_mu_alpha)
## Probability of Direction
##
## Parameter | pd
## ------------------
## Posterior | 92.46%
Difference in inverse temperature across conditions: 82.21% probability that the difference between conditions is in the observed direction (probability of direction)
#visualize
draws1 <- as_draws_matrix(fit_T$draws("mu_beta"))
draws2 <- as_draws_matrix(fit_NT$draws("mu_beta"))
#convert to data frames for ggplot2
y1_df <- data.frame(value = as.numeric(draws1), group = "T")
y2_df <- data.frame(value = as.numeric(draws2), group = "NT")
#combine the data frames
combined_df <- rbind(y1_df, y2_df)
#Plot density curves using ggplot2
ggplot(combined_df, aes(x = value, fill = group)) +
geom_density(alpha = 0.5) + # Density plot with transparency
labs(title = "Posterior Distributions of inverse temp",
x = "mu_beta",
y = "Density") +
scale_fill_manual(values = c("T" = "red", "NT" = "gray")) + # Custom colors
theme_minimal()
HDI 89%
#extract posterior samples for the parameters of interest (e.g., mu_beta)
draws1 <- as.matrix(fit_T$draws("mu_beta"))
draws2 <- as.matrix(fit_NT$draws("mu_beta"))
# compute the difference between posterior samples
comparison <- draws1 - draws2
#convert the comparison to an MCMC object (required for HPDinterval)
comparison_mcmc <- as.mcmc(comparison)
#calculate the HPD interval (e.g., 89% interval)
hpd_result <- HPDinterval(comparison_mcmc, prob = 0.89)
print(hpd_result)
## lower upper
## var1 -0.17404 0.61367
## attr(,"Probability")
## [1] 0.89
#Visualize the distribution of differences
hist(comparison, breaks = 30, main = "Distribution of Differences",
xlab = "T - NT (mu_alpha)", col = "skyblue", border = "white")
abline(v = hpd_result, col = "red", lwd = 2, lty = 2) # Add HPD bounds
Calculate probability of direction
#compare posterior means: Probability mu_beta (Group 1 > Group 2)
pd_diff_mu_beta <- pd(draws1 - draws2 )
print(pd_diff_mu_beta)
## Probability of Direction
##
## Parameter | pd
## ------------------
## Posterior | 82.21%
Parameters from the computational model. Parametric, non-parametric, and bootstrapped tests.
No significant correlations were found between the questionnaire measures (IoU, NfC) and the model parameters.
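The ind_level_params_* objects used below appear to be loaded from VRCF_compmodel_outputs.RData rather than created in this document; a minimal sketch, assuming the cmdstanr fits above, of how per-participant posterior means (the $mean column used in the correlations) could be extracted:
#participant-level posterior summaries from the cmdstanr fits (one row per participant; sketch only,
#may differ from how the saved objects were actually produced)
ind_level_params_T_alpha  <- fit_T$summary("alpha")
ind_level_params_T_beta   <- fit_T$summary("beta")
ind_level_params_NT_alpha <- fit_NT$summary("alpha")
ind_level_params_NT_beta  <- fit_NT$summary("beta")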
#IoU
#IoU with Inverse temperature [THREAT]
cor.test(VRCF_data_all$IoU,ind_level_params_T_beta$mean)# with holm adjustment##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$IoU and ind_level_params_T_beta$mean
## t = -0.23083, df = 98, p-value = 0.8179
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2187280 0.1739029
## sample estimates:
## cor
## -0.02331143
cor.test(VRCF_data_all$IoU,ind_level_params_T_beta$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$IoU and ind_level_params_T_beta$mean
## S = 174433, p-value = 0.6445
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## -0.04670278
#bootstrapped CIs
ci_cor(
VRCF_data_all$IoU,
ind_level_params_T_beta$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: -0.02331143
## Confidence interval:
## 2.5% 97.5%
## -0.2066155 0.1630391
#IoU with Inverse temperature [NONTHREAT]
cor.test(VRCF_data_all$IoU,ind_level_params_NT_beta$mean)# with holm adjustment##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$IoU and ind_level_params_NT_beta$mean
## t = 0.31053, df = 98, p-value = 0.7568
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1660877 0.2263773
## sample estimates:
## cor
## 0.03135328
cor.test(VRCF_data_all$IoU,ind_level_params_NT_beta$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$IoU and ind_level_params_NT_beta$mean
## S = 168919, p-value = 0.8931
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## -0.01361313
#bootstrapped CIs
ci_cor(
VRCF_data_all$IoU,
ind_level_params_NT_beta$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: 0.03135328
## Confidence interval:
## 2.5% 97.5%
## -0.1684107 0.2358334
#IoU with Learning rate [THREAT]
cor.test(VRCF_data_all$IoU,ind_level_params_T_alpha$mean)##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$IoU and ind_level_params_T_alpha$mean
## t = 1.0114, df = 98, p-value = 0.3143
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.09671024 0.29222285
## sample estimates:
## cor
## 0.1016386
cor.test(VRCF_data_all$IoU,ind_level_params_T_alpha$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$IoU and ind_level_params_T_alpha$mean
## S = 149690, p-value = 0.3137
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.1017681
#bootstrapped CIs
ci_cor(
VRCF_data_all$IoU,
ind_level_params_T_alpha$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: 0.1016386
## Confidence interval:
## 2.5% 97.5%
## -0.1052463 0.2940826
#IoU with Learning rate [NONTHREAT]
cor.test(VRCF_data_all$IoU,ind_level_params_NT_alpha$mean)##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$IoU and ind_level_params_NT_alpha$mean
## t = 0.063709, df = 98, p-value = 0.9493
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1902232 0.2025974
## sample estimates:
## cor
## 0.006435399
cor.test(VRCF_data_all$IoU,ind_level_params_NT_alpha$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$IoU and ind_level_params_NT_alpha$mean
## S = 165898, p-value = 0.9645
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.004511679
#bootstrapped CIs
ci_cor(
VRCF_data_all$IoU,
ind_level_params_NT_alpha$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: 0.006435399
## Confidence interval:
## 2.5% 97.5%
## -0.1906500 0.2143363
#NfC
#NfC with Inverse temperature [THREAT]
cor.test(VRCF_data_all$NfC,ind_level_params_T_beta$mean)##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$NfC and ind_level_params_T_beta$mean
## t = -0.88166, df = 98, p-value = 0.3801
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2802447 0.1096185
## sample estimates:
## cor
## -0.0887096
cor.test(VRCF_data_all$NfC,ind_level_params_T_beta$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$NfC and ind_level_params_T_beta$mean
## S = 180395, p-value = 0.4146
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## -0.08247988
#bootstrapped CIs
ci_cor(
VRCF_data_all$NfC,
ind_level_params_T_beta$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: -0.0887096
## Confidence interval:
## 2.5% 97.5%
## -0.2807322 0.1085521
#NfC with Inverse temperature [NONTHREAT]
cor.test(VRCF_data_all$NfC,ind_level_params_NT_beta$mean)##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$NfC and ind_level_params_NT_beta$mean
## t = -0.081782, df = 98, p-value = 0.935
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2043475 0.1884630
## sample estimates:
## cor
## -0.008260921
cor.test(VRCF_data_all$NfC,ind_level_params_NT_beta$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$NfC and ind_level_params_NT_beta$mean
## S = 187240, p-value = 0.2207
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## -0.1235516
#bootstrapped CIs
ci_cor(
VRCF_data_all$NfC,
ind_level_params_NT_beta$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: -0.008260921
## Confidence interval:
## 2.5% 97.5%
## -0.2131626 0.2011812
#NfC with Learning rate [THREAT]
cor.test(VRCF_data_all$NfC,ind_level_params_T_alpha$mean)##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$NfC and ind_level_params_T_alpha$mean
## t = -0.26773, df = 98, p-value = 0.7895
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2222729 0.1702873
## sample estimates:
## cor
## -0.02703511
cor.test(VRCF_data_all$NfC,ind_level_params_T_alpha$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$NfC and ind_level_params_T_alpha$mean
## S = 161558, p-value = 0.7628
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.03055589
#bootstrapped CIs
ci_cor(
VRCF_data_all$NfC,
ind_level_params_T_alpha$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: -0.02703511
## Confidence interval:
## 2.5% 97.5%
## -0.2263589 0.1625859
#NfC with Learning rate [NONTHREAT]
cor.test(VRCF_data_all$NfC,ind_level_params_NT_alpha$mean)##
## Pearson's product-moment correlation
##
## data: VRCF_data_all$NfC and ind_level_params_NT_alpha$mean
## t = -0.33901, df = 98, p-value = 0.7353
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.2291028 0.1632910
## sample estimates:
## cor
## -0.03422481
cor.test(VRCF_data_all$NfC,ind_level_params_NT_alpha$mean,method = c("spearman"))##
## Spearman's rank correlation rho
##
## data: VRCF_data_all$NfC and ind_level_params_NT_alpha$mean
## S = 157644, p-value = 0.5933
## alternative hypothesis: true rho is not equal to 0
## sample estimates:
## rho
## 0.05403917
#bootstrapped CIs
ci_cor(
VRCF_data_all$NfC,
ind_level_params_NT_alpha$mean,
probs = c(0.025, 0.975),
method = c("pearson"),
type = c("bootstrap"),
boot_type = c("bca"),
R = 9999L,
)##
## Two-sided 95% bootstrap confidence interval for the true Pearson
## correlation coefficient based on 9999 bootstrap replications and the
## bca method
##
## Sample estimate: -0.03422481
## Confidence interval:
## 2.5% 97.5%
## -0.2265076 0.1648839
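The comments on the cor.test calls above mention a Holm adjustment, but none is applied in this chunk; a minimal sketch of correcting the family of eight Pearson p-values reported above with p.adjust:
#Holm correction across the eight Pearson correlations reported above
p_raw <- c(IoU_T_beta   = 0.8179, IoU_NT_beta   = 0.7568,
           IoU_T_alpha  = 0.3143, IoU_NT_alpha  = 0.9493,
           NfC_T_beta   = 0.3801, NfC_NT_beta   = 0.9350,
           NfC_T_alpha  = 0.7895, NfC_NT_alpha  = 0.7353)
round(p.adjust(p_raw, method = "holm"), 3)  # all remain > .05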