Analysis for tDCS EXER-NIBS study
Intro
Here, we perform the analysis for the EXER-NIBS project. This includes the TETs, the EEG (frequency bands and Lempel-Ziv complexity), the conventional behavioral analysis of the flanker task (RT and ACC), and the analysis of the parameters obtained with the drift diffusion modeling.
Pre Processing
Loading the packages
invisible({capture.output({
# Packages for correlations and the mixed models
library(lme4)
library(afex)
library(broom.mixed)
library(metan)
library(nlme)
library(rmcorr)
library(stats)
library(purrr)
# Packages to perform the ANOVA tables and contrast the results
library(car)
library(emmeans)
library(easystats)
library(dunn.test)
library(coin)
#Package for clustering
library(cluster)
library(factoextra)
# Load the epitools package for odds
library(epitools)
#Packages for loading online data
library(osfr) #To access to OSF
library("httr") #To load url links
#For the figures and tables
library(Rmisc)
library(PupillometryR)
library(smplot2)
library(ggeasy)
library(pheatmap)
library(reshape2)
library(kableExtra)
library(tidyverse)
library(writexl)
require(ggpubr)
library(dplyr)
library(knitr)
library(gt)
library(cowplot)
library(gridExtra)
#Packages for logistics
library(this.path) #To set working directory where the code file is located
#To set working directory where the code is located.
# setwd(this.path::here())
})})
Conventional Behavioral analysis
Loading the data from OSF
# Loading directly all data available in OSF
invisible({capture.output({
# Flanker task
url <- 'https://osf.io/ayk5r//?action=download'
filename <- 'MergedBehavioralData_Raw_anonymized.-.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
flankerData <- readxl::read_xlsx(filename, sheet= "MergedBehavioralData")
})})
Linear mixed models behavioral data
Reaction times
Here, we will perform the LMMs for the RTs of the correct responses with RT > 150 ms.
There are fixed effects of group, block, and congruence. We also compare the performance of the different models with or without fixed effects / interactions.
# Label the 5 blocks (104 trials each) for the 60 participants
sequence <- c("B1", "B2", "B3", "B4", "B5")
Period <- rep(sequence, each = 104)
flankerData$Block <- rep(Period, times = 60)
flankerData$Block <- factor(flankerData$Block)
flankerData$Stim <- factor(flankerData$Stim)
flankerData<-flankerData %>% mutate(Stim= recode(Stim, `A` = "Anodal", `B` = "Sham", `C` = "Cathodal"))
flankerData$Subject <- factor(flankerData$Subject)
flankerData <- filter(flankerData, RT > 150) #All stimuli
flankerData_filtered <- filter(flankerData, Error == 1)
# Simplify dataset. Calculate one RT per participant and block
flankerData_averaged <- flankerData_filtered %>%
group_by(Subject, Stim, Block, Comp) %>%
summarize(RT = mean(RT, na.rm = TRUE))
# Full model with all fixed effects and interactions
model_full <- lmer(RT ~ Stim * Block * Comp + (1 | Subject), data = flankerData_averaged)
# Model without interaction terms (note: Comp is not included among the main effects here)
model_main_effects <- lmer(RT ~ Stim + Block + (1 | Subject), data = flankerData_averaged)
# Null model (no fixed effects)
model_null <- lmer(RT ~ 1 + (1 | Subject), data = flankerData_averaged)
anova(model_null, model_main_effects, model_full)
## Data: flankerData_averaged
## Models:
## model_null: RT ~ 1 + (1 | Subject)
## model_main_effects: RT ~ Stim + Block + (1 | Subject)
## model_full: RT ~ Stim * Block * Comp + (1 | Subject)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## model_null 3 6537.2 6550.3 -3265.6 6531.2
## model_main_effects 9 6541.8 6581.4 -3261.9 6523.8 7.3704 6 0.2879
## model_full 32 6377.1 6517.8 -3156.6 6313.1 210.6793 23 <2e-16
##
## model_null
## model_main_effects
## model_full ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# anova(model_full)
anova_results <- anova(model_full)
anova_table <- as.data.frame(anova_results)
print(anova_table)
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## Stim 4475.909 2237.9547 2 57 1.4743443 2.375266e-01
## Block 9291.121 2322.7802 4 513 1.5302265 1.920554e-01
## Comp 358738.854 358738.8542 1 513 236.3339093 3.857451e-44
## Stim:Block 5114.929 639.3662 8 513 0.4212086 9.084306e-01
## Stim:Comp 1161.254 580.6271 2 513 0.3825119 6.823402e-01
## Block:Comp 4030.111 1007.5277 4 513 0.6637501 6.174094e-01
## Stim:Block:Comp 2548.246 318.5308 8 513 0.2098452 9.891767e-01
# eta_squared(model_full)
Comp <- flankerData_averaged %>%
group_by(Comp) %>%
summarize(RT = mean(RT, na.rm = TRUE))
ggplot(flankerData_averaged, aes(x = Block, y = RT, fill= Comp)) +
geom_boxplot() +
labs(title = "Distribution of RT across blocks and stimuli type",
x = "Block",
y = "Reaction times (ms)") +
theme_minimal()
Accuracy
Similar to the RTs, but for the accuracy.
#For ACC
# Calculate the % of correct answers for each block
ACC <- flankerData %>%
group_by(Subject, Block, Stim, Comp) %>%
summarize(ACC = mean(Error == 1) * 100)
# Full model with all fixed effects and interactions
model_full <- lmer(ACC ~ Stim * Block *Comp + (1 | Subject), data = ACC)
# Model without interaction terms
model_main_effects <- lmer(ACC ~ Stim + Block + Comp + (1 | Subject), data = ACC)
# Null model (no fixed effects)
model_null <- lmer(ACC ~ 1 + (1 | Subject), data = ACC)
anova(model_null, model_main_effects, model_full)
## Data: ACC
## Models:
## model_null: ACC ~ 1 + (1 | Subject)
## model_main_effects: ACC ~ Stim + Block + Comp + (1 | Subject)
## model_full: ACC ~ Stim * Block * Comp + (1 | Subject)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## model_null 3 2899.0 2912.2 -1446.5 2893.0
## model_main_effects 10 2781.3 2825.3 -1380.7 2761.3 131.739 7 <2e-16
## model_full 32 2806.8 2947.6 -1371.4 2742.8 18.449 22 0.6791
##
## model_null
## model_main_effects ***
## model_full
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# eta_squared(model_full)
anova_results <- anova(model_full)
anova_table <- as.data.frame(anova_results)
print(anova_table)
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## Stim 9.757144 4.878572 2 57 0.9597523 3.890894e-01
## Block 39.420504 9.855126 4 513 1.9387804 1.027139e-01
## Comp 693.449455 693.449455 1 513 136.4210031 4.116018e-28
## Stim:Block 26.498945 3.312368 8 513 0.6516359 7.341472e-01
## Stim:Comp 24.761072 12.380536 2 513 2.4355995 8.855703e-02
## Block:Comp 20.210826 5.052706 4 513 0.9940094 4.102971e-01
## Stim:Block:Comp 19.159387 2.394923 8 513 0.4711488 8.766345e-01
Comp_ACC <- ACC %>%
group_by(Comp) %>%
summarize(ACC = mean(ACC, na.rm = TRUE))
ggplot(ACC, aes(x = Block, y = ACC, fill= Comp)) +
geom_boxplot() +
labs(title = "Distribution of correct respons across blocks and stimuli type",
x = "Block",
y = "ACC (%)") +
theme_minimal()
Drift diffusion modeling parameters
Statistics for the parameters obtained from the DDM for the conflict resolution task:
invisible({capture.output({
#Load data
url <- 'https://osf.io/3gjyp//?action=download'
filename <- 'DDM_Online_Mixed.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
DDM <- readxl::read_xlsx(filename, sheet= "Sheet1")
})})
DDM <- DDM %>%
mutate(cond= recode (cond, A = "Anodal", B = "Sham", C = "Cathodal"))
DDM$cond <- as.factor(DDM$cond)
DDM$subject <- as.factor(DDM$subject)
DDM <- DDM[, c(1:7)]
DDM_Long <-pivot_longer(DDM, cols = 3:7, names_to = "Parameter", values_to = "Value")
# Amp: amplitude of automatic processes;
# tau: time to peak of automatic processes;
# drc: drift rate of the controlled processes;
# bnds: boundary separation;
# resMean: residual or non-decision time mean;
# cost: RMSE or cost value.
report(aov(amp ~ cond, data=DDM))
## The ANOVA (formula: amp ~ cond) suggests that:
##
## - The main effect of cond is statistically not significant and small (F(2, 56)
## = 0.94, p = 0.398; Eta2 = 0.03, 95% CI [0.00, 1.00])
##
## Effect sizes were labelled following Field's (2013) recommendations.
report(aov(tau ~ cond, data=DDM))
## The ANOVA (formula: tau ~ cond) suggests that:
##
## - The main effect of cond is statistically not significant and small (F(2, 56)
## = 0.72, p = 0.492; Eta2 = 0.03, 95% CI [0.00, 1.00])
##
## Effect sizes were labelled following Field's (2013) recommendations.
report(aov(drc ~ cond, data=DDM))
## The ANOVA (formula: drc ~ cond) suggests that:
##
## - The main effect of cond is statistically not significant and medium (F(2, 56)
## = 1.85, p = 0.167; Eta2 = 0.06, 95% CI [0.00, 1.00])
##
## Effect sizes were labelled following Field's (2013) recommendations.
report(aov(resMean ~ cond, data=DDM))
## The ANOVA (formula: resMean ~ cond) suggests that:
##
## - The main effect of cond is statistically not significant and very small (F(2,
## 56) = 0.28, p = 0.758; Eta2 = 9.82e-03, 95% CI [0.00, 1.00])
##
## Effect sizes were labelled following Field's (2013) recommendations.
report(aov(bnds ~ cond, data=DDM))
## The ANOVA (formula: bnds ~ cond) suggests that:
##
## - The main effect of cond is statistically not significant and medium (F(2, 56)
## = 2.47, p = 0.093; Eta2 = 0.08, 95% CI [0.00, 1.00])
##
## Effect sizes were labelled following Field's (2013) recommendations.
Visualization of each parameter for each group:
size <- theme(
title = element_text(size = 14),
axis.title.y = element_text(size = 10))
# Check the structure of the dataframe
# str(DDM_Long)
# Define custom y-axis limits and labels for each parameter
y_axis_settings <- list(
amp = list(limits = c(0, 40), label = "amplitude to peak of automatic processes", title = "Amplitude"),
tau = list(limits = c(0, 100), label = "time to peak of automatic processes", title = "Time to peak"),
drc = list(limits = c(0, 1.2), label = "drift rate of the controlled processes", title = "Drift rate "),
bnds = list(limits = c(0, 120), label = "boundary separation", title = "Boundary"),
resMean = list(limits = c(200, 500), label = "non-decision time mean", title = "Non-decision time "),
cost = list(limits = c(0, 15), label = "Cost", title = "Cost")
)
# Create a function to generate plots for each parameter with custom y-axis settings
create_plot <- function(parameter_name) {
# Filter data for the given parameter
parameter_data <- DDM_Long %>% filter(Parameter == parameter_name)
# Get custom y-axis settings
y_limits <- y_axis_settings[[parameter_name]]$limits
y_label <- y_axis_settings[[parameter_name]]$label
title <- y_axis_settings[[parameter_name]]$title
# Create the plot
p <- ggplot(parameter_data, aes(x = cond, y = Value, fill = cond)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
labs(title = title, y = y_label) +
theme_minimal() +
theme(legend.position = "none",
axis.title.x=element_blank()) +
ylim(y_limits)+
size
return(p)
}
# Get unique parameters
unique_parameters <- unique(DDM_Long$Parameter)
# Create a list to store plots
plots <- list()
# Generate plots for each parameter
for (parameter in unique_parameters) {
plots[[parameter]] <- create_plot(parameter)
}
# # Display the plots
# for (parameter in unique_parameters) {
# print(plots[[parameter]])
# }
# Build a shared legend for the combined figure
legend <- get_legend(ggplot(DDM_Long %>% filter(Parameter == unique_parameters[1]), aes(x = cond, y = Value, fill = cond)) +
geom_boxplot() +
theme(legend.position = "bottom"))
# Combine all the plots into a single plot with a common legend
combined_plot <- plot_grid(plotlist = plots, ncol = 2)
# Add the legend at the bottom
final_plot <- plot_grid(combined_plot, legend, ncol = 1, rel_heights = c(1, 0.1))
# Print the final combined plot
print(final_plot)
EEG Analysis
Frequencies
We first conducted the classic frequency analysis. ICA was conducted and components were rejected. Files were then segmented into pre (resting and task separately) and post (task and resting separately). Then we extracted the different frequency bands.
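The segmentation and band extraction were done during EEG preprocessing, outside this script. Purely as an illustration of the idea, the average power in a band could be estimated from a periodogram in base R along these lines (band_power is a hypothetical helper; the sampling rate and band edges are made up):
# Hypothetical sketch: mean periodogram power within a frequency band (Hz)
band_power <- function(x, fs, lo, hi) {
  sp <- spec.pgram(x, plot = FALSE) # raw periodogram (stats package)
  f <- sp$freq * fs                 # convert cycles/sample to Hz
  mean(sp$spec[f >= lo & f < hi])   # average power in [lo, hi)
}
band_power(rnorm(1024), fs = 256, lo = 8, hi = 13) # e.g., alpha band of a toy signal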
# load data
invisible({capture.output({
# Read data from OSF.
url <- 'https://osf.io/xgnrq//?action=download'
filename <- 'Delta_all.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
delta <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/akxet//?action=download'
filename <- 'alpha_all.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
alpha <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/8pae4//?action=download'
filename <- 'beta_all.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
beta <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/keqc6//?action=download'
filename <- 'theta_all.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
theta <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/wj9f4//?action=download'
filename <- 'gamma_all.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
gamma <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/dyz8j//?action=download'
filename <- 'Delta_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
delta_task <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/g46v3//?action=download'
filename <- 'Alpha_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
alpha_task <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/2njpf//?action=download'
filename <- 'Beta_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
beta_task <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/rt23u//?action=download'
filename <- 'Theta_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
theta_task <- readxl::read_xlsx(filename, sheet= "Feuil1")
url <- 'https://osf.io/mz3gn//?action=download'
filename <- 'gamma_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
gamma_task <- readxl::read_xlsx(filename, sheet= "Feuil1")
})})
We had 8 channels, so we calculated the average power across all channels to obtain the global power in each frequency range.
# Calculate mean pre (column 3-10) and post (columns 11-18)
# for resting pre
delta$pre <- rowMeans(delta[, 3:10])
alpha$pre <- rowMeans(alpha[, 3:10])
beta$pre <- rowMeans(beta[, 3:10])
theta$pre <- rowMeans(theta[, 3:10])
gamma$pre <- rowMeans(gamma[, 3:10])
# for task pre
delta_task$pre <- rowMeans(delta_task[, 3:10])
alpha_task$pre <- rowMeans(alpha_task[, 3:10])
beta_task$pre <- rowMeans(beta_task[, 3:10])
theta_task$pre <- rowMeans(theta_task[, 3:10])
gamma_task$pre <- rowMeans(gamma_task[, 3:10])
# Resting post
delta$post <- rowMeans(delta[, 11:18])
alpha$post <- rowMeans(alpha[, 11:18])
beta$post <- rowMeans(beta[, 11:18])
theta$post <- rowMeans(theta[, 11:18])
gamma$post <- rowMeans(gamma[, 11:18])
# for task post
delta_task$post <- rowMeans(delta_task[, 11:18])
alpha_task$post <- rowMeans(alpha_task[, 11:18])
beta_task$post <- rowMeans(beta_task[, 11:18])
theta_task$post <- rowMeans(theta_task[, 11:18])
gamma_task$post <- rowMeans(gamma_task[, 11:18])
We have tested several formulas to calculate the pre-post change ratio; the clearest and most straightforward is the post-test value minus the pre-test value, divided by the post-test value plus the pre-test value.
This formula can be interpreted as a way to normalize the change between the post and baseline values relative to their combined magnitude. This normalization method balances the difference with the total magnitude of the measures, providing a proportionate understanding of the change.
Range: The output will always be between -1 and 1. If the post value is much larger than the baseline, the result approaches 1. If the post value is much smaller than the baseline, the result approaches -1. If the post value is equal to the baseline, the result is 0.
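A quick toy check of these properties (made-up values, not study data), with the unbounded percent-change formula used further below shown for contrast:
pre <- c(10, 10, 10)
post <- c(10, 100, 1) # equal to, much larger than, much smaller than baseline
round((post - pre) / (pre + post), 3)
# [1]  0.000  0.818 -0.818 -> bounded in [-1, 1]; 0 when post == pre
round((post - pre) / pre * 100, 1)
# [1]   0 900 -90 -> the percent-change version is unbounded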
# Calculate post/(pre+post) for each participant
# Resting
delta <- delta %>%
mutate(post_ratio = post / (pre + post))
delta <- delta %>%
mutate(normalized = (post- pre) / (pre + post))
alpha <- alpha %>%
mutate(post_ratio = post / (pre + post))
alpha <- alpha %>%
mutate(normalized = (post- pre) / (pre + post))
beta <- beta %>%
mutate(post_ratio = post / (pre + post))
beta <- beta %>%
mutate(normalized = (post- pre) / (pre + post))
theta <- theta %>%
mutate(post_ratio = post / (pre + post))
theta <- theta %>%
mutate(normalized = (post- pre) / (pre + post))
gamma <- gamma %>%
mutate(post_ratio = post / (pre + post))
gamma <- gamma %>%
mutate(normalized = (post- pre) / (pre + post))
# For task
delta_task <- delta_task %>%
mutate(post_ratio = post / (pre + post))
delta_task <- delta_task %>%
mutate(normalized = (post- pre) / (pre + post))
alpha_task <- alpha_task %>%
mutate(post_ratio = post / (pre + post))
alpha_task <- alpha_task %>%
mutate(normalized = (post- pre) / (pre + post))
beta_task <- beta_task %>%
mutate(post_ratio = post / (pre + post))
beta_task <- beta_task %>%
mutate(normalized = (post- pre) / (pre + post))
theta_task <- theta_task %>%
mutate(post_ratio = post / (pre + post))
theta_task <- theta_task %>%
mutate(normalized = (post- pre) / (pre + post))
gamma_task <- gamma_task %>%
mutate(post_ratio = post / (pre + post))
gamma_task <- gamma_task %>%
mutate(normalized = (post- pre) / (pre + post))
# Calculate (post - pre) / pre*100) for each participant
delta <- delta %>%
mutate(diff = (post - pre) / pre*100)
alpha <- alpha %>%
mutate(diff = (post - pre) / pre*100)
beta <- beta %>%
mutate(diff = (post - pre) / pre*100)
theta <- theta %>%
mutate(diff = (post - pre) / pre*100)
gamma <- gamma %>% # gamma added for consistency with the task data below
mutate(diff = (post - pre) / pre*100)
# For the task
delta_task <- delta_task %>%
mutate(diff = (post - pre) / pre*100)
alpha_task <- alpha_task %>%
mutate(diff = (post - pre) / pre*100)
beta_task <- beta_task %>%
mutate(diff = (post - pre) / pre*100)
theta_task <- theta_task %>%
mutate(diff = (post - pre) / pre*100)
gamma_task <- gamma_task %>%
mutate(diff = (post - pre) / pre*100)
Frequencies resting
We visualize the data before performing any statistics. The change in power for the different frequency bands is shown below.
This is the visualization with the normalized score, (post - pre)/(pre + post):
# Create a raincloud plot
# Resting
delta_plot <- ggplot(delta, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = " Normalized score (AU)", title = "Delta") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
theta_plot <- ggplot(theta, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = "Normalized score (AU)", title = "Theta") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
alpha_plot <- ggplot(alpha, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = " Normalized score (AU)", title = "Alpha") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
beta_plot <- ggplot(beta, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(y = " Normalized score (AU)", title = "Beta") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
gamma_plot <- ggplot(gamma, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(y = " Normalized score (AU)", title = "Gamma") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
p1 <- ggarrange(delta_plot, theta_plot, alpha_plot, beta_plot, gamma_plot,
# labels = c("delta", "theta", "alpha", "beta", "gamma"),
ncol = 3, nrow = 2,
# common.legend = T,
# legend = "right",
align = "h")check outliers with this formula
check_outliers(
delta$normalized,
method = c("ci", "zscore_robust", "iqr"),
threshold = NULL, #Default threshold like this. If you want to specify, use: list('zscore' = 2, 'iqr' = 1.97))
ID = NULL,
verbose = TRUE
)
## 6 outliers detected: cases 4, 12, 36, 40, 47, 49.
## - Based on the following methods and thresholds: ci (3.09),
## zscore_robust (1.7), iqr (1).
## - For variable: delta$normalized.
##
## Note: Outliers were classified as such by at least half of the selected methods.
There are some potential outliers that we should keep in mind.
Frequencies resting stats
The design is 3 conditions and 1 measure, so we performed a one-way ANOVA (or its non-parametric version). We first check whether the ANOVA model is appropriate in one of the frequency ranges. These analyses were performed on the normalized (post - pre)/(pre + post) scores.
# Perform one-way ANOVA
# Resting
anova_delta <- aov(normalized ~ Condition, data = delta)
summary(anova_delta)
## Df Sum Sq Mean Sq F value Pr(>F)
## Condition 2 0.423 0.2117 1.697 0.193
## Residuals 55 6.862 0.1248
plot(check_model(anova_delta, panel = FALSE))
## $PP_CHECK
##
## $NCV
##
## $HOMOGENEITY
##
## $OUTLIERS
##
## $QQ
# anova_alpha <- aov(post_ratio ~ Condition, data = alpha)
# summary(anova_alpha)
# # plot(check_model(anova_alpha, panel = TRUE))
#
# anova_beta <- aov(post_ratio ~ Condition, data = beta)
# summary(anova_beta)
# # plot(check_model(anova_beta, panel = TRUE))
#
#
# anova_theta <- aov(post_ratio ~ Condition, data = theta)
# summary(anova_theta)
# # plot(check_model(anova_theta, panel = TRUE))
Given that the data do not fulfill the ANOVA assumptions, we perform the non-parametric versions with two tests:
The results are:
For delta
model_delta_resting <- oneway.test(normalized ~ Condition, data = delta, var.equal = FALSE)
model_delta_resting
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 1.9602, num df = 2.000, denom df = 35.881, p-value = 0.1556
model_delta_resting_krustal <- kruskal.test(normalized ~ Condition, data = delta)
model_delta_resting_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 5.1896, df = 2, p-value = 0.07466
For theta
model_theta_resting <- oneway.test(normalized ~ Condition, data = theta, var.equal = FALSE)
model_theta_resting
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 1.8, num df = 2.000, denom df = 36.293, p-value = 0.1798
model_theta_resting_krustal <- kruskal.test(normalized ~ Condition, data = theta)
model_theta_resting_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 2.9761, df = 2, p-value = 0.2258
For alpha
model_alpha_resting <- oneway.test(normalized ~ Condition, data = alpha, var.equal = FALSE)
model_alpha_resting
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 1.1867, num df = 2.00, denom df = 36.23, p-value = 0.3168
model_alpha_resting_krustal <- kruskal.test(normalized ~ Condition, data = alpha)
model_alpha_resting_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 0.82749, df = 2, p-value = 0.6612
For beta
model_beta_resting <- oneway.test(normalized ~ Condition, data = beta, var.equal = FALSE)
model_beta_resting
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 0.55261, num df = 2.000, denom df = 34.772, p-value = 0.5804
model_beta_resting_krustal <- kruskal.test(normalized ~ Condition, data = beta)
model_beta_resting_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 0.13396, df = 2, p-value = 0.9352
For gamma
model_gamma_resting <- oneway.test(normalized ~ Condition, data = gamma, var.equal = FALSE)
model_gamma_resting
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 0.36808, num df = 2.000, denom df = 34.321, p-value = 0.6948
model_gamma_resting_krustal <- kruskal.test(normalized ~ Condition, data = gamma)
model_gamma_resting_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 1.9401, df = 2, p-value = 0.3791
Frequencies task
# Task
delta_plot_task <- ggplot(delta_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = "Normalized score (AU)", title = "Delta") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
theta_plot_task <- ggplot(theta_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = "Normalized score (AU)", title = "Theta") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
alpha_plot_task <- ggplot(alpha_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = "Normalized score (AU)", title = "Alpha") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
beta_plot_task <- ggplot(beta_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = "postNormalized score (AU)", title = "Beta") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
gamma_plot_task <- ggplot(gamma_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
labs(x = "Condition", y = "Normalized score (AU)", title = "Gamma") +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5))+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
p2 <- ggarrange(delta_plot_task,theta_plot_task, alpha_plot_task, beta_plot_task, gamma_plot_task,
# labels = c("delta", "theta", "alpha", "beta", "gamma"),
ncol = 3, nrow = 2,
common.legend = T,
legend = "right",
align = "h")
# Add labels to the combined plots
labeled_combined_plot1 <- plot_grid(p1, labels = "Resting", label_size = 20, ncol = 1)
labeled_combined_plot2 <- plot_grid(p2, labels = "Task", label_size = 20, ncol = 1)
final_combined_plot <- plot_grid(labeled_combined_plot1, labeled_combined_plot2, ncol = 1)
# grid.arrange(p1, p2, ncol = 1)
Frequencies task stats
For the statistical analysis, we perform the non-parametric versions:
For delta
model_delta_task <- oneway.test(normalized ~ Condition, data = delta_task, var.equal = FALSE)
model_delta_task
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 0.37647, num df = 2.000, denom df = 33.607, p-value = 0.6891
model_delta_task_krustal <- kruskal.test(normalized ~ Condition, data = delta_task)
model_delta_task_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 1.3337, df = 2, p-value = 0.5133
For theta
model_theta_task <- oneway.test(normalized ~ Condition, data = theta_task, var.equal = FALSE)
model_theta_task
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 0.4551, num df = 2.000, denom df = 33.582, p-value = 0.6382
model_theta_task_krustal <- kruskal.test(normalized ~ Condition, data = theta_task)
model_theta_task_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 1.1407, df = 2, p-value = 0.5653
For alpha
model_alpha_task <- oneway.test(normalized ~ Condition, data = alpha_task, var.equal = FALSE)
model_alpha_task
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 1.4046, num df = 2.000, denom df = 34.873, p-value = 0.259
model_alpha_task_krustal <- kruskal.test(normalized ~ Condition, data = alpha_task)
model_alpha_task_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 4.9837, df = 2, p-value = 0.08276
For beta:
model_beta_task <- oneway.test(normalized ~ Condition, data = beta_task, var.equal = FALSE)
model_beta_task
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 1.9093, num df = 2.000, denom df = 34.801, p-value = 0.1634
model_beta_task_krustal <- kruskal.test(normalized ~ Condition, data = beta_task)
model_beta_task_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 3.7515, df = 2, p-value = 0.1532
For gamma
model_gamma_task <- oneway.test(normalized ~ Condition, data = gamma_task, var.equal = FALSE)
model_gamma_task
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 2.1266, num df = 2.00, denom df = 36.39, p-value = 0.1338
model_gamma_task_krustal <- kruskal.test(normalized ~ Condition, data = gamma_task)
model_gamma_task_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 3.1985, df = 2, p-value = 0.202
Lempel Ziv Complexity
Here, we calculated the indices considering the change from pre-post with the same formula as described above.
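For illustration only, an LZ76-style complexity count can be sketched in base R as follows; this is a hypothetical helper, not the code that produced the indices loaded below (those were computed during the EEG processing):
# Hypothetical sketch of an LZ76-style complexity count on a binary sequence
lz76_count <- function(bits) {
  s <- paste(bits, collapse = "")
  n <- nchar(s); i <- 1; n_words <- 0
  while (i <= n) {
    l <- 1
    # extend the current word while it already occurs in the preceding history
    while (i + l - 1 <= n &&
           grepl(substr(s, i, i + l - 1), substr(s, 1, i + l - 2), fixed = TRUE)) {
      l <- l + 1
    }
    n_words <- n_words + 1 # the word ending here is new
    i <- i + l
  }
  n_words
}
x <- rnorm(1000) # toy signal: binarize around the median, then count new words
lz76_count(as.integer(x > median(x)))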
invisible({capture.output({
# Read data from OSF.
url <- 'https://osf.io/4a7jx//?action=download'
filename <- 'lzc_resting.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
lzc_resting <- readxl::read_xlsx(filename, sheet= "Sheet1")
url <- 'https://osf.io/43kf7//?action=download'
filename <- 'lzsum_resting.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
lzsum_resting <- readxl::read_xlsx(filename, sheet= "Sheet1")
url <- 'https://osf.io/wfx6d//?action=download'
filename <- 'lzc_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
lzc_task <- readxl::read_xlsx(filename, sheet= "Sheet1")
url <- 'https://osf.io/czqtn//?action=download'
filename <- 'lzsum_task.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
lzsum_task <- readxl::read_xlsx(filename, sheet= "Sheet1")
})})
LZ resting
lzc_resting[, 3:300] <- lapply(lzc_resting[, 3:300], as.numeric) #transform the data point in numeric
lzc_resting$Condition <- factor(lzc_resting$Condition)
lzc_resting$pre <- rowMeans(lzc_resting[, 3:151], na.rm = TRUE)
lzc_resting$post <- rowMeans(lzc_resting[, 152:300],na.rm = TRUE)
lzc_resting <- lzc_resting[,c(1:2, 301:302)] # Simplify the data set for clarity
lzsum_resting[, 3:300] <- lapply(lzsum_resting[, 3:300], as.numeric)
lzsum_resting$Condition <- factor(lzsum_resting$Condition)
lzsum_resting$pre <- rowMeans(lzsum_resting[, 3:151], na.rm = TRUE)
lzsum_resting$post <- rowMeans(lzsum_resting[, 152:300],na.rm = TRUE)
lzsum_resting <- lzsum_resting[,c(1:2, 301:302)] # Simplify the data set for clarity
# Calculate ratios
lzc_resting <- lzc_resting %>%
mutate(post_ratio = post / (pre + post))
lzc_resting <- lzc_resting %>%
mutate(diff = (post - pre) / pre*100)
lzc_resting <- lzc_resting %>%
mutate(normalized = (post - pre) / (post + pre))
lzsum_resting <- lzsum_resting %>%
mutate(post_ratio = post / (pre + post))
lzsum_resting <- lzsum_resting %>%
mutate(diff = (post - pre) / pre*100)
lzsum_resting <- lzsum_resting %>%
mutate(normalized = (post - pre) / (post + pre))
Let's first visualize the difference in complexity in the 3 conditions:
lzc_plot_resting <- ggplot(lzc_resting, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
scale_y_continuous(limits = c(-0.2, 0.4))+
labs(y = "Normalized score (AU)") +
theme(legend.position = "none")+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
# lzc_plot_resting
lzsum_plot_resting <- ggplot(lzsum_resting, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
scale_y_continuous(limits = c(-0.2, 0.3))+
labs(y = "Normalized score (AU)") +
theme(legend.position = "none")+
easy_remove_x_axis(what = "text")+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
# lzsum_plot_resting
ggarrange(lzc_plot_resting, lzsum_plot_resting,
labels = c("Global LZc", "Global LZ sum"),
ncol = 2, nrow = 1,
common.legend = T,
legend = "right",
align = "h")
*Global because it is calculated from all channels.
LZ stats resting
Now the statistical tests. Given that there are some influential observations, a one-way ANOVA is not suitable for this data set (even if we can perform it). We can check the normality of the residuals here.
anova_lzc_resting <- aov(normalized ~ Condition, data = lzc_resting)
summary(anova_lzc_resting)
## Df Sum Sq Mean Sq F value Pr(>F)
## Condition 2 0.02469 0.012345 3.757 0.0296 *
## Residuals 55 0.18073 0.003286
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
diagnostic_plots <- plot(check_model(anova_lzc_resting, panel = FALSE))
performance(anova_lzc_resting)
## # Indices of model performance
##
## AIC | BIC | R2 | R2 (adj.) | RMSE | Sigma
## -------------------------------------------------------
## -162.133 | -153.891 | 0.120 | 0.088 | 0.056 | 0.057
diagnostic_plots[[1]]
diagnostic_plots[[2]]
diagnostic_plots[[3]]
diagnostic_plots[[4]]
diagnostic_plots[[5]]
We could check for potential outliers with different methods for both indices:
check_outliers(
lzc_resting$normalized,
method = c("ci", "zscore", "iqr"),
threshold = NULL, #Default threshold like this. If you want to specify, use: list('zscore' = 2, 'iqr' = 1.97))
ID = NULL,
verbose = TRUE
)
## 2 outliers detected: cases 9, 31.
## - Based on the following methods and thresholds: ci (3.09), zscore
## (1.7), iqr (1).
## - For variable: lzc_resting$normalized.
##
## Note: Outliers were classified as such by at least half of the selected methods.
check_outliers(
lzsum_resting$normalized,
method = c("ci", "zscore", "iqr"),
threshold = NULL, #Default threshold like this. If you want to specify, use: list('zscore' = 2, 'iqr' = 1.97))
ID = NULL,
verbose = TRUE
)
## 1 outlier detected: case 9.
## - Based on the following methods and thresholds: ci (3.09), zscore
## (1.7), iqr (1).
## - For variable: lzsum_resting$normalized.
##
## Note: Outliers were classified as such by at least half of the selected methods.
We see that observation 9 can potentially influence the results; we keep this in mind for the analysis.
We therefore perform the non-parametric versions.
For the LZc the results show:
model_lzc_resting <- oneway.test(normalized ~ Condition, data = lzc_resting, var.equal = FALSE)
model_lzc_resting
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 3.2355, num df = 2.000, denom df = 31.517, p-value = 0.05272
model_lzc_resting_krustal <- kruskal.test(normalized ~ Condition, data = lzc_resting)
model_lzc_resting_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 7.958, df = 2, p-value = 0.0187
#Permutation tests
# permutation_test_resting_lzc <- oneway_test(diff ~ Condition, data = lzc_resting, distribution = approximate(nresample = 1000))
# permutation_test
The post hoc analysis reveals that:
pairwise.t.test(lzc_resting$normalized, lzc_resting$Condition, p.adjust.method = "holm")
##
## Pairwise comparisons using t tests with pooled SD
##
## data: lzc_resting$normalized and lzc_resting$Condition
##
## Anodal Cathodal
## Cathodal 0.670 -
## Sham 0.043 0.071
##
## P value adjustment method: holm
dunn.test(lzc_resting$post_ratio, lzc_resting$Condition, method = "sidak")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 7.958, df = 2, p-value = 0.02
##
##
## Comparison of x by group
## (Šidák)
## Col Mean-|
## Row Mean | Anodal Cathodal
## ---------+----------------------
## Cathodal | -0.871847
## | 0.4718
## |
## Sham | -2.740093 -1.919439
## | 0.0092* 0.0802
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
The post hoc comparison for the Kruskal-Wallis test reveals a significant difference between the Anodal and Sham conditions.
However, as we saw before, there is potentially an outlier in the dataset: observation 9 (participant 1010, Sham), with a change of 88% from pre to post. It is debatable whether this reflects a real difference or an EEG signal problem. In any case, we check how the results change after removing this observation, as it is worth examining this possibility.
lzc_resting_filtered <- filter(lzc_resting, Participant != 1010)
model_lzc_resting_filtered <- oneway.test(normalized ~ Condition, data = lzc_resting_filtered, var.equal = FALSE)
model_lzc_resting_filtered
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 3.6922, num df = 2.000, denom df = 32.847, p-value = 0.03578
model_lzc_resting_filtered_krustal <- kruskal.test(normalized ~ Condition, data = lzc_resting_filtered)
model_lzc_resting_filtered_krustal
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 6.8117, df = 2, p-value = 0.03318
After removing the outlier, the results do not change; the one-way analysis of means (not assuming equal variances) is now also significant.
For the LZsum, the data likewise do not fulfill the criteria for an ANOVA, so we perform the same tests as before:
oneway.test(normalized ~ Condition, data = lzsum_resting, var.equal = FALSE)
##
## One-way analysis of means (not assuming equal variances)
##
## data: normalized and Condition
## F = 11.255, num df = 2.000, denom df = 36.123, p-value = 0.0001588
kruskal.test(normalized ~ Condition, data = lzsum_resting)
##
## Kruskal-Wallis rank sum test
##
## data: normalized by Condition
## Kruskal-Wallis chi-squared = 15.2, df = 2, p-value = 0.0005005
# permutation_test <- oneway_test(post_ratio ~ Condition, data = lzsum_resting, distribution = approximate(nresample = 1000))
# permutation_test
For this index, the results seem more robust, as all methods revealed a significant difference. The post hoc tests show:
pairwise.t.test(lzsum_resting$normalized, lzsum_resting$Condition, p.adjust.method = "holm")
##
## Pairwise comparisons using t tests with pooled SD
##
## data: lzsum_resting$normalized and lzsum_resting$Condition
##
## Anodal Cathodal
## Cathodal 0.01581 -
## Sham 0.00022 0.12221
##
## P value adjustment method: holm
dunn.test(lzsum_resting$normalized, lzsum_resting$Condition, method = "sidak")
## Kruskal-Wallis rank sum test
##
## data: x and group
## Kruskal-Wallis chi-squared = 15.1998, df = 2, p-value = 0
##
##
## Comparison of x by group
## (Šidák)
## Col Mean-|
## Row Mean | Anodal Cathodal
## ---------+----------------------
## Cathodal | -2.628707
## | 0.0128*
## |
## Sham | -3.831675 -1.235931
## | 0.0002* 0.2908
##
## alpha = 0.05
## Reject Ho if p <= alpha/2
With the pairwise comparisons using t tests with pooled SD, there is a difference between Sham and Anodal, as for the LZc index. With the Dunn test following the Kruskal-Wallis test, there is also a difference between Anodal and Cathodal.
LZ task
lzc_task[, 3:360] <- lapply(lzc_task[, 3:360], as.numeric) #transform the data point in numeric
lzc_task$Condition <- factor(lzc_task$Condition)
lzc_task$pre <- rowMeans(lzc_task[, 3:181], na.rm = TRUE)
lzc_task$post <- rowMeans(lzc_task[, 182:360],na.rm = TRUE)
lzc_task <- lzc_task[,c(1:2, 361:362)] # Simplify the data set for clarity
lzsum_task[, 3:360] <- lapply(lzsum_task[, 3:360], as.numeric)
lzsum_task$Condition <- factor(lzsum_task$Condition)
lzsum_task$pre <- rowMeans(lzsum_task[, 3:181], na.rm = TRUE)
lzsum_task$post <- rowMeans(lzsum_task[, 182:360],na.rm = TRUE)
lzsum_task <- lzsum_task[,c(1:2, 361:362)] # Simplify the data set for clarity
# Calculate ratios
lzc_task <- lzc_task %>%
mutate(post_ratio = post / (pre + post))
lzc_task <- lzc_task %>%
mutate(diff = (post - pre) / pre*100)
lzc_task <- lzc_task %>%
mutate(normalized = (post - pre) / (post+pre))
lzsum_task <- lzsum_task %>%
mutate(post_ratio = post / (pre + post))
lzsum_task <- lzsum_task %>%
mutate(diff = (post - pre) / pre*100)
lzsum_task <- lzsum_task %>%
mutate(normalized = (post - pre) / (post+pre))
lzc_plot_task <- ggplot(lzc_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
scale_y_continuous(limits = c(-0.2, 0.3))+
labs(x = "Condition", y = "Normalized Score (AU)") +
theme(legend.position = "none")+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
# lzc_plot_task
lzsum_plot_task <- ggplot(lzsum_task, aes(x = Condition, y = normalized, fill = Condition)) +
geom_flat_violin(position = position_nudge(x = 0.2), alpha = 0.7) +
geom_point(position = position_jitter(width = 0.2, height = 0), alpha = 0.3, size = 2) +
geom_boxplot(width = 0.1, outlier.shape = NA, alpha = 0.5) +
theme_minimal() +
scale_y_continuous(limits = c(-0.2, 0.3))+
labs(x = "Condition", y = "Normalized Score (AU)") +
theme(legend.position = "none")+
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
# lzsum_plot_task
ggarrange(lzc_plot_task, lzsum_plot_task,
labels = c("Global LZc", "Global LZ sum"),
ncol = 2, nrow = 1,
common.legend = T,
legend = "right",
align = "h")LZ stats task
Check for potential outliers in the LZc and LZ sum:
check_outliers(
lzc_task$diff,
method = c("ci", "zscore", "iqr"),
threshold = NULL, #Default threshold like this. If you want to specify, use: list('zscore' = 2, 'iqr' = 1.97))
ID = NULL,
verbose = TRUE
)
## 1 outlier detected: case 9.
## - Based on the following methods and thresholds: ci (3.09), zscore
## (1.7), iqr (1).
## - For variable: lzc_task$diff.
##
## Note: Outliers were classified as such by at least half of the selected methods.
check_outliers(
lzsum_task$diff,
method = c("ci", "zscore", "iqr"),
threshold = NULL, #Default threshold like this. If you want to specify, use: list('zscore' = 2, 'iqr' = 1.97))
ID = NULL,
verbose = TRUE
)
## 1 outlier detected: case 5.
## - Based on the following methods and thresholds: ci (3.09), zscore
## (1.7), iqr (1).
## - For variable: lzsum_task$diff.
##
## Note: Outliers were classified as such by at least half of the selected methods.
The ANOVA test shows:
#Perform anova
anova_lzc_task <- aov(diff ~ Condition, data = lzc_task)
summary(anova_lzc_task)
## Df Sum Sq Mean Sq F value Pr(>F)
## Condition 2 519 259.6 2.443 0.0963 .
## Residuals 55 5845 106.3
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
diagnostic_plots_lzc_task <- plot(check_model(anova_lzc_task, panel = FALSE))
# performance(anova_lzc_task)
# report(anova_lzc_task)
anova_lzsum_task <- aov(diff ~ Condition, data = lzsum_task)
summary(anova_lzsum_task)
## Df Sum Sq Mean Sq F value Pr(>F)
## Condition 2 75.1 37.57 1.115 0.335
## Residuals 55 1853.7 33.70
diagnostic_plots_lzsum_task <- plot(check_model(anova_lzsum_task, panel = FALSE))
# performance(anova_lzsum_task)
# report(anova_lzsum_task)
diagnostic_plots_lzc_task[[5]]
diagnostic_plots_lzsum_task[[5]]
The ANOVA does not seem to perform well for the LZc index (first graph), but it "could" be suitable for the LZsum (second graph). To keep consistency, we limit the analysis to the non-parametric methods.
For LZc:
oneway.test(post_ratio ~ Condition, data = lzc_task, var.equal = FALSE)
##
## One-way analysis of means (not assuming equal variances)
##
## data: post_ratio and Condition
## F = 2.2203, num df = 2.000, denom df = 35.537, p-value = 0.1234
kruskal.test(post_ratio ~ Condition, data = lzc_task)
##
## Kruskal-Wallis rank sum test
##
## data: post_ratio by Condition
## Kruskal-Wallis chi-squared = 3.0001, df = 2, p-value = 0.2231
# library(coin)
# permutation_test <- oneway_test(diff ~ Condition, data = lzc_task, distribution = approximate(nresample = 1000))
# permutation_test
For LZ sum:
oneway.test(diff ~ Condition, data = lzsum_task, var.equal = FALSE)
##
## One-way analysis of means (not assuming equal variances)
##
## data: diff and Condition
## F = 0.86919, num df = 2.000, denom df = 34.327, p-value = 0.4283
kruskal.test(diff ~ Condition, data = lzsum_task)
##
## Kruskal-Wallis rank sum test
##
## data: diff by Condition
## Kruskal-Wallis chi-squared = 0.52263, df = 2, p-value = 0.77
# permutation_test <- oneway_test(diff ~ Condition, data = lzsum_task, distribution = approximate(nresample = 1000))
# permutation_test
It seems that the results are quite robust for these indices during the task, and there are no differences between conditions.
All plots together
# all LZ plots together
ggarrange(lzc_plot_resting, lzsum_plot_resting, lzc_plot_task, lzsum_plot_task,
labels = c("LZc resting", "LZ sum resting", "LZc task", "LZ sum task"),
ncol = 2, nrow = 2,
heights = c(0.2, 0.2, 0.2, 0.2),
common.legend = T,
legend = "right",
align = "h")TET
The first step is to organize the data from the matrix obtained in Matlab after processing all the images. The variables "epoch" and "condition" were missing, so I add them in this step. The conditions were ordered in series for the participants as Sham (P1), Cathodal (P2), Anodal (P3), Sham (P4), and so on. Additionally, the dimensions boredom and mind-wandering are reversed, as higher values represent less boredom and fewer task-unrelated thoughts. So, I invert those values to match the direction of all dimensions.
set.seed(123)
#Read data
invisible({capture.output({
# Read data from OSF.
url <- 'https://osf.io/7fkbr//?action=download'
filename <- 'combined_data_TET.csv'
GET(url, write_disk(filename, overwrite = TRUE))
data <- read.csv(filename)
})})
# Generate the epochs from 1 to 60 numbers for each of the dimensions
sequence= rep(1:60, each = 8)
#Generate the conditions to the data set
tdcs = c("Sham", "Cathodal", "Anodal")
conditions=rep(tdcs, each = 480) # There are 480 points per participant: 8 dimensions x 60 epochs
# Reverse Boredom and Mind-wandering
data$Value[data$Dimension %in% c("Boredom", "Mindwandering")] <- 1 - data$Value[data$Dimension %in% c("Boredom", "Mindwandering")]
#Add the epochs and conditions to the data sets and convert into factors
data$Epoch <- sequence
data$Condition <- conditions
data$Condition <- factor(data$Condition)
data$Epoch <- factor(data$Epoch)
data$Dimension <- factor(data$Dimension)
data$Participant <- factor(data$Participant)
Raw Visualization
First, we are interested in visualizing all the dimensions together, averaged across participants, to check whether they follow a similar pattern across time.
size <- theme(
title = element_text(size = 18),
axis.title.y = element_text(size = 16))
aggregate_all <- data %>%
group_by(Epoch, Dimension) %>%
summarise_at(vars(Value),
list(average = mean))
aggregate_all <- filter(aggregate_all, !Dimension %in% c("Disconfort", "Stimulation")) # %in% avoids the silent recycling that != would apply here
ggplot(aggregate_all, aes(x = Epoch, y = average, color = Dimension)) +
geom_line() +
geom_point() +
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
sm_classic(legends = TRUE) +
easy_remove_x_axis(what = "text")+
labs(title = "Dimensions across epoch",
x = "Epoch",
y = "Value")+
size
It is also interesting to represent each dimension across time and group. The next plots represent the average of the 20 participants in each stimulation group.
Effort <-filter(data, Dimension =="Effort")
Effort <- Effort %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Value),
list(Value = mean))
Effortplot<-ggplot(Effort, aes(x = Epoch, y = Value, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Effort",
x = "Time",
y = "Value") +
theme_minimal()+
easy_remove_x_axis(what = "text")
Alertness <-filter(data, Dimension =="Alertness")
Alertness <- Alertness %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Value),
list(Value = mean))
Alertnessplot<-ggplot(Alertness, aes(x = Epoch, y = Value, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Alertness",
x = "Time",
y = "Value") +
theme_minimal()+
easy_remove_x_axis(what = "text")
Boredom <-filter(data, Dimension =="Boredom")
Boredom <- Boredom %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Value),
list(Value = mean))
Boredomplot<-ggplot(Boredom, aes(x = Epoch, y = Value, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Boredom",
x = "Time",
y = "Value") +
theme_minimal()+
easy_remove_x_axis(what = "text")
Execution <-filter(data, Dimension =="Execution")
Execution <- Execution %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Value),
list(Value = mean))
Executionplot<-ggplot(Execution, aes(x = Epoch, y = Value, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Execution",
x = "Time",
y = "Value") +
theme_minimal()+
easy_remove_x_axis(what = "text")
Fedup <-filter(data, Dimension =="Fedup")
Fedup <- Fedup %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Value),
list(Value = mean))
Fedupplot<-ggplot(Fedup, aes(x = Epoch, y = Value, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Fedup",
x = "Time",
y = "Value") +
theme_minimal()+
easy_remove_x_axis(what = "text")
Mindwandering <-filter(data, Dimension =="Mindwandering")
Mindwandering <- Mindwandering %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Value),
list(Value = mean))
Mindwanderingplot<-ggplot(Mindwandering, aes(x = Epoch, y = Value, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Mind-wandering",
x = "Time",
y = "Value") +
theme_minimal()+
easy_remove_x_axis(what = "text")
Dimensions <- ggarrange(Effortplot, Alertnessplot, Fedupplot, Boredomplot, Executionplot, Mindwanderingplot,
ncol =2 ,labels = c("A", "B", "C", "D" , "E", "F"),
nrow=3,
common.legend = T,
legend = "right",
align = "h")
Dimensions
K-means cluster
Understanding the k-means cluster processing
A “cluster” refers to a group of data points or observations that are similar to each other and dissimilar to data points in other clusters. Clustering is a process of grouping a set of objects in such a way that objects in the same group (cluster) are more similar to each other than to those in other groups. Similarity in the context of clustering refers to how alike or comparable two data points are to each other.
The metric used for calculating the similarity is the Euclidean Distance. It calculates the straight-line distance between two data points in the feature space. It is defined as the square root of the sum of squared differences between corresponding elements of two vectors.
K-means minimizes the sum of squared distances within each cluster, effectively making it equivalent to minimizing the variance within clusters. This objective function is optimized iteratively by alternating between two steps:
Assignment Step (Expectation Step): Assign each data point to the nearest cluster center (centroid) based on the squared Euclidean distance.
Update Step (Maximization Step): Recalculate the cluster centroids by taking the mean of all data points assigned to each cluster.
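To make the two steps concrete, here is a minimal base-R sketch of a single k-means iteration on toy 2-D data (hypothetical values; kmeans() simply iterates these steps until the assignments stop changing):
# One assignment + update iteration of k-means on toy data
set.seed(123)
toy <- rbind(matrix(rnorm(20, mean = 0), ncol = 2),
             matrix(rnorm(20, mean = 3), ncol = 2))
centers <- toy[sample(nrow(toy), 2), ] # random initial centroids
# Assignment step: nearest centroid by squared Euclidean distance
d2 <- sapply(1:2, function(k)
  rowSums((toy - matrix(centers[k, ], nrow(toy), 2, byrow = TRUE))^2))
cluster_id <- max.col(-d2) # index of the smallest distance
# Update step: recompute each centroid as the mean of its assigned points
centers <- rbind(colMeans(toy[cluster_id == 1, , drop = FALSE]),
                 colMeans(toy[cluster_id == 2, , drop = FALSE]))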
data_wide <- data %>%
pivot_wider(
names_from = c(Dimension), # Can accommodate more variables, if needed.
values_from = c(Value)
)
knitr::kable(head(data_wide[, 1:10]), "pipe", caption = "Data organization for the clustering")
| Participant | Epoch | Condition | Alertness | Boredom | Disconfort | Effort | Execution | Fedup | Mindwandering |
|---|---|---|---|---|---|---|---|---|---|
| P1 | 1 | Sham | 0.3021386 | 0.7300518 | 0.0657291 | 0.1407205 | 0.2853995 | 0.1128441 | 0.9520842 |
| P1 | 2 | Sham | 0.3031934 | 0.7340121 | 0.0656384 | 0.1457850 | 0.2830950 | 0.0884037 | 0.9451794 |
| P1 | 3 | Sham | 0.3092520 | 0.7259379 | 0.0584025 | 0.1401299 | 0.2784468 | 0.0855226 | 0.9387686 |
| P1 | 4 | Sham | 0.3092213 | 0.7339871 | 0.0608355 | 0.1390654 | 0.2708953 | 0.0849987 | 0.9317946 |
| P1 | 5 | Sham | 0.3128231 | 0.7327219 | 0.0661469 | 0.1422142 | 0.2677700 | 0.0807643 | 0.9239842 |
| P1 | 6 | Sham | 0.3182425 | 0.7411999 | 0.0657054 | 0.1478955 | 0.2653211 | 0.0796578 | 0.9185568 |
Determining the optimal number of clusters
We need to identify the optimal number of clusters. The choice could be driven by domain knowledge (context-specific information about the appropriate number of clusters) or by comparing the output of different techniques. As we only have 6 dimensions, our hypothesis was that there would be 2 clusters in the data, but we will compare this with the output of different methods.
The main methods are:
Elbow Method Plot the within-cluster sum of squares (WCSS) or total within-cluster variation against the number of clusters. The “elbow” point, where the rate of decrease in WCSS slows down, can be considered as the optimal number of clusters.
the Gap statistic This plot will typically display the Gap Statistic, the standard error, and the reference null distribution. Look for the number of clusters where the Gap Statistic reaches a maximum or starts to plateau. This point indicates the optimal number of clusters.
Interpret the Results: Examine the plot and identify the “elbow” or the point where the Gap Statistic starts to stabilize. This point suggests the optimal number of clusters for your dataset.
Choose the Number of Clusters: Based on the plot and your judgment, select the number of clusters that best balances model complexity and goodness of fit.
Silhouette Method Calculate the average silhouette width for different numbers of clusters. The number of clusters with the highest average silhouette width is considered optimal.
Caliński-Harabasz Score The score is defined as the ratio of the between-cluster dispersion to the within-cluster dispersion. Among all possible solutions, the cluster solution with the highest score should be chosen; a worked sketch follows below.
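As a hedged illustration of that definition, the score could be computed by hand as follows (a sketch in base R, not the clusterCrit implementation used later):
# Sketch of the Caliński-Harabasz score: between-cluster dispersion over
# within-cluster dispersion, each scaled by its degrees of freedom
ch_score_manual <- function(X, cl) {
  X <- as.matrix(X); n <- nrow(X); k <- length(unique(cl))
  grand <- colMeans(X)
  B <- sum(sapply(unique(cl), function(g)
    sum(cl == g) * sum((colMeans(X[cl == g, , drop = FALSE]) - grand)^2)))
  W <- sum(sapply(unique(cl), function(g)
    sum(sweep(X[cl == g, , drop = FALSE], 2, colMeans(X[cl == g, , drop = FALSE]))^2)))
  (B / (k - 1)) / (W / (n - k))
}
# e.g., ch_score_manual(iris[, 1:4], kmeans(iris[, 1:4], centers = 3)$cluster)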
data_cluster <- data_wide[, c(1:5, 7:11)] # Remove the Disconfort variable (column 6), which is not related to the task
fviz_nbclust(data_cluster[, 4:10], kmeans, method = "wss")+
labs(subtitle="Elbow Method")fviz_nbclust(data_cluster[, 4:10], # data
kmeans, # clustering algorithm
method = "silhouette")+ # silhouette
labs(subtitle="Silhouette Method")# fviz_nbclust(data_wide[, 4:9], # data,
# kmeans ,
# nstart = 25,
# method = "gap_stat")+
# labs(subtitle="Gap Methods")# Load necessary libraries
library(stats)
library(clusterCrit)
# Function to calculate the CH score for a given number of clusters
calculate_ch_score <- function(data, k) {
set.seed(123)
kmeans_result <- kmeans(data, centers = k)
clustering_vector <- kmeans_result$cluster
ch_score <- intCriteria(as.matrix(data), clustering_vector, "Calinski_Harabasz")
return(ch_score$calinski_harabasz)
}
# Range of clusters to evaluate
k_values <- 2:10
# Calculate CH scores for each number of clusters
ch_scores <- sapply(k_values, calculate_ch_score, data = data_cluster[, 4:10])
# Print CH scores for each k
for (i in 1:length(k_values)) {
cat("k =", k_values[i], ": CH Score =", ch_scores[i], "\n")
}
## k = 2 : CH Score = 1387.317
## k = 3 : CH Score = 1014.827
## k = 4 : CH Score = 905.3366
## k = 5 : CH Score = 822.4183
## k = 6 : CH Score = 735.928
## k = 7 : CH Score = 717.3059
## k = 8 : CH Score = 694.4478
## k = 9 : CH Score = 656.1889
## k = 10 : CH Score = 644.3053
# Plot the CH scores to visualize the optimal number of clusters
plot(k_values, ch_scores, type = "b", pch = 19, frame = FALSE,
xlab = "Number of Clusters (k)", ylab = "Caliński-Harabasz Score",
main = "Caliński-Harabasz Score vs. Number of Clusters")
# Optional: Highlight the optimal k
optimal_k <- k_values[which.max(ch_scores)]
points(optimal_k, max(ch_scores), col = "red", pch = 19)
text(optimal_k, max(ch_scores), labels = paste("Optimal k =", optimal_k), pos = 4, col = "red")
Given the output of the different methods and our prior hypothesis, we calculate the K-means solution for 2 clusters and add the cluster assignment to the data set:
km.out <- kmeans(data_cluster[, 4:10], centers=2,nstart=100)
km.out$centers
## Alertness Boredom Effort Execution Fedup Mindwandering Stimulation
## 1 0.5859025 0.3024246 0.2552470 0.8000843 0.2534677 0.3858119 0.2880954
## 2 0.3617499 0.7395494 0.3946377 0.7405903 0.5810178 0.6961024 0.3511051
# Total within-cluster sum of squares
# km.out$tot.withinss
# km.clusters<-km.out$cluster
data_cluster$Cluster <-km.out$cluster
# fviz_cluster(list(data=data_wide[, 4:9], cluster = km.clusters))
#
# cluster_stats <- cluster.stats(data_wide[, 4:9], clustering = km.clusters)
#
# ch_index <- cluster.stats(data_wide[, 4:9], clustering = km.clusters, criterion = "ch")
Intensity of each dimension for each cluster
Here, we characterize each cluster in terms of the intensity of each subjective dimension.
#Change Epoch time into 3 different phases (pre-stim, stimulation and post-stim)
segment_lengths <- c(12, 36, 12)
# Create a vector to specify the period for each observation
period <- rep(1:length(segment_lengths), times = segment_lengths)
data_cluster$Phase <- rep(period, 60)
data_cluster<-data_cluster %>% mutate(Phase= recode(Phase, `1` = "Pre", `2` = "During", `3` = "Post"))
data_cluster$Phase <- factor(data_cluster$Phase, levels=c("Pre", "During", "Post"))
# Transform data to long format with the cluster assignment. We create a data frame with Participant, Epoch, Dimension, Cluster and Value (intensity)
Cluster_long<- pivot_longer(data_cluster, cols = c(Alertness, Boredom, Effort, Execution, Fedup, Mindwandering, Stimulation), names_to = "Dimension",
values_to = "Value", values_drop_na = TRUE)
Cluster_long$Cluster <- as.factor(Cluster_long$Cluster)
sumrepdatClust <- summarySE(Cluster_long, measurevar = "Value",
groupvars=c("Dimension", "Cluster"))
ClusterRain <- ggplot(Cluster_long, aes(x = Dimension, y = Value, fill = Cluster)) +
geom_flat_violin(aes(fill = Cluster),position = position_nudge(x = .1, y = 0), adjust = 1.5, trim = FALSE, alpha = .5, colour = NA)+
# geom_point(aes(x = Dimension, y = Value, colour = Cluster),position = position_jitter(width = .05), size = .25, shape = 20)+
geom_boxplot(aes(x = Dimension, y = Value, fill = Cluster),outlier.shape = NA, alpha = .5, width = .3, colour = "black")+
# geom_line(data = sumrepdatClust, aes(x = Dimension, y = Value, group = Cluster, colour = Cluster), linetype = 3)+
geom_point(data = sumrepdatClust, aes(x = Dimension, y = Value, group = Cluster, colour = Cluster), shape = 18, size = 3) +
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
sm_classic() +
labs( y = "Intensity", x = element_blank(),
title ="Intensity of each dimension for each cluster",
)+
theme(legend.position="right")+
scale_x_discrete(guide = guide_axis(n.dodge=3))
ClusterRain
# If we want to divide by conditions
# ggplot(Cluster_long, aes(x = Dimension, y = Value, fill = Cluster)) +
# # geom_flat_violin(position = position_nudge(x = .2, y = 0), alpha = 0.8) +
# # geom_point(position = position_jitter(width = .15), size = 1, color = "black", alpha = 0.6) +
# geom_boxplot(width = 0.2, outlier.shape = NA, alpha = 0.5) +
# scale_fill_manual(values = c("#00AFBB", "#E7B800")) + # Choose your colors
# # theme_minimal() +
# labs(x = "Dimension", y = "Value", fill = "Cluster") +
# theme(legend.position = "right") + facet_grid(~Condition)
Then, we check whether the intensity differs between the clusters for each dimension, using a linear mixed model.
data_relative_cluster <- summarySE(Cluster_long, measurevar = "Value",
groupvars=c("Participant", "Dimension", "Cluster"))
data_relative_cluster <- data_relative_cluster[, c(1:3,5)]
# Define the dimensions to loop over
dimensions <- c("Alertness", "Boredom", "Effort", "Execution", "Fedup", "Mindwandering", "Stimulation")
# Initialize a list to store model results
model_results <- list()
for (dim in dimensions) {
# Subset the data for the current dimension
data_subset <- subset(data_relative_cluster, Dimension == dim)
# Fit the linear mixed-effects model
model <- lmer(Value ~ Cluster + (1 | Participant), data = data_subset)
# Extract and tidy the fixed effects summary
model_summary <- tidy(model, effects = "fixed") %>%
mutate(Dimension = dim) %>%
mutate(across(c(estimate, std.error, statistic, df, p.value), ~ round(., 2)))
# Append the results to the list
model_results[[dim]] <- model_summary
}
# Combine all model results into a single data frame
model_results_df <- bind_rows(model_results)
# Create a table for model results
kable(model_results_df, format = "html", table.attr = "class='table table-striped'") %>%
kable_styling(full_width = F, position = "left", bootstrap_options = c("striped", "hover", "condensed"))
| effect | term | estimate | std.error | statistic | df | p.value | Dimension |
|---|---|---|---|---|---|---|---|
| fixed | (Intercept) | 0.56 | 0.03 | 19.64 | 85.79 | 0.00 | Alertness |
| fixed | Cluster2 | -0.16 | 0.03 | -5.63 | 51.73 | 0.00 | Alertness |
| fixed | (Intercept) | 0.34 | 0.02 | 16.62 | 79.68 | 0.00 | Boredom |
| fixed | Cluster2 | 0.34 | 0.02 | 18.56 | 48.60 | 0.00 | Boredom |
| fixed | (Intercept) | 0.26 | 0.02 | 10.59 | 84.65 | 0.00 | Effort |
| fixed | Cluster2 | 0.13 | 0.02 | 5.42 | 52.52 | 0.00 | Effort |
| fixed | (Intercept) | 0.80 | 0.02 | 43.57 | 80.23 | 0.00 | Execution |
| fixed | Cluster2 | -0.06 | 0.02 | -3.96 | 51.18 | 0.00 | Execution |
| fixed | (Intercept) | 0.25 | 0.02 | 12.04 | 86.95 | 0.00 | Fedup |
| fixed | Cluster2 | 0.32 | 0.02 | 15.46 | 52.38 | 0.00 | Fedup |
| fixed | (Intercept) | 0.40 | 0.03 | 15.75 | 95.87 | 0.00 | Mindwandering |
| fixed | Cluster2 | 0.26 | 0.03 | 8.53 | 51.75 | 0.00 | Mindwandering |
| fixed | (Intercept) | 0.29 | 0.02 | 11.94 | 101.80 | 0.00 | Stimulation |
| fixed | Cluster2 | 0.01 | 0.03 | 0.45 | 50.31 | 0.66 | Stimulation |
# Descriptive
descriptive_stats <- list()
for (dim in dimensions) {
# Subset the data for the current dimension
data_subset <- subset(data_relative_cluster, Dimension == dim)
# Calculate descriptive statistics by Cluster
descriptive_by_cluster <- data_subset %>%
group_by(Cluster) %>%
summarise(
count = n(),
mean = mean(Value, na.rm = TRUE),
sd = sd(Value, na.rm = TRUE),
se = sd / sqrt(count),
ci_lower = mean - qt(1 - (0.05 / 2), count - 1) * se,
ci_upper = mean + qt(1 - (0.05 / 2), count - 1) * se
) %>%
mutate(Dimension = dim)
# Append the results to the list
descriptive_stats[[dim]] <- descriptive_by_cluster
}
# Combine all descriptive statistics into a single data frame
descriptive_stats_df <- bind_rows(descriptive_stats)
kable(descriptive_stats_df, format = "html", table.attr = "class='table table-striped'") %>%
kable_styling(full_width = F, position = "left", bootstrap_options = c("striped", "hover", "condensed"))
| Cluster | count | mean | sd | se | ci_lower | ci_upper | Dimension |
|---|---|---|---|---|---|---|---|
| 1 | 56 | 0.5632431 | 0.2000123 | 0.0267278 | 0.5096795 | 0.6168068 | Alertness |
| 2 | 51 | 0.3980468 | 0.2351118 | 0.0329222 | 0.3319205 | 0.4641730 | Alertness |
| 1 | 56 | 0.3329807 | 0.1464980 | 0.0195766 | 0.2937483 | 0.3722131 | Boredom |
| 2 | 51 | 0.6974278 | 0.1648454 | 0.0230830 | 0.6510643 | 0.7437914 | Boredom |
| 1 | 56 | 0.2585246 | 0.1509461 | 0.0201710 | 0.2181009 | 0.2989482 | Effort |
| 2 | 51 | 0.3850851 | 0.2225165 | 0.0311585 | 0.3225014 | 0.4476689 | Effort |
| 1 | 56 | 0.7982706 | 0.1185453 | 0.0158413 | 0.7665239 | 0.8300172 | Execution |
| 2 | 51 | 0.7323936 | 0.1614974 | 0.0226142 | 0.6869718 | 0.7778155 | Execution |
| 1 | 56 | 0.2531505 | 0.1243263 | 0.0166138 | 0.2198558 | 0.2864453 | Fedup |
| 2 | 51 | 0.5709010 | 0.1893869 | 0.0265195 | 0.5176351 | 0.6241669 | Fedup |
| 1 | 56 | 0.3933966 | 0.1870814 | 0.0249998 | 0.3432959 | 0.4434973 | Mindwandering |
| 2 | 51 | 0.6606683 | 0.1949900 | 0.0273041 | 0.6058264 | 0.7155101 | Mindwandering |
| 1 | 56 | 0.2852898 | 0.1731280 | 0.0231352 | 0.2389258 | 0.3316538 | Stimulation |
| 2 | 51 | 0.3002460 | 0.1858421 | 0.0260231 | 0.2479771 | 0.3525149 | Stimulation |
Independent-samples t-tests:
# for t-test
# Initialize a list to store t-test results
t_test_results <- list()
for (dim in dimensions) {
# Subset the data for the current dimension
data_subset <- subset(data_relative_cluster, Dimension == dim)
# Perform independent t-test between clusters
t_test <- t.test(Value ~ Cluster, data = data_subset)
# Create a summary data frame for t-test results
t_test_summary <- data.frame(
Dimension = dim,
t_statistic = t_test$statistic,
p_value = t_test$p.value,
conf_int_lower = t_test$conf.int[1],
conf_int_upper = t_test$conf.int[2],
mean_diff = t_test$estimate[1] - t_test$estimate[2]
)
# Append the results to the list
t_test_results[[dim]] <- t_test_summary
}
# Combine all t-test results into a single data frame
t_test_results_df <- bind_rows(t_test_results)
# Create a table for t-test results
kable(t_test_results_df, format = "html", table.attr = "class='table table-striped'") %>%
kable_styling(full_width = F, position = "left", bootstrap_options = c("striped", "hover", "condensed"))
| | Dimension | t_statistic | p_value | conf_int_lower | conf_int_upper | mean_diff |
|---|---|---|---|---|---|---|
| t…1 | Alertness | 3.8956129 | 0.0001784 | 0.0810506 | 0.2493421 | 0.1651964 |
| t…2 | Boredom | -12.0412277 | 0.0000000 | -0.4244915 | -0.3044029 | -0.3644472 |
| t…3 | Effort | -3.4097082 | 0.0009885 | -0.2003381 | -0.0527830 | -0.1265606 |
| t…4 | Execution | 2.3859273 | 0.0191046 | 0.0110331 | 0.1207208 | 0.0658769 |
| t…5 | Fedup | -10.1537913 | 0.0000000 | -0.3799704 | -0.2555305 | -0.3177505 |
| t…6 | Mindwandering | -7.2195986 | 0.0000000 | -0.3406918 | -0.1938515 | -0.2672717 |
| t…7 | Stimulation | -0.4295275 | 0.6684434 | -0.0840199 | 0.0541075 | -0.0149562 |
There were two options for this analysis: either to treat the cluster intensities as independent samples, or to treat the mean intensity of each participant as dependent samples. This is why we report both the independent-samples t-tests and the linear mixed models.
Relative frequency across the different phases of the task execution by groups
As there are three distinct phases of stimulation during the task, we are interested in the relative frequency of each cluster across these periods (6 min pre-stimulation, 18 min of tDCS stimulation, 6 min post-stimulation).
# Split data by participant, condition, phase and cluster to obtain the relative frequency
relative_complete_phases <- summarySE(Cluster_long, measurevar = "Value",
groupvars=c("Participant", "Condition", "Phase", "Cluster"))
relative_complete_phases <- relative_complete_phases[,1:5]
# To fill the NA with 0 to include phases with no representation of clusters
relative_complete_phases <- relative_complete_phases %>%
complete(Participant, Cluster, Phase, fill = list(N = 0))
# After filling N with 0, complete() leaves Condition as NA for the new rows,
# so we fill Condition within each participant:
relative_complete_phases <- relative_complete_phases %>%
group_by(Participant) %>%
fill(Condition, .direction = "downup") %>%
ungroup()
relative_complete_phases$Cluster <- as.factor(relative_complete_phases$Cluster)
Visualizing the relative frequency of each cluster during each phase and for every stimulation group. We represent both clusters, although this is somewhat redundant, as the frequency of one cluster is simply the complement of the other.
# Calculate the relative frequency for each cluster by participant and phase
relative_complete_phases <- relative_complete_phases %>%
group_by(Participant, Phase) %>%
mutate(Frequency = N / sum(N)) %>%
ungroup()
# filter each condition to plot the relative frequency
relative_anodal <- relative_complete_phases %>% filter(Condition == "Anodal")
relative_cathodal <- relative_complete_phases %>% filter(Condition == "Cathodal")
relative_sham <- relative_complete_phases %>% filter(Condition == "Sham")
# To represent only cluster 1
# relative_anodal <- relative_complete_phases %>% filter(Condition == "Anodal" & Cluster == "1")
# relative_cathodal <- relative_complete_phases %>% filter(Condition == "Cathodal" & Cluster == "1")
# relative_sham <- relative_complete_phases %>% filter(Condition == "Sham"& Cluster == "1")
relat_anodal <- ggplot(relative_anodal, aes(x = Phase, y = Frequency, fill = Cluster)) +
geom_flat_violin(aes(fill = Cluster),position = position_nudge(x = .1, y = 0), adjust = 1.5, trim = FALSE, alpha = .5, colour = NA)+
geom_point(aes(x = as.numeric(Phase)-.15, y = Frequency, colour = Cluster),position = position_jitter(width = .05), size = .25, shape = 20)+
geom_boxplot(aes(x = Phase, y = Frequency, fill = Cluster),outlier.shape = NA, alpha = .5, width = .1, colour = "black")+
# geom_line(data = sumrepdatAVG20, aes(x = as.numeric(Phase)+.1, y = Value, group = Cluster, colour = Condition), linetype = 3)+
# geom_point(data = sumrepdatAVG20, aes(x = as.numeric(Phase)+.1, y = Value, group = Cluster, colour = Condition), shape = 18) +
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
sm_classic(legends = FALSE)+
labs( y = "Relative frequency", x = element_blank(),
title ="Anodal")
relat_cathodal <- ggplot(relative_cathodal, aes(x = Phase, y = Frequency, fill = Cluster)) +
geom_flat_violin(aes(fill = Cluster),position = position_nudge(x = .1, y = 0), adjust = 1.5, trim = FALSE, alpha = .5, colour = NA)+
geom_point(aes(x = as.numeric(Phase)-.15, y = Frequency, colour = Cluster),position = position_jitter(width = .05), size = .25, shape = 20)+
geom_boxplot(aes(x = Phase, y = Frequency, fill = Cluster),outlier.shape = NA, alpha = .5, width = .6, colour = "black")+
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
sm_classic(legends = FALSE)+
# easy_remove_x_axis(what = "text")+
labs( y = element_blank(), x = element_blank(),
title ="Cathodal")
relat_sham <- ggplot(relative_sham, aes(x = Phase, y = Frequency, fill = Cluster)) +
geom_flat_violin(aes(fill = Cluster),position = position_nudge(x = .1, y = 0), adjust = 1.5, trim = FALSE, alpha = .5, colour = NA)+
geom_point(aes(x = as.numeric(Phase)-.15, y = Frequency, colour = Cluster),position = position_jitter(width = .05), size = .25, shape = 20)+
geom_boxplot(aes(x = Phase, y = Frequency, fill = Cluster),outlier.shape = NA, alpha = .5, width = .6, colour = "black")+
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
sm_classic(legends = FALSE)+
# easy_remove_x_axis(what = "text")+
labs( y = element_blank(), x = element_blank(),
title ="Sham")
Relat_plots <- ggarrange(relat_anodal, relat_cathodal, relat_sham,
ncol =3 ,labels = c("A", "B", "C"),
nrow=1,
common.legend = T,
legend = "right",
align = "h")
Relat_plots
descriptive_relative <- summarySE(relative_complete_phases, measurevar = "Frequency", groupvars=c("Condition", "Phase", "Cluster"))
descriptive_relative_2 <- relative_complete_phases %>%
group_by(Condition, Phase, Cluster) %>%
summarise(average = median(Frequency))
kable(descriptive_relative, format = "html", table.attr = "class='table table-striped'") %>%
kable_styling(full_width = F, position = "left", bootstrap_options = c("striped", "hover", "condensed"))
| Condition | Phase | Cluster | N | Frequency | sd | se | ci |
|---|---|---|---|---|---|---|---|
| Anodal | Pre | 1 | 20 | 0.7958333 | 0.3828133 | 0.0855997 | 0.1791622 |
| Anodal | Pre | 2 | 20 | 0.2041667 | 0.3828133 | 0.0855997 | 0.1791622 |
| Anodal | During | 1 | 20 | 0.6222222 | 0.4106683 | 0.0918282 | 0.1921987 |
| Anodal | During | 2 | 20 | 0.3777778 | 0.4106683 | 0.0918282 | 0.1921987 |
| Anodal | Post | 1 | 20 | 0.3000000 | 0.4421174 | 0.0988605 | 0.2069173 |
| Anodal | Post | 2 | 20 | 0.7000000 | 0.4421174 | 0.0988605 | 0.2069173 |
| Cathodal | Pre | 1 | 20 | 0.8916667 | 0.3071992 | 0.0686918 | 0.1437736 |
| Cathodal | Pre | 2 | 20 | 0.1083333 | 0.3071992 | 0.0686918 | 0.1437736 |
| Cathodal | During | 1 | 20 | 0.5986111 | 0.4228561 | 0.0945535 | 0.1979027 |
| Cathodal | During | 2 | 20 | 0.4013889 | 0.4228561 | 0.0945535 | 0.1979027 |
| Cathodal | Post | 1 | 20 | 0.3416667 | 0.4009309 | 0.0896509 | 0.1876415 |
| Cathodal | Post | 2 | 20 | 0.6583333 | 0.4009309 | 0.0896509 | 0.1876415 |
| Sham | Pre | 1 | 20 | 0.9500000 | 0.2236068 | 0.0500000 | 0.1046512 |
| Sham | Pre | 2 | 20 | 0.0500000 | 0.2236068 | 0.0500000 | 0.1046512 |
| Sham | During | 1 | 20 | 0.5638889 | 0.3224465 | 0.0721012 | 0.1509096 |
| Sham | During | 2 | 20 | 0.4361111 | 0.3224465 | 0.0721012 | 0.1509096 |
| Sham | Post | 1 | 20 | 0.1500000 | 0.3251181 | 0.0726986 | 0.1521599 |
| Sham | Post | 2 | 20 | 0.8500000 | 0.3251181 | 0.0726986 | 0.1521599 |
This figure illustrates the percentage of time spent in each cluster at participant level for the different phases of the protocol.
Here, we calculate the odds ratios of being in cluster 1 at each phase for each group.
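A minimal sketch of how these odds ratios could be computed with epitools (illustrative; it assumes the N column of relative_complete_phases holds the number of epochs each participant spent in each cluster):
# Odds ratios for cluster membership in the Post phase by condition
# (rows: conditions, first row is the reference; columns: cluster 1 vs 2)
post_tab <- xtabs(N ~ Condition + Cluster,
data = subset(relative_complete_phases, Phase == "Post"))
epitools::oddsratio(post_tab, method = "wald")$measure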
As an alternative, we can also use linear mixed models to analyze the relative frequency.
First, we compare the performance of the different models with the inclusion/exclusion of fixed effects and interactions.
# Calculate the Pre value of each point
relative_complete_phases <- relative_complete_phases %>%
group_by(Participant, Cluster, Condition) %>%
mutate(Pre_value = Frequency[Phase == "Pre"]) %>%
ungroup()
# Full model with all fixed effects and interactions
model_full <- lmer(Frequency ~ Cluster * Phase * Condition + Pre_value + (1 | Participant), data = relative_complete_phases)
# Model without interaction terms
model_no_interactions <- lmer(Frequency ~ Cluster + Phase + Condition + Pre_value + (1 | Participant), data = relative_complete_phases)
# Model without pre_value
model_no_prevalue <- lmer(Frequency ~ Cluster * Phase * Condition + (1 | Participant), data = relative_complete_phases)
# Model with only main effects
model_main_effects <- lmer(Frequency ~ Cluster + Phase + Condition + (1 | Participant), data = relative_complete_phases)
# Null model (no fixed effects)
model_null <- lmer(Frequency ~ 1 + (1 | Participant), data = relative_complete_phases)
anova(model_null, model_main_effects, model_no_prevalue, model_no_interactions, model_full)
## Data: relative_complete_phases
## Models:
## model_null: Frequency ~ 1 + (1 | Participant)
## model_main_effects: Frequency ~ Cluster + Phase + Condition + (1 | Participant)
## model_no_interactions: Frequency ~ Cluster + Phase + Condition + Pre_value + (1 | Participant)
## model_no_prevalue: Frequency ~ Cluster * Phase * Condition + (1 | Participant)
## model_full: Frequency ~ Cluster * Phase * Condition + Pre_value + (1 | Participant)
## npar AIC BIC logLik deviance Chisq Df
## model_null 3 448.99 460.65 -221.497 442.99
## model_main_effects 8 447.51 478.60 -215.755 431.51 11.483 5
## model_no_interactions 9 385.69 420.66 -183.844 367.69 63.823 1
## model_no_prevalue 20 319.08 396.80 -139.540 279.08 88.607 11
## model_full 21 211.95 293.56 -84.974 169.95 109.133 1
## Pr(>Chisq)
## model_null
## model_main_effects 0.04261 *
## model_no_interactions 1.361e-15 ***
## model_no_prevalue 3.124e-14 ***
## model_full < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
AIC(model_null, model_main_effects, model_no_prevalue, model_no_interactions, model_full)
## df AIC
## model_null 3 454.6477
## model_main_effects 8 473.6073
## model_no_prevalue 20 376.5923
## model_no_interactions 9 416.3531
## model_full 21 278.8928
BIC(model_null, model_main_effects, model_no_prevalue, model_no_interactions, model_full)
## df BIC
## model_null 3 466.3060
## model_main_effects 8 504.6961
## model_no_prevalue 20 454.3144
## model_no_interactions 9 451.3281
## model_full 21 360.5009
As we can see, the full model is significant and also has the lowest AIC/BIC values. We therefore inspect the output of this model and run the post-hoc contrasts.
anova_results <- anova(model_full)
anova_table <- as.data.frame(anova_results)
print(anova_table)
## Sum Sq Mean Sq NumDF DenDF F value
## Cluster 3.098077e+00 3.098077e+00 1 341 3.126062e+01
## Phase 9.571451e-14 4.785725e-14 2 341 4.828955e-13
## Condition 1.659090e-13 8.295448e-14 2 341 8.370381e-13
## Pre_value 1.196716e+01 1.196716e+01 1 341 1.207526e+02
## Cluster:Phase 2.275773e+01 1.137887e+01 2 341 1.148165e+02
## Cluster:Condition 8.097210e-01 4.048605e-01 2 341 4.085176e+00
## Phase:Condition 4.026457e-29 1.006614e-29 4 341 1.015707e-28
## Cluster:Phase:Condition 1.170353e+00 2.925883e-01 4 341 2.952313e+00
## Pr(>F)
## Cluster 4.629139e-08
## Phase 1.000000e+00
## Condition 1.000000e+00
## Pre_value 2.991089e-24
## Cluster:Phase 7.512164e-39
## Cluster:Condition 1.765027e-02
## Phase:Condition 1.000000e+00
## Cluster:Phase:Condition 2.021087e-02
emm <- emmeans(model_full, ~ Cluster * Phase * Condition)
# Pairwise comparisons with adjustment for multiple testing (Tukey method)
pairwise_comparisons <- pairs(emm, adjust = "Tukey")
# Displaying the results in a clean format
summary(pairwise_comparisons)
## contrast estimate SE df
## Cluster1 Pre Anodal - Cluster2 Pre Anodal 0.2363 0.1047 284
## Cluster1 Pre Anodal - Cluster1 During Anodal 0.1736 0.0996 284
## Cluster1 Pre Anodal - Cluster2 During Anodal 0.0626 0.1047 284
## Cluster1 Pre Anodal - Cluster1 Post Anodal 0.4958 0.0996 284
## Cluster1 Pre Anodal - Cluster2 Post Anodal -0.2596 0.1047 284
## Cluster1 Pre Anodal - Cluster1 Pre Cathodal -0.0383 0.0997 341
## Cluster1 Pre Anodal - Cluster2 Pre Cathodal 0.2745 0.1064 340
## Cluster1 Pre Anodal - Cluster1 During Cathodal 0.2548 0.0997 341
## Cluster1 Pre Anodal - Cluster2 During Cathodal -0.0185 0.1064 340
## Cluster1 Pre Anodal - Cluster1 Post Cathodal 0.5117 0.0997 341
## Cluster1 Pre Anodal - Cluster2 Post Cathodal -0.2755 0.1064 340
## Cluster1 Pre Anodal - Cluster1 Pre Sham -0.0616 0.0999 341
## Cluster1 Pre Anodal - Cluster2 Pre Sham 0.2978 0.1076 340
## Cluster1 Pre Anodal - Cluster1 During Sham 0.3246 0.0999 341
## Cluster1 Pre Anodal - Cluster2 During Sham -0.0883 0.1076 340
## Cluster1 Pre Anodal - Cluster1 Post Sham 0.7384 0.0999 341
## Cluster1 Pre Anodal - Cluster2 Post Sham -0.5022 0.1076 340
## Cluster2 Pre Anodal - Cluster1 During Anodal -0.0626 0.1047 284
## Cluster2 Pre Anodal - Cluster2 During Anodal -0.1736 0.0996 284
## Cluster2 Pre Anodal - Cluster1 Post Anodal 0.2596 0.1047 284
## Cluster2 Pre Anodal - Cluster2 Post Anodal -0.4958 0.0996 284
## Cluster2 Pre Anodal - Cluster1 Pre Cathodal -0.2745 0.1064 340
## Cluster2 Pre Anodal - Cluster2 Pre Cathodal 0.0383 0.0997 341
## Cluster2 Pre Anodal - Cluster1 During Cathodal 0.0185 0.1064 340
## Cluster2 Pre Anodal - Cluster2 During Cathodal -0.2548 0.0997 341
## Cluster2 Pre Anodal - Cluster1 Post Cathodal 0.2755 0.1064 340
## Cluster2 Pre Anodal - Cluster2 Post Cathodal -0.5117 0.0997 341
## Cluster2 Pre Anodal - Cluster1 Pre Sham -0.2978 0.1076 340
## Cluster2 Pre Anodal - Cluster2 Pre Sham 0.0616 0.0999 341
## Cluster2 Pre Anodal - Cluster1 During Sham 0.0883 0.1076 340
## Cluster2 Pre Anodal - Cluster2 During Sham -0.3246 0.0999 341
## Cluster2 Pre Anodal - Cluster1 Post Sham 0.5022 0.1076 340
## Cluster2 Pre Anodal - Cluster2 Post Sham -0.7384 0.0999 341
## Cluster1 During Anodal - Cluster2 During Anodal -0.1110 0.1047 284
## Cluster1 During Anodal - Cluster1 Post Anodal 0.3222 0.0996 284
## Cluster1 During Anodal - Cluster2 Post Anodal -0.4332 0.1047 284
## Cluster1 During Anodal - Cluster1 Pre Cathodal -0.2119 0.0997 341
## Cluster1 During Anodal - Cluster2 Pre Cathodal 0.1009 0.1064 340
## Cluster1 During Anodal - Cluster1 During Cathodal 0.0812 0.0997 341
## Cluster1 During Anodal - Cluster2 During Cathodal -0.1921 0.1064 340
## Cluster1 During Anodal - Cluster1 Post Cathodal 0.3381 0.0997 341
## Cluster1 During Anodal - Cluster2 Post Cathodal -0.4491 0.1064 340
## Cluster1 During Anodal - Cluster1 Pre Sham -0.2352 0.0999 341
## Cluster1 During Anodal - Cluster2 Pre Sham 0.1242 0.1076 340
## Cluster1 During Anodal - Cluster1 During Sham 0.1509 0.0999 341
## Cluster1 During Anodal - Cluster2 During Sham -0.2619 0.1076 340
## Cluster1 During Anodal - Cluster1 Post Sham 0.5648 0.0999 341
## Cluster1 During Anodal - Cluster2 Post Sham -0.6758 0.1076 340
## Cluster2 During Anodal - Cluster1 Post Anodal 0.4332 0.1047 284
## Cluster2 During Anodal - Cluster2 Post Anodal -0.3222 0.0996 284
## Cluster2 During Anodal - Cluster1 Pre Cathodal -0.1009 0.1064 340
## Cluster2 During Anodal - Cluster2 Pre Cathodal 0.2119 0.0997 341
## Cluster2 During Anodal - Cluster1 During Cathodal 0.1921 0.1064 340
## Cluster2 During Anodal - Cluster2 During Cathodal -0.0812 0.0997 341
## Cluster2 During Anodal - Cluster1 Post Cathodal 0.4491 0.1064 340
## Cluster2 During Anodal - Cluster2 Post Cathodal -0.3381 0.0997 341
## Cluster2 During Anodal - Cluster1 Pre Sham -0.1242 0.1076 340
## Cluster2 During Anodal - Cluster2 Pre Sham 0.2352 0.0999 341
## Cluster2 During Anodal - Cluster1 During Sham 0.2619 0.1076 340
## Cluster2 During Anodal - Cluster2 During Sham -0.1509 0.0999 341
## Cluster2 During Anodal - Cluster1 Post Sham 0.6758 0.1076 340
## Cluster2 During Anodal - Cluster2 Post Sham -0.5648 0.0999 341
## Cluster1 Post Anodal - Cluster2 Post Anodal -0.7554 0.1047 284
## Cluster1 Post Anodal - Cluster1 Pre Cathodal -0.5341 0.0997 341
## Cluster1 Post Anodal - Cluster2 Pre Cathodal -0.2213 0.1064 340
## Cluster1 Post Anodal - Cluster1 During Cathodal -0.2410 0.0997 341
## Cluster1 Post Anodal - Cluster2 During Cathodal -0.5144 0.1064 340
## Cluster1 Post Anodal - Cluster1 Post Cathodal 0.0159 0.0997 341
## Cluster1 Post Anodal - Cluster2 Post Cathodal -0.7713 0.1064 340
## Cluster1 Post Anodal - Cluster1 Pre Sham -0.5574 0.0999 341
## Cluster1 Post Anodal - Cluster2 Pre Sham -0.1980 0.1076 340
## Cluster1 Post Anodal - Cluster1 During Sham -0.1713 0.0999 341
## Cluster1 Post Anodal - Cluster2 During Sham -0.5841 0.1076 340
## Cluster1 Post Anodal - Cluster1 Post Sham 0.2426 0.0999 341
## Cluster1 Post Anodal - Cluster2 Post Sham -0.9980 0.1076 340
## Cluster2 Post Anodal - Cluster1 Pre Cathodal 0.2213 0.1064 340
## Cluster2 Post Anodal - Cluster2 Pre Cathodal 0.5341 0.0997 341
## Cluster2 Post Anodal - Cluster1 During Cathodal 0.5144 0.1064 340
## Cluster2 Post Anodal - Cluster2 During Cathodal 0.2410 0.0997 341
## Cluster2 Post Anodal - Cluster1 Post Cathodal 0.7713 0.1064 340
## Cluster2 Post Anodal - Cluster2 Post Cathodal -0.0159 0.0997 341
## Cluster2 Post Anodal - Cluster1 Pre Sham 0.1980 0.1076 340
## Cluster2 Post Anodal - Cluster2 Pre Sham 0.5574 0.0999 341
## Cluster2 Post Anodal - Cluster1 During Sham 0.5841 0.1076 340
## Cluster2 Post Anodal - Cluster2 During Sham 0.1713 0.0999 341
## Cluster2 Post Anodal - Cluster1 Post Sham 0.9980 0.1076 340
## Cluster2 Post Anodal - Cluster2 Post Sham -0.2426 0.0999 341
## Cluster1 Pre Cathodal - Cluster2 Pre Cathodal 0.3128 0.1084 284
## Cluster1 Pre Cathodal - Cluster1 During Cathodal 0.2931 0.0996 284
## Cluster1 Pre Cathodal - Cluster2 During Cathodal 0.0197 0.1084 284
## Cluster1 Pre Cathodal - Cluster1 Post Cathodal 0.5500 0.0996 284
## Cluster1 Pre Cathodal - Cluster2 Post Cathodal -0.2372 0.1084 284
## Cluster1 Pre Cathodal - Cluster1 Pre Sham -0.0233 0.0996 341
## Cluster1 Pre Cathodal - Cluster2 Pre Sham 0.3361 0.1097 339
## Cluster1 Pre Cathodal - Cluster1 During Sham 0.3628 0.0996 341
## Cluster1 Pre Cathodal - Cluster2 During Sham -0.0500 0.1097 339
## Cluster1 Pre Cathodal - Cluster1 Post Sham 0.7767 0.0996 341
## Cluster1 Pre Cathodal - Cluster2 Post Sham -0.4639 0.1097 339
## Cluster2 Pre Cathodal - Cluster1 During Cathodal -0.0197 0.1084 284
## Cluster2 Pre Cathodal - Cluster2 During Cathodal -0.2931 0.0996 284
## Cluster2 Pre Cathodal - Cluster1 Post Cathodal 0.2372 0.1084 284
## Cluster2 Pre Cathodal - Cluster2 Post Cathodal -0.5500 0.0996 284
## Cluster2 Pre Cathodal - Cluster1 Pre Sham -0.3361 0.1097 339
## Cluster2 Pre Cathodal - Cluster2 Pre Sham 0.0233 0.0996 341
## Cluster2 Pre Cathodal - Cluster1 During Sham 0.0500 0.1097 339
## Cluster2 Pre Cathodal - Cluster2 During Sham -0.3628 0.0996 341
## Cluster2 Pre Cathodal - Cluster1 Post Sham 0.4639 0.1097 339
## Cluster2 Pre Cathodal - Cluster2 Post Sham -0.7767 0.0996 341
## Cluster1 During Cathodal - Cluster2 During Cathodal -0.2733 0.1084 284
## Cluster1 During Cathodal - Cluster1 Post Cathodal 0.2569 0.0996 284
## Cluster1 During Cathodal - Cluster2 Post Cathodal -0.5303 0.1084 284
## Cluster1 During Cathodal - Cluster1 Pre Sham -0.3163 0.0996 341
## Cluster1 During Cathodal - Cluster2 Pre Sham 0.0430 0.1097 339
## Cluster1 During Cathodal - Cluster1 During Sham 0.0698 0.0996 341
## Cluster1 During Cathodal - Cluster2 During Sham -0.3431 0.1097 339
## Cluster1 During Cathodal - Cluster1 Post Sham 0.4837 0.0996 341
## Cluster1 During Cathodal - Cluster2 Post Sham -0.7570 0.1097 339
## Cluster2 During Cathodal - Cluster1 Post Cathodal 0.5303 0.1084 284
## Cluster2 During Cathodal - Cluster2 Post Cathodal -0.2569 0.0996 284
## Cluster2 During Cathodal - Cluster1 Pre Sham -0.0430 0.1097 339
## Cluster2 During Cathodal - Cluster2 Pre Sham 0.3163 0.0996 341
## Cluster2 During Cathodal - Cluster1 During Sham 0.3431 0.1097 339
## Cluster2 During Cathodal - Cluster2 During Sham -0.0698 0.0996 341
## Cluster2 During Cathodal - Cluster1 Post Sham 0.7570 0.1097 339
## Cluster2 During Cathodal - Cluster2 Post Sham -0.4837 0.0996 341
## Cluster1 Post Cathodal - Cluster2 Post Cathodal -0.7872 0.1084 284
## Cluster1 Post Cathodal - Cluster1 Pre Sham -0.5733 0.0996 341
## Cluster1 Post Cathodal - Cluster2 Pre Sham -0.2139 0.1097 339
## Cluster1 Post Cathodal - Cluster1 During Sham -0.1872 0.0996 341
## Cluster1 Post Cathodal - Cluster2 During Sham -0.6000 0.1097 339
## Cluster1 Post Cathodal - Cluster1 Post Sham 0.2267 0.0996 341
## Cluster1 Post Cathodal - Cluster2 Post Sham -1.0139 0.1097 339
## Cluster2 Post Cathodal - Cluster1 Pre Sham 0.2139 0.1097 339
## Cluster2 Post Cathodal - Cluster2 Pre Sham 0.5733 0.0996 341
## Cluster2 Post Cathodal - Cluster1 During Sham 0.6000 0.1097 339
## Cluster2 Post Cathodal - Cluster2 During Sham 0.1872 0.0996 341
## Cluster2 Post Cathodal - Cluster1 Post Sham 1.0139 0.1097 339
## Cluster2 Post Cathodal - Cluster2 Post Sham -0.2267 0.0996 341
## Cluster1 Pre Sham - Cluster2 Pre Sham 0.3594 0.1110 284
## Cluster1 Pre Sham - Cluster1 During Sham 0.3861 0.0996 284
## Cluster1 Pre Sham - Cluster2 During Sham -0.0267 0.1110 284
## Cluster1 Pre Sham - Cluster1 Post Sham 0.8000 0.0996 284
## Cluster1 Pre Sham - Cluster2 Post Sham -0.4406 0.1110 284
## Cluster2 Pre Sham - Cluster1 During Sham 0.0267 0.1110 284
## Cluster2 Pre Sham - Cluster2 During Sham -0.3861 0.0996 284
## Cluster2 Pre Sham - Cluster1 Post Sham 0.4406 0.1110 284
## Cluster2 Pre Sham - Cluster2 Post Sham -0.8000 0.0996 284
## Cluster1 During Sham - Cluster2 During Sham -0.4129 0.1110 284
## Cluster1 During Sham - Cluster1 Post Sham 0.4139 0.0996 284
## Cluster1 During Sham - Cluster2 Post Sham -0.8267 0.1110 284
## Cluster2 During Sham - Cluster1 Post Sham 0.8267 0.1110 284
## Cluster2 During Sham - Cluster2 Post Sham -0.4139 0.0996 284
## Cluster1 Post Sham - Cluster2 Post Sham -1.2406 0.1110 284
## t.ratio p.value
## 2.257 0.7132
## 1.744 0.9562
## 0.598 1.0000
## 4.981 0.0002
## -2.480 0.5477
## -0.384 1.0000
## 2.580 0.4716
## 2.556 0.4896
## -0.174 1.0000
## 5.133 0.0001
## -2.589 0.4648
## -0.616 1.0000
## 2.768 0.3384
## 3.249 0.1096
## -0.821 1.0000
## 7.391 <.0001
## -4.668 0.0006
## -0.598 1.0000
## -1.744 0.9562
## 2.480 0.5477
## -4.981 0.0002
## -2.580 0.4716
## 0.384 1.0000
## 0.174 1.0000
## -2.556 0.4896
## 2.589 0.4648
## -5.133 0.0001
## -2.768 0.3384
## 0.616 1.0000
## 0.821 1.0000
## -3.249 0.1096
## 4.668 0.0006
## -7.391 <.0001
## -1.060 0.9999
## 3.237 0.1144
## -4.139 0.0058
## -2.125 0.7996
## 0.948 1.0000
## 0.814 1.0000
## -1.806 0.9409
## 3.392 0.0729
## -4.220 0.0040
## -2.354 0.6431
## 1.155 0.9996
## 1.511 0.9894
## -2.435 0.5820
## 5.654 <.0001
## -6.282 <.0001
## 4.139 0.0058
## -3.237 0.1144
## -0.948 1.0000
## 2.125 0.7996
## 1.806 0.9409
## -0.814 1.0000
## 4.220 0.0040
## -3.392 0.0729
## -1.155 0.9996
## 2.354 0.6431
## 2.435 0.5820
## -1.511 0.9894
## 6.282 <.0001
## -5.654 <.0001
## -7.217 <.0001
## -5.358 <.0001
## -2.080 0.8260
## -2.418 0.5947
## -4.834 0.0003
## 0.160 1.0000
## -7.249 <.0001
## -5.579 <.0001
## -1.841 0.9305
## -1.714 0.9628
## -5.430 <.0001
## 2.428 0.5868
## -9.277 <.0001
## 2.080 0.8260
## 5.358 <.0001
## 4.834 0.0003
## 2.418 0.5947
## 7.249 <.0001
## -0.160 1.0000
## 1.841 0.9305
## 5.579 <.0001
## 5.430 <.0001
## 1.714 0.9628
## 9.277 <.0001
## -2.428 0.5868
## 2.886 0.2672
## 2.944 0.2357
## 0.182 1.0000
## 5.525 <.0001
## -2.189 0.7593
## -0.234 1.0000
## 3.064 0.1767
## 3.643 0.0333
## -0.456 1.0000
## 7.798 <.0001
## -4.230 0.0039
## -0.182 1.0000
## -2.944 0.2357
## 2.189 0.7593
## -5.525 <.0001
## -3.064 0.1767
## 0.234 1.0000
## 0.456 1.0000
## -3.643 0.0333
## 4.230 0.0039
## -7.798 <.0001
## -2.522 0.5155
## 2.581 0.4713
## -4.893 0.0002
## -3.176 0.1331
## 0.392 1.0000
## 0.700 1.0000
## -3.128 0.1507
## 4.856 0.0003
## -6.902 <.0001
## 4.893 0.0002
## -2.581 0.4713
## -0.392 1.0000
## 3.176 0.1331
## 3.128 0.1507
## -0.700 1.0000
## 6.902 <.0001
## -4.856 0.0003
## -7.264 <.0001
## -5.756 <.0001
## -1.951 0.8898
## -1.879 0.9176
## -5.471 <.0001
## 2.276 0.7000
## -9.245 <.0001
## 1.951 0.8898
## 5.756 <.0001
## 5.471 <.0001
## 1.879 0.9176
## 9.245 <.0001
## -2.276 0.7000
## 3.236 0.1145
## 3.879 0.0152
## -0.241 1.0000
## 8.036 <.0001
## -3.968 0.0110
## 0.241 1.0000
## -3.879 0.0152
## 3.968 0.0110
## -8.036 <.0001
## -3.718 0.0265
## 4.158 0.0054
## -7.445 <.0001
## 7.445 <.0001
## -4.158 0.0054
## -11.172 <.0001
##
## Degrees-of-freedom method: kenward-roger
## P value adjustment: tukey method for comparing a family of 18 estimates
Once again, this is somewhat redundant across clusters, since cluster 1 is the complement of cluster 2, so we can also perform the analysis considering only one of the clusters.
# Split the data by cluster (the models below are fitted on cluster 2)
cluster1 <- data_filter(relative_complete_phases, Cluster == "1")
cluster2 <- data_filter(relative_complete_phases, Cluster == "2")
# Full model with all fixed effects and interactions
model_full <- lmer(Frequency ~ Phase * Condition + Pre_value + (1 | Participant), data = cluster2)
# Model without interaction terms
model_no_interactions <- lmer(Frequency ~ Phase + Condition + Pre_value + (1 | Participant), data = cluster2)
# Model without pre_value
model_no_prevalue <- lmer(Frequency ~ Phase * Condition + (1 | Participant), data = cluster2)
# Model with only main effects
model_main_effects <- lmer(Frequency ~ Phase + Condition + (1 | Participant), data = cluster2)
# Null model (no fixed effects)
model_null <- lmer(Frequency ~ 1 + (1 | Participant), data = cluster2)
anova(model_null, model_main_effects, model_no_prevalue, model_no_interactions, model_full)
## refitting model(s) with ML (instead of REML)
## Data: cluster2
## Models:
## model_null: Frequency ~ 1 + (1 | Participant)
## model_main_effects: Frequency ~ Phase + Condition + (1 | Participant)
## model_no_interactions: Frequency ~ Phase + Condition + Pre_value + (1 | Participant)
## model_no_prevalue: Frequency ~ Phase * Condition + (1 | Participant)
## model_full: Frequency ~ Phase * Condition + Pre_value + (1 | Participant)
## npar AIC BIC logLik deviance Chisq Df Pr(>Chisq)
## model_null 3 219.86 229.44 -106.929 213.858
## model_main_effects 7 132.64 154.99 -59.319 118.639 95.22 4 < 2.2e-16
## model_no_interactions 8 101.22 126.76 -42.609 85.219 33.42 1 7.426e-09
## model_no_prevalue 11 132.97 168.09 -55.486 110.972 0.00 3 1
## model_full 12 101.55 139.87 -38.776 77.552 33.42 1 7.426e-09
##
## model_null
## model_main_effects ***
## model_no_interactions ***
## model_no_prevalue
## model_full ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# model_cluster1 <- lmer(Frequency ~ Phase * Condition + Pre_value + (1 | Participant), data = cluster1)
# model_cluster2 <- lmer(Frequency ~ Phase * Condition + (1 | Participant), data = cluster2)
anova_results_cluster2 <- anova(model_full)
anova_table_cluster2<- as.data.frame(anova_results_cluster2)
print(anova_table_cluster2)
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## Phase 11.3788662 5.6894331 2 114 73.119735 3.692377e-21
## Condition 0.2197719 0.1098859 2 56 1.412237 2.521385e-01
## Pre_value 3.2480870 3.2480870 1 56 41.743923 2.678353e-08
## Phase:Condition 0.5851766 0.1462942 4 114 1.880150 1.186857e-01
emm_cluster2 <- emmeans(model_full, ~ Phase * Condition)
# Pairwise comparisons with adjustment for multiple testing (Tukey method)
pairwise_comparisons_cluster2 <- pairs(emm_cluster2, adjust = "tukey")
# Displaying the results in a clean format
summary(pairwise_comparisons_cluster2)
## contrast estimate SE df t.ratio p.value
## Pre Anodal - During Anodal -0.1736 0.0882 114 -1.968 0.5685
## Pre Anodal - Post Anodal -0.4958 0.0882 114 -5.621 <.0001
## Pre Anodal - Pre Cathodal 0.0383 0.1002 153 0.382 1.0000
## Pre Anodal - During Cathodal -0.2548 0.1002 153 -2.542 0.2205
## Pre Anodal - Post Cathodal -0.5117 0.1002 153 -5.106 <.0001
## Pre Anodal - Pre Sham 0.0616 0.1009 152 0.610 0.9995
## Pre Anodal - During Sham -0.3246 0.1009 152 -3.218 0.0408
## Pre Anodal - Post Sham -0.7384 0.1009 152 -7.322 <.0001
## During Anodal - Post Anodal -0.3222 0.0882 114 -3.653 0.0114
## During Anodal - Pre Cathodal 0.2119 0.1002 153 2.114 0.4677
## During Anodal - During Cathodal -0.0812 0.1002 153 -0.810 0.9964
## During Anodal - Post Cathodal -0.3381 0.1002 153 -3.374 0.0256
## During Anodal - Pre Sham 0.2352 0.1009 152 2.332 0.3299
## During Anodal - During Sham -0.1509 0.1009 152 -1.497 0.8556
## During Anodal - Post Sham -0.5648 0.1009 152 -5.601 <.0001
## Post Anodal - Pre Cathodal 0.5341 0.1002 153 5.329 <.0001
## Post Anodal - During Cathodal 0.2410 0.1002 153 2.405 0.2888
## Post Anodal - Post Cathodal -0.0159 0.1002 153 -0.159 1.0000
## Post Anodal - Pre Sham 0.5574 0.1009 152 5.527 <.0001
## Post Anodal - During Sham 0.1713 0.1009 152 1.698 0.7467
## Post Anodal - Post Sham -0.2426 0.1009 152 -2.406 0.2886
## Pre Cathodal - During Cathodal -0.2931 0.0882 114 -3.322 0.0317
## Pre Cathodal - Post Cathodal -0.5500 0.0882 114 -6.235 <.0001
## Pre Cathodal - Pre Sham 0.0233 0.1000 154 0.233 1.0000
## Pre Cathodal - During Sham -0.3628 0.1000 154 -3.629 0.0113
## Pre Cathodal - Post Sham -0.7767 0.1000 154 -7.769 <.0001
## During Cathodal - Post Cathodal -0.2569 0.0882 114 -2.913 0.0965
## During Cathodal - Pre Sham 0.3163 0.1000 154 3.164 0.0475
## During Cathodal - During Sham -0.0698 0.1000 154 -0.698 0.9988
## During Cathodal - Post Sham -0.4837 0.1000 154 -4.838 0.0001
## Post Cathodal - Pre Sham 0.5733 0.1000 154 5.734 <.0001
## Post Cathodal - During Sham 0.1872 0.1000 154 1.872 0.6337
## Post Cathodal - Post Sham -0.2267 0.1000 154 -2.268 0.3683
## Pre Sham - During Sham -0.3861 0.0882 114 -4.377 0.0009
## Pre Sham - Post Sham -0.8000 0.0882 114 -9.069 <.0001
## During Sham - Post Sham -0.4139 0.0882 114 -4.692 0.0003
##
## Degrees-of-freedom method: kenward-roger
## P value adjustment: tukey method for comparing a family of 9 estimates
Our comparisons of interest are the differences between the groups at each phase: Pre Anodal - Pre Cathodal (0.9904); Pre Anodal - Pre Sham (0.8795); Pre Cathodal - Pre Sham (0.9999); During Anodal - During Cathodal (1.0000); During Anodal - During Sham (0.9997); During Cathodal - During Sham (1.0000); Post Anodal - Post Cathodal (1.0000); Post Anodal - Post Sham (0.9194); Post Cathodal - Post Sham (0.8110).
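These within-phase contrasts can also be requested directly by conditioning on Phase (a sketch, reusing the cluster-2 model fitted above; the Tukey adjustment then applies within each phase separately, so the adjusted p-values differ from the full 9-level family reported above):
emm_by_phase <- emmeans(model_full, ~ Condition | Phase)
pairs(emm_by_phase, adjust = "tukey")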
We can also see the relative frequency of each cluster across time for the different groups.
# Obtain the number of data points within each cluster for all participants at every epoch
stacked_data <- summarySE(Cluster_long, measurevar = "Value",
groupvars=c("Epoch", "Condition", "Cluster"))
# Filter the conditions for the stacked plots
stacked_anodal <- stacked_data %>% filter(Condition == "Anodal")
stacked_anodal <- stacked_anodal[,c(1,3:4)]
stacked_cathodal <- stacked_data %>% filter(Condition == "Cathodal")
stacked_cathodal <- stacked_cathodal[,c(1,3:4)]
stacked_sham <- stacked_data %>% filter(Condition == "Sham")
stacked_sham <- stacked_sham[,c(1,3:4)]
# Obtain the relative frequency of each cluster at each epoch
anodal_relative <- stacked_anodal %>%
group_by(Epoch) %>%
mutate(Frequency = N / sum(N)) %>%
ungroup()
anodal_relative$Epoch <- as.numeric(anodal_relative$Epoch)
plot_stacked_anodal <- ggplot(anodal_relative, aes(x = Epoch, y = Frequency, fill = Cluster)) +
geom_area(position = 'stack') +
sm_classic(legends = FALSE)+
easy_remove_x_axis(what = "text")+
labs(title = "Anodal",
x = "Task execution",
y = "Relative Frequency of the cluster") +
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
geom_vline(xintercept = 12, linetype="dotted",
color = "blue", size=1.5)+
geom_vline(xintercept = 48, linetype="dotted",
color = "red", size=1.5)
# Obtain the relative frequency of each cluster at each epoch
cathodal_relative <- stacked_cathodal %>%
group_by(Epoch) %>%
mutate(Frequency = N / sum(N)) %>%
ungroup()
cathodal_relative$Epoch <- as.numeric(cathodal_relative$Epoch)
plot_stacked_cathodal <- ggplot(cathodal_relative, aes(x = Epoch, y = Frequency, fill = Cluster)) +
geom_area(position = 'stack') +
sm_classic(legends = FALSE)+
easy_remove_x_axis(what = "text")+
labs(title = "Cathodal",
x = "Task execution",
y = element_blank()) +
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
geom_vline(xintercept = 12, linetype="dotted",
color = "blue", size=1.5)+
geom_vline(xintercept = 48, linetype="dotted",
color = "red", size=1.5)
sham_relative <- stacked_sham %>%
group_by(Epoch) %>%
mutate(Frequency = N / sum(N)) %>%
ungroup()
sham_relative$Epoch <- as.numeric(sham_relative$Epoch)
plot_stacked_sham <- ggplot(sham_relative, aes(x = Epoch, y = Frequency, fill = Cluster)) +
geom_area(position = 'stack') +
sm_classic(legends = FALSE)+
easy_remove_x_axis(what = "text")+
labs(title = "Sham",
x = "Task execution",
y = element_blank()) +
scale_colour_brewer(palette = "Set2")+
scale_fill_brewer(palette = "Set2")+
geom_vline(xintercept = 12, linetype="dotted",
color = "blue", size=1.5)+
geom_vline(xintercept = 48, linetype="dotted",
color = "red", size=1.5)
Stacked_plots <- ggarrange(plot_stacked_anodal, plot_stacked_cathodal, plot_stacked_sham,
ncol =3 ,labels = c("A", "B", "C"),
nrow=1,
common.legend = T,
legend = "right",
align = "h")
Stacked_plots
Stacked plots for the relative contribution of each cluster across time and for each condition separately. Vertical lines delimit the different phases. This also helps to understand the odds ratios of being in cluster 1 at each phase for each group.
Sensation of stimulation
TET trace
We also asked participants about the sensation of stimulation during the 30 minutes; thus, we are going to check whether the sensation of stimulation across time differed between the 3 groups.
data_wide <- data %>%
pivot_wider(
names_from = c(Dimension), # Can accommodate more variables, if needed.
values_from = c(Value)
)
stimulation <- data_wide[,c(1:3,11)]
stimulation <- stimulation %>%
group_by(Epoch, Condition) %>%
summarise_at(vars(Stimulation),
list(Stimulation = mean))
Stimulationplot<-ggplot(stimulation, aes(x = Epoch, y = Stimulation, color = Condition)) +
geom_line() +
geom_point() +
labs(title = "Sensation of stimulation",
x = "Time",
y = "Intensity",
) +
theme_minimal()+
easy_remove_x_axis(what = "text")+
geom_vline(xintercept = 12, linetype="dotted",
color = "blue", size=1.5)+
geom_vline(xintercept = 48, linetype="dotted",
color = "red", size=1.5)
Stimulationplot
#Change Epoch time into 3 different phases (pre-stim, stimulation and post-stim)
segment_lengths <- c(12, 36, 12)
data_wide <- data %>%
pivot_wider(
names_from = c(Dimension), # Can accommodate more variables, if needed.
values_from = c(Value)
)
stimulation <- data_wide[,c(1:3,11)]
# Create a vector to specify the period for each observation
period <- rep(1:length(segment_lengths), times = segment_lengths)
stimulation$Phase <- rep(period, 60)
stimulation<-stimulation %>% mutate(Phase= recode(Phase, `1` = "Pre", `2` = "During", `3` = "Post"))
stimulation$Phase <- factor(stimulation$Phase, levels=c("Pre", "During", "Post"))
model_stimulation <- lmer(Stimulation ~ Condition*Phase + (1|Participant), data =stimulation)
anova(model_stimulation)
## Type III Analysis of Variance Table with Satterthwaite's method
## Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## Condition 0.2717 0.1358 2 57.8 4.4557 0.01586 *
## Phase 24.7297 12.3649 2 3534.0 405.5876 < 2.2e-16 ***
## Condition:Phase 2.4008 0.6002 4 3534.0 19.6876 4.831e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
It seems that, at the subjective level, there are differences between the groups and phases. Our main interest is to check for a difference between phases within each group, i.e., whether the sensation increases in the “During” stimulation phase compared to Pre. It should increase in all groups, which would support the effectiveness of the blinding.
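Since the blinding check only requires the phase contrasts within each group, these can also be obtained directly by conditioning on Condition (a sketch; the full 9-level family is reported below for completeness):
emm_within <- emmeans(model_stimulation, ~ Phase | Condition)
pairs(emm_within, adjust = "tukey")  # Pre vs During vs Post, within each group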
emm_stimulation <- emmeans(model_stimulation, ~ Phase * Condition)
# Pairwise comparisons with adjustment for multiple testing (Tukey method)
pairwise_comparisons_stimulation<- pairs(emm_stimulation, adjust = "tukey")
# Displaying the results in a clean format
summary(pairwise_comparisons_stimulation)
## contrast estimate SE df z.ratio p.value
## Pre Anodal - During Anodal -0.19333 0.0130 Inf -14.856 <.0001
## Pre Anodal - Post Anodal -0.00640 0.0159 Inf -0.402 1.0000
## Pre Anodal - Pre Cathodal 0.12981 0.0490 Inf 2.650 0.1660
## Pre Anodal - During Cathodal -0.08387 0.0481 Inf -1.743 0.7194
## Pre Anodal - Post Cathodal 0.11918 0.0490 Inf 2.433 0.2656
## Pre Anodal - Pre Sham 0.13721 0.0490 Inf 2.801 0.1148
## Pre Anodal - During Sham -0.01432 0.0481 Inf -0.298 1.0000
## Pre Anodal - Post Sham 0.04181 0.0490 Inf 0.854 0.9951
## During Anodal - Post Anodal 0.18693 0.0130 Inf 14.364 <.0001
## During Anodal - Pre Cathodal 0.32314 0.0481 Inf 6.718 <.0001
## During Anodal - During Cathodal 0.10946 0.0472 Inf 2.318 0.3308
## During Anodal - Post Cathodal 0.31252 0.0481 Inf 6.497 <.0001
## During Anodal - Pre Sham 0.33054 0.0481 Inf 6.871 <.0001
## During Anodal - During Sham 0.17901 0.0472 Inf 3.791 0.0047
## During Anodal - Post Sham 0.23515 0.0481 Inf 4.888 <.0001
## Post Anodal - Pre Cathodal 0.13621 0.0490 Inf 2.781 0.1209
## Post Anodal - During Cathodal -0.07747 0.0481 Inf -1.610 0.7995
## Post Anodal - Post Cathodal 0.12559 0.0490 Inf 2.564 0.2018
## Post Anodal - Pre Sham 0.14361 0.0490 Inf 2.932 0.0812
## Post Anodal - During Sham -0.00792 0.0481 Inf -0.165 1.0000
## Post Anodal - Post Sham 0.04821 0.0490 Inf 0.984 0.9874
## Pre Cathodal - During Cathodal -0.21368 0.0130 Inf -16.419 <.0001
## Pre Cathodal - Post Cathodal -0.01062 0.0159 Inf -0.666 0.9992
## Pre Cathodal - Pre Sham 0.00740 0.0490 Inf 0.151 1.0000
## Pre Cathodal - During Sham -0.14413 0.0481 Inf -2.996 0.0679
## Pre Cathodal - Post Sham -0.08799 0.0490 Inf -1.797 0.6846
## During Cathodal - Post Cathodal 0.20305 0.0130 Inf 15.602 <.0001
## During Cathodal - Pre Sham 0.22108 0.0481 Inf 4.596 0.0001
## During Cathodal - During Sham 0.06955 0.0472 Inf 1.473 0.8683
## During Cathodal - Post Sham 0.12568 0.0481 Inf 2.613 0.1811
## Post Cathodal - Pre Sham 0.01802 0.0490 Inf 0.368 1.0000
## Post Cathodal - During Sham -0.13351 0.0481 Inf -2.775 0.1226
## Post Cathodal - Post Sham -0.07737 0.0490 Inf -1.580 0.8161
## Pre Sham - During Sham -0.15153 0.0130 Inf -11.643 <.0001
## Pre Sham - Post Sham -0.09539 0.0159 Inf -5.985 <.0001
## During Sham - Post Sham 0.05614 0.0130 Inf 4.313 0.0005
##
## Degrees-of-freedom method: asymptotic
## P value adjustment: tukey method for comparing a family of 9 estimates
descriptive_stimulation <- stimulation %>%
group_by(Condition, Phase) %>%
summarise(average = mean(Stimulation))
tES questionnaire
set.seed(123)
#Read data
invisible({capture.output({
# Read data from OSF.
url <- 'https://osf.io/rghv9//?action=download'
filename <- 'tES.xlsx'
GET(url, write_disk(filename, overwrite = TRUE))
tES <- readxl::read_xlsx(filename)
})})
tES$Group <- factor(tES$Group)
tES<-tES %>% mutate(Group= recode(Group, `A` = "Anodal", `B` = "Sham", `C` = "Cathodal"))
Results of the Kruskal-Wallis test for the tES survey items:
# Perform Kruskal-Wallis tests and store results
result_total <- kruskal.test(Total ~ Group, data = tES)
result_itching <- kruskal.test(Itching ~ Group, data = tES)
result_pain <- kruskal.test(Pain ~ Group, data = tES)
result_burning <- kruskal.test(Burning ~ Group, data = tES)
result_warm <- kruskal.test(Warm ~ Group, data = tES)
result_pinching <- kruskal.test(Pinching ~ Group, data = tES)
result_metallic <- kruskal.test(Metallic ~ Group, data = tES)
result_fatigue <- kruskal.test(Fatigue ~ Group, data = tES)
# Extract relevant information
test_names <- c("Total Discomfort", "Itching", "Pain", "Burning Sensation", "Warm", "Pinching", "Metallic/Iron", "Fatigue")
statistic <- c(result_total$statistic, result_itching$statistic, result_pain$statistic, result_burning$statistic,
result_warm$statistic, result_pinching$statistic, result_metallic$statistic, result_fatigue$statistic)
p_value <- c(result_total$p.value, result_itching$p.value, result_pain$p.value, result_burning$p.value,
result_warm$p.value, result_pinching$p.value, result_metallic$p.value, result_fatigue$p.value)
# Combine results into a data frame
results_df <- data.frame(
Test = test_names,
Statistic = round(statistic, 2),
P_Value = round(p_value, 4)
)
# Print the results table
print(results_df)
## Test Statistic P_Value
## 1 Total Discomfort 0.66 0.7184
## 2 Itching 2.93 0.2314
## 3 Pain 2.77 0.2503
## 4 Burning Sensation 2.64 0.2678
## 5 Warm 0.82 0.6637
## 6 Pinching 0.53 0.7677
## 7 Metallic/Iron 3.97 0.1375
## 8 Fatigue 0.06 0.9693
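The same battery of tests can also be written more compactly by looping over the item names (a sketch, assuming the column names used above):
items <- c("Total", "Itching", "Pain", "Burning", "Warm", "Pinching", "Metallic", "Fatigue")
kw_table <- t(sapply(items, function(v) {
  kt <- kruskal.test(reformulate("Group", response = v), data = tES)
  c(statistic = round(unname(kt$statistic), 2), p.value = round(kt$p.value, 4))
}))
kw_table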
The proportion of participants reporting Real vs Sham stimulation
tES <- tES[, c(1:2, 11)]
proportions <- tES %>%
filter(!is.na(RealVsPlacebo)) %>%
group_by(Group, RealVsPlacebo) %>%
summarise(count = n()) %>%
group_by(Group) %>%
mutate(proportion = count / sum(count)) %>%
ungroup()
# Spread the proportions to get a table format
proportion_table <- proportions %>%
select(-count) %>%
spread(key = RealVsPlacebo, value = proportion, fill = 0)
# Print the proportion table
print(proportion_table)
## # A tibble: 3 × 4
## Group `1` `2` `3`
## <fct> <dbl> <dbl> <dbl>
## 1 Anodal 0.95 0.05 0
## 2 Sham 0.895 0.0526 0.0526
## 3 Cathodal 0.75 0.1 0.15
# Create the contingency table for RealVsPlacebo value 1 across the groups
contingency_table <- table(tES$Group, tES$RealVsPlacebo == 1)
# Print the contingency table
print(contingency_table)
##
## FALSE TRUE
## Anodal 1 19
## Sham 2 17
## Cathodal 5 15
# # Perform chi-squared test (not used: the expected counts for the FALSE column
# # are below 5, so the chi-squared approximation is unreliable; Fisher's exact test is used instead)
# chi_squared_test <- chisq.test(contingency_table)
#
# # Print the result of the chi-squared test
# print(chi_squared_test)
#
# Perform Fisher's exact test
fisher_test <- fisher.test(contingency_table)
# Print the result of the Fisher's exact test
print(fisher_test)
##
## Fisher's Exact Test for Count Data
##
## data: contingency_table
## p-value = 0.1959
## alternative hypothesis: two.sided
sessioninfo::session_info()%>%
details::details(
summary = 'Current session info',
open = TRUE
)
Current session info
─ Session info ───────────────────────────────────────────────────────────────
setting value
version R version 4.2.2 (2022-10-31)
os macOS Ventura 13.6
system aarch64, darwin20
ui X11
language (EN)
collate fr_CH.UTF-8
ctype fr_CH.UTF-8
tz Europe/Zurich
date 2024-09-14
pandoc 2.19.2 @ /Applications/RStudio.app/Contents/MacOS/quarto/bin/tools/ (via rmarkdown)
─ Packages ───────────────────────────────────────────────────────────────────
package * version date (UTC) lib source
abind 1.4-5 2016-07-21 [1] CRAN (R 4.2.0)
afex * 1.2-1 2023-01-09 [1] CRAN (R 4.2.0)
assertthat 0.2.1 2019-03-21 [1] CRAN (R 4.2.0)
backports 1.4.1 2021-12-13 [1] CRAN (R 4.2.0)
base64enc 0.1-3 2015-07-28 [1] CRAN (R 4.2.0)
bayestestR * 0.13.0 2022-09-18 [1] CRAN (R 4.2.0)
bitops 1.0-7 2021-04-24 [1] CRAN (R 4.2.0)
bookdown 0.32 2023-01-17 [1] CRAN (R 4.2.0)
boot 1.3-28 2021-05-03 [1] CRAN (R 4.2.2)
broom 1.0.3 2023-01-25 [1] CRAN (R 4.2.0)
broom.mixed * 0.2.9.5 2024-04-01 [1] CRAN (R 4.2.3)
bslib 0.4.2 2022-12-16 [1] CRAN (R 4.2.0)
cachem 1.0.6 2021-08-19 [1] CRAN (R 4.2.0)
car * 3.1-1 2022-10-19 [1] CRAN (R 4.2.0)
carData * 3.0-5 2022-01-06 [1] CRAN (R 4.2.0)
caTools 1.18.2 2021-03-28 [1] CRAN (R 4.2.0)
cellranger 1.1.0 2016-07-27 [1] CRAN (R 4.2.0)
checkmate 2.1.0 2022-04-21 [1] CRAN (R 4.2.0)
cli 3.6.0 2023-01-09 [1] CRAN (R 4.2.0)
clipr 0.8.0 2022-02-22 [1] CRAN (R 4.2.0)
cluster * 2.1.6 2023-12-01 [1] CRAN (R 4.2.3)
clusterCrit * 1.3.0 2023-11-23 [1] CRAN (R 4.2.3)
coda 0.19-4 2020-09-30 [1] CRAN (R 4.2.0)
codetools 0.2-18 2020-11-04 [1] CRAN (R 4.2.2)
coin * 1.4-3 2023-09-27 [1] CRAN (R 4.2.0)
colorspace 2.1-0 2023-01-23 [1] CRAN (R 4.2.0)
correlation * 0.8.3 2022-10-09 [1] CRAN (R 4.2.0)
cowplot * 1.1.1 2020-12-30 [1] CRAN (R 4.2.0)
crayon 1.5.2 2022-09-29 [1] CRAN (R 4.2.0)
crul 1.3 2022-09-03 [1] CRAN (R 4.2.0)
curl 5.0.0 2023-01-12 [1] CRAN (R 4.2.0)
data.table 1.14.6 2022-11-16 [1] CRAN (R 4.2.0)
datawizard * 0.6.4 2022-11-19 [1] CRAN (R 4.2.2)
DBI 1.1.3 2022-06-18 [1] CRAN (R 4.2.0)
dbplyr 2.2.1 2022-06-27 [1] CRAN (R 4.2.0)
deldir 1.0-6 2021-10-23 [1] CRAN (R 4.2.0)
DEoptimR 1.1-3 2023-10-07 [1] CRAN (R 4.2.0)
desc 1.4.2 2022-09-08 [1] CRAN (R 4.2.0)
details 0.3.0 2022-03-27 [1] CRAN (R 4.2.0)
digest 0.6.31 2022-12-11 [1] CRAN (R 4.2.0)
doParallel 1.0.17 2022-02-07 [1] CRAN (R 4.2.0)
dplyr * 1.1.0 2023-01-29 [1] CRAN (R 4.2.0)
dunn.test * 1.3.6 2024-04-12 [1] CRAN (R 4.2.3)
easystats * 0.5.2 2022-08-30 [1] CRAN (R 4.2.0)
effectsize * 0.8.2 2022-10-31 [1] CRAN (R 4.2.0)
ellipsis 0.3.2 2021-04-29 [1] CRAN (R 4.2.0)
emmeans * 1.8.4-1 2023-01-17 [1] CRAN (R 4.2.0)
epitools * 0.5-10.1 2020-03-22 [1] CRAN (R 4.2.0)
estimability 1.4.1 2022-08-05 [1] CRAN (R 4.2.0)
evaluate 0.20 2023-01-17 [1] CRAN (R 4.2.0)
factoextra * 1.0.7 2020-04-01 [1] CRAN (R 4.2.0)
fansi 1.0.4 2023-01-22 [1] CRAN (R 4.2.0)
farver 2.1.1 2022-07-06 [1] CRAN (R 4.2.0)
fastmap 1.1.0 2021-01-25 [1] CRAN (R 4.2.0)
forcats * 0.5.2 2022-08-19 [1] CRAN (R 4.2.0)
foreach 1.5.2 2022-02-02 [1] CRAN (R 4.2.0)
foreign 0.8-83 2022-09-28 [1] CRAN (R 4.2.2)
Formula 1.2-4 2020-10-16 [1] CRAN (R 4.2.0)
fs 1.6.0 2023-01-23 [1] CRAN (R 4.2.0)
furrr 0.3.1 2022-08-15 [1] CRAN (R 4.2.0)
future 1.33.2 2024-03-26 [1] CRAN (R 4.2.3)
gargle 1.2.1 2022-09-08 [1] CRAN (R 4.2.0)
generics 0.1.3 2022-07-05 [1] CRAN (R 4.2.0)
GGally 2.2.1 2024-02-14 [1] CRAN (R 4.2.3)
ggeasy * 0.1.3 2022-11-20 [1] Github (jonocarroll/ggeasy@b4b7269)
ggforce 0.4.2 2024-02-19 [1] CRAN (R 4.2.3)
gghalves 0.1.3 2022-05-30 [1] CRAN (R 4.2.0)
ggplot2 * 3.5.0.9000 2024-04-15 [1] Github (tidyverse/ggplot2@1050f09)
ggpubr * 0.5.0 2022-11-16 [1] CRAN (R 4.2.2)
ggrepel 0.9.2 2022-11-06 [1] CRAN (R 4.2.0)
ggsignif 0.6.4 2022-10-13 [1] CRAN (R 4.2.0)
ggstats 0.6.0 2024-04-05 [1] CRAN (R 4.2.3)
globals 0.16.3 2024-03-08 [1] CRAN (R 4.2.3)
glue 1.6.2 2022-02-24 [1] CRAN (R 4.2.0)
googledrive 2.0.0 2021-07-08 [1] CRAN (R 4.2.0)
googlesheets4 1.0.1 2022-08-13 [1] CRAN (R 4.2.0)
gridExtra * 2.3 2017-09-09 [1] CRAN (R 4.2.0)
gt * 0.8.0 2022-11-16 [1] CRAN (R 4.2.2)
gtable 0.3.1 2022-09-01 [1] CRAN (R 4.2.0)
haven 2.5.1 2022-08-22 [1] CRAN (R 4.2.0)
highr 0.10 2022-12-22 [1] CRAN (R 4.2.0)
Hmisc 4.7-1 2022-08-15 [1] CRAN (R 4.2.0)
hms 1.1.2 2022-08-19 [1] CRAN (R 4.2.0)
htmlTable 2.4.1 2022-07-07 [1] CRAN (R 4.2.0)
htmltools 0.5.4 2022-12-07 [1] CRAN (R 4.2.0)
htmlwidgets 1.6.1 2023-01-07 [1] CRAN (R 4.2.0)
httpcode 0.3.0 2020-04-10 [1] CRAN (R 4.2.0)
httr * 1.4.4 2022-08-17 [1] CRAN (R 4.2.0)
insight * 0.18.7 2022-11-18 [1] CRAN (R 4.2.2)
interp 1.1-3 2022-07-13 [1] CRAN (R 4.2.0)
iterators 1.0.14 2022-02-05 [1] CRAN (R 4.2.0)
jpeg 0.1-10 2022-11-29 [1] CRAN (R 4.2.0)
jquerylib 0.1.4 2021-04-26 [1] CRAN (R 4.2.0)
jsonlite 1.8.4 2022-12-06 [1] CRAN (R 4.2.0)
kableExtra * 1.4.0 2024-01-24 [1] CRAN (R 4.2.3)
knitr * 1.42 2023-01-25 [1] CRAN (R 4.2.0)
labeling 0.4.2 2020-10-20 [1] CRAN (R 4.2.0)
lattice * 0.20-45 2021-09-22 [1] CRAN (R 4.2.2)
latticeExtra 0.6-30 2022-07-04 [1] CRAN (R 4.2.0)
libcoin 1.0-10 2023-09-27 [1] CRAN (R 4.2.0)
lifecycle 1.0.3 2022-10-07 [1] CRAN (R 4.2.0)
listenv 0.9.1 2024-01-29 [1] CRAN (R 4.2.3)
lme4 * 1.1-35.2 2024-03-28 [1] CRAN (R 4.2.2)
lmerTest 3.1-3 2020-10-23 [1] CRAN (R 4.2.0)
lubridate 1.9.0 2022-11-06 [1] CRAN (R 4.2.0)
magrittr 2.0.3 2022-03-30 [1] CRAN (R 4.2.0)
MASS 7.3-58.1 2022-08-03 [1] CRAN (R 4.2.2)
mathjaxr 1.6-0 2022-02-28 [1] CRAN (R 4.2.0)
Matrix * 1.6-5 2024-01-11 [1] CRAN (R 4.2.3)
matrixStats 0.63.0 2022-11-18 [1] CRAN (R 4.2.0)
memoise 2.0.1 2021-11-26 [1] CRAN (R 4.2.0)
metan * 1.18.0 2023-03-05 [1] CRAN (R 4.2.0)
mgcv 1.8-41 2022-10-21 [1] CRAN (R 4.2.2)
minqa 1.2.5 2022-10-19 [1] CRAN (R 4.2.0)
modelbased * 0.8.5 2022-08-18 [1] CRAN (R 4.2.0)
modelr 0.1.10 2022-11-11 [1] CRAN (R 4.2.0)
modeltools 0.2-23 2020-03-05 [1] CRAN (R 4.2.0)
multcomp 1.4-20 2022-08-07 [1] CRAN (R 4.2.0)
munsell 0.5.0 2018-06-12 [1] CRAN (R 4.2.0)
mvtnorm 1.1-3 2021-10-08 [1] CRAN (R 4.2.0)
nlme * 3.1-160 2022-10-10 [1] CRAN (R 4.2.2)
nloptr 2.0.3 2022-05-26 [1] CRAN (R 4.2.0)
nnet 7.3-18 2022-09-28 [1] CRAN (R 4.2.2)
numDeriv 2016.8-1.1 2019-06-06 [1] CRAN (R 4.2.0)
opdisDownsampling 1.0.1 2024-04-15 [1] CRAN (R 4.2.3)
osfr * 0.2.9 2022-09-25 [1] CRAN (R 4.2.0)
parallelly 1.37.1 2024-02-29 [1] CRAN (R 4.2.3)
parameters * 0.19.0 2022-10-05 [1] CRAN (R 4.2.0)
patchwork 1.1.2 2022-08-19 [1] CRAN (R 4.2.0)
pbkrtest 0.5.2 2023-01-19 [1] CRAN (R 4.2.0)
pbmcapply 1.5.1 2022-04-28 [1] CRAN (R 4.2.0)
performance * 0.10.0 2022-10-03 [1] CRAN (R 4.2.0)
pheatmap * 1.0.12 2019-01-04 [1] CRAN (R 4.2.0)
pillar 1.8.1 2022-08-19 [1] CRAN (R 4.2.0)
pkgconfig 2.0.3 2019-09-22 [1] CRAN (R 4.2.0)
plyr * 1.8.8 2022-11-11 [1] CRAN (R 4.2.0)
png 0.1-8 2022-11-29 [1] CRAN (R 4.2.0)
polyclip 1.10-6 2023-09-27 [1] CRAN (R 4.2.0)
pracma 2.4.4 2023-11-10 [1] CRAN (R 4.2.0)
PupillometryR * 0.0.4 2021-09-19 [1] CRAN (R 4.2.0)
purrr * 1.0.1 2023-01-10 [1] CRAN (R 4.2.0)
pwr 1.3-0 2020-03-17 [1] CRAN (R 4.2.0)
qqconf 1.3.2 2023-04-14 [1] CRAN (R 4.2.0)
qqplotr 0.0.6 2023-01-25 [1] CRAN (R 4.2.0)
R6 2.5.1 2021-08-19 [1] CRAN (R 4.2.0)
RColorBrewer 1.1-3 2022-04-03 [1] CRAN (R 4.2.0)
Rcpp 1.0.10 2023-01-22 [1] CRAN (R 4.2.0)
readr * 2.1.3 2022-10-01 [1] CRAN (R 4.2.0)
readxl 1.4.1 2022-08-17 [1] CRAN (R 4.2.0)
report * 0.5.5 2022-08-22 [1] CRAN (R 4.2.0)
reprex 2.0.2 2022-08-17 [1] CRAN (R 4.2.0)
reshape2 * 1.4.4 2020-04-09 [1] CRAN (R 4.2.0)
rlang * 1.1.3 2024-01-10 [1] CRAN (R 4.2.3)
rmarkdown 2.20 2023-01-19 [1] CRAN (R 4.2.0)
rmcorr * 0.6.0 2024-04-15 [1] Github (lmarusich/rmcorr@8816f10)
rmdformats 1.0.4.9000 2023-02-05 [1] Github (juba/rmdformats@6d5b252)
Rmisc * 1.5.1 2022-05-02 [1] CRAN (R 4.2.0)
robustbase 0.99-2 2024-01-27 [1] CRAN (R 4.2.3)
rpart 4.1.19 2022-10-21 [1] CRAN (R 4.2.2)
rprojroot 2.0.3 2022-04-02 [1] CRAN (R 4.2.0)
rstatix 0.7.1 2022-11-09 [1] CRAN (R 4.2.0)
rstudioapi 0.14 2022-08-22 [1] CRAN (R 4.2.0)
rvest 1.0.3 2022-08-19 [1] CRAN (R 4.2.0)
sandwich 3.0-2 2022-06-15 [1] CRAN (R 4.2.0)
sass 0.4.5 2023-01-24 [1] CRAN (R 4.2.0)
scales 1.3.0 2023-11-28 [1] CRAN (R 4.2.3)
sdamr 0.2.0 2022-11-16 [1] CRAN (R 4.2.2)
see * 0.7.3 2022-09-20 [1] CRAN (R 4.2.0)
sessioninfo 1.2.2 2021-12-06 [1] CRAN (R 4.2.0)
smplot2 * 0.1.0 2022-11-20 [1] Github (smin95/smplot2@87f8331)
stringi 1.7.12 2023-01-11 [1] CRAN (R 4.2.0)
stringr * 1.5.0 2022-12-02 [1] CRAN (R 4.2.0)
survival * 3.4-0 2022-08-09 [1] CRAN (R 4.2.2)
svglite 2.1.0 2022-02-03 [1] CRAN (R 4.2.0)
systemfonts 1.0.4 2022-02-11 [1] CRAN (R 4.2.0)
TH.data 1.1-1 2022-04-26 [1] CRAN (R 4.2.0)
this.path * 2.2.0 2023-10-29 [1] CRAN (R 4.2.0)
tibble * 3.1.8 2022-07-22 [1] CRAN (R 4.2.0)
tidyr * 1.3.0 2023-01-24 [1] CRAN (R 4.2.0)
tidyselect 1.2.0 2022-10-10 [1] CRAN (R 4.2.0)
tidyverse * 1.3.2 2022-07-18 [1] CRAN (R 4.2.0)
timechange 0.1.1 2022-11-04 [1] CRAN (R 4.2.0)
tweenr 2.0.3 2024-02-26 [1] CRAN (R 4.2.3)
twosamples 2.0.1 2023-06-23 [1] CRAN (R 4.2.0)
tzdb 0.3.0 2022-03-28 [1] CRAN (R 4.2.0)
utf8 1.2.3 2023-01-31 [1] CRAN (R 4.2.0)
vctrs 0.6.5 2023-12-01 [1] CRAN (R 4.2.3)
viridisLite 0.4.1 2022-08-22 [1] CRAN (R 4.2.0)
withr 2.5.0 2022-03-03 [1] CRAN (R 4.2.0)
writexl * 1.4.1 2022-10-18 [1] CRAN (R 4.2.0)
xfun 0.37 2023-01-31 [1] CRAN (R 4.2.0)
xml2 1.3.3 2021-11-30 [1] CRAN (R 4.2.0)
xtable 1.8-4 2019-04-21 [1] CRAN (R 4.2.0)
yaml 2.3.7 2023-01-23 [1] CRAN (R 4.2.0)
zoo 1.8-11 2022-09-17 [1] CRAN (R 4.2.0)
[1] /Library/Frameworks/R.framework/Versions/4.2-arm64/Resources/library
──────────────────────────────────────────────────────────────────────────────
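Reproducibility note. In the listing above, packages marked with * were attached to the search path; the remaining packages were only loaded. The lib column's [1] resolves to the library path shown at the foot of the table, and source indicates whether each package was installed from CRAN or GitHub. A minimal sketch of how such a listing is regenerated at the end of a session, assuming (as the table's format suggests) that the sessioninfo package listed above produced it:

# Print platform details and the attached/loaded package manifest
sessioninfo::session_info()

Re-running this call in the same environment should reproduce the table above; in a fresh or updated library it will report the versions actually in use, which is the point of including it.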