cat('\014')     # clear the console

rm(list = ls()) # clear the workspace
try(dev.off(), silent = TRUE) # close the active graphics device, if any
library(tidyverse)
library(afex)
library(emmeans)
library(readxl)
library(GGally)
library(rstatix)
my_dodge <- .5
exclude_bad_eeg <- TRUE
theme_set(theme_minimal())

a_posteriori <- function(afex_aov, sig_level = .05) {
  # For every effect in an afex ANOVA table with p <= sig_level,
  # print the pairwise estimated-marginal-means contrasts.
  factors  <- as.list(rownames(afex_aov$anova_table))
  for (j in seq_along(factors)) {
    if (grepl(':', factors[[j]])) {
      # split interaction terms such as 'A:B' into their component factors
      factors[[j]] <- unlist(strsplit(factors[[j]], ':'))
    }
  }
  p_values <- afex_aov$anova_table$`Pr(>F)`
  for (i in seq_along(p_values)) {
    if (p_values[i] <= sig_level) {
      print(emmeans(afex_aov, factors[[i]], contr = 'pairwise'))
      cat(rep('_', 100), '\n', sep = '')  # separator between effects
    }
  }
}
eeg_check   <- read_excel(file.path('..', 'bad_channels_bqd.xlsx')) |>
  mutate(subject = parse_number(file))
bad_eeg     <- eeg_check$subject[eeg_check$to_kill == 1]
ratings_now <- read_csv('dish_preferences.csv', col_types = cols()) |> 
  mutate(Group = 'Latest') |> 
  filter(!(subject %in% bad_eeg))
ratings_old <- read_csv('Dishes_Blacos_Fin.csv', col_types = cols()) |> 
  filter(Group == 'Naive')
ratings     <- rbind(ratings_now, ratings_old) |> 
  filter(!is.na(answer)) |> 
  mutate_if(is.character, as.factor) |> 
  mutate(valence = fct_relevel(valence, 'neutral')) |> 
  mutate(final_valence = fct_relevel(final_valence, 'final_neutral')) |> 
  mutate(Group = fct_relevel(Group, 'Naive'))
saliva <- read_csv('Salivas2023.csv', col_types = cols()) |> 
  mutate(subject = parse_number(Sujeto)) |> 
  filter(!(subject %in% bad_eeg)) |> 
  mutate(Group = ifelse(condition == 1, 'control', 'mindful')) |> 
  mutate_if(is.character, as.factor) |> 
  mutate(subject = factor(subject)) |> 
  mutate(Group = fct_relevel(Group, 'mindful'))
# baseline correction: subtract the grand mean T0 volume from every sample
saliva <- saliva |> 
  mutate(vol_adjusted = volume_ul - mean(volume_ul[sample_time == 'T0']))
precise_saliva_mean <- 262.79922

1 General description

options(width = 100)
summary(ratings)
              date_time       subject          dish             valence         answer     
 1/10/2023 11:15:24: 120   1      : 120   Min.   :  1.0   neutral   :5099   Min.   :1.000  
 1/11/2023 15:58:14: 120   10     : 120   1st Qu.: 30.5   attractive:5100   1st Qu.:2.000  
 1/14/2023 13:20:50: 120   11     : 120   Median : 60.0                     Median :4.000  
 1/17/2023 12:08:26: 120   13     : 120   Mean   : 60.5                     Mean   :3.404  
 1/18/2023 12:08:01: 120   14     : 120   3rd Qu.: 90.5                     3rd Qu.:4.000  
 (Other)           :6599   15     : 120   Max.   :120.0                     Max.   :5.000  
 NA's              :3000   (Other):9479                                                    
           new_valence     personal_valence          final_valence     Group     
 now_attractive  : 845   attractive:3518    final_neutral   :4500   Naive :3000  
 now_neutral     : 927   neutral   :3681    final_attractive:4500   Latest:7199  
 still_attractive:2673   NA's      :3000    NA's            :1199                
 still_neutral   :2754                                                           
 NA's            :3000                                                           
                                                                                 
                                                                                 

2 Rating

2.1 Rating by valence & group

options(width = 100)
ratings_rep_anova <- aov_ez('subject', 'answer', ratings, within = c('valence'), between = c('Group'))
Warning: More than one observation per design cell, aggregating data using `fun_aggregate = mean`.
To turn off this warning, pass `fun_aggregate = mean` explicitly.
Contrasts set to contr.sum for the following variables: Group
cell_means <- ratings_rep_anova$data$long |>
  group_by(Group, valence) |>
  summarise(answer = mean(answer))
`summarise()` has grouped output by 'Group'. You can override using the `.groups` argument.
ratings_afex_plot <-
  afex_plot(
    ratings_rep_anova,
    x     = 'valence',
    trace = 'Group',
    # panel = 'sex',
    error = 'between',
    error_arg = list(width = .1, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .1, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3,
    # point_arg = list(size = 2)
   )
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"
suppressWarnings(print(
  ratings_afex_plot +
    scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
    geom_point(data = cell_means,
               aes(x = valence, y = answer, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))

nice(ratings_rep_anova)
Anova Table (Type 3 tests)

Response: answer
         Effect    df  MSE          F   ges p.value
1         Group 1, 83 0.29       2.18  .017    .144
2       valence 1, 83 0.16 305.03 ***  .564   <.001
3 Group:valence 1, 83 0.16       0.19 <.001    .667
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘+’ 0.1 ‘ ’ 1
a_posteriori(ratings_rep_anova)
$emmeans
 valence    emmean     SE df lower.CL upper.CL
 neutral      2.85 0.0491 83     2.75     2.95
 attractive   4.01 0.0626 83     3.89     4.14

Results are averaged over the levels of: Group 
Confidence level used: 0.95 

$contrasts
 contrast             estimate     SE df t.ratio p.value
 neutral - attractive    -1.17 0.0668 83 -17.465  <.0001

Results are averaged over the levels of: Group 

____________________________________________________________________________________________________
afex_plot(
  ratings_rep_anova,
  x     = "valence",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .1, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  line_arg = list(size = 0),
  mapping = c("color")
) + 
  scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
  geom_point(data = ratings_rep_anova$data$long,
             aes(x = valence, y = answer, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.1, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = valence, y = answer, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"

2.2 Rating by final_valence & group

options(width = 100)
ratings_subset <- ratings |> 
  filter(final_valence %in% c('final_attractive', 'final_neutral'))
ratings_final_rep_anova <- aov_ez('subject', 'answer', ratings_subset, within = c('final_valence'), between = c('Group'))
Warning: More than one observation per design cell, aggregating data using `fun_aggregate = mean`.
To turn off this warning, pass `fun_aggregate = mean` explicitly.
Contrasts set to contr.sum for the following variables: Group
cell_means <- ratings_final_rep_anova$data$long |>
  group_by(Group, final_valence) |>
  summarise(answer = mean(answer))
`summarise()` has grouped output by 'Group'. You can override using the `.groups` argument.
ratings_final_afex_plot <-
  afex_plot(
    ratings_final_rep_anova,
    x     = 'final_valence',
    trace = 'Group',
    # panel = 'sex',
    error = 'between',
    error_arg = list(width = .1, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .1, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3,
    # point_arg = list(size = 2)
    )
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"
suppressWarnings(print(
  ratings_final_afex_plot +
    scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
    geom_point(data = cell_means,
               aes(x = final_valence, y = answer, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))

nice(ratings_final_rep_anova)
Anova Table (Type 3 tests)

Response: answer
               Effect    df  MSE           F  ges p.value
1               Group 1, 83 0.26      3.24 + .027    .076
2       final_valence 1, 83 0.10 1157.35 *** .796   <.001
3 Group:final_valence 1, 83 0.10      6.83 * .022    .011
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘+’ 0.1 ‘ ’ 1
a_posteriori(ratings_final_rep_anova)
$emmeans
 final_valence    emmean     SE df lower.CL upper.CL
 final_neutral      2.51 0.0485 83     2.42     2.61
 final_attractive   4.33 0.0525 83     4.23     4.43

Results are averaged over the levels of: Group 
Confidence level used: 0.95 

$contrasts
 contrast                         estimate     SE df t.ratio p.value
 final_neutral - final_attractive    -1.82 0.0534 83 -34.020  <.0001

Results are averaged over the levels of: Group 

____________________________________________________________________________________________________
$emmeans
 Group  final_valence    emmean     SE df lower.CL upper.CL
 Naive  final_neutral      2.66 0.0814 83     2.50     2.82
 Latest final_neutral      2.36 0.0526 83     2.26     2.47
 Naive  final_attractive   4.34 0.0883 83     4.16     4.51
 Latest final_attractive   4.32 0.0570 83     4.21     4.44

Confidence level used: 0.95 

$contrasts
 contrast                                         estimate     SE df t.ratio p.value
 Naive final_neutral - Latest final_neutral         0.2940 0.0969 83   3.034  0.0167
 Naive final_neutral - Naive final_attractive      -1.6787 0.0898 83 -18.691  <.0001
 Naive final_neutral - Latest final_attractive     -1.6640 0.0994 83 -16.743  <.0001
 Latest final_neutral - Naive final_attractive     -1.9727 0.1027 83 -19.200  <.0001
 Latest final_neutral - Latest final_attractive    -1.9580 0.0580 83 -33.774  <.0001
 Naive final_attractive - Latest final_attractive   0.0147 0.1051 83   0.140  0.9990

P value adjustment: tukey method for comparing a family of 4 estimates 

____________________________________________________________________________________________________
afex_plot(
  ratings_final_rep_anova,
  x     = "final_valence",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .1, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  line_arg = list(size = 0),
  mapping = c("color")
  ) + 
  scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
  geom_point(data = ratings_final_rep_anova$data$long,
             aes(x = final_valence, y = answer, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.1, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = final_valence, y = answer, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"

3 Saliva

options(width = 100)
summary(saliva)
     Sujeto      condition     sample_time      tube       volume_ul         subject   
 S01    :  3   Min.   :1.000   T0:60       S05_T0 :  2   Min.   :  10.0   1      :  3  
 S02    :  3   1st Qu.:1.000   T1:60       S05_T1 :  2   1st Qu.: 158.5   2      :  3  
 S03    :  3   Median :2.000   T2:60       S05_T2 :  2   Median : 250.0   3      :  3  
 S04    :  3   Mean   :1.517               S27_T1 :  2   Mean   : 324.6   4      :  3  
 S05    :  3   3rd Qu.:2.000               S27_T2 :  2   3rd Qu.: 432.5   5      :  3  
 S06    :  3   Max.   :2.000               S35_T0 :  2   Max.   :1240.0   6      :  3  
 (Other):162                               (Other):168                    (Other):162  
     Group     vol_adjusted    
 mindful:93   Min.   :-254.25  
 control:87   1st Qu.:-105.75  
              Median : -14.25  
              Mean   :  60.34  
              3rd Qu.: 168.25  
              Max.   : 975.75  
                               

3.1 Saliva volume

options(width = 100)
saliva_rep_anova <- aov_ez('subject', 'volume_ul', saliva, within = c('sample_time'), between = c('Group'))
Contrasts set to contr.sum for the following variables: Group
cell_means <- saliva_rep_anova$data$long |>
  group_by(Group, sample_time) |>
  summarise(volume_ul = mean(volume_ul))
`summarise()` has grouped output by 'Group'. You can override using the `.groups` argument.
saliva_afex_plot <-
  afex_plot(
    saliva_rep_anova,
    x     = 'sample_time',
    trace = 'Group',
    # panel = 'sex',
    error = 'between',
    error_arg = list(width = .15, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .2, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3,
    # point_arg = list(size = 2)
   )
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"
suppressWarnings(print(
  saliva_afex_plot +
    scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
    geom_point(data = cell_means,
               aes(x = sample_time, y = volume_ul, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))

nice(saliva_rep_anova)
Anova Table (Type 3 tests)

Response: volume_ul
             Effect          df       MSE         F  ges p.value
1             Group       1, 58 124951.19      0.50 .007    .480
2       sample_time 1.63, 94.32  20914.61 13.66 *** .048   <.001
3 Group:sample_time 1.63, 94.32  20914.61      2.06 .008    .142
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘+’ 0.1 ‘ ’ 1

Sphericity correction method: GG 
a_posteriori(saliva_rep_anova)
$emmeans
 sample_time emmean   SE df lower.CL upper.CL
 T0             263 28.0 58      207      319
 T1             322 27.4 58      267      377
 T2             387 33.4 58      320      454

Results are averaged over the levels of: Group 
Confidence level used: 0.95 

$contrasts
 contrast estimate   SE df t.ratio p.value
 T0 - T1     -59.1 23.2 58  -2.545  0.0358
 T0 - T2    -124.4 28.6 58  -4.356  0.0002
 T1 - T2     -65.4 18.6 58  -3.507  0.0025

Results are averaged over the levels of: Group 
P value adjustment: tukey method for comparing a family of 3 estimates 

____________________________________________________________________________________________________
afex_plot(
  saliva_rep_anova,
  x     = "sample_time",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .2, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  mapping = c("color")
  ) + 
  scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
  geom_point(data = saliva_rep_anova$data$long,
             aes(x = sample_time, y = volume_ul, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.2, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = sample_time, y = volume_ul, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"

3.2 Saliva volume, adjusted

T0's mean volume subtracted from every value

options(width = 100)
saliva_adj_rep_anova <- aov_ez('subject', 'vol_adjusted', saliva, within = c('sample_time'), between = c('Group'))
Contrasts set to contr.sum for the following variables: Group
cell_means <- saliva_adj_rep_anova$data$long |>
  group_by(Group, sample_time) |>
  summarise(vol_adjusted = mean(vol_adjusted))
`summarise()` has grouped output by 'Group'. You can override using the `.groups` argument.
saliva_adj_afex_plot <-
  afex_plot(
    saliva_adj_rep_anova,
    x     = 'sample_time',
    trace = 'Group',
    # panel = 'sex',
    # data_geom = geom_violin,
    error = 'between',
    error_arg = list(width = .15, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .2, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3, 
    # point_arg = list(size = 2)
   )
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"
suppressWarnings(print(
  saliva_adj_afex_plot +
    scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
    geom_point(data = cell_means,
               aes(x = sample_time, y = vol_adjusted, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))

nice(saliva_adj_rep_anova)
Anova Table (Type 3 tests)

Response: vol_adjusted
             Effect          df       MSE         F  ges p.value
1             Group       1, 58 124951.19      0.50 .007    .480
2       sample_time 1.63, 94.32  20914.61 13.66 *** .048   <.001
3 Group:sample_time 1.63, 94.32  20914.61      2.06 .008    .142
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘+’ 0.1 ‘ ’ 1

Sphericity correction method: GG 
a_posteriori(saliva_adj_rep_anova)
$emmeans
 sample_time emmean   SE df lower.CL upper.CL
 T0           -1.45 28.0 58   -57.58     54.7
 T1           57.60 27.4 58     2.68    112.5
 T2          122.99 33.4 58    56.21    189.8

Results are averaged over the levels of: Group 
Confidence level used: 0.95 

$contrasts
 contrast estimate   SE df t.ratio p.value
 T0 - T1     -59.1 23.2 58  -2.545  0.0358
 T0 - T2    -124.4 28.6 58  -4.356  0.0002
 T1 - T2     -65.4 18.6 58  -3.507  0.0025

Results are averaged over the levels of: Group 
P value adjustment: tukey method for comparing a family of 3 estimates 

____________________________________________________________________________________________________
afex_plot(
  saliva_adj_rep_anova,
  x     = "sample_time",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .2, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  mapping = c("color")
  ) + 
  scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
  geom_point(data = saliva_adj_rep_anova$data$long,
             aes(x = sample_time, y = vol_adjusted, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.2, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = sample_time, y = vol_adjusted, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
Warning: Panel(s) show a mixed within-between-design.
Error bars do not allow comparisons across all means.
Suppress error bars with: error = "none"

4 RTs and saliva

options(width = 100)
# prepare data
saliva_wide <- saliva[c('subject', 'sample_time', 'volume_ul', 'Group')] |> 
  spread(sample_time, volume_ul) |> 
  mutate(up_to_t1 = T1 - T0,
         up_to_t2 = T2 - T0)
rts_clean <- read_csv('../x_infinity_reference_and_personal_choice/results/RTs_xtracted_from_EEG_data_clean_personal_choice.csv', col_types = cols()) |> 
  filter(!(num_id %in% bad_eeg)) |>
  mutate_if(is.character, as.factor) |> 
  mutate(num_id = factor(num_id))
try(detach('package:Hotelling', unload = TRUE), silent = TRUE) # ensure dplyr::summarise is not masked below
avg_rts <- rts_clean |> 
  group_by(bin_name, num_id) |> 
  summarise(avg_rt = mean(rt_no_out, na.rm = TRUE), .groups = 'drop') |> 
  spread(bin_name, avg_rt) |> 
  mutate(fab_attractive =  Avoid_Attractive - Approach_Attractive,
         fab_tolat      = (Avoid_Attractive - Approach_Attractive) - (Avoid_Neutral - Approach_Neutral))
rts_and_saliva <- inner_join(avg_rts, saliva_wide, by = join_by(num_id == subject))
write.csv(rts_and_saliva,  file.path('rts_and_saliva.csv'),  row.names = FALSE)
# summary and plots
summary(rts_and_saliva)
     num_id   Approach_Attractive Approach_Neutral Avoid_Attractive Avoid_Neutral  
 1      : 1   Min.   :407.3       Min.   :385.5    Min.   :425.0    Min.   :396.1  
 2      : 1   1st Qu.:520.8       1st Qu.:519.7    1st Qu.:530.7    1st Qu.:517.4  
 3      : 1   Median :568.7       Median :582.5    Median :593.1    Median :596.9  
 4      : 1   Mean   :589.3       Mean   :591.7    Mean   :605.4    Mean   :603.1  
 5      : 1   3rd Qu.:653.0       3rd Qu.:654.6    3rd Qu.:665.2    3rd Qu.:670.0  
 6      : 1   Max.   :870.6       Max.   :866.6    Max.   :860.7    Max.   :831.1  
 (Other):54                                                                        
 fab_attractive       fab_tolat           Group          T0               T1       
 Min.   :-66.2434   Min.   :-70.040   mindful:31   Min.   :  26.0   Min.   : 58.0  
 1st Qu.:  0.1407   1st Qu.:-10.114   control:29   1st Qu.: 113.5   1st Qu.:173.8  
 Median : 11.9694   Median :  7.057                Median : 200.0   Median :246.0  
 Mean   : 16.1097   Mean   :  4.699                Mean   : 264.2   Mean   :322.4  
 3rd Qu.: 27.8864   3rd Qu.: 22.777                3rd Qu.: 348.5   3rd Qu.:455.5  
 Max.   :119.5019   Max.   : 84.670                Max.   :1240.0   Max.   :840.0  
                                                                                   
       T2            up_to_t1          up_to_t2      
 Min.   :  10.0   Min.   :-460.00   Min.   :-490.00  
 1st Qu.: 195.5   1st Qu.: -21.00   1st Qu.:   1.25  
 Median : 350.0   Median :  22.50   Median : 131.00  
 Mean   : 387.1   Mean   :  58.18   Mean   : 122.83  
 3rd Qu.: 544.0   3rd Qu.: 142.50   3rd Qu.: 193.00  
 Max.   :1040.0   Max.   : 514.00   Max.   : 726.00  
                                                     
focal_vars <- c('fab_attractive', 'fab_tolat', 'up_to_t1', 'up_to_t2')
focal_pairs <- ggpairs(rts_and_saliva,
                       columns    = focal_vars,
                       aes(colour = Group, alpha = .25),
                       progress   = FALSE,
                       lower      = list(continuous = wrap('points')))
focal_pairs

4.1 t-tests between groups

options(width = 120)
library(Hotelling)

Attaching package: ‘Hotelling’

The following object is masked from ‘package:dplyr’:

    summarise
mind <- subset(rts_and_saliva, Group == 'mindful', select = c('fab_attractive', 'fab_tolat', 'up_to_t1', 'up_to_t2'))
cont <- subset(rts_and_saliva, Group == 'control', select = c('fab_attractive', 'fab_tolat', 'up_to_t1', 'up_to_t2'))
summarise(mind, cont) # Hotelling::summarise (masks dplyr's): per-group descriptive statistics
Group 1
=======
          fab_attractive fab_tolat  up_to_t1  up_to_t2
Mean            17.59106   9.25132  32.96774  76.12903
Median          14.69362  11.96378  12.00000  50.00000
Std. Dev.       39.41556  24.45971 172.16165 224.91432
N               31.00000  31.00000  31.00000  31.00000

Group 2
=======
          fab_attractive  fab_tolat  up_to_t1 up_to_t2
Mean            14.52615 -0.1678811  85.13793 172.7586
Median          11.15033  1.0977081  26.00000 162.0000
Std. Dev.       26.58209 36.1922050 187.34093 217.1168
N               29.00000 29.0000000  29.00000  29.0000
h_t <- hotelling.test(mind, cont)
print('Hotelling T²')
[1] "Hotelling T²"
print(h_t)
Test stat:  6.0644 
Numerator df:  4 
Denominator df:  55 
P-value:  0.2339 
try(detach('package:Hotelling', unload = TRUE), silent = TRUE)
rts_and_saliva_long <- rts_and_saliva[c('num_id', 'Group', focal_vars)] |> 
  pivot_longer(-c(num_id, Group), names_to = 'variable', values_to = 'value')
t_tests <- rts_and_saliva_long |> 
  group_by(variable) |> 
  t_test(value ~ Group) |> 
  adjust_pvalue(method = 'fdr')
t_tests
---
title: 'Dish ratings baq'
author: 'Alvaro Rivera-Rei'
date: "`r format(Sys.time(), '%d %B, %Y')`"
output:
  html_notebook:
    code_folding: hide
    highlight: tango
    number_sections: yes
    theme: cerulean
    toc: yes
    toc_float:
      collapsed: no
      smooth_scroll: no
  pdf_document:
    toc: yes
subtitle: Likert scale from 1 to 5
---

```{r Clean and Load Libraries}
cat('\014')     # clear the console
rm(list = ls()) # clear the workspace
try(dev.off(), silent = TRUE) # close the active graphics device, if any
library(tidyverse)
library(afex)
library(emmeans)
library(readxl)
library(GGally)
library(rstatix)
```

```{r Set Defaults}
my_dodge <- .5
exclude_bad_eeg <- TRUE
theme_set(theme_minimal())

a_posteriori <- function(afex_aov, sig_level = .05) {
  # For every effect in an afex ANOVA table with p <= sig_level,
  # print the pairwise estimated-marginal-means contrasts.
  factors  <- as.list(rownames(afex_aov$anova_table))
  for (j in seq_along(factors)) {
    if (grepl(':', factors[[j]])) {
      # split interaction terms such as 'A:B' into their component factors
      factors[[j]] <- unlist(strsplit(factors[[j]], ':'))
    }
  }
  p_values <- afex_aov$anova_table$`Pr(>F)`
  for (i in seq_along(p_values)) {
    if (p_values[i] <= sig_level) {
      print(emmeans(afex_aov, factors[[i]], contr = 'pairwise'))
      cat(rep('_', 100), '\n', sep = '')  # separator between effects
    }
  }
}
```
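
For orientation: `a_posteriori()` walks the ANOVA table and, for every effect whose p-value is at or below `sig_level`, prints the pairwise estimated-marginal-means contrasts. For a significant `A:B` interaction it issues, roughly, the call sketched below (`my_aov`, `A` and `B` are placeholder names used only for illustration):

```{r a_posteriori_sketch, eval = FALSE}
# Sketch of the emmeans call a_posteriori() makes for one significant effect;
# an interaction term 'A:B' is first split into c('A', 'B').
emmeans(my_aov, c('A', 'B'), contr = 'pairwise')
```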

```{r Load rating data}
eeg_check   <- read_excel(file.path('..', 'bad_channels_bqd.xlsx')) |>
  mutate(subject = parse_number(file))
bad_eeg     <- eeg_check$subject[eeg_check$to_kill == 1]
ratings_now <- read_csv('dish_preferences.csv', col_types = cols()) |> 
  mutate(Group = 'Latest') |> 
  filter(!(subject %in% bad_eeg))
ratings_old <- read_csv('Dishes_Blacos_Fin.csv', col_types = cols()) |> 
  filter(Group == 'Naive')
ratings     <- rbind(ratings_now, ratings_old) |> 
  filter(!is.na(answer)) |> 
  mutate_if(is.character, as.factor) |> 
  mutate(valence = fct_relevel(valence, 'neutral')) |> 
  mutate(final_valence = fct_relevel(final_valence, 'final_neutral')) |> 
  mutate(Group = fct_relevel(Group, 'Naive'))
saliva <- read_csv('Salivas2023.csv', col_types = cols()) |> 
  mutate(subject = parse_number(Sujeto)) |> 
  filter(!(subject %in% bad_eeg)) |> 
  mutate(Group = ifelse(condition == 1, 'control', 'mindful')) |> 
  mutate_if(is.character, as.factor) |> 
  mutate(subject = factor(subject)) |> 
  mutate(Group = fct_relevel(Group, 'mindful'))
# baseline correction: subtract the grand mean T0 volume from every sample
saliva <- saliva |> 
  mutate(vol_adjusted = volume_ul - mean(volume_ul[sample_time == 'T0']))
precise_saliva_mean <- 262.79922
```
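
An optional sanity check of the exclusion step above (a sketch only, not evaluated; it assumes the subject labels are plain numbers, as in the summaries below):

```{r exclusion_check, eval = FALSE}
# Both should be FALSE: no bad-EEG subject should remain in the newly collected data
any(parse_number(as.character(ratings$subject[ratings$Group == 'Latest'])) %in% bad_eeg)
any(parse_number(as.character(saliva$subject)) %in% bad_eeg)
```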

# General description
```{r general, fig.width = 12}
options(width = 100)
summary(ratings)
```

# Rating
## Rating by valence & group

```{r valence, fig.width = 8}
options(width = 100)
ratings_rep_anova <- aov_ez('subject', 'answer', ratings, within = c('valence'), between = c('Group'))
cell_means <- ratings_rep_anova$data$long |>
  group_by(Group, valence) |>
  summarise(answer = mean(answer))
ratings_afex_plot <-
  afex_plot(
    ratings_rep_anova,
    x     = 'valence',
    trace = 'Group',
    # panel = 'sex',
    error = 'between',
    error_arg = list(width = .1, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .1, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3,
    # point_arg = list(size = 2)
   )
suppressWarnings(print(
  ratings_afex_plot +
    scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
    geom_point(data = cell_means,
               aes(x = valence, y = answer, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))
nice(ratings_rep_anova)
a_posteriori(ratings_rep_anova)

afex_plot(
  ratings_rep_anova,
  x     = "valence",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .1, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  line_arg = list(size = 0),
  mapping = c("color")
) + 
  scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
  geom_point(data = ratings_rep_anova$data$long,
             aes(x = valence, y = answer, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.1, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = valence, y = answer, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
```
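
As the console warning notes, `aov_ez()` aggregates the repeated trials to a single mean per subject × valence cell before fitting; the same applies to the `final_valence` model below. Passing the aggregation function explicitly gives an identical fit and silences the warning (sketch):

```{r aggregate_note, eval = FALSE}
aov_ez('subject', 'answer', ratings,
       within = 'valence', between = 'Group',
       fun_aggregate = mean)
```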

## Rating by final_valence & group

```{r final_valence, fig.width = 8}
options(width = 100)
ratings_subset <- ratings |> 
  filter(final_valence %in% c('final_attractive', 'final_neutral'))
ratings_final_rep_anova <- aov_ez('subject', 'answer', ratings_subset, within = c('final_valence'), between = c('Group'))
cell_means <- ratings_final_rep_anova$data$long |>
  group_by(Group, final_valence) |>
  summarise(answer = mean(answer))
ratings_final_afex_plot <-
  afex_plot(
    ratings_final_rep_anova,
    x     = 'final_valence',
    trace = 'Group',
    # panel = 'sex',
    error = 'between',
    error_arg = list(width = .1, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .1, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3,
    # point_arg = list(size = 2)
    )
suppressWarnings(print(
  ratings_final_afex_plot +
    scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
    geom_point(data = cell_means,
               aes(x = final_valence, y = answer, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))
nice(ratings_final_rep_anova)
a_posteriori(ratings_final_rep_anova)

afex_plot(
  ratings_final_rep_anova,
  x     = "final_valence",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .1, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  line_arg = list(size = 0),
  mapping = c("color")
  ) + 
  scale_color_manual(values = c('Naive' = 'cornflowerblue', 'Latest' = 'firebrick2')) +
  geom_point(data = ratings_final_rep_anova$data$long,
             aes(x = final_valence, y = answer, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.1, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = final_valence, y = answer, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
```

# Saliva

```{r saliva descrip, fig.width = 8}
options(width = 100)
summary(saliva)
```

## Saliva volume

```{r saliva, fig.width = 8}
options(width = 100)
saliva_rep_anova <- aov_ez('subject', 'volume_ul', saliva, within = c('sample_time'), between = c('Group'))
cell_means <- saliva_rep_anova$data$long |>
  group_by(Group, sample_time) |>
  summarise(volume_ul = mean(volume_ul))
saliva_afex_plot <-
  afex_plot(
    saliva_rep_anova,
    x     = 'sample_time',
    trace = 'Group',
    # panel = 'sex',
    error = 'between',
    error_arg = list(width = .15, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .2, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3,
    # point_arg = list(size = 2)
   )
suppressWarnings(print(
  saliva_afex_plot +
    scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
    geom_point(data = cell_means,
               aes(x = sample_time, y = volume_ul, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))
nice(saliva_rep_anova)
a_posteriori(saliva_rep_anova)
afex_plot(
  saliva_rep_anova,
  x     = "sample_time",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .2, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  mapping = c("color")
  ) + 
  scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
  geom_point(data = saliva_rep_anova$data$long,
             aes(x = sample_time, y = volume_ul, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.2, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = sample_time, y = volume_ul, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
```

## Saliva volume, adjusted
T0's mean volume subtracted from every value
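
Because this correction only shifts every observation by the same constant, the F tests and contrasts below are identical to those of the previous section; only the estimated marginal means are displaced. A quick check of the correction itself (a sketch, not evaluated): the raw mean of `vol_adjusted` over the T0 samples should be essentially zero.

```{r vol_adjusted_check, eval = FALSE}
saliva |>
  group_by(sample_time) |>
  summarise(mean_adjusted = mean(vol_adjusted), .groups = 'drop')
```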

```{r saliva_adj, fig.width = 8}
options(width = 100)
saliva_adj_rep_anova <- aov_ez('subject', 'vol_adjusted', saliva, within = c('sample_time'), between = c('Group'))
cell_means <- saliva_adj_rep_anova$data$long |>
  group_by(Group, sample_time) |>
  summarise(vol_adjusted = mean(vol_adjusted))
saliva_adj_afex_plot <-
  afex_plot(
    saliva_adj_rep_anova,
    x     = 'sample_time',
    trace = 'Group',
    # panel = 'sex',
    # data_geom = geom_violin,
    error = 'between',
    error_arg = list(width = .15, lwd = .75, col = 'gray30', alpha = .7),
    dodge = my_dodge,
    data_arg  = list(
      position = 
        position_jitterdodge(
          jitter.width  = .2, 
          # jitter.height = .1, 
          dodge.width   = my_dodge  ## needs to be same as dodge
        )),
    mapping = c('color'), data_alpha = .3, 
    # point_arg = list(size = 2)
   )
suppressWarnings(print(
  saliva_adj_afex_plot +
    scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
    geom_point(data = cell_means,
               aes(x = sample_time, y = vol_adjusted, group = Group, color = Group),
               position = position_dodge(my_dodge), size = 2.5)
  ))
nice(saliva_adj_rep_anova)
a_posteriori(saliva_adj_rep_anova)

afex_plot(
  saliva_adj_rep_anova,
  x     = "sample_time",
  trace = "Group",
  error = "between",
  data_geom = geom_violin,
  error_arg = list(width = .2, lwd = .75, col = 'gray10', alpha = .8),
  dodge = my_dodge,
  mapping = c("color")
  ) + 
  scale_color_manual(values = c('mindful' = '#2db839', 'control' = '#ef9614')) +
  geom_point(data = saliva_adj_rep_anova$data$long,
             aes(x = sample_time, y = vol_adjusted, group = Group, color = Group),
             position = position_jitterdodge(jitter.width = 0.2, dodge.width = my_dodge),
             alpha = .4) +
  geom_point(data = cell_means,
             aes(x = sample_time, y = vol_adjusted, group = Group, color = Group),
             position = position_dodge(my_dodge), size = 2.5)
```

# RTs and saliva

```{r rts_and_saliva, fig.width = 10}
options(width = 100)
# prepare data
saliva_wide <- saliva[c('subject', 'sample_time', 'volume_ul', 'Group')] |> 
  spread(sample_time, volume_ul) |> 
  mutate(up_to_t1 = T1 - T0,
         up_to_t2 = T2 - T0)
rts_clean <- read_csv('../x_infinity_reference_and_personal_choice/results/RTs_xtracted_from_EEG_data_clean_personal_choice.csv', col_types = cols()) |> 
  filter(!(num_id %in% bad_eeg)) |>
  mutate_if(is.character, as.factor) |> 
  mutate(num_id = factor(num_id))
try(detach('package:Hotelling', unload = TRUE), silent = TRUE) # ensure dplyr::summarise is not masked below
avg_rts <- rts_clean |> 
  group_by(bin_name, num_id) |> 
  summarise(avg_rt = mean(rt_no_out, na.rm = TRUE), .groups = 'drop') |> 
  spread(bin_name, avg_rt) |> 
  mutate(fab_attractive =  Avoid_Attractive - Approach_Attractive,
         fab_tolat      = (Avoid_Attractive - Approach_Attractive) - (Avoid_Neutral - Approach_Neutral))
rts_and_saliva <- inner_join(avg_rts, saliva_wide, by = join_by(num_id == subject))
write.csv(rts_and_saliva,  file.path('rts_and_saliva.csv'),  row.names = FALSE)
# summary and plots
summary(rts_and_saliva)
focal_vars <- c('fab_attractive', 'fab_tolat', 'up_to_t1', 'up_to_t2')
focal_pairs <- ggpairs(rts_and_saliva,
                       columns    = focal_vars,
                       aes(colour = Group, alpha = .25),
                       progress   = FALSE,
                       lower      = list(continuous = wrap('points')))
focal_pairs
```
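
For reference, the RT contrasts computed above from the per-subject bin means are

$$
\text{fab\_attractive} = \overline{RT}_{\text{Avoid, Attractive}} - \overline{RT}_{\text{Approach, Attractive}}
$$

$$
\text{fab\_tolat} = \left(\overline{RT}_{\text{Avoid, Attractive}} - \overline{RT}_{\text{Approach, Attractive}}\right) - \left(\overline{RT}_{\text{Avoid, Neutral}} - \overline{RT}_{\text{Approach, Neutral}}\right)
$$

while the saliva change scores are simply `up_to_t1` $= T1 - T0$ and `up_to_t2` $= T2 - T0$.
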
## t-tests between groups
```{r rts_and_saliva_t_tests, fig.width = 10}
options(width = 120)
library(Hotelling)
mind <- subset(rts_and_saliva, Group == 'mindful', select = c('fab_attractive', 'fab_tolat', 'up_to_t1', 'up_to_t2'))
cont <- subset(rts_and_saliva, Group == 'control', select = c('fab_attractive', 'fab_tolat', 'up_to_t1', 'up_to_t2'))
summarise(mind, cont) # Hotelling::summarise (masks dplyr's): per-group descriptive statistics
h_t <- hotelling.test(mind, cont)
print('Hotelling T²')
print(h_t)
try(detach('package:Hotelling', unload = TRUE), silent = TRUE)
rts_and_saliva_long <- rts_and_saliva[c('num_id', 'Group', focal_vars)] |> 
  pivot_longer(-c(num_id, Group), names_to = 'variable', values_to = 'value')
t_tests <- rts_and_saliva_long |> 
  group_by(variable) |> 
  t_test(value ~ Group) |> 
  adjust_pvalue(method = 'fdr')
t_tests
```
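
The `adjust_pvalue(method = 'fdr')` step is the Benjamini–Hochberg correction across the four focal variables. The same adjustment can be reproduced in base R if needed (a sketch; it uses the `p` column returned by `rstatix::t_test()` above):

```{r fdr_check, eval = FALSE}
# Benjamini-Hochberg ('fdr') adjustment of the four raw p-values
p.adjust(t_tests$p, method = 'fdr')
```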
