This document is structured to include only the final analyses reported in the paper.

rm(list = ls())

# load packages
library(knitr)
library(tidyverse)
library(tidytext)
library(RCurl)
library(forcats)
library(langcog)
library(data.table)
library(igraph)
library(intergraph)
library(ggnetwork)
library(stringr)
library(lme4)
library(broom)   # for tidy() on model fits and anova tables

opts_chunk$set(echo = T, message = F, warning = F, 
               error = F, cache = F, tidy = F)

theme_set(theme_bw())

Read in data

# tdcs data
d.all = read.csv("../data/labDataPlusNorms.csv") %>%
  rename(condition = electrode,
         association.num = mention) %>%
  select(1:4,7,9:19,21,22) %>%
  mutate(condition = fct_recode(condition, "sham" = "na")) %>%
  #filter(as.character(target) != as.character(cue)) %>%
  mutate(target= str_trim(target)) 

# turker accuracy data
d.bw.raw = read.csv("../data/backALLfinal_withALLSimilaritiesPlusNorms.csv")  
d.bw = d.bw.raw %>%
          select(cue, labSubj, subjCode, isRight)

Verify analytical approach

Try to reproduce the number-of-associates analysis in the manuscript

# number of associates produced per subject for each cue
num.associates = count(d.all, condition, labSubj, cue)

num.associates %>%
  group_by(condition) %>%
  summarize(mean = mean(n)) %>%
  kable()
condition       mean
anodal      5.655046
cathodal    5.648598
sham        4.849910

This is almost identical to the manuscript (the sham mean differs slightly).

Fit models

contrasts(num.associates$condition) <-c(.5, -.5, 0)
colnames(contrasts(num.associates$condition)) <- c('anodalVScathodal','experimentalVSsham')
  
m1a = lmer(n~condition+(1|labSubj)+ (1|cue), data=num.associates)
ma = lmer(n~1+(1|labSubj)+ (1|cue), data=num.associates)
anova(m1a, ma, test = "Chi")
## Data: num.associates
## Models:
## ma: n ~ 1 + (1 | labSubj) + (1 | cue)
## m1a: n ~ condition + (1 | labSubj) + (1 | cue)
##     Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## ma   4 4159.9 4181.5 -2075.9   4151.9                           
## m1a  6 4158.0 4190.4 -2073.0   4146.0 5.8496      2    0.05367 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

“X2(2)=5.87, p=.053”
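As a quick check on the reported statistic: the chi-squared value is just the deviance difference between the nested models, and the p-value follows from a chi-squared distribution with 2 df (a sketch using the values in the table above).

# Quick check (sketch): LRT p-value from the chi-squared statistic in the table above
pchisq(5.8496, df = 2, lower.tail = FALSE)  # ~0.054, matching Pr(>Chisq)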

Exploring random effects

m1a = lmer(n~condition+(1|labSubj)+ (condition|cue), data=num.associates)
ma = lmer(n~1+(1|labSubj)+ (condition|cue), data=num.associates)
anova(m1a, ma, test = "Chi")
## Data: num.associates
## Models:
## ma: n ~ 1 + (1 | labSubj) + (condition | cue)
## m1a: n ~ condition + (1 | labSubj) + (condition | cue)
##     Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## ma   9 4168.0 4216.6 -2075.0   4150.0                           
## m1a 11 4166.1 4225.5 -2072.1   4144.1 5.8425      2    0.05387 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
m1b = lmer(n~condition+(1|labSubj)+ (1|cue), 
           data=filter(num.associates, condition !="cathodal"))
mb = lmer(n~1+(1|labSubj)+ (1|cue), 
          data=filter(num.associates, condition !="cathodal"))
#lmerTest::difflsmeans(m1b)
anova(m1b, mb, test = "Chi")
## Data: filter(num.associates, condition != "cathodal")
## Models:
## mb: n ~ 1 + (1 | labSubj) + (1 | cue)
## m1b: n ~ condition + (1 | labSubj) + (1 | cue)
##     Df  AIC  BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## mb   4 2760 2780 -1376.0     2752                           
## m1b  5 2757 2782 -1373.5     2747 5.0127      1    0.02516 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

…for this m1a model, the model converges even when condition is included as a random slope by cue. Check this.
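One way to check this (a sketch, assuming a reasonably recent lme4; m1a here is the random-slope fit from the chunk above):

# Sketch: convergence diagnostics for the random-slope fit of m1a
m1a@optinfo$conv$lme4$messages  # NULL if there were no convergence warnings
isSingular(m1a)                 # TRUE would flag a degenerate random-effects fit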

Pairwise comparisons:

m1b = lmer(n~condition+(1|labSubj)+ (1|cue), 
           data=filter(num.associates, condition !="cathodal"))
mb = lmer(n~1+(1|labSubj)+ (1|cue), 
          data=filter(num.associates, condition !="cathodal"))
#lmerTest::difflsmeans(m1b)
anova(m1b, mb, test = "Chi")
## Data: filter(num.associates, condition != "cathodal")
## Models:
## mb: n ~ 1 + (1 | labSubj) + (1 | cue)
## m1b: n ~ condition + (1 | labSubj) + (1 | cue)
##     Df  AIC  BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## mb   4 2760 2780 -1376.0     2752                           
## m1b  5 2757 2782 -1373.5     2747 5.0127      1    0.02516 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

“b=-.83, 95% CI [-1.54, -.11]; X2(1)=5.03, p=.025”

Nearly identical, but the confidence intervals differ slightly. Which package was used to calculate the confidence intervals in the manuscript? I'm using lmerTest here.
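One way to narrow down the discrepancy is to compare interval methods directly; lme4's confint() can compute both Wald and profile intervals (a sketch, using m1b as fit above):

# Sketch: Wald vs. profile 95% CIs for the fixed effects of m1b; small
# differences between methods could explain the mismatch with the manuscript
confint(m1b, parm = "beta_", method = "Wald")
confint(m1b, parm = "beta_", method = "profile")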

m1c = lmer(n~condition+(1|labSubj)+ (1|cue), 
           data=filter(num.associates, condition !="anodal"))
mc = lmer(n~1+(1|labSubj)+ (1|cue), 
          data=filter(num.associates, condition !="anodal"))

#lmerTest::difflsmeans(m1c)
anova(m1c, mc, test = "Chi")
## Data: filter(num.associates, condition != "anodal")
## Models:
## mc: n ~ 1 + (1 | labSubj) + (1 | cue)
## m1c: n ~ condition + (1 | labSubj) + (1 | cue)
##     Df    AIC    BIC  logLik deviance  Chisq Chi Df Pr(>Chisq)  
## mc   4 2774.1 2794.0 -1383.0   2766.1                           
## m1c  5 2770.6 2795.6 -1380.3   2760.6 5.4625      1    0.01943 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

“b=-.82, 95% CI [-1.50, -.14]; X2(1)=5.49, p=.019”

m1d = lmer(n~condition+(1|labSubj)+ (1|cue), data=filter(num.associates, condition !="sham"))
md = lmer(n~1+(1|labSubj)+ (1|cue), data=filter(num.associates, condition !="sham"))
anova(m1d, md, test = "Chi")
## Data: filter(num.associates, condition != "sham")
## Models:
## md: n ~ 1 + (1 | labSubj) + (1 | cue)
## m1d: n ~ condition + (1 | labSubj) + (1 | cue)
##     Df    AIC    BIC  logLik deviance Chisq Chi Df Pr(>Chisq)
## md   4 2804.9 2824.8 -1398.4   2796.9                        
## m1d  5 2806.9 2831.8 -1398.4   2796.9 2e-04      1     0.9876

“p=.99”

Overall, this looks reasonable.

Associate types

Spoken frequency and concreteness

Descriptive statistics

# read in frequency and concreteness
subtlexus.url <- getURL("https://raw.githubusercontent.com/mllewis/RC/master/data/corpus/SUBTLEXus_corpus.txt")
freqs <- read.table(text = subtlexus.url, header=TRUE) %>%
          select(Word,Lg10WF)
concreteness <-read.csv("../data/brysbaert_corpus.csv", header=TRUE) %>%
  select(Word, Conc.M)

associate.types = d.all %>%
  left_join(freqs, by=c("target"= "Word")) %>%
  left_join(concreteness, by=c("target"= "Word")) %>%
  select(condition,labSubj, cue, target, Lg10WF, Conc.M) 

subjs.means = associate.types %>%
  group_by(condition, labSubj, cue) %>%
  summarize(log.freq = mean(Lg10WF, na.rm = T),
            conc = mean(Conc.M, na.rm = T)) %>%
  group_by(condition, labSubj) %>%
  summarize(log.frequency = mean(log.freq, na.rm = T),
            concreteness = mean(conc, na.rm = T)) 

cond.means = subjs.means %>%
  gather(variable, value, -1:-2) %>%
  group_by(variable, condition) %>%
  multi_boot_standard(col = "value", na.rm = T) 

kable(cond.means)
variable        condition   ci_lower   ci_upper       mean
concreteness    anodal      3.922435   4.171794   4.060420
concreteness    cathodal    4.009505   4.202687   4.108435
concreteness    sham        3.953620   4.191749   4.080662
log.frequency   anodal      3.173231   3.254842   3.214721
log.frequency   cathodal    3.179364   3.291233   3.237422
log.frequency   sham        3.123595   3.279987   3.208827

ggplot(cond.means, aes(x = condition, y = mean, fill = condition)) +
    facet_wrap(~variable, scales = "free") +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin = ci_lower)) +
    ggtitle("Associate-type measures") +
    theme_bw() +
    theme(legend.position = "none")

Models

Fit mixed-effects models.

First, set up the contrasts.

contrasts(associate.types$condition) <- c(.5, -.5, 0)
colnames(contrasts(associate.types$condition)) <- c('anodalVScathodal',
'experimentalVSsham')
kable(contrasts(associate.types$condition))
           anodalVScathodal   experimentalVSsham
anodal                  0.5           -0.4082483
cathodal               -0.5           -0.4082483
sham                    0.0            0.8164966
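Note that only the first contrast column was supplied explicitly. When contrasts<- receives fewer columns than levels - 1, R completes the matrix with an orthogonal column, which is why experimentalVSsham appears as ±0.41/0.82 rather than a nominal (-1/3, -1/3, 2/3) coding; the comparison is the same up to scaling of the coefficient. A minimal sketch of this behavior:

# Sketch: R completes a partially specified contrast matrix orthogonally
cond <- factor(c("anodal", "cathodal", "sham"))
contrasts(cond) <- c(.5, -.5, 0)  # only the anodal-vs-cathodal column is supplied
contrasts(cond)                   # second column is filled in proportional to an
                                  # experimental-vs-sham comparison (-.41, -.41, .82)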

Frequency

Model M6a in the paper

M6a <-lmer(scale(Lg10WF) ~ condition +  (1|labSubj) + (condition|cue), data=associate.types)
kable(tidy(M6a))
term                                                             estimate   std.error   statistic   group
(Intercept)                                                     0.0003009   0.0495157   0.0060776   fixed
conditionanodalVScathodal                                      -0.0271362   0.0560165  -0.4844330   fixed
conditionexperimentalVSsham                                    -0.0063393   0.0395203  -0.1604063   fixed
sd_(Intercept).labSubj                                          0.1310000          NA          NA   labSubj
sd_(Intercept).cue                                              0.2744148          NA          NA   cue
sd_conditionanodalVScathodal.cue                                0.0117885          NA          NA   cue
sd_conditionexperimentalVSsham.cue                              0.0145929          NA          NA   cue
cor_(Intercept).conditionanodalVScathodal.cue                  -0.9999727          NA          NA   cue
cor_(Intercept).conditionexperimentalVSsham.cue                -0.9999996          NA          NA   cue
cor_conditionanodalVScathodal.conditionexperimentalVSsham.cue   0.9999659          NA          NA   cue
sd_Observation.Residual                                         0.9552072          NA          NA   Residual

freq.base<-lmer(scale(Lg10WF) ~ 1 +  (1|labSubj) + (condition|cue) , data=associate.types)

kable(tidy(anova(M6a, freq.base, test = "Chi")))
term         df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
freq.base     9   22077.38   22140.23   -11029.69   22059.38          NA       NA          NA
M6a          11   22081.11   22157.93   -11029.55   22059.11   0.2700137        2   0.8737099

Concreteness

Model M6b in the paper.

M6b <- lmer(scale(Conc.M) ~ condition+ (1|labSubj) + (1|cue), data=associate.types)
kable(tidy(M6b))
term                           estimate   std.error   statistic   group
(Intercept)                   0.0005930   0.0587379   0.0100960   fixed
conditionanodalVScathodal    -0.0497766   0.1050772  -0.4737140   fixed
conditionexperimentalVSsham  -0.0030181   0.0730799  -0.0412987   fixed
sd_(Intercept).labSubj        0.2698475          NA          NA   labSubj
sd_(Intercept).cue            0.2528691          NA          NA   cue
sd_Observation.Residual       0.9288013          NA          NA   Residual

conc.base <- lmer(scale(Conc.M) ~ 1 + (1|labSubj) + (1|cue), data=associate.types) # does not converge with random slopes by condition|cue

kable(tidy(anova(M6b, conc.base, test = "Chi")))
term         df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
conc.base     4   21927.94   21955.92   -10959.97   21919.94          NA       NA          NA
M6b           6   21931.70   21973.67   -10959.85   21919.70   0.2406801        2   0.8866189

Cue-associate relationships

Conditional probability

Merge in Nelson norms

# nelson norms
cp.url <- getURL("https://raw.githubusercontent.com/wjhopper/USF-WAN-SS/master/norms.csv")
cps <- read.csv(text = cp.url, header=FALSE) 

names(cps) = c("cue", "target", "FS", "BS")

cps = cps %>%
      mutate(cue = tolower(as.character(cue)),
             target = tolower(as.character(target)))

dcp =  d.all %>%
    filter(as.character(target) != as.character(cue)) %>%
    left_join(cps) %>%
    select(labSubj, cue, target, condition, association.num, FS, BS)

All cues are present in the Nelson norms

cuetdcs = unique(d.all$cue)
cueNelson = unique(cps$cue)

length(intersect(cuetdcs, cueNelson)) # all cues are present in nelson
## [1] 39
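Since the intersection size only confirms full coverage if the tDCS data contain exactly 39 cues, a more direct check is the set difference (a sketch; it should be empty):

# Sketch: list any cues missing from the Nelson norms (expected: character(0))
setdiff(cuetdcs, cueNelson)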

Lots of NAs, but no difference between the critical conditions

dcp %>%
  group_by(condition) %>%
  summarize(na = length(which(is.na(FS))),
            notna = length(which(!is.na(FS)))) %>%
  kable()
condition     na   notna
anodal      1883    1110
cathodal    1887    1040
sham        1668     924

Replace missing associate values with zero. Note that BS and FS are skewed, so we also take the log.

dcp <- dcp %>%
       mutate(FS = ifelse(is.na(FS), 0, FS),
              BS = ifelse(is.na(BS), 0, BS),
              log.FS = log(FS),
              log.BS = log(BS))  

# log(0) yields -Inf; these non-finite values are treated as NA (dropped) in the plots and analyses below

dcp %>%
  gather(variable, value, 6:9) %>%
  ggplot(aes(x = value)) +
  facet_wrap(~variable, scales = "free")+
  geom_histogram() +
  theme_bw()
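Because the missing values were set to 0 before taking the log, log.FS and log.BS contain -Inf for those rows; they are converted to NA before averaging below. A quick sanity check (a sketch):

# Sketch: count how many -Inf values the log transform produced, by condition
dcp %>%
  group_by(condition) %>%
  summarize(n_inf_FS = sum(is.infinite(log.FS)),
            n_inf_BS = sum(is.infinite(log.BS)))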

Do conditional probabilities differ by condition?

dcp.item = dcp %>%
  gather(variable, value, 6:9) %>%
  mutate(value = ifelse(is.infinite(value), NA, value)) %>%
  group_by(condition, labSubj, cue, variable) %>%
  summarize(value = mean(value, na.rm = T)) %>%
  group_by(labSubj, condition, variable) %>%
  summarize(value = mean(value, na.rm = T))

dcp.means = dcp.item %>%
  filter(is.finite(value)) %>%
  group_by(condition, variable) %>%
  multi_boot_standard(col = "value", na.rm = T)

ggplot(dcp.means, aes(x = condition, y = mean, fill = condition)) +
    facet_wrap(~variable, scales = "free") +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin = ci_lower)) +
    ggtitle("Measures of forward/backward probability") +
    theme_bw() +
    theme(legend.position = "none")

No difference here for either backward or forward probability.

Models

First, set up the contrasts.

contrasts(dcp$condition) <- c(.5, -.5, 0)
colnames(contrasts(dcp$condition)) <- c('anodalVScathodal',
'experimentalVSsham')
kable(contrasts(dcp$condition))
           anodalVScathodal   experimentalVSsham
anodal                  0.5           -0.4082483
cathodal               -0.5           -0.4082483
sham                    0.0            0.8164966

FW probability

Descriptive statistics

dcp.means %>%
  filter(variable == "FS") %>%
  mutate(mean = round(mean,2))
## # A tibble: 3 x 5
## # Groups:   condition, variable [3]
##   condition variable ci_lower ci_upper  mean
##   <fct>     <chr>       <dbl>    <dbl> <dbl>
## 1 anodal    FS         0.0424   0.0568  0.05
## 2 cathodal  FS         0.0425   0.0567  0.05
## 3 sham      FS         0.0413   0.0567  0.05

Model

M9a <- lmer(scale(FS) ~ condition+ (1|labSubj) + (condition|cue), data = dcp)

FS.base <- lmer(scale(FS) ~ 1 + (1|labSubj) + (condition|cue), data = dcp)

kable(tidy(anova(M9a, FS.base, test = "Chi")))
term        df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
FS.base      9   23657.39   23720.83   -11819.69   23639.39          NA       NA          NA
M9a         11   23661.37   23738.91   -11819.68   23639.37   0.0188213        2   0.9906335

BW probability

Descriptive statistics

dcp.means %>%
  filter(variable == "BS") %>%
  mutate(mean = round(mean,2))
## # A tibble: 3 x 5
## # Groups:   condition, variable [3]
##   condition variable ci_lower ci_upper  mean
##   <fct>     <chr>       <dbl>    <dbl> <dbl>
## 1 anodal    BS         0.0378   0.0542  0.05
## 2 cathodal  BS         0.0363   0.0514  0.04
## 3 sham      BS         0.0326   0.0470  0.04

Model

M9b <- lmer(scale(BS) ~ condition+ (1|labSubj) + (association.num*condition|cue), data = dcp)

BS.base <- lmer(scale(BS) ~ 1 + (1|labSubj) + (association.num*condition|cue), data = dcp)

kable(tidy(anova(M9b, BS.base, test = "Chi")))
term        df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
BS.base     24   23367.64   23536.82   -11659.82   23319.64          NA       NA          NA
M9b         26   23369.41   23552.69   -11658.71   23317.41    2.228804        2   0.3281114

Cue-associate similarity

This is the existing plot in the paper

d.all %>%
  select(condition, taxInclusive, partsInclusive, thematic, 
                       action, descriptor, holonym, metaphor) %>%
  gather(word_type, value, -1) %>%
  group_by(condition, word_type) %>%
  summarise(n= sum(value)) %>%
  mutate(freq = n / sum(n)) %>%
  ungroup() %>%
  mutate(word_type = as.factor(word_type),
         word_type = fct_relevel(word_type, "thematic", "taxInclusive", "partsInclusive",
                                 "holonym", "action", "descriptor", "metaphor"),
         condition = fct_relevel(condition, "anodal", "sham", "cathodal")) %>%
  ggplot(aes(y = freq, fill = condition, x = word_type)) +
  ylab("Proportion of listed associates")+
  ylim(0,.4)+
  geom_bar(stat = "identity", position = "dodge") +
    theme(axis.text.x = element_text(angle = 60, hjust = 1))

Associate co-occurrences

Response networks

A third approach: I constructed a network for each cue in each condition. Nodes are the associates given by participants, and a link is added between two nodes if a participant listed both associates; edge weights are the number of participants who listed both. If participants are giving more idiosyncratic responses in the cathodal group, we should expect less centrality in the cathodal networks. In the limit, if participants give ONLY idiosyncratic responses, the network should look like a set of disconnected clusters; conversely, if everyone gives the same responses, it should look like a densely connected hairball.
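As a toy illustration of this construction (made-up word pairs, not the study data), the weighted graph can be built with igraph in the same way as in the helper functions below:

# Toy sketch: each row is a pair of associates listed by the same participant;
# the edge weight is the number of participants who listed both words
toy_pairs <- data.frame(a1 = c("wheel", "wheel", "wheel", "bell"),
                        a2 = c("pedal", "pedal", "helmet", "basket"))
toy_counts <- count(toy_pairs, a1, a2)
toy_graph <- graph_from_data_frame(select(toy_counts, a1, a2), directed = FALSE)
E(toy_graph)$weight <- toy_counts$n  # "wheel--pedal" gets weight 2, the other edges weight 1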

First, here are the graphs for “bicycle” in the cathodal and anodal conditions.

It looks like participants in the cathodal condition are giving more idiosyncratic responses in general. In principle, forward and backward probabilities should capture this.

Modularity setup

# get all pairs
all.pairs = d.all %>%
  #filter(association.num < 6) %>%
  filter(as.character(target) != as.character(cue)) %>%
  group_by(labSubj, cue, condition) %>%
  do(m = data.table(t(combn(.$target, 2)))) %>%
  ungroup() %>%
  unnest() %>%
  rename(a1 = V1,
         a2 = V2)

# modularity function
get_modularity <- function(df){
    counts <- count(df, a1, a2) 
    graph <- counts %>%
      select(a1, a2) %>%
      graph_from_data_frame(directed = FALSE) %>%
      simplify() 
    
    E(graph)$weight=counts$n # add in weights
    
    clustering = cluster_leading_eigen(graph, 
                              options = list(maxiter=1000000), 
                              weights = counts$n)
    
    closeness = mean(estimate_closeness(graph, cutoff= 100))
    betweeness = mean(estimate_betweenness(graph, cutoff= 100),  
                      weights = counts$n)
    degree = mean(igraph::degree(graph))
    weighted_degree = mean(igraph::graph.strength(graph))

    data.frame(Q = round(clustering$modularity,4), 
               n.groups = round(length(clustering),4),
               closeness  = round(closeness, 4),
               betweeness  = round(betweeness, 4),
               weighted_degree = round(weighted_degree, 4),
               degree = mean(degree))
}
getCathodal <- function() {    
  # single item example
  test = all.pairs %>%
    filter(cue == "bicycle" & condition == "cathodal") %>%
    filter(a1 != a2)  %>%
      mutate(a1 = str_trim(a1),
             a2 = str_trim(a2)) 
  
  counts.ex <- count(test, a1, a2) 
  
  v.attr = d.all %>%
    filter(as.character(target) != as.character(cue)) %>%
    mutate(target = str_trim(target)) %>%
    filter(cue == "bicycle" & condition == "cathodal") %>%
    distinct(target, .keep_all = T) %>%
    gather(type, value, c(10, 12,14,11,16, 17,18)) %>%
    filter(value == 1) %>%
    select(target, type) 
  
  graph <- counts.ex %>%
        select(a1, a2) %>%
        graph_from_data_frame(directed = FALSE, vertices = v.attr) %>%
        simplify() 
      
  E(graph)$weight=counts.ex$n # add in weights
  
  gc = ggplot(ggnetwork(asNetwork(graph)), 
            aes(x = x, y = y, xend = xend, yend = yend)) +
         geom_edges(aes(size = weight), color = "grey60", 
                    curvature = .15, 
                    show.legend = FALSE) +
         geom_nodes(aes(color = type), show.legend = FALSE) +
         geom_nodelabel(aes(label =  vertex.names,
                                  fill = type), 
                        show.legend = FALSE, 
                        label.padding = unit(0.2,"lines"), 
                        label.size = 0.4, size = 3,
                        force = .1) +
        ggtitle("Cathodal Stimulation") +
        theme_blank()
  
  gt <- ggplot_gtable(ggplot_build(gc))
  gt$layout$clip[gt$layout$name == "panel"] <- "off"
  gt
}
 
getAnodal <- function() {    
  test = all.pairs %>%
    filter(cue == "bicycle" & condition == "anodal") %>%
    filter(a1 != a2)  %>%
    mutate(a1 = str_trim(a1),
          a2 = str_trim(a2)) 
  
  counts.ex <- count(test, a1, a2) 
  
  v.attr = d.all %>%
    filter(as.character(target) != as.character(cue)) %>%
    mutate(target = str_trim(target)) %>%
    filter(cue == "bicycle" & condition == "anodal") %>%
    distinct(target, .keep_all = T) %>%
    gather(type, value, c(10, 12, 14, 11, 16, 17,18)) %>%
    filter(value == 1) %>%
    select(target, type)  %>%
    mutate(type =  fct_recode(type, 
                              "Part" = "partsInclusive",
                              "Taxonomic" = "taxInclusive",
                              "Action" = "action",
                              "Thematic" = "thematic",
                              "Descriptor" = "descriptor")) 
  
  
  graph <- counts.ex %>%
        select(a1, a2) %>%
        graph_from_data_frame(directed = FALSE, vertices = v.attr) %>%
        simplify() 
      
  E(graph)$weight=counts.ex$n # add in weights
  
  ga = ggplot(ggnetwork(asNetwork(graph)), 
            aes(x = x, y = y, xend = xend, yend = yend)) +
         geom_edges(aes(size = weight), color = "grey60", 
                    curvature = .15, 
                    show.legend = FALSE) +
         geom_nodes(aes(color = type), size = 3) +
         geom_nodelabel(aes(label =  vertex.names , fill = type), 
                        show.legend = FALSE, 
                        label.padding = unit(0.2,"lines"), 
                        label.size = 0.4, size = 3) +
        ggtitle("Anodal Stimulation") +
        theme_blank()  +
        scale_fill_discrete(name = "New Legend Title")
  
  ga    
  
  #gt <- ggplot_gtable(ggplot_build(ga))
  #gt$layout$clip[gt$layout$name == "panel"] <- "off"
  #gt
}

#multiplot(grid.draw(getCathodal()), grid.draw(getAnodal()), cols=2)
cath = getCathodal()
anodal = getAnodal()

#png("legend.png",width = 6, height = 6, units = 'in', res = 500) 
#grid.draw(anodal) 
#dev.off()

#png("legend.png",width = 6, height = 6, units = 'in', res = 500) 

#v.attr %>%
 # count(type) %>%
#  ggplot( aes(x = type, y = n )) +
#  geom_bar(stat= "identity", aes(fill = type)) +
#  scale_fill_discrete(name = "Similarity Relation")
#dev.off()

…it looks like the cathodal network is more modular.

Now, let’s do this across all items and quantify network structure using four measures: betweenness, mean degree, weighted mean degree, and modularity (Q).

## get all modularity
modularity = all.pairs %>%
      filter(a1 != a2)  %>%
      mutate(a1 = str_trim(a1),
             a2 = str_trim(a2)) %>%
     group_by(cue, condition) %>%
     do(get_modularity(.))


#  mean betweeness
modularity %>%
    group_by(condition) %>%
    multi_boot_standard(col = "betweeness", na.rm = T) %>%
    ggplot(aes(x = condition, y = mean, fill = condition)) +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin = ci_lower)) +
    theme_bw()  +
    ggtitle("betweeness") +
    theme(legend.position = "none")

modularity %>%
  filter(condition != "sham") %>%
  select(cue, condition, betweeness) %>%
  ungroup() %>%
  spread(condition, betweeness) %>%
  do(tidy(t.test(.$anodal, .$cathodal, paired=TRUE)))   %>%
  kable()
 estimate   statistic     p.value   parameter    conf.low   conf.high   method          alternative
-2.714844   -3.421774   0.0015021          38   -4.321003   -1.108685   Paired t-test   two.sided

#  mean degree
modularity %>%
    group_by(condition) %>%
    multi_boot_standard(col = "degree", na.rm = T) %>%
    ggplot(aes(x = condition, y = mean, fill = condition)) +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin =ci_lower)) +
    theme_bw()  +
    ggtitle("mean degree") +
    theme(legend.position = "none")

modularity %>%
  filter(condition != "sham") %>%
  select(cue, condition, degree) %>%
  ungroup() %>%
  spread(condition, degree) %>%
  do(tidy(t.test(.$anodal, .$cathodal, paired=TRUE)))   %>%
  kable()
 estimate   statistic     p.value   parameter    conf.low   conf.high   method          alternative
0.3877107    2.541442   0.0152419          38   0.0788784   0.6965431   Paired t-test   two.sided

#  weighted node degree
modularity %>%
    group_by(condition) %>%
    multi_boot_standard(col = "weighted_degree", na.rm = T) %>%
    ggplot(aes(x = condition, y = mean, fill = condition)) +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin = ci_lower)) +
    theme_bw()  +
    ggtitle("weighted_degree") +
    theme(legend.position = "none")

modularity %>%
  filter(condition != "sham") %>%
  select(cue, condition, betweeness) %>%
  ungroup() %>%
  spread(condition, betweeness) %>%
  do(tidy(t.test(.$anodal, .$cathodal, paired=TRUE)))   %>%
  kable()
 estimate   statistic     p.value   parameter    conf.low   conf.high   method          alternative
-2.714844   -3.421774   0.0015021          38   -4.321003   -1.108685   Paired t-test   two.sided

#  clustering
modularity %>%
    group_by(condition) %>%
    multi_boot_standard(col = "Q", na.rm = T) %>%
    ggplot(aes(x = condition, y = mean, fill = condition)) +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin = ci_lower)) +
    ggtitle("modularity") +
    theme_bw() +
    theme(legend.position = "none")

modularity %>%
  filter(condition != "sham") %>%
  select(cue, condition, Q) %>%
  ungroup() %>%
  spread(condition, Q) %>%
  do(tidy(t.test(.$anodal, .$cathodal, paired=TRUE)))   %>%
  kable()
  estimate   statistic     p.value   parameter     conf.low    conf.high   method          alternative
-0.0350205   -2.870941   0.0066548          38   -0.0597146   -0.0103264   Paired t-test   two.sided

Across these different measures, the anodal group is more clustered than the cathodal group, suggesting they are all sampling from a shared “concept” of the cue.

In the paper, we report weighted mean degree (a previous version reported unweighted mean degree). Here are the means:

mean.w.degree = modularity %>%
  select(cue, condition, weighted_degree) 

mean.w.degree %>%
    group_by(condition) %>%
    multi_boot_standard(col = "weighted_degree", na.rm = T) %>%
    kable()
condition    ci_lower    ci_upper        mean
anodal       9.811025   10.896244   10.342174
cathodal     9.097519    9.995119    9.527461
sham         7.063391    7.936655    7.499415

And, the model (M15a in the paper):

contrasts(mean.w.degree$condition) <-c(.5, -.5, 0)
colnames(contrasts(mean.w.degree$condition)) <- c('anodalVScathodal','experimentalVSsham')
  
M15a <- lmer(scale(weighted_degree) ~ condition + (1|cue), data = mean.w.degree)
degree.base <- lmer(scale(weighted_degree) ~ 1 + (1|cue), data = mean.w.degree)
kable(tidy(anova(M15a, degree.base, test = "Chi")))
term           df        AIC        BIC     logLik   deviance   statistic   Chi.Df   p.value
degree.base     3   334.8616   343.1482  -164.4308   328.8616          NA       NA        NA
M15a            5   250.9646   264.7754  -120.4823   240.9646    87.89707        2         0

names =  rownames(summary(M15a)$coefficients)
coefficients = summary(M15a)$coefficients %>%
     as.data.frame() %>%
     mutate(comparision = names) %>%
     rename(SE = `Std. Error`) %>%
     filter(comparision == "conditionanodalVScathodal")

print(paste(round(coefficients$Estimate, 2),
            round(coefficients$Estimate + (coefficients$SE * 1.96),2),
            round(coefficients$Estimate - (coefficients$SE * 1.96),2)))
## [1] "0.42 0.66 0.19"

Coefficient for anodalVScathodal: b = 0.42, 95% CI [0.19, 0.66].
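Since this estimate-plus-Wald-CI extraction is repeated for several models below, a small helper could replace the copy-pasted blocks (a sketch; wald_ci is a hypothetical name, and the interval is the same Estimate ± 1.96 * SE used above):

# Hypothetical helper: pull a fixed effect and its Wald 95% CI from an lmer fit
wald_ci <- function(model, term) {
  co  <- summary(model)$coefficients
  est <- co[term, "Estimate"]
  se  <- co[term, "Std. Error"]
  round(c(estimate = est, ci_lower = est - 1.96 * se, ci_upper = est + 1.96 * se), 2)
}

# e.g., wald_ci(M15a, "conditionanodalVScathodal")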

Pairwise comparison models; all are significant.

summary(M15b <- lmer(scale(weighted_degree) ~ condition + (1|cue), data = mean.w.degree[mean.w.degree$condition != "cathodal",]))
## Linear mixed model fit by REML ['lmerMod']
## Formula: scale(weighted_degree) ~ condition + (1 | cue)
##    Data: mean.w.degree[mean.w.degree$condition != "cathodal", ]
## 
## REML criterion at convergence: 164.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -1.43171 -0.65568  0.02328  0.47044  2.45598 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  cue      (Intercept) 0.2888   0.5374  
##  Residual             0.2570   0.5069  
## Number of obs: 78, groups:  cue, 39
## 
## Fixed effects:
##               Estimate Std. Error t value
## (Intercept)     0.6748     0.1183   5.705
## conditionsham  -1.3497     0.1148 -11.758
## 
## Correlation of Fixed Effects:
##             (Intr)
## conditinshm -0.485
summary(M15c <- lmer(scale(weighted_degree) ~ condition + (1|cue), data = mean.w.degree[mean.w.degree$condition != "anodal",]))
## Linear mixed model fit by REML ['lmerMod']
## Formula: scale(weighted_degree) ~ condition + (1 | cue)
##    Data: mean.w.degree[mean.w.degree$condition != "anodal", ]
## 
## REML criterion at convergence: 179.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -1.60847 -0.63092 -0.02966  0.60396  1.77547 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  cue      (Intercept) 0.3671   0.6059  
##  Residual             0.3033   0.5508  
## Number of obs: 78, groups:  cue, 39
## 
## Fixed effects:
##               Estimate Std. Error t value
## (Intercept)     0.5778     0.1311   4.407
## conditionsham  -1.1557     0.1247  -9.266
## 
## Correlation of Fixed Effects:
##             (Intr)
## conditinshm -0.476
summary(M15d <- lmer(scale(weighted_degree) ~ condition + (1|cue), data = mean.w.degree[mean.w.degree$condition != "sham",]))
## Linear mixed model fit by REML ['lmerMod']
## Formula: scale(weighted_degree) ~ condition + (1 | cue)
##    Data: mean.w.degree[mean.w.degree$condition != "sham", ]
## 
## REML criterion at convergence: 204.6
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -1.88052 -0.53848 -0.05319  0.49747  2.11934 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  cue      (Intercept) 0.5315   0.7290  
##  Residual             0.4166   0.6454  
## Number of obs: 78, groups:  cue, 39
## 
## Fixed effects:
##                   Estimate Std. Error t value
## (Intercept)         0.2519     0.1559   1.616
## conditioncathodal  -0.5039     0.1462  -3.447
## 
## Correlation of Fixed Effects:
##             (Intr)
## condtncthdl -0.469

Per the reviewer's comment, let's also include the clustering analysis (Q), since it is weighted (and so accounts for the “type-token issue”).

Here are the means:

mean.Q = modularity %>%
  select(cue, condition, Q) 

mean.Q %>%
    group_by(condition) %>%
    multi_boot_standard(col = "Q", na.rm = T) %>%
    kable()
condition    ci_lower    ci_upper        mean
anodal      0.2652888   0.3074526   0.2865795
cathodal    0.2962183   0.3457525   0.3216000
sham        0.3267944   0.3848122   0.3555231

And, the model (M15aa):

contrasts(mean.Q$condition) <-c(.5, -.5, 0)
colnames(contrasts(mean.Q$condition)) <- c('anodalVScathodal','experimentalVSsham')
  
M15aa <- lmer(scale(Q) ~ condition + (1|cue), data = mean.Q)
Q.base <- lmer(scale(Q) ~ 1 + (1|cue), data = mean.Q)
kable(tidy(anova(M15aa, Q.base, test = "Chi")))
term       df        AIC        BIC      logLik   deviance   statistic   Chi.Df    p.value
Q.base      3   325.0716   333.3581   -159.5358   319.0716          NA       NA         NA
M15aa       5   307.7651   321.5760   -148.8826   297.7651    21.30645        2   2.36e-05

names =  rownames(summary(M15aa)$coefficients)
coefficients = summary(M15aa)$coefficients %>%
     as.data.frame() %>%
     mutate(comparision = names) %>%
     rename(SE = `Std. Error`) %>%
     filter(comparision == "conditionanodalVScathodal")

print(paste(round(coefficients$Estimate, 2),
            round(coefficients$Estimate + (coefficients$SE * 1.96),2),
            round(coefficients$Estimate - (coefficients$SE * 1.96),2)))
## [1] "-0.4 -0.08 -0.72"

Coefficient for anodalVScathodal: b = -0.40, 95% CI [-0.72, -0.08].

Pairwise comparison models; all are significant.

summary(M15bb <- lmer(scale(Q) ~ condition + (1|cue), data = mean.Q[mean.Q$condition != "cathodal",]))
## Linear mixed model fit by REML ['lmerMod']
## Formula: scale(Q) ~ condition + (1 | cue)
##    Data: mean.Q[mean.Q$condition != "cathodal", ]
## 
## REML criterion at convergence: 202.2
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.05987 -0.51839 -0.01843  0.47131  2.24854 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  cue      (Intercept) 0.4132   0.6428  
##  Residual             0.4522   0.6725  
## Number of obs: 78, groups:  cue, 39
## 
## Fixed effects:
##               Estimate Std. Error t value
## (Intercept)    -0.3794     0.1490  -2.547
## conditionsham   0.7588     0.1523   4.983
## 
## Correlation of Fixed Effects:
##             (Intr)
## conditinshm -0.511
summary(M15cc <- lmer(scale(Q) ~ condition + (1|cue), data = mean.Q[mean.Q$condition != "anodal",]))
## Linear mixed model fit by REML ['lmerMod']
## Formula: scale(Q) ~ condition + (1 | cue)
##    Data: mean.Q[mean.Q$condition != "anodal", ]
## 
## REML criterion at convergence: 215.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -1.93726 -0.57428 -0.07673  0.54204  2.39824 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  cue      (Intercept) 0.3649   0.6041  
##  Residual             0.6122   0.7824  
## Number of obs: 78, groups:  cue, 39
## 
## Fixed effects:
##               Estimate Std. Error t value
## (Intercept)    -0.1874     0.1583  -1.184
## conditionsham   0.3748     0.1772   2.115
## 
## Correlation of Fixed Effects:
##             (Intr)
## conditinshm -0.560
summary(M15dd <- lmer(scale(Q) ~ condition + (1|cue), data = mean.Q[mean.Q$condition != "sham",]))
## Linear mixed model fit by REML ['lmerMod']
## Formula: scale(Q) ~ condition + (1 | cue)
##    Data: mean.Q[mean.Q$condition != "sham", ]
## 
## REML criterion at convergence: 210.2
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -1.73690 -0.64468  0.09469  0.54324  2.27157 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  cue      (Intercept) 0.4526   0.6728  
##  Residual             0.5057   0.7111  
## Number of obs: 78, groups:  cue, 39
## 
## Fixed effects:
##                   Estimate Std. Error t value
## (Intercept)        -0.2312     0.1568  -1.475
## conditioncathodal   0.4623     0.1610   2.871
## 
## Correlation of Fixed Effects:
##             (Intr)
## condtncthdl -0.514

Number of incorrect guesses

Here are the means:

d.bw2 = d.bw.raw %>%
       select(electrode,  labSubj,  subjCode, cue, 
             turkerGuess, isRight) %>%
        rename(condition = electrode)  %>%     
        mutate(condition = fct_recode(condition, "sham" = "na")) %>%
        mutate(turkerGuess = str_trim(turkerGuess))

cue.ns = d.bw2 %>%
  filter(isRight == 0) %>%
  group_by(condition, cue) %>%
  summarize(n = length(unique(turkerGuess))) 
  
cue.ms = cue.ns %>%
  group_by(condition) %>%
  multi_boot_standard(col = "n", na.rm = T)  

ggplot(cue.ms, aes(x = condition, y = mean, fill = condition)) +
    geom_bar(position = "dodge", stat = "identity") +
    geom_linerange(aes(ymax = ci_upper, ymin = ci_lower)) +
    ggtitle("mean number of unique wrong guesses") +
    theme_bw() +
    theme(legend.position = "none")

 kable(cue.ms)
condition   ci_lower   ci_upper       mean
anodal      41.76923   50.89744   46.33333
cathodal    45.25321   53.48910   49.30769
sham        47.56346   56.46218   52.02564

The model (M16a in the paper):

contrasts(cue.ns$condition) <- c(.5, -.5, 0)
colnames(contrasts(cue.ns$condition)) <- c('anodalVScathodal','experimentalVSsham')
  
M16a <- lmer(scale(n) ~ condition + (1|cue), data = cue.ns)
wrong.base <- lmer(scale(n) ~ 1 + (1|cue), data = cue.ns)
kable(tidy(anova(M16a, wrong.base, test = "Chi")))
term          df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
wrong.base     3   278.2508   286.5373   -136.1254   272.2508          NA       NA          NA
M16a           5   270.5438   284.3546   -130.2719   260.5438    11.70703        2   0.0028698

names =  rownames(summary(M16a)$coefficients)
coefficients = summary(M16a)$coefficients %>%
     as.data.frame() %>%
     mutate(comparision = names) %>%
     rename(SE = `Std. Error`) %>%
     filter(comparision == "conditionexperimentalVSsham")

print(paste(round(coefficients$Estimate, 2),
            round(coefficients$Estimate + (coefficients$SE * 1.96),2),
            round(coefficients$Estimate - (coefficients$SE * 1.96),2)))
## [1] "0.25 0.41 0.09"

Coefficient for experimentalVSsham: b = 0.25, 95% CI [0.09, 0.41].

M16b in the paper

M16b<- lmer(scale(n) ~ condition + (1|cue), data = cue.ns[cue.ns$condition != "cathodal",])

wrong.base <- lmer(scale(n) ~ 1 + (1|cue), data = cue.ns[cue.ns$condition != "cathodal",])
kable(tidy(anova(M16b, wrong.base, test = "Chi")))
term          df        AIC        BIC       logLik   deviance   statistic   Chi.Df     p.value
wrong.base     3   209.9556   217.0257   -101.97780   203.9556          NA       NA          NA
M16b           4   203.6639   213.0907    -97.83195   195.6639    8.291691        1   0.0039827

names =  rownames(summary(M16b)$coefficients)
coefficients = summary(M16b)$coefficients %>%
     as.data.frame() %>%
     mutate(comparision = names) %>%
     rename(SE = `Std. Error`) %>%
    slice(2)

print(paste(round(coefficients$Estimate, 2),
            round(coefficients$Estimate + (coefficients$SE * 1.96),2),
            round(coefficients$Estimate - (coefficients$SE * 1.96),2)))
## [1] "0.4 0.65 0.14"

Condition coefficient: b = 0.40, 95% CI [0.14, 0.65].

M16c in the paper

M16c<- lmer(scale(n) ~ condition + (1|cue), data = cue.ns[cue.ns$condition != "anodal",])
wrong.base <- lmer(scale(n) ~ 1 + (1|cue), data = cue.ns[cue.ns$condition != "anodal",])
kable(tidy(anova(M16c, wrong.base, test = "Chi")))
term          df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
wrong.base     3   202.7162   209.7863   -98.35808   196.7162          NA       NA          NA
M16c           4   202.1227   211.5495   -97.06133   194.1227     2.59351        1   0.1073023

names =  rownames(summary(M16c)$coefficients)
coefficients = summary(M16c)$coefficients %>%
     as.data.frame() %>%
     mutate(comparision = names) %>%
     rename(SE = `Std. Error`) %>%
     slice(2)

print(paste(round(coefficients$Estimate, 2),
            round(coefficients$Estimate + (coefficients$SE * 1.96),2),
            round(coefficients$Estimate - (coefficients$SE * 1.96),2)))
## [1] "0.2 0.45 -0.04"

Condition coefficient: b = 0.20, 95% CI [-0.04, 0.45].

M16d in the paper

M16d<- lmer(scale(n) ~ condition + (1|cue), data = cue.ns[cue.ns$condition != "sham",])
wrong.base <- lmer(scale(n) ~ 1 + (1|cue), data = cue.ns[cue.ns$condition != "sham",])
kable(tidy(anova(M16d, wrong.base, test = "Chi")))
term          df        AIC        BIC      logLik   deviance   statistic   Chi.Df     p.value
wrong.base     3   181.2404   188.3105   -87.62021   175.2404          NA       NA          NA
M16d           4   177.5321   186.9590   -84.76607   169.5321    5.708273        1   0.0168851

names =  rownames(summary(M16d)$coefficients)
coefficients = summary(M16d)$coefficients %>%
     as.data.frame() %>%
     mutate(comparision = names) %>%
     rename(SE = `Std. Error`) %>%
    slice(2)

print(paste(round(coefficients$Estimate, 2),
            round(coefficients$Estimate + (coefficients$SE * 1.96),2),
            round(coefficients$Estimate - (coefficients$SE * 1.96),2)))
## [1] "0.22 0.39 0.04"

Condition coefficient: b = 0.22, 95% CI [0.04, 0.39].