Task: Participants were presented with 39 picture cues and asked to produce 4-8 associates for each. Participants received either anodal (up-regulating), cathodal (down-regulating), or sham tDCS. In a separate experiment, turkers were given the lists of associates produced by the participants in the tDCS experiment and asked to guess the picture.

Question: Does tDCS condition affect (1) the types of associates participants produce at the word level and (2) the relationship between associates? Second, do these measures differ as a function of how accurately turkers later identify the cue?


Read in data

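All of the chunks below assume a handful of packages are already loaded (a minimal setup sketch; the original source presumably loads these in an earlier setup chunk). multi_boot_standard() comes from the langcog package, and tidy() on mixed-model fits from broom.mixed (older versions of broom also handled merMod objects).

library(tidyverse)    # dplyr, tidyr, forcats, ggplot2
library(lme4)         # glmer() / lmer()
library(broom.mixed)  # tidy() for mixed-model fits
library(knitr)        # kable()
library(langcog)      # multi_boot_standard()
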
# tdcs data
d.all = read.csv("../data/labDataPlusNorms.csv") %>%
  rename(condition = electrode,
         association.num = mention) %>%
  select(1:4,7,9:19,21,22)

# turker accuracy data
d.bw = read.csv("../data/backALLfinal_withALLSimilaritiesPlusNorms.csv") %>%  
  rename(condition = electrode) %>%
  select(condition, cue, labSubj, subjCode, isRight, similarity2)

Accuracy

Original analysis

First, we get weights on the six most frequent relation types by predicting accuracy in the turker data with the sham group.

d.wide = d.all %>%
  gather(word_type, value, 6:16) %>%
  group_by(labSubj, word_type, cue, condition) %>%
  summarize(n = sum(value)) %>%
  spread(word_type,n) %>%
  ungroup() %>%
  mutate(condition = fct_rev(condition)) %>%
  select(condition, cue, labSubj, thematic, action, descriptor, meronymPart, hypernym, taxonomic)

kable(head(d.wide))
condition cue labSubj thematic action descriptor meronymPart hypernym taxonomic
anodal alligator PAS101 4 0 0 1 1 0
anodal ball PAS101 2 0 1 0 0 0
anodal banana PAS101 2 0 1 1 2 0
anodal bed PAS101 0 2 2 0 0 0
anodal bicycle PAS101 2 1 0 1 0 0
anodal bird PAS101 3 1 0 1 0 0
d.bw1 = left_join(d.bw, d.wide)

sham.model = glmer(isRight~ scale(thematic) + scale(action) + scale(descriptor) + scale(meronymPart) + scale(hypernym) +  scale(taxonomic) + 
                     (1|subjCode)+(1|cue) +(1|labSubj),
                   filter(d.bw1, condition == "na"), family="binomial")

sham.model
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: isRight ~ scale(thematic) + scale(action) + scale(descriptor) +  
##     scale(meronymPart) + scale(hypernym) + scale(taxonomic) +  
##     (1 | subjCode) + (1 | cue) + (1 | labSubj)
##    Data: filter(d.bw1, condition == "na")
##       AIC       BIC    logLik  deviance  df.resid 
##  7421.445  7489.001 -3700.723  7401.445      6336 
## Random effects:
##  Groups   Name        Std.Dev.
##  subjCode (Intercept) 0.5468  
##  cue      (Intercept) 0.8562  
##  labSubj  (Intercept) 0.5660  
## Number of obs: 6346, groups:  subjCode, 525; cue, 39; labSubj, 15
## Fixed Effects:
##        (Intercept)     scale(thematic)       scale(action)  
##         -0.4873578          -0.1357493           0.1385252  
##  scale(descriptor)  scale(meronymPart)     scale(hypernym)  
##         -0.0542950           0.2839048           0.2410480  
##   scale(taxonomic)  
##         -0.0002321
sham.weights = sham.model %>%
  tidy() %>%
  select(term, estimate) %>%
  mutate(term = unlist(lapply(strsplit(term, "\\(|\\)"), function(x) x[2]))) %>%
  filter(term != "Intercept") %>%
  mutate(sign = ifelse(estimate < 0, -1, 1))

kable(sham.weights)
term estimate sign
thematic -0.1357493 -1
action 0.1385252 1
descriptor -0.0542950 -1
meronymPart 0.2839048 1
hypernym 0.2410480 1
taxonomic -0.0002321 -1

Next we compute a score for each list of associates (one per cue per participant). We compute two scores: signed and weighted. The signed score adds +1 for each associate whose relation type has a positive weight and -1 for each associate whose relation type has a negative weight, summed across the list. The weighted score sums the estimated coefficient for each associate's relation type. Because participants produced different numbers of associates, we also normalize each score by the total number of associates given for that cue.
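For example, using the sham weights above, a six-associate list with four thematic associates (weight -0.136, sign -1) and two hypernym associates (weight 0.241, sign +1) would get a normalized signed score of (4 × -1 + 2 × +1)/6 = -0.33 and a normalized weighted score of (4 × -0.136 + 2 × 0.241)/6 ≈ -0.01.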

list.counts = d.all %>%
  select(condition, cue, labSubj, thematic, action, descriptor, 
         meronymPart, hypernym, taxonomic) %>%
  gather(word_type, value, 4:9) %>%
  filter(value > 0) %>%
  group_by(condition, labSubj, cue, word_type) %>%
  summarize(count = n()) %>%
  left_join(sham.weights, by=c("word_type" = "term")) %>%
  mutate(total = sum(count),
         normalized.counts = count/total,
         weighted.counts = count*estimate,
         signed.counts = count*sign,
         weighted.counts_normalized = normalized.counts*estimate,
         signed.counts_normalized = normalized.counts*sign) 

kable(head(list.counts))
condition labSubj cue word_type count estimate sign total normalized.counts weighted.counts signed.counts weighted.counts_normalized signed.counts_normalized
anodal PAS101 alligator hypernym 1 0.2410480 1 6 0.1666667 0.2410480 1 0.0401747 0.1666667
anodal PAS101 alligator meronymPart 1 0.2839048 1 6 0.1666667 0.2839048 1 0.0473175 0.1666667
anodal PAS101 alligator thematic 4 -0.1357493 -1 6 0.6666667 -0.5429971 -4 -0.0904995 -0.6666667
anodal PAS101 ball descriptor 1 -0.0542950 -1 3 0.3333333 -0.0542950 -1 -0.0180983 -0.3333333
anodal PAS101 ball thematic 2 -0.1357493 -1 3 0.6666667 -0.2714985 -2 -0.0904995 -0.6666667
anodal PAS101 banana descriptor 1 -0.0542950 -1 6 0.1666667 -0.0542950 -1 -0.0090492 -0.1666667
list.ms = list.counts %>%
  group_by(condition, cue, labSubj) %>%
  summarize(weighted.counts = sum(weighted.counts),
            signed.counts = sum(signed.counts),
            weighted.counts_normalized = sum(weighted.counts_normalized),
            signed.counts_normalized = sum(signed.counts_normalized))

kable(head(list.ms))
condition cue labSubj weighted.counts signed.counts weighted.counts_normalized signed.counts_normalized
anodal alligator PAS101 -0.0180442 -2 -0.0030074 -0.3333333
anodal alligator PAS103 -0.5429971 -4 -0.1357493 -1.0000000
anodal alligator PAS105 0.6188134 1 0.1237627 0.2000000
anodal alligator PAS111 0.3374524 -1 0.0482075 -0.1428571
anodal alligator PAS113 0.0781631 -2 0.0195408 -0.5000000
anodal alligator PAS115 0.4734338 1 0.0946868 0.2000000

Finally, we take the mean across participants, as a function of condition.

subjs.ms =  list.ms %>%
  group_by(labSubj, condition) %>%
  summarize(weighted.counts =  mean(weighted.counts),
            signed.counts = mean(signed.counts),
            weighted.counts_normalized = mean(weighted.counts_normalized),
            signed.counts_normalized  = mean(signed.counts_normalized)) %>%
  gather(measure, value, 3:6) 

subjs.ms %>%
  group_by(condition, measure) %>%
  multi_boot_standard(column = "value")  %>%
  ggplot(aes(fill = condition, y = mean, x = condition)) +
  facet_wrap(~measure, scales = "free_y") +
  ylab("list score") +
  geom_bar(stat = "identity", position = "dodge") +
  geom_linerange(aes(ymax = ci_upper, ymin=ci_lower),
                 position = position_dodge(width = .9)) 

Model fits

list.ms = list.ms %>%
           ungroup() %>%
           mutate(condition = fct_rev(condition))
               
summary(lmer(weighted.counts_normalized ~ condition + (1|labSubj) +  (1|cue), list.ms))
## Linear mixed model fit by REML ['lmerMod']
## Formula: weighted.counts_normalized ~ condition + (1 | labSubj) + (1 |  
##     cue)
##    Data: list.ms
## 
## REML criterion at convergence: -3769.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1417 -0.6665 -0.0399  0.6135  3.7526 
## 
## Random effects:
##  Groups   Name        Variance  Std.Dev.
##  labSubj  (Intercept) 0.0018220 0.04268 
##  cue      (Intercept) 0.0009945 0.03154 
##  Residual             0.0050894 0.07134 
## Number of obs: 1632, groups:  labSubj, 43; cue, 39
## 
## Fixed effects:
##                    Estimate Std. Error t value
## (Intercept)        0.037681   0.012510   3.012
## conditioncathodal -0.001749   0.016454  -0.106
## conditionanodal   -0.007375   0.016446  -0.448
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnc
## condtncthdl -0.636       
## conditinndl -0.637  0.484
summary(lmer(signed.counts_normalized ~ condition + (1|labSubj) +  (1|cue), list.ms))
## Linear mixed model fit by REML ['lmerMod']
## Formula: signed.counts_normalized ~ condition + (1 | labSubj) + (1 | cue)
##    Data: list.ms
## 
## REML criterion at convergence: 2083
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1090 -0.6609 -0.0325  0.6403  3.5420 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  labSubj  (Intercept) 0.05328  0.2308  
##  cue      (Intercept) 0.04280  0.2069  
##  Residual             0.18513  0.4303  
## Number of obs: 1632, groups:  labSubj, 43; cue, 39
## 
## Fixed effects:
##                    Estimate Std. Error t value
## (Intercept)       -0.111074   0.070687  -1.571
## conditioncathodal -0.002622   0.089740  -0.029
## conditionanodal   -0.006153   0.089693  -0.069
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnc
## condtncthdl -0.615       
## conditinndl -0.615  0.484

Modified categories (same as paper)

First, we get weights on the seven modified relation types by predicting accuracy in the turker data with the sham group.

d.wide = d.all %>%
    select(-6:-9,-13,-15) %>%
  gather(word_type, value, 6:12) %>%
  group_by(labSubj, word_type, cue, condition) %>%
  summarize(n = sum(value)) %>%
  spread(word_type,n) %>%
  ungroup() %>%
  mutate(condition = fct_rev(condition)) 

d.bw2 = left_join(d.bw, d.wide)

sham.model = glmer(isRight~ scale(thematic) + scale(action) + scale(descriptor) + scale(holonym) + scale(metaphor) + scale(partsInclusive) +  scale(taxInclusive)  + 
                    (1|subjCode)+(1|cue) +(1|labSubj),
                   filter(d.bw2, condition == "na"), family="binomial")

sham.model
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: isRight ~ scale(thematic) + scale(action) + scale(descriptor) +  
##     scale(holonym) + scale(metaphor) + scale(partsInclusive) +  
##     scale(taxInclusive) + (1 | subjCode) + (1 | cue) + (1 | labSubj)
##    Data: filter(d.bw2, condition == "na")
##       AIC       BIC    logLik  deviance  df.resid 
##  7412.580  7486.892 -3695.290  7390.580      6335 
## Random effects:
##  Groups   Name        Std.Dev.
##  subjCode (Intercept) 0.5489  
##  cue      (Intercept) 0.8621  
##  labSubj  (Intercept) 0.5753  
## Number of obs: 6346, groups:  subjCode, 525; cue, 39; labSubj, 15
## Fixed Effects:
##           (Intercept)        scale(thematic)          scale(action)  
##             -0.487365              -0.064971               0.207752  
##     scale(descriptor)         scale(holonym)        scale(metaphor)  
##              0.015279               0.008515              -0.063429  
## scale(partsInclusive)    scale(taxInclusive)  
##              0.352551               0.264608  
## convergence code 0; 1 optimizer warnings; 0 lme4 warnings
sham.weights = sham.model %>%
  tidy() %>%
  select(term, estimate) %>%
  mutate(term = unlist(lapply(strsplit(term, "\\(|\\)"), function(x) x[2]))) %>%
  filter(term != "Intercept") %>%
  mutate(sign = ifelse(estimate < 0, -1, 1))

kable(sham.weights)
term estimate sign
thematic -0.0649705 -1
action 0.2077522 1
descriptor 0.0152791 1
holonym 0.0085151 1
metaphor -0.0634289 -1
partsInclusive 0.3525512 1
taxInclusive 0.2646079 1
list.counts = d.all %>%
  select(condition, cue, labSubj, thematic, action, descriptor, 
         holonym, metaphor, partsInclusive, taxInclusive) %>%
  gather(word_type, value, 4:10) %>%
  filter(value > 0) %>%
  group_by(condition, labSubj, cue, word_type) %>%
  summarize(count = n()) %>%
  left_join(sham.weights, by=c("word_type" = "term")) %>%
  mutate(total = sum(count),
         normalized.counts = count/total,
         weighted.counts = count*estimate,
         signed.counts = count*sign,
         weighted.counts_normalized = normalized.counts*estimate,
         signed.counts_normalized = normalized.counts*sign) 

list.ms = list.counts %>%
  group_by(condition, cue, labSubj) %>%
  summarize(weighted.counts =  sum(weighted.counts),
            signed.counts = sum(signed.counts),
            weighted.counts_normalized = sum(weighted.counts_normalized),
            signed.counts_normalized = sum(signed.counts_normalized))

Finally, we take the mean across participants, as a function of condition.

subjs.ms =  list.ms %>%
  group_by(labSubj, condition) %>%
  summarize(weighted.counts =  mean(weighted.counts),
            signed.counts = mean(signed.counts),
            weighted.counts_normalized = mean(weighted.counts_normalized),
            signed.counts_normalized  = mean(signed.counts_normalized)) %>%
  gather(measure, value, 3:6) 

subjs.ms %>%
  group_by(condition, measure) %>%
  multi_boot_standard(column = "value")  %>%
  ggplot(aes(fill = condition, y = mean, x = condition)) +
  facet_wrap(~measure, scales = "free_y") +
  ylab("list score") +
  geom_bar(stat = "identity", position = "dodge") +
  geom_linerange(aes(ymax = ci_upper, ymin=ci_lower),
                 position = position_dodge(width = .9)) 

Model fits

list.ms = list.ms %>%
              ungroup() %>%
              mutate(condition = fct_rev(condition))
               
summary(lmer(weighted.counts_normalized ~ condition + (1|labSubj) +  (1|cue), list.ms))
## Linear mixed model fit by REML ['lmerMod']
## Formula: weighted.counts_normalized ~ condition + (1 | labSubj) + (1 |  
##     cue)
##    Data: list.ms
## 
## REML criterion at convergence: -3950.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7403 -0.6567 -0.0527  0.6315  3.2922 
## 
## Random effects:
##  Groups   Name        Variance  Std.Dev.
##  labSubj  (Intercept) 0.0015466 0.03933 
##  cue      (Intercept) 0.0007932 0.02816 
##  Residual             0.0045782 0.06766 
## Number of obs: 1633, groups:  labSubj, 43; cue, 39
## 
## Fixed effects:
##                    Estimate Std. Error t value
## (Intercept)        0.117270   0.011490  10.206
## conditioncathodal -0.002654   0.015191  -0.175
## conditionanodal   -0.007424   0.015184  -0.489
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnc
## condtncthdl -0.640       
## conditinndl -0.640  0.484
summary(lmer(signed.counts_normalized ~ condition + (1|labSubj) +  (1|cue), list.ms))
## Linear mixed model fit by REML ['lmerMod']
## Formula: signed.counts_normalized ~ condition + (1 | labSubj) + (1 | cue)
##    Data: list.ms
## 
## REML criterion at convergence: 1780.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2904 -0.6623  0.0752  0.6856  3.8538 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  labSubj  (Intercept) 0.04389  0.2095  
##  cue      (Intercept) 0.04695  0.2167  
##  Residual             0.15276  0.3908  
## Number of obs: 1633, groups:  labSubj, 43; cue, 39
## 
## Fixed effects:
##                   Estimate Std. Error t value
## (Intercept)        0.38847    0.06645   5.846
## conditioncathodal -0.11379    0.08144  -1.397
## conditionanodal   -0.06058    0.08141  -0.744
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnc
## condtncthdl -0.593       
## conditinndl -0.594  0.484

Similarity

First, we get weights on the seven modified relation types by predicting the similarity measure (similarity2), rather than accuracy, in the turker data with the sham group.

d.wide = d.all %>%
  select(-6:-9,-13,-15) %>%
  gather(word_type, value, 6:12) %>%
  group_by(labSubj, word_type, cue, condition) %>%
  summarize(n = sum(value)) %>%
  spread(word_type,n) %>%
  ungroup() %>%
  mutate(condition = fct_rev(condition)) 

d.bw3 = left_join(d.bw, d.wide)

sham.model = lmer(similarity2 ~ scale(thematic) + scale(action) + scale(descriptor) + scale(holonym) + scale(metaphor) + scale(partsInclusive) +  scale(taxInclusive) +   (1|subjCode)+(1|labSubj), filter(d.bw3, condition == "na"))

sham.model
## Linear mixed model fit by REML ['lmerMod']
## Formula: 
## similarity2 ~ scale(thematic) + scale(action) + scale(descriptor) +  
##     scale(holonym) + scale(metaphor) + scale(partsInclusive) +  
##     scale(taxInclusive) + (1 | subjCode) + (1 | labSubj)
##    Data: filter(d.bw3, condition == "na")
## REML criterion at convergence: 6048.654
## Random effects:
##  Groups   Name        Std.Dev.
##  subjCode (Intercept) 0.10395 
##  labSubj  (Intercept) 0.08088 
##  Residual             0.37639 
## Number of obs: 6346, groups:  subjCode, 525; labSubj, 15
## Fixed Effects:
##           (Intercept)        scale(thematic)          scale(action)  
##              0.582906              -0.006611               0.053630  
##     scale(descriptor)         scale(holonym)        scale(metaphor)  
##              0.039095               0.003023              -0.010686  
## scale(partsInclusive)    scale(taxInclusive)  
##              0.087380               0.049323
sham.weights = sham.model %>%
  tidy() %>%
  select(term, estimate) %>%
  mutate(term = unlist(lapply(strsplit(term, "\\(|\\)"), function(x) x[2]))) %>%
  filter(term != "Intercept") %>%
  mutate(sign = ifelse(estimate < 0, -1, 1))

kable(sham.weights)
term estimate sign
thematic -0.0066114 -1
action 0.0536298 1
descriptor 0.0390952 1
holonym 0.0030226 1
metaphor -0.0106860 -1
partsInclusive 0.0873803 1
taxInclusive 0.0493231 1
list.counts = d.all %>%
  select(condition, cue, labSubj, thematic, action, descriptor, 
         holonym, metaphor, partsInclusive, taxInclusive) %>%
  gather(word_type, value, 4:10) %>%
  filter(value > 0) %>%
  group_by(condition, labSubj, cue, word_type) %>%
  summarize(count = n()) %>%
  left_join(sham.weights, by=c("word_type" = "term")) %>%
  mutate(total = sum(count),
         normalized.counts = count/total,
         weighted.counts = count*estimate,
         signed.counts = count*sign,
         weighted.counts_normalized = normalized.counts*estimate,
         signed.counts_normalized = normalized.counts*sign) 

list.ms = list.counts %>%
  group_by(condition, cue, labSubj) %>%
  summarize(weighted.counts =  sum(weighted.counts),
            signed.counts = sum(signed.counts),
            weighted.counts_normalized = sum(weighted.counts_normalized),
            signed.counts_normalized = sum(signed.counts_normalized))

Finally, we take the mean across participants, as a function of condition.

subjs.ms =  list.ms %>%
  group_by(labSubj, condition) %>%
  summarize(weighted.counts = mean(weighted.counts),
            signed.counts = mean(signed.counts),
            weighted.counts_normalized = mean(weighted.counts_normalized),
            signed.counts_normalized  = mean(signed.counts_normalized)) %>%
  gather(measure, value, 3:6) 

subjs.ms %>%
  group_by(condition, measure) %>%
  multi_boot_standard(column = "value")  %>%
  ggplot(aes(fill = condition, y = mean, x = condition)) +
  facet_wrap(~measure, scales = "free_y") +
  ylab("list score") +
  geom_bar(stat = "identity", position = "dodge") +
  geom_linerange(aes(ymax = ci_upper, ymin=ci_lower), 
                 position = position_dodge(width = .9)) 

Model fits

list.ms = list.ms %>%
          ungroup() %>%
          mutate(condition = fct_rev(condition))
               
summary(lmer(weighted.counts_normalized ~ condition + (1|labSubj) +  (1|cue), list.ms))
## Linear mixed model fit by REML ['lmerMod']
## Formula: weighted.counts_normalized ~ condition + (1 | labSubj) + (1 |  
##     cue)
##    Data: list.ms
## 
## REML criterion at convergence: -8559
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4236 -0.6692  0.0081  0.6574  3.6138 
## 
## Random effects:
##  Groups   Name        Variance  Std.Dev.
##  labSubj  (Intercept) 9.710e-05 0.009854
##  cue      (Intercept) 4.467e-05 0.006683
##  Residual             2.708e-04 0.016455
## Number of obs: 1633, groups:  labSubj, 43; cue, 39
## 
## Fixed effects:
##                    Estimate Std. Error t value
## (Intercept)        0.035775   0.002851  12.549
## conditioncathodal -0.002664   0.003798  -0.702
## conditionanodal   -0.003059   0.003797  -0.806
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnc
## condtncthdl -0.645       
## conditinndl -0.645  0.484
summary(lmer(signed.counts_normalized ~ condition + (1|labSubj) +  (1|cue), list.ms))
## Linear mixed model fit by REML ['lmerMod']
## Formula: signed.counts_normalized ~ condition + (1 | labSubj) + (1 | cue)
##    Data: list.ms
## 
## REML criterion at convergence: 2163.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2836 -0.6590  0.0671  0.6704  3.5567 
## 
## Random effects:
##  Groups   Name        Variance Std.Dev.
##  labSubj  (Intercept) 0.05915  0.2432  
##  cue      (Intercept) 0.06010  0.2452  
##  Residual             0.19282  0.4391  
## Number of obs: 1633, groups:  labSubj, 43; cue, 39
## 
## Fixed effects:
##                   Estimate Std. Error t value
## (Intercept)        0.30356    0.07646   3.970
## conditioncathodal -0.13128    0.09429  -1.392
## conditionanodal   -0.07454    0.09425  -0.791
## 
## Correlation of Fixed Effects:
##             (Intr) cndtnc
## condtncthdl -0.597       
## conditinndl -0.597  0.484