articleID <- "1-1-2015_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Marc B. Harrison, Gobi Dasu" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Michèle Nuijten" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 120 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 220 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("10/26/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("10/16/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("07/22/19", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")

Methods summary:

A sense of power has long been tied to how we perceive one another's voices. Social hierarchy is embedded in the structure of society and shapes how individuals relate to one another. Brunswik's lens model, introduced in 1956, provides a framework for examining how hierarchy is conveyed and detected through vocal cues. In "The Sound of Power: Conveying and Detecting Hierarchical Rank Through Voice," Ko and colleagues investigated how manipulating hierarchical rank within a situation affects vocal acoustic cues. Guided by the lens model, six acoustic cues (the mean and variability of pitch, loudness, and resonance) were used to test whether speakers of different hierarchical rank differ in their voices. In the first experiment, Ko, Sadler, and Galinsky examined the vocal acoustic cues of 161 participants (80 male) before and after they were assigned a hierarchical rank. Each of the six hierarchy acoustic cues was analyzed with a 2 (condition: high vs. low rank) × 2 (speaker's sex: male vs. female) analysis of covariance, controlling for the baseline value of the respective acoustic cue.


Target outcomes:

The impact of hierarchical rank on speakers’ acoustic cues. Each of the six hierarchy-based (i.e., postmanipulation) acoustic variables was submitted to a 2 (condition: high rank, low rank) × 2 (speaker’s sex: female, male) between-subjects analysis of covariance, controlling for the corresponding baseline acoustic variable. Table 4 presents the adjusted means by condition. Condition had a significant effect on pitch, pitch variability, and loudness variability. Speakers’ voices in the high-rank condition had higher pitch, F(1, 156) = 4.48, p < .05; were more variable in loudness, F(1, 156) = 4.66, p < .05; and were more monotone (i.e., less variable in pitch), F(1, 156) = 4.73, p < .05, compared with speakers’ voices in the low-rank condition (all other Fs < 1; see the Supplemental Material for additional analyses of covariance involving pitch and loudness). (from Ko et al., 2015, p. 6; emphasis added)

The adjusted means for these analyses are reported in Table 4 of the original article.
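
For reference, each of these analyses corresponds to the following general model form, as implemented in Step 4 below (a sketch only; `cue_p` stands for the post-manipulation value of an acoustic cue, `cue` for its baseline, `plev` for rank condition, and `vsex` for speaker's sex):

# general form of each 2 (plev: high vs. low rank) x 2 (vsex: female vs. male) ANCOVA;
# fitted once per acoustic cue, with Type III sums of squares from car::Anova()
model <- aov(cue_p ~ plev + vsex + cue + plev:vsex, data = sound_power)
Anova(model, type = "III")
lsmeans(model, pairwise ~ plev, adjust = "tukey") # adjusted means by condition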


Step 1: Load packages

library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' Files
library(readxl) # import excel files
library(ReproReports) # custom report functions
library(psych) # for descriptive statistics

## load packages for ANCOVA
library(car) # for Anova() with Type III sums of squares
library(compute.es) # for effect-size calculations
library(lsmeans) # for adjusted (least-squares) means
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
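# reproCheck() compares each reported value against the value obtained here, records the
# percentage error, and classifies the comparison (e.g., MATCH, MINOR_ERROR, MAJOR_ERROR,
# or an eyeball-only check), appending a row to reportObject.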

Step 2: Load data

sound <- read_csv("data/S1_voice_level_Final.csv")
# DT::datatable(sound)

Step 3: Tidy data

With the code below, the six target outcome variables and their baseline counterparts are renamed. The suffix "_p" indicates that a vocal cue was recorded after the power manipulation.

d.tidy <- sound %>% 
    rename(pitch_mean = pitch_rmean,
         pitch_var = pitch_rvar,          
         loud_mean = intense_rmean,
         loud_var = intense_rvar, 
         res_mean = form_rmean,
         res_var = form_rvar,
         pitch_mean_p = pitch_smean,
         pitch_var_p = pitch_svar,          
         loud_mean_p = intense_smean,
         loud_var_p = intense_svar, 
         res_mean_p = form_smean,
         res_var_p = form_svar)

Step 4: Run analysis

Pre-processing

Descriptive statistics

In the paper, adjusted means are reported (see Table 4). These are reproduced in the Inferential statistics section below.
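
As an additional sanity check (not a target outcome), the raw, unadjusted post-manipulation means by condition could be inspected before turning to the adjusted means, for example:

# unadjusted post-manipulation means by rank condition (plev); rough check only --
# the target outcomes are the covariate-adjusted means computed below
d.tidy %>%
  group_by(plev) %>%
  summarise_at(vars(pitch_mean_p, pitch_var_p, loud_mean_p, loud_var_p,
                    res_mean_p, res_var_p), mean, na.rm = TRUE)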

Inferential statistics

# select relevant variables --------------------
myvars2 <- c("pitch_mean", "pitch_var", "loud_mean","loud_var","res_mean","res_var","pitch_mean_p","pitch_var_p", "loud_mean_p","loud_var_p","res_mean_p","res_var_p","plev", "vsex")
sound_power <- d.tidy[myvars2]

sound_power$plev <- as.factor(sound_power$plev)
sound_power$vsex <- as.factor(sound_power$vsex)

contrasts(sound_power$plev) <- cbind(c(1,-1))
contrasts(sound_power$vsex) <- cbind(c(-1,1))
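# note: sum-to-zero (effects) contrasts on plev and vsex ensure that the Type III
# sums of squares from car::Anova() below are interpretable (each main effect is
# evaluated averaged over the levels of the other factor).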

# ANCOVA 2x2 tests & adjusted means ------------

# model 1: pitch mean
model1 <- aov(pitch_mean_p ~ plev + vsex + pitch_mean + plev:vsex, data = sound_power)
anova1 <- Anova(model1, type="III")
anova1
## Anova Table (Type III tests)
## 
## Response: pitch_mean_p
##             Sum Sq  Df  F value  Pr(>F)    
## (Intercept)    359   1   4.2359 0.04124 *  
## plev           380   1   4.4837 0.03581 *  
## vsex           396   1   4.6687 0.03224 *  
## pitch_mean   43334   1 511.2208 < 2e-16 ***
## plev:vsex      222   1   2.6156 0.10783    
## Residuals    13223 156                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# adjusted means model 1
lsmeans(model1,
        pairwise ~ plev,
        adjust = "tukey")
## $lsmeans
##  plev lsmean   SE  df lower.CL upper.CL
##  -1      156 1.04 156      153      158
##  1       159 1.02 156      156      161
## 
## Results are averaged over the levels of: vsex 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $contrasts
##  contrast estimate   SE  df t.ratio p.value
##  -1 - 1      -3.09 1.46 156 -2.117  0.0358 
## 
## Results are averaged over the levels of: vsex
# check inferential statistics model 1
reportObject <- reproCheck(reportedValue = "1", obtainedValue = anova1["plev", "Df"], valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "156", obtainedValue = anova1["Residuals", "Df"], valueType = "df")
## [1] "MATCH for df. The reported value (156) and the obtained value (156) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "4.48", obtainedValue = anova1["plev", "F value"], valueType = "F")
## [1] "MATCH for F. The reported value (4.48) and the obtained value (4.48) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "<.05", obtainedValue = anova1["plev", "Pr(>F)"], valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# check adjusted means model 1
reportObject <- reproCheck(reportedValue = "158.61", obtainedValue = 158.6098, valueType = "mean")
## [1] "MATCH for mean. The reported value (158.61) and the obtained value (158.61) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "155.52", obtainedValue = 155.5227, valueType = "mean")
## [1] "MATCH for mean. The reported value (155.52) and the obtained value (155.52) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# model 2: pitch variability
model2 <- aov(pitch_var_p ~ plev + vsex + pitch_var + plev:vsex, data = sound_power)
anova2 <- Anova(model2, type="III")
anova2
## Anova Table (Type III tests)
## 
## Response: pitch_var_p
##               Sum Sq  Df  F value    Pr(>F)    
## (Intercept) 45439310   1 107.1878 < 2.2e-16 ***
## plev         2003882   1   4.7270    0.0312 *  
## vsex        26179681   1  61.7558 5.939e-13 ***
## pitch_var   27760551   1  65.4850 1.548e-13 ***
## plev:vsex      37857   1   0.0893    0.7655    
## Residuals   66131879 156                       
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# adjusted means model 2
lsmeans(model2,
        pairwise ~ plev,
        adjust="tukey")
## $lsmeans
##  plev lsmean   SE  df lower.CL upper.CL
##  -1     1648 73.3 156     1483     1814
##  1      1425 71.9 156     1263     1587
## 
## Results are averaged over the levels of: vsex 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $contrasts
##  contrast estimate  SE  df t.ratio p.value
##  -1 - 1        223 103 156 2.174   0.0312 
## 
## Results are averaged over the levels of: vsex
# check inferential statistics model 2
reportObject <- reproCheck(reportedValue = "1", obtainedValue = anova2["plev", "Df"], valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "156", obtainedValue = anova2["Residuals", "Df"], valueType = "df")
## [1] "MATCH for df. The reported value (156) and the obtained value (156) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "4.73", obtainedValue = anova2["plev", "F value"], valueType = "F")
## [1] "MATCH for F. The reported value (4.73) and the obtained value (4.73) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "<.05", obtainedValue = anova2["plev", "Pr(>F)"], valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# check adjusted means model 2
reportObject <- reproCheck(reportedValue = "1648.37", obtainedValue = 1648.367, valueType = "mean")
## [1] "MATCH for mean. The reported value (1648.37) and the obtained value (1648.37) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "1425.02", obtainedValue = 1425.016, valueType = "mean")
## [1] "MATCH for mean. The reported value (1425.02) and the obtained value (1425.02) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# model 3: loudness mean
model3 <- aov(loud_mean_p ~ plev + vsex + loud_mean + plev:vsex, data = sound_power)
anova3 <- Anova(model3, type="III")
anova3
## Anova Table (Type III tests)
## 
## Response: loud_mean_p
##              Sum Sq  Df  F value    Pr(>F)    
## (Intercept)  208.90   1  25.7761  1.08e-06 ***
## plev          17.93   1   2.2118 0.1389771    
## vsex         103.84   1  12.8131 0.0004591 ***
## loud_mean   1195.54   1 147.5165 < 2.2e-16 ***
## plev:vsex     11.69   1   1.4424 0.2315772    
## Residuals   1264.30 156                       
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# adjusted means model 3
lsmeans(model3,
        pairwise ~ plev,
        adjust="tukey")
## $lsmeans
##  plev lsmean    SE  df lower.CL upper.CL
##  -1     58.7 0.321 156     57.9     59.4
##  1      59.3 0.314 156     58.6     60.0
## 
## Results are averaged over the levels of: vsex 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $contrasts
##  contrast estimate    SE  df t.ratio p.value
##  -1 - 1     -0.668 0.449 156 -1.487  0.1390 
## 
## Results are averaged over the levels of: vsex
# check inferential statistics model 3
reportObject <- reproCheck(reportedValue = "<1", obtainedValue = anova3["plev", "F value"], valueType = "F", eyeballCheck = FALSE)
## [1] "MAJOR ERROR for F. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = ">.05", obtainedValue = anova3["plev", "Pr(>F)"], valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# check adjusted means model 3
reportObject <- reproCheck(reportedValue = "59.34", obtainedValue = 59.33567, valueType = "mean")
## [1] "MATCH for mean. The reported value (59.34) and the obtained value (59.34) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "58.67", obtainedValue = 58.66784, valueType = "mean")
## [1] "MATCH for mean. The reported value (58.67) and the obtained value (58.67) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# model 4: loudness variability
model4 <- aov(loud_var_p ~ plev + vsex + loud_var + plev:vsex, data=sound_power)
anova4 <- Anova(model4, type="III")
anova4
## Anova Table (Type III tests)
## 
## Response: loud_var_p
##             Sum Sq  Df  F value    Pr(>F)    
## (Intercept)  61767   1  40.8930 1.777e-09 ***
## plev          7042   1   4.6620   0.03236 *  
## vsex         29895   1  19.7923 1.634e-05 ***
## loud_var    184927   1 122.4323 < 2.2e-16 ***
## plev:vsex       40   1   0.0262   0.87160    
## Residuals   235630 156                       
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# adjusted means model 4
lsmeans(model4,
        pairwise ~ plev,
        adjust="tukey")
## $lsmeans
##  plev lsmean   SE  df lower.CL upper.CL
##  -1      183 4.38 156      174      193
##  1       197 4.30 156      187      206
## 
## Results are averaged over the levels of: vsex 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $contrasts
##  contrast estimate   SE  df t.ratio p.value
##  -1 - 1      -13.3 6.14 156 -2.159  0.0324 
## 
## Results are averaged over the levels of: vsex
# check inferential statistics model 4
reportObject <- reproCheck(reportedValue = "1", obtainedValue = anova4["plev", "Df"], valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "156", obtainedValue = anova4["Residuals", "Df"], valueType = "df")
## [1] "MATCH for df. The reported value (156) and the obtained value (156) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "4.66", obtainedValue = anova4["plev", "F value"], valueType = "F")
## [1] "MATCH for F. The reported value (4.66) and the obtained value (4.66) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "<.05", obtainedValue = anova4["plev", "Pr(>F)"], valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# check adjusted means model 4
reportObject <- reproCheck(reportedValue = "196.73", obtainedValue = 196.7301, valueType = "mean")
## [1] "MATCH for mean. The reported value (196.73) and the obtained value (196.73) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "183.48", obtainedValue = 183.4795, valueType = "mean")
## [1] "MATCH for mean. The reported value (183.48) and the obtained value (183.48) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# model 5: resonance mean
model5 <- aov(res_mean_p ~ plev + vsex + res_mean + plev:vsex, data=sound_power)
anova5 <- Anova(model5, type="III")
anova5
## Anova Table (Type III tests)
## 
## Response: res_mean_p
##              Sum Sq  Df F value    Pr(>F)    
## (Intercept)   35300   1  5.0525 0.0259916 *  
## plev             13   1  0.0019 0.9651132    
## vsex          80646   1 11.5431 0.0008630 ***
## res_mean      89319   1 12.7845 0.0004656 ***
## plev:vsex       170   1  0.0243 0.8763623    
## Residuals   1089894 156                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
lsmeans(model5,
        pairwise ~ plev,
        adjust="tukey")
## $lsmeans
##  plev lsmean   SE  df lower.CL upper.CL
##  -1     1129 9.42 156     1108     1150
##  1      1129 9.24 156     1109     1150
## 
## Results are averaged over the levels of: vsex 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $contrasts
##  contrast estimate   SE  df t.ratio p.value
##  -1 - 1     -0.579 13.2 156 -0.044  0.9651 
## 
## Results are averaged over the levels of: vsex
# check inferential statistics model 5
reportObject <- reproCheck(reportedValue = "<1", obtainedValue = anova5["plev", "F value"], valueType = "F", eyeballCheck = TRUE)
## [1] "MATCH for F. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = ">.05", obtainedValue = anova5["plev", "Pr(>F)"], valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# check adjusted means model 5
reportObject <- reproCheck(reportedValue = "1129.39", obtainedValue = 1129.384, valueType = "mean")
## [1] "MINOR_ERROR for mean. The reported value (1129.39) and the obtained value (1129.38) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "1128.81", obtainedValue = 1128.806, valueType = "mean")
## [1] "MATCH for mean. The reported value (1128.81) and the obtained value (1128.81) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# model 6: resonance variability
model6 <- aov(res_var_p ~ plev + vsex + res_var + plev:vsex, data=sound_power)
anova6 <- Anova(model6, type="III")
anova6
## Anova Table (Type III tests)
## 
## Response: res_var_p
##                 Sum Sq  Df F value    Pr(>F)    
## (Intercept) 5.3682e+09   1 28.8227 2.826e-07 ***
## plev        8.7633e+07   1  0.4705    0.4938    
## vsex        4.5357e+07   1  0.2435    0.6224    
## res_var     4.9801e+08   1  2.6739    0.1040    
## plev:vsex   1.5279e+07   1  0.0820    0.7749    
## Residuals   2.9055e+10 156                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
lsmeans(model6,
        pairwise ~ plev,
        adjust="tukey")
## $lsmeans
##  plev lsmean   SE  df lower.CL upper.CL
##  -1    43655 1541 156    40175    47134
##  1     42171 1511 156    38759    45582
## 
## Results are averaged over the levels of: vsex 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 2 estimates 
## 
## $contrasts
##  contrast estimate   SE  df t.ratio p.value
##  -1 - 1       1484 2163 156 0.686   0.4938 
## 
## Results are averaged over the levels of: vsex
# check inferential statistics model 6
reportObject <- reproCheck(reportedValue = "<1", obtainedValue = anova6["plev", "F value"], valueType = "F", eyeballCheck = TRUE)
## [1] "MATCH for F. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = ">.05", obtainedValue = anova6["plev", "Pr(>F)"], valueType = "p", eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
# check adjusted means model 6
reportObject <- reproCheck(reportedValue = "42170.78", obtainedValue = 42170.78, valueType = "mean")
## [1] "MATCH for mean. The reported value (42170.78) and the obtained value (42170.78) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "43654.54", obtainedValue = 43654.54, valueType = "mean")
## [1] "MATCH for mean. The reported value (43654.54) and the obtained value (43654.54) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."

Step 5: Conclusion

All but one of the reported test statistics and adjusted means were reproduced by the ANCOVAs in this check. The single exception was a major error for the F value of the condition effect on mean loudness (model 3). The original results section states that all F values for the non-significant tests were < 1, but for model 3 the obtained F value was 2.21. This is automatically classified as a major error, but it probably does not affect the conclusions, since the p value remains non-significant.

We contacted the first author on April 25 and both the first and last authors on May 22 to ask for assistance. As we had received no response as of July 22, we concluded this reproducibility check. The single observed discrepancy is very unlikely to be consequential for any of the authors’ original conclusions.

Author_Assistance = FALSE # was author assistance provided? (if so, enter TRUE)

Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?

# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).

locus_typo <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- 0 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- 1 # how many discrete issues were there for which you could not identify the cause

# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- NA # how many of the typographical-error issues were resolved through author assistance?
locus_specification_resolved <- NA # how many of the specification issues were resolved through author assistance?
locus_analysis_resolved <- NA # how many of the original-analysis issues were resolved through author assistance?
locus_data_resolved <- NA # how many of the data-file issues were resolved through author assistance?
locus_unidentified_resolved <- NA # how many of the issues with an unidentified cause were resolved through author assistance?

Affects_Conclusion <- FALSE # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
  filter(dummyRow == FALSE) %>% # remove the dummy row
  select(-dummyRow) %>% # remove the dummyRow indicator column
  mutate(articleID = articleID) %>% # add the articleID 
  select(articleID, everything()) # make articleID first column

# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
  finalOutcome <- "Failure without author assistance"
  if(Author_Assistance == T){
    finalOutcome <- "Failure despite author assistance"
  }
}else{
  finalOutcome <- "Success without author assistance"
  if(Author_Assistance == T){
    finalOutcome <- "Success with author assistance"
  }
}

# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)

# save report objects
if(reportType == "pilot"){
  write_csv(reportObject, "pilotReportDetailed.csv")
  write_csv(reportExtras, "pilotReportExtras.csv")
}

if(reportType == "final"){
  write_csv(reportObject, "finalReportDetailed.csv")
  write_csv(reportExtras, "finalReportExtras.csv")
}

Session information

devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
##  setting  value                       
##  version  R version 4.0.0 (2020-04-24)
##  os       macOS Catalina 10.15.4      
##  system   x86_64, darwin17.0          
##  ui       X11                         
##  language (EN)                        
##  collate  en_US.UTF-8                 
##  ctype    en_US.UTF-8                 
##  tz       Europe/London               
##  date     2020-05-14                  
## 
## ─ Packages ───────────────────────────────────────────────────────────────────
##  package      * version   date       lib
##  abind          1.4-5     2016-07-21 [1]
##  assertthat     0.2.1     2019-03-21 [1]
##  backports      1.1.6     2020-04-05 [1]
##  broom          0.5.6     2020-04-20 [1]
##  callr          3.4.3     2020-03-28 [1]
##  car          * 3.0-7     2020-03-11 [1]
##  carData      * 3.0-3     2019-11-16 [1]
##  cellranger     1.1.0     2016-07-27 [1]
##  cli            2.0.2     2020-02-28 [1]
##  colorspace     1.4-1     2019-03-18 [1]
##  compute.es   * 0.2-5     2020-04-01 [1]
##  crayon         1.3.4     2017-09-16 [1]
##  curl           4.3       2019-12-02 [1]
##  data.table     1.12.8    2019-12-09 [1]
##  DBI            1.1.0     2019-12-15 [1]
##  dbplyr         1.4.3     2020-04-19 [1]
##  desc           1.2.0     2018-05-01 [1]
##  devtools       2.3.0     2020-04-10 [1]
##  digest         0.6.25    2020-02-23 [1]
##  dplyr        * 0.8.5     2020-03-07 [1]
##  ellipsis       0.3.0     2019-09-20 [1]
##  emmeans      * 1.4.6     2020-04-19 [1]
##  estimability   1.3       2018-02-11 [1]
##  evaluate       0.14      2019-05-28 [1]
##  fansi          0.4.1     2020-01-08 [1]
##  forcats      * 0.5.0     2020-03-01 [1]
##  foreign        0.8-78    2020-04-13 [1]
##  fs             1.4.1     2020-04-04 [1]
##  generics       0.0.2     2018-11-29 [1]
##  ggplot2      * 3.3.0     2020-03-05 [1]
##  glue           1.4.0     2020-04-03 [1]
##  gtable         0.3.0     2019-03-25 [1]
##  haven        * 2.2.0     2019-11-08 [1]
##  hms            0.5.3     2020-01-08 [1]
##  htmltools      0.4.0     2019-10-04 [1]
##  httr           1.4.1     2019-08-05 [1]
##  jsonlite       1.6.1     2020-02-02 [1]
##  knitr        * 1.28      2020-02-06 [1]
##  lattice        0.20-41   2020-04-02 [1]
##  lifecycle      0.2.0     2020-03-06 [1]
##  lsmeans      * 2.30-0    2018-11-02 [1]
##  lubridate      1.7.8     2020-04-06 [1]
##  magrittr       1.5       2014-11-22 [1]
##  memoise        1.1.0     2017-04-21 [1]
##  mnormt         1.5-6     2020-02-03 [1]
##  modelr         0.1.7     2020-04-30 [1]
##  munsell        0.5.0     2018-06-12 [1]
##  mvtnorm        1.1-0     2020-02-24 [1]
##  nlme           3.1-147   2020-04-13 [1]
##  openxlsx       4.1.4     2019-12-06 [1]
##  pillar         1.4.4     2020-05-05 [1]
##  pkgbuild       1.0.7     2020-04-25 [1]
##  pkgconfig      2.0.3     2019-09-22 [1]
##  pkgload        1.0.2     2018-10-29 [1]
##  plyr           1.8.6     2020-03-03 [1]
##  prettyunits    1.1.1     2020-01-24 [1]
##  processx       3.4.2     2020-02-09 [1]
##  ps             1.3.2     2020-02-13 [1]
##  psych        * 1.9.12.31 2020-01-08 [1]
##  purrr        * 0.3.4     2020-04-17 [1]
##  R6             2.4.1     2019-11-12 [1]
##  Rcpp           1.0.4.6   2020-04-09 [1]
##  readr        * 1.3.1     2018-12-21 [1]
##  readxl       * 1.3.1     2019-03-13 [1]
##  remotes        2.1.1     2020-02-15 [1]
##  reprex         0.3.0     2019-05-16 [1]
##  ReproReports * 0.1       2020-05-06 [1]
##  rio            0.5.16    2018-11-26 [1]
##  rlang          0.4.6     2020-05-02 [1]
##  rmarkdown      2.1       2020-01-20 [1]
##  rprojroot      1.3-2     2018-01-03 [1]
##  rstudioapi     0.11      2020-02-07 [1]
##  rvest          0.3.5     2019-11-08 [1]
##  scales         1.1.0     2019-11-18 [1]
##  sessioninfo    1.1.1     2018-11-05 [1]
##  stringi        1.4.6     2020-02-17 [1]
##  stringr      * 1.4.0     2019-02-10 [1]
##  testthat       2.3.2     2020-03-02 [1]
##  tibble       * 3.0.1     2020-04-20 [1]
##  tidyr        * 1.0.2     2020-01-24 [1]
##  tidyselect     1.0.0     2020-01-27 [1]
##  tidyverse    * 1.3.0     2019-11-21 [1]
##  usethis        1.6.1     2020-04-29 [1]
##  vctrs          0.2.4     2020-03-10 [1]
##  withr          2.2.0     2020-04-20 [1]
##  xfun           0.13      2020-04-13 [1]
##  xml2           1.3.2     2020-04-23 [1]
##  xtable         1.8-4     2019-04-21 [1]
##  yaml           2.2.1     2020-02-01 [1]
##  zip            2.0.4     2019-09-01 [1]
##  source                                     
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  Github (METRICS-CARPS/CARPSreports@3277f85)
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
## 
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library