articleID <- "10-7-2014_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final' # specify the report type: 'pilot' or 'final'
pilotNames <- "Jaclyn Schwartz, Drew Tysen Dauer, Erik Santoro" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Ben Peloquin" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 420 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 240 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("10/26/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("06/20/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("06/20/18", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
The aim of “Study 1” was to test whether activating “caregiving motivation” would strengthen bias against out-groups. The authors recruited 300 participants (all Jewish mothers) for an online study, with 75 participants assigned to each of 4 conditions; 14 were excluded, leaving N = 286. The 4 conditions were created by randomly crossing two manipulations: a caregiving-salience manipulation (caregiving-salience vs. no-caregiving-salience group) and an out-group threat manipulation (out-group-threat vs. natural-threat condition). The DV, out-group bias, was operationalized with an out-group bias questionnaire. The authors normalized the out-group bias scores and then performed a factorial ANOVA with caregiving (caregiving salience vs. no caregiving salience) and threat (out-group threat vs. natural threat) as IVs and out-group bias as the DV. After finding an interaction, simple-effects analyses were performed.
The results showed a significant interaction between caregiving and threat, F(1, 282) = 4.220, p = .041, ηp2 = .014 (Fig. 1). Simple-effects analysis revealed that in the out-group-threat condition, participants who were primed for caregiving salience showed significantly higher levels of out-group bias (M = 0.851, SD = 0.528) than did the no-caregiving-salience group (M = 0.654, SD = 0.488), F(1, 282), p = .027, Cohen’s d = 0.387. In the natural-threat condition, there was no difference between participants in the caregiving-salience (M = 0.703, SD = 0.594) and no-caregiving-salience (M = 0.773, SD = 0.583) groups, F < 1. No further effects attained significance.
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export SPSS, Stata, and SAS files
library(readxl) # import Excel files
library(ReproReports) # custom report functions
library(ggplot2) # for plotting
library(ggthemes) # additional ggplot themes
library(lsr) # for cohensD() and etaSquared()
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
caregiving_salience <- read_excel("data/caregiving_salience_paper_raw_data.xlsx", sheet = 1) %>%
mutate(story=factor(story, levels=c("control", "caregiving")), # for reproducing plots need this order
tnaythreat=factor(tnaythreat, levels=c("threat", "control")))
head(caregiving_salience)
## # A tibble: 6 x 3
## story tnaythreat bias
## <fct> <fct> <dbl>
## 1 caregiving control -0.484
## 2 caregiving control 1.43
## 3 control control 1.78
## 4 control control 0.859
## 5 caregiving control 0.575
## 6 control threat 1.25
str(caregiving_salience)
## tibble [286 × 3] (S3: tbl_df/tbl/data.frame)
## $ story : Factor w/ 2 levels "control","caregiving": 2 2 1 1 2 1 2 2 2 1 ...
## $ tnaythreat: Factor w/ 2 levels "threat","control": 2 2 2 2 2 1 1 1 2 1 ...
## $ bias : num [1:286] -0.484 1.429 1.778 0.859 0.575 ...
# the data was already in long format and the bias scores were already normalized
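As a quick distributional sanity check of our own (the article's exact normalization procedure is not re-derived here, so this is eyeball-only and not part of the original analysis):

# overall mean/SD of the normalized bias scores (our addition)
caregiving_salience %>%
  summarise(mean_bias = mean(bias), sd_bias = sd(bias), n_obs = n())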
Check exclusions:
cat('Exclusions already applied (N = 300 - 14 = 286)?:', nrow(caregiving_salience) == 300 - 14)
## Exclusions already applied (N = 300 - 14 = 286)?: TRUE
The exclusions have already been performed.
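As an additional check of our own (not part of the original analysis plan), the post-exclusion cell sizes of the 2 × 2 design should sum to 286:

# count observations per cell of the 2 x 2 design (our addition)
caregiving_salience %>%
  count(story, tnaythreat)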
df_descriptives <- caregiving_salience %>%
group_by(story, tnaythreat) %>%
summarise(mean=mean(bias),
sd=sd(bias))
kable(df_descriptives, caption="Descriptive stats")
| story | tnaythreat | mean | sd |
|---|---|---|---|
| control | threat | 0.6542341 | 0.4889919 |
| control | control | 0.7735317 | 0.5834817 |
| caregiving | threat | 0.8519226 | 0.5280573 |
| caregiving | control | 0.7035284 | 0.5943702 |
descriptives_codes <- list(
# outgroup threat
c('0.851','threat', 'caregiving', 'mean'),
c('0.528','threat', 'caregiving', 'sd'),
c('0.654','threat', 'control', 'mean'),
c('0.488','threat', 'control', 'sd'),
# natural threat
c('0.703','control', 'caregiving', 'mean'),
c('0.594','control', 'caregiving', 'sd'),
c('0.773','control', 'control', 'mean'),
c('0.583','control', 'control', 'sd'))
for (i in seq_along(descriptives_codes)) {
  reported_stat <- descriptives_codes[[i]][1]
  tnaythreat_ <- descriptives_codes[[i]][2]
  story_ <- descriptives_codes[[i]][3]
  stat_type <- descriptives_codes[[i]][4] # "mean" or "sd"
  # pull the matching cell statistic from the descriptives table
  obtained_stat <- df_descriptives %>%
    filter(tnaythreat == tnaythreat_, story == story_) %>%
    ungroup() %>%
    select(all_of(stat_type)) %>%
    as.numeric()
  reportObject <- reproCheck(reportedValue = reported_stat,
                             obtainedValue = obtained_stat,
                             valueType = stat_type)
}
## [1] "MINOR_ERROR for mean. The reported value (0.851) and the obtained value (0.852) differed by 0.12%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MATCH for sd. The reported value (0.528) and the obtained value (0.528) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MATCH for mean. The reported value (0.654) and the obtained value (0.654) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MINOR_ERROR for sd. The reported value (0.488) and the obtained value (0.489) differed by 0.2%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MINOR_ERROR for mean. The reported value (0.703) and the obtained value (0.704) differed by 0.14%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MATCH for sd. The reported value (0.594) and the obtained value (0.594) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MINOR_ERROR for mean. The reported value (0.773) and the obtained value (0.774) differed by 0.13%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
## [1] "MATCH for sd. The reported value (0.583) and the obtained value (0.583) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
Reproducing Figure 1:
caregiving_salience %>%
group_by(story, tnaythreat) %>%
summarise(mean_=mean(bias),
sd_=sd(bias),
n_=n(),
se_= sd_ / sqrt(n_)) %>%
ggplot(aes(x=tnaythreat, y=mean_, fill=story)) +
geom_bar(stat='identity', position='dodge') +
geom_errorbar(aes(ymin=mean_-se_, ymax=mean_+se_), position=position_dodge(.95), width=0.25) +
labs(title="Out-group bias ratings by caregiving salience and threat condition",
y="normalized out-group bias",
x="threat condition") +
ylim(0, 1) +
theme_classic() +
theme(plot.title = element_text(hjust = 0.5))
Next, we run the factorial ANOVA with caregiving (caregiving salience vs. no caregiving salience) and threat (out-group threat vs. natural threat) as independent variables and out-group bias as the dependent variable:
aov_raw <- aov(bias ~ story * tnaythreat, data=caregiving_salience)
aov_summary <- summary(aov_raw)
The results showed a significant interaction between caregiving and threat, F(1, 282) = 4.220, p = .041, ηp2 = .014 (Fig. 1).
reportObject <- reproCheck(reportedValue="1", obtainedValue=aov_summary[[1]]$Df[3], valueType="df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue="282", obtainedValue=aov_summary[[1]]$Df[4], valueType="df")
## [1] "MATCH for df. The reported value (282) and the obtained value (282) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue="4.220", obtainedValue=aov_summary[[1]]$`F value`[3], valueType="F")
## [1] "MATCH for F. The reported value (4.22) and the obtained value (4.22) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue="0.041", obtainedValue=aov_summary[[1]]$`Pr(>F)`[3], valueType="p")
## [1] "MATCH for p. The reported value (0.041) and the obtained value (0.041) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
eta_sq <- etaSquared(aov_raw, type = 2, anova = TRUE) # comparing partial eta squared
reportObject <- reproCheck(reportedValue = ".014", obtainedValue = eta_sq[7], valueType = 'pes') # eta_sq[7] is the partial eta squared for the story:tnaythreat interaction; MINOR NUMERICAL ERROR
## [1] "MINOR_ERROR for pes. The reported value (0.014) and the obtained value (0.015) differed by 7.14%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
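As a cross-check of our own (not part of the original analysis), the partial eta squared for the interaction can be computed directly from the ANOVA sums of squares as SS_effect / (SS_effect + SS_residual):

# hand computation of partial eta squared for the story:tnaythreat interaction
# rows of the aov table: story, tnaythreat, story:tnaythreat, Residuals
ss <- aov_summary[[1]]$`Sum Sq`
ss[3] / (ss[3] + ss[4]) # ~0.015, consistent with etaSquared() above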
Simple-effects analysis revealed that in the out-group-threat condition, participants who were primed for caregiving salience showed significantly higher levels of out-group bias (M = 0.851, SD = 0.528) than did the no-caregiving-salience group (M = 0.654, SD = 0.488), F(1, 282), p = .027, Cohen’s d = 0.387.
Means and SDs were checked above. Here we check the Cohen’s d:
cohendsD_threat <- caregiving_salience %>%
filter(tnaythreat=="threat") %>%
cohensD(x=bias~story, data=.)
reportObject <- reproCheck(reportedValue="0.387", obtainedValue=cohendsD_threat, valueType="d")
## [1] "MATCH for d. The reported value (0.387) and the obtained value (0.387) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
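Equivalently (a hand computation of ours), Cohen's d is the mean difference divided by the pooled standard deviation, which is what cohensD() computes by default:

# pooled-SD Cohen's d for the simple effect of story within the threat condition
threat_only <- caregiving_salience %>% filter(tnaythreat == "threat")
m <- tapply(threat_only$bias, threat_only$story, mean) # group means
s <- tapply(threat_only$bias, threat_only$story, sd)   # group SDs
n <- table(threat_only$story)                          # group sizes
s_pooled <- sqrt(((n[1] - 1) * s[1]^2 + (n[2] - 1) * s[2]^2) / (sum(n) - 2))
unname(abs(diff(m)) / s_pooled) # should be ~0.387, matching cohensD() above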
aov_summary_control <- summary(aov(bias ~ story, data=subset(caregiving_salience,
tnaythreat=='control')))
# Note: these initial analyses were incorrect (they did not match the authors' original SPSS approach; see the contrast analysis below).
# aov_summary_threat <- summary(aov(bias ~ story, data=subset(caregiving_salience,
# tnaythreat=='threat')))
# aov_summary_contrast <- summary(aov(bias~story*tnaythreat, data=caregiving_salience))
tmp <- caregiving_salience %>%
  mutate(tnaythreat = recode_factor(tnaythreat, control = "noThreat"),
         condition = factor(paste0(story, "_", tnaythreat)))
# Contrast vectors over the four cells, ordered alphabetically:
# caregiving_noThreat, caregiving_threat, control_noThreat, control_threat
threat_story <- c(0, -1, 0, 1)
nothreat_story <- c(-1, 0, 1, 0)
threat_care <- c(-1, 1, 0, 0)
threat_control <- c(0, 0, -1, 1)
contrasts(tmp$condition) <- threat_story # simple effect of story within the threat condition
aov.out <- aov(bias ~ condition, data=tmp)
summ <- summary.lm(aov.out)
# Using Tom's contrast analysis
reportObject <- reproCheck(reportedValue=".027",
                           obtainedValue=summ$coefficients[14], # Pr(>|t|) for the condition1 (threat_story) contrast
                           valueType="p")
## [1] "MATCH for p. The reported value (0.027) and the obtained value (0.027) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
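For readability, the same p-value can also be pulled out by name rather than by linear index (our refactor; it assumes the fitted contrast is labelled condition1 in the coefficient table):

# equivalent, more explicit indexing into the coefficient matrix
summ$coefficients["condition1", "Pr(>|t|)"]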
# Does not use Tom's contrast analysis
reportObject <- reproCheck(reportedValue="<1.0",
obtainedValue=aov_summary_control[[1]]$`F value`[1],
valueType="F",
eyeballCheck=TRUE)
## [1] "MATCH for F. Eyeball comparison only."
We could initially reproduce all values except for one p-value reported for a simple effects analysis. After the original authors shared SPSS syntax with us it became clear that we were implementing the analysis in a different way. When we used the original method, we were able to reproduce the p-value successfully.
Author_Assistance <- TRUE # was author assistance provided? (if so, enter TRUE)
Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?
# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).
locus_typo <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- 1 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- 0 # how many discrete issues were there for which you could not identify the cause
# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification_resolved <- 1 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis_resolved <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data_resolved <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified_resolved <- 0 # how many discrete issues were there for which you could not identify the cause
Affects_Conclusion <- 0 # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
filter(dummyRow == FALSE) %>% # remove the dummy row
select(-dummyRow) %>% # remove dummy row designation
mutate(articleID = articleID) %>% # add the articleID
select(articleID, everything()) # make articleID first column
# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
finalOutcome <- "Failure without author assistance"
if(Author_Assistance == TRUE){
finalOutcome <- "Failure despite author assistance"
}
}else{
finalOutcome <- "Success without author assistance"
if(Author_Assistance == TRUE){
finalOutcome <- "Success with author assistance"
}
}
# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)
# save report objects
if(reportType == "pilot"){
write_csv(reportObject, "pilotReportDetailed.csv")
write_csv(reportExtras, "pilotReportExtras.csv")
}
if(reportType == "final"){
write_csv(reportObject, "finalReportDetailed.csv")
write_csv(reportExtras, "finalReportExtras.csv")
}
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.0 (2020-04-24)
## os macOS Catalina 10.15.4
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/London
## date 2020-05-11
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package      * version date       lib source
## assertthat     0.2.1   2019-03-21 [1] CRAN (R 4.0.0)
## backports      1.1.6   2020-04-05 [1] CRAN (R 4.0.0)
## broom          0.5.6   2020-04-20 [1] CRAN (R 4.0.0)
## callr          3.4.3   2020-03-28 [1] CRAN (R 4.0.0)
## cellranger     1.1.0   2016-07-27 [1] CRAN (R 4.0.0)
## cli            2.0.2   2020-02-28 [1] CRAN (R 4.0.0)
## colorspace     1.4-1   2019-03-18 [1] CRAN (R 4.0.0)
## crayon         1.3.4   2017-09-16 [1] CRAN (R 4.0.0)
## DBI            1.1.0   2019-12-15 [1] CRAN (R 4.0.0)
## dbplyr         1.4.3   2020-04-19 [1] CRAN (R 4.0.0)
## desc           1.2.0   2018-05-01 [1] CRAN (R 4.0.0)
## devtools       2.3.0   2020-04-10 [1] CRAN (R 4.0.0)
## digest         0.6.25  2020-02-23 [1] CRAN (R 4.0.0)
## dplyr        * 0.8.5   2020-03-07 [1] CRAN (R 4.0.0)
## ellipsis       0.3.0   2019-09-20 [1] CRAN (R 4.0.0)
## evaluate       0.14    2019-05-28 [1] CRAN (R 4.0.0)
## fansi          0.4.1   2020-01-08 [1] CRAN (R 4.0.0)
## farver         2.0.3   2020-01-16 [1] CRAN (R 4.0.0)
## forcats      * 0.5.0   2020-03-01 [1] CRAN (R 4.0.0)
## fs             1.4.1   2020-04-04 [1] CRAN (R 4.0.0)
## generics       0.0.2   2018-11-29 [1] CRAN (R 4.0.0)
## ggplot2      * 3.3.0   2020-03-05 [1] CRAN (R 4.0.0)
## ggthemes     * 4.2.0   2019-05-13 [1] CRAN (R 4.0.0)
## glue           1.4.0   2020-04-03 [1] CRAN (R 4.0.0)
## gtable         0.3.0   2019-03-25 [1] CRAN (R 4.0.0)
## haven        * 2.2.0   2019-11-08 [1] CRAN (R 4.0.0)
## highr          0.8     2019-03-20 [1] CRAN (R 4.0.0)
## hms            0.5.3   2020-01-08 [1] CRAN (R 4.0.0)
## htmltools      0.4.0   2019-10-04 [1] CRAN (R 4.0.0)
## httr           1.4.1   2019-08-05 [1] CRAN (R 4.0.0)
## jsonlite       1.6.1   2020-02-02 [1] CRAN (R 4.0.0)
## knitr        * 1.28    2020-02-06 [1] CRAN (R 4.0.0)
## labeling       0.3     2014-08-23 [1] CRAN (R 4.0.0)
## lattice        0.20-41 2020-04-02 [1] CRAN (R 4.0.0)
## lifecycle      0.2.0   2020-03-06 [1] CRAN (R 4.0.0)
## lsr          * 0.5     2015-03-02 [1] CRAN (R 4.0.0)
## lubridate      1.7.8   2020-04-06 [1] CRAN (R 4.0.0)
## magrittr       1.5     2014-11-22 [1] CRAN (R 4.0.0)
## memoise        1.1.0   2017-04-21 [1] CRAN (R 4.0.0)
## modelr         0.1.7   2020-04-30 [1] CRAN (R 4.0.0)
## munsell        0.5.0   2018-06-12 [1] CRAN (R 4.0.0)
## nlme           3.1-147 2020-04-13 [1] CRAN (R 4.0.0)
## pillar         1.4.4   2020-05-05 [1] CRAN (R 4.0.0)
## pkgbuild       1.0.7   2020-04-25 [1] CRAN (R 4.0.0)
## pkgconfig      2.0.3   2019-09-22 [1] CRAN (R 4.0.0)
## pkgload        1.0.2   2018-10-29 [1] CRAN (R 4.0.0)
## prettyunits    1.1.1   2020-01-24 [1] CRAN (R 4.0.0)
## processx       3.4.2   2020-02-09 [1] CRAN (R 4.0.0)
## ps             1.3.2   2020-02-13 [1] CRAN (R 4.0.0)
## purrr        * 0.3.4   2020-04-17 [1] CRAN (R 4.0.0)
## R6             2.4.1   2019-11-12 [1] CRAN (R 4.0.0)
## Rcpp           1.0.4.6 2020-04-09 [1] CRAN (R 4.0.0)
## readr        * 1.3.1   2018-12-21 [1] CRAN (R 4.0.0)
## readxl       * 1.3.1   2019-03-13 [1] CRAN (R 4.0.0)
## remotes        2.1.1   2020-02-15 [1] CRAN (R 4.0.0)
## reprex         0.3.0   2019-05-16 [1] CRAN (R 4.0.0)
## ReproReports * 0.1     2020-05-06 [1] Github (METRICS-CARPS/CARPSreports@3277f85)
## rlang          0.4.6   2020-05-02 [1] CRAN (R 4.0.0)
## rmarkdown      2.1     2020-01-20 [1] CRAN (R 4.0.0)
## rprojroot      1.3-2   2018-01-03 [1] CRAN (R 4.0.0)
## rstudioapi     0.11    2020-02-07 [1] CRAN (R 4.0.0)
## rvest          0.3.5   2019-11-08 [1] CRAN (R 4.0.0)
## scales         1.1.0   2019-11-18 [1] CRAN (R 4.0.0)
## sessioninfo    1.1.1   2018-11-05 [1] CRAN (R 4.0.0)
## stringi        1.4.6   2020-02-17 [1] CRAN (R 4.0.0)
## stringr      * 1.4.0   2019-02-10 [1] CRAN (R 4.0.0)
## testthat       2.3.2   2020-03-02 [1] CRAN (R 4.0.0)
## tibble       * 3.0.1   2020-04-20 [1] CRAN (R 4.0.0)
## tidyr        * 1.0.2   2020-01-24 [1] CRAN (R 4.0.0)
## tidyselect     1.0.0   2020-01-27 [1] CRAN (R 4.0.0)
## tidyverse    * 1.3.0   2019-11-21 [1] CRAN (R 4.0.0)
## usethis        1.6.1   2020-04-29 [1] CRAN (R 4.0.0)
## utf8           1.1.4   2018-05-24 [1] CRAN (R 4.0.0)
## vctrs          0.2.4   2020-03-10 [1] CRAN (R 4.0.0)
## withr          2.2.0   2020-04-20 [1] CRAN (R 4.0.0)
## xfun           0.13    2020-04-13 [1] CRAN (R 4.0.0)
## xml2           1.3.2   2020-04-23 [1] CRAN (R 4.0.0)
## yaml           2.2.1   2020-02-01 [1] CRAN (R 4.0.0)
##
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library