articleID <- "11-12-2014 PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Michael Ko, Danielle Boles" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Tom Hardwicke" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 60 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 20 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("10/27/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("04/22/19", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("04/22/19", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
In Study 1, 150 introductory psychology students were randomly assigned to a “real” or a “hypothetical” condition. In the real condition, participants believed that they would have a real opportunity to connect with potential romantic partners. In the hypothetical condition, participants simply imagined that they were on a date. All participants were required to select their favorite profile and answer whether they were willing to exchange contact information.
We next tested our primary hypothesis that participants would be more reluctant to reject the unattractive date when they believed the situation to be real rather than hypothetical. Only 10 of the 61 participants in the hypothetical condition chose to exchange contact information with the unattractive potential date (16%). In contrast, 26 of the 71 participants in the real condition chose to exchange contact information (37%). A chi-square test of independence indicated that participants were significantly less likely to reject the unattractive potential date in the real condition compared with the hypothetical condition, X^2(1, N = 132) = 6.77, p = .009.
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' Files
library(readxl) # import excel files
library(ReproReports) # custom report functions
library(broom) # for tidying statistical test output
library(labelled) # converts SPSS's labelled vectors to R's factors
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
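ReproReports is a custom package written for these reproducibility reports, so readers without it can still follow the logic of the checks below. As a rough, hypothetical sketch of the kind of percentage-error comparison that reproCheck() records in reportObject (not the package's actual implementation; the threshold used here is an assumption for illustration only):

# Illustrative sketch only -- NOT ReproReports::reproCheck(). The real function also
# rounds the obtained value to the reported precision and appends a row to reportObject.
compareValues_sketch <- function(reportedValue, obtainedValue){
  reported <- as.numeric(reportedValue)
  pctError <- abs(obtainedValue - reported) / abs(reported) * 100
  outcome  <- if (pctError == 0) "MATCH" else if (pctError < 2) "MINOR_ERROR" else "MAJOR_ERROR" # 2% cutoff is an assumed example
  data.frame(reportedValue = reportedValue, obtainedValue = obtainedValue,
             percentageError = pctError, comparisonOutcome = outcome)
}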
# We are only looking into Study 1
d <- read_sav('data/Empathy Gap Study 1 data.sav')
d_tidy <- d %>% select(condition, exchangeinfo)
d_tidy$condition <- to_factor(d_tidy$condition)
d_tidy$exchangeinfo <- to_factor(d_tidy$exchangeinfo)
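Both variables presumably arrive from the SPSS file as labelled numerics (hence the to_factor() calls above). Assuming that encoding, an optional sanity check of the label-to-level mapping might look like this:

# Optional check of the SPSS label-to-factor conversion
labelled::val_labels(d$condition)   # named vector of the original SPSS value labels
levels(d_tidy$condition)            # factor levels after to_factor()
summary(d_tidy)                     # quick look at cell counts and any missing values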
We will first attempt to reproduce the number and percentage of participants who exchanged contact information in each condition.
Only 10 of the 61 participants in the hypothetical condition chose to exchange contact information with the unattractive potential date (16%). In contrast, 26 of the 71 participants in the real condition chose to exchange contact information (37%).
thisTable <- table(d_tidy$condition, d_tidy$exchangeinfo)
kable(thisTable)
|              | yes | no |
|---|---|---|
| hypothetical | 10 | 51 |
| real | 26 | 45 |
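The percentages quoted in the target outcomes can also be read directly off this table as row proportions; a minimal cross-check using base R:

# Row-wise percentages: proportion of each condition choosing to exchange info or not
round(prop.table(thisTable, margin = 1) * 100, 1)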
These counts and percentages match the reported values. Record them:
reportObject <- reproCheck(reportedValue = '10', obtainedValue = thisTable["hypothetical",]['yes'], valueType = 'n')
## [1] "MATCH for n. The reported value (10) and the obtained value (10) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '61', obtainedValue = sum(thisTable["hypothetical",]), valueType = 'n')
## [1] "MATCH for n. The reported value (61) and the obtained value (61) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '16', obtainedValue = thisTable["hypothetical",]['yes']/sum(thisTable["hypothetical",])*100, valueType = 'n')
## [1] "MATCH for n. The reported value (16) and the obtained value (16) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '26', obtainedValue = thisTable["real",]['yes'], valueType = 'n')
## [1] "MATCH for n. The reported value (26) and the obtained value (26) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '71', obtainedValue = sum(thisTable["real",]), valueType = 'n')
## [1] "MATCH for n. The reported value (71) and the obtained value (71) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '37', obtainedValue = thisTable["real",]['yes']/sum(thisTable["real",])*100, valueType = 'n')
## [1] "MATCH for n. The reported value (37) and the obtained value (37) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
Next we will try to reproduce the chi-square test examining the significance of the difference between conditions in exchanging contact information.
A chi-square test of independence indicated that participants were significantly less likely to reject the unattractive potential date in the real condition compared with the hypothetical condition, X^2(1, N = 132) = 6.77, p = .009.
test <- chisq.test(d_tidy$condition, d_tidy$exchangeinfo, correct = FALSE)
kable(tidy(test))
| statistic | p.value | parameter | method |
|---|---|---|---|
| 6.767375 | 0.0092839 | 1 | Pearson’s Chi-squared test |
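As a further check, the same statistic can be recovered from the contingency table alone, either by passing it to chisq.test() or via the textbook Pearson formula, sum((O - E)^2 / E):

# Cross-check: run the test on the 2x2 table directly (no continuity correction)
chisq.test(thisTable, correct = FALSE)$statistic
# Or compute Pearson's chi-square by hand from observed and expected counts
expected <- outer(rowSums(thisTable), colSums(thisTable)) / sum(thisTable)
sum((thisTable - expected)^2 / expected)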
Looks like all values match. Report:
reportObject <- reproCheck(reportedValue = '1', obtainedValue = test$parameter, valueType = 'df')
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '132', obtainedValue = length(d_tidy$condition), valueType = 'n')
## [1] "MATCH for n. The reported value (132) and the obtained value (132) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '6.77', obtainedValue = test$statistic, valueType = 'x2')
## [1] "MATCH for x2. The reported value (6.77) and the obtained value (6.77) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = '0.009', obtainedValue = test$p.value, valueType = 'p')
## [1] "MATCH for p. The reported value (0.009) and the obtained value (0.009) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
The reproducibility check was a success. Our analysis yielded the same descriptive and inferential statistics as the original paper.
Author_Assistance <- FALSE # was author assistance provided? (if so, enter TRUE)
Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?
# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).
locus_typo <- NA # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- NA # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- NA # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- NA # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- NA # how many discrete issues were there for which you could not identify the cause
# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- NA # how many discrete issues did you encounter that related to typographical errors?
locus_specification_resolved <- NA # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis_resolved <- NA # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data_resolved <- NA # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified_resolved <- NA # how many discrete issues were there for which you could not identify the cause
Affects_Conclusion <- NA # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
filter(dummyRow == FALSE) %>% # remove the dummy row
select(-dummyRow) %>% # remove dummy row designation
mutate(articleID = articleID) %>% # add the articleID
select(articleID, everything()) # make articleID first column
# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
finalOutcome <- "Failure without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Failure despite author assistance"
}
}else{
finalOutcome <- "Success without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Success with author assistance"
}
}
# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)
# save report objects
if(reportType == "pilot"){
write_csv(reportObject, "pilotReportDetailed.csv")
write_csv(reportExtras, "pilotReportExtras.csv")
}
if(reportType == "final"){
write_csv(reportObject, "finalReportDetailed.csv")
write_csv(reportExtras, "finalReportExtras.csv")
}
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.0 (2020-04-24)
## os macOS Catalina 10.15.4
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/London
## date 2020-05-06
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package * version date lib
## assertthat 0.2.1 2019-03-21 [1]
## backports 1.1.6 2020-04-05 [1]
## broom * 0.5.6 2020-04-20 [1]
## callr 3.4.3 2020-03-28 [1]
## cellranger 1.1.0 2016-07-27 [1]
## cli 2.0.2 2020-02-28 [1]
## colorspace 1.4-1 2019-03-18 [1]
## crayon 1.3.4 2017-09-16 [1]
## DBI 1.1.0 2019-12-15 [1]
## dbplyr 1.4.3 2020-04-19 [1]
## desc 1.2.0 2018-05-01 [1]
## devtools 2.3.0 2020-04-10 [1]
## digest 0.6.25 2020-02-23 [1]
## dplyr * 0.8.5 2020-03-07 [1]
## ellipsis 0.3.0 2019-09-20 [1]
## evaluate 0.14 2019-05-28 [1]
## fansi 0.4.1 2020-01-08 [1]
## forcats * 0.5.0 2020-03-01 [1]
## fs 1.4.1 2020-04-04 [1]
## generics 0.0.2 2018-11-29 [1]
## ggplot2 * 3.3.0 2020-03-05 [1]
## glue 1.4.0 2020-04-03 [1]
## gtable 0.3.0 2019-03-25 [1]
## haven * 2.2.0 2019-11-08 [1]
## highr 0.8 2019-03-20 [1]
## hms 0.5.3 2020-01-08 [1]
## htmltools 0.4.0 2019-10-04 [1]
## httr 1.4.1 2019-08-05 [1]
## jsonlite 1.6.1 2020-02-02 [1]
## knitr * 1.28 2020-02-06 [1]
## labelled * 2.3.1.9000 2020-05-06 [1]
## lattice 0.20-41 2020-04-02 [1]
## lifecycle 0.2.0 2020-03-06 [1]
## lubridate 1.7.8 2020-04-06 [1]
## magrittr 1.5 2014-11-22 [1]
## memoise 1.1.0 2017-04-21 [1]
## modelr 0.1.7 2020-04-30 [1]
## munsell 0.5.0 2018-06-12 [1]
## nlme 3.1-147 2020-04-13 [1]
## pillar 1.4.4 2020-05-05 [1]
## pkgbuild 1.0.7 2020-04-25 [1]
## pkgconfig 2.0.3 2019-09-22 [1]
## pkgload 1.0.2 2018-10-29 [1]
## prettyunits 1.1.1 2020-01-24 [1]
## processx 3.4.2 2020-02-09 [1]
## ps 1.3.2 2020-02-13 [1]
## purrr * 0.3.4 2020-04-17 [1]
## R6 2.4.1 2019-11-12 [1]
## Rcpp 1.0.4.6 2020-04-09 [1]
## readr * 1.3.1 2018-12-21 [1]
## readxl * 1.3.1 2019-03-13 [1]
## remotes 2.1.1 2020-02-15 [1]
## reprex 0.3.0 2019-05-16 [1]
## ReproReports * 0.1 2020-05-06 [1]
## rlang 0.4.6 2020-05-02 [1]
## rmarkdown 2.1 2020-01-20 [1]
## rprojroot 1.3-2 2018-01-03 [1]
## rstudioapi 0.11 2020-02-07 [1]
## rvest 0.3.5 2019-11-08 [1]
## scales 1.1.0 2019-11-18 [1]
## sessioninfo 1.1.1 2018-11-05 [1]
## stringi 1.4.6 2020-02-17 [1]
## stringr * 1.4.0 2019-02-10 [1]
## testthat 2.3.2 2020-03-02 [1]
## tibble * 3.0.1 2020-04-20 [1]
## tidyr * 1.0.2 2020-01-24 [1]
## tidyselect 1.0.0 2020-01-27 [1]
## tidyverse * 1.3.0 2019-11-21 [1]
## usethis 1.6.1 2020-04-29 [1]
## vctrs 0.2.4 2020-03-10 [1]
## withr 2.2.0 2020-04-20 [1]
## xfun 0.13 2020-04-13 [1]
## xml2 1.3.2 2020-04-23 [1]
## yaml 2.2.1 2020-02-01 [1]
## source
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## Github (larmarange/labelled@a7878c6)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## Github (METRICS-CARPS/CARPSreports@3277f85)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
##
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library