articleID <- "3-9-2014_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Katherine Hermann, Benjamin deMayo" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Emily Hembacher" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 240 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 240 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("11/04/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("06/13/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("06/14/18", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")

Methods summary:

This analysis pertains to Experiment 2 of de la Fuente et al. (2014). In that experiment, the researchers tested whether temporal focus differs between Moroccan and Spanish cultures, hypothesizing that Moroccans are more past-focused, whereas Spaniards are more future-focused. Two groups of participants (\(N = 40\) Moroccan and \(N = 40\) Spanish) completed a temporal-focus questionnaire containing past-focused (“PAST”) and future-focused (“FUTURE”) items. Participants rated each item on a 5-point Likert scale, with lower scores indicating less agreement and higher scores indicating greater agreement. The authors then performed a mixed-design ANOVA with agreement score as the dependent variable, group (Moroccan vs. Spanish) as a between-subjects factor, and temporal focus (past vs. future) as a within-subjects factor. In addition, the authors performed unpaired two-sample t-tests to determine whether the two groups differed significantly in agreement scores for the PAST questions and for the FUTURE questions.


Target outcomes:

According to a mixed analysis of variance (ANOVA) with group (Spanish vs. Moroccan) as a between-subjects factor and temporal focus (past vs. future) as a within-subjects factor, temporal focus differed significantly between Spaniards and Moroccans, as indicated by a significant interaction of temporal focus and group, F(1, 78) = 19.12, p = .001, ηp2 = .20 (Fig. 2). Moroccans showed greater agreement with past-focused statements than Spaniards did, t(78) = 4.04, p = .001, and Spaniards showed greater agreement with future-focused statements than Moroccans did, t(78) = −3.32, p = .001. (de la Fuente et al., 2014, p. 1685).


Step 1: Load packages

library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' files
library(readxl) # import Excel files
library(ReproReports) # custom report functions
library(afex) # anova functions
library(ez) # anova functions 2
library(scales) # for plotting
std.err <- function(x) sd(x)/sqrt(length(x)) # standard error
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)

Step 2: Load data

Initially we encountered problems reproducing some degrees-of-freedom values. After contacting the original authors, it emerged that the data posted on OSF contained two typos in the “participant” column, which apparently were not present in the data used to conduct the analyses reported in the paper. Because of these typos, two participants appeared to be missing data for one level of the comparison variable and were therefore dropped from the ANOVA and t-tests that we originally conducted. Once we fixed this problem, the degrees of freedom reported in the paper matched our analyses. Below, rather than loading the original data file, we load a file in which the participant typos have been corrected (a diagnostic sketch for spotting this kind of problem follows the data-loading code).

#data_path <- 'data/DeLaFuenteEtAl_2014_RawData.xls'

data_path <- 'data/DeLaFuenteEtAl_2014_RawData-OSF-corrected_EHedit.xls' 
# note that I had to add an extra correction to the corrected file the authors sent: they fixed one of the typos they described in the email but not the other

d <- read_excel(data_path, sheet=3)
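
As a quick diagnostic (a sketch that is not part of the original pipeline), we can check for the kind of participant-ID typo described above: every participant should contribute items to both levels of subscale, so any participant appearing under only one level signals a problem.

# Diagnostic sketch: list participants who are missing one of the two
# subscale levels. With the corrected file this should return zero rows.
d %>%
  group_by(group, participant, subscale) %>%
  summarise(n_items = n()) %>%
  group_by(group, participant) %>%
  summarise(n_subscales = n()) %>%
  filter(n_subscales < 2)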

Step 3: Tidy data

# rename agreement
colnames(d)[5] <- 'agreement'

# change variable formats 
d$group <- factor(d$group)
d$participant <- d$participant # kept numeric for now; converted to a factor after re-numbering below
d$subscale <- factor(d$subscale) # "subscale" is the temporal focus
d$item <- factor(d$item)
d$agreement <- as.integer(d$agreement)

#rename participants so that identifiers are distinct in the two groups
moroccan_ids <- d$participant[d$group == 'Moroccan']
d$participant[d$group == 'young Spaniard'] <- d$participant[d$group == 'young Spaniard'] + tail(moroccan_ids, 1)
d$participant <- factor(d$participant)

# view
head(d)
## # A tibble: 6 x 5
##   group   participant subscale item                                    agreement
##   <fct>   <fct>       <fct>    <fct>                                       <int>
## 1 Morocc… 1           PAST     1. Para mí son muy importantes las tra…         4
## 2 Morocc… 1           PAST     2. Los jóvenes deben conservar las tra…         4
## 3 Morocc… 1           PAST     3. Creo que las personas eran más feli…         5
## 4 Morocc… 1           PAST     4. La juventud de hoy en día necesita …         2
## 5 Morocc… 1           PAST     5. Los ancianos saben más que los jóve…         4
## 6 Morocc… 1           PAST     6. El modo correcto de hacer las cosas…         3
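
As an additional sanity check (not in the original report), the re-numbering should leave every participant with an identifier that is unique across the two groups:

# 40 Moroccan + 40 Spanish participants should yield 80 distinct IDs.
n_distinct(d$participant) # expected: 80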

Step 4: Run analysis

Pre-processing

# aggregate the data at the participant level
#   - collapse agreement scores for items within each level of subscale into a single (mean) value
d_agg <- d %>% 
  group_by(group, participant, subscale) %>% 
  summarise(agreement_score = mean(agreement))
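
A quick check of the aggregated data (not in the original report): there should be one row per participant per subscale level.

# 80 participants x 2 subscale levels = 160 rows; columns are group,
# participant, subscale, and agreement_score.
dim(d_agg) # expected: 160 4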

Descriptive statistics

Try to recreate Figure 2:

[Figure 2 from the original article]

descript <- d_agg %>% 
  ungroup() %>%
  mutate(group = factor(group, levels = c("young Spaniard", "Moroccan")),
         subscale = factor(subscale, levels = c("PAST", "FUTURE"))) %>%
  group_by(group, subscale) %>% 
  summarise(mean_rating = mean(agreement_score), se = std.err(agreement_score)) 

descript %>%
  ggplot(aes(x = group, y = mean_rating, ymin = mean_rating - se, ymax = mean_rating + se, fill = subscale, group = subscale)) +
  theme_minimal() +
  geom_bar(position=position_dodge(), stat="identity") +
  geom_errorbar(position = position_dodge()) +
  scale_y_continuous(limits=c(2,4), oob = rescale_none)

Looks at least approximately correct.
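
For reference, the underlying descriptive statistics can also be printed as a table (a supplementary view, not part of the original report; knitr is loaded above for exactly this kind of formatting):

# Means and standard errors by group and temporal focus.
descript %>%
  kable(digits = 2, col.names = c("Group", "Temporal focus", "Mean rating", "SE"))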

reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "young Spaniard", subscale == "PAST") %>% pull(mean_rating), valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "young Spaniard", subscale == "PAST") %>% pull(se), valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "young Spaniard", subscale == "FUTURE") %>% pull(mean_rating), valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "young Spaniard", subscale == "FUTURE") %>% pull(se), valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "Moroccan", subscale == "PAST") %>% pull(mean_rating), valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "Moroccan", subscale == "PAST") %>% pull(se), valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "Moroccan", subscale == "FUTURE") %>% pull(mean_rating), valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "figure", obtainedValue = filter(descript, group == "Moroccan", subscale == "FUTURE") %>% pull(se), valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."

Inferential statistics

In this section we will check the reproducibility of the results of Experiment 2, described as follows:

According to a mixed analysis of variance (ANOVA) with group (Spanish vs. Moroccan) as a between-subjects factor and temporal focus (past vs. future) as a within-subjects factor, temporal focus differed significantly between Spaniards and Moroccans, as indicated by a significant interaction of temporal focus and group, F(1, 78) = 19.12, p = .001, ηp2 = .20 (Fig. 2).

# ANALYSIS 1: Mixed-design ANOVA with group as the between-subjects factor and subscale (temporal focus) as the within-subjects factor, with participant as the error term and partial eta-squared as the effect size
aov_group_subscale <- aov_car(agreement_score ~ group*subscale + Error(participant/subscale), data=d_agg, anova_table=list(es="pes"), type = "III")
summary(aov_group_subscale) # detailed print-out
## 
## Univariate Type III Repeated-Measures ANOVA Assuming Sphericity
## 
##                 Sum Sq num Df Error SS den Df   F value    Pr(>F)    
## (Intercept)    1582.74      1   16.341     78 7554.7954 < 2.2e-16 ***
## group             0.60      1   16.341     78    2.8811  0.093611 .  
## subscale          4.15      1   39.987     78    8.0980  0.005659 ** 
## group:subscale    9.81      1   39.987     78   19.1445 3.713e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
print(aov_group_subscale) # truncated output but includes partial eta-squared's
## Anova Table (Type III tests)
## 
## Response: agreement_score
##           Effect    df  MSE         F  pes p.value
## 1          group 1, 78 0.21    2.88 + .036    .094
## 2       subscale 1, 78 0.51   8.10 ** .094    .006
## 3 group:subscale 1, 78 0.51 19.14 *** .197   <.001
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
df_group_subscale <- aov_group_subscale$anova_table$`den Df`[3]
F_group_subscale <- aov_group_subscale$anova_table$F[3]
p_group_subscale <- aov_group_subscale$anova_table$`Pr(>F)`[3]
pes_group_subscale <- aov_group_subscale$anova_table$pes[3]

reportObject <- reproCheck(reportedValue = "78", obtainedValue = df_group_subscale, valueType = 'df')
## [1] "MATCH for df. The reported value (78) and the obtained value (78) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "19.12", obtainedValue = F_group_subscale, valueType = 'F') # group:subscale F
## [1] "MINOR_ERROR for F. The reported value (19.12) and the obtained value (19.14) differed by 0.1%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = ".001", obtainedValue = p_group_subscale, valueType = 'p') # group:subscale p
## [1] "MAJOR_ERROR for p. The reported value (0.001) and the obtained value (0) differed by 100%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = ".20", obtainedValue = pes_group_subscale, valueType = 'pes') # partial eta-squared
## [1] "MATCH for pes. The reported value (0.2) and the obtained value (0.2) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."

There is a major error for the p-value here: it does not equal .001 as reported in the article, but is actually lower.
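
For reference, the exact obtained p-value can be printed without rounding to three decimal places (a supplementary check, not in the original output; it should match the 3.713e-05 shown in the detailed ANOVA print-out above):

# Exact p-value for the group:subscale interaction, in scientific notation.
format(p_group_subscale, scientific = TRUE, digits = 4)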

Moroccans showed greater agreement with past-focused statements than Spaniards did, t(78) = 4.04, p = .001,

moroccan_past <- d_agg %>% filter(subscale=='PAST', group=='Moroccan')
spanish_past <- d_agg %>% filter(subscale=='PAST', group=='young Spaniard')

t_past <- t.test(moroccan_past$agreement_score, spanish_past$agreement_score, paired=FALSE, var.equal=TRUE) # two-tailed Student's t-test

df_ttest_past <- unname(t_past$parameter)
t_ttest_past <- unname(t_past$statistic)
p_ttest_past <- unname(t_past$p.value)

# compare reported with two-tailed Student's result
reportObject <- reproCheck(reportedValue = "78", obtainedValue = df_ttest_past, valueType = 'df') # df
## [1] "MATCH for df. The reported value (78) and the obtained value (78) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "4.04", obtainedValue = t_ttest_past, valueType = 't') # t-statistic
## [1] "MINOR_ERROR for t. The reported value (4.04) and the obtained value (4) differed by 0.99%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = ".001", obtainedValue = p_ttest_past, valueType = 'p') # p
## [1] "MAJOR_ERROR for p. The reported value (0.001) and the obtained value (0) differed by 100%. Note that the obtained value was rounded to 3 decimal places to match the reported value."

There is another major error for a p-value here: it does not equal .001 as reported in the article, but is actually lower.
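
Again, the exact obtained p-value can be printed directly (a supplementary check, not in the original output):

# Exact p-value for the PAST-items t-test, in scientific notation.
format(p_ttest_past, scientific = TRUE, digits = 4)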

and Spaniards showed greater agreement with future-focused statements than Moroccans did, t(78) = −3.32, p = .001. (de la Fuente et al., 2014, p. 1685)

moroccan_future <- d_agg %>% filter(subscale=='FUTURE', group=='Moroccan')
spanish_future <- d_agg %>% filter(subscale=='FUTURE', group=='young Spaniard')

t_future <- t.test(moroccan_future$agreement_score, spanish_future$agreement_score, paired=FALSE, var.equal=TRUE) # two-tailed Student's t-test

df_ttest_future <- unname(t_future$parameter)
t_ttest_future <- unname(t_future$statistic)
p_ttest_future <- unname(t_future$p.value)

# compare reported with two-tailed Student's result
reportObject <- reproCheck(reportedValue = "78", obtainedValue = df_ttest_future, valueType = 'df') # df
## [1] "MATCH for df. The reported value (78) and the obtained value (78) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "-3.32", obtainedValue = t_ttest_future, valueType = 't') # t-statistic
## [1] "MINOR_ERROR for t. The reported value (-3.32) and the obtained value (-3.36) differed by 1.2%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = ".001", obtainedValue = p_ttest_future, valueType = 'p') # p
## [1] "MATCH for p. The reported value (0.001) and the obtained value (0.001) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."

Step 5: Conclusion

We could not reproduce some reported values.

On the first pass of the ANOVA, we noted that two participants (participant 25 in both the Moroccan and Spanish samples) had missing data for the PAST condition. The degrees of freedom reported in the paper suggested that these participants had been included in that analysis. When we followed up with the authors, they clarified that there were typos in the data file they had posted on OSF: the wrong participant ID had been entered on some lines of data, making it appear that two participants were missing data. Once we corrected the typos, the degrees of freedom in our analyses matched those in the paper.

Two major errors with p-values remained. The authors reported that p = .001, but the values we obtained were lower than this, even after accounting for rounding error. Importantly, these discrepancies clearly do not undermine the conclusions drawn in the original article.

We discussed this issue with the authors, and they said that it is their policy to report p-values smaller than .001 as p = .001. However, this could be problematic: because the p-values are not accurately reported, bias could be introduced when they are used with meta-analytic tools (e.g., p-curve).

An alternative would be to use scientific notation to concisely report exact p-values. APA style recommends using p < .001, which is less informative but at least accurate:

“When reporting p values, report exact p values (e.g., p = .031) to two or three decimal places. However, report p values less than .001 as p < .001. The tradition of reporting p values in the form p < .10, p < .05, p < .01, and so forth, was appropriate in a time when only limited tables of critical values were available.” (APA, 2009; p. 114)
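
To illustrate, here is a small helper (hypothetical, not part of the original pipeline) implementing the convention quoted above:

# Format a p-value APA-style: exact to three decimals, "p < .001" below that.
format_p <- function(p) {
  if (p < .001) "p < .001" else paste0("p = ", sub("^0", "", sprintf("%.3f", p)))
}
format_p(3.713e-05) # "p < .001"
format_p(0.031)     # "p = .031"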

Author_Assistance = TRUE # was author assistance provided? (if so, enter TRUE)

Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?

# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).

locus_typo <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- 0 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- 1 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- 1 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- 0 # how many discrete issues were there for which you could not identify the cause

# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification_resolved <- 0 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis_resolved <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data_resolved <- 1 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified_resolved <- 0 # how many discrete issues were there for which you could not identify the cause

Affects_Conclusion <- FALSE # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
  filter(dummyRow == FALSE) %>% # remove the dummy row
  select(-dummyRow) %>% # remove dummy row designation
  mutate(articleID = articleID) %>% # add the articleID 
  select(articleID, everything()) # make articleID first column

# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
  finalOutcome <- "Failure without author assistance"
  if(Author_Assistance == T){
    finalOutcome <- "Failure despite author assistance"
  }
}else{
  finalOutcome <- "Success without author assistance"
  if(Author_Assistance == T){
    finalOutcome <- "Success with author assistance"
  }
}

# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)

# save report objects
if(reportType == "pilot"){
  write_csv(reportObject, "pilotReportDetailed.csv")
  write_csv(reportExtras, "pilotReportExtras.csv")
}

if(reportType == "final"){
  write_csv(reportObject, "finalReportDetailed.csv")
  write_csv(reportExtras, "finalReportExtras.csv")
}

Session information

devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
##  setting  value                       
##  version  R version 4.0.0 (2020-04-24)
##  os       macOS Catalina 10.15.4      
##  system   x86_64, darwin17.0          
##  ui       X11                         
##  language (EN)                        
##  collate  en_US.UTF-8                 
##  ctype    en_US.UTF-8                 
##  tz       Europe/London               
##  date     2020-05-06                  
## 
## ─ Packages ───────────────────────────────────────────────────────────────────
##  package      * version    date       lib
##  abind          1.4-5      2016-07-21 [1]
##  afex         * 0.27-2     2020-03-28 [1]
##  assertthat     0.2.1      2019-03-21 [1]
##  backports      1.1.6      2020-04-05 [1]
##  boot           1.3-24     2019-12-20 [1]
##  broom          0.5.6      2020-04-20 [1]
##  callr          3.4.3      2020-03-28 [1]
##  car            3.0-7      2020-03-11 [1]
##  carData        3.0-3      2019-11-16 [1]
##  cellranger     1.1.0      2016-07-27 [1]
##  cli            2.0.2      2020-02-28 [1]
##  colorspace     1.4-1      2019-03-18 [1]
##  crayon         1.3.4      2017-09-16 [1]
##  curl           4.3        2019-12-02 [1]
##  data.table     1.12.8     2019-12-09 [1]
##  DBI            1.1.0      2019-12-15 [1]
##  dbplyr         1.4.3      2020-04-19 [1]
##  desc           1.2.0      2018-05-01 [1]
##  devtools       2.3.0      2020-04-10 [1]
##  digest         0.6.25     2020-02-23 [1]
##  dplyr        * 0.8.5      2020-03-07 [1]
##  ellipsis       0.3.0      2019-09-20 [1]
##  emmeans        1.4.6      2020-04-19 [1]
##  estimability   1.3        2018-02-11 [1]
##  evaluate       0.14       2019-05-28 [1]
##  ez           * 4.4-0      2016-11-02 [1]
##  fansi          0.4.1      2020-01-08 [1]
##  farver         2.0.3      2020-01-16 [1]
##  forcats      * 0.5.0      2020-03-01 [1]
##  foreign        0.8-78     2020-04-13 [1]
##  fs             1.4.1      2020-04-04 [1]
##  generics       0.0.2      2018-11-29 [1]
##  ggplot2      * 3.3.0      2020-03-05 [1]
##  glue           1.4.0      2020-04-03 [1]
##  gtable         0.3.0      2019-03-25 [1]
##  haven        * 2.2.0      2019-11-08 [1]
##  hms            0.5.3      2020-01-08 [1]
##  htmltools      0.4.0      2019-10-04 [1]
##  httr           1.4.1      2019-08-05 [1]
##  jsonlite       1.6.1      2020-02-02 [1]
##  knitr        * 1.28       2020-02-06 [1]
##  labeling       0.3        2014-08-23 [1]
##  lattice        0.20-41    2020-04-02 [1]
##  lifecycle      0.2.0      2020-03-06 [1]
##  lme4         * 1.1-23     2020-04-07 [1]
##  lmerTest       3.1-2      2020-04-08 [1]
##  lubridate      1.7.8      2020-04-06 [1]
##  magrittr       1.5        2014-11-22 [1]
##  MASS           7.3-51.5   2019-12-20 [1]
##  Matrix       * 1.2-18     2019-11-27 [1]
##  memoise        1.1.0      2017-04-21 [1]
##  mgcv           1.8-31     2019-11-09 [1]
##  minqa          1.2.4      2014-10-09 [1]
##  modelr         0.1.7      2020-04-30 [1]
##  munsell        0.5.0      2018-06-12 [1]
##  mvtnorm        1.1-0      2020-02-24 [1]
##  nlme           3.1-147    2020-04-13 [1]
##  nloptr         1.2.2.1    2020-03-11 [1]
##  numDeriv       2016.8-1.1 2019-06-06 [1]
##  openxlsx       4.1.4      2019-12-06 [1]
##  pillar         1.4.4      2020-05-05 [1]
##  pkgbuild       1.0.7      2020-04-25 [1]
##  pkgconfig      2.0.3      2019-09-22 [1]
##  pkgload        1.0.2      2018-10-29 [1]
##  plyr           1.8.6      2020-03-03 [1]
##  prettyunits    1.1.1      2020-01-24 [1]
##  processx       3.4.2      2020-02-09 [1]
##  ps             1.3.2      2020-02-13 [1]
##  purrr        * 0.3.4      2020-04-17 [1]
##  R6             2.4.1      2019-11-12 [1]
##  Rcpp           1.0.4.6    2020-04-09 [1]
##  readr        * 1.3.1      2018-12-21 [1]
##  readxl       * 1.3.1      2019-03-13 [1]
##  remotes        2.1.1      2020-02-15 [1]
##  reprex         0.3.0      2019-05-16 [1]
##  ReproReports * 0.1        2020-05-06 [1]
##  reshape2       1.4.4      2020-04-09 [1]
##  rio            0.5.16     2018-11-26 [1]
##  rlang          0.4.6      2020-05-02 [1]
##  rmarkdown      2.1        2020-01-20 [1]
##  rprojroot      1.3-2      2018-01-03 [1]
##  rstudioapi     0.11       2020-02-07 [1]
##  rvest          0.3.5      2019-11-08 [1]
##  scales       * 1.1.0      2019-11-18 [1]
##  sessioninfo    1.1.1      2018-11-05 [1]
##  statmod        1.4.34     2020-02-17 [1]
##  stringi        1.4.6      2020-02-17 [1]
##  stringr      * 1.4.0      2019-02-10 [1]
##  testthat       2.3.2      2020-03-02 [1]
##  tibble       * 3.0.1      2020-04-20 [1]
##  tidyr        * 1.0.2      2020-01-24 [1]
##  tidyselect     1.0.0      2020-01-27 [1]
##  tidyverse    * 1.3.0      2019-11-21 [1]
##  usethis        1.6.1      2020-04-29 [1]
##  utf8           1.1.4      2018-05-24 [1]
##  vctrs          0.2.4      2020-03-10 [1]
##  withr          2.2.0      2020-04-20 [1]
##  xfun           0.13       2020-04-13 [1]
##  xml2           1.3.2      2020-04-23 [1]
##  xtable         1.8-4      2019-04-21 [1]
##  yaml           2.2.1      2020-02-01 [1]
##  zip            2.0.4      2019-09-01 [1]
##  source                                     
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  Github (METRICS-CARPS/CARPSreports@3277f85)
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
##  CRAN (R 4.0.0)                             
## 
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library