articleID <- "9-2-2015_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Dawn Finzi, Kiara Sanchez" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Tom Hardwicke" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 227 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 60 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("11/04/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("05/09/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("05/09/18", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
The authors tested 20 participants on a working memory task comprising 160 trials. On the first 155 trials, participants had to identify the location of a target letter. On the 156th trial, however, they were unexpectedly asked about the target’s identity and color before being asked about its location (the surprise trial). This was followed by four control trials in the same format as the surprise trial.
The target outcomes were: location accuracy on the presurprise trials; color, identity, and location accuracy on the surprise trial; color, identity, and location accuracy on the first control trial; and color, identity, and location accuracy on the final three control trials combined. In addition, there were two chi-square tests: one comparing color accuracy on the surprise trial with the first control trial, and one comparing identity accuracy on the surprise trial with the first control trial.
From the article: “On the presurprise trials, 89% of responses in the location task were correct, which indicates that participants could easily locate the target by using the critical attribute. To analyze the data from the surprise trial, we first divided participants into two groups defined by the order of the surprise tasks (identity task first vs. color task first). We found that the results were almost the same in these two groups. Accordingly, we combined the data for these groups in the analyses reported here. Only 6 of 20 (30%) participants correctly reported the color of the target letter, which is not much better than chance level of 25% (because there were four choices). Furthermore, performance on the identity task (25% correct) was exactly at chance level. These results demonstrate that participants were not capable of reporting a task-relevant attribute of a stimulus that had reached awareness less than 1 s before (i.e., attribute amnesia). Moreover, in the surprise trial, participants’ performance on the location task, unlike their performance on the color and identity tasks, was good (80% correct), and in fact was approximately as good as their performance on the location task in the presurprise trials (89% correct). This indicates that the poor performance on the color and identity tasks was not induced by the surprise test itself; it more likely reflects participants’ failure to remember these attributes. Participants exhibited a dramatic increase in reporting accuracy for the target letter’s color (70% correct) and identity (75% correct) on the first control trial (i.e., the trial immediately after the surprise trial). The improvement in each case was significant—color: 70% versus 30%, χ2(1, N = 40) = 6.40, p = .011, ϕ = .40; identity: 75% versus 25%, χ2(1, N = 40) = 10.00, p < .005, ϕ = .50. Performance on these two tasks remained constant on the final three control trials (color: 75%, 70%, and 80% correct; identity: 75%, 80%, and 75% correct). Participants’ performance on the location task was almost the same on the surprise trial (80% correct) as on the control trials (80%, 85%, 80%, and 70% correct). These results indicate a crucial role for expectation in controlling participants’ ability to report the attributes of a consciously perceived object. Therefore, Experiment 1a showed that when participants did not expect to report a particular attribute of an attended object, they were incapable of doing so, even when that same attribute had reached awareness immediately prior to the test.”
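Before touching the data, note that the reported percentages alone determine the 2 × 2 contingency tables (N = 20 per trial), so the two chi-square tests can be verified by hand. A minimal sketch for the first test (assuming, consistent with the reported statistic, no continuity correction):
# surprise color: 30% correct (6/20); first control color: 70% correct (14/20)
tab <- matrix(c(6, 14, 14, 6), nrow = 2, byrow = TRUE,
              dimnames = list(c("surprise", "first_control"), c("correct", "incorrect")))
chisq.test(tab, correct = FALSE) # X-squared = 6.4, df = 1, p ~ .011
sqrt(6.4 / 40) # phi = 0.4, since phi^2 = chi-square / N for a 2x2 table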
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' Files
library(readxl) # import excel files
library(ReproReports) # custom report functions
library(psych) # to determine phi value
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
d <- read.csv("data/materials-9859-Top-level_materials/12022-Exp1.csv", header=FALSE)
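If the file contains one row per participant per trial (an assumption about the file layout, not something stated in the materials), we would expect 20 × 160 = 3200 rows:
# sanity check on the assumed layout: 20 participants x 160 trials
stopifnot(nrow(d) == 20 * 160)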
d.tidy <- d %>%
select(V1, V3, V10, V11, V12) # keep only the subject, trial number, and accuracy columns
# rename variables
d.tidy <- d.tidy %>%
rename(subject_id = V1,
trial_num = V3,
color_accuracy = V10,
identity_accuracy = V11,
location_accuracy = V12)
# code for trial type
d.tidy <- d.tidy %>%
mutate(trial_type = case_when(
  trial_num < 156 ~ "no_surprise",
  trial_num == 156 ~ "surprise",
  trial_num == 157 ~ "first_control",
  trial_num == 158 ~ "second_control",
  trial_num == 159 ~ "third_control",
  TRUE ~ "fourth_control"
))
# trial number no longer necessary
d.tidy <- d.tidy %>%
select(subject_id, trial_type, color_accuracy, identity_accuracy, location_accuracy)
# make data tidy using gather
d.tidy <- d.tidy %>%
gather(probe, accuracy, color_accuracy, identity_accuracy, location_accuracy)
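gather() is superseded in tidyr >= 1.0 (the version loaded here); the equivalent pivot_longer() call, shown for reference only, would be:
# equivalent with the newer tidyr interface (not run):
# d.tidy <- d.tidy %>%
#   pivot_longer(c(color_accuracy, identity_accuracy, location_accuracy),
#                names_to = "probe", values_to = "accuracy")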
# reported values from the article, ordered to match the group_by(trial_type, probe) summary below (trial types and probes in alphabetical order); NA marks the presurprise color and identity probes, for which the article reports no values
reportedValues <- data.frame("Reported_Average" = c(.70, .75, .80, .80, .75, .70, NA, NA, .89, .75, .75, .85, .30, .25, .80, .70, .80, .80))
accuracies <- d.tidy %>%
group_by(trial_type, probe) %>%
summarise(Obtained_Average = round(mean(accuracy), digits = 2))
allAccuracies <- bind_cols(accuracies, reportedValues)
knitr::kable(allAccuracies, caption = "Accuracy")
| trial_type | probe | Obtained_Average | Reported_Average |
|---|---|---|---|
| first_control | color_accuracy | 0.70 | 0.7 |
| first_control | identity_accuracy | 0.75 | 0.75 |
| first_control | location_accuracy | 0.80 | 0.8 |
| fourth_control | color_accuracy | 0.80 | 0.8 |
| fourth_control | identity_accuracy | 0.75 | 0.75 |
| fourth_control | location_accuracy | 0.70 | 0.7 |
| no_surprise | color_accuracy | 0.00 | NA |
| no_surprise | identity_accuracy | 0.00 | NA |
| no_surprise | location_accuracy | 0.89 | 0.89 |
| second_control | color_accuracy | 0.75 | 0.75 |
| second_control | identity_accuracy | 0.75 | 0.75 |
| second_control | location_accuracy | 0.85 | 0.85 |
| surprise | color_accuracy | 0.30 | 0.3 |
| surprise | identity_accuracy | 0.25 | 0.25 |
| surprise | location_accuracy | 0.80 | 0.8 |
| third_control | color_accuracy | 0.70 | 0.7 |
| third_control | identity_accuracy | 0.80 | 0.8 |
| third_control | location_accuracy | 0.80 | 0.8 |
Everything appears to match. The zeros for color and identity accuracy on the no_surprise trials simply reflect that those attributes were not probed before the surprise trial, which is why the article reports no corresponding values. Let’s compare and record all of these values.
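The checks are written out individually below for transparency. A compact equivalent, shown as a sketch only (it assumes reproCheck accepts the arguments exactly as used throughout this report), would loop over the rows of allAccuracies:
# sketch only (not run): loop over all accuracy comparisons
# for (i in seq_len(nrow(allAccuracies))) {
#   if (!is.na(allAccuracies$Reported_Average[i])) {
#     reportObject <- reproCheck(
#       obtainedValue = allAccuracies$Obtained_Average[i],
#       reportedValue = as.character(allAccuracies$Reported_Average[i]),
#       valueType = "mean")
#   }
# }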
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "first_control", probe == "color_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "first_control", probe == "color_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.7) and the obtained value (0.7) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "first_control", probe == "identity_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "first_control", probe == "identity_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.75) and the obtained value (0.75) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "first_control", probe == "location_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "first_control", probe == "location_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.8) and the obtained value (0.8) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "fourth_control", probe == "color_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "fourth_control", probe == "color_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.8) and the obtained value (0.8) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "fourth_control", probe == "identity_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "fourth_control", probe == "identity_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.75) and the obtained value (0.75) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "fourth_control", probe == "location_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "fourth_control", probe == "location_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.7) and the obtained value (0.7) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "no_surprise", probe == "location_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "no_surprise", probe == "location_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.89) and the obtained value (0.89) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "second_control", probe == "color_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "second_control", probe == "color_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.75) and the obtained value (0.75) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "second_control", probe == "identity_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "second_control", probe == "identity_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.75) and the obtained value (0.75) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "second_control", probe == "location_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "second_control", probe == "location_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.85) and the obtained value (0.85) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "surprise", probe == "color_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "surprise", probe == "color_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.3) and the obtained value (0.3) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "surprise", probe == "identity_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "surprise", probe == "identity_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.25) and the obtained value (0.25) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "surprise", probe == "location_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "surprise", probe == "location_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.8) and the obtained value (0.8) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "third_control", probe == "color_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "third_control", probe == "color_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.7) and the obtained value (0.7) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "third_control", probe == "identity_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "third_control", probe == "identity_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.8) and the obtained value (0.8) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = allAccuracies %>% filter(trial_type == "third_control", probe == "location_accuracy") %>% pull(Obtained_Average),
reportedValue = allAccuracies %>% filter(trial_type == "third_control", probe == "location_accuracy") %>% pull(Reported_Average) %>% as.character(),
valueType = "mean")
## [1] "MATCH for mean. The reported value (0.8) and the obtained value (0.8) differed by 0%. Note that the obtained value was rounded to 1 decimal places to match the reported value."
# create a counts variable for chi square testing
counts <- d.tidy %>%
group_by(trial_type, probe) %>%
summarise(accurate = sum(accuracy == 1), inaccurate = sum(accuracy == 0)) %>%
ungroup() # drop grouping so the column subsetting below returns only the count columns
# first chi square test - color accuracy between surprise trial and first control trial
# reported: χ2(1, N = 40) = 6.40, p = .011, ϕ = .40
color <- counts %>%
filter(probe == "color_accuracy") %>%
filter(trial_type == "surprise" | trial_type == "first_control")
chi.out <- chisq.test(color[,3:4], correct=FALSE) # Yates' continuity correction disabled to reproduce the reported statistic
phi.out <- abs(phi(color[,3:4]))
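For a 2 × 2 table, phi = sqrt(chi-square / N), so the psych value can be cross-checked directly from the test output:
# cross-check: phi^2 = chi-square / N for a 2x2 table
sqrt(unname(chi.out$statistic) / sum(chi.out$observed)) # ~0.40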
reportObject <- reproCheck(obtainedValue = chi.out$parameter,
reportedValue = "1",
valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = sum(chi.out$observed),
reportedValue = "40",
valueType = "n")
## [1] "MATCH for n. The reported value (40) and the obtained value (40) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = chi.out$statistic,
reportedValue = "6.40",
valueType = "x2")
## [1] "MATCH for x2. The reported value (6.4) and the obtained value (6.4) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = chi.out$p.value,
reportedValue = ".011",
valueType = "p")
## [1] "MATCH for p. The reported value (0.011) and the obtained value (0.011) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = phi.out,
reportedValue = ".40",
valueType = "phi")
## [1] "MATCH for phi. The reported value (0.4) and the obtained value (0.4) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# second chi square test - identity accuracy between surprise trial and first control trial
# reported: χ2(1, N = 40) = 10.00, p < .005, ϕ = .50
identity <- counts %>%
filter(probe == "identity_accuracy") %>%
filter(trial_type == "surprise" | trial_type == "first_control")
chi.out <- chisq.test(identity[,3:4], correct=FALSE)
phi.out <- abs(phi(identity[,3:4]))
reportObject <- reproCheck(obtainedValue = chi.out$parameter,
reportedValue = "1",
valueType = "df")
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = sum(chi.out$observed),
reportedValue = "40",
valueType = "n")
## [1] "MATCH for n. The reported value (40) and the obtained value (40) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = chi.out$statistic,
reportedValue = "10.00",
valueType = "x2")
## [1] "MATCH for x2. The reported value (10) and the obtained value (10) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(obtainedValue = chi.out$p.value,
reportedValue = "<.005",
valueType = "p",
eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(obtainedValue = phi.out,
reportedValue = ".50",
valueType = "phi")
## [1] "MATCH for phi. The reported value (0.5) and the obtained value (0.5) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
This reproducibility check was a success: every finding in the target outcomes was reproduced from the data provided.
Author_Assistance = FALSE # was author assistance provided? (if so, enter TRUE)
Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?
# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).
locus_typo <- NA # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- NA # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- NA # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- NA # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- NA # how many discrete issues were there for which you could not identify the cause
# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- NA # how many discrete issues did you encounter that related to typographical errors?
locus_specification_resolved <- NA # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis_resolved <- NA # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data_resolved <- NA # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified_resolved <- NA # how many discrete issues were there for which you could not identify the cause
Affects_Conclusion <- NA # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
filter(dummyRow == FALSE) %>% # remove the dummy row
select(-dummyRow) %>% # remove dummy row designation
mutate(articleID = articleID) %>% # add the articleID
select(articleID, everything()) # make articleID first column
# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
finalOutcome <- "Failure without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Failure despite author assistance"
}
}else{
finalOutcome <- "Success without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Success with author assistance"
}
}
# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)
# save report objects
if(reportType == "pilot"){
write_csv(reportObject, "pilotReportDetailed.csv")
write_csv(reportExtras, "pilotReportExtras.csv")
}
if(reportType == "final"){
write_csv(reportObject, "finalReportDetailed.csv")
write_csv(reportExtras, "finalReportExtras.csv")
}
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.0 (2020-04-24)
## os macOS Catalina 10.15.4
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/London
## date 2020-05-06
##
## ─ Packages ───────────────────────────────────────────────────────────────────
##  package      * version   date       lib source
##  assertthat     0.2.1     2019-03-21 [1] CRAN (R 4.0.0)
##  backports      1.1.6     2020-04-05 [1] CRAN (R 4.0.0)
##  broom          0.5.6     2020-04-20 [1] CRAN (R 4.0.0)
##  callr          3.4.3     2020-03-28 [1] CRAN (R 4.0.0)
##  cellranger     1.1.0     2016-07-27 [1] CRAN (R 4.0.0)
##  cli            2.0.2     2020-02-28 [1] CRAN (R 4.0.0)
##  colorspace     1.4-1     2019-03-18 [1] CRAN (R 4.0.0)
##  crayon         1.3.4     2017-09-16 [1] CRAN (R 4.0.0)
##  DBI            1.1.0     2019-12-15 [1] CRAN (R 4.0.0)
##  dbplyr         1.4.3     2020-04-19 [1] CRAN (R 4.0.0)
##  desc           1.2.0     2018-05-01 [1] CRAN (R 4.0.0)
##  devtools       2.3.0     2020-04-10 [1] CRAN (R 4.0.0)
##  digest         0.6.25    2020-02-23 [1] CRAN (R 4.0.0)
##  dplyr        * 0.8.5     2020-03-07 [1] CRAN (R 4.0.0)
##  ellipsis       0.3.0     2019-09-20 [1] CRAN (R 4.0.0)
##  evaluate       0.14      2019-05-28 [1] CRAN (R 4.0.0)
##  fansi          0.4.1     2020-01-08 [1] CRAN (R 4.0.0)
##  forcats      * 0.5.0     2020-03-01 [1] CRAN (R 4.0.0)
##  fs             1.4.1     2020-04-04 [1] CRAN (R 4.0.0)
##  generics       0.0.2     2018-11-29 [1] CRAN (R 4.0.0)
##  ggplot2      * 3.3.0     2020-03-05 [1] CRAN (R 4.0.0)
##  glue           1.4.0     2020-04-03 [1] CRAN (R 4.0.0)
##  gtable         0.3.0     2019-03-25 [1] CRAN (R 4.0.0)
##  haven        * 2.2.0     2019-11-08 [1] CRAN (R 4.0.0)
##  highr          0.8       2019-03-20 [1] CRAN (R 4.0.0)
##  hms            0.5.3     2020-01-08 [1] CRAN (R 4.0.0)
##  htmltools      0.4.0     2019-10-04 [1] CRAN (R 4.0.0)
##  httr           1.4.1     2019-08-05 [1] CRAN (R 4.0.0)
##  jsonlite       1.6.1     2020-02-02 [1] CRAN (R 4.0.0)
##  knitr        * 1.28      2020-02-06 [1] CRAN (R 4.0.0)
##  lattice        0.20-41   2020-04-02 [1] CRAN (R 4.0.0)
##  lifecycle      0.2.0     2020-03-06 [1] CRAN (R 4.0.0)
##  lubridate      1.7.8     2020-04-06 [1] CRAN (R 4.0.0)
##  magrittr       1.5       2014-11-22 [1] CRAN (R 4.0.0)
##  memoise        1.1.0     2017-04-21 [1] CRAN (R 4.0.0)
##  mnormt         1.5-6     2020-02-03 [1] CRAN (R 4.0.0)
##  modelr         0.1.7     2020-04-30 [1] CRAN (R 4.0.0)
##  munsell        0.5.0     2018-06-12 [1] CRAN (R 4.0.0)
##  nlme           3.1-147   2020-04-13 [1] CRAN (R 4.0.0)
##  pillar         1.4.4     2020-05-05 [1] CRAN (R 4.0.0)
##  pkgbuild       1.0.7     2020-04-25 [1] CRAN (R 4.0.0)
##  pkgconfig      2.0.3     2019-09-22 [1] CRAN (R 4.0.0)
##  pkgload        1.0.2     2018-10-29 [1] CRAN (R 4.0.0)
##  prettyunits    1.1.1     2020-01-24 [1] CRAN (R 4.0.0)
##  processx       3.4.2     2020-02-09 [1] CRAN (R 4.0.0)
##  ps             1.3.2     2020-02-13 [1] CRAN (R 4.0.0)
##  psych        * 1.9.12.31 2020-01-08 [1] CRAN (R 4.0.0)
##  purrr        * 0.3.4     2020-04-17 [1] CRAN (R 4.0.0)
##  R6             2.4.1     2019-11-12 [1] CRAN (R 4.0.0)
##  Rcpp           1.0.4.6   2020-04-09 [1] CRAN (R 4.0.0)
##  readr        * 1.3.1     2018-12-21 [1] CRAN (R 4.0.0)
##  readxl       * 1.3.1     2019-03-13 [1] CRAN (R 4.0.0)
##  remotes        2.1.1     2020-02-15 [1] CRAN (R 4.0.0)
##  reprex         0.3.0     2019-05-16 [1] CRAN (R 4.0.0)
##  ReproReports * 0.1       2020-05-06 [1] Github (METRICS-CARPS/CARPSreports@3277f85)
##  rlang          0.4.6     2020-05-02 [1] CRAN (R 4.0.0)
##  rmarkdown      2.1       2020-01-20 [1] CRAN (R 4.0.0)
##  rprojroot      1.3-2     2018-01-03 [1] CRAN (R 4.0.0)
##  rstudioapi     0.11      2020-02-07 [1] CRAN (R 4.0.0)
##  rvest          0.3.5     2019-11-08 [1] CRAN (R 4.0.0)
##  scales         1.1.0     2019-11-18 [1] CRAN (R 4.0.0)
##  sessioninfo    1.1.1     2018-11-05 [1] CRAN (R 4.0.0)
##  stringi        1.4.6     2020-02-17 [1] CRAN (R 4.0.0)
##  stringr      * 1.4.0     2019-02-10 [1] CRAN (R 4.0.0)
##  testthat       2.3.2     2020-03-02 [1] CRAN (R 4.0.0)
##  tibble       * 3.0.1     2020-04-20 [1] CRAN (R 4.0.0)
##  tidyr        * 1.0.2     2020-01-24 [1] CRAN (R 4.0.0)
##  tidyselect     1.0.0     2020-01-27 [1] CRAN (R 4.0.0)
##  tidyverse    * 1.3.0     2019-11-21 [1] CRAN (R 4.0.0)
##  usethis        1.6.1     2020-04-29 [1] CRAN (R 4.0.0)
##  vctrs          0.2.4     2020-03-10 [1] CRAN (R 4.0.0)
##  withr          2.2.0     2020-04-20 [1] CRAN (R 4.0.0)
##  xfun           0.13      2020-04-13 [1] CRAN (R 4.0.0)
##  xml2           1.3.2     2020-04-23 [1] CRAN (R 4.0.0)
##  yaml           2.2.1     2020-02-01 [1] CRAN (R 4.0.0)
##
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library