articleID <- "4-1-2015_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Sean Zion, Mufan Luo" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Erica Yoon, Tom Hardwicke" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 500 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 220 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("11/5/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("10/19/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("06/12/19", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
The authors conducted a repeated measures ANOVA with DFT (distance from the typical face) as a repeated measure and judgment type as a between-subjects factor. They report a significant main effect of DFT and a significant interaction between DFT and judgment type (trustworthiness vs. attractiveness). They plot mean attractiveness and mean trustworthiness judgments across the eleven DFT levels in Figure 2.
We complemented our by-face analysis with a by-participant repeated measures analysis of variance (ANOVA) with DFT as a repeated measure and judgment type (trustworthiness vs. attractiveness) as a between-subjects factor. The observed effects supported the same conclusions as the by-face analysis. The main effect of DFT was significant, F(10, 37) = 4.05, p < .001, ηp2 = .52. More important, this main effect was qualified by a significant interaction, F(10, 37) = 5.95, p < .001, ηp2 = .62.
Original article Figure 2 (mean judgments as a function of DFT).
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' files
library(readxl) # import excel files
library(ReproReports) # custom report functions
library(ggplot2) # for plotting
library(psych) # descriptive statistics helpers
library(corrplot) # correlation plots
library(car) # regression and ANOVA utilities
library(ez) # for ANOVA models (ezANOVA)
library(afex) # to run aov_ez
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
faces <- read.csv("data/Experiment _1_Rps _v2.csv")
faces$subid <- c(1:48) # adding a new column for subject ID
faces_tidy <- faces %>%
  gather(key = "DFT_Rating", value, 3:35) %>% # long format: one row per rating
  separate(DFT_Rating, c("dft", "value_trial"), sep = "T_") %>% # strip the "DFT_" prefix
  separate(value_trial, c("dft_value", "trial_num"), sep = "_") %>% # split DFT level from trial number
  mutate(trial_num = paste("trial", trial_num, sep = "")) %>%
  mutate(dft_value = as.numeric(dft_value)) %>%
  mutate(trust_attract = fct_recode(factor(Trust1Attrc0), "trust" = "1", "attractive" = "0")) %>% # label judgment type
  select(-dft, -Trust1Attrc0)
faces_prepr <- faces_tidy %>%
  spread(trial_num, value) %>% # wide: one column per trial
  transmute(subid, dft_value, trust_attract,
            avg_value = rowMeans(select(., trial1:trial3))) %>% # average the three trials per DFT level
  mutate(trust_attract = factor(trust_attract),
         dft_value = factor(dft_value),
         subid = factor(subid))
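As a quick sanity check on the reshaping (our addition; the expected counts follow from 48 participants, 11 DFT levels, and 3 trials per level):
# Sanity check (our addition): after averaging trials, we expect one row per
# participant per DFT level (48 x 11 = 528) and 11 DFT factor levels.
stopifnot(nrow(faces_prepr) == 48 * 11)
stopifnot(nlevels(faces_prepr$dft_value) == 11)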
#Mean and SD of ratings by judgment type (trustworthiness / attractiveness) and DFT level
desc_stat <- faces_tidy %>%
group_by(trust_attract, dft_value) %>%
dplyr::summarise(mean = mean(value), sd = sd(value)) %>%
gather(stat, value, -trust_attract, -dft_value) %>%
unite(judg_stat, trust_attract, stat) %>%
spread(judg_stat, value)
print(desc_stat)
## # A tibble: 11 x 5
## dft_value attractive_mean attractive_sd trust_mean trust_sd
## <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 0 4.32 1.54 6.28 1.58
## 2 10 4.72 1.47 6.33 1.37
## 3 20 5.21 1.53 6.47 1.29
## 4 30 5.79 1.57 6.39 1.24
## 5 40 6.21 1.73 6.15 1.29
## 6 50 6.69 1.53 6.12 1.42
## 7 60 7.04 1.63 5.89 1.34
## 8 70 7.28 1.59 5.56 1.52
## 9 80 7.58 1.49 5.49 1.54
## 10 90 7.53 1.45 5.07 1.57
## 11 100 7.54 1.55 5.06 1.63
#Figure 2 Replication
faces_tidy %>%
mutate(dft_value = as.numeric(as.character(dft_value))) %>%
group_by(trust_attract, dft_value) %>%
dplyr::summarise(mean = mean(value), sd = sd(value)) %>%
mutate(ci_lower = mean - sd/sqrt(48), # error bars span mean ± 1 SE (sd / sqrt(n), n = 48)
ci_upper = mean + sd/sqrt(48)) %>%
ggplot(., aes(x = dft_value, y=mean, col=trust_attract)) +
geom_smooth(aes(group=trust_attract), se=F) +
geom_linerange(aes(ymin = ci_lower, ymax = ci_upper)) +
xlab('DFT Value') +
ylab('Mean Judgment') +
scale_y_continuous(limits= c(0.5, 9.5), breaks=seq(0,10,1)) +
scale_x_continuous(breaks=seq(0,100,10))
Replicated Figure 2: Mean trustworthiness and attractiveness judgments as a function of DFT. Error bars represent ±1 standard error of the mean (sd divided by the square root of the number of participants).
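Note that these are ordinary (between-subjects) standard errors. If genuinely within-subjects error bars were wanted, one option - not used in the figure above and not part of the original analysis - is the Cousineau-Morey method, sketched here:
# Sketch only (our addition): Cousineau-Morey within-subject error bars.
# Each participant's ratings are centered on the grand mean to remove
# between-subject variability before SEs are computed.
faces_prepr %>%
  group_by(subid) %>%
  mutate(avg_norm = avg_value - mean(avg_value) + mean(faces_prepr$avg_value)) %>%
  ungroup() %>%
  group_by(trust_attract, dft_value) %>%
  dplyr::summarise(mean = mean(avg_norm),
                   se = sd(avg_norm) / sqrt(n()) * sqrt(11 / (11 - 1))) # Morey (2008) correction, 11 within levels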
Original Figure 2
Target outcomes, quoted from the original article: "We complemented our by-face analysis with a by-participant repeated measures analysis of variance (ANOVA) with DFT as a repeated measure and judgment type (trustworthiness vs. attractiveness) as a between-subjects factor. The observed effects supported the same conclusions as the by-face analysis. The main effect of DFT was significant, F(10, 37) = 4.05, p < .001, ηp2 = .52. More important, this main effect was qualified by a significant interaction, F(10, 37) = 5.95, p < .001, ηp2 = .62."
Our goal was to reproduce the authors' analysis from Experiment 1. We conducted a by-participant repeated measures analysis of variance with the value of DFT as a repeated measure and judgment type (either trustworthiness or attractiveness) as a between-subjects factor. We tried several different ways of specifying this analysis.
#Model 1 - Repeated Measures ANOVA
model1 <- with(faces_prepr, aov(avg_value ~ (dft_value * trust_attract) + Error(subid / (dft_value))))
model1_summary <- summary(model1)
model1_summary
##
## Error: subid
## Df Sum Sq Mean Sq F value Pr(>F)
## trust_attract 1 28.5 28.50 2.428 0.126
## Residuals 46 540.0 11.74
##
## Error: subid:dft_value
## Df Sum Sq Mean Sq F value Pr(>F)
## dft_value 10 78.5 7.85 10.50 4.6e-16 ***
## dft_value:trust_attract 10 327.1 32.71 43.75 < 2e-16 ***
## Residuals 460 343.9 0.75
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Model 2 - Repeated Measures ANOVA
model2 <- ezANOVA(data = faces_prepr, dv = .(avg_value), wid = .(subid), within = .(dft_value), between = .(trust_attract), detailed = TRUE, return_aov=T)
model2$ANOVA
## Effect DFn DFd SSn SSd F p
## 1 (Intercept) 1 46 19800.08418 539.9832 1686.72642 6.705597e-38
## 2 trust_attract 1 46 28.49832 539.9832 2.42771 1.260611e-01
## 3 dft_value 10 460 78.48064 343.8965 10.49766 4.597896e-16
## 4 trust_attract:dft_value 10 460 327.05724 343.8965 43.74757 1.213988e-60
## p<.05 ges
## 1 * 0.95726740
## 2 0.03123521
## 3 * 0.08155017
## 4 * 0.27008612
#Model 3 - Repeated Measures ANOVA
model3 <- aov_ez(data = faces_prepr,
id = "subid",
between = "trust_attract",
dv = "avg_value",
within = c("dft_value"),
anova_table = list(correction = 'none', es = 'pes'))
print(model3)
## Anova Table (Type 3 tests)
##
## Response: avg_value
## Effect df MSE F pes p.value
## 1 trust_attract 1, 46 11.74 2.43 .050 .126
## 2 dft_value 10, 460 0.75 10.50 *** .186 <.001
## 3 trust_attract:dft_value 10, 460 0.75 43.75 *** .487 <.001
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
# TOM SANITY CHECK - CONCURS WITH OUR PREVIOUS EFFORTS
faces_forAOV <- faces_tidy %>%
  mutate(subid = as.factor(subid), dft_value = as.factor(dft_value)) %>%
  group_by(subid, dft_value, trust_attract) %>%
  summarize(avg = mean(value))
thisAov <- aov(avg ~ trust_attract*dft_value + Error(subid/dft_value), data=faces_forAOV)
summary(thisAov)
##
## Error: subid
## Df Sum Sq Mean Sq F value Pr(>F)
## trust_attract 1 28.5 28.50 2.428 0.126
## Residuals 46 540.0 11.74
##
## Error: subid:dft_value
## Df Sum Sq Mean Sq F value Pr(>F)
## dft_value 10 78.5 7.85 10.50 4.6e-16 ***
## trust_attract:dft_value 10 327.1 32.71 43.75 < 2e-16 ***
## Residuals 460 343.9 0.75
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
All three models concur with one another, but none matches the reported outcomes. In particular, the reported denominator degrees of freedom (37) are far smaller than those we obtained (460), which suggests the original analysis used a different test.
At this stage we contacted the original authors for assistance. They correctly pointed out that we had missed an important footnote (footnote 2), which read: "The sphericity assumption for this analysis and the corresponding analysis in Experiments 2 and 3 was not met. We report multivariate test results, as recommended by Maxwell and Delaney (2004), because these tests are more optimal than correcting for sphericity. The pattern of results did not differ depending on whether we used either a multivariate test or sphericity correction." They also sent SPSS syntax and a screenshot of the output, which demonstrated successful reproduction of the target outcomes (see below):
GLM DFT_100 DFT_90 DFT_80 DFT_70 DFT_60 DFT_50 DFT_40 DFT_30 DFT_20 DFT_10 DFT_00 BY Trust1Attrc0
  /WSFACTOR=factor1 11 Polynomial
  /METHOD=SSTYPE(3)
  /PRINT=ETASQ
  /CRITERIA=ALPHA(.05)
  /WSDESIGN=factor1
  /DESIGN=Trust1Attrc0.
Unfortunately, at this time our team has neither access to SPSS nor the expertise to implement these multivariate tests in R - the analysis appears to be beyond our operational definition of a 'reasonably straightforward analysis'. We have decided to classify this case as 'reproducible with author assistance' - but it should be noted that we have not verified this in an independent analysis.
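For future reference, we sketch one possible route to these multivariate tests in R (our addition; we have not verified it against the SPSS output). car::Anova() applied to a wide-format multivariate linear model reports the multivariate tests (Pillai, Wilks, etc.) together with Mauchly's test of sphericity; afex stores the same car object, so summary(model3$Anova, multivariate = TRUE) should print equivalent results.
# Sketch only (our addition, not verified against the SPSS output): obtain the
# multivariate tests via car::Anova on a wide-format multivariate linear model.
# faces_wide is built here from faces_prepr (one column per DFT level).
faces_wide <- faces_prepr %>%
  mutate(dft_value = paste0("dft_", dft_value)) %>%
  spread(dft_value, avg_value)
mlm_fit <- lm(as.matrix(faces_wide[, paste0("dft_", seq(0, 100, 10))]) ~ trust_attract,
              data = faces_wide)
idata <- data.frame(dft = factor(seq(0, 100, 10))) # one row per DFT level, in column order
mlm_aov <- car::Anova(mlm_fit, idata = idata, idesign = ~ dft)
summary(mlm_aov, multivariate = TRUE) # Pillai/Wilks tests plus Mauchly's test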
# Main effect of DFT:
#Output - Main Effect of DFT
reportObject <- reproCheck(reportedValue = "4.05", obtainedValue = 4.06, valueType = 'F')
## [1] "MINOR_ERROR for F. The reported value (4.05) and the obtained value (4.06) differed by 0.25%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
#Output - num df
reportObject <- reproCheck(reportedValue = "10", obtainedValue = 10, valueType = 'df')
## [1] "MATCH for df. The reported value (10) and the obtained value (10) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
#Output - den df
reportObject <- reproCheck(reportedValue = "37", obtainedValue = 37, valueType = 'df')
## [1] "MATCH for df. The reported value (37) and the obtained value (37) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
#Output - p value
reportObject <- reproCheck(reportedValue = "<.001", obtainedValue = "<.001", valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
#Output - pes value
reportObject <- reproCheck(reportedValue = ".52", obtainedValue = .523, valueType = 'pes')
## [1] "MATCH for pes. The reported value (0.52) and the obtained value (0.52) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# Interaction:
#Output - Interaction Effect of DFT with Judgment Type
reportObject <- reproCheck(reportedValue = "5.95", obtainedValue = 5.95, valueType = 'F')
## [1] "MATCH for F. The reported value (5.95) and the obtained value (5.95) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
#Output - num df
reportObject <- reproCheck(reportedValue = "10", obtainedValue = 10, valueType = 'df')
## [1] "MATCH for df. The reported value (10) and the obtained value (10) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
#Output - den df
reportObject <- reproCheck(reportedValue = "37", obtainedValue = 37, valueType = 'df')
## [1] "MATCH for df. The reported value (37) and the obtained value (37) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
#Output - p value
reportObject <- reproCheck(reportedValue = "<.001", obtainedValue = "<.001", valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
#Output - pes value
reportObject <- reproCheck(reportedValue = ".62", obtainedValue = .617, valueType = 'pes')
## [1] "MATCH for pes. The reported value (0.62) and the obtained value (0.62) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
We initially could not reproduce some degrees of freedom and F values despite trying multiple different model specifications. We contacted the authors for assistance and received a reply. They correctly pointed out that we had missed an important footnote (footnote 2), which read: "The sphericity assumption for this analysis and the corresponding analysis in Experiments 2 and 3 was not met. We report multivariate test results, as recommended by Maxwell and Delaney (2004), because these tests are more optimal than correcting for sphericity. The pattern of results did not differ depending on whether we used either a multivariate test or sphericity correction." They also sent SPSS syntax and a screenshot of the output, which demonstrated successful reproduction of the target outcomes.
Unfortunately, at this time our team has neither access to SPSS nor the expertise to implement these multivariate tests in R - the analysis appears to be beyond our operational definition of a 'reasonably straightforward analysis'. We have decided to classify this case as 'reproducible with author assistance' - but it should be noted that we have not verified this in an independent analysis.
Author_Assistance <- TRUE # was author assistance provided? (if so, enter TRUE)
Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?
# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).
locus_typo <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- 0 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- 1 # how many discrete issues were there for which you could not identify the cause
# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- 0 # how many issues related to typographical errors were resolved?
locus_specification_resolved <- 0 # how many issues related to incomplete, incorrect, or unclear specification of the original analyses were resolved?
locus_analysis_resolved <- 0 # how many issues related to errors in the authors' original analyses were resolved?
locus_data_resolved <- 0 # how many issues related to errors in the shared data files were resolved?
locus_unidentified_resolved <- 1 # how many issues with an unidentified cause were resolved?
Affects_Conclusion <- FALSE # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
filter(dummyRow == FALSE) %>% # remove the dummy row
select(-dummyRow) %>% # remove dummy row designation
mutate(articleID = articleID) %>% # add the articleID
select(articleID, everything()) # make articleID first column
# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
finalOutcome <- "Failure without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Failure despite author assistance"
}
}else{
finalOutcome <- "Success without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Success with author assistance"
}
}
# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)
# save report objects
if(reportType == "pilot"){
write_csv(reportObject, "pilotReportDetailed.csv")
write_csv(reportExtras, "pilotReportExtras.csv")
}
if(reportType == "final"){
write_csv(reportObject, "finalReportDetailed.csv")
write_csv(reportExtras, "finalReportExtras.csv")
}
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.0 (2020-04-24)
## os macOS Catalina 10.15.4
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/London
## date 2020-05-11
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package * version date lib
## abind 1.4-5 2016-07-21 [1]
## afex * 0.27-2 2020-03-28 [1]
## assertthat 0.2.1 2019-03-21 [1]
## backports 1.1.6 2020-04-05 [1]
## boot 1.3-24 2019-12-20 [1]
## broom 0.5.6 2020-04-20 [1]
## callr 3.4.3 2020-03-28 [1]
## car * 3.0-7 2020-03-11 [1]
## carData * 3.0-3 2019-11-16 [1]
## cellranger 1.1.0 2016-07-27 [1]
## cli 2.0.2 2020-02-28 [1]
## colorspace 1.4-1 2019-03-18 [1]
## corrplot * 0.84 2017-10-16 [1]
## crayon 1.3.4 2017-09-16 [1]
## curl 4.3 2019-12-02 [1]
## data.table 1.12.8 2019-12-09 [1]
## DBI 1.1.0 2019-12-15 [1]
## dbplyr 1.4.3 2020-04-19 [1]
## desc 1.2.0 2018-05-01 [1]
## devtools 2.3.0 2020-04-10 [1]
## digest 0.6.25 2020-02-23 [1]
## dplyr * 0.8.5 2020-03-07 [1]
## ellipsis 0.3.0 2019-09-20 [1]
## emmeans 1.4.6 2020-04-19 [1]
## estimability 1.3 2018-02-11 [1]
## evaluate 0.14 2019-05-28 [1]
## ez * 4.4-0 2016-11-02 [1]
## fansi 0.4.1 2020-01-08 [1]
## farver 2.0.3 2020-01-16 [1]
## forcats * 0.5.0 2020-03-01 [1]
## foreign 0.8-78 2020-04-13 [1]
## fs 1.4.1 2020-04-04 [1]
## generics 0.0.2 2018-11-29 [1]
## ggplot2 * 3.3.0 2020-03-05 [1]
## glue 1.4.0 2020-04-03 [1]
## gtable 0.3.0 2019-03-25 [1]
## haven * 2.2.0 2019-11-08 [1]
## hms 0.5.3 2020-01-08 [1]
## htmltools 0.4.0 2019-10-04 [1]
## httr 1.4.1 2019-08-05 [1]
## jsonlite 1.6.1 2020-02-02 [1]
## knitr * 1.28 2020-02-06 [1]
## lattice 0.20-41 2020-04-02 [1]
## lifecycle 0.2.0 2020-03-06 [1]
## lme4 * 1.1-23 2020-04-07 [1]
## lmerTest 3.1-2 2020-04-08 [1]
## lubridate 1.7.8 2020-04-06 [1]
## magrittr 1.5 2014-11-22 [1]
## MASS 7.3-51.5 2019-12-20 [1]
## Matrix * 1.2-18 2019-11-27 [1]
## memoise 1.1.0 2017-04-21 [1]
## mgcv 1.8-31 2019-11-09 [1]
## minqa 1.2.4 2014-10-09 [1]
## mnormt 1.5-6 2020-02-03 [1]
## modelr 0.1.7 2020-04-30 [1]
## munsell 0.5.0 2018-06-12 [1]
## mvtnorm 1.1-0 2020-02-24 [1]
## nlme 3.1-147 2020-04-13 [1]
## nloptr 1.2.2.1 2020-03-11 [1]
## numDeriv 2016.8-1.1 2019-06-06 [1]
## openxlsx 4.1.4 2019-12-06 [1]
## pillar 1.4.4 2020-05-05 [1]
## pkgbuild 1.0.7 2020-04-25 [1]
## pkgconfig 2.0.3 2019-09-22 [1]
## pkgload 1.0.2 2018-10-29 [1]
## plyr 1.8.6 2020-03-03 [1]
## prettyunits 1.1.1 2020-01-24 [1]
## processx 3.4.2 2020-02-09 [1]
## ps 1.3.2 2020-02-13 [1]
## psych * 1.9.12.31 2020-01-08 [1]
## purrr * 0.3.4 2020-04-17 [1]
## R6 2.4.1 2019-11-12 [1]
## Rcpp 1.0.4.6 2020-04-09 [1]
## readr * 1.3.1 2018-12-21 [1]
## readxl * 1.3.1 2019-03-13 [1]
## remotes 2.1.1 2020-02-15 [1]
## reprex 0.3.0 2019-05-16 [1]
## ReproReports * 0.1 2020-05-06 [1]
## reshape2 1.4.4 2020-04-09 [1]
## rio 0.5.16 2018-11-26 [1]
## rlang 0.4.6 2020-05-02 [1]
## rmarkdown 2.1 2020-01-20 [1]
## rprojroot 1.3-2 2018-01-03 [1]
## rstudioapi 0.11 2020-02-07 [1]
## rvest 0.3.5 2019-11-08 [1]
## scales 1.1.0 2019-11-18 [1]
## sessioninfo 1.1.1 2018-11-05 [1]
## statmod 1.4.34 2020-02-17 [1]
## stringi 1.4.6 2020-02-17 [1]
## stringr * 1.4.0 2019-02-10 [1]
## testthat 2.3.2 2020-03-02 [1]
## tibble * 3.0.1 2020-04-20 [1]
## tidyr * 1.0.2 2020-01-24 [1]
## tidyselect 1.0.0 2020-01-27 [1]
## tidyverse * 1.3.0 2019-11-21 [1]
## usethis 1.6.1 2020-04-29 [1]
## utf8 1.1.4 2018-05-24 [1]
## vctrs 0.2.4 2020-03-10 [1]
## withr 2.2.0 2020-04-20 [1]
## xfun 0.13 2020-04-13 [1]
## xml2 1.3.2 2020-04-23 [1]
## xtable 1.8-4 2019-04-21 [1]
## yaml 2.2.1 2020-02-01 [1]
## zip 2.0.4 2019-09-01 [1]
## source
## CRAN (R 4.0.0) for all of the above packages, except:
## ReproReports Github (METRICS-CARPS/CARPSreports@3277f85)
##
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library