articleID <- "9-5-2014_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Kari Leibowitz, Yochai Shavit" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Kyle MacDonald, Tom Hardwicke" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 300 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 450 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("11/1/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("6/15/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("09/07/18", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
In the present study, there were four age groups: 3-4 year olds (n=37), 5-6 year olds (n=50), and 7-10 year olds (n=54), tested in the lab or at a local museum, and adults (n=99), tested online. Cogsdill et al. presented participants with pairs of faces that were pre-determined to be high (+3 s.d.) or low (-3 s.d.) on three traits: trustworthiness (measured as nice vs. mean), competence (measured as smart vs. not smart), and dominance (measured as strong vs. not strong). For each trait, 3 “high on trait” and 3 “low on trait” faces were selected, and each pair consisted of one “high on trait” face and one “low on trait” face. All 9 possible pairings were presented to participants in an order counterbalanced across traits. The authors assessed each age group’s ability to accurately identify which of two faces was high on a particular trait by asking participants to indicate which of the two people was very nice/strong/smart. The authors were interested in the consistency of judgements across age groups, i.e., the ability to “correctly” identify traits of faces relative to a pre-determined consensus.
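To make the design concrete, here is a small illustrative sketch (the face labels are hypothetical) of how 3 “high” and 3 “low” faces yield the 9 possible pairs for a trait:
# Illustrative only: 3 "high" x 3 "low" faces give 9 possible pairs per trait
high_faces <- paste0("high_", 1:3)
low_faces <- paste0("low_", 1:3)
expand.grid(high = high_faces, low = low_faces) # 9 rows, one per face pair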
For this article we focused on the findings reported in the results section of Experiment 1.
Specifically, we attempted to reproduce all descriptive and inferential analyses reported in the text below and associated tables/figures (broken down by procedure):
Figures 2, 3, and 4 summarize results for all age groups and traits; higher percentages of expected responses (i.e., those predicted on the basis of prior data–e.g., that trustworthy faces would be identified as nice and untrustworthy faces as mean) indicate stronger consensus. Combined, all four age groups showed significant consensus compared with chance (50%) when identifying faces as mean or nice (93%; Fig. 2), strong or not strong (85%; Fig. 3), and smart or not smart (76%; Fig. 4). Critically, all age groups attributed all three traits with significant consensus, ps < .001, ds > 1.08. However, an analysis of variance (ANOVA) revealed a significant main effect of age group, F(3, 236) = 17.91, p < .001. Although 3- to 4-year-olds responded with robust and adult like consensus (72% across all traits), they were less consistent than 5- to 6-year-olds (81%), 7- to 10-year-olds (88%), and adults (89%). One-way ANOVAs followed by post hoc tests with Sidak corrections for multiple comparisons were used to analyze age differences for each trait. These analyses revealed that when attributing both trustworthiness and dominance, 3- to 4-year-olds were less consistent than all other age groups (all ps < .01, ds > 0.59), which exhibited equivalent consistency (all ps > .23, ds < 0.40).
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' Files
library(readxl) # import excel files
library(ReproReports) # custom report functions
library(effsize) #used to calculate effect size
library(car) # companion to applied regression (ANOVA utilities)
library(lsr) # using this to get effect sizes
library(broom) # tidy model outputs into data frames
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA,
percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
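For readers running this without the ReproReports package, here is a minimal sketch of what reproCheck() presumably does; the rounding rule and the 2% minor/major cutoff are assumptions, not the package's documented behaviour:
# Hypothetical sketch only - NOT the ReproReports implementation
reproCheck_sketch <- function(reportedValue, obtainedValue, valueType, eyeballCheck = NA) {
  if (!is.na(eyeballCheck)) {
    # purely visual comparison against a figure; no numeric error computed
    outcome <- ifelse(eyeballCheck, "MATCH", "EYEBALL CHECK ERROR")
    pctError <- NA
  } else {
    reported <- as.numeric(reportedValue)
    # assumption: obtained value is rounded to the reported precision (2 d.p. here)
    pctError <- abs(round(obtainedValue, 2) - reported) / abs(reported) * 100
    # assumption: 2% cutoff between minor and major numerical errors
    outcome <- if (pctError == 0) "MATCH" else if (pctError < 2) "MINOR_ERROR" else "MAJOR_ERROR"
  }
  # the package version presumably also appends this row to the running reportObject
  data.frame(reportedValue, obtainedValue, valueType,
             percentageError = pctError, comparisonOutcome = outcome, eyeballCheck)
}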
d <- read_xlsx("DATA/Cogsdill_FaceTrait_Experiment1Public_04Feb14.xlsx")
Clean up column names and gather the ratings for each age group.
colnames(d) <- colnames(d) %>%
  str_replace(pattern = " ", replacement = "_") %>% # note: replaces only the first space in each name; str_replace_all() would catch any others
  str_to_lower()
d_tidy <- d %>% gather(key = attribute, value = rating, nice:smart)
Compute summary statistics for each age group and attribute.
ms <- d_tidy %>%
group_by(age_group, attribute) %>%
summarise(m = mean(rating),
stdev = sd(rating),
n = n(),
sem = stdev / sqrt(n))
ms %>% kable(digits = 2)
| age_group | attribute | m | stdev | n | sem |
|---|---|---|---|---|---|
| 3-4 year olds | nice | 0.82 | 0.23 | 37 | 0.04 |
| 3-4 year olds | smart | 0.68 | 0.28 | 37 | 0.05 |
| 3-4 year olds | strong | 0.65 | 0.28 | 37 | 0.05 |
| 5-6 year olds | nice | 0.93 | 0.12 | 50 | 0.02 |
| 5-6 year olds | smart | 0.66 | 0.31 | 50 | 0.04 |
| 5-6 year olds | strong | 0.85 | 0.19 | 50 | 0.03 |
| 7-10 year olds | nice | 0.97 | 0.12 | 54 | 0.02 |
| 7-10 year olds | smart | 0.84 | 0.22 | 54 | 0.03 |
| 7-10 year olds | strong | 0.84 | 0.22 | 54 | 0.03 |
| Adults | nice | 0.95 | 0.14 | 99 | 0.01 |
| Adults | smart | 0.80 | 0.23 | 99 | 0.02 |
| Adults | strong | 0.92 | 0.16 | 99 | 0.02 |
ms %>%
filter(attribute == "nice") %>%
ggplot(aes(x = age_group, y = m)) +
geom_bar(stat = "identity", fill="grey", color="black", width = 0.5) +
geom_errorbar(aes(ymin = m - sem, ymax= m + sem), width=0.2)+
ylim(0,1)+
theme_bw()+
labs(x="Age Group", y="Percent expected responses (Trustworthy='Nice')")+
ggtitle("figure 2 reproduced: 'nice' by Age group")
Add eyeball repro checks for the mean and SEM values in Figure 2.
grp34 <- ms %>% filter(attribute == 'nice', age_group == '3-4 year olds') %>% pull(m)
grp34_sem <- ms %>% filter(attribute == 'nice', age_group == '3-4 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp34, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp34_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grp56 <- ms %>% filter(attribute == 'nice', age_group == '5-6 year olds') %>% pull(m)
grp56_sem <- ms %>% filter(attribute == 'nice', age_group == '5-6 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp56, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp56_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grp710 <- ms %>% filter(attribute == 'nice', age_group == '7-10 year olds') %>% pull(m)
grp710_sem <- ms %>% filter(attribute == 'nice', age_group == '7-10 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp710, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp710_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grpAdult <- ms %>% filter(attribute == 'nice', age_group == 'Adults') %>% pull(m)
grpAdult_sem <- ms %>% filter(attribute == 'nice', age_group == 'Adults') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grpAdult, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grpAdult_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
Reproducing this finding:
“Combined, all four age groups showed significant consensus compared with chance (50%) when identifying faces as mean or nice (93%; Fig. 2)… Cogsdill et al., page 1134”
mean_nice <- d_tidy %>% filter(attribute == "nice") %>% pull(rating) %>% mean()
reportObject <- reproCheck(reportedValue = "0.93", obtainedValue = mean_nice, valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.93) and the obtained value (0.93) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
ms %>%
filter(attribute == "strong") %>%
ggplot(aes(x = age_group, y = m)) +
geom_bar(stat = "identity", fill="grey", color="black", width = 0.5) +
geom_errorbar(aes(ymin = m - sem, ymax = m + sem), width=0.2)+
ylim(0,1)+
theme_bw()+
labs(x="Age Group", y="Percent expected responses (Dominant='Nice')")+
ggtitle("figure 2 reproduced: 'strong' by Age group")
Add eyeball repro checks for the mean and SEM values in Figure 3.
grp34 <- ms %>% filter(attribute == 'strong', age_group == '3-4 year olds') %>% pull(m)
grp34_sem <- ms %>% filter(attribute == 'strong', age_group == '3-4 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp34, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp34_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grp56 <- ms %>% filter(attribute == 'strong', age_group == '5-6 year olds') %>% pull(m)
grp56_sem <- ms %>% filter(attribute == 'strong', age_group == '5-6 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp56, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp56_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grp710 <- ms %>% filter(attribute == 'strong', age_group == '7-10 year olds') %>% pull(m)
grp710_sem <- ms %>% filter(attribute == 'strong', age_group == '7-10 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp710, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp710_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grpAdult <- ms %>% filter(attribute == 'strong', age_group == 'Adults') %>% pull(m)
grpAdult_sem <- ms %>% filter(attribute == 'strong', age_group == 'Adults') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grpAdult, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grpAdult_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
Next, we try to reproduce this finding:
“Combined, all four age groups showed significant consensus compared with chance (50%) when identifying faces as…strong or not strong (85%; Fig. 3)… Cogsdill et al., page 1134”
mean_strong <- d_tidy %>% filter(attribute == "strong") %>% pull(rating) %>% mean()
reportObject <- reproCheck(reportedValue = "0.85", obtainedValue = mean_strong, valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.85) and the obtained value (0.85) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
ms %>%
filter(attribute == "smart") %>%
ggplot(aes(x = age_group, y = m)) +
geom_bar(stat = "identity", fill="grey", color="black", width = 0.5) +
geom_errorbar(aes(ymin = m - sem, ymax = m + sem), width=0.2)+
ylim(0,1)+
theme_bw()+
labs(x="Age Group", y="Percent expected responses (Competent='Smart')")+
ggtitle("figure 2 reproduced: 'smart' by Age group")
Add eyeball repro checks for the mean and SEM values in Figure 4.
grp34 <- ms %>% filter(attribute == 'smart', age_group == '3-4 year olds') %>% pull(m)
grp34_sem <- ms %>% filter(attribute == 'smart', age_group == '3-4 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp34, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp34_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grp56 <- ms %>% filter(attribute == 'smart', age_group == '5-6 year olds') %>% pull(m)
grp56_sem <- ms %>% filter(attribute == 'smart', age_group == '5-6 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp56, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp56_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grp710 <- ms %>% filter(attribute == 'smart', age_group == '7-10 year olds') %>% pull(m)
grp710_sem <- ms %>% filter(attribute == 'smart', age_group == '7-10 year olds') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp710, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grp710_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
grpAdult <- ms %>% filter(attribute == 'smart', age_group == 'Adults') %>% pull(m)
grpAdult_sem <- ms %>% filter(attribute == 'smart', age_group == 'Adults') %>% pull(sem)
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grpAdult, valueType = 'mean', eyeballCheck = TRUE)
## [1] "MATCH for mean. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "fig", obtainedValue = grpAdult_sem, valueType = 'se', eyeballCheck = TRUE)
## [1] "MATCH for se. Eyeball comparison only."
Next, we try to reproduce this finding:
“Combined, all four age groups showed significant consensus compared with chance (50%) when identifying faces as…smart or not smart (76%; Fig. 4). Cogsdill et al., page 1134”
mean_smart <- d_tidy %>% filter(attribute == "smart") %>% pull(rating) %>% mean()
reportObject <- reproCheck(reportedValue = "0.76", obtainedValue = mean_smart, valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.76) and the obtained value (0.76) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
“Although 3- to 4-year-olds responded with robust and adult like consensus (72% across all traits), they were less consistent than 5- to 6-year-olds (81%), 7- to 10-year-olds (88%), and adults (89%). Cogsdill et al., page 1134”
by_age_ms <- d_tidy %>%
group_by(age_group) %>%
summarise(m = mean(rating))
by_age_ms %>% kable(digits = 2)
| age_group | m |
|---|---|
| 3-4 year olds | 0.72 |
| 5-6 year olds | 0.81 |
| 7-10 year olds | 0.88 |
| Adults | 0.89 |
All are consistent with the statistics reported in the paper. Add repro checks for these values.
reportObject <- reproCheck(reportedValue = "0.72", obtainedValue = by_age_ms$m[1], valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.72) and the obtained value (0.72) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "0.81", obtainedValue = by_age_ms$m[2], valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.81) and the obtained value (0.81) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "0.88", obtainedValue = by_age_ms$m[3], valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.88) and the obtained value (0.88) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "0.89", obtainedValue = by_age_ms$m[4], valueType = 'mean')
## [1] "MATCH for mean. The reported value (0.89) and the obtained value (0.89) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
Let’s try reproducing this claim:
“Combined, all four age groups showed significant consensus compared with chance (50%) when identifying faces as mean or nice, strong or not strong, and smart or not smart.”
The authors did not report which test they used to establish that these percentages differ significantly from 50%. This was initially an insufficient information error. We contacted the original authors, who told us that it was a t-test.
# wrapper function for t-test
t_test_fun <- function(df) {
m <- t.test(df$rating, mu = 0.5, alternative = "two.sided")
m %>% broom::glance()
}
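As a quick sanity check, the wrapper can be run on a single attribute before mapping it over all three:
# e.g., all 'nice' ratings tested against chance (0.5)
d_tidy %>% filter(attribute == "nice") %>% t_test_fun()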
# nest the data for each attribute
d_by_attr <- d_tidy %>%
group_by(attribute) %>%
nest()
# map the t-test function against random responding (0.5) to each attribute
d_by_attr <- d_by_attr %>%
mutate(t_test = purrr::map(data, t_test_fun))
by_attr_t <- d_by_attr %>% unnest(t_test) %>% select(-data)
by_attr_t %>% kable(digits = 2)
| attribute | estimate | statistic | p.value | parameter | conf.low | conf.high | method | alternative |
|---|---|---|---|---|---|---|---|---|
| nice | 0.93 | 42.98 | 0 | 239 | 0.91 | 0.95 | One Sample t-test | two.sided |
| strong | 0.85 | 24.39 | 0 | 239 | 0.82 | 0.87 | One Sample t-test | two.sided |
| smart | 0.76 | 15.46 | 0 | 239 | 0.73 | 0.79 | One Sample t-test | two.sided |
Extract and check the p-value for each attribute.
p_strong <- by_attr_t %>% filter(attribute == "strong") %>% pull(p.value)
p_smart <- by_attr_t %>% filter(attribute == "smart") %>% pull(p.value)
p_nice <- by_attr_t %>% filter(attribute == "nice") %>% pull(p.value)
All three attributes were significantly different from random responding, as reported in the paper. We cannot check these p values quantitatively because no exact values were reported in the paper, so we run eyeball checks.
reportObject <- reproCheck(reportedValue = "significant", obtainedValue = p_strong, valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "significant", obtainedValue = p_smart, valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "significant", obtainedValue = p_nice, valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
Next, we try to reproduce these tests:
“Critically, all age groups attributed all three traits with significant consensus, ps < .001, ds > 1.08.”
Note: the article does not specify which analytical tests were used, and there is some ambiguity in the phrasing here. Thus, this was initially an insufficient information error. However, the authors informed us via e-mail that t-tests were used and that the effect size is Cohen’s d.
# nest the data for each attribute
d_by_age_and_attr <- d_tidy %>%
group_by(age_group, attribute) %>%
nest()
# map the t-test function against random responding (0.5) to each attribute
d_by_age_and_attr <- d_by_age_and_attr %>%
mutate(t_test = purrr::map(data, t_test_fun))
by_age_attr_t <- d_by_age_and_attr %>% unnest(t_test) %>% select(-data) %>% mutate(d = 2 * statistic / sqrt(parameter)) # Cohen's d computed as 2t/sqrt(df); this formula reproduces the article's reported values
by_age_attr_t %>% kable(digits = 2)
| age_group | attribute | estimate | statistic | p.value | parameter | conf.low | conf.high | method | alternative | d |
|---|---|---|---|---|---|---|---|---|---|---|
| 5-6 year olds | nice | 0.93 | 24.36 | 0 | 49 | 0.89 | 0.96 | One Sample t-test | two.sided | 6.96 |
| 7-10 year olds | nice | 0.97 | 29.49 | 0 | 53 | 0.93 | 1.00 | One Sample t-test | two.sided | 8.10 |
| 3-4 year olds | nice | 0.82 | 8.49 | 0 | 36 | 0.75 | 0.90 | One Sample t-test | two.sided | 2.83 |
| Adults | nice | 0.95 | 33.04 | 0 | 98 | 0.92 | 0.98 | One Sample t-test | two.sided | 6.68 |
| 5-6 year olds | strong | 0.85 | 12.88 | 0 | 49 | 0.79 | 0.90 | One Sample t-test | two.sided | 3.68 |
| 7-10 year olds | strong | 0.84 | 11.50 | 0 | 53 | 0.78 | 0.90 | One Sample t-test | two.sided | 3.16 |
| 3-4 year olds | strong | 0.65 | 3.24 | 0 | 36 | 0.56 | 0.74 | One Sample t-test | two.sided | 1.08 |
| Adults | strong | 0.92 | 26.63 | 0 | 98 | 0.89 | 0.95 | One Sample t-test | two.sided | 5.38 |
| 5-6 year olds | smart | 0.66 | 3.77 | 0 | 49 | 0.58 | 0.75 | One Sample t-test | two.sided | 1.08 |
| 7-10 year olds | smart | 0.84 | 11.21 | 0 | 53 | 0.78 | 0.90 | One Sample t-test | two.sided | 3.08 |
| 3-4 year olds | smart | 0.68 | 3.85 | 0 | 36 | 0.58 | 0.77 | One Sample t-test | two.sided | 1.28 |
| Adults | smart | 0.80 | 13.08 | 0 | 98 | 0.75 | 0.84 | One Sample t-test | two.sided | 2.64 |
Looks good, all ps < .001, ds > 1.08. Let’s run reproCheck:
## repro check values
for(i in by_age_attr_t$p.value){
reportObject <- reproCheck(reportedValue = "ps < .001", obtainedValue = i, valueType = 'p', eyeballCheck = TRUE)
}
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
## [1] "MATCH for p. Eyeball comparison only."
for(i in by_age_attr_t$d){
reportObject <- reproCheck(reportedValue = "ds > 1.08", obtainedValue = i, valueType = 'd', eyeballCheck = TRUE)
}
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
## [1] "MATCH for d. Eyeball comparison only."
We next attempt to reproduce this claim:
“However, an analysis of variance (ANOVA) revealed a significant main effect of age group, F(3, 236) = 17.91, p < .001”
anova1 <- d_tidy %>%
distinct(participant_id, overall, age_group) %>%
aov(overall ~ age_group, data = .) %>%
broom::glance()
anova1 %>% kable(digits = 2, caption="ANOVA to compare group means on overall percent correct")
| r.squared | adj.r.squared | sigma | statistic | p.value | df | logLik | AIC | BIC | deviance | df.residual |
|---|---|---|---|---|---|---|---|---|---|---|
| 0.19 | 0.18 | 0.13 | 17.95 | 0 | 4 | 148.98 | -287.96 | -270.55 | 4.06 | 236 |
Add repro check values:
reportObject <- reproCheck(reportedValue = "3", obtainedValue = anova1$df, valueType = 'df')
## [1] "MAJOR_ERROR for df. The reported value (3) and the obtained value (4) differed by 33.33%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "236", obtainedValue = anova1$df.residual, valueType = 'df')
## [1] "MATCH for df. The reported value (236) and the obtained value (236) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "17.91", obtainedValue = anova1$statistic, valueType = 'F')
## [1] "MINOR_ERROR for F. The reported value (17.91) and the obtained value (17.95) differed by 0.22%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
reportObject <- reproCheck(reportedValue = "p < .001", obtainedValue = anova1$p.value, valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
Major error for the degrees of freedom here: we obtained 4 whereas the article reports 3. This could be a typo in the article, but another plausible cause is that broom::glance() reports the model degrees of freedom including the intercept (here 4), whereas the article reports the F-test numerator degrees of freedom (4 groups - 1 = 3).
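As a cross-check (a sketch refitting the same model), the classical ANOVA table reports the numerator df directly:
# the age_group row of summary() should show Df = 3 (i.e., 4 groups - 1)
d_tidy %>%
  distinct(participant_id, overall, age_group) %>%
  aov(overall ~ age_group, data = .) %>%
  summary()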
Next, we attempt to reproduce this claim:
“One-way ANOVAs followed by post hoc tests with Sidak corrections for multiple comparisons were used to analyze age differences for each trait. These analyses revealed that when attributing both trustworthiness and dominance, 3- to 4-year-olds were less consistent than all other age groups (all ps < .01, ds > 0.59), which exhibited equivalent consistency (all ps > .23, ds < 0.40).”
Note: trustworthiness was measured as nice vs. mean, competence as smart vs. not smart, and dominance as strong vs. not strong.
Note: the outcomes of the one-way ANOVAs themselves are not reported.
Note: We do not have sufficient information to run this analysis completely. Specifically, it is unclear how the Sidak corrections were applied because the family of hypothesis tests is not defined. As we cannot adjust the alpha threshold, we cannot determine whether there were decision errors. However, we can compare the obtained p values to the reported thresholds. So below we check the correspondence of the obtained ps and ds with the reported thresholds, but we do not draw conclusions about decision errors.
INSUFFICIENT INFORMATION ERROR.
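For reference, here is a minimal sketch of the Sidak correction under an assumed family size; m = 6 is purely illustrative, since the original family of tests is unspecified:
# Illustrative only: Sidak correction for a hypothetical family of m tests
m <- 6                               # assumed family size, not stated in the article
alpha_sidak <- 1 - (1 - .05)^(1 / m) # adjusted alpha threshold, approx. .0085
p_raw <- .02                         # a hypothetical uncorrected p value
p_sidak <- 1 - (1 - p_raw)^m         # the equivalent correction applied to the p value, approx. .11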
Run follow-up tests and calculate Cohen’s d.
# compare 3-4 year olds to the other three groups for nice (trustworthiness) and check that "all ps < .01, ds > 0.59"
nice34_56 <- d_tidy %>% filter(attribute == 'nice', age_group %in% c("3-4 year olds", "5-6 year olds"))
t.out <- pairwise.t.test(nice34_56$rating, nice34_56$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = nice34_56)
reportObject <- reproCheck(reportedValue = "ps < .01", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds > 0.59", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
nice34_710 <- d_tidy %>% filter(attribute == 'nice', age_group %in% c("3-4 year olds", "7-10 year olds"))
t.out <- pairwise.t.test(nice34_710$rating, nice34_710$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = nice34_710)
reportObject <- reproCheck(reportedValue = "ps < .01", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds > 0.59", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
nice34_Adults <- d_tidy %>% filter(attribute == 'nice', age_group %in% c("3-4 year olds", "Adults"))
t.out <- pairwise.t.test(nice34_Adults$rating, nice34_Adults$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = nice34_Adults)
reportObject <- reproCheck(reportedValue = "ps < .01", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds > 0.59", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
# compare 5-6 year olds to the other two groups for nice (trustworthiness) and check that "all ps > .23, ds < 0.40"
nice56_710 <- d_tidy %>% filter(attribute == 'nice', age_group %in% c("5-6 year olds", "7-10 year olds"))
t.out <- pairwise.t.test(nice56_710$rating, nice56_710$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = nice56_710)
reportObject <- reproCheck(reportedValue = "ps > .23", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = FALSE)
## [1] "EYEBALL CHECK ERROR for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds < 0.40", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
nice56_Adults <- d_tidy %>% filter(attribute == 'nice', age_group %in% c("5-6 year olds", "Adults"))
t.out <- pairwise.t.test(nice56_Adults$rating, nice56_Adults$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = nice56_Adults)
reportObject <- reproCheck(reportedValue = "ps > .23", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds < 0.40", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
# compare 7-10 year olds to the adult group for nice (trustworthiness) and check that "all ps > .23, ds < 0.40"
nice710_Adults <- d_tidy %>% filter(attribute == 'nice', age_group %in% c("7-10 year olds", "Adults"))
t.out <- pairwise.t.test(nice710_Adults$rating, nice710_Adults$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = nice710_Adults)
reportObject <- reproCheck(reportedValue = "ps > .23", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds < 0.40", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
# compare 3-4 year olds to the other three groups for dominance (strong) and check that "all ps < .01, ds > 0.59"
strong34_56 <- d_tidy %>% filter(attribute == 'strong', age_group %in% c("3-4 year olds", "5-6 year olds"))
t.out <- pairwise.t.test(strong34_56$rating, strong34_56$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = strong34_56)
reportObject <- reproCheck(reportedValue = "ps < .01", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds > 0.59", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
strong34_710 <- d_tidy %>% filter(attribute == 'strong', age_group %in% c("3-4 year olds", "7-10 year olds"))
t.out <- pairwise.t.test(strong34_710$rating, strong34_710$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = strong34_710)
reportObject <- reproCheck(reportedValue = "ps < .01", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds > 0.59", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
strong34_Adults <- d_tidy %>% filter(attribute == 'strong', age_group %in% c("3-4 year olds", "Adults"))
t.out <- pairwise.t.test(strong34_Adults$rating, strong34_Adults$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = strong34_Adults)
reportObject <- reproCheck(reportedValue = "ps < .01", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds > 0.59", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
# compare 5-6 year olds to the other two groups for dominance (strong) and check that "all ps > .23, ds < 0.40"
strong56_710 <- d_tidy %>% filter(attribute == 'strong', age_group %in% c("5-6 year olds", "7-10 year olds"))
t.out <- pairwise.t.test(strong56_710$rating, strong56_710$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = strong56_710)
reportObject <- reproCheck(reportedValue = "ps > .23", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = TRUE)
## [1] "MATCH for p. Eyeball comparison only."
reportObject <- reproCheck(reportedValue = "ds < 0.40", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE)
## [1] "MATCH for d. Eyeball comparison only."
strong56_Adults <- d_tidy %>% filter(attribute == 'strong', age_group %in% c("5-6 year olds", "Adults"))
t.out <- pairwise.t.test(strong56_Adults$rating, strong56_Adults$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = strong56_Adults)
reportObject <- reproCheck(reportedValue = "ps > .23", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = FALSE)
## [1] "EYEBALL CHECK ERROR for p. Eyeball comparison only."
Note: This could be a decision error but we cannot determine that because we do not have sufficient information to apply the Sidak corrections.
reportObject <- reproCheck(reportedValue = "ds < 0.40", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE) # NOTE - although the obtained t is not below 0.40, it is only a minor numerical error so it passess the eyeball check
## [1] "MATCH for d. Eyeball comparison only."
# compare 7-10 year olds to the adult group for dominance (strong) and check that "all ps > .23, ds < 0.40"
strong710_Adults <- d_tidy %>% filter(attribute == 'strong', age_group %in% c("7-10 year olds", "Adults"))
t.out <- pairwise.t.test(strong710_Adults$rating, strong710_Adults$age_group, paired = FALSE, p.adjust.method = 'none')
d <- cohensD(rating ~ age_group, data = strong710_Adults)
reportObject <- reproCheck(reportedValue = "ps > .23", obtainedValue = unname(t.out$p.value), valueType = 'p', eyeballCheck = FALSE)
## [1] "EYEBALL CHECK ERROR for p. Eyeball comparison only."
Note: This could be a decision error but we cannot determine that because we do not have sufficient information to apply the Sidak corrections.
reportObject <- reproCheck(reportedValue = "ds < 0.40", obtainedValue = d, valueType = 'd', eyeballCheck = TRUE) # NOTE - although the obtained d is not below 0.40, it is only a minor numerical error so it passess the eyeball check
## [1] "MATCH for d. Eyeball comparison only."
There were major numerical errors for three p-values here. Note that the article says there were “Sidak corrections for multiple comparisons”. Such corrections should be applied to the alpha threshold rather than directly to p-values, but it is possible they were applied to the p-values in this case, which could explain the differences. We cannot emulate this process because the family of tests to which the corrections apply is not defined: if we apply Sidak corrections to the p-values based on all of the post hoc tests, some p-values match but others that previously matched no longer do. We have therefore assumed that the Sidak corrections were correctly applied to the alpha threshold (rather than to the p-values), in which case we have major errors for some p-values, but we cannot determine whether they are decision errors because we do not have sufficient information to correct the alpha threshold.
We were initially able to reproduce the descriptive statistics and the three key figures, but four insufficient information errors prevented a full reproducibility check.
We contacted the original authors for assistance, and they provided additional information about the analysis specifications that resolved three of these issues.
However, we still had difficulties with the section reporting post hoc comparisons: there were three major errors for p-values. The authors report using ‘Sidak corrections’, and it is unclear whether these were applied to the alpha threshold (correctly) or to the p-values themselves. If the corrections were applied to the p-values, this could potentially explain the differences, but the article does not identify the family of hypotheses over which the corrections were applied, so we cannot adjust the alpha threshold either. We have decided to report the major errors for the p-values, but to record an insufficient information error that prevents us from determining whether these were decision errors.
Additionally, for one degree of freedom we obtained 4 whereas the article reports 3. As noted above, this may reflect broom::glance() reporting the model degrees of freedom including the intercept rather than the F-test numerator df, or it may be a typo; the cause is not certain.
Author_Assistance <- TRUE # was author assistance provided? (if so, enter TRUE)
Insufficient_Information_Errors <- 1 # how many discrete insufficient information issues did you encounter?
# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).
locus_typo <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- 4 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- 1 # how many discrete issues were there for which you could not identify the cause?
# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- 0 # how many discrete issues did you encounter that related to typographical errors?
locus_specification_resolved <- 3 # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis_resolved <- 0 # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data_resolved <- 0 # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified_resolved <- 0 # how many discrete issues were there for which you could not identify the cause?
Affects_Conclusion <- "Unclear" # Do any reproducibility issues encounter appear to affect the conclusions made in the original article? This is a subjective judgement, but you should taking into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
filter(dummyRow == FALSE) %>% # remove the dummy row
select(-dummyRow) %>% # remove dummy row designation
mutate(articleID = articleID) %>% # add the articleID
select(articleID, everything()) # make articleID first column
# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
finalOutcome <- "Failure without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Failure despite author assistance"
}
}else{
finalOutcome <- "Success without author assistance"
if(Author_Assistance == T){
finalOutcome <- "Success with author assistance"
}
}
# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)
# save report objects
if(reportType == "pilot"){
write_csv(reportObject, "pilotReportDetailed.csv")
write_csv(reportExtras, "pilotReportExtras.csv")
}
if(reportType == "final"){
write_csv(reportObject, "finalReportDetailed.csv")
write_csv(reportExtras, "finalReportExtras.csv")
}
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.0 (2020-04-24)
## os macOS Catalina 10.15.4
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/London
## date 2020-05-14
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package * version date lib
## abind 1.4-5 2016-07-21 [1]
## assertthat 0.2.1 2019-03-21 [1]
## backports 1.1.6 2020-04-05 [1]
## broom * 0.5.6 2020-04-20 [1]
## callr 3.4.3 2020-03-28 [1]
## car * 3.0-7 2020-03-11 [1]
## carData * 3.0-3 2019-11-16 [1]
## cellranger 1.1.0 2016-07-27 [1]
## cli 2.0.2 2020-02-28 [1]
## colorspace 1.4-1 2019-03-18 [1]
## crayon 1.3.4 2017-09-16 [1]
## curl 4.3 2019-12-02 [1]
## data.table 1.12.8 2019-12-09 [1]
## DBI 1.1.0 2019-12-15 [1]
## dbplyr 1.4.3 2020-04-19 [1]
## desc 1.2.0 2018-05-01 [1]
## devtools 2.3.0 2020-04-10 [1]
## digest 0.6.25 2020-02-23 [1]
## dplyr * 0.8.5 2020-03-07 [1]
## effsize * 0.8.0 2020-04-09 [1]
## ellipsis 0.3.0 2019-09-20 [1]
## evaluate 0.14 2019-05-28 [1]
## fansi 0.4.1 2020-01-08 [1]
## farver 2.0.3 2020-01-16 [1]
## forcats * 0.5.0 2020-03-01 [1]
## foreign 0.8-78 2020-04-13 [1]
## fs 1.4.1 2020-04-04 [1]
## generics 0.0.2 2018-11-29 [1]
## ggplot2 * 3.3.0 2020-03-05 [1]
## glue 1.4.0 2020-04-03 [1]
## gtable 0.3.0 2019-03-25 [1]
## haven * 2.2.0 2019-11-08 [1]
## highr 0.8 2019-03-20 [1]
## hms 0.5.3 2020-01-08 [1]
## htmltools 0.4.0 2019-10-04 [1]
## httr 1.4.1 2019-08-05 [1]
## jsonlite 1.6.1 2020-02-02 [1]
## knitr * 1.28 2020-02-06 [1]
## labeling 0.3 2014-08-23 [1]
## lattice 0.20-41 2020-04-02 [1]
## lifecycle 0.2.0 2020-03-06 [1]
## lsr * 0.5 2015-03-02 [1]
## lubridate 1.7.8 2020-04-06 [1]
## magrittr 1.5 2014-11-22 [1]
## memoise 1.1.0 2017-04-21 [1]
## modelr 0.1.7 2020-04-30 [1]
## munsell 0.5.0 2018-06-12 [1]
## nlme 3.1-147 2020-04-13 [1]
## openxlsx 4.1.4 2019-12-06 [1]
## pillar 1.4.4 2020-05-05 [1]
## pkgbuild 1.0.7 2020-04-25 [1]
## pkgconfig 2.0.3 2019-09-22 [1]
## pkgload 1.0.2 2018-10-29 [1]
## prettyunits 1.1.1 2020-01-24 [1]
## processx 3.4.2 2020-02-09 [1]
## ps 1.3.2 2020-02-13 [1]
## purrr * 0.3.4 2020-04-17 [1]
## R6 2.4.1 2019-11-12 [1]
## Rcpp 1.0.4.6 2020-04-09 [1]
## readr * 1.3.1 2018-12-21 [1]
## readxl * 1.3.1 2019-03-13 [1]
## remotes 2.1.1 2020-02-15 [1]
## reprex 0.3.0 2019-05-16 [1]
## ReproReports * 0.1 2020-05-06 [1]
## rio 0.5.16 2018-11-26 [1]
## rlang 0.4.6 2020-05-02 [1]
## rmarkdown 2.1 2020-01-20 [1]
## rprojroot 1.3-2 2018-01-03 [1]
## rstudioapi 0.11 2020-02-07 [1]
## rvest 0.3.5 2019-11-08 [1]
## scales 1.1.0 2019-11-18 [1]
## sessioninfo 1.1.1 2018-11-05 [1]
## stringi 1.4.6 2020-02-17 [1]
## stringr * 1.4.0 2019-02-10 [1]
## testthat 2.3.2 2020-03-02 [1]
## tibble * 3.0.1 2020-04-20 [1]
## tidyr * 1.0.2 2020-01-24 [1]
## tidyselect 1.0.0 2020-01-27 [1]
## tidyverse * 1.3.0 2019-11-21 [1]
## usethis 1.6.1 2020-04-29 [1]
## vctrs 0.2.4 2020-03-10 [1]
## withr 2.2.0 2020-04-20 [1]
## xfun 0.13 2020-04-13 [1]
## xml2 1.3.2 2020-04-23 [1]
## yaml 2.2.1 2020-02-01 [1]
## zip 2.0.4 2019-09-01 [1]
## source
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## Github (METRICS-CARPS/CARPSreports@3277f85)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
##
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library