articleID <- "6-1-2015_PS" # insert the article ID code here e.g., "10-3-2015_PS"
reportType <- 'final'
pilotNames <- "Elizabeth Blevins, Barbara Barbosa Born" # insert the pilot's name here e.g., "Tom Hardwicke". If there are multiple pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
copilotNames <- "Manuel Bohn" # insert the co-pilot's name here e.g., "Michael Frank". If there are multiple co-pilots enter both names in a character string e.g., "Tom Hardwicke, Bob Dylan"
pilotTTC <- 420 # insert the pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
copilotTTC <- 60 # insert the co-pilot's estimated time to complete (in minutes, fine to approximate) e.g., 120
pilotStartDate <- as.Date("10/27/17", format = "%m/%d/%y") # insert the pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
copilotStartDate <- as.Date("10/19/18", format = "%m/%d/%y") # insert the co-pilot's start date in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
completionDate <- as.Date("10/19/18", format = "%m/%d/%y") # copilot insert the date of final report completion (after any necessary rounds of author assistance) in US format e.g., as.Date("01/25/18", format = "%m/%d/%y")
The target outcomes are from Experiment 1, in which ten participants took part. Participants completed two blocks, each consisting of a different task. In the “grasping task”, participants lifted a target disk from the table using their thumb and forefinger; in the “manual estimation task”, participants used their thumb and forefinger to indicate the size of the target before physically lifting it. Within each block, the target disk varied in size (3.00 cm or 3.75 cm) and in whether it was presented on the table by itself or accompanied by other disks. Additionally, participants wore liquid-crystal goggles. On half of the trials, which the researchers refer to as “open-loop” trials, the goggles were closed at the beginning of the trial, meaning “that participants could not see their moving hand or the disks” (Chen, Sperandio, & Goodale, 2015, p. 60). On the remaining trials, labeled “closed-loop”, the goggles did not close until three seconds into the trial, so participants could track both their moving hand and the target.
Experiment 1 used a 2 Task (grip, estimate) X 2 View (closed-loop, open-loop) X 2 Condition (isolated, crowded) X 2 Size (3.00 cm, 3.75 cm) within-subjects design. Participants completed each of the 16 trial types ten times, for a total of 160 trials. For each trial type, the researchers averaged participants’ grip apertures and manual size estimates. “A four-way repeated measures analysis of variance (ANOVA) was used to analyze the main effects of crowding condition, viewing condition, task, and target size, as well as their interactions” (Chen, Sperandio, & Goodale, 2015, p. 62). To follow up the ANOVA results, the researchers conducted paired t-tests, which they specified as two-tailed.
“Experiment 1 was designed to explore the effects of crowding on perception and action, with a particular focus on whether participants could scale their grip aperture to the size of the target even when they could not consciously identify the size of the target. We carried out a four-way repeated measures ANOVA on the manual estimates and PGAs with task (estimation vs. grasping), crowding condition (uncrowded vs. crowded), viewing condition (closed- vs. open-loop), and target size (3.0 vs. 3.75 cm) as main factors. The significant interaction between task and crowding condition, F(1, 9) = 6.818, p = .028, suggested that crowding had different effects on performance of the grasping and manual estimation tasks. Not surprisingly, when the target was presented in isolation, participants were able to manually estimate the sizes of the two targets—and this was true for both closed-loop trials, t(9) = 7.23, p < .001, and open-loop trials, t(9) = 9.19, p < .001. Similarly, participants showed excellent grip scaling for targets presented in isolation on both closed-loop trials, t(9) = 4.29, p = .002, and open-loop trials, t(9) = 4.79, p = .001 (Fig. 3). Things were quite different, however, when the target disks were surrounded by flankers. In this condition, participants could no longer discriminate between the two disk sizes using a manual estimate—closed-loop trials: t(9) = 1.02, p = .334; open-loop trials: t(9) = 1.78, p = .108—presumably because the size of the target was perceptually invisible. (Note that we use the term invisible to refer to the fact that participants could not identify the size of the target, even though they were aware of its presence and position.) In contrast, when participants were asked to grasp the same targets, their PGAs were still scaled to target size—closed-loop trials: t(9) = 4.21, p = .002; open-loop trials: t(9) = 3.392, p = .008 (Fig. 3).” (Chen, Sperandio, & Goodale, 2015, pp. 62-63).
# load packages
library(tidyverse) # for data munging
library(knitr) # for kable table formatting
library(haven) # import and export 'SPSS', 'Stata' and 'SAS' Files
library(readxl) # import excel files
library(ReproReports) # custom report functions
library(ez) # for repeated measures ANOVAs
library(broom) # for tidying model output into data frames
# Prepare report object. This will be updated automatically by the reproCheck function each time values are compared.
reportObject <- data.frame(dummyRow = TRUE, reportedValue = NA, obtainedValue = NA, valueType = NA, percentageError = NA, comparisonOutcome = NA, eyeballCheck = NA)
I read in the data file “data_Exp1.xlsx” from its “summary” tab.
# read data from Exp 1 summary sheet, ignoring the sheet's own column names
d <- read_xlsx("data/data_Exp1.xlsx", sheet = "summary", range = cell_rows(4:13), col_names = paste0("X__", 1:20))
# organize data in wide format
d_wide <- d %>%
select(-c(X__6, X__11, X__16)) %>% # drop empty columns
# rename columns according to condition
rename(subj = X__1,
grip_clos_isol_3.00cm = X__2,
grip_clos_isol_3.75cm = X__3,
grip_clos_crow_3.00cm = X__4,
grip_clos_crow_3.75cm = X__5,
grip_open_isol_3.00cm = X__7,
grip_open_isol_3.75cm = X__8,
grip_open_crow_3.00cm = X__9,
grip_open_crow_3.75cm = X__10,
esti_clos_isol_3.00cm = X__12,
esti_clos_isol_3.75cm = X__13,
esti_clos_crow_3.00cm = X__14,
esti_clos_crow_3.75cm = X__15,
esti_open_isol_3.00cm = X__17,
esti_open_isol_3.75cm = X__18,
esti_open_crow_3.00cm = X__19,
esti_open_crow_3.75cm = X__20)
# check structure
str(d_wide)
## tibble [10 × 17] (S3: tbl_df/tbl/data.frame)
## $ subj : chr [1:10] "sub1" "sub2" "sub3" "sub4" ...
## $ grip_clos_isol_3.00cm: num [1:10] 5.06 5.94 5.45 5.13 6.2 ...
## $ grip_clos_isol_3.75cm: num [1:10] 5.32 7.45 6.14 5.61 7.67 ...
## $ grip_clos_crow_3.00cm: num [1:10] 5.04 5.99 5.06 5.15 5.45 ...
## $ grip_clos_crow_3.75cm: num [1:10] 4.96 6.23 5.34 5.45 5.96 ...
## $ grip_open_isol_3.00cm: num [1:10] 5.01 4.95 5.42 5.93 6.12 ...
## $ grip_open_isol_3.75cm: num [1:10] 5.28 6.73 5.96 6.34 7.55 ...
## $ grip_open_crow_3.00cm: num [1:10] 5.13 4.21 4.86 5.42 5.78 ...
## $ grip_open_crow_3.75cm: num [1:10] 5.21 5.61 5.14 5.6 6.65 ...
## $ esti_clos_isol_3.00cm: num [1:10] 3.53 2.47 4.11 3.38 5.29 ...
## $ esti_clos_isol_3.75cm: num [1:10] 4.39 3.65 5.84 3.62 6.67 ...
## $ esti_clos_crow_3.00cm: num [1:10] 3.7 2.84 4.34 3.49 5.47 ...
## $ esti_clos_crow_3.75cm: num [1:10] 3.8 2.52 4.17 3.55 5.91 ...
## $ esti_open_isol_3.00cm: num [1:10] 3.26 2.33 4.46 2.72 4.72 ...
## $ esti_open_isol_3.75cm: num [1:10] 4.07 3.34 5.5 3.25 5.53 ...
## $ esti_open_crow_3.00cm: num [1:10] 3.12 2.51 3.37 3.25 4.86 ...
## $ esti_open_crow_3.75cm: num [1:10] 3 2.6 3.84 3.32 5.13 ...
# convert from wide to long format
d_tidy <- d_wide %>%
gather(trial_type, size_measurement_cm, grip_clos_isol_3.00cm:esti_open_crow_3.75cm) %>%
# separate trial_type into four columns
separate(trial_type, into = c("task","view","cond","size"), sep = "\\_") %>%
# convert trial types to factors
mutate(task = factor(task, levels = c("grip", "esti")),
view = factor(view, levels = c("clos", "open")),
cond = factor(cond, levels = c("isol", "crow")),
size = factor(size, levels = c("3.75cm", "3.00cm")))
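# note: listing "3.75cm" as the first level means the paired t-tests below compute
# mean(3.75 cm) - mean(3.00 cm), so the difference estimates come out positive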
str(d_tidy)
## tibble [160 × 6] (S3: tbl_df/tbl/data.frame)
## $ subj : chr [1:160] "sub1" "sub2" "sub3" "sub4" ...
## $ task : Factor w/ 2 levels "grip","esti": 1 1 1 1 1 1 1 1 1 1 ...
## $ view : Factor w/ 2 levels "clos","open": 1 1 1 1 1 1 1 1 1 1 ...
## $ cond : Factor w/ 2 levels "isol","crow": 1 1 1 1 1 1 1 1 1 1 ...
## $ size : Factor w/ 2 levels "3.75cm","3.00cm": 2 2 2 2 2 2 2 2 2 2 ...
## $ size_measurement_cm: num [1:160] 5.06 5.94 5.45 5.13 6.2 ...
# factor coding key:
# task: grasping (grip) vs. manual estimation (esti)
# view: closed-loop (clos) vs. open-loop (open)
# cond: isolated (isol) vs. crowded (crow)
# size: 3.00 cm vs. 3.75 cm
I began the analysis by computing the mean and standard deviation within each of the sixteen conditions. Although these descriptives are not explicitly part of the target outcomes, I wanted to compare them with the values depicted in Figure 3.
# compute mean and standard deviation per condition
descriptives <- d_tidy %>%
group_by(task, view, cond, size) %>%
summarise(mean = mean(size_measurement_cm),
stdv = sd(size_measurement_cm))
descriptives_table <- kable(descriptives, digits = 2) # round to two digits
descriptives_table
| task | view | cond | size | mean | stdv |
|---|---|---|---|---|---|
| grip | clos | isol | 3.75cm | 6.36 | 0.79 |
| grip | clos | isol | 3.00cm | 5.61 | 0.61 |
| grip | clos | crow | 3.75cm | 5.42 | 0.51 |
| grip | clos | crow | 3.00cm | 5.18 | 0.54 |
| grip | open | isol | 3.75cm | 6.48 | 0.70 |
| grip | open | isol | 3.00cm | 5.66 | 0.59 |
| grip | open | crow | 3.75cm | 5.52 | 0.63 |
| grip | open | crow | 3.00cm | 5.02 | 0.60 |
| esti | clos | isol | 3.75cm | 4.55 | 1.02 |
| esti | clos | isol | 3.00cm | 3.51 | 0.89 |
| esti | clos | crow | 3.75cm | 3.76 | 0.94 |
| esti | clos | crow | 3.00cm | 3.67 | 0.83 |
| esti | open | isol | 3.75cm | 4.24 | 0.97 |
| esti | open | isol | 3.00cm | 3.39 | 0.89 |
| esti | open | crow | 3.75cm | 3.51 | 0.73 |
| esti | open | crow | 3.00cm | 3.37 | 0.73 |
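To eyeball these values against Figure 3, the condition means can be plotted in the same task-by-view layout. A quick sketch (the axis and legend labels here are mine, not the original figure's):

# informal visual check of condition means against Figure 3 (sketch only)
ggplot(descriptives, aes(x = cond, y = mean, fill = size)) +
  geom_col(position = "dodge") +
  facet_grid(task ~ view) +
  labs(x = "crowding condition", y = "mean PGA / manual estimate (cm)", fill = "target size")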
Next, I conducted a four-way repeated measures ANOVA with task, view, condition, and size as within-subjects factors, specifying Type III sums of squares.
From the target outcomes:

> The significant interaction between task and crowding condition, F(1, 9) = 6.818, p = .028, suggested that crowding had different effects on performance of the grasping and manual estimation tasks.
# conduct repeated measures ANOVA
# specified type III SS
model <- ezANOVA(d_tidy, dv = .(size_measurement_cm), wid = .(subj), within = .(task, view, cond, size), detailed = TRUE, type = "III")
# return model
print(model)
## $ANOVA
## Effect DFn DFd SSn SSd F
## 1 (Intercept) 1 9 3.539853e+03 37.9465210 839.56772811
## 2 task 1 9 1.452149e+02 29.2825581 44.63182989
## 3 view 1 9 4.903254e-01 2.0580878 2.14418862
## 4 cond 1 9 1.186461e+01 2.7497543 38.83310239
## 5 size 1 9 1.217704e+01 2.3007280 47.63422308
## 6 task:view 1 9 7.397368e-01 3.0946706 2.15132135
## 7 task:cond 1 9 1.554030e+00 2.0512607 6.81837696
## 8 view:cond 1 9 7.231502e-02 0.6840352 0.95146444
## 9 task:size 1 9 2.565616e-02 1.0149270 0.22750942
## 10 view:size 1 9 1.888431e-02 0.3533509 0.48099152
## 11 cond:size 1 9 3.935599e+00 0.5327346 66.48787404
## 12 task:view:cond 1 9 7.681746e-03 0.5092932 0.13574835
## 13 task:view:size 1 9 1.348386e-01 0.4728168 2.56663343
## 14 task:cond:size 1 9 4.201056e-01 0.5297112 7.13775779
## 15 view:cond:size 1 9 1.146078e-01 0.3140613 3.28429651
## 16 task:view:cond:size 1 9 1.727883e-03 0.1725707 0.09011346
## p p<.05 ges
## 1 3.385903e-10 * 9.768022e-01
## 2 9.050583e-05 * 6.333463e-01
## 3 1.771464e-01 5.798728e-03
## 4 1.529579e-04 * 1.236777e-01
## 5 7.053141e-05 * 1.265225e-01
## 6 1.764987e-01 8.722610e-03
## 7 2.821507e-02 * 1.815008e-02
## 8 3.548342e-01 8.594669e-04
## 9 6.447455e-01 3.050937e-04
## 10 5.054769e-01 2.245833e-04
## 11 1.900094e-05 * 4.472135e-02
## 12 7.210622e-01 9.136803e-05
## 13 1.436036e-01 1.601372e-03
## 14 2.555293e-02 * 4.972418e-03
## 15 1.033635e-01 1.361434e-03
## 16 7.708511e-01 2.055319e-05
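As a sanity check on the ANOVA specification: because the design is fully balanced (one cell mean per participant per condition), Type I and Type III sums of squares coincide, so a plain aov with the appropriate error strata should recover the same F ratios. A quick sketch:

# cross-check (sketch): standard univariate RM-ANOVA via aov error strata;
# for this balanced design it should reproduce F(1, 9) = 6.818 for task:cond
summary(aov(size_measurement_cm ~ task * view * cond * size +
            Error(subj / (task * view * cond * size)), data = d_tidy))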
I then conducted paired t-tests to decompose the interaction, comparing the two target sizes within each combination of task, crowding condition, and viewing condition (eight tests in total; the first four concern the isolated targets and the last four the crowded targets).
From the target outcomes:

> Not surprisingly, when the target was presented in isolation, participants were able to manually estimate the sizes of the two targets—and this was true for both closed-loop trials, t(9) = 7.23, p < .001, and open-loop trials, t(9) = 9.19, p < .001. Similarly, participants showed excellent grip scaling for targets presented in isolation on both closed-loop trials, t(9) = 4.29, p = .002, and open-loop trials, t(9) = 4.79, p = .001 (Fig. 3). Things were quite different, however, when the target disks were surrounded by flankers. In this condition, participants could no longer discriminate between the two disk sizes using a manual estimate—closed-loop trials: t(9) = 1.02, p = .334; open-loop trials: t(9) = 1.78, p = .108—presumably because the size of the target was perceptually invisible. (Note that we use the term invisible to refer to the fact that participants could not identify the size of the target, even though they were aware of its presence and position.) In contrast, when participants were asked to grasp the same targets, their PGAs were still scaled to target size—closed-loop trials: t(9) = 4.21, p = .002; open-loop trials: t(9) = 3.392, p = .008 (Fig. 3). (Chen, Sperandio, & Goodale, 2015, pp. 62-63)
# paired t-tests comparing the 3.00 cm vs. 3.75 cm targets within each cell of the design
# helper: filter one task x condition x view cell, then run a paired t-test across sizes
run_size_ttest <- function(task_f, cond_f, view_f) {
  cell <- d_tidy %>% filter(task == task_f, cond == cond_f, view == view_f)
  t.test(size_measurement_cm ~ size, data = cell, paired = TRUE)
}
# manual estimation of isolated targets on closed- and open-loop trials
ttest_esti_isol_clos <- run_size_ttest("esti", "isol", "clos")
ttest_esti_isol_open <- run_size_ttest("esti", "isol", "open")
# grip of isolated targets on closed- and open-loop trials
ttest_grip_isol_clos <- run_size_ttest("grip", "isol", "clos")
ttest_grip_isol_open <- run_size_ttest("grip", "isol", "open")
# manual estimation of crowded targets on closed- and open-loop trials
ttest_esti_crow_clos <- run_size_ttest("esti", "crow", "clos")
ttest_esti_crow_open <- run_size_ttest("esti", "crow", "open")
# grip of crowded targets on closed- and open-loop trials
ttest_grip_crow_clos <- run_size_ttest("grip", "crow", "clos")
ttest_grip_crow_open <- run_size_ttest("grip", "crow", "open")
# combine t-test results in one table
ttest_results <- map_df(list(ttest_esti_isol_clos, ttest_esti_isol_open, ttest_grip_isol_clos, ttest_grip_isol_open,
                             ttest_esti_crow_clos, ttest_esti_crow_open, ttest_grip_crow_clos, ttest_grip_crow_open), tidy)
# label each row with its task x condition x view combination
ttest_type <- c("esti_isol_clos", "esti_isol_open", "grip_isol_clos", "grip_isol_open", "esti_crow_clos", "esti_crow_open", "grip_crow_clos", "grip_crow_open")
ttest_results_named <- cbind(ttest_type, ttest_results)
# only include the columns related to the statistics reported in the manuscript
ttest_results_clean <- kable(ttest_results_named[c("ttest_type", "estimate", "statistic", "p.value", "parameter", "method", "alternative")])
ttest_results_clean
| ttest_type | estimate | statistic | p.value | parameter | method | alternative |
|---|---|---|---|---|---|---|
| esti_isol_clos | 1.0390083 | 7.234218 | 0.0000490 | 9 | Paired t-test | two.sided |
| esti_isol_open | 0.8461444 | 9.198307 | 0.0000071 | 9 | Paired t-test | two.sided |
| grip_isol_clos | 0.7554306 | 4.295371 | 0.0020042 | 9 | Paired t-test | two.sided |
| grip_isol_open | 0.8210965 | 4.796483 | 0.0009785 | 9 | Paired t-test | two.sided |
| esti_crow_clos | 0.0865001 | 1.021195 | 0.3338263 | 9 | Paired t-test | two.sided |
| esti_crow_open | 0.1340361 | 1.782047 | 0.1084252 | 9 | Paired t-test | two.sided |
| grip_crow_clos | 0.2391417 | 4.216005 | 0.0022527 | 9 | Paired t-test | two.sided |
| grip_crow_open | 0.4926279 | 3.392246 | 0.0079707 | 9 | Paired t-test | two.sided |
Lastly, I used the reproCheck function to compare the reported and obtained values. For two of the t-tests, the associated p-values were reported only as p < .001 in the manuscript, so they could not be compared numerically with reproCheck; since the p-values I obtained also fell below .001, I did not count them as errors. (I consulted with Tom via email regarding this issue.)
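The checks below reference values pulled from the ANOVA table and the individual t-test objects. A minimal extraction sketch (the flat variable names are taken from the reproCheck calls that follow; the values come straight from model$ANOVA and the fields of each htest object):

# pull the task x condition interaction row out of the ANOVA table
anova_taskxcond <- model$ANOVA %>% filter(Effect == "task:cond")
anova_taskxcond_DFn <- anova_taskxcond$DFn
anova_taskxcond_DFd <- anova_taskxcond$DFd
anova_taskxcond_fval <- anova_taskxcond$F
anova_taskxcond_pval <- anova_taskxcond$p
# collect the t-tests in the order they are checked below (tests 1-8)
ttests <- list(ttest_esti_isol_clos, ttest_esti_isol_open, ttest_grip_isol_clos, ttest_grip_isol_open,
               ttest_esti_crow_clos, ttest_esti_crow_open, ttest_grip_crow_clos, ttest_grip_crow_open)
for (i in seq_along(ttests)) {
  assign(paste0("ttest", i, "_df"), unname(ttests[[i]]$parameter))
  assign(paste0("ttest", i, "_tval"), unname(ttests[[i]]$statistic))
  assign(paste0("ttest", i, "_pval"), ttests[[i]]$p.value)
}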
# compare ANOVA results
# df numerator
reportObject <- reproCheck(reportedValue = "1", obtainedValue = anova_taskxcond_DFn, valueType = 'df')
## [1] "MATCH for df. The reported value (1) and the obtained value (1) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# df denominator
reportObject <- reproCheck(reportedValue = "9", obtainedValue = anova_taskxcond_DFd, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# F statistic
reportObject <- reproCheck(reportedValue = "6.818", obtainedValue = anova_taskxcond_fval, valueType = 'F')
## [1] "MATCH for F. The reported value (6.818) and the obtained value (6.818) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# p value
reportObject <- reproCheck(reportedValue = ".028", obtainedValue = anova_taskxcond_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.028) and the obtained value (0.028) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# compare t-test results
# ttest for manual estimation of isolated targets on closed-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest1_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "7.23", obtainedValue = ttest1_tval, valueType = 't')
## [1] "MATCH for t. The reported value (7.23) and the obtained value (7.23) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# p value reported as < .001
# ttest for manual estimation of isolated targets on open-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest2_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "9.19", obtainedValue = ttest2_tval, valueType = 't')
## [1] "MINOR_ERROR for t. The reported value (9.19) and the obtained value (9.2) differed by 0.11%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# p value reported as < .001
# ttest for grip of isolated targets on closed-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest3_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "4.29", obtainedValue = ttest3_tval, valueType = 't')
## [1] "MINOR_ERROR for t. The reported value (4.29) and the obtained value (4.3) differed by 0.23%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# t p value
reportObject <- reproCheck(reportedValue = ".002", obtainedValue = ttest3_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.002) and the obtained value (0.002) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# ttest for grip of isolated targets on open-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest4_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "4.79", obtainedValue = ttest4_tval, valueType = 't')
## [1] "MINOR_ERROR for t. The reported value (4.79) and the obtained value (4.8) differed by 0.21%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# t p value
reportObject <- reproCheck(reportedValue = ".001", obtainedValue = ttest4_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.001) and the obtained value (0.001) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# compare t-test results
# ttest for manual estimation of crowded targets on closed-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest5_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "1.02", obtainedValue = ttest5_tval, valueType = 't')
## [1] "MATCH for t. The reported value (1.02) and the obtained value (1.02) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# t p value
reportObject <- reproCheck(reportedValue = ".334", obtainedValue = ttest5_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.334) and the obtained value (0.334) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# compare t-test results
# ttest for manual estimation of crowded targets on open-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest6_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "1.78", obtainedValue = ttest6_tval, valueType = 't')
## [1] "MATCH for t. The reported value (1.78) and the obtained value (1.78) differed by 0%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# t p value
reportObject <- reproCheck(reportedValue = ".108", obtainedValue = ttest6_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.108) and the obtained value (0.108) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# compare t-test results
# ttest for grip of crowded targets on closed-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest7_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "4.21", obtainedValue = ttest7_tval, valueType = 't')
## [1] "MINOR_ERROR for t. The reported value (4.21) and the obtained value (4.22) differed by 0.24%. Note that the obtained value was rounded to 2 decimal places to match the reported value."
# t p value
reportObject <- reproCheck(reportedValue = ".002", obtainedValue = ttest7_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.002) and the obtained value (0.002) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# compare t-test results
# ttest for grip of crowded targets on open-loop trials (compare 3cm vs. 3.75cm)
# t df
reportObject <- reproCheck(reportedValue = "9", obtainedValue = ttest8_df, valueType = 'df')
## [1] "MATCH for df. The reported value (9) and the obtained value (9) differed by 0%. Note that the obtained value was rounded to 0 decimal places to match the reported value."
# t statistic
reportObject <- reproCheck(reportedValue = "3.392", obtainedValue = ttest8_tval, valueType = 't')
## [1] "MATCH for t. The reported value (3.392) and the obtained value (3.392) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
# t p value
reportObject <- reproCheck(reportedValue = ".008", obtainedValue = ttest8_pval, valueType = 'p')
## [1] "MATCH for p. The reported value (0.008) and the obtained value (0.008) differed by 0%. Note that the obtained value was rounded to 3 decimal places to match the reported value."
The target outcomes were successfully reproduced. It appears the researchers consistently truncated (rounded down) the t statistics, producing four minor numerical discrepancies. In the following summary, I report the statistics to three decimal places.
A 2 Task (grip, estimate) X 2 View (closed-loop, open-loop) X 2 Condition (isolated, crowded) X 2 Size (3.00 cm, 3.75 cm) repeated measures ANOVA revealed a significant Task by Condition interaction, F(1, 9) = 6.818, p = 0.028. I then conducted post-hoc paired t-tests to decompose the interaction.
In the manual estimation task, when the target disk was presented by itself, participants successfully differentiated between the two disk sizes on closed-loop trials, t(9) = 7.234, p < .001, as well as on open-loop trials, t(9) = 9.198, p < .001. A similar pattern emerged when participants grasped the target disk in the isolated condition: again, they distinguished between the two disk sizes on closed-loop trials, t(9) = 4.295, p = 0.002, and on open-loop trials, t(9) = 4.796, p = 0.001.
In the crowded condition, participants did not distinguish between the disk sizes when manually estimating them (closed-loop trials: t(9) = 1.021, p = 0.334; open-loop trials: t(9) = 1.782, p = 0.108). In contrast, they still distinguished between the sizes when grasping the disk (closed-loop trials: t(9) = 4.216, p = 0.002; open-loop trials: t(9) = 3.392, p = 0.008).
Author_Assistance <- FALSE # was author assistance provided? (if so, enter TRUE)
Insufficient_Information_Errors <- 0 # how many discrete insufficient information issues did you encounter?
# Assess the causal locus (discrete reproducibility issues) of any reproducibility errors. Note that there doesn't necessarily have to be a one-to-one correspondence between discrete reproducibility issues and reproducibility errors. For example, it could be that the original article neglects to mention that a Greenhouse-Geisser correction was applied to ANOVA outcomes. This might result in multiple reproducibility errors, but there is a single causal locus (discrete reproducibility issue).
locus_typo <- NA # how many discrete issues did you encounter that related to typographical errors?
locus_specification <- NA # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis <- NA # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data <- NA # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified <- NA # how many discrete issues were there for which you could not identify the cause
# How many of the above issues were resolved through author assistance?
locus_typo_resolved <- NA # how many discrete issues did you encounter that related to typographical errors?
locus_specification_resolved <- NA # how many discrete issues did you encounter that related to incomplete, incorrect, or unclear specification of the original analyses?
locus_analysis_resolved <- NA # how many discrete issues did you encounter that related to errors in the authors' original analyses?
locus_data_resolved <- NA # how many discrete issues did you encounter that related to errors in the data files shared by the authors?
locus_unidentified_resolved <- NA # how many discrete issues were there for which you could not identify the cause
Affects_Conclusion <- NA # Do any reproducibility issues encountered appear to affect the conclusions made in the original article? This is a subjective judgement, but you should take into account multiple factors, such as the presence/absence of decision errors, the number of target outcomes that could not be reproduced, the type of outcomes that could or could not be reproduced, the difference in magnitude of effect sizes, and the predictions of the specific hypothesis under scrutiny.
reportObject <- reportObject %>%
filter(dummyRow == FALSE) %>% # remove the dummy row
select(-dummyRow) %>% # remove dummy row designation
mutate(articleID = articleID) %>% # add the articleID
select(articleID, everything()) # make articleID first column
# decide on final outcome
if(any(!(reportObject$comparisonOutcome %in% c("MATCH", "MINOR_ERROR"))) | Insufficient_Information_Errors > 0){
  finalOutcome <- "Failure without author assistance"
  if(Author_Assistance == TRUE){
    finalOutcome <- "Failure despite author assistance"
  }
} else {
  finalOutcome <- "Success without author assistance"
  if(Author_Assistance == TRUE){
    finalOutcome <- "Success with author assistance"
  }
}
# collate report extra details
reportExtras <- data.frame(articleID, pilotNames, copilotNames, pilotTTC, copilotTTC, pilotStartDate, copilotStartDate, completionDate, Author_Assistance, finalOutcome, Insufficient_Information_Errors, locus_typo, locus_specification, locus_analysis, locus_data, locus_unidentified, locus_typo_resolved, locus_specification_resolved, locus_analysis_resolved, locus_data_resolved, locus_unidentified_resolved)
# save report objects
if(reportType == "pilot"){
write_csv(reportObject, "pilotReportDetailed.csv")
write_csv(reportExtras, "pilotReportExtras.csv")
}
if(reportType == "final"){
write_csv(reportObject, "finalReportDetailed.csv")
write_csv(reportExtras, "finalReportExtras.csv")
}
devtools::session_info()
## ─ Session info ───────────────────────────────────────────────────────────────
## setting value
## version R version 4.0.0 (2020-04-24)
## os macOS Catalina 10.15.4
## system x86_64, darwin17.0
## ui X11
## language (EN)
## collate en_US.UTF-8
## ctype en_US.UTF-8
## tz Europe/London
## date 2020-05-13
##
## ─ Packages ───────────────────────────────────────────────────────────────────
## package * version date lib
## abind 1.4-5 2016-07-21 [1]
## assertthat 0.2.1 2019-03-21 [1]
## backports 1.1.6 2020-04-05 [1]
## boot 1.3-24 2019-12-20 [1]
## broom * 0.5.6 2020-04-20 [1]
## callr 3.4.3 2020-03-28 [1]
## car 3.0-7 2020-03-11 [1]
## carData 3.0-3 2019-11-16 [1]
## cellranger 1.1.0 2016-07-27 [1]
## cli 2.0.2 2020-02-28 [1]
## colorspace 1.4-1 2019-03-18 [1]
## crayon 1.3.4 2017-09-16 [1]
## curl 4.3 2019-12-02 [1]
## data.table 1.12.8 2019-12-09 [1]
## DBI 1.1.0 2019-12-15 [1]
## dbplyr 1.4.3 2020-04-19 [1]
## desc 1.2.0 2018-05-01 [1]
## devtools 2.3.0 2020-04-10 [1]
## digest 0.6.25 2020-02-23 [1]
## dplyr * 0.8.5 2020-03-07 [1]
## ellipsis 0.3.0 2019-09-20 [1]
## evaluate 0.14 2019-05-28 [1]
## ez * 4.4-0 2016-11-02 [1]
## fansi 0.4.1 2020-01-08 [1]
## forcats * 0.5.0 2020-03-01 [1]
## foreign 0.8-78 2020-04-13 [1]
## fs 1.4.1 2020-04-04 [1]
## generics 0.0.2 2018-11-29 [1]
## ggplot2 * 3.3.0 2020-03-05 [1]
## glue 1.4.0 2020-04-03 [1]
## gtable 0.3.0 2019-03-25 [1]
## haven * 2.2.0 2019-11-08 [1]
## highr 0.8 2019-03-20 [1]
## hms 0.5.3 2020-01-08 [1]
## htmltools 0.4.0 2019-10-04 [1]
## httr 1.4.1 2019-08-05 [1]
## jsonlite 1.6.1 2020-02-02 [1]
## knitr * 1.28 2020-02-06 [1]
## lattice 0.20-41 2020-04-02 [1]
## lifecycle 0.2.0 2020-03-06 [1]
## lme4 1.1-23 2020-04-07 [1]
## lubridate 1.7.8 2020-04-06 [1]
## magrittr 1.5 2014-11-22 [1]
## MASS 7.3-51.5 2019-12-20 [1]
## Matrix 1.2-18 2019-11-27 [1]
## memoise 1.1.0 2017-04-21 [1]
## mgcv 1.8-31 2019-11-09 [1]
## minqa 1.2.4 2014-10-09 [1]
## modelr 0.1.7 2020-04-30 [1]
## munsell 0.5.0 2018-06-12 [1]
## nlme 3.1-147 2020-04-13 [1]
## nloptr 1.2.2.1 2020-03-11 [1]
## openxlsx 4.1.4 2019-12-06 [1]
## pillar 1.4.4 2020-05-05 [1]
## pkgbuild 1.0.7 2020-04-25 [1]
## pkgconfig 2.0.3 2019-09-22 [1]
## pkgload 1.0.2 2018-10-29 [1]
## plyr 1.8.6 2020-03-03 [1]
## prettyunits 1.1.1 2020-01-24 [1]
## processx 3.4.2 2020-02-09 [1]
## ps 1.3.2 2020-02-13 [1]
## purrr * 0.3.4 2020-04-17 [1]
## R6 2.4.1 2019-11-12 [1]
## Rcpp 1.0.4.6 2020-04-09 [1]
## readr * 1.3.1 2018-12-21 [1]
## readxl * 1.3.1 2019-03-13 [1]
## remotes 2.1.1 2020-02-15 [1]
## reprex 0.3.0 2019-05-16 [1]
## ReproReports * 0.1 2020-05-06 [1]
## reshape2 1.4.4 2020-04-09 [1]
## rio 0.5.16 2018-11-26 [1]
## rlang 0.4.6 2020-05-02 [1]
## rmarkdown 2.1 2020-01-20 [1]
## rprojroot 1.3-2 2018-01-03 [1]
## rstudioapi 0.11 2020-02-07 [1]
## rvest 0.3.5 2019-11-08 [1]
## scales 1.1.0 2019-11-18 [1]
## sessioninfo 1.1.1 2018-11-05 [1]
## statmod 1.4.34 2020-02-17 [1]
## stringi 1.4.6 2020-02-17 [1]
## stringr * 1.4.0 2019-02-10 [1]
## testthat 2.3.2 2020-03-02 [1]
## tibble * 3.0.1 2020-04-20 [1]
## tidyr * 1.0.2 2020-01-24 [1]
## tidyselect 1.0.0 2020-01-27 [1]
## tidyverse * 1.3.0 2019-11-21 [1]
## usethis 1.6.1 2020-04-29 [1]
## vctrs 0.2.4 2020-03-10 [1]
## withr 2.2.0 2020-04-20 [1]
## xfun 0.13 2020-04-13 [1]
## xml2 1.3.2 2020-04-23 [1]
## yaml 2.2.1 2020-02-01 [1]
## zip 2.0.4 2019-09-01 [1]
## source
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## Github (METRICS-CARPS/CARPSreports@3277f85)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
## CRAN (R 4.0.0)
##
## [1] /Library/Frameworks/R.framework/Versions/4.0/Resources/library