3.2.2 Main trials
For the rest of the analyses, focus on the participants with good catch trial performance. Select the subjects with acceptable catch trial accuracy from the original tbl_all.
tbl_good_catch_acc_all_main_acc <- tbl_all[(tbl_all$workerId %in% tbl_good_catch_acc_rate$workerId),]
#head(tbl_good_catch_acc_all_main_acc,10)
Verify subject count.
nrow(tbl_good_catch_acc_all_main_acc %>% distinct(workerId,.keep_all = FALSE))
## [1] 34
Here is a table containing the number of trials remaining for each individual after excluding main trials based on accuracy (i.e., keeping only correct responses). Recall that there were 80 target-present trials: 60% were valid (48 trials) and 40% were invalid (32 trials, 16 of each invalid type).
tbl_good_catch_acc_all_main_acc_counts <- tbl_good_catch_acc_all_main_acc %>%
group_by(workerId,validity) %>%
filter((validity=='valid_same' | validity=='invalid_same' | validity=='invalid_different') & click_ACC == 1) %>%
dplyr::summarize(counts = n()) %>%
spread(validity,counts)
tbl_good_catch_acc_all_main_acc_counts$sum = rowSums(tbl_good_catch_acc_all_main_acc_counts[,c(-1)], na.rm = TRUE)
#head(tbl_good_catch_acc_all_main_acc_counts,10)
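As an optional sanity check (a sketch, assuming the column names produced by spread() above), the per-condition counts can be compared against the design maximums just described (48 valid trials and 16 trials per invalid type):
# Optional sanity check: no subject should have more correct trials than the design allows.
stopifnot(all(tbl_good_catch_acc_all_main_acc_counts$valid_same <= 48, na.rm = TRUE))
stopifnot(all(tbl_good_catch_acc_all_main_acc_counts$invalid_same <= 16, na.rm = TRUE))
stopifnot(all(tbl_good_catch_acc_all_main_acc_counts$invalid_different <= 16, na.rm = TRUE))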
Same table, but binned.
tbl_good_catch_acc_all_main_acc_counts_bin <- tbl_good_catch_acc_all_main_acc %>%
group_by(workerId,validity,bin) %>%
filter((validity=='valid_same' | validity=='invalid_same' | validity=='invalid_different') & click_ACC == 1) %>%
dplyr::summarize(counts = n()) %>%
spread(validity,counts)
tbl_good_catch_acc_all_main_acc_counts_bin$sum = rowSums(tbl_good_catch_acc_all_main_acc_counts_bin[,c(-1:-2)], na.rm = TRUE)
#head(tbl_good_catch_acc_all_main_acc_counts_bin,10)
Some subjects may have no surviving data for a particular condition. These subjects should be excluded because they would otherwise contribute an unequal set of conditions relative to the other subjects.
tbl_good_catch_acc_all_main_acc_NA_conditions_removed <- tbl_good_catch_acc_all_main_acc_counts %>%
filter(!is.na(valid_same) & !is.na(invalid_same) & !is.na(invalid_different))
#head(tbl_good_catch_acc_all_main_acc_NA_conditions_removed,10)
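To see which subjects, if any, were dropped at this step, a quick optional check compares the workerId lists before and after the filter:
# Subjects dropped for having an empty (NA) condition, if any.
setdiff(tbl_good_catch_acc_all_main_acc_counts$workerId,
        tbl_good_catch_acc_all_main_acc_NA_conditions_removed$workerId)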
Same table, but binned.
tbl_good_catch_acc_all_main_acc_NA_conditions_removed_bin <- tbl_good_catch_acc_all_main_acc_counts_bin[(tbl_good_catch_acc_all_main_acc_counts_bin$workerId %in% tbl_good_catch_acc_all_main_acc_NA_conditions_removed$workerId),]
#head(tbl_good_catch_acc_all_main_acc_NA_conditions_removed_bin,10)
Now, let’s remove the subjects flagged above (those with an NA condition) from tbl_good_catch_acc_all_main_acc.
tbl_good_catch_acc_all_main_acc_NA_subjs_removed <- tbl_good_catch_acc_all_main_acc[(tbl_good_catch_acc_all_main_acc$workerId %in% tbl_good_catch_acc_all_main_acc_NA_conditions_removed$workerId),]
#head(tbl_good_catch_acc_all_main_acc_NA_subjs_removed,10)
And let’s check the number of subjects we are now working with.
nrow(tbl_good_catch_acc_all_main_acc_NA_subjs_removed %>% distinct(workerId,.keep_all = FALSE))
## [1] 34
After dropping subjects based on catch trial performance and on main trial accuracy (including any additional subjects dropped for unequal conditions), get the original number of trials for the remaining subjects.
tbl_good_catch_acc_all_main_acc_NA_subjs_removed_counts <- tbl_all_counts_no_catch[(tbl_all_counts_no_catch$workerId %in% tbl_good_catch_acc_all_main_acc_NA_conditions_removed$workerId),]
#head(tbl_good_catch_acc_all_main_acc_NA_subjs_removed_counts,10)
Plot the overall accuracy at the group level (collapsed across workerId and condition).
tbl_overall_good_acc <- (tbl_good_catch_acc_all_main_acc_NA_conditions_removed$sum / tbl_good_catch_acc_all_main_acc_NA_subjs_removed_counts$sum)
tbl_overall_good_acc <- cbind.data.frame(tbl_good_catch_acc_all_main_acc_NA_subjs_removed_counts[,1], tbl_overall_good_acc)
colnames(tbl_overall_good_acc) <- c("workerId", "ACC")
tbl_overall_good_acc %>%
ggbarplot(y = "ACC", ylab = "Accuracy", fill = "#f7a800", color = "#f7a800", add = "mean_se", ylim = c(0, 1), xlab = "Group", width = 0.5, label = TRUE, lab.nb.digits = 2, lab.vjust = -1.8, title = "Main Trial Accuracy")
Look at the overall accuracy at the group level (collapsed across workerId and condition) over time.
tbl_good_no_NA_bin <- tbl_good_catch_acc_all_main_acc_NA_subjs_removed %>%
group_by(workerId,validity,bin) %>%
filter(validity=='valid_same' | validity=='invalid_same' | validity=='invalid_different') %>%
dplyr::summarize(counts = n()) %>%
spread(validity,counts)
tbl_good_no_NA_bin$sum = rowSums(tbl_good_no_NA_bin[,c(-1:-2)], na.rm = TRUE)
#head(tbl_good_no_NA_bin,10)
tbl_overall_good_acc_bin <- (tbl_good_catch_acc_all_main_acc_NA_conditions_removed_bin$sum / tbl_good_no_NA_bin$sum)
tbl_overall_good_acc_bin <- cbind.data.frame(tbl_good_no_NA_bin[,1:2], tbl_overall_good_acc_bin)
colnames(tbl_overall_good_acc_bin) <- c("workerId", "bin", "ACC")
tbl_overall_good_acc_bin %>%
ggbarplot(y = "ACC", x = "bin", ylab = "Accuracy", fill = "#f7a800", color = "#f7a800", add = "mean_se", ylim = c(0, 1), xlab = " Bin", width = 0.5, label = TRUE, lab.nb.digits = 2, lab.vjust = c(-1.8, -1.8, -1.8, -2.2), title = "Main Trial Accuracy Over Time", na.rm = TRUE)
Here are some descriptive and inferential statistics for the effect of time (bin) on accuracy.
tbl_overall_good_acc_bin %>%
group_by(bin) %>%
get_summary_stats(ACC, type = "mean_se")
## # A tibble: 4 x 5
## bin variable n mean se
## <chr> <chr> <dbl> <dbl> <dbl>
## 1 block_1 ACC 34 0.884 0.03
## 2 block_2 ACC 34 0.854 0.036
## 3 block_3 ACC 34 0.839 0.042
## 4 block_4 ACC 34 0.834 0.046
tbl_overall_good_acc_bin %>%
group_by(bin) %>%
identify_outliers(ACC)
## # A tibble: 18 x 5
## bin workerId ACC is.outlier is.extreme
## <chr> <chr> <dbl> <lgl> <lgl>
## 1 block_1 A1PUWQYUQRGCO 0.429 TRUE FALSE
## 2 block_1 A2CHD0TNWHFW1R 0.211 TRUE TRUE
## 3 block_2 A1PUWQYUQRGCO 0.4 TRUE FALSE
## 4 block_2 A2CHD0TNWHFW1R 0.25 TRUE TRUE
## 5 block_2 A3CASN6JG7104 0.182 TRUE TRUE
## 6 block_3 A1PUWQYUQRGCO 0.476 TRUE TRUE
## 7 block_3 A2CHD0TNWHFW1R 0.2 TRUE TRUE
## 8 block_3 A3CASN6JG7104 0.25 TRUE TRUE
## 9 block_3 A3E8NUUS90EWXW 0.65 TRUE TRUE
## 10 block_3 A3S4M1GQAMPFZB 0.529 TRUE TRUE
## 11 block_3 A3SYY5R44RAATE 0.143 TRUE TRUE
## 12 block_3 A4W9APAHFWVLO 0.526 TRUE TRUE
## 13 block_4 A1PUWQYUQRGCO 0.389 TRUE TRUE
## 14 block_4 A2CHD0TNWHFW1R 0.238 TRUE TRUE
## 15 block_4 A3CASN6JG7104 0.2 TRUE TRUE
## 16 block_4 A3E8NUUS90EWXW 0.5 TRUE FALSE
## 17 block_4 A3S4M1GQAMPFZB 0.190 TRUE TRUE
## 18 block_4 A3SYY5R44RAATE 0.238 TRUE TRUE
res.aov <- anova_test(data = tbl_overall_good_acc_bin, dv = ACC, wid = workerId, within = bin)
get_anova_table(res.aov, correction = "none")
## ANOVA Table (type III tests)
##
## Effect DFn DFd F p p<.05 ges
## 1 bin 3 99 1.365 0.258 0.007
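Because the table above is reported without a sphericity correction, it can be worth checking that the assumption holds. When the within factor has more than two levels, the object returned by anova_test() also carries Mauchly's test and the corrected tables (a quick sketch of how to inspect them):
# Inspect the sphericity test and corrections computed alongside the ANOVA.
res.aov$`Mauchly's Test for Sphericity`
res.aov$`Sphericity Corrections`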
pwc <- tbl_overall_good_acc_bin %>%
pairwise_t_test(
ACC ~ bin, paired = TRUE,
p.adjust.method = "bonferroni"
)
pwc
## # A tibble: 6 x 10
## .y. group1 group2 n1 n2 statistic df p p.adj p.adj.signif
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 ACC block_1 block_2 34 34 1.13 33 0.266 1 ns
## 2 ACC block_1 block_3 34 34 1.30 33 0.204 1 ns
## 3 ACC block_1 block_4 34 34 1.27 33 0.211 1 ns
## 4 ACC block_2 block_3 34 34 0.876 33 0.387 1 ns
## 5 ACC block_2 block_4 34 34 0.947 33 0.35 1 ns
## 6 ACC block_3 block_4 34 34 0.278 33 0.783 1 ns
Look at the overall accuracy for the group by validity (valid-same, invalid-same, and invalid-different).
tbl_overall_good_acc_cond <- (tbl_good_catch_acc_all_main_acc_NA_conditions_removed[-1] / tbl_good_catch_acc_all_main_acc_NA_subjs_removed_counts[-1])
tbl_overall_good_acc_cond <- cbind.data.frame(tbl_good_catch_acc_all_main_acc_NA_subjs_removed_counts[,1], tbl_overall_good_acc_cond)
tbl_overall_good_acc_cond <- gather(tbl_overall_good_acc_cond, validity, acc, valid_same:invalid_different, factor_key=TRUE)
tbl_overall_good_acc_cond %>%
ggbarplot(x = "validity", y = "acc", ylab = "Accuracy", fill = "validity" , color = "validity", palette = c("#0d2240", "#00a8e1", "#f7a800", "#E31818", "#dfdddc"), add = "mean_se", ylim = c(0, 1), na.rm = TRUE, label = TRUE, lab.nb.digits = 2, lab.vjust = c(-1.6, -2, -1.6), title = "Main Trial Accuracy By Validity", xlab = "Validity")
Here are some descriptive and inferential statistics for the effect of validity on accuracy.
tbl_overall_good_acc_cond %>%
group_by(validity) %>%
get_summary_stats(acc, type = "mean_sd")
## # A tibble: 3 x 5
## validity variable n mean sd
## <fct> <chr> <dbl> <dbl> <dbl>
## 1 valid_same acc 34 0.871 0.205
## 2 invalid_same acc 34 0.822 0.238
## 3 invalid_different acc 34 0.825 0.227
tbl_overall_good_acc_cond %>%
group_by(validity) %>%
identify_outliers(acc)
## # A tibble: 17 x 6
## validity workerId sum acc is.outlier is.extreme
## <fct> <chr> <dbl> <dbl> <lgl> <lgl>
## 1 valid_same A1PUWQYUQRGCO 0.425 0.438 TRUE TRUE
## 2 valid_same A2CHD0TNWHFW1R 0.225 0.312 TRUE TRUE
## 3 valid_same A3CASN6JG7104 0.35 0.292 TRUE TRUE
## 4 valid_same A3S4M1GQAMPFZB 0.6 0.667 TRUE FALSE
## 5 valid_same A3SYY5R44RAATE 0.45 0.417 TRUE TRUE
## 6 valid_same A4W9APAHFWVLO 0.738 0.688 TRUE FALSE
## 7 invalid_same A1PUWQYUQRGCO 0.425 0.438 TRUE FALSE
## 8 invalid_same A2CHD0TNWHFW1R 0.225 0.0625 TRUE TRUE
## 9 invalid_same A3CASN6JG7104 0.35 0.438 TRUE FALSE
## 10 invalid_same A3E8NUUS90EWXW 0.638 0.312 TRUE FALSE
## 11 invalid_same A3S4M1GQAMPFZB 0.6 0.438 TRUE FALSE
## 12 invalid_same A3SYY5R44RAATE 0.45 0.5 TRUE FALSE
## 13 invalid_different A1PUWQYUQRGCO 0.425 0.375 TRUE FALSE
## 14 invalid_different A2CHD0TNWHFW1R 0.225 0.125 TRUE TRUE
## 15 invalid_different A3CASN6JG7104 0.35 0.438 TRUE FALSE
## 16 invalid_different A3E8NUUS90EWXW 0.638 0.312 TRUE FALSE
## 17 invalid_different A3SYY5R44RAATE 0.45 0.5 TRUE FALSE
res.aov <- anova_test(data = tbl_overall_good_acc_cond, dv = acc, wid = workerId, within = validity)
get_anova_table(res.aov, correction = "none")
## ANOVA Table (type III tests)
##
## Effect DFn DFd F p p<.05 ges
## 1 validity 2 66 4.07 0.022 * 0.01
pwc <- tbl_overall_good_acc_cond %>%
pairwise_t_test(
acc ~ validity, paired = TRUE,
p.adjust.method = "bonferroni"
)
pwc
## # A tibble: 3 x 10
## .y. group1 group2 n1 n2 statistic df p p.adj p.adj.signif
## * <chr> <chr> <chr> <int> <int> <dbl> <dbl> <dbl> <dbl> <chr>
## 1 acc valid_sa… invalid_… 34 34 2.22 33 0.034 0.1 ns
## 2 acc valid_sa… invalid_… 34 34 2.29 33 0.029 0.087 ns
## 3 acc invalid_… invalid_… 34 34 -0.247 33 0.807 1 ns
Finally, we can look at the accuracy for each individual subject.
tbl_overall_good_acc %>%
ggbarplot(x = "workerId", y = "ACC", ylab = "Accuracy", fill = "#f7a800", color = "#f7a800", ylim = c(0, 1), title = "Individual Accuracy", sort.val = c("asc"), font.xtickslab = 8) + rotate_x_text()