#rm(list = ls())
library(data.table, quietly=TRUE)
library(psych, quietly=TRUE)
library(BayesFactor, quietly=TRUE)
library(perm, quietly=TRUE)
library(effsize)
if(require("BayesFactorExtras", quietly=TRUE)){
library(devtools, quietly=TRUE)
install_github("richarddmorey/BayesFactorExtras", subdir="BayesFactorExtras")
}
library(BayesFactorExtras, quietly=TRUE)
Reading in all data files and binding them into a single data frame.
setwd(dir = "~/OneDrive/MANUSCRIPTS/2016 Many Labs 5/Pilot2/Analyses")
pilot2 <- as.data.frame(rbindlist(lapply(list.files(path = getwd(), pattern = '\\.csv$'),
                                         read.csv),
                                  use.names = TRUE, fill = TRUE, idcol = TRUE))
#View(pilot2)
Computing means of the two scale items for each scenario. If one of the two target items is missing, the score of the other item is used as the estimate for the given scenario.
pilot2$dv <- rowMeans(pilot2[,c("item_05", "item_08")], na.rm = TRUE)
pilot2$prime <- as.factor(pilot2$prime)
pilot2$dv <- as.numeric(pilot2$dv)
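As a quick check of this fallback rule (a toy example with made-up values, not part of the pilot data): rowMeans() with na.rm = TRUE averages whichever items are present, so a row with one missing item simply keeps the score of the other item.
# Toy illustration of the scoring rule (hypothetical values)
toy <- data.frame(item_05 = c(4, NA, 6), item_08 = c(6, 8, NA))
rowMeans(toy, na.rm = TRUE)  # gives 5, 8, 6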
Split by condition
Control condition [Prime code = 0]
describe(pilot2$dv[pilot2$prime == 0], na.rm = TRUE, fast = TRUE)
## vars n mean sd min max range se
## X1 1 178 5.83 1.73 1.5 9 7.5 0.13
Aggression condition [Prime code = 1]
describe(pilot2$dv[pilot2$prime == 1], na.rm = TRUE, fast = TRUE)
## vars n mean sd min max range se
## X1 1 185 5.68 1.88 1 9 8 0.14
Total sample size
nrow(pilot2)
## [1] 363
Welch two-sample \(t\)-test with the mean aggression rating as the DV and prime condition as the IV. This gives the ordinary asymptotic two-tailed \(p\)-value.
t.test(dv ~ prime, data = pilot2, na.action = na.omit)
##
## Welch Two Sample t-test
##
## data: dv by prime
## t = 0.79179, df = 360.26, p-value = 0.429
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.2231209 0.5238801
## sample estimates:
## mean in group 0 mean in group 1
## 5.831461 5.681081
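As a rough hand cross-check (a sketch using the rounded group summaries from describe() above, so it only approximately reproduces the package output), the Welch \(t\) statistic is the mean difference divided by the square root of the sum of the squared standard errors:
# Hand cross-check of the Welch t statistic from the rounded group summaries
se0 <- 0.13; se1 <- 0.14                      # standard errors reported by describe()
(5.831461 - 5.681081) / sqrt(se0^2 + se1^2)   # ~0.79, in line with t = 0.79 above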
cohen.d(dv ~ prime, data = pilot2)
##
## Cohen's d
##
## d estimate: 0.08299653 (negligible)
## 95 percent confidence interval:
## inf sup
## -0.1241376 0.2901306
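Similarly, Cohen's d can be cross-checked by hand from the reported group summaries (again a sketch with rounded SDs, so only approximately equal to the estimate above), using the conventional (n - 1)-weighted pooled standard deviation:
# Hand cross-check of Cohen's d from the rounded group summaries
m0 <- 5.831461; s0 <- 1.73; n0 <- 178   # control
m1 <- 5.681081; s1 <- 1.88; n1 <- 185   # aggression prime
s.pooled <- sqrt(((n0 - 1) * s0^2 + (n1 - 1) * s1^2) / (n0 + n1 - 2))
(m0 - m1) / s.pooled                    # ~0.083, matching the d estimate above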
Exact two-tailed \(p\)-value and the associated 99% CI for that \(p\)-value from a Monte Carlo randomization (permutation) test.
Apart from the question of precision, the permutation test sidesteps conditioning on any distributional assumptions.
with(pilot2, permTS(x = dv[prime == 0], y = dv[prime == 1], method = "exact.mc",
control = permControl(nmc = 100000, setSEED = FALSE, tsmethod = "abs")))
##
## Exact Permutation Test Estimated by Monte Carlo
##
## data: GROUP 1 and GROUP 2
## p-value = 0.4313
## alternative hypothesis: true mean GROUP 1 - mean GROUP 2 is 0
## sample estimates:
## mean GROUP 1 - mean GROUP 2
## 0.1503796
##
## p-value estimated from 1e+05 Monte Carlo replications
## 99 percent confidence interval on p-value:
## 0.4272542 0.4353323
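For intuition about what permTS() is estimating, the same logic can be sketched by hand: repeatedly shuffle the condition labels, recompute the mean difference, and take the proportion of shuffled differences at least as extreme as the observed one. This is a minimal sketch, not the perm package's implementation; the number of shuffles and the seed are arbitrary choices here.
# Bare-bones Monte Carlo randomization test of the mean difference
set.seed(1)                                        # arbitrary seed for the sketch
obs.diff <- with(pilot2, mean(dv[prime == 0], na.rm = TRUE) -
                         mean(dv[prime == 1], na.rm = TRUE))
perm.diff <- replicate(10000, {
  shuffled <- sample(pilot2$prime)                 # permute condition labels
  mean(pilot2$dv[shuffled == 0], na.rm = TRUE) -
    mean(pilot2$dv[shuffled == 1], na.rm = TRUE)
})
mean(abs(perm.diff) >= abs(obs.diff))              # two-tailed Monte Carlo p-value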
Prior width (\(r\) scale)
r.scale <- 1/2
Computing the Bayes factor for an independent-samples \(t\)-test of a one-tailed hypothesis.
The Bayes factor quantifies the available evidence: it is the relative evidence in the data for one hypothesis over another. The data favor one hypothesis, relative to another, exactly to the degree that it predicts the observed data better than the other (answering the question of how many times more likely the data are under one hypothesis as opposed to the other). BF10 = evidence in favor of Ha; BF01 = evidence in favor of H0. More info here.
Testing a directional, one-tailed hypothesis that aggressive priming leads to a higher rating of aggression. Using a prior width of 1/2.
bf <- ttestBF(formula = dv ~ prime, data = pilot2, rscale = r.scale, nullInterval=c(-Inf,0))[1]
paste(ifelse(extractBF(bf)$bf > 1, "BF10 = ", "BF01 = "),
round(ifelse(extractBF(bf)$bf > 1, extractBF(bf)$bf, 1/extractBF(bf)$bf), 2), sep = "")
## [1] "BF01 = 10.4"
Plotting evolving empirical support (relative likelihood) in favor of Ha or H0 as more and more data come in. More info here.
bf.seq <- seqBF(bf, min.n = 10, step = 1, verbose = FALSE)
plot(bf.seq)
[Sequential Bayes factor plot produced by plot(bf.seq) appears here.]
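If BayesFactorExtras (and hence seqBF()) is unavailable, a rough stand-in can be sketched by recomputing the Bayes factor on cumulatively larger subsets of the data. Note that this treats the row order of the bound data frame as the accumulation order, and the grid of sample sizes below is an arbitrary choice.
# Manual sequential Bayes factor: BF10 on the first n rows, for a grid of n
n.grid <- seq(10, nrow(pilot2), by = 5)
bf.traj <- sapply(n.grid, function(n) {
  sub <- pilot2[1:n, ]
  if (any(table(sub$prime) < 2)) return(NA)   # need both conditions represented
  extractBF(ttestBF(formula = dv ~ prime, data = sub,
                    rscale = r.scale, nullInterval = c(-Inf, 0))[1])$bf
})
plot(n.grid, log(bf.traj), type = "l", xlab = "n", ylab = "log(BF10)")
abline(h = 0, lty = 2)                        # BF = 1: no evidence either way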