#install.packages("afex")
#install.packages("emmeans")
#install.packages("ggbeeswarm")
library(psych) # for the describe() command
library(ggplot2) # to visualize our results
##
## Attaching package: 'ggplot2'
## The following objects are masked from 'package:psych':
##
## %+%, alpha
library(expss) # for the cross_cases() command
## Loading required package: maditr
##
## To aggregate all non-grouping columns: take_all(mtcars, mean, by = am)
##
## Attaching package: 'expss'
## The following object is masked from 'package:ggplot2':
##
## vars
library(car) # for the leveneTest() command
## Loading required package: carData
##
## Attaching package: 'car'
## The following object is masked from 'package:expss':
##
## recode
## The following object is masked from 'package:psych':
##
## logit
library(afex) # to run the ANOVA
## Loading required package: lme4
## Loading required package: Matrix
##
## Attaching package: 'lme4'
## The following object is masked from 'package:expss':
##
## dummy
## ************
## Welcome to afex. For support visit: http://afex.singmann.science/
## - Functions for ANOVAs: aov_car(), aov_ez(), and aov_4()
## - Methods for calculating p-values with mixed(): 'S', 'KR', 'LRT', and 'PB'
## - 'afex_aov' and 'mixed' objects can be passed to emmeans() for follow-up tests
## - Get and set global package options with: afex_options()
## - Set sum-to-zero contrasts globally: set_sum_contrasts()
## - For example analyses see: browseVignettes("afex")
## ************
##
## Attaching package: 'afex'
## The following object is masked from 'package:lme4':
##
## lmer
library(ggbeeswarm) # to plot results
library(emmeans) # for posthoc tests
## Welcome to emmeans.
## Caution: You lose important information if you filter this package's results.
## See '? untidy'
# For HW, import the project dataset you cleaned previously; this will be the dataset you'll use throughout the rest of the semester
d <- read.csv(file="Data/projectdata.csv", header=T)
# new code! this adds a column with a number for each row. It will make it easier if we need to drop outliers later
d$row_id <- 1:nrow(d)
Note: For your HW, you will choose to run EITHER a one-way ANOVA (a single IV with at least 3 levels) OR a two-way ANOVA (two IVs, each with 2 levels). You will need to specify your hypothesis and customize your code based on the choice you make (i.e., delete code that is not relevant). We will run BOTH versions in the lab for illustrative purposes.
One-Way Hypothesis: We predict that there will be a significant difference in exploitativeness across gender, between male, female, and non-binary participants.
IV = gender; DV = exploitativeness
# you only need to check the variables you're using in the current analysis
# even if you checked them previously, it's always a good idea to look them over again and be sure that everything is correct
str(d)
## 'data.frame': 3132 obs. of 8 variables:
## $ ResponseID : chr "R_BJN3bQqi1zUMid3" "R_2TGbiBXmAtxywsD" "R_12G7bIqN2wB2N65" "R_39pldNoon8CePfP" ...
## $ gender : chr "f" "m" "m" "f" ...
## $ sibling : chr "at least one sibling" "at least one sibling" "at least one sibling" "at least one sibling" ...
## $ moa_maturity: num 3.67 3.33 3.67 3 3.67 ...
## $ exploit : num 2 3.67 4.33 1.67 4 ...
## $ swb : num 4.33 4.17 1.83 5.17 3.67 ...
## $ mindful : num 2.4 1.8 2.2 2.2 3.2 ...
## $ row_id : int 1 2 3 4 5 6 7 8 9 10 ...
# make our categorical variables of interest factors
# we'll also use our newly created row ID variable for this analysis, so make sure it's coded as a factor, too
d$gender <- as.factor(d$gender)
d$row_id <- as.factor(d$row_id)
# For the Two-Way ANOVA we would recode our race variable into two groups here, creating a new variable "poc" with 2 levels
# For the One-Way, gender already has the three levels we want (f, m, nb), so the recode lines below simply confirm the coding and leave the values unchanged
table(d$gender)
##
## f m nb
## 2295 783 54
d$gender[d$gender == "f"] <- "f"
d$gender[d$gender == "m"] <- "m"
d$gender[d$gender == "nb"] <- "nb"
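If you do run the Two-Way version, the recode into a two-level "poc" variable would look something like the sketch below; the column name "race" and its level "white" are placeholders, so match them to your own data before running.
# SKETCH -- creating a two-level "poc" factor from a hypothetical "race" column
# d$poc <- ifelse(d$race == "white", "white", "poc")
# d$poc <- as.factor(d$poc)
# table(d$poc)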
# check that all our categorical variables of interest are now factors
str(d)
## 'data.frame': 3132 obs. of 8 variables:
## $ ResponseID : chr "R_BJN3bQqi1zUMid3" "R_2TGbiBXmAtxywsD" "R_12G7bIqN2wB2N65" "R_39pldNoon8CePfP" ...
## $ gender : Factor w/ 3 levels "f","m","nb": 1 2 2 1 2 1 1 1 1 1 ...
## $ sibling : chr "at least one sibling" "at least one sibling" "at least one sibling" "at least one sibling" ...
## $ moa_maturity: num 3.67 3.33 3.67 3 3.67 ...
## $ exploit : num 2 3.67 4.33 1.67 4 ...
## $ swb : num 4.33 4.17 1.83 5.17 3.67 ...
## $ mindful : num 2.4 1.8 2.2 2.2 3.2 ...
## $ row_id : Factor w/ 3132 levels "1","2","3","4",..: 1 2 3 4 5 6 7 8 9 10 ...
# check our DV skew and kurtosis
describe(d$exploit)
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 3132 2.39 1.37 2 2.21 1.48 1 7 6 0.94 0.36 0.02
# we'll use the describeBy() command to view our DV's skew and kurtosis across our IVs' levels
describeBy(d$exploit, group = d$gender )
##
## Descriptive statistics by group
## group: f
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 2295 2.26 1.3 2 2.08 1.48 1 7 6 1.04 0.62 0.03
## ------------------------------------------------------------
## group: m
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 783 2.77 1.49 2.67 2.63 1.98 1 7 6 0.63 -0.26 0.05
## ------------------------------------------------------------
## group: nb
## vars n mean sd median trimmed mad min max range skew kurtosis se
## X1 1 54 2.2 1.27 2 2.05 1.48 1 7 6 1.19 1.74 0.17
# also use histograms to examine your continuous variable
hist(d$exploit)
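Since ggplot2 is already loaded, you can also facet the histogram by group to eyeball the DV's shape within each level of the IV (a quick sketch):
ggplot(d, aes(x = exploit)) +
  geom_histogram(bins = 20) +
  facet_wrap(~ gender)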
# REMEMBER your test's level of POWER is determined by your SMALLEST subsample
# One-Way
table(d$gender)
##
## f m nb
## 2295 783 54
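If you want R to pull out the smallest cell directly, a one-line sketch:
min(table(d$gender)) # the n that limits your power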
# use the leveneTest() command from the car package to test homogeneity of variance
# uses the 'formula' setup: y~x1 for the One-Way (or y~x1*x2 for the Two-Way), where y is our DV, x1 is our first IV, and x2 is our second IV
# One-Way
leveneTest(exploit~gender, data = d)
## Levene's Test for Homogeneity of Variance (center = median)
## Df F value Pr(>F)
## group 2 20.244 1.838e-09 ***
## 3129
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# use this section below ONLY IF you need to remove outliers
# to drop a single outlier, use this code:
d <- subset(d, row_id!=c(1108))
# to drop multiple outliers, use this code:
# d <- subset(d, row_id!=c(1108) & row_id!=c(602))
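An equivalent approach that scales better when dropping several rows uses %in% (a sketch; the row numbers are just the examples above):
# d <- subset(d, !(row_id %in% c(1108, 602)))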
# use the lm() command to run the regression
# formula is y~x1 for the One-Way (or y~x1*x2 for the Two-Way), where y is our DV, x1 is our first IV, and x2 is our second IV
reg_model <- lm(exploit~gender, data = d) #for One-Way
# Cook's distance
plot(reg_model, 4)
# Residuals VS Leverage
plot(reg_model, 5)
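If the plots are hard to read, you can also list the rows with the largest Cook's distance values numerically; a sketch using base R's cooks.distance():
cd <- cooks.distance(reg_model)
head(sort(cd, decreasing = TRUE)) # the names are the row numbers to inspect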
Our cell sizes are very unbalanced across the gender groups. The small sample size for one level of our variable (non-binary, n = 54) limits our power and increases our Type II error rate.
Levene's test was significant for our three-level gender variable in the One-Way ANOVA. We are ignoring this and continuing with the analysis anyway for class purposes.
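If you did not want to simply ignore the violation, one common option for the One-Way case is Welch's ANOVA, which does not assume equal variances; a minimal sketch using base R's oneway.test():
oneway.test(exploit ~ gender, data = d, var.equal = FALSE)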
# One-Way
aov_model <- aov_ez(data = d,
id = "ResponseID",
between = c("gender"),
dv = "exploit",
anova_table = list(es = "pes"))
## Contrasts set to contr.sum for the following variables: gender
# FOR your HW, the id variable in this code will be "X" for the ARC data and "ResponseId" for the EMMi2 data
nice(aov_model)
## Anova Table (Type 3 tests)
##
## Response: exploit
## Effect df MSE F pes p.value
## 1 gender 2, 3128 1.83 42.76 *** .027 <.001
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
ANOVA effect size (partial eta-squared) cutoffs from Cohen (1988):
* η^2 < 0.01 indicates a trivial effect
* η^2 >= 0.01 indicates a small effect
* η^2 >= 0.06 indicates a medium effect
* η^2 >= 0.14 indicates a large effect
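As a sanity check, partial eta-squared can be recovered from the F statistic and its degrees of freedom: pes = (F x df1) / (F x df1 + df2). A quick sketch with the values from the table above:
(42.76 * 2) / (42.76 * 2 + 3128) # ~= .027, matching the pes column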
# One-Way
afex_plot(aov_model, x = "gender")
# NOTE: for the Two-Way, we will decide which plot version makes the MOST SENSE based on the data / rationale when we make the nice Figure 2 at the end
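For the Two-Way version, the model and plot calls would look something like the sketch below. The names "iv1" and "iv2" are placeholders for your two 2-level factors (e.g., the "poc" variable described in the recode step); the data_geom argument is also where the ggbeeswarm package loaded above comes in, to show the raw data points behind the means.
# SKETCH -- Two-Way version (replace "iv1" and "iv2" with your own 2-level factors)
# aov_model2 <- aov_ez(data = d,
#                      id = "ResponseID",
#                      between = c("iv1", "iv2"),
#                      dv = "exploit",
#                      anova_table = list(es = "pes"))
# nice(aov_model2)
# # two plot versions -- choose whichever makes the most sense for your rationale
# afex_plot(aov_model2, x = "iv1", trace = "iv2",
#           data_geom = ggbeeswarm::geom_quasirandom) # raw data shown as a beeswarm
# afex_plot(aov_model2, x = "iv2", trace = "iv1")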
ONLY run posthoc tests IF the ANOVA test is SIGNIFICANT! E.g., only run the posthoc tests on gender if there is a main effect of gender.
emmeans(aov_model, specs="gender", adjust="sidak")
## gender emmean SE df lower.CL upper.CL
## f 2.26 0.0282 3128 2.19 2.33
## m 2.77 0.0483 3128 2.66 2.89
## nb 2.20 0.1840 3128 1.76 2.64
##
## Confidence level used: 0.95
## Conf-level adjustment: sidak method for 3 estimates
pairs(emmeans(aov_model, specs="gender", adjust="sidak"))
## contrast estimate SE df t.ratio p.value
## f - m -0.5142 0.0559 3128 -9.194 <.0001
## f - nb 0.0548 0.1860 3128 0.295 0.9533
## m - nb 0.5690 0.1900 3128 2.993 0.0078
##
## P value adjustment: tukey method for comparing a family of 3 estimates
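Note in the output above that the adjust = "sidak" argument applied to the emmeans() estimates and their confidence intervals, while pairs() fell back to its default Tukey adjustment for the pairwise p-values. If you specifically want Sidak-adjusted comparisons, pass adjust to pairs() itself (a sketch):
pairs(emmeans(aov_model, specs = "gender"), adjust = "sidak")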
To test our hypothesis that there would be a significant difference in people's level of exploitativeness based on gender (female, male, non-binary), we used a one-way ANOVA. Our data were unbalanced, with many more female participants in our survey (n = 2295) than male (n = 783) or non-binary (n = 54) participants. This substantially reduces the power of our test and increases the chance of a Type II error. We identified and removed a single outlier (row 1108) based on visual inspection of the Cook's Distance and Residuals vs. Leverage plots. A significant Levene's test (p < .001) also indicates that our data violate the assumption of homogeneity of variance, which suggests an increased chance of Type II error. We continued with our analysis for the purposes of this class.
We found a significant effect of gender, F(2, 3128) = 42.76, p < .001, ηp² = .027 (a small effect; Cohen, 1988). Posthoc pairwise comparisons (Tukey-adjusted) revealed that female participants (M = 2.26, SE = 0.03) reported significantly less exploitativeness than male participants (M = 2.77, SE = 0.05), and male participants also reported significantly more exploitativeness than non-binary participants (M = 2.20, SE = 0.18); the difference between female and non-binary participants was not significant. Overall, male participants reported the highest level of exploitativeness (see Figure 1 for a comparison).
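afex_plot() returns a regular ggplot object, so one way to turn the plot above into the Figure 1 referenced here is to add labels and a theme; a sketch (the axis labels are placeholders):
fig1 <- afex_plot(aov_model, x = "gender") +
  labs(x = "Gender", y = "Exploitativeness (1-7)") +
  theme_classic()
fig1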
References
Cohen, J. (1988). Statistical Power Analysis for the Behavioral Sciences. New York, NY: Routledge Academic.