#install.packages("broom")
#install.packages("ggplot2")
library(psych) # for the describe() command
library(broom) # for the augment() command
library(ggplot2) # to visualize our results
##
## Attaching package: 'ggplot2'
## The following objects are masked from 'package:psych':
##
## %+%, alpha
# For HW, import the dataset you cleaned previously; this will be the dataset you'll use throughout the rest of the semester
d <- read.csv(file="Data/eammi2_data_final_SP25good.csv", header=T)
We hypothesize that people’s reported level of mindfulness will significantly predict their level of support, and that the relationship will be positive. This means that as people report higher levels of mindfulness, they will also report higher levels of support.
My independent variable (the one doing the predicting) is: mindfulness
My dependent variable (the one being predicted) is: support
# you only need to check the variables you're using in the current analysis
# although you checked them previously, it's always a good idea to look them over again to be sure that everything is correct
str(d)
## 'data.frame': 3182 obs. of 27 variables:
## $ ResponseID : chr "R_BJN3bQqi1zUMid3" "R_2TGbiBXmAtxywsD" "R_12G7bIqN2wB2N65" "R_39pldNoon8CePfP" ...
## $ gender : chr "f" "m" "m" "f" ...
## $ race_rc : chr "white" "white" "white" "other" ...
## $ age : chr "1 between 18 and 25" "1 between 18 and 25" "1 between 18 and 25" "1 between 18 and 25" ...
## $ income : chr "1 low" "1 low" "rather not say" "rather not say" ...
## $ edu : chr "2 Currently in college" "5 Completed Bachelors Degree" "2 Currently in college" "2 Currently in college" ...
## $ sibling : chr "at least one sibling" "at least one sibling" "at least one sibling" "at least one sibling" ...
## $ party_rc : chr "democrat" "independent" "apolitical" "apolitical" ...
## $ disability : chr NA NA "psychiatric" NA ...
## $ marriage5 : chr "are currently divorced from one another" "are currently married to one another" "are currently married to one another" "are currently married to one another" ...
## $ phys_sym : chr "high number of symptoms" "high number of symptoms" "high number of symptoms" "high number of symptoms" ...
## $ pipwd : num NA NA 2.33 NA NA ...
## $ moa_independence: num 3.67 3.67 3.5 3 3.83 ...
## $ moa_role : num 3 2.67 2.5 2 2.67 ...
## $ moa_safety : num 2.75 3.25 3 1.25 2.25 2.5 4 3.25 2.75 3.5 ...
## $ moa_maturity : num 3.67 3.33 3.67 3 3.67 ...
## $ idea : num 3.75 3.88 3.75 3.75 3.5 ...
## $ swb : num 4.33 4.17 1.83 5.17 3.67 ...
## $ mindful : num 2.4 1.8 2.2 2.2 3.2 ...
## $ belong : num 2.8 4.2 3.6 4 3.4 4.2 3.9 3.6 2.9 2.5 ...
## $ efficacy : num 3.4 3.4 2.2 2.8 3 2.4 2.3 3 3 3.7 ...
## $ support : num 6 6.75 5.17 5.58 6 ...
## $ socmeduse : int 47 23 34 35 37 13 37 43 37 29 ...
## $ usdream : chr "american dream is important and achievable for me" "american dream is important and achievable for me" "american dream is not important and maybe not achievable for me" "american dream is not important and maybe not achievable for me" ...
## $ npi : num 0.6923 0.1538 0.0769 0.0769 0.7692 ...
## $ exploit : num 2 3.67 4.33 1.67 4 ...
## $ stress : num 3.3 3.3 4 3.2 3.1 3.5 3.3 2.4 2.9 2.7 ...
# you can use the describe() command on an entire dataframe (d) or just on a single variable
describe(d)
## vars n mean sd median trimmed mad min max
## ResponseID* 1 3182 1591.50 918.71 1591.50 1591.50 1179.41 1.00 3182.0
## gender* 2 3178 1.28 0.49 1.00 1.21 0.00 1.00 3.0
## race_rc* 3 3173 5.53 2.13 7.00 5.88 0.00 1.00 7.0
## age* 4 2169 1.11 0.43 1.00 1.00 0.00 1.00 4.0
## income* 5 3157 2.44 1.16 2.00 2.42 1.48 1.00 4.0
## edu* 6 3174 2.51 1.25 2.00 2.18 0.00 1.00 7.0
## sibling* 7 3182 1.10 0.29 1.00 1.00 0.00 1.00 2.0
## party_rc* 8 3165 2.46 1.01 2.00 2.45 0.00 1.00 4.0
## disability* 9 864 3.71 1.70 5.00 3.78 1.48 1.00 6.0
## marriage5* 10 3172 1.88 0.60 2.00 1.83 0.00 1.00 4.0
## phys_sym* 11 3174 2.26 0.86 3.00 2.32 0.00 1.00 3.0
## pipwd 12 1624 2.93 0.56 3.00 2.93 0.40 1.13 5.0
## moa_independence 13 3107 3.54 0.47 3.67 3.61 0.49 1.00 4.0
## moa_role 14 3111 2.97 0.72 3.00 3.00 0.74 1.00 4.0
## moa_safety 15 3123 3.20 0.64 3.25 3.26 0.74 1.00 4.0
## moa_maturity 16 3146 3.59 0.43 3.67 3.65 0.49 1.00 4.0
## idea 17 3177 3.57 0.38 3.62 3.62 0.37 1.00 4.0
## swb 18 3178 4.47 1.32 4.67 4.53 1.48 1.00 7.0
## mindful 19 3173 3.71 0.84 3.73 3.71 0.79 1.13 6.0
## belong 20 3175 3.23 0.60 3.30 3.25 0.59 1.30 5.0
## efficacy 21 3176 3.13 0.45 3.10 3.13 0.44 1.00 4.0
## support 22 3182 5.53 1.14 5.75 5.65 0.99 0.00 7.0
## socmeduse 23 3175 34.45 8.58 35.00 34.72 7.41 11.00 55.0
## usdream* 24 3171 2.39 1.55 2.00 2.24 1.48 1.00 5.0
## npi 25 3167 0.28 0.31 0.15 0.24 0.23 0.00 1.0
## exploit 26 3177 2.39 1.37 2.00 2.21 1.48 1.00 7.0
## stress 27 3176 3.05 0.60 3.00 3.05 0.59 1.30 4.7
## range skew kurtosis se
## ResponseID* 3181.00 0.00 -1.20 16.29
## gender* 2.00 1.40 0.88 0.01
## race_rc* 6.00 -0.98 -0.68 0.04
## age* 3.00 4.42 21.17 0.01
## income* 3.00 0.14 -1.44 0.02
## edu* 6.00 2.18 3.66 0.02
## sibling* 1.00 2.74 5.53 0.01
## party_rc* 3.00 0.42 -1.04 0.02
## disability* 5.00 -0.44 -1.35 0.06
## marriage5* 3.00 0.47 1.48 0.01
## phys_sym* 2.00 -0.52 -1.46 0.02
## pipwd 3.87 0.12 1.34 0.01
## moa_independence 3.00 -1.44 2.53 0.01
## moa_role 3.00 -0.33 -0.84 0.01
## moa_safety 3.00 -0.71 0.03 0.01
## moa_maturity 3.00 -1.20 1.87 0.01
## idea 3.00 -1.54 4.42 0.01
## swb 6.00 -0.36 -0.46 0.02
## mindful 4.87 -0.06 -0.13 0.01
## belong 3.70 -0.26 -0.12 0.01
## efficacy 3.00 -0.29 0.63 0.01
## support 7.00 -1.14 1.61 0.02
## socmeduse 44.00 -0.31 0.26 0.15
## usdream* 4.00 0.62 -1.13 0.03
## npi 1.00 0.94 -0.69 0.01
## exploit 6.00 0.95 0.37 0.02
## stress 3.40 0.04 -0.17 0.01
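As noted in the comment above, describe() also works on a single variable. For example, you could restrict it to just the two variables used in this analysis (optional; the full-dataframe output above already covers these):

# describe() on single variables: just the two used in this analysis
describe(d$mindful)
describe(d$support)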
# next, use histograms to examine your continuous variables
hist(d$mindful)
hist(d$support)
# last, use scatterplots to examine your continuous variables together
# Remember to put INDEPENDENT VARIABLE FIRST, so that it goes on the x-axis
plot(d$mindful, d$support)
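If you'd like a quick preview of the relationship, you can overlay the least-squares line on the scatterplot (an optional sketch, not required for the HW):

# optional: overlay the least-squares line on the scatterplot above
abline(lm(support ~ mindful, data = d), col = "blue")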
# to calculate standardized coefficients for the regression, we have to standardize our IV
d$mindful_std <- scale(d$mindful, center=T, scale=T)
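A quick sanity check, if you want one: after scale(), the standardized variable should have a mean of approximately 0 and a standard deviation of 1.

# optional sanity check: a z-scored variable has mean ~0 and sd ~1
mean(d$mindful_std, na.rm = TRUE)
sd(d$mindful_std, na.rm = TRUE)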
# use the lm() command to run the regression
# dependent/outcome variable on the left of the ~, standardized independent/predictor variable on the right.
reg_model <- lm(support~mindful_std, data = d)
# NO PEEKING AT YOUR MODEL RESULTS YET!
# Create Plots
model.diag.metrics <- augment(reg_model)
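model.diag.metrics now contains the model variables plus the diagnostic columns that augment() adds (e.g., .fitted and .resid), which the plot below uses. If you're curious, you can peek at them (optional):

# optional: peek at the diagnostic columns augment() adds
head(model.diag.metrics[, c("support", "mindful_std", ".fitted", ".resid")])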
# View Raw Residuals Plot
# NOTE: only replace the variable names in the 3 places in this chunk of code
ggplot(model.diag.metrics, aes(x = mindful_std, y = support)) +
geom_point() +
stat_smooth(method = lm, se = FALSE) +
geom_segment(aes(xend = mindful_std, yend = .fitted), color = "red", linewidth = 0.3)
## `geom_smooth()` using formula = 'y ~ x'
The plot below shows the residuals for each case and the fitted line. The red line shows the average residual at each fitted value of the dependent variable. If the assumption of linearity is met, the red line should be roughly horizontal, indicating that the residuals average to around zero. You can see that for this lab, the plot shows some non-linearity: there are more data points below the regression line than above it, so some negative residuals don’t have positive residuals to cancel them out. However, a bit of deviation is okay – just as with skewness and kurtosis for non-normality, there is a range of acceptability we can work within before non-linearity becomes a critical issue.
For some examples of a good Residuals vs Fitted plot and ones that show serious problems, check out this page. Looking at these examples, you can see the first case has a plot in which the red line sticks pretty closely to the zero line, while the other cases show some serious deviation. Our plot for the lab is much closer to the ‘good’ plot than it is to the ‘serious issues’ plots, so we’ll consider our data okay and proceed with our analysis. Obviously, this is quite a subjective decision. The key takeaway is that these evaluations are closely tied to the context of our sample, our data, and what we’re studying. It’s almost always a judgement call.
You’ll notice in the bottom right corner that some points have numbers next to them: these are the participants (“cases”, indicated by row number) who have the most influence on the regression line (and so they might be outliers). We’ll cover more about outliers in the next section.
[NOTE: All of the above text is informational. You do NOT need to edit it for the HW.]
plot(reg_model, 1) #Residual vs Fitted plot
Interpretation: Our Residual vs Fitted plot suggests there is some minor non-linearity between our independent and dependent variables, but we are okay to proceed with the regression.
[Remember to revise the above interpretation in your HW assignment. Then delete this reminder and the bolded text below.]
For your HW: You need to generate this plot and then, in the “Issues with my Data” section below, talk about how your plot compares to the ‘good’ / ‘bad, problematic’ plots linked above. Is it closer to the ‘good’ plots or one of the ‘bad’ plots? This is going to be a judgement call, so just do your best!
The plot below addresses leverage, or how much each data point is able to influence the regression line. Outliers are points that have undue influence on the regression line, the way that Bill Gates entering the room has an undue influence on the mean income.
The Cook’s distance plot is a visualization of a score called (you guessed it) Cook’s distance, calculated for each case (aka participant) in the dataframe. Cook’s distance tells us how much the regression would change if that data point were removed. Ideally, we want all points to have the same influence on the regression line, although we accept that there will be some variability. The cutoff for a high Cook’s distance score is .50. For our lab data, some points do exert more influence than others, but none of them are close to the cutoff. Remember, the plot will always label the 3 most extreme values; it is your job to determine whether any of those values are beyond the cutoff.
[NOTE: All of the above text is informational. You do NOT need to edit it for the HW.]
# Cook's distance
plot(reg_model, 4)
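If you want a numeric check to go with the plot, base R’s cooks.distance() returns the score for every case (an optional sketch, not required for the HW):

# optional numeric check of Cook's distance
cd <- cooks.distance(reg_model)
max(cd, na.rm = TRUE) # the largest Cook's distance in the data
which(cd > 0.50) # any cases beyond the .50 cutoff (none expected here)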
Interpretation: Our data does not have severe outliers.
[Remember to revise the above interpretation in your HW assignment. Then delete this reminder and the bolded text below.]
For your HW: You need to generate the plot, assess Cook’s distance in your dataset, and identify any cases/participants that are prominent outliers, using the .50 cutoff for a high Cook’s distance score. You will summarize this in the “Issues with my Data” section below.
Before interpreting our results, we assessed our variables to see if they met the assumptions for a simple linear regression. Analysis of a Residuals vs Fitted plot suggested that there is some minor non-linearity, but not enough to violate the assumption of linearity. We also checked the Cook’s distance plot to detect outliers. All cases were below the recommended Cook’s distance cutoff of 0.50, so no outliers were detected.
[Remember to revise the above paragraph in your HW assignment. Then delete this reminder.]
summary(reg_model)
##
## Call:
## lm(formula = support ~ mindful_std, data = d)
##
## Residuals:
## Min 1Q Median 3Q Max
## -5.5333 -0.5984 0.2148 0.8346 2.0201
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 5.52729 0.01993 277.38 <2e-16 ***
## mindful_std 0.20575 0.01993 10.32 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.122 on 3171 degrees of freedom
## (9 observations deleted due to missingness)
## Multiple R-squared: 0.03252, Adjusted R-squared: 0.03221
## F-statistic: 106.6 on 1 and 3171 DF, p-value: < 2.2e-16
# NOTE: For the write-up section below, to type a lowercase Beta (ß) you need to hold down the Alt key and type 225 on the numeric keypad. If that doesn't work (upon releasing the Alt key), you should be able to copy/paste it from somewhere else in the write-up.
Effect size, based on the regression ß (Beta Estimate) value in our output:
* Trivial: less than 0.10 (ß < 0.10)
* Small: 0.10–0.29 (0.10 ≤ ß ≤ 0.29)
* Medium: 0.30–0.49 (0.30 ≤ ß ≤ 0.49)
* Large: 0.50 or greater (ß ≥ 0.50)
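To see where your own ß falls among these benchmarks, you can pull the coefficient straight out of the model (an optional sketch; this gives the standardized coefficient because the IV was standardized with scale() above):

# pull the standardized coefficient (ß) from the model
coef(reg_model)["mindful_std"] # ~0.21 here: a small effect by the benchmarks above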
To test our hypothesis that social exclusion would significantly predict loneliness, and that the relationship would be positive, we used a simple linear regression to model the relationship between those variables. We confirmed that our data met the assumptions of a linear regression, checking the linearity of the relationship using a Residuals vs Fitted plot and checking for outliers using a Cook’s distance plot. (Note: We are skipping the assumptions of normality and homogeneity of variance for this analysis.)
As predicted, we found that social exclusion significantly predicted loneliness, Adj. R2 = 0.16, F(1, 3164) = 584.10, p < 0.001. Additionally, the relationship between social exclusion and loneliness was positive, ß = 0.52, t(3164) = 24.17, p < 0.001 (refer to Figure 1). According to Cohen (1988), this constitutes a large effect size (ß > 0.50).
[Remember to revise the above paragraphs in your HW assignment. Then delete this reminder.]
References
Cohen, J. (1988). Statistical power analysis for the behavioral sciences. New York, NY: Routledge Academic.