1 Load Libraries

library(psych)
library(kableExtra)
library(ggplot2)
library(tidyr)
library(dplyr)
library(nFactors)
library(corrplot)
library(sjPlot)

2 Load Data

df1 <- read.csv(file="data/sona.csv", header=T)
labels1 <- data.frame(t(cbind(df1[1,],samp="Sample Source"))) # first data row holds the item labels
df1 <- df1[-c(1,2), ]                                         # drop the two embedded label/metadata rows
df1 <- subset(df1, Progress == "100", select=-c(4,14:15))     # keep only completed responses
df1$samp <- "SONA"

df2 <- read.csv(file="data/prolific.csv", header=T)
labels2 <- data.frame(t(cbind(df2[1,],samp="Sample Source"))) # first data row holds the item labels
df2 <- df2[-c(1,2), ]                                         # drop the two embedded label/metadata rows
df2_quality <- subset(df2, select=c(8,173:175))               # data-quality columns kept separately
df2 <- subset(df2, Progress == "100" & Q45 == 2 & Q46 < 4, select=-c(15:17,173:175)) # completed responses meeting the Q45/Q46 inclusion criteria
colnames(df2)[169] <- "id"                                    # participant ID column
df2$samp <- "Prolific"

df <- rbind.data.frame(df1,df2)

2.1 Attention Checks

attn <- subset(labels1, grepl("attention", X1)) # items whose label text mentions "attention"
attn_chk <- rownames(attn)                      # column names of the attention-check items

# Keyed (correct) responses for the five attention-check items, listed in the same order as attn_chk
correct_responses <- c(3, 2, 5, 1, 1)

# Count how many attention-check items each participant answered correctly
correct_counts <- apply(df[attn_chk], 1, function(row) sum(row == correct_responses))

# Display results
table(correct_counts)
## correct_counts
##   0   1   2   3   4   5 
##  11  13  31  37  21 402
df$attn <- correct_counts

# Keep participants who answered at least 4 of the 5 attention checks correctly
df <- subset(df, attn > 3)
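
A minimal alternative sketch (illustrative only, not part of the pipeline): keying the correct answers by item name removes the dependence on column order between attn_chk and correct_responses. The object names key and correct_counts_named are hypothetical.

# Illustrative variant: name the answer key by item so the comparison does not
# rely on column order
key <- setNames(correct_responses, attn_chk)
correct_counts_named <- rowSums(
  sapply(attn_chk, function(q) as.numeric(df[[q]]) == key[[q]]),
  na.rm = TRUE
)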

# Manually flagged participant IDs to exclude
bad_ids <- c("663e9d84425d4157a22f3167",
            "666f47d2c81ed466b97cd82d",
            "634ef87aec0966557c825573",
            "669d8f03a8742ea4d11b35d8",
            "6701a4d42a2e8c434e0fc50a",
            "671736c2a43ff993ff9a070b",
            "67348c5f2fd2234d5e8f7d58")

df <- subset(df, !(id %in% bad_ids))

3 Sample

samp <- subset(df, select=c(156:169))

samp$gen[is.na(samp$Q32)] <- NA
samp$gen[samp$Q32 == 1] <- "W"
samp$gen[samp$Q32 == 2] <- "M"
samp$gen[samp$Q32 == 3] <- "N"
samp$gen[samp$Q32 == 4] <- "N"
samp$gen[samp$Q32 == 5] <- "N"
samp$gen[samp$Q32 == 6] <- "N"
samp$gen[samp$Q32 == 7] <- "N"
samp$gen[samp$Q32 == "1,2"] <- "N"
samp$gen[samp$Q32 == "1,4"] <- "N"
samp$gen[samp$Q32 == "1,5"] <- "W"
samp$gen[samp$Q32 == "1,6"] <- "W"
samp$gen[samp$Q32 == "2,3,6"] <- "N"
samp$gen[samp$Q32 == "2,5"] <- "M"
samp$gen[samp$Q32 == "2,6"] <- "M"
samp$gen[samp$Q32 == "3,4"] <- "N"
samp$gen[samp$Q32 == "4,6"] <- "N"
table(samp$Q32, useNA = "always")
## 
##           1   1,2   1,4   1,5   1,6     2 2,3,6   2,5   2,6     3   3,4     4 
##     1   261     1     1     3     1   139     1     2     1     1     1     5 
##   4,6     6  <NA> 
##     1     1     0
table(samp$gen, useNA = "always")
## 
##    M    N    W <NA> 
##  142   12  265    1
table(samp$Q32, samp$gen, useNA = "always")
##        
##           M   N   W <NA>
##           0   0   0    1
##   1       0   0 261    0
##   1,2     0   1   0    0
##   1,4     0   1   0    0
##   1,5     0   0   3    0
##   1,6     0   0   1    0
##   2     139   0   0    0
##   2,3,6   0   1   0    0
##   2,5     2   0   0    0
##   2,6     1   0   0    0
##   3       0   1   0    0
##   3,4     0   1   0    0
##   4       0   5   0    0
##   4,6     0   1   0    0
##   6       0   1   0    0
##   <NA>    0   0   0    0
# 1 == Woman
# 2 == Man
# 3 == Agender
# 4 == Non-binary
# 5 == Cisgender
# 6 == Transgender
# 7 == Another gender not listed
######
# N == Trans, Non-binary, Agender, or Another Gender Not Listed
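
The same mapping can be written as a single dplyr::case_when; the sketch below is assumed to be behaviourally equivalent (gen_cw is a hypothetical check column, with unmatched or blank Q32 values falling through to NA).

# Sketch: the recoding above expressed as one case_when (gen_cw should match gen)
samp <- samp %>%
  mutate(gen_cw = case_when(
    Q32 %in% c("1", "1,5", "1,6") ~ "W",
    Q32 %in% c("2", "2,5", "2,6") ~ "M",
    Q32 %in% c("3", "4", "5", "6", "7",
               "1,2", "1,4", "2,3,6", "3,4", "4,6") ~ "N",
    TRUE ~ NA_character_
  ))
# all(samp$gen_cw == samp$gen, na.rm = TRUE)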

4 Descriptives

4.1 Subset

rm(df1, df2)

# Keep only the DERS (Q9) and IDM (Q18) items, dropping the attention-check items
df2 <- subset(df, select = grep("^Q9|^Q18", colnames(df), value = TRUE))
df2 <- subset(df2, select = setdiff(colnames(df2), attn_chk))

4.2 Describe

# Convert all columns in df2 to numeric
df2[] <- lapply(df2, as.numeric)
# Get descriptive statistics for df2
desc_stats <- describe(df2)

# Create a new column to identify rows with skew or kurtosis greater than 2
desc_stats <- desc_stats %>%
  mutate(highlight = ifelse(abs(skew) > 2 | abs(kurtosis) > 2, "highlight", "no"))

# Create a table with highlighted rows for high skewness or kurtosis
desc_stats %>%
  kable(format = "html", digits = 2) %>%
  kable_styling(full_width = FALSE, bootstrap_options = c("striped", "hover", "condensed")) %>%
  row_spec(which(desc_stats$highlight == "highlight"), background = "red")
vars n mean sd median trimmed mad min max range skew kurtosis se highlight
Q9_1 1 420 2.19 1.11 2 2.06 1.48 1 5 4 0.83 -0.08 0.05 no
Q9_2 2 420 2.11 1.09 2 1.97 1.48 1 5 4 0.91 0.08 0.05 no
Q9_3 3 419 2.89 1.34 3 2.87 1.48 1 5 4 0.16 -1.24 0.07 no
Q9_4 4 419 1.70 1.03 1 1.48 0.00 1 5 4 1.64 2.11 0.05 highlight
Q9_5 5 420 2.11 1.18 2 1.95 1.48 1 5 4 0.95 -0.05 0.06 no
Q9_6 6 420 2.22 1.31 2 2.04 1.48 1 5 4 0.84 -0.50 0.06 no
Q9_7 7 420 2.90 1.31 3 2.88 1.48 1 5 4 0.25 -1.21 0.06 no
Q9_8 8 420 1.94 1.15 2 1.74 1.48 1 5 4 1.20 0.50 0.06 no
Q9_9 9 419 2.38 1.31 2 2.23 1.48 1 5 4 0.63 -0.73 0.06 no
Q9_10 10 419 2.42 1.30 2 2.28 1.48 1 5 4 0.59 -0.79 0.06 no
Q9_11 11 420 1.87 1.13 1 1.65 0.00 1 5 4 1.32 0.88 0.06 no
Q9_12 12 420 2.10 1.17 2 1.93 1.48 1 5 4 0.97 0.01 0.06 no
Q9_13 13 420 2.49 1.28 2 2.37 1.48 1 5 4 0.50 -0.87 0.06 no
Q9_14 14 420 2.55 1.36 2 2.43 1.48 1 5 4 0.50 -1.01 0.07 no
Q9_15 15 420 2.77 1.34 2 2.71 1.48 1 5 4 0.31 -1.14 0.07 no
Q9_16 16 420 2.80 1.37 3 2.74 1.48 1 5 4 0.24 -1.23 0.07 no
Q18_1 17 420 1.96 0.86 2 1.90 1.48 1 4 3 0.48 -0.66 0.04 no
Q18_2 18 419 2.28 0.98 2 2.23 1.48 1 4 3 0.07 -1.11 0.05 no
Q18_3 19 420 2.46 1.01 3 2.45 1.48 1 4 3 -0.10 -1.11 0.05 no
Q18_4 20 420 2.49 1.02 3 2.49 1.48 1 4 3 -0.12 -1.12 0.05 no
Q18_5 21 420 2.37 0.99 2 2.33 1.48 1 4 3 0.06 -1.08 0.05 no
Q18_6 22 420 2.53 1.04 3 2.54 1.48 1 4 3 -0.17 -1.16 0.05 no
Q18_7 23 420 1.82 0.78 2 1.74 1.48 1 4 3 0.72 0.12 0.04 no
Q18_8 24 420 1.66 0.79 1 1.54 0.00 1 4 3 1.00 0.27 0.04 no
Q18_9 25 420 1.93 0.95 2 1.83 1.48 1 4 3 0.60 -0.79 0.05 no
Q18_11 26 420 2.30 1.02 2 2.24 1.48 1 4 3 0.16 -1.14 0.05 no
Q18_12 27 420 1.86 0.92 2 1.76 1.48 1 4 3 0.64 -0.76 0.05 no
Q18_13 28 420 2.79 0.87 3 2.84 1.48 1 4 3 -0.33 -0.56 0.04 no
Q18_14 29 420 2.76 0.87 3 2.82 1.48 1 4 3 -0.45 -0.39 0.04 no
Q18_15 30 420 2.24 0.91 2 2.19 1.48 1 4 3 0.21 -0.82 0.04 no
Q18_16 31 420 2.11 0.79 2 2.10 1.48 1 4 3 0.24 -0.50 0.04 no

5 Factor Analysis

5.1 Difficulties in Emotion Regulation Scale (DERS-16)

d <- na.omit(subset(df2, select = grep("^Q9", colnames(df2), value = TRUE)))
d <- subset(d, select=-c(14,16)) # drop Q9_14 and Q9_16 (the items commented out below)

# colnames(d) <- c("I have difficulty making sense out of my feelings",
#                   "I am confused about how I feel.",
#                   "When I am upset, I have difficulty getting work done.",
#                   "When I am upset, I become out of control.",
#                   "When I am upset, I believe that I will remain that way for a long time.",
#                   "When I am upset, I believe that I’ll end up feeling very depressed.",
#                   "When I am upset, I have difficulty focusing on other things.",
#                   "When I am upset, I feel out of control.",
#                   "When I am upset, I feel ashamed with myself for feeling that way.",
#                   "When I am upset, I feel like I am weak.",
#                   "When I am upset, I have difficulty controlling my behaviors.",
#                   "When I am upset, I believe that there is nothing I can do to make myself feel better.",
#                   "When I am upset, I become irritated with myself for feeling that way.",
#                   # "When I am upset, I start to feel very bad about myself.",
#                   "When I am upset, I have difficulty thinking about anything else.")
#                   # "When I am upset, my emotions feel overwhelming.")

ev <- eigen(cor(d)) # eigenvalues of the item correlation matrix
ap <- parallel(subject=nrow(d),var=ncol(d),rep=100,cent=.05) # parallel analysis: eigenvalues expected from random data of the same size, for comparison
nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea) # non-graphical scree solutions (Kaiser, parallel analysis, optimal coordinates, acceleration factor)
plotnScree(nS) # scree plot; look for the elbow
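
If a numeric summary is preferred to eyeballing the elbow, the nScree object also stores the number of factors suggested by each retention rule (sketch; assumes the Components element as documented in nFactors).

# Suggested number of factors by rule: optimal coordinates (noc), acceleration
# factor (naf), parallel analysis (nparallel), and Kaiser (nkaiser)
nS$Components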

# EFA <- factanal(d, factors = 1, rotation = "promax")
# print(EFA, digits=3, cutoff=.4, sort=F)

# EFA <- factanal(d, factors = 2, rotation = "promax")
# print(EFA, digits=3, cutoff=.4, sort=F)

EFA <- factanal(d, factors = 5, rotation = "promax")
print(EFA, digits=3, cutoff=.4, sort=T)
## 
## Call:
## factanal(x = d, factors = 5, rotation = "promax")
## 
## Uniquenesses:
##  Q9_1  Q9_2  Q9_3  Q9_4  Q9_5  Q9_6  Q9_7  Q9_8  Q9_9 Q9_10 Q9_11 Q9_12 Q9_13 
## 0.153 0.152 0.190 0.131 0.287 0.230 0.136 0.227 0.194 0.274 0.305 0.296 0.267 
## Q9_15 
## 0.254 
## 
## Loadings:
##       Factor1 Factor2 Factor3 Factor4 Factor5
## Q9_9   0.896                                 
## Q9_10  0.776                                 
## Q9_13  0.882                                 
## Q9_4           0.994                         
## Q9_8           0.776                         
## Q9_11          0.767                         
## Q9_3                   0.866                 
## Q9_7                   0.954                 
## Q9_15                  0.665                 
## Q9_5                           0.724         
## Q9_6                           0.876         
## Q9_12                          0.658         
## Q9_1                                   0.916 
## Q9_2                                   0.871 
## 
##                Factor1 Factor2 Factor3 Factor4 Factor5
## SS loadings      2.254   2.221   2.137   1.797   1.639
## Proportion Var   0.161   0.159   0.153   0.128   0.117
## Cumulative Var   0.161   0.320   0.472   0.601   0.718
## 
## Factor Correlations:
##         Factor1 Factor2 Factor3 Factor4 Factor5
## Factor1   1.000  -0.582   0.673  -0.660   0.791
## Factor2  -0.582   1.000  -0.569   0.621  -0.656
## Factor3   0.673  -0.569   1.000  -0.612   0.695
## Factor4  -0.660   0.621  -0.612   1.000  -0.732
## Factor5   0.791  -0.656   0.695  -0.732   1.000
## 
## Test of the hypothesis that 5 factors are sufficient.
## The chi square statistic is 76.1 on 31 degrees of freedom.
## The p-value is 1.16e-05

5.2 Factor Scores

# DERS-16 subscale scores (item means), named after the five-factor solution above, plus the two IDM subscales
df2 <- df2 %>%
  mutate(clarity = rowMeans(select(., Q9_1, Q9_2), na.rm = TRUE)) %>%           # Lack of Emotional Clarity
  mutate(goals = rowMeans(select(., Q9_3, Q9_7, Q9_15), na.rm = TRUE)) %>%      # Difficulties Engaging in Goal-Directed Behavior
  mutate(impulse = rowMeans(select(., Q9_4, Q9_8, Q9_11), na.rm = TRUE)) %>%    # Impulse Control Difficulties
  mutate(strategies = rowMeans(select(., Q9_5, Q9_6, Q9_12), na.rm = TRUE)) %>% # Limited Access to Emotion Regulation Strategies
  mutate(nona = rowMeans(select(., Q9_9, Q9_10, Q9_13), na.rm = TRUE)) %>%      # Nonacceptance of Emotional Responses
  mutate(idm_ib = rowMeans(select(., Q18_3:Q18_6), na.rm = TRUE)) %>%           # IDM: Impact of Betrayal
  mutate(idm_dp = rowMeans(select(., Q18_12:Q18_15), na.rm = TRUE))             # IDM: Defensive Processing

# psych::alpha(select(df2, starts_with("Q39")), na.rm = TRUE)
# psych::alpha(select(df2, Q39_1:Q39_2), na.rm = TRUE)
# psych::alpha(select(df2, Q39_3:Q39_9), na.rm = TRUE)
# psych::alpha(select(df2, starts_with("Q40")), na.rm = TRUE)
# psych::alpha(select(df2, starts_with("Q18")), na.rm = TRUE)
# psych::alpha(select(df2, Q18_2, Q18_8:Q18_11), na.rm = TRUE)
# psych::alpha(select(df2, Q18_3:Q18_6), na.rm = TRUE)
# psych::alpha(select(df2, Q18_12:Q18_15), na.rm = TRUE)

# Get descriptive statistics for df2
desc_stats <- describe(subset(df2, select=c(clarity, nona, impulse, goals, strategies,  idm_ib, idm_dp)))

# Create a new column to identify rows with skew or kurtosis greater than 2
desc_stats <- desc_stats %>%
  mutate(highlight = ifelse(abs(skew) > 2 | abs(kurtosis) > 2, "highlight", "no"))

# Create a table with highlighted rows for high skewness or kurtosis
desc_stats %>%
  kable(format = "html", digits = 2) %>%
  kable_styling(full_width = FALSE, bootstrap_options = c("striped", "hover", "condensed")) %>%
  row_spec(which(desc_stats$highlight == "highlight"), background = "red")
vars n mean sd median trimmed mad min max range skew kurtosis se highlight
clarity 1 420 2.15 1.05 2.00 2.02 1.48 1 5 4 0.85 0.03 0.05 no
nona 2 420 2.43 1.18 2.33 2.32 1.48 1 5 4 0.58 -0.62 0.06 no
impulse 3 420 1.84 1.01 1.33 1.65 0.49 1 5 4 1.37 1.30 0.05 no
goals 4 420 2.85 1.23 2.67 2.81 1.48 1 5 4 0.27 -1.11 0.06 no
strategies 5 420 2.14 1.10 2.00 2.00 0.99 1 5 4 0.88 -0.16 0.05 no
idm_ib 6 420 2.46 0.88 2.50 2.47 1.11 1 4 3 -0.12 -0.98 0.04 no
idm_dp 7 420 2.41 0.66 2.50 2.41 0.74 1 4 3 0.05 -0.25 0.03 no

6 Correlation Matrix

# Select the relevant columns from df2
df2_subset <- df2 %>% select(clarity, nona, impulse, goals, strategies,  idm_ib, idm_dp)

# Compute the correlation matrix with significance tests
cor_results <- corr.test(df2_subset, use = "pairwise.complete.obs")

# Extract the correlation matrix
cor_matrix <- cor_results$r

# Extract the p-values matrix
p_matrix <- cor_results$p

# Display the correlation matrix and p-values
# print(cor_matrix)
# print(p_matrix)

rownames(cor_matrix) <- c("1 Lack of Emotional Clarity",
                          "2 Nonacceptance of Emotional Responses",
                          "3 Impulse Control Difficulties",
                          "4 Difficulties Engaging in Goal-Directed Behavior",
                          "5 Limited Access to Emotion Regulation Strategies",
                          "6 Impact of Betrayal",
                          "7 Defensive Processing")

colnames(cor_matrix) <- c("1",
                          "2",
                          "3",
                          "4",
                          "5",
                          "6",
                          "7")

corrplot(cor_matrix, 
         method = "color",      # Use color for correlation visualization
         type = "upper",        # Display only upper triangle
         addCoef.col = "white", # Add coefficients in black
         tl.col = "black",      # Set text label color
         number.cex = 0.8,      # Adjust text size
         p.mat = p_matrix,      # Provide p-values for significance
         sig.level = 0.05,      # Set significance level
         insig = "blank")       # Hide insignificant correlations
## Warning in corrplot(cor_matrix, method = "color", type = "upper", addCoef.col =
## "white", : p.mat and corr may be not paired, their rownames and colnames are
## not totally same!
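
The warning occurs because the display labels were changed on cor_matrix but not on p_matrix; copying the dimnames across (values untouched) lets corrplot pair the two matrices and should silence the warning.

# Keep the p-value matrix labels in sync with the relabelled correlation matrix
dimnames(p_matrix) <- dimnames(cor_matrix)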

7 Emotion Dysregulation & IDM

For a standardized coefficient β, effect sizes of 0.10–0.29 are conventionally interpreted as small, 0.30–0.49 as medium, and 0.50 or greater as large.
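
As a quick reference, a hypothetical helper (not part of the analysis) that maps an absolute standardized coefficient onto these conventional labels:

# Illustrative helper: label |beta| using the cut-offs above
# (< 0.10 negligible, 0.10-0.29 small, 0.30-0.49 medium, >= 0.50 large)
effect_size_label <- function(beta) {
  cut(abs(beta),
      breaks = c(-Inf, 0.10, 0.30, 0.50, Inf),
      labels = c("negligible", "small", "medium", "large"),
      right = FALSE)
}
# e.g., effect_size_label(coef(reg_model)[-1]) after fitting a model below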

# Standardize all subscale scores so regression coefficients are in SD units
df2$strategies_std <- c(scale(df2$strategies, center = T, scale= T))
df2$clarity_std <- c(scale(df2$clarity, center = T, scale= T))
df2$nona_std <- c(scale(df2$nona, center = T, scale= T))
df2$impulse_std <- c(scale(df2$impulse, center = T, scale= T))
df2$goals_std <- c(scale(df2$goals, center = T, scale= T))
df2$idm_dp_std <- c(scale(df2$idm_dp, center = T, scale= T))
df2$idm_ib_std <- c(scale(df2$idm_ib, center = T, scale= T))

df2$gender <- samp$gen
table(df2$gender)
## 
##   M   N   W 
## 142  12 265
df3 <- subset(df2, gender!="N")
df3$gender[df3$gender == "M"] <- "Men"
df3$gender[df3$gender == "W"] <- "Women"
df3$gender <- as.factor(df3$gender)

reg_model <- lm(idm_dp_std ~ clarity_std + nona_std + impulse_std + strategies_std + goals_std, data = df3)
car::vif(reg_model)
##    clarity_std       nona_std    impulse_std strategies_std      goals_std 
##       1.927044       2.266653       2.128176       3.306924       2.788022
plot(reg_model, 1) # residuals vs. fitted

plot(reg_model, 2) # normal Q-Q

plot(reg_model, 4) # Cook's distance

plot(reg_model, 5) # residuals vs. leverage

plot(reg_model, 3) # scale-location

summary(reg_model)
## 
## Call:
## lm(formula = idm_dp_std ~ clarity_std + nona_std + impulse_std + 
##     strategies_std + goals_std, data = df3)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.09564 -0.57126 -0.05099  0.55474  2.88918 
## 
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)    
## (Intercept)    -0.020746   0.044668  -0.464 0.642575    
## clarity_std     0.036159   0.062476   0.579 0.563076    
## nona_std        0.108410   0.066991   1.618 0.106387    
## impulse_std    -0.004926   0.064755  -0.076 0.939399    
## strategies_std  0.310249   0.081280   3.817 0.000156 ***
## goals_std       0.021437   0.074704   0.287 0.774296    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9007 on 401 degrees of freedom
## Multiple R-squared:  0.1893, Adjusted R-squared:  0.1792 
## F-statistic: 18.73 on 5 and 401 DF,  p-value: < 2.2e-16
plot_model(reg_model, type="pred")$strategies_std +
  xlab("Limited Access to Emotion Regulation Strategies") +
  ylab("Inequality-Driven Mistrust: Defensive Processing") +
  ylim(min(df3$idm_dp_std), max(df3$idm_dp_std)) +
  ggtitle("Figure 2: Relationship between emotion dysregulation and defensive processing")
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.
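
The "Scale for y is already present" message appears because plot_model() already attaches a y scale, which ylim() then replaces. If the message is unwanted, zooming with coord_cartesian() leaves the existing scale in place; a sketch (it zooms rather than clips, so not strictly identical):

# Sketch: same axis range without replacing the existing y scale
plot_model(reg_model, type="pred")$strategies_std +
  xlab("Limited Access to Emotion Regulation Strategies") +
  ylab("Inequality-Driven Mistrust: Defensive Processing") +
  coord_cartesian(ylim = c(min(df3$idm_dp_std), max(df3$idm_dp_std)))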

reg_model <- lm(idm_ib_std ~ clarity_std + nona_std + impulse_std + strategies_std + goals_std, data = df3)
car::vif(reg_model)
##    clarity_std       nona_std    impulse_std strategies_std      goals_std 
##       1.927044       2.266653       2.128176       3.306924       2.788022
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_ib_std ~ clarity_std + nona_std + impulse_std + 
##     strategies_std + goals_std, data = df3)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.12316 -0.68461  0.00562  0.60304  2.30601 
## 
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)    
## (Intercept)    -0.009229   0.043739  -0.211    0.833    
## clarity_std     0.098778   0.061176   1.615    0.107    
## nona_std        0.029237   0.065597   0.446    0.656    
## impulse_std    -0.015363   0.063407  -0.242    0.809    
## strategies_std  0.384879   0.079589   4.836 1.89e-06 ***
## goals_std       0.014232   0.073150   0.195    0.846    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8819 on 401 degrees of freedom
## Multiple R-squared:  0.2279, Adjusted R-squared:  0.2183 
## F-statistic: 23.67 on 5 and 401 DF,  p-value: < 2.2e-16
plot_model(reg_model, type="pred")$strategies_std +
  xlab("Limited Access to Emotion Regulation Strategies") +
  ylab("Inequality-Driven Mistrust: Impact of Betrayal") +
  ylim(min(df3$idm_ib_std), max(df3$idm_ib_std)) +
  ggtitle("Figure 3: Relationship between emotion dysregulation and impact of betrayal")
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.

8 Gender & Emotion Dysregulation

reg_model <- lm(clarity_std ~ gender, data = df3)
# car::vif(reg_model)
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = clarity_std ~ gender, data = df3)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.1793 -0.8830 -0.2294  0.5419  2.9166 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)   
## (Intercept)  -0.2071     0.0826  -2.508   0.0125 * 
## genderWomen   0.2963     0.1024   2.895   0.0040 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9843 on 405 degrees of freedom
## Multiple R-squared:  0.02027,    Adjusted R-squared:  0.01785 
## F-statistic: 8.379 on 1 and 405 DF,  p-value: 0.004001
plot_model(reg_model, type="pred") +
  xlab("Gender") + ylab("Lack of Emotional Clarity") +
  ylim(min(df3$clarity_std), max(df3$clarity_std)) +
  ggtitle("Figure 4: Gender differences in emotion dysregulation")
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.

reg_model <- lm(impulse_std ~ gender, data = df3)
# car::vif(reg_model)
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = impulse_std ~ gender, data = df3)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -0.9023 -0.6699 -0.3407  0.4142  3.2798 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)  
## (Intercept)  -0.1557     0.0841  -1.852   0.0648 .
## genderWomen   0.2325     0.1042   2.231   0.0263 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.002 on 405 degrees of freedom
## Multiple R-squared:  0.01214,    Adjusted R-squared:  0.009697 
## F-statistic: 4.975 on 1 and 405 DF,  p-value: 0.02626
plot_model(reg_model, type="pred") +
  xlab("Gender") + ylab("Impulse Control Difficulties") +
  ylim(min(df3$impulse_std), max(df3$impulse_std)) +
  ggtitle("Figure 5: Gender differences in emotion dysregulation")
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.

reg_model <- lm(strategies_std ~ gender, data = df3)
# car::vif(reg_model)
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = strategies_std ~ gender, data = df3)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.1062 -0.8453 -0.1977  0.6270  2.7468 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)  
## (Intercept) -0.15111    0.08357  -1.808   0.0713 .
## genderWomen  0.21902    0.10356   2.115   0.0351 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9958 on 405 degrees of freedom
## Multiple R-squared:  0.01092,    Adjusted R-squared:  0.008481 
## F-statistic: 4.473 on 1 and 405 DF,  p-value: 0.03505
plot_model(reg_model, type="pred") +
  xlab("Gender") + ylab("Limited Access to Emotion Regulation Strategies") +
  ylim(min(df3$strategies_std), max(df3$strategies_std)) +
  ggtitle("Figure 6: Gender differences in emotion dysregulation")
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.

9 IDM & Gender

reg_model <- lm(idm_dp_std ~ gender, data = df3)
# car::vif(reg_model)
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_dp_std ~ gender, data = df3)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.18401 -0.56724  0.09356  0.57154  2.46952 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)
## (Intercept)  0.04020    0.08344   0.482    0.630
## genderWomen -0.09838    0.10340  -0.951    0.342
## 
## Residual standard error: 0.9943 on 405 degrees of freedom
## Multiple R-squared:  0.00223,    Adjusted R-squared:  -0.0002334 
## F-statistic: 0.9053 on 1 and 405 DF,  p-value: 0.3419
# plot_model(reg_model, type="pred")

reg_model <- lm(idm_ib_std ~ gender, data = df3)
# car::vif(reg_model)
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_ib_std ~ gender, data = df3)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.7053 -0.7783  0.1488  0.7197  1.8616 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.10595    0.08362  -1.267    0.206
## genderWomen  0.14123    0.10363   1.363    0.174
## 
## Residual standard error: 0.9964 on 405 degrees of freedom
## Multiple R-squared:  0.004565,   Adjusted R-squared:  0.002107 
## F-statistic: 1.857 on 1 and 405 DF,  p-value: 0.1737
# plot_model(reg_model, type="pred")