1 Load Libraries

library(psych)
## Warning: package 'psych' was built under R version 4.1.3
library(kableExtra)
## Warning: package 'kableExtra' was built under R version 4.1.3
library(ggplot2)
## 
## Attaching package: 'ggplot2'
## The following objects are masked from 'package:psych':
## 
##     %+%, alpha
library(tidyr)
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following object is masked from 'package:kableExtra':
## 
##     group_rows
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
library(gtools)
## 
## Attaching package: 'gtools'
## The following object is masked from 'package:psych':
## 
##     logit
library(nFactors)
## Loading required package: lattice
## 
## Attaching package: 'nFactors'
## The following object is masked from 'package:lattice':
## 
##     parallel
library(corrplot)
## corrplot 0.92 loaded
library(sjPlot)
library(afex)
## Loading required package: lme4
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
## ************
## Welcome to afex. For support visit: http://afex.singmann.science/
## - Functions for ANOVAs: aov_car(), aov_ez(), and aov_4()
## - Methods for calculating p-values with mixed(): 'S', 'KR', 'LRT', and 'PB'
## - 'afex_aov' and 'mixed' objects can be passed to emmeans() for follow-up tests
## - NEWS: emmeans() for ANOVA models now uses model = 'multivariate' as default.
## - Get and set global package options with: afex_options()
## - Set orthogonal sum-to-zero contrasts globally: set_sum_contrasts()
## - For example analyses see: browseVignettes("afex")
## ************
## 
## Attaching package: 'afex'
## The following object is masked from 'package:lme4':
## 
##     lmer
library(broom)
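
Note the masking messages above: ggplot2 masks psych's alpha() and %+%, and dplyr masks stats::filter() and stats::lag(). A masked function is still reachable with an explicit namespace, which is why reliability is computed with psych::alpha() later in this report. A minimal illustration:

library(psych)
library(ggplot2)  # masks psych::alpha and psych::%+%

# Bare alpha() now resolves to ggplot2::alpha(), a colour-transparency helper:
alpha("blue", 0.5)      # "#0000FF80"

# Cronbach's alpha therefore needs the explicit namespace:
psych::alpha(attitude)  # reliability analysis of a built-in data set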

2 Load Data

df1 <- read.csv(file="data/Navigating Psych (SONA)_December 12, 2024_08.15.csv", header=T)
labels1 <- data.frame(t(cbind(df1[1,],samp="Sample Source"))) # first data row of a Qualtrics export holds the question text
df1 <- df1[-c(1,2), ] # drop the two Qualtrics metadata rows (question text and import IDs)
df1 <- subset(df1, Progress == "100", select=-c(4,14:15)) # completers only; drop unused columns
df1$samp <- "SONA"

df2 <- read.csv(file="data/Navigating psych (Prolific Final)_December 5, 2024_07.40.csv", header=T)
labels2 <- data.frame(t(cbind(df2[1,],samp="Sample Source")))
df2 <- df2[-c(1,2), ]
df2_quality <- subset(df2, select=c(8,173:175)) # keep the data-quality columns separately
df2 <- subset(df2, Progress == "100" & Q45 == 2 & Q46 < 4, select=-c(15:17,173:175)) # completers meeting the Q45/Q46 screening criteria
colnames(df2)[169] <- "id" # align the Prolific ID column name across samples
df2$samp <- "Prolific"

df <- rbind.data.frame(df1,df2) # stack the two samples
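
Both exports go through the same steps (strip the Qualtrics metadata rows, keep completers, tag the source). If further samples were added, a small helper would keep this consistent; a minimal sketch assuming the same export layout (read_qualtrics is illustrative, not part of the pipeline above):

read_qualtrics <- function(path, source_label) {
  raw <- read.csv(path, header = TRUE)
  labels <- data.frame(t(raw[1, ]))      # question text, kept for later lookups
  dat <- raw[-c(1, 2), ]                 # drop the two metadata rows
  dat <- subset(dat, Progress == "100")  # completers only
  dat$samp <- source_label
  list(data = dat, labels = labels)
}

# sona <- read_qualtrics("data/Navigating Psych (SONA)_December 12, 2024_08.15.csv", "SONA")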

2.1 Attention Checks

attn <- subset(labels1, grepl("attention", X1)) # locate attention-check items via their question text
attn_chk <- rownames(attn) # column names of the attention-check items

# Correct answers to the five attention-check items
correct_responses <- c(3, 2, 5, 1, 1)

# Check if the values in df match the correct responses
correct_counts <- apply(df[attn_chk], 1, function(row) sum(row == correct_responses))

# Display results
correct_counts
##    3    4    5    6    7    8    9   10   11   12   13   14   15   16   17   18 
##    2    5    5    4    1    5    4    3    5    3    5    5    5    5    5    2 
##   19   20   21   22   23   24   25   26   27   28   29   30   31   32   33   34 
##    5    2    1    3    3    4    5    5    5    5    5    5    5    5    5    5 
##   35   36   37   38   39   40   41   42   43   44   45   46   47   48   50   51 
##    4    1    5    4    0    5    2    5    2    5    5    5    3    5    2    5 
##   52   53   54   55   56   57   58   59   60   61   62   63   64   65   66   67 
##    4    5    5    4    5    5    5    1    5    5    5    5    5    5    5    5 
##   68   69   70   71   72   73   74   75   76   77   78   79   80   81   82   83 
##    5    5    5    0    5    5    5    3    5    5    5    5    5    5    5    5 
##   84   85   86   87   88   89   90   91   92   93   94   96   97   98   99  100 
##    5    5    5    5    5    5    3    5    5    5    5    5    2    5    3    0 
##  102  103  104  105  106  107  108  109  110  111  112  113  114  115  116  117 
##    2    5    4    5    5    2    4    5    4    5    4    5    3    3    5    5 
##  118  119  120  121  122  123  124  125  126  127  128  129  130  131  132  133 
##    5    5    5    5    4    2    2    5    5    5    5    5    5    3    5    5 
##  134  135  136  137  138  139  140  141  142  143  144  146  147  148  149  150 
##    1    5    2    5    5    5    5    5    5    5    5    3    5    5    5    5 
##  151  152  153  155  156  157  158  159  160  161  162  163  164  165  166  167 
##    5    3    5    5    5    5    2    3    5    5    5    3    2    3    5    5 
##  168  170  171  172  173  174  175  176  177  178  179  180  182  183  184  185 
##    5    0    5    5    5    5    0    2    5    1    5    3    5    5    5    5 
##  186  187  188  189  190  191  192  193  194  195  196  197  200  201  202  203 
##    5    2    5    5    4    5    5    5    5    4    5    3    5    5    5    5 
##  204  206  208  209  210  211  212  213  214  215  216  217  218  220  221  222 
##    5    5    1    5    5    5    5    5    3    2    3    1    5    5    5    2 
##  223  224  225  226  227  228  229  230  231  232  233  234  235  236  237  238 
##    1    5    5    5    2    5    5    5    5    2    2    2    1    4    5    5 
##  239  240  241  242  243  244  245  246  247  248  249  250  251  252  253  254 
##    5    5    2    5    1    5    5    5    5    5    4    0    0    5    5    2 
##  255  256  257  258  259  260  262  263  264  265  266  267  268  269  270  271 
##    2    5    5    5    3    5    5    5    5    5    5    3    5    2    5    5 
##  272   49  510  610  810 1210 1310 1410 1510 1610 1710 1810 1910 2010 2110  501 
##    5    0    0    0    0    5    5    5    5    5    5    5    5    5    5    5 
##  511  521  541  551  561  571  591  601  611  621  631  651  661  671  681  691 
##    5    5    5    5    5    2    5    5    5    5    5    5    5    5    2    5 
##  701  711  721  741  751  761  771  781  791  801  811  821  831  841  851  871 
##    5    1    5    5    5    5    5    5    5    5    5    5    5    5    5    5 
##  881  891  901  911  921  931  941   95  961  971  981  991 1001  101 1021 1031 
##    5    5    5    5    5    5    1    5    5    5    5    5    5    5    5    5 
## 1041 1051 1061 1071 1081 1091 1101 1111 1121 1131 1141 1151 1161 1171 1181 1191 
##    5    5    5    3    5    5    5    5    2    5    5    5    5    5    5    5 
## 1201 1211 1221 1231 1241 1251 1261 1271 1281 1291 1301 1311 1321 1331 1341 1351 
##    5    2    5    4    5    5    5    5    5    5    5    5    5    5    5    5 
## 1361 1371 1391 1401 1411 1421 1431 1441  145 1461 1471 1481 1491 1501 1511 1521 
##    5    5    5    5    5    5    5    5    5    5    5    5    5    5    5    5 
## 1531  154 1551 1571 1581 1591 1601 1611 1621 1631 1641 1651 1661 1671 1681  169 
##    5    5    5    5    5    5    5    5    5    5    5    5    3    5    2    5 
## 1701 1711 1721 1731 1741 1751 1761 1771 1781 1791 1801  181 1821 1831 1841 1851 
##    5    5    3    5    5    5    5    5    5    3    5    5    5    5    5    5 
## 1861 1871 1881 1891 1901 1911 1921 1931 1941 1951 1961 1971  198  199 2001 2011 
##    5    5    5    5    5    5    5    5    5    5    5    3    3    5    5    3 
## 2021 2031 2041  205 2061  207 2081 2091 2101 2111 2121 2131 2151 2161 2171 2181 
##    5    5    5    5    5    5    5    5    5    5    5    5    5    5    4    5 
##  219 2201 2211 2221 2231 2241 2251 2261 2271 2281 2291 2301 2311 2321 2331 2341 
##    5    5    5    4    5    5    5    5    5    5    5    5    5    5    5    5 
## 2351 2361 2371 2381 2391 2401 2411 2421 2431 2441 2451 2461 2471 2481 2491 2501 
##    5    5    5    5    5    5    5    5    5    5    5    5    5    5    5    5 
## 2511 2521 2531 2541 2561 2571 2581 2591 2601  261 2621 2631 2641 2651 2661 2671 
##    5    5    5    3    5    5    5    5    3    5    5    3    5    5    5    5 
## 2681 2691 2701 2711 2721  273  274  275  277  278  279  280  281  282  283  284 
##    5    5    5    5    5    5    5    5    4    5    3    3    5    5    5    5 
##  285  286  287  288  289  290  291  292  293  295  296  298  301  302  303  304 
##    5    3    5    5    3    5    5    5    5    5    4    5    5    5    3    5 
##  305  306  307 
##    5    5    5
table(correct_counts)
## correct_counts
##   0   1   2   3   4   5 
##  11  13  31  37  21 402
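
One subtlety in the scoring above: the survey columns are read in as character, so row == correct_responses only works because R coerces the numeric answer key to character. An explicitly numeric version makes the comparison more robust; a minimal equivalent sketch:

attn_numeric <- vapply(df[attn_chk], as.numeric, numeric(nrow(df)))
correct_counts2 <- rowSums(sweep(attn_numeric, 2, correct_responses, `==`))
# all(correct_counts2 == correct_counts) # should agree with the apply() version (barring blank responses)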
df$attn <- correct_counts

df <- subset(df, attn > 3) # keep participants who passed at least 4 of the 5 checks

# Prolific IDs flagged for exclusion
bad_ids <- c("663e9d84425d4157a22f3167",
            "666f47d2c81ed466b97cd82d",
            "634ef87aec0966557c825573",
            "669d8f03a8742ea4d11b35d8",
            "6701a4d42a2e8c434e0fc50a",
            "671736c2a43ff993ff9a070b",
            "67348c5f2fd2234d5e8f7d58")

df <- subset(df, !(id %in% bad_ids))

3 Sample

samp <- subset(df, select=c(156:170)) # demographic and sample-source columns (selected by position)

table(samp$samp)
## 
## Prolific     SONA 
##      229      191
table(samp$Q29, useNA = "always")
## 
##       1 1,3,5,8   1,3,8     1,8       2     2,3   2,4,8     2,8       3   3,4,5 
##       2       1       1       4      27       2       1       4      45       1 
## 3,4,5,8     3,5   3,5,8     3,8       4     4,5   4,5,8     4,8       5     5,8 
##       1       1       1       6      13       5       1       8       7       3 
##       6     6,8     7,8       8     8,9       9    <NA> 
##       1       2       1     279       2       1       0
table(samp$Q29,samp$Q29_9_TEXT, useNA = "always")
##          
##               Cajun creole  klingon White or Caucasian <NA>
##   1         2             0       0                  0    0
##   1,3,5,8   1             0       0                  0    0
##   1,3,8     1             0       0                  0    0
##   1,8       4             0       0                  0    0
##   2        27             0       0                  0    0
##   2,3       2             0       0                  0    0
##   2,4,8     1             0       0                  0    0
##   2,8       4             0       0                  0    0
##   3        45             0       0                  0    0
##   3,4,5     1             0       0                  0    0
##   3,4,5,8   1             0       0                  0    0
##   3,5       1             0       0                  0    0
##   3,5,8     1             0       0                  0    0
##   3,8       6             0       0                  0    0
##   4        13             0       0                  0    0
##   4,5       5             0       0                  0    0
##   4,5,8     1             0       0                  0    0
##   4,8       8             0       0                  0    0
##   5         7             0       0                  0    0
##   5,8       3             0       0                  0    0
##   6         1             0       0                  0    0
##   6,8       2             0       0                  0    0
##   7,8       1             0       0                  0    0
##   8       279             0       0                  0    0
##   8,9       0             0       1                  1    0
##   9         0             1       0                  0    0
##   <NA>      0             0       0                  0    0
samp$race <- "Bi/Multiracial" # default: multi-code responses not recoded below
samp$race[is.na(samp$Q29)] <- NA
samp$race[samp$Q29 == 1] <- "American Indian or Alaska Native"
samp$race[samp$Q29 == 2] <- "Asian"
samp$race[samp$Q29 == 3] <- "Black or African American"
samp$race[samp$Q29 == 4] <- "Hispanic or Spanish origin"
samp$race[samp$Q29 == 5] <- "Latino, Latina, or Latinx"
samp$race[samp$Q29 == 6] <- "Middle Eastern or North African"
samp$race[samp$Q29 == 7] <- "Native Hawaiian or Other Pacific Islander"
samp$race[samp$Q29 == 8] <- "White"
samp$race[samp$Q29 == 9] <- "Another race/ethnicity not listed"
samp$race[samp$Q29 == "4,8"] <- "White" # Hispanic or Spanish origin + White coded as White
table(samp$race, useNA = "always")
## 
##  American Indian or Alaska Native Another race/ethnicity not listed 
##                                 2                                 1 
##                             Asian                    Bi/Multiracial 
##                                27                                37 
##         Black or African American        Hispanic or Spanish origin 
##                                45                                13 
##         Latino, Latina, or Latinx   Middle Eastern or North African 
##                                 7                                 1 
##                             White                              <NA> 
##                               287                                 0
samp$race2 <- "PoC" # dichotomized: PoC = People of Color
samp$race2[samp$race == "White"] <- "WP" # WP = White participants
table(samp$race2, useNA = "always")
## 
##  PoC   WP <NA> 
##  133  287    0
table(samp$race, samp$samp, useNA = "always")
##                                    
##                                     Prolific SONA <NA>
##   American Indian or Alaska Native         2    0    0
##   Another race/ethnicity not listed        1    0    0
##   Asian                                    4   23    0
##   Bi/Multiracial                          24   13    0
##   Black or African American               26   19    0
##   Hispanic or Spanish origin               9    4    0
##   Latino, Latina, or Latinx                4    3    0
##   Middle Eastern or North African          0    1    0
##   White                                  159  128    0
##   <NA>                                     0    0    0
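
The chain of bracket assignments above works, but later assignments silently overwrite earlier ones, so ordering matters. dplyr::case_match() (dplyr >= 1.1.0) expresses the same mapping declaratively; a minimal sketch of the race recode under the same coding (illustrative only; race_alt is not used below):

samp <- samp %>%
  mutate(race_alt = case_match(
    Q29,
    "1" ~ "American Indian or Alaska Native",
    "2" ~ "Asian",
    "3" ~ "Black or African American",
    "4" ~ "Hispanic or Spanish origin",
    "5" ~ "Latino, Latina, or Latinx",
    "6" ~ "Middle Eastern or North African",
    "7" ~ "Native Hawaiian or Other Pacific Islander",
    c("8", "4,8") ~ "White",
    "9" ~ "Another race/ethnicity not listed",
    NA ~ NA_character_,             # preserve missing responses
    .default = "Bi/Multiracial"     # any other multi-code response
  ))

# table(samp$race, samp$race_alt) # should reproduce the recode above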
table(samp$Q32, useNA = "always")
## 
##           1   1,2   1,4   1,5   1,6     2 2,3,6   2,5   2,6     3   3,4     4 
##     1   261     1     1     3     1   139     1     2     1     1     1     5 
##   4,6     6  <NA> 
##     1     1     0
samp$gen[is.na(samp$Q32)] <- NA
samp$gen[samp$Q32 == 1] <- "W"
samp$gen[samp$Q32 == 2] <- "M"
samp$gen[samp$Q32 == 3] <- "N"
samp$gen[samp$Q32 == 4] <- "N"
samp$gen[samp$Q32 == 5] <- "A"
samp$gen[samp$Q32 == 6] <- "N"
samp$gen[samp$Q32 == 7] <- "A"
samp$gen[samp$Q32 == "1,2"] <- "N"
samp$gen[samp$Q32 == "1,4"] <- "N"
samp$gen[samp$Q32 == "1,5"] <- "W"
samp$gen[samp$Q32 == "1,6"] <- "W"
samp$gen[samp$Q32 == "2,3,6"] <- "N"
samp$gen[samp$Q32 == "2,5"] <- "M"
samp$gen[samp$Q32 == "2,6"] <- "M"
samp$gen[samp$Q32 == "3,4"] <- "N"
samp$gen[samp$Q32 == "4,6"] <- "N"
table(samp$gen, useNA = "always")
## 
##    M    N    W <NA> 
##  142   12  265    1
table(samp$gen, samp$samp, useNA = "always")
##       
##        Prolific SONA <NA>
##   M          94   48    0
##   N          10    2    0
##   W         124  141    0
##   <NA>        1    0    0
table(samp$Q32, samp$gen, useNA = "always")
##        
##           M   N   W <NA>
##           0   0   0    1
##   1       0   0 261    0
##   1,2     0   1   0    0
##   1,4     0   1   0    0
##   1,5     0   0   3    0
##   1,6     0   0   1    0
##   2     139   0   0    0
##   2,3,6   0   1   0    0
##   2,5     2   0   0    0
##   2,6     1   0   0    0
##   3       0   1   0    0
##   3,4     0   1   0    0
##   4       0   5   0    0
##   4,6     0   1   0    0
##   6       0   1   0    0
##   <NA>    0   0   0    0
# Q32 response options:
# 1 Woman, 2 Man, 3 Agender, 4 Non-binary, 5 Cisgender,
# 6 Transgender, 7 Another gender not listed
######
# Recoded categories:
# W == Woman
# M == Man
# N == Trans, Non-binary, Agender, or Other Combination (codes 3, 4, 6)
# A == Another gender not listed (codes 5 and 7)
# Multi-code responses were resolved case by case above.
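
An alternative to the repeated bracket assignments is a named lookup vector, which keeps the whole code-to-category table in one place; a minimal sketch for the gender recode (illustrative; gen_map is not part of the pipeline above):

gen_map <- c("1" = "W", "2" = "M", "3" = "N", "4" = "N", "5" = "A",
             "6" = "N", "7" = "A",
             "1,2" = "N", "1,4" = "N", "1,5" = "W", "1,6" = "W",
             "2,3,6" = "N", "2,5" = "M", "2,6" = "M", "3,4" = "N", "4,6" = "N")
samp$gen_alt <- unname(gen_map[samp$Q32]) # unmatched codes (e.g., "") become NA

# table(samp$gen, samp$gen_alt, useNA = "always") # should agree with the recode above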

table(samp$Q33, useNA = "always")
## 
##    1  1,3    2  2,5    3  3,4    4  4,5    5  5,6    6 <NA> 
##  341    3   14    1   41    3    7    1    4    2    3    0
samp$sexid[is.na(samp$Q33)] <- NA
samp$sexid[samp$Q33 == 1] <- "Heterosexual/straight"
samp$sexid[samp$Q33 == 2] <- "Homosexual/gay/lesbian"
samp$sexid[samp$Q33 == 3] <- "Bisexual"
samp$sexid[samp$Q33 == 4] <- "Pansexual"
samp$sexid[samp$Q33 == 5] <- "Asexual"
samp$sexid[samp$Q33 == 6] <- "Another sexual identity not listed"
samp$sexid[samp$Q33 == "1,3"] <- "Another sexual identity not listed"
samp$sexid[samp$Q33 == "2,5"] <- "Asexual"
samp$sexid[samp$Q33 == "3,4"] <- "Pansexual"
samp$sexid[samp$Q33 == "4,5"] <- "Asexual"
samp$sexid[samp$Q33 == "5,6"] <- "Another sexual identity not listed"
table(samp$Q33_6_TEXT)
## 
##           BiCurious     Queer    Unsure 
##       415         1         3         1
table(samp$sexid, useNA = "always")
## 
## Another sexual identity not listed                            Asexual 
##                                  8                                  6 
##                           Bisexual              Heterosexual/straight 
##                                 41                                341 
##             Homosexual/gay/lesbian                          Pansexual 
##                                 14                                 10 
##                               <NA> 
##                                  0
samp$sexid2 <- "LGB"
samp$sexid2[samp$sexid == "Heterosexual/straight"] <- "Straight"
table(samp$sexid2, useNA = "always")
## 
##      LGB Straight     <NA> 
##       79      341        0
table(samp$sexid2, samp$samp, useNA = "always")
##           
##            Prolific SONA <NA>
##   LGB            48   31    0
##   Straight      181  160    0
##   <NA>            0    0    0

4 Descriptives

4.1 Subset

rm(df1, df2)

df2 <- subset(df, select = grep("^Q6|^Q39|^Q40|^Q18|^Q27.1|^Q27.3", colnames(df), value = TRUE)) # keep only the scale items
df2 <- subset(df2, select = setdiff(colnames(df2), attn_chk))

4.2 Describe

# Convert all columns in df2 to numeric
df2[] <- lapply(df2, as.numeric)
# Get descriptive statistics for df2
desc_stats <- describe(df2)

# Flag rows with absolute skew or kurtosis greater than 2
desc_stats <- desc_stats %>%
  mutate(highlight = ifelse(abs(skew) > 2 | abs(kurtosis) > 2, "highlight", "no"))

# Create a table with highlighted rows for high skewness or kurtosis
desc_stats %>%
  kable(format = "html", digits = 2) %>%
  kable_styling(full_width = FALSE, bootstrap_options = c("striped", "hover", "condensed")) %>%
  row_spec(which(desc_stats$highlight == "highlight"), background = "red")
vars n mean sd median trimmed mad min max range skew kurtosis se highlight
Q6_1 1 420 3.00 0.97 3 3.01 1.48 1 5 4 -0.07 -0.26 0.05 no
Q6_2 2 420 3.12 1.17 3 3.15 1.48 1 5 4 -0.07 -0.74 0.06 no
Q6_3 3 420 3.77 1.08 4 3.88 1.48 1 5 4 -0.67 -0.20 0.05 no
Q6_4 4 420 3.36 1.03 3 3.38 1.48 1 5 4 -0.29 -0.34 0.05 no
Q6_5 5 420 3.03 0.96 3 3.02 1.48 1 5 4 0.06 -0.24 0.05 no
Q6_6 6 420 2.73 1.09 3 2.71 1.48 1 5 4 0.18 -0.57 0.05 no
Q6_7 7 420 3.24 0.97 3 3.24 1.48 1 5 4 -0.20 -0.31 0.05 no
Q6_8 8 420 3.18 1.01 3 3.16 1.48 1 5 4 -0.09 -0.49 0.05 no
Q6_9 9 420 3.00 1.07 3 3.00 1.48 1 5 4 0.00 -0.63 0.05 no
Q6_10 10 420 2.82 1.22 3 2.78 1.48 1 5 4 0.16 -0.87 0.06 no
Q39_1 11 420 2.85 1.30 3 2.78 1.48 1 6 5 0.28 -0.47 0.06 no
Q39_2 12 420 2.82 1.27 3 2.75 1.48 1 6 5 0.35 -0.33 0.06 no
Q39_3 13 420 1.99 1.04 2 1.85 1.48 1 6 5 0.97 0.66 0.05 no
Q39_4 14 420 2.77 1.40 3 2.67 1.48 1 6 5 0.38 -0.74 0.07 no
Q39_5 15 420 2.00 1.22 2 1.79 1.48 1 6 5 1.18 0.76 0.06 no
Q39_6 16 419 2.15 1.17 2 2.00 1.48 1 6 5 0.88 0.20 0.06 no
Q39_8 17 420 3.10 1.29 3 3.10 1.48 1 6 5 0.05 -0.69 0.06 no
Q39_9 18 420 2.25 1.24 2 2.10 1.48 1 6 5 0.90 0.24 0.06 no
Q39_10 19 420 1.76 0.92 2 1.63 1.48 1 6 5 1.35 2.37 0.05 highlight
Q40_1 20 420 1.69 0.89 1 1.55 0.00 1 4 3 1.09 0.19 0.04 no
Q40_2 21 420 1.45 0.71 1 1.30 0.00 1 4 3 1.45 1.22 0.03 no
Q40_3 22 420 1.38 0.71 1 1.20 0.00 1 4 3 1.85 2.62 0.03 highlight
Q40_4 23 420 1.80 0.92 2 1.68 1.48 1 4 3 0.80 -0.51 0.04 no
Q40_5 24 420 1.82 0.95 2 1.70 1.48 1 4 3 0.78 -0.61 0.05 no
Q40_6 25 420 1.85 0.93 2 1.76 1.48 1 4 3 0.65 -0.80 0.05 no
Q18_1 26 420 1.96 0.86 2 1.90 1.48 1 4 3 0.48 -0.66 0.04 no
Q18_2 27 419 2.28 0.98 2 2.23 1.48 1 4 3 0.07 -1.11 0.05 no
Q18_3 28 420 2.46 1.01 3 2.45 1.48 1 4 3 -0.10 -1.11 0.05 no
Q18_4 29 420 2.49 1.02 3 2.49 1.48 1 4 3 -0.12 -1.12 0.05 no
Q18_5 30 420 2.37 0.99 2 2.33 1.48 1 4 3 0.06 -1.08 0.05 no
Q18_6 31 420 2.53 1.04 3 2.54 1.48 1 4 3 -0.17 -1.16 0.05 no
Q18_7 32 420 1.82 0.78 2 1.74 1.48 1 4 3 0.72 0.12 0.04 no
Q18_8 33 420 1.66 0.79 1 1.54 0.00 1 4 3 1.00 0.27 0.04 no
Q18_9 34 420 1.93 0.95 2 1.83 1.48 1 4 3 0.60 -0.79 0.05 no
Q18_11 35 420 2.30 1.02 2 2.24 1.48 1 4 3 0.16 -1.14 0.05 no
Q18_12 36 420 1.86 0.92 2 1.76 1.48 1 4 3 0.64 -0.76 0.05 no
Q18_13 37 420 2.79 0.87 3 2.84 1.48 1 4 3 -0.33 -0.56 0.04 no
Q18_14 38 420 2.76 0.87 3 2.82 1.48 1 4 3 -0.45 -0.39 0.04 no
Q18_15 39 420 2.24 0.91 2 2.19 1.48 1 4 3 0.21 -0.82 0.04 no
Q18_16 40 420 2.11 0.79 2 2.10 1.48 1 4 3 0.24 -0.50 0.04 no
Q27.1_1 41 416 2.41 1.06 2 2.33 1.48 1 5 4 0.77 -0.04 0.05 no
Q27.1_2 42 413 2.44 1.22 2 2.33 1.48 1 5 4 0.61 -0.62 0.06 no
Q27.1_3 43 414 2.66 1.17 2 2.60 1.48 1 5 4 0.40 -0.72 0.06 no
Q27.1_4 44 413 2.86 1.26 3 2.82 1.48 1 5 4 0.28 -1.02 0.06 no
Q27.1_5 45 413 2.43 1.22 2 2.33 1.48 1 5 4 0.50 -0.77 0.06 no
Q27.1_6 46 415 2.38 1.23 2 2.27 1.48 1 5 4 0.60 -0.69 0.06 no
Q27.1_7 47 411 3.16 1.19 3 3.17 1.48 1 5 4 -0.04 -0.97 0.06 no
Q27.1_8 48 413 2.75 1.26 3 2.69 1.48 1 5 4 0.21 -1.03 0.06 no
Q27.1_9 49 415 2.22 1.16 2 2.09 1.48 1 5 4 0.84 -0.18 0.06 no
Q27.1_10 50 415 2.07 1.07 2 1.92 1.48 1 5 4 0.98 0.29 0.05 no
Q27.1_11 51 416 2.33 1.16 2 2.22 1.48 1 5 4 0.72 -0.31 0.06 no
Q27.1_12 52 416 2.48 1.21 2 2.39 1.48 1 5 4 0.49 -0.71 0.06 no
Q27.1_13 53 415 2.11 1.13 2 1.96 1.48 1 5 4 0.90 0.04 0.06 no
Q27.1_14 54 413 2.50 1.26 2 2.39 1.48 1 5 4 0.49 -0.83 0.06 no
Q27.1_16 55 414 2.80 1.15 3 2.77 1.48 1 5 4 0.21 -0.84 0.06 no
Q27.1_17 56 415 2.27 1.16 2 2.13 1.48 1 5 4 0.81 -0.17 0.06 no
Q27.1_18 57 416 2.65 1.19 2 2.59 1.48 1 5 4 0.36 -0.87 0.06 no
Q27.1_19 58 415 2.32 1.22 2 2.19 1.48 1 5 4 0.73 -0.47 0.06 no
Q27.1_20 59 416 2.24 1.14 2 2.10 1.48 1 5 4 0.84 -0.04 0.06 no
Q27.1_21 60 413 2.34 1.12 2 2.25 1.48 1 5 4 0.65 -0.39 0.06 no
Q27.1_22 61 414 2.65 1.16 2 2.59 1.48 1 5 4 0.45 -0.67 0.06 no
Q27.1_23 62 416 2.45 1.19 2 2.36 1.48 1 5 4 0.51 -0.68 0.06 no
Q27.1_24 63 416 2.88 1.21 3 2.84 1.48 1 5 4 0.20 -0.93 0.06 no
Q27.1_25 64 415 2.27 1.18 2 2.15 1.48 1 5 4 0.69 -0.48 0.06 no
Q27.3_1 65 213 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_2 66 176 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_3 67 164 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_4 68 236 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_5 69 110 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_6 70 100 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_7 71 239 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_8 72 189 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_9 73 95 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_10 74 117 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_11 75 142 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_12 76 160 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_13 77 107 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_14 78 127 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_16 79 178 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_17 80 139 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_18 81 168 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_19 82 148 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_20 83 112 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_21 84 119 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_22 85 145 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_23 86 151 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_24 87 186 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
Q27.3_25 88 106 1.00 0.00 1 1.00 0.00 1 1 0 NaN NaN 0.00 NA
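
The Q27.3 columns behave as check-all-that-apply indicators in this export: every observed value is 1, with NA where the option was not selected, which is why describe() returns constant means and NaN skew for them. If endorsement counts are wanted, recoding NA to 0 is more informative; a minimal sketch, assuming NA does mean "not selected" here:

q273 <- select(df2, starts_with("Q27.3"))
q273[is.na(q273)] <- 0 # treat unselected as 0

colSums(q273)          # participants endorsing each item
summary(rowSums(q273)) # items endorsed per participant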

5 Factor Analyses

5.1 Perceived Stress Scale

0-4, Never to Very Often; stem: “In the last month, how often have you ...”

d <- subset(df2, select = grep("^Q6", colnames(df2), value = TRUE))
colnames(d) <- c("... been upset because of something that happened unexpectedly?",
                "... felt that you were unable to control the important things in your life?",
                "... felt nervous and 'stressed'?",
                "... felt confident about your ability to handle your personal problems?",
                "... felt that things were going your way?",
                "... found that you could not cope with all the things that you had to do?",
                "... been able to control irritations in your life?",
                "... felt that you were on top of things?",
                "... been angered because of things that were outside of your control?",
                "... felt difficulties were piling up so high that you could not overcome them?")
d <- na.omit(d)

ev <- eigen(cor(d)) # eigenvalues of the item correlation matrix
ap <- parallel(subject=nrow(d),var=ncol(d),rep=100,cent=.05) # parallel analysis: eigenvalue benchmarks from random data
nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea) # non-graphical scree solutions
plotnScree(nS) # scree plot; look for the elbow
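
This retention-check block is repeated verbatim for each scale below, so it can be wrapped in a helper; a minimal sketch using the same nFactors calls (scree_check is illustrative only):

scree_check <- function(d, reps = 100, cent = .05) {
  ev <- eigen(cor(d))
  ap <- parallel(subject = nrow(d), var = ncol(d), rep = reps, cent = cent)
  nS <- nScree(x = ev$values, aparallel = ap$eigen$qevpea)
  plotnScree(nS)
  invisible(nS)
}

# scree_check(d) # equivalent to the four lines above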

EFA <- factanal(d, factors = 2, rotation = "promax")
print(EFA, digits=3, cutoff=.4, sort=F)
## 
## Call:
## factanal(x = d, factors = 2, rotation = "promax")
## 
## Uniquenesses:
##                ... been upset because of something that happened unexpectedly? 
##                                                                          0.379 
##    ... felt that you were unable to control the important things in your life? 
##                                                                          0.372 
##                                               ... felt nervous and 'stressed'? 
##                                                                          0.423 
##        ... felt confident about your ability to handle your personal problems? 
##                                                                          0.353 
##                                      ... felt that things were going your way? 
##                                                                          0.370 
##      ... found that you could not cope with all the things that you had to do? 
##                                                                          0.409 
##                             ... been able to control irritations in your life? 
##                                                                          0.655 
##                                       ... felt that you were on top of things? 
##                                                                          0.415 
##          ... been angered because of things that were outside of your control? 
##                                                                          0.474 
## ... felt difficulties were piling up so high that you could not overcome them? 
##                                                                          0.309 
## 
## Loadings:
##                                                                                Factor1
## ... been upset because of something that happened unexpectedly?                 0.877 
## ... felt that you were unable to control the important things in your life?     0.670 
## ... felt nervous and 'stressed'?                                                0.776 
## ... felt confident about your ability to handle your personal problems?               
## ... felt that things were going your way?                                             
## ... found that you could not cope with all the things that you had to do?       0.630 
## ... been able to control irritations in your life?                                    
## ... felt that you were on top of things?                                              
## ... been angered because of things that were outside of your control?           0.778 
## ... felt difficulties were piling up so high that you could not overcome them?  0.678 
##                                                                                Factor2
## ... been upset because of something that happened unexpectedly?                       
## ... felt that you were unable to control the important things in your life?           
## ... felt nervous and 'stressed'?                                                      
## ... felt confident about your ability to handle your personal problems?         0.819 
## ... felt that things were going your way?                                       0.796 
## ... found that you could not cope with all the things that you had to do?             
## ... been able to control irritations in your life?                              0.595 
## ... felt that you were on top of things?                                        0.750 
## ... been angered because of things that were outside of your control?                 
## ... felt difficulties were piling up so high that you could not overcome them?        
## 
##                Factor1 Factor2
## SS loadings      3.283   2.385
## Proportion Var   0.328   0.239
## Cumulative Var   0.328   0.567
## 
## Factor Correlations:
##         Factor1 Factor2
## Factor1   1.000  -0.593
## Factor2  -0.593   1.000
## 
## Test of the hypothesis that 2 factors are sufficient.
## The chi square statistic is 78.78 on 26 degrees of freedom.
## The p-value is 3.23e-07

5.2 Everyday Discrimination Scale

1-6, Never to Almost every day

d <- subset(df2, select = grep("^Q39", colnames(df2), value = TRUE))
colnames(d) <- c("You are treated with less courtesy than other people are.",
                  "You are treated with less respect than other people are.",
                  "You receive poorer service than other people at restaurants or stores.",
                  "People act as if they think you are not smart.",
                  "People act as if they are afraid of you.",
                  "People act as if they think you are dishonest.",
                  "People act as if they’re better than you are.",
                  "You are called names or insulted.",
                  "You are threatened or harassed.")
d <- na.omit(d)

ev <- eigen(cor(d)) # eigenvalues of the item correlation matrix
ap <- parallel(subject=nrow(d),var=ncol(d),rep=100,cent=.05) # parallel analysis: eigenvalue benchmarks from random data
nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea) # non-graphical scree solutions
plotnScree(nS) # scree plot; look for the elbow

EFA <- factanal(d, factors = 1, rotation = "promax")
print(EFA, digits=3, cutoff=0, sort=F)
## 
## Call:
## factanal(x = d, factors = 1, rotation = "promax")
## 
## Uniquenesses:
##              You are treated with less courtesy than other people are. 
##                                                                  0.175 
##               You are treated with less respect than other people are. 
##                                                                  0.151 
## You receive poorer service than other people at restaurants or stores. 
##                                                                  0.684 
##                         People act as if they think you are not smart. 
##                                                                  0.542 
##                               People act as if they are afraid of you. 
##                                                                  0.804 
##                         People act as if they think you are dishonest. 
##                                                                  0.658 
##                          People act as if they’re better than you are. 
##                                                                  0.504 
##                                      You are called names or insulted. 
##                                                                  0.678 
##                                        You are threatened or harassed. 
##                                                                  0.716 
## 
## Loadings:
##                                                                        Factor1
## You are treated with less courtesy than other people are.              0.908  
## You are treated with less respect than other people are.               0.921  
## You receive poorer service than other people at restaurants or stores. 0.562  
## People act as if they think you are not smart.                         0.677  
## People act as if they are afraid of you.                               0.443  
## People act as if they think you are dishonest.                         0.585  
## People act as if they’re better than you are.                          0.705  
## You are called names or insulted.                                      0.568  
## You are threatened or harassed.                                        0.533  
## 
##                Factor1
## SS loadings      4.089
## Proportion Var   0.454
## 
## Test of the hypothesis that 1 factor is sufficient.
## The chi square statistic is 373.2 on 27 degrees of freedom.
## The p-value is 1.39e-62

5.3 Expectation of Rejection Scale

1-4, Disagree Strongly to Agree Strongly

d <- subset(df2, select = grep("^Q40", colnames(df2), value = TRUE))
colnames(d) <- c("Most employers will not hire a person like you",
                  "Most people believe that a person like you cannot be trusted",
                  "Most people think that a person like you is dangerous and unpredictable",
                  "Most people think less of a person like you",
                  "Most people look down on people like you",
                  "Most people think people like you are not as intelligent as the average person")

ev <- eigen(cor(d)) # eigenvalues of the item correlation matrix
ap <- parallel(subject=nrow(d),var=ncol(d),rep=100,cent=.05) # parallel analysis: eigenvalue benchmarks from random data
nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea) # non-graphical scree solutions
plotnScree(nS) # scree plot; look for the elbow

EFA <- factanal(d, factors = 1, rotation = "promax")
print(EFA, digits=3, cutoff=.4, sort=F)
## 
## Call:
## factanal(x = d, factors = 1, rotation = "promax")
## 
## Uniquenesses:
##                                 Most employers will not hire a person like you 
##                                                                          0.529 
##                   Most people believe that a person like you cannot be trusted 
##                                                                          0.488 
##        Most people think that a person like you is dangerous and unpredictable 
##                                                                          0.556 
##                                    Most people think less of a person like you 
##                                                                          0.125 
##                                       Most people look down on people like you 
##                                                                          0.138 
## Most people think people like you are not as intelligent as the average person 
##                                                                          0.357 
## 
## Loadings:
##                                                                                Factor1
## Most employers will not hire a person like you                                 0.686  
## Most people believe that a person like you cannot be trusted                   0.716  
## Most people think that a person like you is dangerous and unpredictable        0.666  
## Most people think less of a person like you                                    0.935  
## Most people look down on people like you                                       0.928  
## Most people think people like you are not as intelligent as the average person 0.802  
## 
##                Factor1
## SS loadings      3.807
## Proportion Var   0.635
## 
## Test of the hypothesis that 1 factor is sufficient.
## The chi square statistic is 140.3 on 9 degrees of freedom.
## The p-value is 8.97e-26

5.4 Inequality-driven Mistrust

1-4, Disagree Strongly to Agree Strongly

d <- subset(df2, select = grep("^Q18", colnames(df2), value = TRUE))
d <- subset(d, select = -c(Q18_1, Q18_7))
colnames(d) <- c(# "I assume others will not treat me fairly.",
                  "I've noticed that news stories or other 'factual' information aimed at people like me is actually misinformation or propaganda.",
                  "It is difficult for me to trust other people because of the ways I have been treated.",
                  "It is hard to believe that others' intentions are sincere due to my past experiences.",
                  "I've learned that I can't trust other people.",
                  "People like me will be taken advantage of if we are not careful.",
                  # "If someone like me fell on the sidewalk, no one would stop to help.",
                  "People are generally out to get people like me.",
                  "People like me cannot trust authority figures.",
                  "In my experience, public officials and experts are NOT trustworthy and just.",
                  "Society has treated me unjustly.",
                  "I’d prefer to find things out for myself on the internet rather than asking others.",
                  "If you put too much faith in what people tell you, you are likely to get hurt.",
                  "When someone tells me something, my immediate reaction is to wonder why they are telling me this.",
                  "I don’t usually act on advice that I get from others even when I think it’s probably sound.")

# Truncate item text to 50 characters so the EFA output stays readable
max_length <- 50
colnames(d) <- substr(colnames(d), 1, max_length) # substr() is vectorized, so no sapply() is needed

d <- na.omit(d)

ev <- eigen(cor(d)) # eigenvalues of the item correlation matrix
ap <- parallel(subject=nrow(d),var=ncol(d),rep=100,cent=.05) # parallel analysis: eigenvalue benchmarks from random data
nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea) # non-graphical scree solutions
plotnScree(nS) # scree plot; look for the elbow

# EFA <- factanal(d, factors = 1, rotation = "promax")
# print(EFA, digits=3, cutoff=.4, sort=TRUE)

EFA <- factanal(d, factors = 3, rotation = "promax") # note: cutoff is a print() argument, not a factanal() one
print(EFA, digits=3, cutoff=.4, sort=TRUE)
## 
## Call:
## factanal(x = d, factors = 3, rotation = "promax")
## 
## Uniquenesses:
## I've noticed that news stories or other 'factual'  
##                                              0.650 
## It is difficult for me to trust other people becau 
##                                              0.233 
## It is hard to believe that others' intentions are  
##                                              0.152 
##      I've learned that I can't trust other people. 
##                                              0.342 
## People like me will be taken advantage of if we ar 
##                                              0.501 
##    People are generally out to get people like me. 
##                                              0.574 
##     People like me cannot trust authority figures. 
##                                              0.318 
## In my experience, public officials and experts are 
##                                              0.416 
##                   Society has treated me unjustly. 
##                                              0.408 
## I’d prefer to find things out for myself on the in 
##                                              0.747 
## If you put too much faith in what people tell you, 
##                                              0.422 
## When someone tells me something, my immediate reac 
##                                              0.414 
## I don’t usually act on advice that I get from othe 
##                                              0.687 
## 
## Loadings:
##                                                    Factor1 Factor2 Factor3
## I've noticed that news stories or other 'factual'   0.590                 
## People are generally out to get people like me.     0.616                 
## People like me cannot trust authority figures.      0.918                 
## In my experience, public officials and experts are  0.825                 
## Society has treated me unjustly.                    0.728                 
## It is difficult for me to trust other people becau          0.908         
## It is hard to believe that others' intentions are           0.923         
## I've learned that I can't trust other people.               0.625         
## If you put too much faith in what people tell you,                  0.721 
## When someone tells me something, my immediate reac                  0.839 
## I don’t usually act on advice that I get from othe                  0.566 
## People like me will be taken advantage of if we ar          0.438         
## I’d prefer to find things out for myself on the in                  0.448 
## 
##                Factor1 Factor2 Factor3
## SS loadings      2.914   2.322   1.805
## Proportion Var   0.224   0.179   0.139
## Cumulative Var   0.224   0.403   0.542
## 
## Factor Correlations:
##         Factor1 Factor2 Factor3
## Factor1   1.000  -0.645  -0.676
## Factor2  -0.645   1.000   0.693
## Factor3  -0.676   0.693   1.000
## 
## Test of the hypothesis that 3 factors are sufficient.
## The chi square statistic is 63.31 on 42 degrees of freedom.
## The p-value is 0.0184
# EFA <- factanal(d, factors = 4, rotation = "promax")
# print(EFA, digits=3, cutoff=.4, sort=TRUE)

5.5 Factor Scores

IDM subscales (item numbers refer to the Q18 stems above):

Distrust in authority:

* 9 - People like me cannot trust authority figures.
* 10 - In my experience, public officials and experts are NOT trustworthy and just.
* 11 - Society has treated me unjustly.
* 8 - People are generally out to get people like me.
* 2 - I’ve noticed that news stories or other ‘factual’ information aimed at people like me is actually misinformation or propaganda.

Impact of betrayal:

* 4 - It is hard to believe that others’ intentions are sincere due to my past experiences.
* 3 - It is difficult for me to trust other people because of the ways I have been treated.
* 5 - I’ve learned that I can’t trust other people.
* 6 - People like me will be taken advantage of if we are not careful.

Defensive processing:

* 14 - When someone tells me something, my immediate reaction is to wonder why they are telling me this.
* 13 - If you put too much faith in what people tell you, you are likely to get hurt.
* 15 - I don’t usually act on advice that I get from others even when I think it’s probably sound.
* 12 - I’d prefer to find things out for myself on the internet rather than asking others.

df2 <- df2 %>%
  mutate(phs = rowMeans(select(., Q6_1:Q6_3, Q6_6, Q6_9:Q6_10), na.rm = TRUE)) %>% # perceived helplessness
  mutate(pse = rowMeans(select(., Q6_4:Q6_5, Q6_7:Q6_8), na.rm = TRUE)) %>% # perceived self-efficacy
  mutate(eds = rowMeans(select(., starts_with("Q39")), na.rm = TRUE)) %>% # everyday discrimination
  mutate(ers = rowMeans(select(., starts_with("Q40")), na.rm = TRUE)) %>% # expectation of rejection
  mutate(idm_all = rowMeans(select(., starts_with("Q18")), na.rm = TRUE)) %>% # IDM total (all Q18 items)
  mutate(idm_da = rowMeans(select(., Q18_2, Q18_8:Q18_11), na.rm = TRUE)) %>% # distrust in authority
  mutate(idm_ib = rowMeans(select(., Q18_3:Q18_6), na.rm = TRUE)) %>% # impact of betrayal
  mutate(idm_dp = rowMeans(select(., Q18_12:Q18_15), na.rm = TRUE)) # defensive processing
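
These composites are unit-weighted item means. Model-based scores are also available directly from factanal() via its scores argument; a minimal sketch for the three IDM factors (assuming d still holds the 13 Q18 items from Section 5.4; EFA_sc is illustrative only):

EFA_sc <- factanal(d, factors = 3, rotation = "promax", scores = "regression")
head(EFA_sc$scores) # one column of regression (Thomson) scores per factor
# Note: d was na.omit()ed above, so cases must be re-aligned before
# correlating these scores with the idm_* composites in df2.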

psych::alpha(select(df2, Q6_1:Q6_3, Q6_6, Q6_9:Q6_10), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, Q6_1:Q6_3, Q6_6, Q6_9:Q6_10), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
##       0.89      0.89    0.88      0.59 8.5 0.0079  3.1 0.89     0.59
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.88  0.89  0.91
## Duhachek  0.88  0.89  0.91
## 
##  Reliability if an item is dropped:
##       raw_alpha std.alpha G6(smc) average_r S/N alpha se   var.r med.r
## Q6_1       0.88      0.88    0.86      0.59 7.1   0.0094 0.00366  0.58
## Q6_2       0.87      0.87    0.86      0.58 6.9   0.0098 0.00387  0.58
## Q6_3       0.88      0.88    0.86      0.59 7.1   0.0094 0.00426  0.59
## Q6_6       0.88      0.88    0.85      0.59 7.1   0.0095 0.00097  0.59
## Q6_9       0.88      0.88    0.87      0.60 7.6   0.0089 0.00308  0.60
## Q6_10      0.87      0.87    0.84      0.57 6.6   0.0102 0.00189  0.58
## 
##  Item statistics 
##         n raw.r std.r r.cor r.drop mean   sd
## Q6_1  420  0.79  0.81  0.76   0.71  3.0 0.97
## Q6_2  420  0.83  0.82  0.77   0.73  3.1 1.17
## Q6_3  420  0.80  0.80  0.75   0.71  3.8 1.08
## Q6_6  420  0.81  0.80  0.76   0.71  2.7 1.09
## Q6_9  420  0.77  0.77  0.71   0.67  3.0 1.07
## Q6_10 420  0.85  0.85  0.82   0.77  2.8 1.22
## 
## Non missing response frequency for each item
##          1    2    3    4    5 miss
## Q6_1  0.07 0.21 0.43 0.23 0.05    0
## Q6_2  0.10 0.19 0.35 0.22 0.15    0
## Q6_3  0.04 0.09 0.24 0.34 0.30    0
## Q6_6  0.14 0.27 0.35 0.16 0.06    0
## Q6_9  0.08 0.24 0.35 0.24 0.08    0
## Q6_10 0.16 0.25 0.30 0.18 0.11    0
psych::alpha(select(df2, Q6_4:Q6_5, Q6_7:Q6_8), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, Q6_4:Q6_5, Q6_7:Q6_8), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##       0.83      0.83    0.79      0.54 4.8 0.014  3.2 0.81     0.55
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt      0.8  0.83  0.85
## Duhachek   0.8  0.83  0.85
## 
##  Reliability if an item is dropped:
##      raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## Q6_4      0.75      0.75    0.68      0.51 3.1    0.021 0.0105  0.46
## Q6_5      0.76      0.76    0.69      0.52 3.2    0.020 0.0041  0.51
## Q6_7      0.83      0.83    0.77      0.62 4.9    0.014 0.0010  0.62
## Q6_8      0.77      0.77    0.71      0.53 3.4    0.019 0.0123  0.51
## 
##  Item statistics 
##        n raw.r std.r r.cor r.drop mean   sd
## Q6_4 420  0.85  0.85  0.79   0.71  3.4 1.03
## Q6_5 420  0.83  0.83  0.77   0.69  3.0 0.96
## Q6_7 420  0.74  0.74  0.59   0.54  3.2 0.97
## Q6_8 420  0.83  0.82  0.74   0.67  3.2 1.01
## 
## Non missing response frequency for each item
##         1    2    3    4    5 miss
## Q6_4 0.05 0.14 0.36 0.32 0.13    0
## Q6_5 0.05 0.22 0.44 0.22 0.07    0
## Q6_7 0.04 0.17 0.39 0.32 0.09    0
## Q6_8 0.05 0.20 0.37 0.29 0.09    0
psych::alpha(select(df2, starts_with("Q39")), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, starts_with("Q39")), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
##       0.89      0.89     0.9      0.47 7.8 0.0081  2.4 0.88     0.46
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.87  0.89   0.9
## Duhachek  0.87  0.89   0.9
## 
##  Reliability if an item is dropped:
##        raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## Q39_1       0.87      0.87    0.87      0.45 6.5   0.0097 0.0096  0.45
## Q39_2       0.86      0.86    0.86      0.44 6.4   0.0098 0.0095  0.45
## Q39_3       0.88      0.88    0.89      0.48 7.3   0.0087 0.0168  0.46
## Q39_4       0.87      0.87    0.88      0.45 6.7   0.0095 0.0157  0.45
## Q39_5       0.89      0.89    0.90      0.50 7.9   0.0080 0.0129  0.47
## Q39_6       0.87      0.87    0.89      0.47 7.0   0.0090 0.0173  0.45
## Q39_8       0.87      0.87    0.88      0.46 6.7   0.0094 0.0147  0.46
## Q39_9       0.88      0.88    0.88      0.47 7.2   0.0086 0.0148  0.46
## Q39_10      0.88      0.88    0.89      0.48 7.4   0.0086 0.0145  0.47
## 
##  Item statistics 
##          n raw.r std.r r.cor r.drop mean   sd
## Q39_1  420  0.81  0.80  0.82   0.74  2.8 1.30
## Q39_2  420  0.83  0.82  0.84   0.77  2.8 1.27
## Q39_3  420  0.67  0.68  0.62   0.58  2.0 1.04
## Q39_4  420  0.79  0.77  0.74   0.70  2.8 1.40
## Q39_5  420  0.60  0.60  0.52   0.48  2.0 1.22
## Q39_6  419  0.72  0.72  0.67   0.64  2.1 1.17
## Q39_8  420  0.77  0.76  0.73   0.69  3.1 1.29
## Q39_9  420  0.69  0.69  0.65   0.59  2.3 1.24
## Q39_10 420  0.64  0.66  0.61   0.56  1.8 0.92
## 
## Non missing response frequency for each item
##           1    2    3    4    5    6 miss
## Q39_1  0.20 0.18 0.35 0.17 0.08 0.03    0
## Q39_2  0.18 0.20 0.37 0.15 0.08 0.03    0
## Q39_3  0.40 0.31 0.20 0.06 0.02 0.00    0
## Q39_4  0.24 0.22 0.24 0.19 0.09 0.03    0
## Q39_5  0.48 0.23 0.17 0.07 0.04 0.01    0
## Q39_6  0.37 0.29 0.20 0.10 0.03 0.01    0
## Q39_8  0.14 0.18 0.29 0.24 0.12 0.03    0
## Q39_9  0.34 0.30 0.19 0.11 0.03 0.02    0
## Q39_10 0.49 0.32 0.15 0.03 0.00 0.01    0
psych::alpha(select(df2, starts_with("Q40")), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, starts_with("Q40")), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
##       0.91      0.91    0.91      0.63  10 0.0065  1.7 0.71     0.62
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt      0.9  0.91  0.92
## Duhachek   0.9  0.91  0.92
## 
##  Reliability if an item is dropped:
##       raw_alpha std.alpha G6(smc) average_r  S/N alpha se  var.r med.r
## Q40_1      0.91      0.91    0.91      0.67 10.2   0.0067 0.0100  0.64
## Q40_2      0.90      0.90    0.89      0.64  8.7   0.0075 0.0162  0.62
## Q40_3      0.91      0.91    0.90      0.66  9.7   0.0070 0.0111  0.64
## Q40_4      0.88      0.88    0.87      0.60  7.4   0.0092 0.0088  0.59
## Q40_5      0.88      0.88    0.87      0.60  7.6   0.0091 0.0080  0.61
## Q40_6      0.89      0.90    0.90      0.63  8.5   0.0080 0.0133  0.63
## 
##  Item statistics 
##         n raw.r std.r r.cor r.drop mean   sd
## Q40_1 420  0.76  0.76  0.68   0.65  1.7 0.89
## Q40_2 420  0.81  0.83  0.78   0.73  1.4 0.71
## Q40_3 420  0.76  0.78  0.72   0.67  1.4 0.71
## Q40_4 420  0.91  0.90  0.91   0.86  1.8 0.92
## Q40_5 420  0.91  0.89  0.89   0.85  1.8 0.95
## Q40_6 420  0.85  0.84  0.80   0.76  1.9 0.93
## 
## Non missing response frequency for each item
##          1    2    3    4 miss
## Q40_1 0.55 0.27 0.13 0.05    0
## Q40_2 0.67 0.22 0.10 0.01    0
## Q40_3 0.74 0.16 0.08 0.02    0
## Q40_4 0.49 0.27 0.19 0.05    0
## Q40_5 0.50 0.25 0.20 0.06    0
## Q40_6 0.47 0.26 0.23 0.05    0
psych::alpha(select(df2, starts_with("Q18")), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, starts_with("Q18")), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
##       0.91      0.91    0.92      0.41  10 0.0062  2.2 0.62     0.39
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt      0.9  0.91  0.92
## Duhachek   0.9  0.91  0.92
## 
##  Reliability if an item is dropped:
##        raw_alpha std.alpha G6(smc) average_r  S/N alpha se var.r med.r
## Q18_1       0.91      0.90    0.92      0.40  9.5   0.0066 0.013  0.38
## Q18_2       0.91      0.91    0.92      0.42 10.0   0.0063 0.013  0.39
## Q18_3       0.90      0.90    0.91      0.40  9.4   0.0068 0.011  0.38
## Q18_4       0.90      0.90    0.91      0.40  9.3   0.0068 0.010  0.38
## Q18_5       0.90      0.90    0.91      0.40  9.2   0.0069 0.011  0.38
## Q18_6       0.90      0.90    0.92      0.40  9.4   0.0067 0.013  0.38
## Q18_7       0.91      0.91    0.92      0.41  9.9   0.0064 0.013  0.39
## Q18_8       0.91      0.91    0.92      0.41  9.6   0.0065 0.013  0.38
## Q18_9       0.90      0.90    0.91      0.40  9.4   0.0067 0.013  0.38
## Q18_11      0.91      0.91    0.92      0.41  9.6   0.0066 0.013  0.38
## Q18_12      0.90      0.90    0.91      0.40  9.3   0.0068 0.013  0.38
## Q18_13      0.91      0.91    0.92      0.43 10.4   0.0062 0.012  0.41
## Q18_14      0.91      0.91    0.92      0.41  9.6   0.0066 0.013  0.39
## Q18_15      0.91      0.91    0.92      0.41  9.7   0.0065 0.014  0.39
## Q18_16      0.91      0.91    0.92      0.42 10.2   0.0063 0.012  0.41
## 
##  Item statistics 
##          n raw.r std.r r.cor r.drop mean   sd
## Q18_1  420  0.70  0.70  0.67   0.64  2.0 0.86
## Q18_2  419  0.58  0.58  0.53   0.50  2.3 0.98
## Q18_3  420  0.74  0.73  0.73   0.68  2.5 1.01
## Q18_4  420  0.76  0.75  0.76   0.71  2.5 1.02
## Q18_5  420  0.77  0.76  0.76   0.72  2.4 0.99
## Q18_6  420  0.73  0.72  0.69   0.67  2.5 1.04
## Q18_7  420  0.59  0.60  0.56   0.53  1.8 0.78
## Q18_8  420  0.66  0.67  0.64   0.61  1.7 0.79
## Q18_9  420  0.72  0.72  0.70   0.66  1.9 0.95
## Q18_11 420  0.69  0.68  0.66   0.62  2.3 1.02
## Q18_12 420  0.74  0.74  0.72   0.69  1.9 0.92
## Q18_13 420  0.50  0.51  0.45   0.42  2.8 0.87
## Q18_14 420  0.68  0.68  0.66   0.63  2.8 0.87
## Q18_15 420  0.65  0.65  0.62   0.59  2.2 0.91
## Q18_16 420  0.52  0.54  0.48   0.46  2.1 0.79
## 
## Non missing response frequency for each item
##           1    2    3    4 miss
## Q18_1  0.35 0.38 0.22 0.04    0
## Q18_2  0.27 0.27 0.35 0.10    0
## Q18_3  0.23 0.24 0.37 0.16    0
## Q18_4  0.22 0.24 0.37 0.17    0
## Q18_5  0.24 0.30 0.33 0.14    0
## Q18_6  0.22 0.21 0.37 0.19    0
## Q18_7  0.38 0.46 0.13 0.03    0
## Q18_8  0.52 0.34 0.12 0.03    0
## Q18_9  0.42 0.29 0.22 0.07    0
## Q18_11 0.28 0.29 0.30 0.14    0
## Q18_12 0.46 0.27 0.22 0.05    0
## Q18_13 0.08 0.26 0.45 0.21    0
## Q18_14 0.10 0.22 0.50 0.18    0
## Q18_15 0.24 0.38 0.30 0.09    0
## Q18_16 0.22 0.48 0.26 0.04    0
psych::alpha(select(df2, Q18_2, Q18_8:Q18_11), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, Q18_2, Q18_8:Q18_11), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##       0.79      0.79    0.75      0.49 3.8 0.016    2 0.74     0.48
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.76  0.79  0.82
## Duhachek  0.76  0.79  0.82
## 
##  Reliability if an item is dropped:
##        raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## Q18_2       0.77      0.77    0.71      0.53 3.4    0.019 0.0118  0.54
## Q18_8       0.77      0.77    0.71      0.53 3.4    0.019 0.0090  0.50
## Q18_9       0.68      0.68    0.59      0.42 2.1    0.026 0.0021  0.42
## Q18_11      0.72      0.73    0.65      0.47 2.6    0.023 0.0078  0.50
## 
##  Item statistics 
##          n raw.r std.r r.cor r.drop mean   sd
## Q18_2  419  0.75  0.74  0.60   0.54  2.3 0.98
## Q18_8  420  0.71  0.74  0.60   0.53  1.7 0.79
## Q18_9  420  0.85  0.85  0.81   0.72  1.9 0.95
## Q18_11 420  0.82  0.80  0.72   0.64  2.3 1.02
## 
## Non missing response frequency for each item
##           1    2    3    4 miss
## Q18_2  0.27 0.27 0.35 0.10    0
## Q18_8  0.52 0.34 0.12 0.03    0
## Q18_9  0.42 0.29 0.22 0.07    0
## Q18_11 0.28 0.29 0.30 0.14    0
psych::alpha(select(df2, Q18_3:Q18_6), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, Q18_3:Q18_6), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
##       0.89      0.89    0.86      0.66 7.8 0.0093  2.5 0.88     0.65
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.87  0.89   0.9
## Duhachek  0.87  0.89   0.9
## 
##  Reliability if an item is dropped:
##       raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## Q18_3      0.84      0.84    0.78      0.63 5.2   0.0138 0.0061  0.60
## Q18_4      0.82      0.82    0.77      0.61 4.7   0.0150 0.0054  0.58
## Q18_5      0.85      0.85    0.81      0.66 5.7   0.0131 0.0169  0.60
## Q18_6      0.90      0.90    0.86      0.74 8.6   0.0089 0.0032  0.72
## 
##  Item statistics 
##         n raw.r std.r r.cor r.drop mean   sd
## Q18_3 420  0.88  0.89  0.85   0.79  2.5 1.01
## Q18_4 420  0.91  0.91  0.89   0.82  2.5 1.02
## Q18_5 420  0.86  0.87  0.80   0.76  2.4 0.99
## Q18_6 420  0.80  0.79  0.67   0.64  2.5 1.04
## 
## Non missing response frequency for each item
##          1    2    3    4 miss
## Q18_3 0.23 0.24 0.37 0.16    0
## Q18_4 0.22 0.24 0.37 0.17    0
## Q18_5 0.24 0.30 0.33 0.14    0
## Q18_6 0.22 0.21 0.37 0.19    0
psych::alpha(select(df2, Q18_12:Q18_15), na.rm = TRUE)
## 
## Reliability analysis   
## Call: psych::alpha(x = select(df2, Q18_12:Q18_15), na.rm = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##       0.72      0.72    0.67      0.39 2.6 0.022  2.4 0.66     0.37
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.67  0.72  0.76
## Duhachek  0.68  0.72  0.76
## 
##  Reliability if an item is dropped:
##        raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## Q18_12      0.70      0.70    0.62      0.43 2.3    0.026 0.0159  0.37
## Q18_13      0.71      0.71    0.63      0.45 2.4    0.025 0.0132  0.38
## Q18_14      0.61      0.61    0.51      0.34 1.6    0.033 0.0023  0.35
## Q18_15      0.61      0.61    0.52      0.35 1.6    0.033 0.0024  0.37
## 
##  Item statistics 
##          n raw.r std.r r.cor r.drop mean   sd
## Q18_12 420  0.70  0.70  0.52   0.44  1.9 0.92
## Q18_13 420  0.68  0.68  0.49   0.43  2.8 0.87
## Q18_14 420  0.79  0.79  0.71   0.59  2.8 0.87
## Q18_15 420  0.79  0.78  0.70   0.58  2.2 0.91
## 
## Non missing response frequency for each item
##           1    2    3    4 miss
## Q18_12 0.46 0.27 0.22 0.05    0
## Q18_13 0.08 0.26 0.45 0.21    0
## Q18_14 0.10 0.22 0.50 0.18    0
## Q18_15 0.24 0.38 0.30 0.09    0
# Get descriptive statistics for df2
desc_stats <- describe(subset(df2, select=c(phs, pse, eds, ers, idm_all, idm_da, idm_ib, idm_dp)))

# Create a new column to identify rows with skew or kurtosis greater than 2
desc_stats <- desc_stats %>%
  mutate(highlight = ifelse(abs(skew) > 2 | abs(kurtosis) > 2, "highlight", "no"))

# Create a table with highlighted rows for high skewness or kurtosis
desc_stats %>%
  kable(format = "html", digits = 2) %>%
  kable_styling(full_width = FALSE, bootstrap_options = c("striped", "hover", "condensed")) %>%
  row_spec(which(desc_stats$highlight == "highlight"), background = "red")
         vars   n mean   sd median trimmed  mad min  max range  skew kurtosis   se highlight
phs         1 420 3.07 0.89   3.17    3.09 0.74   1 5.00  4.00 -0.20    -0.41 0.04        no
pse         2 420 3.20 0.81   3.25    3.21 0.74   1 5.00  4.00 -0.08    -0.07 0.04        no
eds         3 420 2.41 0.88   2.33    2.38 0.82   1 5.22  4.22  0.38    -0.19 0.04        no
ers         4 420 1.66 0.71   1.50    1.56 0.74   1 4.00  3.00  1.00     0.25 0.03        no
idm_all     5 420 2.24 0.62   2.27    2.23 0.69   1 4.00  3.00  0.13    -0.52 0.03        no
idm_da      6 420 2.04 0.74   2.00    2.01 0.74   1 4.00  3.00  0.25    -0.90 0.04        no
idm_ib      7 420 2.46 0.88   2.50    2.47 1.11   1 4.00  3.00 -0.12    -0.98 0.04        no
idm_dp      8 420 2.41 0.66   2.50    2.41 0.74   1 4.00  3.00  0.05    -0.25 0.03        no

6 Correlations

# Select the relevant columns from df2
df2_subset <- df2 %>% select(phs, pse, eds, ers, idm_da, idm_ib, idm_dp)

# Compute the correlation matrix with significance tests
cor_results <- corr.test(df2_subset, use = "pairwise.complete.obs")

# Extract the correlation matrix
cor_matrix <- cor_results$r

# Extract the p-values matrix
p_matrix <- cor_results$p

custom_labels <- c("1. Perceived Helplessness Scale", "2. Perceived Self-Efficacy Scale", "3. Everyday Discrimination Scale", "4. Expectation of Rejection Scale", "5. Distrust of Authority", "6. Impact of Betrayal", "7. Defensive Processing")

rownames(cor_matrix) <- custom_labels
colnames(cor_matrix) <- c("1","2","3","4","5","6","7")
rownames(p_matrix) <- custom_labels
colnames(p_matrix) <- c("1","2","3","4","5","6","7")

# Display the correlation matrix and p-values
print(cor_matrix)
##                                            1          2          3          4
## 1. Perceived Helplessness Scale    1.0000000 -0.5525927  0.4487035  0.2473386
## 2. Perceived Self-Efficacy Scale  -0.5525927  1.0000000 -0.2751909 -0.2914625
## 3. Everyday Discrimination Scale   0.4487035 -0.2751909  1.0000000  0.4781745
## 4. Expectation of Rejection Scale  0.2473386 -0.2914625  0.4781745  1.0000000
## 5. Distrust of Authority           0.2039244 -0.2801179  0.4060215  0.4810972
## 6. Impact of Betrayal              0.3516796 -0.3581316  0.4642664  0.3858347
## 7. Defensive Processing            0.2895630 -0.2907679  0.4159427  0.4839643
##                                            5          6          7
## 1. Perceived Helplessness Scale    0.2039244  0.3516796  0.2895630
## 2. Perceived Self-Efficacy Scale  -0.2801179 -0.3581316 -0.2907679
## 3. Everyday Discrimination Scale   0.4060215  0.4642664  0.4159427
## 4. Expectation of Rejection Scale  0.4810972  0.3858347  0.4839643
## 5. Distrust of Authority           1.0000000  0.5684352  0.6498982
## 6. Impact of Betrayal              0.5684352  1.0000000  0.6695232
## 7. Defensive Processing            0.6498982  0.6695232  1.0000000
print(p_matrix)
##                                              1            2            3
## 1. Perceived Helplessness Scale   0.000000e+00 1.070555e-33 4.398481e-21
## 2. Perceived Self-Efficacy Scale  5.947526e-35 0.000000e+00 2.942543e-08
## 3. Everyday Discrimination Scale  3.383447e-22 9.808477e-09 0.000000e+00
## 4. Expectation of Rejection Scale 2.839391e-07 1.142711e-09 2.205667e-25
## 5. Distrust of Authority          2.541488e-05 5.190695e-09 4.225280e-18
## 6. Impact of Betrayal             1.133861e-13 3.726904e-14 7.683528e-24
## 7. Defensive Processing           1.479247e-09 1.256112e-09 5.308695e-19
##                                              4            5            6
## 1. Perceived Helplessness Scale   5.678782e-07 2.541488e-05 9.070885e-13
## 2. Perceived Self-Efficacy Scale  7.998980e-09 2.076278e-08 3.354214e-13
## 3. Everyday Discrimination Scale  3.308501e-24 4.647808e-17 1.075694e-22
## 4. Expectation of Rejection Scale 0.000000e+00 1.638917e-24 2.334976e-15
## 5. Distrust of Authority          1.024323e-25 0.000000e+00 4.899512e-36
## 6. Impact of Betrayal             2.334976e-16 2.578690e-37 0.000000e+00
## 7. Defensive Processing           4.792185e-26 9.162543e-52 6.114285e-56
##                                              7
## 1. Perceived Helplessness Scale   7.998980e-09
## 2. Perceived Self-Efficacy Scale  7.998980e-09
## 3. Everyday Discrimination Scale  6.370434e-18
## 4. Expectation of Rejection Scale 8.146715e-25
## 5. Distrust of Authority          1.832509e-50
## 6. Impact of Betrayal             1.284000e-54
## 7. Defensive Processing           0.000000e+00
corrplot(cor_matrix, 
         method = "color",      # Use color for correlation visualization
         type = "upper",        # Display only upper triangle
         addCoef.col = "white", # Add coefficients in black
         tl.col = "black",      # Set text label color
         number.cex = 0.8,      # Adjust text size
         p.mat = p_matrix,      # Provide p-values for significance
         sig.level = 0.05,      # Set significance level
         insig = "blank")       # Hide insignificant correlations

7 Regressions

Significance threshold: Bonferroni-corrected for the 18 coefficient tests, α = 0.05 / 18 ≈ .003.

For a standardized coefficient β, effect sizes of 0.10–0.29 are conventionally considered small, 0.30–0.49 medium, and 0.50 or greater large.
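To make these two conventions concrete, here is an illustrative snippet (not part of the original analysis; classify_beta is a hypothetical helper) that computes the corrected threshold and bins standardized coefficients by the cutoffs above:

alpha_corrected <- 0.05 / 18   # Bonferroni-corrected threshold, ~.0028

# Hypothetical helper: classify |beta| using the conventions above
classify_beta <- function(b) {
  cut(abs(b),
      breaks = c(0, 0.10, 0.30, 0.50, Inf),
      labels = c("negligible", "small", "medium", "large"),
      right = FALSE)
}
classify_beta(c(0.20, 0.35, -0.55))   # small, medium, large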

df2$phs_std <- c(scale(df2$phs, center = T, scale= T))
df2$pse_std <- c(scale(df2$pse, center = T, scale= T))
df2$eds_std <- c(scale(df2$eds, center = T, scale= T))
df2$ers_std <- c(scale(df2$ers, center = T, scale= T))
df2$idm_da_std <- c(scale(df2$idm_da, center = T, scale= T))
df2$idm_ib_std <- c(scale(df2$idm_ib, center = T, scale= T))
df2$idm_dp_std <- c(scale(df2$idm_dp, center = T, scale= T))
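As an aside, the seven assignments above can be written more compactly with dplyr::across(); this sketch is behaviorally equivalent (as.numeric() drops the matrix attributes that scale() returns, just as c() does above):

df2 <- df2 %>%
  mutate(across(c(phs, pse, eds, ers, idm_da, idm_ib, idm_dp),
                ~ as.numeric(scale(.x)),   # z-score each column
                .names = "{.col}_std"))    # e.g., phs -> phs_std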

# Append demographic and sample-source columns (columns 15:20 of the samp data frame created earlier in the document)
df2 <- cbind.data.frame(df2, samp[15:20])

# Convert grouping variables to factors for modeling
df2$race2 <- as.factor(df2$race2)
df2$gen <- as.factor(df2$gen)
df2$sexid2 <- as.factor(df2$sexid2)
df2$samp <- as.factor(df2$samp)

7.0.1 Defensive Processing

reg_model <- lm(idm_dp_std ~ eds_std + ers_std + phs_std + pse_std, data = df2)
car::vif(reg_model)
##  eds_std  ers_std  phs_std  pse_std 
## 1.530476 1.349837 1.674678 1.498287
plot(reg_model, 1)  # Residuals vs Fitted (linearity)

plot(reg_model, 2)  # Normal Q-Q (normality of residuals)

plot(reg_model, 4)  # Cook's distance (influential observations)

plot(reg_model, 5)  # Residuals vs Leverage

plot(reg_model, 3)  # Scale-Location (homoscedasticity)

summary(reg_model)
## 
## Call:
## lm(formula = idm_dp_std ~ eds_std + ers_std + phs_std + pse_std, 
##     data = df2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.07213 -0.53931 -0.05537  0.50460  2.63659 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.180e-16  4.114e-02   0.000 1.000000    
## eds_std      1.957e-01  5.096e-02   3.840 0.000142 ***
## ers_std      3.456e-01  4.786e-02   7.222 2.47e-12 ***
## phs_std      5.905e-02  5.331e-02   1.108 0.268584    
## pse_std     -1.036e-01  5.042e-02  -2.054 0.040618 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8432 on 415 degrees of freedom
## Multiple R-squared:  0.2959, Adjusted R-squared:  0.2891 
## F-statistic: 43.59 on 4 and 415 DF,  p-value: < 2.2e-16
confint(reg_model)
##                   2.5 %       97.5 %
## (Intercept) -0.08087288  0.080872884
## eds_std      0.09552146  0.295859681
## ers_std      0.25152990  0.439674293
## phs_std     -0.04572965  0.163834122
## pse_std     -0.20266386 -0.004443616
plot_model(reg_model, type="pred", show.data = T, axis.lim(-3,3))
## $eds_std

## 
## $ers_std

## 
## $phs_std

## 
## $pse_std

# Reshape data from wide to long format
long_data <- df2 %>%
  pivot_longer(cols = c(eds_std, ers_std, phs_std, pse_std), 
               names_to = "IV", 
               values_to = "Value")

# Get the coefficients (intercept and slopes)
coefficients <- coef(reg_model)
coeff_df <- data.frame(
  IV = names(coefficients)[-1],  # Remove intercept
  Slope = coefficients[-1],      # Extract slopes
  Intercept = coefficients[1]    # Store intercept separately
)

# Create a named vector for facet label replacements
facet_labels <- c(
  "eds_std" = "Everyday Discrimination",
  "ers_std" = "Expectation of Rejection",
  "phs_std" = "Perceived Helplessness",
  "pse_std" = "Perceived Self-Efficacy"
)

ggplot(long_data, aes(x = Value, y = idm_dp_std)) +
  geom_point(alpha = 0.1) +
  geom_abline(data = coeff_df, aes(slope = Slope, intercept = Intercept), linetype = "dashed", size=1.2) +
  facet_wrap(~IV, nrow = 1, strip.position = "bottom", labeller = as_labeller(facet_labels)) +  # Move facet labels to the bottom
  xlim(-3, 3) + ylim(-3, 3) +
  theme_minimal() +
  theme(strip.background = element_blank(),  # Remove strip background
        strip.placement = "outside",         # Move labels outside the plot area
        axis.title.x = element_blank(),    # Remove "IV Value" text
        text = element_text(size = 14),      # Increase overall text size
        axis.text = element_text(size = 11), # Increase axis label size
        strip.text = element_text(size = 11)) +
  labs(y = "IDM (Defensive Processing)")
## Warning: Removed 6 rows containing missing values (geom_point).

7.0.1.1 Comparison

reg_model <- lm(idm_dp_std ~ samp + sexid2 + race2 + gen, data = df2)
car::vif(reg_model, type = "predictor")
## GVIFs computed for predictors
##            GVIF Df GVIF^(1/(2*Df)) Interacts With    Other Predictors
## samp   1.049343  1        1.024374           --    sexid2, race2, gen
## sexid2 1.115890  1        1.056357           --      samp, race2, gen
## race2  1.008641  1        1.004311           --     samp, sexid2, gen
## gen    1.155103  2        1.036705           --   samp, sexid2, race2
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_dp_std ~ samp + sexid2 + race2 + gen, data = df2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.55685 -0.56236  0.07097  0.60701  2.37022 
## 
## Coefficients:
##                Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     0.71396    0.15058   4.741 2.93e-06 ***
## sampSONA       -0.51053    0.09462  -5.396 1.15e-07 ***
## sexid2Straight -0.27925    0.12485  -2.237 0.025843 *  
## race2WP        -0.37192    0.09926  -3.747 0.000204 ***
## genN            0.40040    0.29746   1.346 0.179023    
## genW           -0.02168    0.10047  -0.216 0.829294    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9417 on 413 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.1243, Adjusted R-squared:  0.1137 
## F-statistic: 11.72 on 5 and 413 DF,  p-value: 1.305e-10
confint(reg_model)
##                     2.5 %      97.5 %
## (Intercept)     0.4179560  1.00997046
## sampSONA       -0.6965224 -0.32452797
## sexid2Straight -0.5246742 -0.03382504
## race2WP        -0.5670370 -0.17680626
## genN           -0.1843291  0.98513367
## genW           -0.2191827  0.17582902
plot_model(reg_model, type="est", show.data = T)

# Convert regression results into a tidy format
reg_results <- tidy(reg_model, conf.int = TRUE)
reg_results$term <- factor(reg_results$term, levels = rev(reg_results$term))  # Reverse levels so terms plot top-to-bottom in the original order

# Plot Coefficient Estimates with 95% CI
ggplot(reg_results, aes(x = estimate, y = term)) +
  geom_point(color = "blue", size = 3) +  # Point estimate
  geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0.2, color = "black") +  # CI bars
  geom_vline(xintercept = 0, linetype = "dashed", color = "red") +  # Reference line at 0
  xlim(-1,1.5) +
  scale_y_discrete(labels = c(
    "sampSONA" = "Current College Student", 
    "sexid2Straight" = "Sexuality (Straight)", 
    "race2WP" = "Race (White)",
    "genN" = "Gender (Nonbinary)",
    "genW" = "Gender (Woman)"
  )) +
  theme_minimal() +
  theme(  # applied after theme_minimal() so these font settings are not overwritten
    text = element_text(family = "Arial Narrow"),  # Set all text to Arial Narrow
    axis.text = element_text(family = "Arial Narrow"),
    axis.title = element_text(family = "Arial Narrow"),
    plot.title = element_text(family = "Arial Narrow", face = "bold"),
    legend.text = element_text(family = "Arial Narrow"),
    legend.title = element_text(family = "Arial Narrow")
  ) +
  labs(x = NULL,
       y = NULL)

7.0.2 Distrust of Authority

reg_model <- lm(idm_da_std ~ eds_std + ers_std + phs_std + pse_std, data = df2)
car::vif(reg_model)
##  eds_std  ers_std  phs_std  pse_std 
## 1.530476 1.349837 1.674678 1.498287
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_da_std ~ eds_std + ers_std + phs_std + pse_std, 
##     data = df2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.9647 -0.6496 -0.1008  0.5313  2.9961 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) -3.650e-16  4.137e-02   0.000  1.00000    
## eds_std      2.309e-01  5.125e-02   4.506 8.60e-06 ***
## ers_std      3.428e-01  4.813e-02   7.122 4.73e-12 ***
## phs_std     -7.045e-02  5.361e-02  -1.314  0.18951    
## pse_std     -1.556e-01  5.071e-02  -3.069  0.00229 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8479 on 415 degrees of freedom
## Multiple R-squared:  0.2879, Adjusted R-squared:  0.281 
## F-statistic: 41.94 on 4 and 415 DF,  p-value: < 2.2e-16
confint(reg_model)
##                   2.5 %      97.5 %
## (Intercept) -0.08133043  0.08133043
## eds_std      0.13018128  0.33165294
## ers_std      0.24814662  0.43735547
## phs_std     -0.17582377  0.03492564
## pse_std     -0.25527304 -0.05593133
plot_model(reg_model, type="pred", show.data = T, axis.lim(-3,3))
## $eds_std

## 
## $ers_std

## 
## $phs_std

## 
## $pse_std

# Reshape data from wide to long format
long_data <- df2 %>%
  pivot_longer(cols = c(eds_std, ers_std, phs_std, pse_std), 
               names_to = "IV", 
               values_to = "Value")

# Get the coefficients (intercept and slopes)
coefficients <- coef(reg_model)
coeff_df <- data.frame(
  IV = names(coefficients)[-1],  # Remove intercept
  Slope = coefficients[-1],      # Extract slopes
  Intercept = coefficients[1]    # Store intercept separately
)

# Create a named vector for facet label replacements
facet_labels <- c(
  "eds_std" = "Everyday Discrimination",
  "ers_std" = "Expectation of Rejection",
  "phs_std" = "Perceived Helplessness",
  "pse_std" = "Perceived Self-Efficacy"
)

ggplot(long_data, aes(x = Value, y = idm_da_std)) +
  geom_point(alpha = 0.1) +
  geom_abline(data = coeff_df, aes(slope = Slope, intercept = Intercept), linetype = "dashed", size=1.2) +
  facet_wrap(~IV, nrow = 1, strip.position = "bottom", labeller = as_labeller(facet_labels)) +  # Move facet labels to the bottom
  xlim(-3, 3) + ylim(-3, 3) +
  theme_minimal() +
  theme(strip.background = element_blank(),  # Remove strip background
        strip.placement = "outside",         # Move labels outside the plot area
        axis.title.x = element_blank(),    # Remove "IV Value" text
        text = element_text(size = 14),      # Increase overall text size
        axis.text = element_text(size = 11), # Increase axis label size
        strip.text = element_text(size = 11)) +
  labs(y = "IDM (Distrust of Authority)")
## Warning: Removed 6 rows containing missing values (geom_point).

7.0.2.1 Comparison

reg_model <- lm(idm_da_std ~ samp + sexid2 + race2 + gen, data = df2)
car::vif(reg_model, type = "predictor")
## GVIFs computed for predictors
##            GVIF Df GVIF^(1/(2*Df)) Interacts With    Other Predictors
## samp   1.049343  1        1.024374           --    sexid2, race2, gen
## sexid2 1.115890  1        1.056357           --      samp, race2, gen
## race2  1.008641  1        1.004311           --     samp, sexid2, gen
## gen    1.155103  2        1.036705           --   samp, sexid2, race2
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_da_std ~ samp + sexid2 + race2 + gen, data = df2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.07781 -0.76126  0.02336  0.61454  2.18557 
## 
## Coefficients:
##                Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     1.12943    0.14250   7.926 2.13e-14 ***
## sampSONA       -0.57119    0.08954  -6.379 4.78e-10 ***
## sexid2Straight -0.46626    0.11815  -3.946 9.32e-05 ***
## race2WP        -0.53228    0.09393  -5.667 2.73e-08 ***
## genN            0.24497    0.28149   0.870   0.3847    
## genW           -0.21308    0.09508  -2.241   0.0256 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8911 on 413 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.2166, Adjusted R-squared:  0.2071 
## F-statistic: 22.83 on 5 and 413 DF,  p-value: < 2.2e-16
confint(reg_model)
##                     2.5 %      97.5 %
## (Intercept)     0.8493162  1.40953760
## sampSONA       -0.7471982 -0.39518105
## sexid2Straight -0.6985029 -0.23401381
## race2WP        -0.7169182 -0.34764408
## genN           -0.3083633  0.79829571
## genW           -0.3999794 -0.02618095
plot_model(reg_model, type="est", show.data = T)

# Convert regression results into a tidy format
reg_results <- tidy(reg_model, conf.int = TRUE)
reg_results$term <- factor(reg_results$term, levels = rev(reg_results$term))  # Reverse levels so terms plot top-to-bottom in the original order

# Plot Coefficient Estimates with 95% CI
ggplot(reg_results, aes(x = estimate, y = term)) +
  geom_point(color = "blue", size = 3) +  # Point estimate
  geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0.2, color = "black") +  # CI bars
  geom_vline(xintercept = 0, linetype = "dashed", color = "red") +  # Reference line at 0
  xlim(-1,1.5) +
  scale_y_discrete(labels = c(
    "sampSONA" = "Current College Student", 
    "sexid2Straight" = "Sexuality (Straight)", 
    "race2WP" = "Race (White)",
    "genN" = "Gender (Nonbinary)",
    "genW" = "Gender (Woman)"
  )) +
  theme_minimal() +
  theme(  # applied after theme_minimal() so these font settings are not overwritten
    text = element_text(family = "Arial Narrow"),  # Set all text to Arial Narrow
    axis.text = element_text(family = "Arial Narrow"),
    axis.title = element_text(family = "Arial Narrow"),
    plot.title = element_text(family = "Arial Narrow", face = "bold"),
    legend.text = element_text(family = "Arial Narrow"),
    legend.title = element_text(family = "Arial Narrow")
  ) +
  labs(x = NULL,
       y = NULL)

7.0.3 Impact of Betrayal

reg_model <- lm(idm_ib_std ~ eds_std + ers_std + phs_std + pse_std, data = df2)
car::vif(reg_model)
##  eds_std  ers_std  phs_std  pse_std 
## 1.530476 1.349837 1.674678 1.498287
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_ib_std ~ eds_std + ers_std + phs_std + pse_std, 
##     data = df2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.40500 -0.60080 -0.01854  0.55490  2.31150 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  6.784e-16  4.112e-02   0.000 1.000000    
## eds_std      2.989e-01  5.093e-02   5.869 8.99e-09 ***
## ers_std      1.708e-01  4.783e-02   3.570 0.000398 ***
## phs_std      7.254e-02  5.327e-02   1.362 0.174014    
## pse_std     -1.860e-01  5.039e-02  -3.692 0.000252 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8426 on 415 degrees of freedom
## Multiple R-squared:  0.2968, Adjusted R-squared:   0.29 
## F-statistic: 43.78 on 4 and 415 DF,  p-value: < 2.2e-16
confint(reg_model)
##                   2.5 %      97.5 %
## (Intercept) -0.08082060  0.08082060
## eds_std      0.19876583  0.39897453
## ers_std      0.07674827  0.26477103
## phs_std     -0.03217235  0.17725594
## pse_std     -0.28507516 -0.08698306
plot_model(reg_model, type="pred", show.data = T, axis.lim(-3,3))
## $eds_std

## 
## $ers_std

## 
## $phs_std

## 
## $pse_std

# Reshape data from wide to long format
long_data <- df2 %>%
  pivot_longer(cols = c(eds_std, ers_std, phs_std, pse_std), 
               names_to = "IV", 
               values_to = "Value")

# Get the coefficients (intercept and slopes)
coefficients <- coef(reg_model)
coeff_df <- data.frame(
  IV = names(coefficients)[-1],  # Remove intercept
  Slope = coefficients[-1],      # Extract slopes
  Intercept = coefficients[1]    # Store intercept separately
)

# Create a named vector for facet label replacements
facet_labels <- c(
  "eds_std" = "Everyday Discrimination",
  "ers_std" = "Expectation of Rejection",
  "phs_std" = "Perceived Helplessness",
  "pse_std" = "Perceived Self-Efficacy"
)

ggplot(long_data, aes(x = Value, y = idm_ib_std)) +
  geom_point(alpha = 0.1) +
  geom_abline(data = coeff_df, aes(slope = Slope, intercept = Intercept), linetype = "dashed", size=1.2) +
  facet_wrap(~IV, nrow = 1, strip.position = "bottom", labeller = as_labeller(facet_labels)) +  # Move facet labels to the bottom
  xlim(-3, 3) + ylim(-3, 3) +
  theme_minimal() +
  theme(strip.background = element_blank(),  # Remove strip background
        strip.placement = "outside",         # Move labels outside the plot area
        axis.title.x = element_blank(),    # Remove "IV Value" text
        text = element_text(size = 14),      # Increase overall text size
        axis.text = element_text(size = 11), # Increase axis label size
        strip.text = element_text(size = 11)) +
  labs(y = "IDM (Impact of Betrayal)")
## Warning: Removed 6 rows containing missing values (geom_point).

7.0.3.1 Comparison

reg_model <- lm(idm_ib_std ~ samp + sexid2 + race2 + gen, data = df2)
car::vif(reg_model, type = "predictor")
## GVIFs computed for predictors
##            GVIF Df GVIF^(1/(2*Df)) Interacts With    Other Predictors
## samp   1.049343  1        1.024374           --    sexid2, race2, gen
## sexid2 1.115890  1        1.056357           --      samp, race2, gen
## race2  1.008641  1        1.004311           --     samp, sexid2, gen
## gen    1.155103  2        1.036705           --   samp, sexid2, race2
plot(reg_model, 1)

plot(reg_model, 2)

plot(reg_model, 4)

plot(reg_model, 5)

plot(reg_model, 3)

summary(reg_model)
## 
## Call:
## lm(formula = idm_ib_std ~ samp + sexid2 + race2 + gen, data = df2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.1755 -0.7515  0.1049  0.7219  2.1032 
## 
## Coefficients:
##                Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     0.53385    0.15234   3.504 0.000508 ***
## sampSONA       -0.51233    0.09572  -5.352 1.44e-07 ***
## sexid2Straight -0.34074    0.12631  -2.698 0.007269 ** 
## race2WP        -0.23942    0.10042  -2.384 0.017565 *  
## genN            0.19139    0.30094   0.636 0.525134    
## genW            0.21106    0.10165   2.076 0.038478 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9527 on 413 degrees of freedom
##   (1 observation deleted due to missingness)
## Multiple R-squared:  0.1046, Adjusted R-squared:  0.09377 
## F-statistic:  9.65 on 5 and 413 DF,  p-value: 9.982e-09
confint(reg_model)
##                      2.5 %      97.5 %
## (Intercept)     0.23438370  0.83330891
## sampSONA       -0.70050328 -0.32416644
## sexid2Straight -0.58902992 -0.09245092
## race2WP        -0.43681299 -0.04202701
## genN           -0.40016419  0.78295005
## genW            0.01124702  0.41086986
plot_model(reg_model, type="est", show.data = T)

# Convert regression results into a tidy format
reg_results <- tidy(reg_model, conf.int = TRUE)
reg_results$term <- factor(reg_results$term, levels = rev(reg_results$term))  # Reverse levels so terms plot top-to-bottom in the original order

# Plot Coefficient Estimates with 95% CI
ggplot(reg_results, aes(x = estimate, y = term)) +
  geom_point(color = "blue", size = 3) +  # Point estimate
  geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0.2, color = "black") +  # CI bars
  geom_vline(xintercept = 0, linetype = "dashed", color = "red") +  # Reference line at 0
  xlim(-1,1.5) +
  scale_y_discrete(labels = c(
    "sampSONA" = "Current College Student", 
    "sexid2Straight" = "Sexuality (Straight)", 
    "race2WP" = "Race (White)",
    "genN" = "Gender (Nonbinary)",
    "genW" = "Gender (Woman)"
  )) +
  theme_minimal() +
  theme(  # applied after theme_minimal() so these font settings are not overwritten
    text = element_text(family = "Arial Narrow"),  # Set all text to Arial Narrow
    axis.text = element_text(family = "Arial Narrow"),
    axis.title = element_text(family = "Arial Narrow"),
    plot.title = element_text(family = "Arial Narrow", face = "bold"),
    legend.text = element_text(family = "Arial Narrow"),
    legend.title = element_text(family = "Arial Narrow")
  ) +
  labs(x = NULL,
       y = NULL)