Load library and data

library(tidyverse) 
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
data <- read.csv("~/Google Drive/My Drive/YEAR 2/PROJECTS/DEREK/Outliers and Credibility Pilot/outlier_cred_raw_data.csv") %>% 
  slice(-c(1:2)) %>%     # drop the first two rows (not participant data)
  filter(attn == 24)     # keep only rows where the attention-check item equals 24
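
A quick check on the exclusions might look like this (a minimal sketch; it just counts the remaining rows per condition in the same data frame used throughout):

# how many participants remain in each condition after the exclusions
data %>% count(condition)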

Did the manipulation work?

ggplot(data = data, 
       aes(x = condition, y = as.numeric(mc))) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## ℹ Please use `linewidth` instead.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

Yep! Looks good.
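
To pair a formal test with the plot, the same Welch t-test used for the other measures below could be run on the manipulation-check item (a sketch; mc is coerced to numeric exactly as in the plot above):

# Welch t-test on the manipulation check by condition
t.test(as.numeric(mc) ~ condition, data = data, var.equal = FALSE)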

Are deviants seen as less warm?

data <- data %>% 
  # convert the four warmth items from character to numeric
  mutate(across(c(warmth_1, warmth_2, warmth_3, warmth_4), as.numeric)) %>% 
  rowwise() %>% 
  # per-participant mean of the four warmth items (c_across averages the item columns themselves)
  mutate(warmth = mean(c_across(c(warmth_1, warmth_2, warmth_3, warmth_4)), na.rm = TRUE)) %>% 
  ungroup()
ggplot(data = data, 
       aes(x = condition, y = warmth)) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

t.test(warmth ~ condition, data = data, var.equal = F)
## 
##  Welch Two Sample t-test
## 
## data:  warmth by condition
## t = 3.5453, df = 187.07, p-value = 0.0004955
## alternative hypothesis: true difference in means between group conform and group deviant is not equal to 0
## 95 percent confidence interval:
##  0.1614312 0.5664476
## sample estimates:
## mean in group conform mean in group deviant 
##              3.470000              3.106061

Yep!

Are deviants seen as less competent?

data <- data %>% 
  # convert the five competence items from character to numeric
  mutate(across(c(competence_1, competence_2, competence_3, competence_4, competence_5), as.numeric)) %>% 
  rowwise() %>% 
  # per-participant mean of the five competence items
  mutate(competence = mean(c_across(c(competence_1, competence_2, competence_3, competence_4, competence_5)), na.rm = TRUE)) %>% 
  ungroup()
ggplot(data = data, 
       aes(x = condition, y = competence)) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

t.test(competence ~ condition, data = data, var.equal = F)
## 
##  Welch Two Sample t-test
## 
## data:  competence by condition
## t = 3.0208, df = 195.95, p-value = 0.002857
## alternative hypothesis: true difference in means between group conform and group deviant is not equal to 0
## 95 percent confidence interval:
##  0.1168354 0.5562959
## sample estimates:
## mean in group conform mean in group deviant 
##              3.680000              3.343434

Yep!

Are deviants seen as less competent (from a status beliefs angle)?

data <- data %>% 
  # convert the three status-belief items from character to numeric
  mutate(across(c(status.beliefs_1, status.beliefs_2, status.beliefs_3), as.numeric)) %>% 
  rowwise() %>% 
  # per-participant mean of the three status-belief items
  mutate(status_beliefs = mean(c_across(c(status.beliefs_1, status.beliefs_2, status.beliefs_3)), na.rm = TRUE)) %>% 
  ungroup()
ggplot(data = data, 
       aes(x = condition, y = status_beliefs)) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

t.test(status_beliefs ~ condition, data = data, var.equal = F)
## 
##  Welch Two Sample t-test
## 
## data:  status_beliefs by condition
## t = 3.5927, df = 193.27, p-value = 0.0004152
## alternative hypothesis: true difference in means between group conform and group deviant is not equal to 0
## 95 percent confidence interval:
##  0.1940744 0.6665317
## sample estimates:
## mean in group conform mean in group deviant 
##              3.900000              3.469697

Yep!

Are deviants seen as less credible (face valid item)?

ggplot(data = data, 
       aes(x = condition, y = as.numeric(credibility))) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

t.test(as.numeric(credibility) ~ condition, data = data, var.equal = F)
## 
##  Welch Two Sample t-test
## 
## data:  as.numeric(credibility) by condition
## t = 3.5628, df = 195.54, p-value = 0.000461
## alternative hypothesis: true difference in means between group conform and group deviant is not equal to 0
## 95 percent confidence interval:
##  0.1642837 0.5716759
## sample estimates:
## mean in group conform mean in group deviant 
##               3.57000               3.20202

Yep!

Are deviants seen as less trustworthy (face valid item)?

ggplot(data = data, 
       aes(x = condition, y = as.numeric(trustworthy))) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

t.test(as.numeric(trustworthy) ~ condition, data = data, var.equal = F)
## 
##  Welch Two Sample t-test
## 
## data:  as.numeric(trustworthy) by condition
## t = 4.1393, df = 193.11, p-value = 5.199e-05
## alternative hypothesis: true difference in means between group conform and group deviant is not equal to 0
## 95 percent confidence interval:
##  0.2402364 0.6775413
## sample estimates:
## mean in group conform mean in group deviant 
##              3.570000              3.111111

Yep!

Are deviants seen as having less expertise (face valid item)?

ggplot(data = data, 
       aes(x = condition, y = as.numeric(expertise))) +
  geom_point(alpha = 0.1,
             size = 2,
             position = position_jitter(0.1)) +
  stat_summary(fun.data = "mean_cl_boot",
               size = 1,
               geom = "linerange",
               color = "grey50")+
  stat_summary(fun = "mean",
               size = 0.3)+
  theme_bw() 
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_segment()`).

t.test(as.numeric(expertise) ~ condition, data = data, var.equal = F)
## 
##  Welch Two Sample t-test
## 
## data:  as.numeric(expertise) by condition
## t = 3.8015, df = 194.23, p-value = 0.0001924
## alternative hypothesis: true difference in means between group conform and group deviant is not equal to 0
## 95 percent confidence interval:
##  0.2106067 0.6647468
## sample estimates:
## mean in group conform mean in group deviant 
##              3.670000              3.232323

Yep!

What gender do participants think Alex is?

table(data$condition, data$gender_assumption)
##          
##            1  2  3
##   conform 87  5  8
##   deviant 84  9  6

Participants in both conditions mostly assume Alex is a man… ok…
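
To confirm that the assumed gender doesn't differ by condition, a simple test on the same table would do it (a sketch; with cells this small, Fisher's exact test is safer than a chi-square):

# does the assumed gender depend on condition?
fisher.test(table(data$condition, data$gender_assumption))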

Exploratory factor analysis: do the items form a single credibility factor?

data_factor <- data %>% 
  select(competence_1:status.beliefs_3) %>% 
  mutate_if(is.character, as.numeric)

ev <- eigen(cor(data_factor)) # get eigenvalues
ev$values
##  [1] 8.2676074 1.8381067 1.2725481 0.7012298 0.4662896 0.3855797 0.3570250
##  [8] 0.3169809 0.2998643 0.2400694 0.2361933 0.1898956 0.1585337 0.1562831
## [15] 0.1137936
library(psych)
## 
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
## 
##     %+%, alpha
scree(data_factor, pc=FALSE) 
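
Parallel analysis is another way to judge how many factors to retain, and psych provides it directly (a sketch; it compares the observed eigenvalues against eigenvalues from resampled data):

fa.parallel(data_factor, fm = "ml", fa = "fa")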

efa_model <- fa(data_factor, nfactors = 1, rotate = "varimax", fm = "ml")  # Maximum likelihood extraction
print(efa_model$loadings)
## 
## Loadings:
##                  ML1  
## competence_1     0.862
## competence_2     0.577
## competence_3     0.387
## competence_4     0.287
## competence_5     0.854
## warmth_1         0.560
## warmth_2         0.664
## warmth_3         0.695
## warmth_4         0.717
## credibility      0.809
## trustworthy      0.805
## expertise        0.829
## status.beliefs_1 0.822
## status.beliefs_2 0.814
## status.beliefs_3 0.844
## 
##                  ML1
## SS loadings    7.829
## Proportion Var 0.522
print(efa_model)
## Factor Analysis using method =  ml
## Call: fa(r = data_factor, nfactors = 1, rotate = "varimax", fm = "ml")
## Standardized loadings (pattern matrix) based upon correlation matrix
##                   ML1    h2   u2 com
## competence_1     0.86 0.743 0.26   1
## competence_2     0.58 0.333 0.67   1
## competence_3     0.39 0.150 0.85   1
## competence_4     0.29 0.083 0.92   1
## competence_5     0.85 0.729 0.27   1
## warmth_1         0.56 0.314 0.69   1
## warmth_2         0.66 0.441 0.56   1
## warmth_3         0.69 0.483 0.52   1
## warmth_4         0.72 0.514 0.49   1
## credibility      0.81 0.654 0.35   1
## trustworthy      0.81 0.648 0.35   1
## expertise        0.83 0.687 0.31   1
## status.beliefs_1 0.82 0.675 0.32   1
## status.beliefs_2 0.81 0.663 0.34   1
## status.beliefs_3 0.84 0.712 0.29   1
## 
##                 ML1
## SS loadings    7.83
## Proportion Var 0.52
## 
## Mean item complexity =  1
## Test of the hypothesis that 1 factor is sufficient.
## 
## df null model =  105  with the objective function =  12.89 with Chi Square =  2477.9
## df of  the model are 90  and the objective function was  3.65 
## 
## The root mean square of the residuals (RMSR) is  0.11 
## The df corrected root mean square of the residuals is  0.12 
## 
## The harmonic n.obs is  199 with the empirical chi square  534.45  with prob <  2.4e-64 
## The total n.obs was  199  with Likelihood Chi Square =  698.49  with prob <  7.2e-95 
## 
## Tucker Lewis Index of factoring reliability =  0.7
## RMSEA index =  0.184  and the 90 % confidence intervals are  0.172 0.198
## BIC =  222.1
## Fit based upon off diagonal values = 0.96
## Measures of factor score adequacy             
##                                                    ML1
## Correlation of (regression) scores with factors   0.98
## Multiple R square of scores with factors          0.96
## Minimum correlation of possible factor scores     0.91

Kinda seems like competence 3 and 4, and warmth 1, don’t fit as well as the rest? Let’s check it out without those items.

data_factor_1 <- data_factor %>% 
  select(-c(competence_3:competence_4, warmth_1))
efa_model <- fa(data_factor_1, nfactors = 1, rotate = "varimax", fm = "ml")  # Maximum likelihood extraction
print(efa_model$loadings)
## 
## Loadings:
##                  ML1  
## competence_1     0.860
## competence_2     0.560
## competence_5     0.844
## warmth_2         0.649
## warmth_3         0.685
## warmth_4         0.709
## credibility      0.811
## trustworthy      0.808
## expertise        0.834
## status.beliefs_1 0.832
## status.beliefs_2 0.823
## status.beliefs_3 0.853
## 
##                  ML1
## SS loadings    7.260
## Proportion Var 0.605
print(efa_model)
## Factor Analysis using method =  ml
## Call: fa(r = data_factor_1, nfactors = 1, rotate = "varimax", fm = "ml")
## Standardized loadings (pattern matrix) based upon correlation matrix
##                   ML1   h2   u2 com
## competence_1     0.86 0.74 0.26   1
## competence_2     0.56 0.31 0.69   1
## competence_5     0.84 0.71 0.29   1
## warmth_2         0.65 0.42 0.58   1
## warmth_3         0.68 0.47 0.53   1
## warmth_4         0.71 0.50 0.50   1
## credibility      0.81 0.66 0.34   1
## trustworthy      0.81 0.65 0.35   1
## expertise        0.83 0.70 0.30   1
## status.beliefs_1 0.83 0.69 0.31   1
## status.beliefs_2 0.82 0.68 0.32   1
## status.beliefs_3 0.85 0.73 0.27   1
## 
##                 ML1
## SS loadings    7.26
## Proportion Var 0.61
## 
## Mean item complexity =  1
## Test of the hypothesis that 1 factor is sufficient.
## 
## df null model =  66  with the objective function =  10.75 with Chi Square =  2077.48
## df of  the model are 54  and the objective function was  2.1 
## 
## The root mean square of the residuals (RMSR) is  0.08 
## The df corrected root mean square of the residuals is  0.09 
## 
## The harmonic n.obs is  199 with the empirical chi square  166.84  with prob <  1.9e-13 
## The total n.obs was  199  with Likelihood Chi Square =  403.68  with prob <  5.3e-55 
## 
## Tucker Lewis Index of factoring reliability =  0.787
## RMSEA index =  0.18  and the 90 % confidence intervals are  0.165 0.198
## BIC =  117.84
## Fit based upon off diagonal values = 0.98
## Measures of factor score adequacy             
##                                                    ML1
## Correlation of (regression) scores with factors   0.98
## Multiple R square of scores with factors          0.96
## Minimum correlation of possible factor scores     0.91

Seems a little better?
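
One way to compare the two solutions side by side is to pull the fit indices off each fa object (a sketch; it assumes the full and reduced solutions were saved under the hypothetical names efa_full and efa_reduced instead of both overwriting efa_model):

# fit indices reported by psych::fa for each solution
c(full = efa_full$TLI, reduced = efa_reduced$TLI)   # Tucker-Lewis index
c(full = efa_full$rms, reduced = efa_reduced$rms)   # root mean square of residuals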

What about breaking it down into ability and intent (two factors)?

Nfacs <- 2  # two factors (ability and intent); change as needed

fit <- factanal(data_factor, Nfacs, rotation="promax")


print(fit, digits=2, cutoff=0.3, sort=TRUE)
## 
## Call:
## factanal(x = data_factor, factors = Nfacs, rotation = "promax")
## 
## Uniquenesses:
##     competence_1     competence_2     competence_3     competence_4 
##             0.28             0.68             0.84             0.93 
##     competence_5         warmth_1         warmth_2         warmth_3 
##             0.30             0.46             0.26             0.26 
##         warmth_4      credibility      trustworthy        expertise 
##             0.31             0.37             0.38             0.33 
## status.beliefs_1 status.beliefs_2 status.beliefs_3 
##             0.23             0.22             0.19 
## 
## Loadings:
##                  Factor1 Factor2
## competence_1      0.69          
## competence_5      0.61          
## credibility       0.66          
## trustworthy       0.60          
## expertise         0.59          
## status.beliefs_1  0.96          
## status.beliefs_2  0.99          
## status.beliefs_3  0.95          
## warmth_1                  0.82  
## warmth_2                  0.94  
## warmth_3                  0.87  
## warmth_4                  0.78  
## competence_2      0.50          
## competence_3      0.46          
## competence_4                    
## 
##                Factor1 Factor2
## SS loadings       5.35    3.27
## Proportion Var    0.36    0.22
## Cumulative Var    0.36    0.57
## 
## Factor Correlations:
##         Factor1 Factor2
## Factor1    1.00   -0.72
## Factor2   -0.72    1.00
## 
## Test of the hypothesis that 2 factors are sufficient.
## The chi square statistic is 445.32 on 76 degrees of freedom.
## The p-value is 1.27e-53
library(psych)

loads <- fit$loadings

fa.diagram(loads)

Yeah, seems like the warmth items are a second factor. I just feel like trustworthiness should be loading with them, and I’m kind of confused why it isn’t.
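
One quick way to poke at that is to look at the raw correlations of trustworthy with each item family (a sketch using the same data_factor frame):

# correlations of trustworthy with the warmth items vs. a few competence/status items
round(cor(data_factor$trustworthy, data_factor[, c("warmth_1", "warmth_2", "warmth_3", "warmth_4")]), 2)
round(cor(data_factor$trustworthy, data_factor[, c("competence_1", "competence_5", "expertise", "status.beliefs_1")]), 2)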

Check alpha of the single factor

alpha(data_factor, check.keys=TRUE)
## 
## Reliability analysis   
## Call: alpha(x = data_factor, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean   sd median_r
##       0.94      0.94    0.96       0.5  15 0.0068  3.5 0.62     0.52
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.92  0.94  0.95
## Duhachek  0.92  0.94  0.95
## 
##  Reliability if an item is dropped:
##                  raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r med.r
## competence_1          0.93      0.93    0.96      0.48  13   0.0077 0.038  0.51
## competence_2          0.93      0.93    0.96      0.50  14   0.0070 0.041  0.54
## competence_3          0.94      0.94    0.96      0.53  16   0.0064 0.033  0.54
## competence_4          0.94      0.94    0.96      0.54  16   0.0062 0.027  0.55
## competence_5          0.93      0.93    0.95      0.48  13   0.0078 0.039  0.51
## warmth_1              0.93      0.94    0.96      0.51  15   0.0069 0.036  0.54
## warmth_2              0.93      0.93    0.96      0.50  14   0.0072 0.039  0.52
## warmth_3              0.93      0.93    0.96      0.50  14   0.0072 0.037  0.51
## warmth_4              0.93      0.93    0.96      0.50  14   0.0073 0.038  0.51
## credibility           0.93      0.93    0.96      0.49  13   0.0074 0.038  0.52
## trustworthy           0.93      0.93    0.96      0.49  13   0.0074 0.038  0.51
## expertise             0.93      0.93    0.96      0.49  13   0.0075 0.037  0.51
## status.beliefs_1      0.93      0.93    0.96      0.49  13   0.0075 0.037  0.52
## status.beliefs_2      0.93      0.93    0.96      0.49  13   0.0075 0.037  0.52
## status.beliefs_3      0.93      0.93    0.95      0.49  13   0.0076 0.036  0.51
## 
##  Item statistics 
##                    n raw.r std.r r.cor r.drop mean   sd
## competence_1     199  0.85  0.85  0.85   0.82  3.5 0.82
## competence_2     199  0.68  0.67  0.65   0.62  3.7 0.84
## competence_3     199  0.50  0.49  0.46   0.42  3.6 0.94
## competence_4     199  0.41  0.40  0.36   0.32  3.2 0.91
## competence_5     199  0.88  0.88  0.87   0.85  3.5 0.87
## warmth_1         199  0.61  0.61  0.58   0.55  3.3 0.84
## warmth_2         199  0.72  0.73  0.71   0.68  3.1 0.86
## warmth_3         199  0.73  0.74  0.73   0.69  3.3 0.84
## warmth_4         199  0.75  0.75  0.74   0.71  3.3 0.85
## credibility      199  0.80  0.80  0.79   0.76  3.4 0.75
## trustworthy      199  0.79  0.80  0.78   0.75  3.3 0.81
## expertise        199  0.81  0.81  0.80   0.77  3.5 0.84
## status.beliefs_1 199  0.79  0.79  0.79   0.75  3.6 0.91
## status.beliefs_2 199  0.80  0.80  0.79   0.76  3.7 0.89
## status.beliefs_3 199  0.82  0.81  0.81   0.78  3.7 0.90
## 
## Non missing response frequency for each item
##                     1    2    3    4    5 miss
## competence_1     0.01 0.06 0.44 0.37 0.12    0
## competence_2     0.01 0.06 0.31 0.45 0.18    0
## competence_3     0.02 0.08 0.32 0.40 0.18    0
## competence_4     0.04 0.15 0.49 0.25 0.08    0
## competence_5     0.02 0.07 0.47 0.31 0.14    0
## warmth_1         0.04 0.10 0.50 0.31 0.06    0
## warmth_2         0.04 0.15 0.54 0.22 0.06    0
## warmth_3         0.02 0.11 0.49 0.31 0.08    0
## warmth_4         0.03 0.09 0.49 0.33 0.07    0
## credibility      0.02 0.06 0.50 0.37 0.06    0
## trustworthy      0.02 0.11 0.44 0.39 0.05    0
## expertise        0.02 0.08 0.44 0.36 0.10    0
## status.beliefs_1 0.02 0.07 0.38 0.35 0.19    0
## status.beliefs_2 0.01 0.07 0.31 0.41 0.21    0
## status.beliefs_3 0.01 0.05 0.35 0.36 0.23    0

The raw alpha looks pretty good!
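
Given the hint of a two-factor structure, omega might be a useful complement to alpha here (a sketch; psych::omega fits a hierarchical factor model and reports omega hierarchical and omega total):

omega(data_factor, nfactors = 2)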

Check alpha of the two factors

f1 <- data_factor[ , c("competence_1", "competence_2", "competence_3", "competence_4", "competence_5", "credibility", "trustworthy", "expertise", "status.beliefs_1", "status.beliefs_2", "status.beliefs_3")]
f2 <- data_factor[ , c("warmth_1", "warmth_2", "warmth_3", "warmth_4")]
alpha(f1, check.keys=TRUE)$total[1]
##  raw_alpha
##  0.9244412
alpha(f2, check.keys=TRUE)$total[1]
##  raw_alpha
##  0.8934805

These factors also look good.
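
A natural next step would be to average each item set into a composite and run the same Welch tests by condition (a sketch; ability_belief and intent_belief are hypothetical names that just mirror the f1/f2 groupings above):

data <- data %>% 
  mutate(across(c(credibility, trustworthy, expertise), as.numeric)) %>%   # these items are still character in data
  rowwise() %>% 
  mutate(ability_belief = mean(c_across(c(competence_1, competence_2, competence_3, competence_4, competence_5,
                                          credibility, trustworthy, expertise,
                                          status.beliefs_1, status.beliefs_2, status.beliefs_3)), na.rm = TRUE),
         intent_belief  = mean(c_across(c(warmth_1, warmth_2, warmth_3, warmth_4)), na.rm = TRUE)) %>% 
  ungroup()

t.test(ability_belief ~ condition, data = data, var.equal = FALSE)
t.test(intent_belief ~ condition, data = data, var.equal = FALSE)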