table(t$decision_severity)
3 4 5 6 7
4 2 11 5 1
The only time variables correlated with severity (at p < .3) are time_closing and time_overview; I drop all other time variables from further consideration. I use p < .3 here on the assumption that these p-values will drop as n increases.
# Keep only the two surviving time variables alongside the non-time measures
temp <- t %>%
  select(-starts_with('time'), -ends_with('time'), time_closing, time_overview)
# Pairwise correlation p-values, used to blank out non-significant cells in the plot
p_matrix <- ggcorrplot::cor_pmat(temp)
ggcorrplot::ggcorrplot(cor(temp, use = "pairwise.complete.obs"),
                       lab = TRUE,
                       lab_size = 3,
                       colors = c("red", "white", "blue"),
                       title = "Correlation Matrix (p<.2)",
                       sig.level = 0.2,
                       p.mat = p_matrix,
                       insig = 'blank',
                       ggtheme = theme_minimal())
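The screening behind the claim above (that only time_closing and time_overview correlate with severity at p < .3) is not shown; a minimal sketch of how it could be reproduced, assuming the same column naming:
# Sketch (not from the original analysis): p-values of each time variable's
# correlation with decision_severity
time_check <- t %>%
  select(decision_severity, starts_with('time'), ends_with('time'))
p_sev <- ggcorrplot::cor_pmat(time_check)['decision_severity', ]
sort(p_sev[names(p_sev) != 'decision_severity'])   # keep those below .3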
Assuming a population of around 69, I replicate the sample three times (a crude bootstrap via triple(), sketched below). Going to a population of around 200, all of the above would reach significance.
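triple() is not defined anywhere in this document; it is assumed to simply stack the data three times (so the 23 observed rows become the n = 69 visible in the model output below). A minimal sketch under that assumption:
# Assumed helper: bind three copies of the data frame together. This only
# previews what significance might look like at a larger n; it creates no
# new information.
triple <- function(df) dplyr::bind_rows(df, df, df)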
# Regress severity on everything except the time variables, using the tripled data
temp <- triple(select(t, -starts_with('time'), -ends_with('time')))
m <- lm(decision_severity ~ ., data = temp)
summary(m)
Call:
lm(formula = decision_severity ~ ., data = temp)
Residuals:
Min 1Q Median 3Q Max
-1.3521 -0.7413 0.2600 0.5868 1.0686
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 6.409210 1.161352 5.519 7.39e-07 ***
pre_skepticism -0.015997 0.008851 -1.807 0.0757 .
pre_numeracy -0.118533 0.081129 -1.461 0.1491
condition_isDataViz 1.411916 0.228886 6.169 6.07e-08 ***
decision_confidence 0.033854 0.087401 0.387 0.6999
cl_intrinsic 0.199526 0.147256 1.355 0.1804
cl_germane -0.043080 0.117471 -0.367 0.7151
cl_extrinsic -0.176244 0.155574 -1.133 0.2617
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.7915 on 61 degrees of freedom
Multiple R-squared: 0.5212, Adjusted R-squared: 0.4663
F-statistic: 9.487 on 7 and 61 DF, p-value: 6.574e-08
caret::varImp(m)
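The output of caret::varImp() is not shown; for an lm fit it simply ranks predictors by the absolute value of their t-statistics, so it adds little beyond the summary above. The call that produced pca_results is also not shown; presumably a PCA on the six standardized variables that appear in the rotation below, something like:
# Assumed PCA call: standardize the six variables and extract components
pca_results <- prcomp(select(t, pre_skepticism, pre_numeracy, decision_severity,
                             cl_intrinsic, cl_germane, cl_extrinsic),
                      center = TRUE, scale. = TRUE)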
print(pca_results)
Standard deviations (1, .., p=6):
[1] 1.4084773 1.1344122 0.9971643 0.9046241 0.8108966 0.5089855
Rotation (n x k) = (6 x 6):
PC1 PC2 PC3 PC4 PC5 PC6
pre_skepticism 0.43767103 0.2618586 -0.3788836 -0.002465966 -0.76715101 0.08828701
pre_numeracy -0.26473757 -0.1022290 -0.8600394 -0.259954670 0.26347298 0.20688417
decision_severity -0.14837812 0.6160380 0.2492391 -0.690686558 0.03252771 0.24136078
cl_intrinsic 0.58535244 0.1950367 -0.1633801 -0.207084206 0.41085128 -0.61720944
cl_germane -0.03620795 0.6713515 -0.1273448 0.640857218 0.29124292 0.19036568
cl_extrinsic 0.61024055 -0.2296179 0.1083955 -0.042281499 0.29560454 0.68844951
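From the standard deviations above, the share of variance each component explains can be recovered directly (sdev^2 / sum(sdev^2)):
# Proportion of variance explained by each component
round(pca_results$sdev^2 / sum(pca_results$sdev^2), 2)
# roughly: PC1 0.33, PC2 0.21, PC3 0.17, PC4 0.14, PC5 0.11, PC6 0.04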
# Bin baseline skepticism into three levels and label the condition factor
temp <- t %>%
  mutate(pre_skepticism = ifelse(pre_skepticism < 120, 'low',
                                 ifelse(pre_skepticism < 130, 'middle', 'high'))) %>%
  mutate(pre_skepticism = factor(pre_skepticism, levels = c('low', 'middle', 'high')),
         condition_isDataViz = factor(condition_isDataViz, levels = c(0, 1), labels = c('table', 'graph')))
table(temp$pre_skepticism)
low middle high
5 10 8
# Additive ANOVA: severity by skepticism bin and condition, on the tripled data
m <- aov(decision_severity ~ pre_skepticism + condition_isDataViz,
         data = triple(temp))
summary(m)
Df Sum Sq Mean Sq F value Pr(>F)
pre_skepticism 2 4.23 2.11 3.655 0.0313 *
condition_isDataViz 1 38.02 38.02 65.778 1.86e-11 ***
Residuals 65 37.57 0.58
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
TukeyHSD(m)
Tukey multiple comparisons of means
95% family-wise confidence level
Fit: aov(formula = decision_severity ~ pre_skepticism + condition_isDataViz, data = triple(temp))
$pre_skepticism
diff lwr upr p adj
middle-low -0.6 -1.17668880 -0.0233112 0.0396107
high-low -0.2 -0.80023673 0.4002367 0.7048456
high-middle 0.4 -0.09942715 0.8994272 0.1409069
$condition_isDataViz
diff lwr upr p adj
graph-table 1.359091 0.9931447 1.725037 0
library(lavaan)
library(lavaanPlot)
temp <- t %>%
  mutate(condition_isDataViz = factor(condition_isDataViz, levels = c(0, 1), labels = c('table', 'graph'))) %>%
  rename(condition = condition_isDataViz,
         pre_skepticism = pre_skepticism) %>%   # skepticism_score could be swapped in here
  # Operationalize cognitive load as the extrinsic subscale (alternatives: cl_intrinsic, cl_germane)
  mutate(cognitive_load = cl_extrinsic)
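Cognitive load is operationalized here as the extrinsic subscale alone. If a composite of the three subscales were wanted instead, a hypothetical alternative (not used in the model below) could be:
# Hypothetical alternative: average the three cognitive-load subscales
temp_alt <- temp %>%
  mutate(cognitive_load = rowMeans(across(c(cl_intrinsic, cl_germane, cl_extrinsic))))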
model <- '
  # Mediation path
  cognitive_load ~ a1*condition + a2*pre_skepticism
  decision_severity ~ b1*cognitive_load + b2*pre_skepticism
  # Indirect effect (mediation)
  indirect := a1 * b1
  # Total effect (identical to the indirect effect here, since no direct path
  # from condition to decision_severity is included in the model)
  total := (a1 * b1)
'
fit <- sem(model, data = triple(temp), meanstructure = TRUE)
summary(fit, fit.measures = TRUE, standardized = TRUE)
lavaan 0.6-19 ended normally after 1 iteration
Estimator ML
Optimization method NLMINB
Number of model parameters 8
Number of observations 69
Model Test User Model:
Test statistic 41.780
Degrees of freedom 1
P-value (Chi-square) 0.000
Model Test Baseline Model:
Test statistic 59.903
Degrees of freedom 5
P-value 0.000
User Model versus Baseline Model:
Comparative Fit Index (CFI) 0.257
Tucker-Lewis Index (TLI) -2.714
Loglikelihood and Information Criteria:
Loglikelihood user model (H0) -191.715
Loglikelihood unrestricted model (H1) -170.825
Akaike (AIC) 399.429
Bayesian (BIC) 417.302
Sample-size adjusted Bayesian (SABIC) 392.106
Root Mean Square Error of Approximation:
RMSEA 0.769
90 Percent confidence interval - lower 0.580
90 Percent confidence interval - upper 0.976
P-value H_0: RMSEA <= 0.050 0.000
P-value H_0: RMSEA >= 0.080 1.000
Standardized Root Mean Square Residual:
SRMR 0.161
Parameter Estimates:
Standard errors Standard
Information Expected
Information saturated (h1) model Structured
Regressions:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
cognitive_load ~
condition (a1) -0.628 0.223 -2.812 0.005 -0.628 -0.314
pr_skptcs (a2) 0.028 0.009 3.034 0.002 0.028 0.339
decision_severity ~
cogntv_ld (b1) -0.297 0.130 -2.285 0.022 -0.297 -0.276
pr_skptcs (b2) 0.005 0.011 0.432 0.666 0.005 0.052
Intercepts:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.cognitive_load 0.379 1.144 0.331 0.740 0.379 0.380
.decision_svrty 5.166 1.299 3.977 0.000 5.166 4.803
Variances:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
.cognitive_load 0.826 0.141 5.874 0.000 0.826 0.828
.decision_svrty 1.075 0.183 5.874 0.000 1.075 0.929
Defined Parameters:
Estimate Std.Err z-value P(>|z|) Std.lv Std.all
indirect 0.187 0.105 1.773 0.076 0.187 0.087
total 0.187 0.105 1.773 0.076 0.187 0.087
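Because the model contains no direct path from condition to decision_severity, the defined total effect is identical to the indirect effect (as the two identical rows above show). A sketch of the fuller specification, if the usual direct + indirect decomposition were wanted:
model_full <- '
  cognitive_load ~ a1*condition + a2*pre_skepticism
  decision_severity ~ b1*cognitive_load + b2*pre_skepticism + c1*condition
  indirect := a1 * b1        # effect of condition via cognitive load
  direct   := c1             # direct effect of condition
  total    := c1 + a1 * b1   # total effect
'
fit_full <- sem(model_full, data = triple(temp), meanstructure = TRUE)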
lavaanPlot(model = fit,
           node_options = list(shape = 'box', fontname = 'Helvetica'),
           edge_options = list(color = 'grey'),
           coefs = TRUE,
           sig = 0.3,
           stars = 'regress')
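Because the real sample is much smaller than the tripled n = 69, the normal-theory standard error on the indirect effect is likely optimistic; a more cautious check would be lavaan's bootstrap standard errors on the un-tripled data, sketched here:
# Bootstrap SEs / percentile CIs for the indirect effect, on the real data
fit_boot <- sem(model, data = temp, meanstructure = TRUE,
                se = "bootstrap", bootstrap = 1000)
parameterEstimates(fit_boot, boot.ci.type = "perc")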