This is an analysis of the experimental data.
source("C:/Users/admin/Dropbox/PhD/03_DISSERTATION/Part1-ExpData&Analysis/Source1_DataTidying.R")
expData <- doDataTidying()
setwd("C:/Users/admin/Dropbox/PhD/03_DISSERTATION/Part1-ExpData&Analysis")
Hypotheses:
H0a: Providing information about a public charity’s high (low) overhead costs will increase (lower) donor perceptions of the charity’s performance
H1a: Providing information about a public charity’s high (low) overall performance rating will increase (lower) donor perceptions of the charity’s performance
H2a: Providing information about a high (low) level of overhead cost weakens (strengthens) the effect of a high charity rating on the perceived organizational performance
expData %>% dplyr::group_by(Treatment) %>%
dplyr::summarize(Low.Rating_Low.Overhead = round(mean(perf_LR.LO), digits = 2),
High.Rating_High.Overhead = round(mean(perf_HR.HO), digits = 2)) %>%
kable(caption = "Table 1. Donor percepions of charity's performance (means)") %>% kable_styling(bootstrap_options = c("striped", "hover"))
| Treatment | Low.Rating_Low.Overhead | High.Rating_High.Overhead |
|---|---|---|
| T1_NR.NO | 3.75 | 3.75 |
| T2_O | 4.10 | 3.50 |
| T3_R | 3.32 | 3.91 |
| T4_R.O | 3.69 | 3.68 |
expData %>% dplyr::group_by(Treatment) %>%
dplyr::summarize(Low.Rating_Low.Overhead = mean(perf_LR.LO),
High.Rating_High.Overhead = mean(perf_HR.HO)) %>%
gather(org_type, perceived_perf, -Treatment) %>%
ggplot(aes(x = Treatment, y = perceived_perf, col = org_type, group = org_type)) +
geom_line() + geom_point() +
ylim(1, 5) + theme(legend.position = "right") +
geom_text(aes(label = round(perceived_perf, digits = 2)), size = 3.5, vjust = -0.75, show.legend = F) +
geom_hline(yintercept = 3, colour = "gray") +
labs(y = "Perceived performance (composite)",
title = "Figure 1. Differences in Perceived Performance")
Regressing the Perceived Performance indicator on Treatment:
fit_LR <- lm(perf_LR.LO ~ Treatment, data = expData)
fit_HR <- lm(perf_HR.HO ~ Treatment, data = expData)
tab_model(fit_LR, fit_HR)
| Predictors | perf_LR.LO: Estimates | CI | p | perf_HR.HO: Estimates | CI | p |
|---|---|---|---|---|---|---|
| (Intercept) | 3.75 | 3.64 – 3.86 | <0.001 | 3.75 | 3.64 – 3.86 | <0.001 |
| TreatmentT2_O | 0.35 | 0.20 – 0.50 | <0.001 | -0.25 | -0.40 – -0.10 | 0.001 |
| TreatmentT3_R | -0.43 | -0.58 – -0.27 | <0.001 | 0.16 | 0.01 – 0.32 | 0.037 |
| TreatmentT4_R.O | -0.07 | -0.22 – 0.08 | 0.388 | -0.07 | -0.22 – 0.08 | 0.375 |
| Observations | 873 | | | 873 | | |
| R2 / adjusted R2 | 0.107 / 0.104 | | | 0.033 / 0.030 | | |
Pairwise t-tests with Bonferroni (conservative) correction of p-values:
pwt1 <- pairwise.t.test(expData$perf_LR.LO, expData$Treatment, p.adj = "bonf")
pwt1$p.value %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | T1_NR.NO | T2_O | T3_R |
|---|---|---|---|
| T2_O | 3.99e-05 | NA | NA |
| T3_R | 3.00e-07 | 0e+00 | NA |
| T4_R.O | 1.00e+00 | 3e-07 | 1.67e-05 |
pwt2 <- pairwise.t.test(expData$perf_HR.HO, expData$Treatment, p.adj = "bonf")
pwt2$p.value %>% kable(digits = 2) %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | T1_NR.NO | T2_O | T3_R |
|---|---|---|---|
| T2_O | 0.01 | NA | NA |
| T3_R | 0.22 | 0.0 | NA |
| T4_R.O | 1.00 | 0.1 | 0.02 |
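Holm’s step-down adjustment (a hedged alternative, not used in the original) controls the same family-wise error rate as Bonferroni but with more power:
# Less conservative alternative to p.adj = "bonf"; uniformly at least as powerful
pairwise.t.test(expData$perf_HR.HO, expData$Treatment, p.adj = "holm")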
Cohen’s effect size ‘d’:
eff_size <- expData %>% dplyr::group_by(Treatment) %>%
dplyr::summarize(Low.Rating_Low.Overhead = mean(perf_LR.LO),
High.Rating_High.Overhead = mean(perf_HR.HO))
# Low-Overhead Org:
(d_Overhead_LR <- (eff_size$Low.Rating_Low.Overhead[1]-eff_size$Low.Rating_Low.Overhead[2])/sd(expData$perf_LR.LO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T2_O"]))
## [1] -0.4719194
#High-Overhead Org:
(d_Overhead_HR <- (eff_size$High.Rating_High.Overhead[1]-eff_size$High.Rating_High.Overhead[2])/sd(expData$perf_HR.HO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T2_O"]))
## [1] 0.2993744
# Low-Rating Org:
(d_Rating_LR <- (eff_size$Low.Rating_Low.Overhead[1]-eff_size$Low.Rating_Low.Overhead[3])/sd(expData$perf_LR.LO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T3_R"]))
## [1] 0.5370172
#High-Rating Org:
(d_Rating_HR <- (eff_size$High.Rating_High.Overhead[1]-eff_size$High.Rating_High.Overhead[3])/sd(expData$perf_HR.HO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T3_R"]))
## [1] -0.2161782
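Since the same calculation repeats four times, a small helper (hypothetical, not part of the original script) makes the intent explicit: the denominator is the SD of the scores pooled across the two compared groups.
# Hypothetical helper: Cohen's d for a contrast between two treatment groups,
# using the SD of the pooled scores from both groups as the denominator (as above)
cohens_d <- function(outcome, treatment, g1, g2) {
  (mean(outcome[treatment == g1]) - mean(outcome[treatment == g2])) /
    sd(outcome[treatment %in% c(g1, g2)])
}
cohens_d(expData$perf_LR.LO, expData$Treatment, "T1_NR.NO", "T2_O") # same as d_Overhead_LR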
Within-treatment differences
Differences between the two organization types (High Rating & High Overhead vs. Low Rating & Low Overhead) within the treatment groups (T2_O and T3_R):
expData_T2 <- expData %>% filter(Treatment == "T2_O")
t2 <- t.test(expData_T2$perf_HR.HO - expData_T2$perf_LR.LO)
pander(t2)
| Test statistic | df | P value | Alternative hypothesis | mean of x |
|---|---|---|---|---|
| -7.61 | 224 | 7.561e-13 *** | two.sided | -0.6022 |
# Effect size for T2:
(d_T2 <- mean(expData_T2$perf_HR.HO - expData_T2$perf_LR.LO)/sd(c(expData_T2$perf_LR.LO, expData_T2$perf_HR.HO))) %>% round(digits = 2)
## [1] -0.68
expData_T3 <- expData %>% filter(Treatment == "T3_R")
t3 <- t.test(expData_T3$perf_HR.HO - expData_T3$perf_LR.LO)
pander(t3)
| Test statistic | df | P value | Alternative hypothesis | mean of x |
|---|---|---|---|---|
| 8.356 | 213 | 8.34e-15 *** | two.sided | 0.5888 |
# Effect size for T3:
(d_T3 <- mean(expData_T3$perf_HR.HO - expData_T3$perf_LR.LO)/sd(c(expData_T3$perf_LR.LO, expData_T3$perf_HR.HO))) %>% round(digits = 2)
## [1] 0.68
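These one-sample t-tests on the difference scores are mathematically identical to paired t-tests, e.g. for T3:
# Equivalent paired formulation of the t3 test above
t.test(expData_T3$perf_HR.HO, expData_T3$perf_LR.LO, paired = TRUE)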
Differences in Perceived Performance between the no-treatment group (with no performance information presented) and the group given overhead information are statistically significant for both types (low- and high-overhead) of entities.
Differences in Perceived Performance between the two types of organizations within the treatment groups are also statistically significant, and the effect sizes are large and nearly identical in magnitude (|d| = 0.68 in both T2_O and T3_R).
Measures of internal consistency (Cronbach’s Alpha) for the Perceived Performance indicator:
library(psych)
psych::alpha(expData[, c("q3_LR_impact", "q3_LR_efficiently")])$total$raw_alpha %>% round(digits = 2)
## [1] 0.68
psych::alpha(expData[, c("q5_HR_impact", "q5_HR_efficiently")])$total$raw_alpha %>% round(digits = 2)
## [1] 0.72
Unfortunately, comparing scores on a composite index constructed from 5-point ordinal variables is of limited insight. To get a fuller picture, the following analysis examines the two individual indicators that make up the composite performance score - perceived impact and perceived efficiency in spending money - analyzing each separately with ordinal logistic regression.
Sample differences for Perceived Impact (“This nonprofit is most likely to have an impact on its cause”):
expData %>% dplyr::group_by(Treatment) %>% dplyr::summarize(Low.Rating_Low.Overhead = mean(q3_LR_impact),
High.Rating_High.Overhead = mean(q5_HR_impact)) %>%
kable(align = "l", digits = 3) %>%
kable_styling(bootstrap_options = c("striped", "hover"))
| Treatment | Low.Rating_Low.Overhead | High.Rating_High.Overhead |
|---|---|---|
| T1_NR.NO | 3.904 | 3.894 |
| T2_O | 4.027 | 3.787 |
| T3_R | 3.463 | 3.958 |
| T4_R.O | 3.637 | 3.965 |
# Fit Ordinal Logit for the Low-Rating-Low-Overhead:
ol_impact_LR <- MASS::polr(as.factor(q3_LR_impact) ~ Treatment, data = expData)
#Coefficients:
ctable <- summary(ol_impact_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3) %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | Value | Std. Error | t value | p value |
|---|---|---|---|---|
| TreatmentT2_O | 0.362 | 0.178 | 2.034 | 0.042 |
| TreatmentT3_R | -0.871 | 0.181 | -4.802 | 0.000 |
| TreatmentT4_R.O | -0.452 | 0.177 | -2.549 | 0.011 |
| 1|2 | -4.045 | 0.252 | -16.077 | 0.000 |
| 2|3 | -2.475 | 0.162 | -15.291 | 0.000 |
| 3|4 | -0.984 | 0.134 | -7.370 | 0.000 |
| 4|5 | 1.133 | 0.136 | 8.329 | 0.000 |
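The summary()/pnorm() pattern recurs for every ordinal model below; a hypothetical helper (not in the original script) that produces the same coefficient table:
# Hypothetical helper: polr coefficients with normal-approximation two-tailed p-values
polr_coef_table <- function(fit) {
  ctable <- coef(summary(fit))
  pval <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
  cbind(ctable, "p value" = round(pval, 3))
}
# e.g. polr_coef_table(ol_impact_LR) %>% kable(digits = 3)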
# Change the reference group to see the difference T4-T3
ol_impact_LR <- MASS::polr(as.factor(q3_LR_impact) ~ relevel(Treatment, "T3_R"), data = expData)
#Coefficients:
ctable <- summary(ol_impact_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3) %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | Value | Std. Error | t value | p value |
|---|---|---|---|---|
| relevel(Treatment, “T3_R”)T1_NR.NO | 0.871 | 0.181 | 4.802 | 0.000 |
| relevel(Treatment, “T3_R”)T2_O | 1.233 | 0.183 | 6.740 | 0.000 |
| relevel(Treatment, “T3_R”)T4_R.O | 0.419 | 0.178 | 2.359 | 0.018 |
| 1|2 | -3.174 | 0.241 | -13.198 | 0.000 |
| 2|3 | -1.605 | 0.148 | -10.859 | 0.000 |
| 3|4 | -0.114 | 0.130 | -0.873 | 0.382 |
| 4|5 | 2.003 | 0.149 | 13.461 | 0.000 |
# Fit Ordinal Logit for the High-Rating-High-Overhead:
ol_impact_HR <- MASS::polr(as.factor(q5_HR_impact) ~ Treatment, data = expData) # FIT ORDINAL LOGIT for HR
#Coefficients:
ctable <- summary(ol_impact_HR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3) %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | Value | Std. Error | t value | p value |
|---|---|---|---|---|
| TreatmentT2_O | -0.229 | 0.180 | -1.269 | 0.205 |
| TreatmentT3_R | 0.124 | 0.182 | 0.683 | 0.495 |
| TreatmentT4_R.O | 0.101 | 0.178 | 0.569 | 0.569 |
| 1|2 | -4.830 | 0.396 | -12.194 | 0.000 |
| 2|3 | -2.854 | 0.187 | -15.259 | 0.000 |
| 3|4 | -0.916 | 0.136 | -6.731 | 0.000 |
| 4|5 | 1.104 | 0.138 | 8.008 | 0.000 |
The following section describes the discussed effects in terms of probabilities.
Predicted probabilities for the different levels of agreement with the statement:
predictData <- data.frame(Treatment = c(1,2,3,4)) #CREATE SMALL DATAFRAME FOR PREDICTING PROBS
predictData$Treatment <- factor(predictData$Treatment, labels = c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O"))
predProbDataLR <- predict(ol_impact_LR, predictData, type = "p") # CALCULATE PREDICTED PROBABILITIES
row.names(predProbDataLR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O") # RESHAPE TABLE W PRED PROBS for GRAPHING
predProbDataLR <- as.data.frame(predProbDataLR)
predProbDataLR <- rownames_to_column(predProbDataLR)
colnames(predProbDataLR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataLR$Treatment <- factor(predProbDataLR$Treatment)
#Pred.prob. table greyed out to move it after the graph:
#predProbDataLR %>% kable(digits = 3, caption = "Predicted probabilities for a Low-Rating-Low-Overhead NP") %>%
# kable_styling(bootstrap_options = c("striped", "hover"))
#predProbDataLR %>% gather(Level_of_Agreement, Probability, -Treatment) %>%
# ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) + geom_line() + geom_point()
predProbDataHR <- predict(ol_impact_HR, predictData, type = "p") # CALCULATE PREDICTED PROBABILITIES
row.names(predProbDataHR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O") # RESHAPE TABLE W PRED PROBS for GRAPHING
predProbDataHR <- as.data.frame(predProbDataHR)
predProbDataHR <- rownames_to_column(predProbDataHR)
colnames(predProbDataHR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataHR$Treatment <- factor(predProbDataHR$Treatment)
#Pred.prob. table greyed out to move it after the graph:
#predProbDataHR %>% kable(digits = 3, caption = "Predicted probabilities for a High-Rating-High-Overhead NP") %>%
# kable_styling(bootstrap_options = c("striped", "hover"))
#predProbDataHR %>% gather(Level_of_Agreement, Probability, -Treatment) %>%
# ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) + geom_line() + geom_point()
# GRID FIGURE:
predProbDataLR1 <- predProbDataLR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "LowRating_LowOverhead")
predProbDataHR1 <- predProbDataHR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "HighRating_HighOverhead")
predProbData <- rbind(predProbDataHR1, predProbDataLR1)
predProbData$org_type <- factor(predProbData$org_type, levels = c("LowRating_LowOverhead", "HighRating_HighOverhead"))
predProbData %>% ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) +
geom_line() + geom_point() + facet_grid(. ~ org_type) +
labs(title = "Figure 2. Probability Differences in Perceived Impact")
Predicted probability tables:
predProbDataLR %>% kable(digits = 3, caption = "Predicted probabilities for a Low-Rating-Low-Overhead NP") %>%
kable_styling(bootstrap_options = c("striped", "hover"))
| Treatment | 1.Strongly_Disagree | 2.Somewhat_Disagree | 3.Neither_Agree_Nor_Disagree | 4.Somewhat_Agree | 5.Strongly_Agree |
|---|---|---|---|---|---|
| T1_NR.NO | 0.017 | 0.060 | 0.194 | 0.484 | 0.244 |
| T2_O | 0.012 | 0.043 | 0.151 | 0.477 | 0.316 |
| T3_R | 0.040 | 0.127 | 0.304 | 0.410 | 0.119 |
| T4_R.O | 0.027 | 0.090 | 0.253 | 0.460 | 0.170 |
predProbDataHR %>% kable(digits = 3, caption = "Predicted probabilities for a High-Rating-High-Overhead NP") %>%
kable_styling(bootstrap_options = c("striped", "hover"))
| Treatment | 1.Strongly_Disagree | 2.Somewhat_Disagree | 3.Neither_Agree_Nor_Disagree | 4.Somewhat_Agree | 5.Strongly_Agree |
|---|---|---|---|---|---|
| T1_NR.NO | 0.008 | 0.047 | 0.231 | 0.465 | 0.249 |
| T2_O | 0.010 | 0.058 | 0.267 | 0.457 | 0.209 |
| T3_R | 0.007 | 0.041 | 0.213 | 0.466 | 0.273 |
| T4_R.O | 0.007 | 0.042 | 0.216 | 0.466 | 0.268 |
Both a low overhead and a high rating shift perceptions of impact, but the rating-driven changes are stronger than the improvements due to higher efficiency.
Sample differences for Perceived Efficiency (q3_LR_efficiently, q5_HR_efficiently: “This nonprofit efficiently spends money on its cause”):
# Means across treatments for two types of orgs:
expData %>% dplyr::group_by(Treatment) %>% dplyr::summarize(Low.Rating_High.Progr.Exp = mean(q3_LR_efficiently),
High.Rating_Low.Progr.Exp = mean(q5_HR_efficiently)) %>% kable(digits = 3) %>%
kable_styling(bootstrap_options = c("striped", "hover"))
| Treatment | Low.Rating_High.Progr.Exp | High.Rating_Low.Progr.Exp |
|---|---|---|
| T1_NR.NO | 3.601 | 3.606 |
| T2_O | 4.178 | 3.213 |
| T3_R | 3.187 | 3.869 |
| T4_R.O | 3.735 | 3.398 |
Tables with coefficients for LR_LO and HR_HO organizations:
# Low-Rating-Low-Overhead:
ol_efficiently_LR <- MASS::polr(as.factor(q3_LR_efficiently) ~ Treatment, data = expData) # FIT ORDINAL LOGIT for LR
ctable <- summary(ol_efficiently_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3, caption = "Low-Rating-Low-Overhead entity:") %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | Value | Std. Error | t value | p value |
|---|---|---|---|---|
| TreatmentT2_O | 1.277 | 0.179 | 7.123 | 0.000 |
| TreatmentT3_R | -0.754 | 0.179 | -4.211 | 0.000 |
| TreatmentT4_R.O | 0.411 | 0.174 | 2.360 | 0.018 |
| 1|2 | -3.697 | 0.248 | -14.902 | 0.000 |
| 2|3 | -2.228 | 0.159 | -13.984 | 0.000 |
| 3|4 | -0.038 | 0.125 | -0.303 | 0.762 |
| 4|5 | 1.533 | 0.139 | 11.020 | 0.000 |
ol_efficiently_LR <- MASS::polr(as.factor(q3_LR_efficiently) ~ relevel(Treatment, "T2_O"), data = expData) # SAME MODEL, T2_O AS REFERENCE
ctable <- summary(ol_efficiently_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3, caption = "Same model with T2_O as the reference group (to see the T4-T2 difference):") %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | Value | Std. Error | t value | p value |
|---|---|---|---|---|
| relevel(Treatment, “T2_O”)T1_NR.NO | -1.276 | 0.179 | -7.119 | 0.000 |
| relevel(Treatment, “T2_O”)T3_R | -2.030 | 0.189 | -10.732 | 0.000 |
| relevel(Treatment, “T2_O”)T4_R.O | -0.865 | 0.177 | -4.875 | 0.000 |
| 1|2 | -4.975 | 0.260 | -19.134 | 0.000 |
| 2|3 | -3.504 | 0.177 | -19.742 | 0.000 |
| 3|4 | -1.314 | 0.138 | -9.548 | 0.000 |
| 4|5 | 0.257 | 0.128 | 1.999 | 0.046 |
#High-Rating-High-Overhead:
ol_efficiently_HR <- MASS::polr(as.factor(q5_HR_efficiently) ~ Treatment, data = expData) # FIT ORDINAL LOGIT for HR
ctable <- summary(ol_efficiently_HR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3, caption = "High-Rating-High-Overhead entity:") %>% kable_styling(bootstrap_options = c("striped", "hover"))
| | Value | Std. Error | t value | p value |
|---|---|---|---|---|
| TreatmentT2_O | -0.640 | 0.178 | -3.592 | 0.000 |
| TreatmentT3_R | 0.552 | 0.173 | 3.194 | 0.001 |
| TreatmentT4_R.O | -0.260 | 0.174 | -1.500 | 0.133 |
| 1|2 | -3.700 | 0.232 | -15.919 | 0.000 |
| 2|3 | -2.002 | 0.147 | -13.655 | 0.000 |
| 3|4 | -0.110 | 0.124 | -0.886 | 0.375 |
| 4|5 | 1.584 | 0.141 | 11.239 | 0.000 |
Overhead information shifts agreement about efficiency in spending money for both entities, although the coefficient is about half as large for the High-Overhead nonprofit.
Rating information likewise affects perceived efficiency: the Low Rating lowers the propensity to agree about efficiency and the High Rating increases the propensity to agree with the efficiency statement.
Overhead cost and Perceived efficiency
Predicted probability differences across the treatments for LR_LO and HR_HO entities:
predictData <- data.frame(Treatment = c(1,2,3,4)) #CREATE SMALL DATAFRAME FOR PREDICTING PROBS
predictData$Treatment <- factor(predictData$Treatment, labels = c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O"))
predProbDataLR <- predict(ol_efficiently_LR, predictData, type = "p") # CALCULATE PREDICTED PROBABILITIES
row.names(predProbDataLR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O") # RESHAPE TABLE W PRED PROBS for GRAPHING
predProbDataLR <- as.data.frame(predProbDataLR)
predProbDataLR <- rownames_to_column(predProbDataLR)
colnames(predProbDataLR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataLR$Treatment <- factor(predProbDataLR$Treatment)
#predProbDataLR %>% gather(Level_of_Agreement, Probability, -Treatment) %>%
# ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) + geom_line() + geom_point()
predProbDataHR <- predict(ol_efficiently_HR, predictData, type = "p")
row.names(predProbDataHR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O")
predProbDataHR <- as.data.frame(predProbDataHR)
predProbDataHR <- rownames_to_column(predProbDataHR)
colnames(predProbDataHR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataHR$Treatment <- factor(predProbDataHR$Treatment)
#predProbDataHR %>% gather(Level_of_Agreement, Probability, -Treatment) %>%
# ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) + geom_line() + geom_point()
predProbDataLR1 <- predProbDataLR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "LowRating_LowOverhead")
predProbDataHR1 <- predProbDataHR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "HighRating_HighOverhead")
predProbData <- rbind(predProbDataHR1, predProbDataLR1)
predProbData$org_type <- factor(predProbData$org_type, levels = c("LowRating_LowOverhead", "HighRating_HighOverhead"))
predProbData %>% ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) +
geom_line() + geom_point() + theme(axis.text.x = element_text(angle = 0)) + facet_grid(. ~ org_type)
Probability tables for LR and HR organizations:
predProbDataLR %>% knitr::kable(digits = 3, caption = "Predicted probabilities for Low-Rating-Low-Overhead Entity:") %>% kable_styling()
| Treatment | 1.Strongly_Disagree | 2.Somewhat_Disagree | 3.Neither_Agree_Nor_Disagree | 4.Somewhat_Agree | 5.Strongly_Agree |
|---|---|---|---|---|---|
| T1_NR.NO | 0.024 | 0.073 | 0.393 | 0.332 | 0.178 |
| T2_O | 0.007 | 0.022 | 0.183 | 0.352 | 0.436 |
| T3_R | 0.050 | 0.136 | 0.485 | 0.236 | 0.092 |
| T4_R.O | 0.016 | 0.051 | 0.323 | 0.365 | 0.246 |
predProbDataHR %>% knitr::kable(digits = 3, caption = "Predicted probabilities for High-Rating-High-Overhead Entity:") %>% kable_styling()
| Treatment | 1.Strongly_Disagree | 2.Somewhat_Disagree | 3.Neither_Agree_Nor_Disagree | 4.Somewhat_Agree | 5.Strongly_Agree |
|---|---|---|---|---|---|
| T1_NR.NO | 0.024 | 0.095 | 0.354 | 0.357 | 0.170 |
| T2_O | 0.045 | 0.159 | 0.426 | 0.273 | 0.098 |
| T3_R | 0.014 | 0.058 | 0.268 | 0.397 | 0.263 |
| T4_R.O | 0.031 | 0.118 | 0.388 | 0.326 | 0.137 |
Absent any performance information (T1_NR.NO), the predicted probabilities of agreeing with the efficiency statement are nearly identical for the two types of organizations.
As for financial efficiency, overhead cost appears to make the greatest difference: information on a low overhead adds a substantial amount of confidence in a nonprofit’s financial efficiency, and even presenting a low charity rating alongside the low overhead doesn’t completely offset the effect of the low-overhead indicator.
Outcome variable: TRUST IN A NONPROFIT AGENCY (trust_LR.LO / trust_HR.HO) - a composite indicator computed as the average of five variables.
Measures of internal consistency for the indicator:
library(psych)
psych::alpha(expData[, c("q3_LR_actbestinterest", "q3_LR_ethically", "q3_LR_usefundsappr", "q3_LR_notexploitdon", "q3_LR_ftechniques" )])$total$raw_alpha %>% round(digits = 2)
## [1] 0.94
psych::alpha(expData[, c("q5_HR_actbestinterest", "q5_HR_ethically", "q5_HR_usefundsappr", "q5_HR_notexploitdon", "q5_HR_ftechniques" )])$total$raw_alpha %>% round(digits = 2)
## [1] 0.95
Hypotheses:
H0b: Providing information about a public charity’s high (low) overhead costs will generate a higher (lower) degree of trust in that organization
H1b: Providing information about a public charity’s high (low) overall performance rating will generate a higher (lower) degree of trust in that organization
H2b: Providing information about a high (low) level of overhead cost weakens (strengthens) the level of trust in the nonprofit
expData %>% dplyr::group_by(Treatment) %>%
dplyr::summarize(Low.Rating_Low.Overhead = round(mean(trust_LR.LO), digits = 3),
High.Rating_High.Overhead = round(mean(trust_HR.HO), digits = 3)) %>%
kable(caption = "Trust in a Nonprofit Agency") %>% kable_styling()
| Treatment | Low.Rating_Low.Overhead | High.Rating_High.Overhead |
|---|---|---|
| T1_NR.NO | 3.799 | 3.842 |
| T2_O | 4.107 | 3.691 |
| T3_R | 3.328 | 3.929 |
| T4_R.O | 3.676 | 3.761 |
expData %>% dplyr::group_by(Treatment) %>%
dplyr::summarize(Low.Rating_Low.Overhead = mean(trust_LR.LO),
High.Rating_High.Overhead = mean(trust_HR.HO)) %>%
gather(org_type, perceived_trust, -Treatment) %>%
ggplot(aes(x = Treatment, y = perceived_trust, col = org_type, group = org_type)) +
geom_line() + geom_point() +
ylim(1, 5) + theme(legend.position = "top") +
geom_text(aes(label = round(perceived_trust, digits = 2)), size = 3.5, vjust = -0.75, show.legend = F)
Pairwise comparisons with a conservative (Bonferroni) correction of p-values:
pwt1 <- pairwise.t.test(expData$trust_LR.LO, expData$Treatment, p.adj = "bonf")
pwt1$p.value %>% kable() %>% kable_styling()
| | T1_NR.NO | T2_O | T3_R |
|---|---|---|---|
| T2_O | 0.0012211 | NA | NA |
| T3_R | 0.0000001 | 0e+00 | NA |
| T4_R.O | 0.8158395 | 7e-07 | 0.0001381 |
pwt2 <- pairwise.t.test(expData$trust_HR.HO, expData$Treatment, p.adj = "bonf")
pwt2$p.value %>% kable() %>% kable_styling()
| | T1_NR.NO | T2_O | T3_R |
|---|---|---|---|
| T2_O | 0.3800324 | NA | NA |
| T3_R | 1.0000000 | 0.020006 | NA |
| T4_R.O | 1.0000000 | 1.000000 | 0.2290957 |
Regressions
T1_NR.NO is the reference group:
fit_LR <- lm(trust_LR.LO ~ Treatment, data = expData)
fit_HR <- lm(trust_HR.HO ~ Treatment, data = expData)
tab_model(fit_LR, fit_HR)
| Predictors | trust_LR.LO: Estimates | CI | p | trust_HR.HO: Estimates | CI | p |
|---|---|---|---|---|---|---|
| (Intercept) | 3.80 | 3.68 – 3.92 | <0.001 | 3.84 | 3.73 – 3.96 | <0.001 |
| TreatmentT2_O | 0.31 | 0.15 – 0.47 | <0.001 | -0.15 | -0.31 – 0.01 | 0.063 |
| TreatmentT3_R | -0.47 | -0.63 – -0.31 | <0.001 | 0.09 | -0.08 – 0.25 | 0.294 |
| TreatmentT4_R.O | -0.12 | -0.28 – 0.04 | 0.136 | -0.08 | -0.24 – 0.08 | 0.319 |
| Observations | 873 | | | 873 | | |
| R2 / adjusted R2 | 0.096 / 0.093 | | | 0.011 / 0.008 | | |
T3_R is the reference group:
fit_LR <- lm(trust_LR.LO ~ relevel(Treatment, ref = "T3_R"), data = expData)
fit_HR <- lm(trust_HR.HO ~ relevel(Treatment, ref = "T3_R"), data = expData)
tab_model(fit_LR, fit_HR)
| Predictors | trust_LR.LO: Estimates | CI | p | trust_HR.HO: Estimates | CI | p |
|---|---|---|---|---|---|---|
| (Intercept) | 3.33 | 3.21 – 3.44 | <0.001 | 3.93 | 3.82 – 4.04 | <0.001 |
| relevel(Treatment, ref = “T3_R”)T1_NR.NO | 0.47 | 0.31 – 0.63 | <0.001 | -0.09 | -0.25 – 0.08 | 0.294 |
| relevel(Treatment, ref = “T3_R”)T2_O | 0.78 | 0.62 – 0.94 | <0.001 | -0.24 | -0.40 – -0.08 | 0.003 |
| relevel(Treatment, ref = “T3_R”)T4_R.O | 0.35 | 0.19 – 0.51 | <0.001 | -0.17 | -0.33 – -0.01 | 0.038 |
| Observations | 873 | | | 873 | | |
| R2 / adjusted R2 | 0.096 / 0.093 | | | 0.011 / 0.008 | | |
T2_O is the reference group:
fit_LR <- lm(trust_LR.LO ~ relevel(Treatment, ref = "T2_O"), data = expData)
fit_HR <- lm(trust_HR.HO ~ relevel(Treatment, ref = "T2_O"), data = expData)
tab_model(fit_LR, fit_HR)
| Predictors | trust_LR.LO: Estimates | CI | p | trust_HR.HO: Estimates | CI | p |
|---|---|---|---|---|---|---|
| (Intercept) | 4.11 | 3.99 – 4.22 | <0.001 | 3.69 | 3.58 – 3.80 | <0.001 |
| relevel(Treatment, ref = “T2_O”)T1_NR.NO | -0.31 | -0.47 – -0.15 | <0.001 | 0.15 | -0.01 – 0.31 | 0.063 |
| relevel(Treatment, ref = “T2_O”)T3_R | -0.78 | -0.94 – -0.62 | <0.001 | 0.24 | 0.08 – 0.40 | 0.003 |
| relevel(Treatment, ref = “T2_O”)T4_R.O | -0.43 | -0.59 – -0.27 | <0.001 | 0.07 | -0.09 – 0.23 | 0.378 |
| Observations | 873 | | | 873 | | |
| R2 / adjusted R2 | 0.096 / 0.093 | | | 0.011 / 0.008 | | |
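Rather than refitting the model with relevel() for each reference group, all pairwise contrasts can be obtained from a single fit; a sketch assuming the emmeans package (not used in the original):
library(emmeans)
# Hypothetical refit without relevel(); one call yields every T_i - T_j contrast
fit_trust <- lm(trust_LR.LO ~ Treatment, data = expData)
emmeans(fit_trust, pairwise ~ Treatment, adjust = "bonferroni")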
Outcome variables: WILLINGNESS TO DONATE (LR_HPrExp / HR_LPrExp ~ Treatment)
Means across treatments in the sample (graphically):
results <- expData %>% group_by(Treatment) %>%
summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp),
High.Rating_High.Overhead = mean(HR_LPrExp))
# results %>% kable(caption = "Willingness to donate") %>% kable_styling()
gather(results, Charity_Type, Dollars, -Treatment) %>%
ggplot(aes(x = Treatment, y=Dollars, fill = Charity_Type)) + geom_col() + theme(legend.position = "top") +
geom_text(aes(label=round(Dollars, digits = 2)), size = 3.5, show.legend = F, position = position_stack(vjust = 0.5))
Linear regressions:
T1_NR.NO = reference group
r1 <- lm(LR_HPrExp~Treatment+Pair, data = expData)
tab_model(r1)
| Predictors | LR_HPrExp: Estimates | CI | p |
|---|---|---|---|
| (Intercept) | 55.34 | 50.37 – 60.31 | <0.001 |
| TreatmentT2_O | 6.25 | 1.06 – 11.45 | 0.018 |
| TreatmentT3_R | -19.06 | -24.29 – -13.83 | <0.001 |
| TreatmentT4_R.O | -8.92 | -14.08 – -3.76 | 0.001 |
| PairP2 | 3.44 | -2.46 – 9.33 | 0.254 |
| PairP3 | -1.38 | -6.51 – 3.74 | 0.596 |
| PairP4 | -3.15 | -8.16 – 1.85 | 0.217 |
| Observations | 873 | | |
| R2 / adjusted R2 | 0.112 / 0.106 | | |
T2_O = reference group
expData_refactor <- expData # Copy the data and re-level Treatment to change the reference group
expData_refactor$Treatment <- factor(expData_refactor$Treatment, levels = c("T2_O", "T1_NR.NO", "T3_R", "T4_R.O"))
# T2_O = reference group
r2 <- lm(LR_HPrExp~Treatment+Pair, data = expData_refactor)
tab_model(r2)
| Predictors | LR_HPrExp: Estimates | CI | p |
|---|---|---|---|
| (Intercept) | 61.60 | 56.50 – 66.69 | <0.001 |
| TreatmentT1_NR.NO | -6.25 | -11.45 – -1.06 | 0.018 |
| TreatmentT3_R | -25.32 | -30.47 – -20.16 | <0.001 |
| TreatmentT4_R.O | -15.18 | -20.24 – -10.11 | <0.001 |
| PairP2 | 3.44 | -2.46 – 9.33 | 0.254 |
| PairP3 | -1.38 | -6.51 – 3.74 | 0.596 |
| PairP4 | -3.15 | -8.16 – 1.85 | 0.217 |
| Observations | 873 | | |
| R2 / adjusted R2 | 0.112 / 0.106 | | |
T3_R = reference group
expData_refactor$Treatment <- factor(expData_refactor$Treatment, levels = c("T3_R", "T1_NR.NO", "T2_O", "T4_R.O"))
r3 <- lm(LR_HPrExp~Treatment+Pair, data = expData_refactor)
tab_model(r3)
| Predictors | LR_HPrExp: Estimates | CI | p |
|---|---|---|---|
| (Intercept) | 36.28 | 31.43 – 41.13 | <0.001 |
| TreatmentT1_NR.NO | 19.06 | 13.83 – 24.29 | <0.001 |
| TreatmentT2_O | 25.32 | 20.16 – 30.47 | <0.001 |
| TreatmentT4_R.O | 10.14 | 5.01 – 15.26 | <0.001 |
| PairP2 | 3.44 | -2.46 – 9.33 | 0.254 |
| PairP3 | -1.38 | -6.51 – 3.74 | 0.596 |
| PairP4 | -3.15 | -8.16 – 1.85 | 0.217 |
| Observations | 873 | | |
| R2 / adjusted R2 | 0.112 / 0.106 | | |
The regressions confirm a positive relationship between the charity’s rating and the willingness to donate.
H4: Higher mission commitment (valence) will moderate the relationship between charity ratings and giving behavior
Create the variable Dissimilar Cause (1=Dissimilar Cause 0=Similar Cause):
expData <- expData %>% mutate(Dissimilar_Cause = if_else(Pair == "P1" | Pair == "P2", 0, 1))
expData <- expData %>% mutate(Dissimilar_Cause_factor = if_else(Pair == "P1" | Pair == "P2", 0, 1))
expData$Dissimilar_Cause_factor <- factor(expData$Dissimilar_Cause_factor, labels = c("Similar Cause", "Dissimilar Cause"))
expData <- expData %>% mutate(Similar_Cause = if_else(Pair == "P1" | Pair == "P2", 1, 0))
expData <- expData %>% mutate(Similar_Cause_factor = if_else(Pair == "P1" | Pair == "P2", 1, 0))
expData$Similar_Cause_factor <- factor(expData$Similar_Cause_factor, labels = c("Dissimilar Cause", "Similar Cause"))
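A quick cross-tabulation (a sanity check, not in the original) confirms that each pair maps to exactly one cause-similarity group:
# Each Pair (P1-P4) should appear in only one column
table(expData$Pair, expData$Dissimilar_Cause_factor)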
Sample differences:
#Print sample differences for Similar Cause
#results_SimMi <-
expData %>% group_by(Dissimilar_Cause_factor, Treatment) %>%
summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp),
High.Rating_High.Overhead = mean(HR_LPrExp)) %>% filter(Dissimilar_Cause_factor == "Similar Cause") # %>% gather(Charity_Type, Dollars, -Treatment, -Dissimilar_Mission)
## # A tibble: 4 x 4
## # Groups: Dissimilar_Cause_factor [1]
## Dissimilar_Cause_fa~ Treatment Low.Rating_Low.Over~ High.Rating_High.Ove~
## <fct> <fct> <dbl> <dbl>
## 1 Similar Cause T1_NR.NO 52.5 47.5
## 2 Similar Cause T2_O 64.7 35.3
## 3 Similar Cause T3_R 39.4 60.6
## 4 Similar Cause T4_R.O 49.3 50.7
#Print sample differences for DisSimilar Cause
#results_DissimMi <-
expData %>% group_by(Dissimilar_Cause_factor, Treatment) %>%
summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp),
High.Rating_High.Overhead = mean(HR_LPrExp)) %>% filter(Dissimilar_Cause_factor == "Dissimilar Cause") # %>% gather(Charity_Type, Dollars, -Treatment, -Dissimilar_Mission)
## # A tibble: 4 x 4
## # Groups: Dissimilar_Cause_factor [1]
## Dissimilar_Cause_fa~ Treatment Low.Rating_Low.Over~ High.Rating_High.Ove~
## <fct> <fct> <dbl> <dbl>
## 1 Dissimilar Cause T1_NR.NO 56.5 43.5
## 2 Dissimilar Cause T2_O 58.5 41.5
## 3 Dissimilar Cause T3_R 32.6 67.4
## 4 Dissimilar Cause T4_R.O 43.2 56.8
Barplot of sample differences:
results_Mi <- expData %>% group_by(Dissimilar_Cause_factor, Treatment) %>%
summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp),
High.Rating_High.Overhead = mean(HR_LPrExp))
gather(results_Mi, Charity_Type, Dollars, -Treatment, -Dissimilar_Cause_factor) %>%
ggplot(aes(x = Treatment, y=Dollars, fill = Charity_Type)) + geom_col() +
facet_grid(. ~ Dissimilar_Cause_factor) +
geom_text(aes(label=round(Dollars, digits = 2)), size=3.5, show.legend = F, position = position_stack(vjust = 0.5)) +
theme(legend.position = "top")
Does mission valence make a statistically significant difference?
Regression analysis
fit_Mission <- lm(LR_HPrExp~Treatment + Dissimilar_Cause*Treatment, data = expData)
summary(fit_Mission)
##
## Call:
## lm(formula = LR_HPrExp ~ Treatment + Dissimilar_Cause * Treatment,
## data = expData)
##
## Residuals:
## Min 1Q Median 3Q Max
## -64.739 -19.253 -2.543 17.421 67.421
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 52.543 2.851 18.430 < 2e-16 ***
## TreatmentT2_O 12.196 4.355 2.800 0.00522 **
## TreatmentT3_R -13.189 4.021 -3.280 0.00108 **
## TreatmentT4_R.O -3.291 4.089 -0.805 0.42124
## Dissimilar_Cause 3.965 3.818 1.039 0.29927
## TreatmentT2_O:Dissimilar_Cause -10.185 5.496 -1.853 0.06420 .
## TreatmentT3_R:Dissimilar_Cause -10.741 5.366 -2.002 0.04563 *
## TreatmentT4_R.O:Dissimilar_Cause -9.981 5.343 -1.868 0.06211 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 27.35 on 865 degrees of freedom
## Multiple R-squared: 0.1159, Adjusted R-squared: 0.1088
## F-statistic: 16.2 on 7 and 865 DF, p-value: < 2.2e-16
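Whether the interaction terms are jointly significant can be checked with a nested-model F-test (a sketch, not part of the original output):
# Hypothetical joint test of the Treatment x Dissimilar_Cause interaction
fit_Mission_base <- lm(LR_HPrExp ~ Treatment + Dissimilar_Cause, data = expData)
anova(fit_Mission_base, fit_Mission)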
Multiple comparisons for each value of Dissimilar_Cause:
expData_DCause0 <- expData %>% filter(Dissimilar_Cause == 0) #Similar Cause
expData_DCause1 <- expData %>% filter(Dissimilar_Cause == 1) #Dissimilar Cause
#Similar Cause:
pairwise.t.test(expData_DCause0$LR_HPrExp, expData_DCause0$Treatment, p.adj="bonf")
##
## Pairwise comparisons using t tests with pooled SD
##
## data: expData_DCause0$LR_HPrExp and expData_DCause0$Treatment
##
## T1_NR.NO T2_O T3_R
## T2_O 0.0332 - -
## T3_R 0.0071 8e-08 -
## T4_R.O 1.0000 0.0031 0.0964
##
## P value adjustment method: bonferroni
#Dissimilar Cause:
pairwise.t.test(expData_DCause1$LR_HPrExp, expData_DCause1$Treatment, p.adj="bonf")
##
## Pairwise comparisons using t tests with pooled SD
##
## data: expData_DCause1$LR_HPrExp and expData_DCause1$Treatment
##
## T1_NR.NO T2_O T3_R
## T2_O 1.00000 - -
## T3_R 2.4e-10 1.4e-13 -
## T4_R.O 0.00075 1.2e-05 0.01067
##
## P value adjustment method: bonferroni
There are four pairs (LR.LO & HR.HO) of organizations randomly assigned to experimental conditions. Two of those pairs consist of nonprofits with similar missions (helping children or fighting disease in each pair), and the other two pairs consist of nonprofits with dissimilar missions (helping children and fighting disease in one pair). If there is a stronger preference for a particular cause in the population of donors, we would expect the presentation of performance information to make greater differences across treatments for respondents considering nonprofits that address similar causes. The sample differences show that:
The interaction of performance information and mission valence seems to kick in in the overhead condition: for two nonprofits addressing a similar cause, the high-overhead charity receives significantly less money than the low-overhead entity, while in the dissimilar pairs the low performer does not get punished. Learning about the overhead cost makes a statistically significant difference (at the 10 percent level) when the two nonprofits address a similar cause, but not when they address different sorts of issues.
The interaction of performance information and mission valence appears even more distinct in the rating condition, but unlike the previous case, the LR entity gets punished more for the low rating when the allocation is made within the pair that addresses dissimilar issues.
In the combined condition, the pattern of mission attachment is similar to that in the previous case.
H5: The effect of performance rating information will be stronger for more altruistic individuals
Altruism – 5-point scale measure of altruism; Altruism_Bi – binary measure of altruism (1 = Altruism >= median, 0 = Altruism < median).
Measure of internal consistency (Cronbach’s Alpha) for Altruism:
psych::alpha(expData[, c("q7_anticipateneeds", "q7_lovehelp", "q7_concerned", "q7_goodword", "q7_indifferent_Rev", "q7_uncomfortable_Rev", "q7_turnmyback_Rev", "q7_takenotime_Rev")])$total$raw_alpha %>% round(digits = 2)
## [1] 0.77
Interaction of Treatment and Altruism (binary measure):
fit_Altruism_Bi <- lm(HR_LPrExp~Treatment + Altruism_Bi*Treatment, data = expData)
summary(fit_Altruism_Bi)
##
## Call:
## lm(formula = HR_LPrExp ~ Treatment + Altruism_Bi * Treatment,
## data = expData)
##
## Residuals:
## Min 1Q Median 3Q Max
## -68.579 -18.579 1.943 20.358 63.301
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 43.057 2.669 16.134 < 2e-16 ***
## TreatmentT2_O -6.358 3.894 -1.633 0.10286
## TreatmentT3_R 25.521 3.647 6.998 5.22e-12 ***
## TreatmentT4_R.O 11.585 3.739 3.098 0.00201 **
## Altruism_Bi 4.419 3.792 1.165 0.24429
## TreatmentT2_O:Altruism_Bi 0.481 5.300 0.091 0.92771
## TreatmentT3_R:Altruism_Bi -13.857 5.348 -2.591 0.00973 **
## TreatmentT4_R.O:Altruism_Bi -4.796 5.257 -0.912 0.36186
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 27.35 on 865 degrees of freedom
## Multiple R-squared: 0.1159, Adjusted R-squared: 0.1088
## F-statistic: 16.2 on 7 and 865 DF, p-value: < 2.2e-16
Predicted donation intention across treatments interacted with Altruism (binary) for the HR&HO agency:
expData$Altruism_Bi <- factor(expData$Altruism_Bi, labels = c("Below_median", "At_or_above_median"))
fit_Altruism_Bi <- lm(HR_LPrExp~Treatment + Altruism_Bi*Treatment, data = expData)
predictDataT1 <- data.frame(Altruism_Bi = c("Below_median", "At_or_above_median"), Treatment = "T1_NR.NO")
predictDataT3 <- data.frame(Altruism_Bi = c("Below_median", "At_or_above_median"), Treatment = "T3_R")
predictDataT4 <- data.frame(Altruism_Bi = c("Below_median", "At_or_above_median"), Treatment = "T4_R.O")
pred_T1 <- predict.lm(fit_Altruism_Bi, predictDataT1) # add.opts: interval = "confidence", level = 0.95
pred_T3 <- predict.lm(fit_Altruism_Bi, predictDataT3)
pred_T4 <- predict.lm(fit_Altruism_Bi, predictDataT4)
predict_Altruism <- as.data.frame(cbind(pred_T1, pred_T3, pred_T4))
colnames(predict_Altruism) <- c("T1_NR.NO", "T3_R", "T4_R.O" )
predict_Altruism <- predict_Altruism %>% mutate(Altruism = c("Below_median", "At_or_above_median"))
predict_Altruism %>% gather(Treatment, Dollars, -Altruism) %>%
ggplot(aes(x=Treatment, y=Dollars, group = Altruism, col = Altruism)) + geom_line() + geom_point() + ylim(0,100) +
geom_text(aes(label=round(Dollars,digits = 2)), size = 3, vjust = -0.75, show.legend = F)
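The option noted in the comment above attaches confidence intervals to these point predictions, e.g.:
# Predictions with 95% confidence bounds (columns: fit, lwr, upr)
predict.lm(fit_Altruism_Bi, predictDataT3, interval = "confidence", level = 0.95)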
The results of the analysis show an outcome that is opposite to the expectation formulated in the hypothesis:
The interaction of Treatment and Altruism is such that more altruistic individuals appear to be less responsive to rating information than those who are below the median on the measure of altruism.