Overview

This is an analysis of the experimental data on how information about a charity’s overhead costs and its performance rating affects donor perceptions of organizational performance, trust, and willingness to donate, as well as the moderating roles of mission attachment and altruism.

Part 1: Data Tidying

source("C:/Users/admin/Dropbox/PhD/03_DISSERTATION/Part1-ExpData&Analysis/Source1_DataTidying.R")
expData <- doDataTidying()
setwd("C:/Users/admin/Dropbox/PhD/03_DISSERTATION/Part1-ExpData&Analysis")

Part 2: Data Analysis

2.1: Donor Perception of charity’s overall performance (perf_LR.LO / perf_HR.HO ~ Treatment)

H0a: Providing information about a public charity’s high (low) overhead costs will increase (lower) donor perceptions of the charity’s performance
H1a: Providing information about a public charity’s high (low) overall performance rating will increase (lower) donor perceptions of the charity’s performance
H2a: Providing information about a high (low) level of overhead cost weakens (strengthens) the effect of a high charity rating on the perceived organizational performance

expData %>% dplyr::group_by(Treatment) %>% 
    dplyr::summarize(Low.Rating_Low.Overhead = round(mean(perf_LR.LO), digits = 2), 
            High.Rating_High.Overhead = round(mean(perf_HR.HO), digits = 2)) %>% 
                kable(caption = "Table 1. Donor perceptions of charity's performance (means)") %>% kable_styling(bootstrap_options = c("striped", "hover"))
Table 1. Donor perceptions of charity’s performance (means)

Treatment   Low.Rating_Low.Overhead   High.Rating_High.Overhead
T1_NR.NO    3.75                      3.75
T2_O        4.10                      3.50
T3_R        3.32                      3.91
T4_R.O      3.69                      3.68
expData %>% dplyr::group_by(Treatment) %>% 
    dplyr::summarize(Low.Rating_Low.Overhead = mean(perf_LR.LO), 
                      High.Rating_High.Overhead = mean(perf_HR.HO))   %>%
    gather(org_type, perceived_perf, -Treatment ) %>%
    ggplot(aes(x=Treatment, y=perceived_perf, col=org_type, group=org_type)) + 
        geom_line() + geom_point() + 
        ylim(1,5) + theme(legend.position = "right") +
        geom_text(aes(label=round(perceived_perf, digits = 2)), size = 3.5, vjust = -0.75,  show.legend = F) + 
        geom_hline(yintercept = 3, colour = "gray") + 
        labs(y = "Perceived performance (composite)") + 
        labs(title = "Figure 1. Differences in Perceived Performance") 


Are the differences statistically significant?

Regressing the Perceived Performance indicator on Treatment:

fit_LR <- lm(perf_LR.LO ~ Treatment, data = expData)
fit_HR <- lm(perf_HR.HO ~ Treatment, data = expData)
tab_model(fit_LR, fit_HR)
                   perf_LR.LO                          perf_HR.HO
Predictors         Estimates   CI              p       Estimates   CI              p
(Intercept)        3.75        3.64 – 3.86     <0.001   3.75       3.64 – 3.86     <0.001
TreatmentT2_O      0.35        0.20 – 0.50     <0.001  -0.25      -0.40 – -0.10    0.001
TreatmentT3_R     -0.43       -0.58 – -0.27    <0.001   0.16       0.01 – 0.32     0.037
TreatmentT4_R.O   -0.07       -0.22 – 0.08     0.388   -0.07      -0.22 – 0.08     0.375
Observations       873                                  873
R2 / adjusted R2   0.107 / 0.104                        0.033 / 0.030

 

Pairwise t-test with Bonferroni (conservative) correction of p-values:

pwt1 <- pairwise.t.test(expData$perf_LR.LO, expData$Treatment, p.adj = "bonf")
pwt1$p.value %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover"))
         T1_NR.NO   T2_O    T3_R
T2_O     3.99e-05   NA      NA
T3_R     3.00e-07   0e+00   NA
T4_R.O   1.00e+00   3e-07   1.67e-05
pwt2 <- pairwise.t.test(expData$perf_HR.HO, expData$Treatment, p.adj = "bonf")
pwt2$p.value %>% kable(digits = 2) %>% kable_styling(bootstrap_options = c("striped", "hover"))
         T1_NR.NO   T2_O   T3_R
T2_O     0.01       NA     NA
T3_R     0.22       0.0    NA
T4_R.O   1.00       0.1    0.02

 

Cohen’s effect size ‘d’:

Effect of showing the overhead information:
eff_size <- expData %>% dplyr::group_by(Treatment) %>% 
                        dplyr::summarize(Low.Rating_Low.Overhead = mean(perf_LR.LO), 
                                         High.Rating_High.Overhead = mean(perf_HR.HO))
# Low-Overhead Org:
(d_Overhead_LR <- (eff_size$Low.Rating_Low.Overhead[1]-eff_size$Low.Rating_Low.Overhead[2])/sd(expData$perf_LR.LO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T2_O"]))
## [1] -0.4719194
#High-Overhead Org:
(d_Overhead_HR <- (eff_size$High.Rating_High.Overhead[1]-eff_size$High.Rating_High.Overhead[2])/sd(expData$perf_HR.HO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T2_O"]))
## [1] 0.2993744
Effect of showing the rating information:
# Low-Rating Org:
(d_Rating_LR <- (eff_size$Low.Rating_Low.Overhead[1]-eff_size$Low.Rating_Low.Overhead[3])/sd(expData$perf_LR.LO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T3_R"]))
## [1] 0.5370172
#High-Rating Org:
(d_Rating_HR <- (eff_size$High.Rating_High.Overhead[1]-eff_size$High.Rating_High.Overhead[3])/sd(expData$perf_HR.HO[expData$Treatment=="T1_NR.NO"|expData$Treatment=="T3_R"]))
## [1] -0.2161782
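
As a cross-check, these hand-computed values can be compared against a packaged implementation such as effsize (an assumed companion step, not part of the original analysis; cohen.d() uses a pooled SD, so its estimates may differ slightly from the combined-sample SD used above):

# Cross-check of d for the low-overhead contrast (T1 vs. T2) with effsize
library(effsize)
x_T1 <- expData$perf_LR.LO[expData$Treatment == "T1_NR.NO"]
x_T2 <- expData$perf_LR.LO[expData$Treatment == "T2_O"]
cohen.d(x_T1, x_T2)  # pooled-SD Cohen's d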

 

Within-treatment differences
Differences between the two organization types (High Rating & High Overhead vs. Low Rating & Low Overhead) within the T2_O and T3_R treatment groups:

expData_T2 <- expData %>% filter(Treatment == "T2_O")
t2 <- t.test(expData_T2$perf_HR.HO - expData_T2$perf_LR.LO)
pander(t2)
One Sample t-test: expData_T2$perf_HR.HO - expData_T2$perf_LR.LO

Test statistic   df    P value         Alternative hypothesis   mean of x
-7.61            224   7.561e-13 ***   two.sided                -0.6022
# Effect size for T2:
(d_T2 <- mean(expData_T2$perf_HR.HO - expData_T2$perf_LR.LO)/sd(c(expData_T2$perf_LR.LO, expData_T2$perf_HR.HO))) %>% round(digits = 2)
## [1] -0.68
expData_T3 <- expData %>% filter(Treatment ==  "T3_R")
t3 <- t.test(expData_T3$perf_HR.HO - expData_T3$perf_LR.LO)
pander(t3)
One Sample t-test: expData_T3$perf_HR.HO - expData_T3$perf_LR.LO

Test statistic   df    P value        Alternative hypothesis   mean of x
8.356            213   8.34e-15 ***   two.sided                0.5888
# Effect size for T3:
(d_T3 <- mean(expData_T3$perf_HR.HO - expData_T3$perf_LR.LO)/sd(c(expData_T3$perf_LR.LO, expData_T3$perf_HR.HO))) %>% round(digits = 2)
## [1] 0.68
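
Note that the one-sample t-test on the difference score is equivalent to a paired t-test on the two ratings; the paired form below (shown only as an equivalent alternative) reproduces the same t, df, and p-value:

# Equivalent paired formulation for T2 (same statistic as the one-sample test above)
t.test(expData_T2$perf_HR.HO, expData_T2$perf_LR.LO, paired = TRUE)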

Takeaways:
  • When no objective performance information is presented, the indicator of individual perceived performance shows the same level for two different organizations.
  • The differences in Perceived Performance between the no-treatment group (with no performance information presented) and the group with the overhead information provided are statistically significant for both types (low- and high-overhead) of entities.
    • For a low-overhead (low-rating) organization, the availability of the information about its overhead cost leads to a statistically significant increase in the level of its perceived performance.
    • For a high-overhead (high-rating) nonprofit, there was a statistically significant decrease in the perceived performance.
    • Speaking of practical significance, the size of the effect (as measured by Cohen’s d) differs for the low- and high-overhead nonprofits: when the overhead is presented, the effect size for a low-overhead entity is moderate, while for a high-overhead entity it is rather small. This might indicate that, performance-wise, donors might reward efficient operations more than they penalize high-overhead ones.
  • The differences in Perceived Performance between the two types of organizations within the treatment groups are statistically significant, and the effect sizes are large and similar. In particular:
    • In the Overhead condition, the positive effect of the low overhead appears to be more sizable than the negative effect of the large overhead.
    • At the same time, in the Rating condition, the negative effect of the poor rating significantly dominates over the positive effect of a high rating.
  • In the Overhead & Rating condition the two performance indicators offset each other’s effects.
  • OVERALL:
    • Low overhead contributes strongly to positive perceptions of organizational performance; high overhead detracts from perceived performance, but this effect is close to trivial and is not much of a problem
    • A high rating might or might not add a little to positive perceptions of organizational performance in comparison to the no-information condition, but a poor rating significantly damages the perceived performance of a nonprofit organization.

Measures of internal consistency (Cronbach’s Alpha) for the Perceived Performance indicator:

library(psych)
psych::alpha(expData[, c("q3_LR_impact", "q3_LR_efficiently")])$total$raw_alpha %>% round(digits = 2)
## [1] 0.68
psych::alpha(expData[, c("q5_HR_impact", "q5_HR_efficiently")])$total$raw_alpha %>% round(digits = 2)
## [1] 0.72
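
For two-item composites, Cronbach’s Alpha can understate reliability, and the Spearman-Brown coefficient is often recommended instead. A minimal sketch (a companion check, not part of the original analysis):

# Spearman-Brown reliability for the two low-rating items: SB = 2r / (1 + r)
r_LR <- cor(expData$q3_LR_impact, expData$q3_LR_efficiently, use = "complete.obs")
round(2 * r_LR / (1 + r_LR), 2)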

Unfortunately, comparing scores on a composite index constructed from 5-point ordinal variables is of limited insight. To get a more detailed picture of what is going on, the following analysis looks at the two individual performance indicators that comprise the composite performance score - perceived impact and perceived efficiency in spending money. The two measures are analyzed separately using ordinal logistic regression.



EACH PERFORMANCE INDICATOR ANALYZED SEPARATELY (ORDINAL LOGIT)

2.2. IMPACT

Sample differences for Perceived Impact (“This nonprofit is most likely to have an impact on its cause”):

expData %>% dplyr::group_by(Treatment) %>% dplyr::summarize(Low.Rating_Low.Overhead = mean(q3_LR_impact), 
                                              High.Rating_High.Overhead = mean(q5_HR_impact)) %>% 
                                              kable(align = "l", digits = 3) %>% 
                                              kable_styling(bootstrap_options = c("striped", "hover"))
Treatment   Low.Rating_Low.Overhead   High.Rating_High.Overhead
T1_NR.NO    3.904                     3.894
T2_O        4.027                     3.787
T3_R        3.463                     3.958
T4_R.O      3.637                     3.965
# Fit Ordinal Logit for the Low-Rating-Low-Overhead:
ol_impact_LR <- MASS::polr(as.factor(q3_LR_impact) ~ Treatment, data = expData)     
#Coefficients:
ctable <- summary(ol_impact_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3) %>% kable_styling(bootstrap_options = c("striped", "hover"))
                  Value    Std. Error   t value   p value
TreatmentT2_O      0.362   0.178          2.034   0.042
TreatmentT3_R     -0.871   0.181         -4.802   0.000
TreatmentT4_R.O   -0.452   0.177         -2.549   0.011
1|2               -4.045   0.252        -16.077   0.000
2|3               -2.475   0.162        -15.291   0.000
3|4               -0.984   0.134         -7.370   0.000
4|5                1.133   0.136          8.329   0.000
# Change the reference group to see the difference T4-T3     
ol_impact_LR <- MASS::polr(as.factor(q3_LR_impact) ~ relevel(Treatment, "T3_R"), data = expData) 
#Coefficients:
ctable <- summary(ol_impact_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3) %>% kable_styling(bootstrap_options = c("striped", "hover"))
                                      Value    Std. Error   t value   p value
relevel(Treatment, "T3_R")T1_NR.NO     0.871   0.181          4.802   0.000
relevel(Treatment, "T3_R")T2_O         1.233   0.183          6.740   0.000
relevel(Treatment, "T3_R")T4_R.O       0.419   0.178          2.359   0.018
1|2                                   -3.174   0.241        -13.198   0.000
2|3                                   -1.605   0.148        -10.859   0.000
3|4                                   -0.114   0.130         -0.873   0.382
4|5                                    2.003   0.149         13.461   0.000
# Fit Ordinal Logit for the High-Rating-High-Overhead:
ol_impact_HR <- polr(as.factor(q5_HR_impact) ~ Treatment, data = expData)  # FIT ORDINAL LOGIT for HR
#Coefficients:
ctable <- summary(ol_impact_HR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3) %>% kable_styling(bootstrap_options = c("striped", "hover"))
                  Value    Std. Error   t value   p value
TreatmentT2_O     -0.229   0.180         -1.269   0.205
TreatmentT3_R      0.124   0.182          0.683   0.495
TreatmentT4_R.O    0.101   0.178          0.569   0.569
1|2               -4.830   0.396        -12.194   0.000
2|3               -2.854   0.187        -15.259   0.000
3|4               -0.916   0.136         -6.731   0.000
4|5                1.104   0.138          8.008   0.000
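
The polr() models rest on the proportional-odds assumption; one way to check it is the Brant test (a hypothetical companion check via the brant package, not part of the original analysis):

# Brant test of the proportional-odds assumption for the HR impact model
# (assumes the brant package is installed)
library(brant)
brant(ol_impact_HR)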
Takeaways:
  • Looking at the cut-points, we can see that the log-odds (and therefore the probabilities) that individuals in the NO-TREATMENT group exhibit a certain level of agreement (from Strongly Agree to Strongly Disagree) about an organization’s capacity to make an impact are nearly identical for the two types of organizations (Low-Rating-Low-Overhead and High-Rating-High-Overhead)
  • In the OVERHEAD treatment condition:
    • Presenting the Low-overhead information statistically significantly increases an individual’s propensity to agree
    • By contrast, the effect of presenting the High-overhead information doesn’t reach statistical significance
  • In the RATING treatment condition:
    • Facing the information about a charity’s low rating significantly lowers the individual propensity to agree with the impact statement
    • At the same time, the information about a charity’s high rating doesn’t lead to a significant change in individual perceptions of the organization’s capacity to produce an impact
  • In the RATING & OVERHEAD condition:
    • Presenting a low rating along with a low overhead has a significant effect in the same direction as in the RATING condition but with a lower magnitude, thus confirming the moderating effect of the low overhead.
    • Presenting the High Rating along with the High Overhead makes no significant difference in the individual propensity to agree with the impact statement compared to the NO-TREATMENT group or the RATING-only group.

The following section describes the discussed effects in terms of probabilities.

Predicted probabilities for the different level of agreement with the statement:

predictData <- data.frame(Treatment = c(1,2,3,4)) #CREATE SMALL DATAFRAME FOR PREDICTING PROBS
predictData$Treatment <- factor(predictData$Treatment, labels = c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O"))

predProbDataLR <- predict(ol_impact_LR, predictData, type = "p")  # CALCULATE PREDICTED PROBABILITIES
row.names(predProbDataLR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O") # RESHAPE TABLE W PRED PROBS for GRAPHING
predProbDataLR <- as.data.frame(predProbDataLR)
predProbDataLR <- rownames_to_column(predProbDataLR)
colnames(predProbDataLR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataLR$Treatment <- factor(predProbDataLR$Treatment)


predProbDataHR <- predict(ol_impact_HR, predictData, type = "p")  # CALCULATE PREDICTED PROBABILITIES
row.names(predProbDataHR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O")  # RESHAPE TABLE W PRED PROBS for GRAPHING
predProbDataHR <- as.data.frame(predProbDataHR)
predProbDataHR <- rownames_to_column(predProbDataHR)
colnames(predProbDataHR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataHR$Treatment <- factor(predProbDataHR$Treatment)
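
The LR and HR chunks repeat the same predict-and-reshape steps; a small helper function (a hypothetical refactor, not in the original code) would consolidate them:

# Hypothetical helper consolidating the duplicated predict-and-reshape steps
tidy_pred_probs <- function(model, newdata) {
    probs <- as.data.frame(predict(model, newdata, type = "p"))
    colnames(probs) <- c("1.Strongly_Disagree", "2.Somewhat_Disagree",
                         "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree",
                         "5.Strongly_Agree")
    probs$Treatment <- factor(newdata$Treatment)
    probs
}
# Usage: predProbDataLR <- tidy_pred_probs(ol_impact_LR, predictData)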


# GRID FIGURE:
predProbDataLR1 <- predProbDataLR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "LowRating_LowOverhead")
predProbDataHR1 <- predProbDataHR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "HighRating_HighOverhead")
predProbData <- rbind(predProbDataHR1, predProbDataLR1)
predProbData$org_type <- factor(predProbData$org_type, levels = c("LowRating_LowOverhead", "HighRating_HighOverhead"))
predProbData %>% ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) +
                    geom_line() + geom_point() + facet_grid(. ~ org_type) + 
                    labs(title = "Figure 2. Probability Differences in Perceived Impact") 

Predicted probability tables:

predProbDataLR %>% kable(digits = 3, caption = "Predicted probabilities for a Low-Rating-Low-Overhead NP") %>% 
                    kable_styling(bootstrap_options = c("striped", "hover")) 
Predicted probabilities for a Low-Rating-Low-Overhead NP

Treatment   1.Strongly_Disagree   2.Somewhat_Disagree   3.Neither_Agree_Nor_Disagree   4.Somewhat_Agree   5.Strongly_Agree
T1_NR.NO    0.017                 0.060                 0.194                          0.484              0.244
T2_O        0.012                 0.043                 0.151                          0.477              0.316
T3_R        0.040                 0.127                 0.304                          0.410              0.119
T4_R.O      0.027                 0.090                 0.253                          0.460              0.170
predProbDataHR %>% kable(digits = 3, caption = "Predicted probabilities for a High-Rating-High-Overhead NP") %>% 
                    kable_styling(bootstrap_options = c("striped", "hover"))
Predicted probabilities for a High-Rating-High-Overhead NP

Treatment   1.Strongly_Disagree   2.Somewhat_Disagree   3.Neither_Agree_Nor_Disagree   4.Somewhat_Agree   5.Strongly_Agree
T1_NR.NO    0.008                 0.047                 0.231                          0.465              0.249
T2_O        0.010                 0.058                 0.267                          0.457              0.209
T3_R        0.007                 0.041                 0.213                          0.466              0.273
T4_R.O      0.007                 0.042                 0.216                          0.466              0.268
Takeaways:
  • The left facet shows the probability changes across the treatment groups for the Low-Rating-Low-Overhead condition (statistically significant effects), and the right panel shows the probabilities for the High-Rating-High-Overhead entity.
  • As the ordinal regression coefficients indicated, the left figure clearly shows much larger differences across the treatment groups for the Low-Rating-Low-Overhead entity compared to the High-Rating-High-Overhead entity:
    • For instance, we can see how the spread in probability between 3.Neither_Agree_Nor_Disagree and 5.Strongly_Agree widens from five percentage points to 16.5 percentage points towards the Strongly Agree end of the scale.
    • Showing the Low Rating only leads to even wider differences across all levels of propensity to agree with the impact statement: the probability of Strongly Agreeing drops from 24.4 percent to 11.9 percent and the probability of Somewhat Agreeing drops from 48.4 to 41.0 percent, while the probability of Neither Agreeing Nor Disagreeing increases from 19.4 to 30.4 percent and the probability of Somewhat Disagreeing increases from 6.0 to 12.7 percent.
    • The availability of both performance indicators has a similar, although weaker, effect than the Low Rating only, suggesting a moderation effect.
    • For the High-Rating-High-Overhead entity, the probability differences across the treatments are substantially smaller, which shows that neither the High Rating nor the High Overhead affects individual perceptions of an organization’s capacity to make an impact (which makes sense)
  • OVERALL, when it comes to the individual perceptions of impact:
    • A low overhead cost somewhat improves individual perceptions of nonprofit impact, while showing a high overhead does not hurt
    • A high charity rating doesn’t significantly improve donor perceptions of a nonprofit’s capacity to make an impact (in line with the a priori expectation), whereas a low performance rating can significantly damage the view of organizational ability to produce results.
    • Low overhead costs can mitigate the adverse effects of negative ratings on perceptions of impact, but those adverse effects are stronger than the improvements due to higher efficiency.

2.3 EFFICIENCY

Sample differences for Perceived Efficiency (q3_LR_efficiently, q5_HR_efficiently: “This nonprofit efficiently spends money on its cause”):

# Means across treatments for two types of orgs:
expData %>% dplyr::group_by(Treatment) %>% 
    dplyr::summarize(Low.Rating_High.Progr.Exp = mean(q3_LR_efficiently), 
                     High.Rating_Low.Progr.Exp = mean(q5_HR_efficiently)) %>% 
    kable(digits = 3) %>% 
    kable_styling(bootstrap_options = c("striped", "hover"))
Treatment   Low.Rating_High.Progr.Exp   High.Rating_Low.Progr.Exp
T1_NR.NO    3.601                       3.606
T2_O        4.178                       3.213
T3_R        3.187                       3.869
T4_R.O      3.735                       3.398

Tables with coefficients for LR_LO and HR_HO organizations:

# Low-Rating-Low-Overhead:
ol_efficiently_LR <- polr(as.factor(q3_LR_efficiently) ~ Treatment, data = expData)  # FIT ORDINAL LOGIT for LR
ctable <- summary(ol_efficiently_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3, caption = "Low-Rating-Low-Overhead entity:") %>% kable_styling(bootstrap_options = c("striped", "hover"))
Low-Rating-Low-Overhead entity:

                  Value    Std. Error   t value   p value
TreatmentT2_O      1.277   0.179          7.123   0.000
TreatmentT3_R     -0.754   0.179         -4.211   0.000
TreatmentT4_R.O    0.411   0.174          2.360   0.018
1|2               -3.697   0.248        -14.902   0.000
2|3               -2.228   0.159        -13.984   0.000
3|4               -0.038   0.125         -0.303   0.762
4|5                1.533   0.139         11.020   0.000
ol_efficiently_LR <- polr(as.factor(q3_LR_efficiently) ~ relevel(Treatment, "T2_O"), data = expData)  # FIT ORDINAL LOGIT for LR
ctable <- summary(ol_efficiently_LR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3, caption = "Same but switched reference groups to see the diff. T4-T2:") %>% kable_styling(bootstrap_options = c("striped", "hover"))
Same but with a switched reference group to see the difference T4-T2:

                                      Value    Std. Error   t value   p value
relevel(Treatment, "T2_O")T1_NR.NO    -1.276   0.179         -7.119   0.000
relevel(Treatment, "T2_O")T3_R        -2.030   0.189        -10.732   0.000
relevel(Treatment, "T2_O")T4_R.O      -0.865   0.177         -4.875   0.000
1|2                                   -4.975   0.260        -19.134   0.000
2|3                                   -3.504   0.177        -19.742   0.000
3|4                                   -1.314   0.138         -9.548   0.000
4|5                                    0.257   0.128          1.999   0.046
#High-Rating-High-Overhead:
ol_efficiently_HR <- polr(as.factor(q5_HR_efficiently) ~ Treatment, data = expData)  # FIT ORDINAL LOGIT for HR
ctable <- summary(ol_efficiently_HR) %>% coef()
pval <- pnorm(abs(ctable[,"t value"]), lower.tail = FALSE)*2
(ctable <- cbind(ctable, "p value" = round(pval, 3))) %>% kable(digits = 3, caption = "High-Rating-High-Overhead entity:") %>% kable_styling(bootstrap_options = c("striped", "hover"))
High-Rating-High-Overhead entity:

                  Value    Std. Error   t value   p value
TreatmentT2_O     -0.640   0.178         -3.592   0.000
TreatmentT3_R      0.552   0.173          3.194   0.001
TreatmentT4_R.O   -0.260   0.174         -1.500   0.133
1|2               -3.700   0.232        -15.919   0.000
2|3               -2.002   0.147        -13.655   0.000
3|4               -0.110   0.124         -0.886   0.375
4|5                1.584   0.141         11.239   0.000
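
For interpretation on the odds-ratio scale, the coefficients above can be exponentiated along with profile-likelihood confidence intervals (a companion view, not shown in the original):

# Odds ratios with 95% profile CIs for the High-Rating-High-Overhead model
exp(cbind(OR = coef(ol_efficiently_HR), confint(ol_efficiently_HR)))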
Takeaways:
  • This measure appears to be sensitive to both high and low values of both the Rating and the Overhead indicators.
  • Compared to the No-information treatment, Low Overhead tends to give individuals more, and High Overhead less, confidence in a nonprofit’s efficiency in spending money, although the coefficient is about half the size for the High-Overhead nonprofit.
  • With noticeably weaker coefficients, information about charity Ratings also significantly affects perceived efficiency: the Low Rating lowers the propensity to agree with the efficiency statement and the High Rating increases it.
  • Presenting both the Low Rating and the Low Overhead works the same way as the Low Overhead only, although with a smaller coefficient, which, along with the statistically significant difference T4_R.O - T2_O, confirms the moderating effect of the Low Rating on the relationship between the Overhead cost and Perceived Efficiency

 

Predicted probability differences across the treatments for LR_LO and HR_HO entities:

predictData <- data.frame(Treatment = c(1,2,3,4)) #CREATE SMALL DATAFRAME FOR PREDICTING PROBS
predictData$Treatment <- factor(predictData$Treatment, labels = c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O"))

predProbDataLR <- predict(ol_efficiently_LR, predictData, type = "p") # CALCULATE PREDICTED PROBABILITIES
row.names(predProbDataLR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O") # RESHAPE TABLE W PRED PROBS for GRAPHING
predProbDataLR <- as.data.frame(predProbDataLR)
predProbDataLR <- rownames_to_column(predProbDataLR)
colnames(predProbDataLR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataLR$Treatment <- factor(predProbDataLR$Treatment)

predProbDataHR <- predict(ol_efficiently_HR, predictData, type = "p")
row.names(predProbDataHR) <- c("T1_NR.NO", "T2_O", "T3_R", "T4_R.O")
predProbDataHR <- as.data.frame(predProbDataHR)
predProbDataHR <- rownames_to_column(predProbDataHR)
colnames(predProbDataHR) <- c("Treatment", "1.Strongly_Disagree", "2.Somewhat_Disagree", "3.Neither_Agree_Nor_Disagree", "4.Somewhat_Agree", "5.Strongly_Agree")
predProbDataHR$Treatment <- factor(predProbDataHR$Treatment)

predProbDataLR1 <- predProbDataLR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "LowRating_LowOverhead")
predProbDataHR1 <- predProbDataHR %>% gather(Level_of_Agreement, Probability, -Treatment) %>% mutate(org_type = "HighRating_HighOverhead")
predProbData <- rbind(predProbDataHR1, predProbDataLR1)
predProbData$org_type <- factor(predProbData$org_type, levels = c("LowRating_LowOverhead", "HighRating_HighOverhead"))
predProbData %>% ggplot(aes(x=Treatment, y=Probability, group=Level_of_Agreement, col=Level_of_Agreement)) +
    geom_line() + geom_point() + theme(axis.text.x = element_text(angle = 0)) + facet_grid(. ~ org_type)

Probability tables for LR and HR organizations:

predProbDataLR %>% knitr::kable(digits = 3, caption = "Predicted probabilities for Low-Rating-Low-Overhead Entity:") %>% kable_styling()
Predicted probabilities for Low-Rating-Low-Overhead Entity:
Treatment   1.Strongly_Disagree   2.Somewhat_Disagree   3.Neither_Agree_Nor_Disagree   4.Somewhat_Agree   5.Strongly_Agree
T1_NR.NO    0.024                 0.073                 0.393                          0.332              0.178
T2_O        0.007                 0.022                 0.183                          0.352              0.436
T3_R        0.050                 0.136                 0.485                          0.236              0.092
T4_R.O      0.016                 0.051                 0.323                          0.365              0.246
predProbDataHR %>% knitr::kable(digits = 3, caption = "Predicted probabilities for High-Rating-High-Overhead Entity:") %>% kable_styling()
Predicted probabilities for High-Rating-High-Overhead Entity:
Treatment   1.Strongly_Disagree   2.Somewhat_Disagree   3.Neither_Agree_Nor_Disagree   4.Somewhat_Agree   5.Strongly_Agree
T1_NR.NO    0.024                 0.095                 0.354                          0.357              0.170
T2_O        0.045                 0.159                 0.426                          0.273              0.098
T3_R        0.014                 0.058                 0.268                          0.397              0.263
T4_R.O      0.031                 0.118                 0.388                          0.326              0.137
Takeaways:
  • Under the No-information condition:
    • the initial probabilities describing the propensity to agree with the efficiency statement are nearly identical for the two types of organizations
    • the most likely responses (in order of descending probabilities) are Neither Agree Nor Disagree (39.3% for LRLO and 35.4% for HRHO), Somewhat Agree (33.2% and 35.7%), and Strongly Agree (17.8% and 17.0%).
  • In the Overhead only treatment condition:
    • The probability ranking of response levels reverses for the Low-Rating-Low-Overhead organization to Strongly Agree (43.6%), followed by Somewhat Agree (35.2%) and Neither Agree Nor Disagree (18.3%).
    • For the High-Rating-High-Overhead type of organization, the ranking of the most likely response levels remains almost the same, except that the most likely category, Neither Agree Nor Disagree, becomes even more likely (42.6% instead of 35.4%) and the probability of the Somewhat Agree response category drops to 27.3% from 35.7%. Somewhat Disagree (15.9%) becomes more likely than Strongly Agree (9.8%), taking the third position in the ranking.
  • In the Rating only treatment group:
    • The most likely response levels for the Low-Rating-Low-Overhead type of organization line up in the same order as in the No-information condition, although the Neither Agree Nor Disagree response becomes even more likely (48.5%) while the probability of responding Somewhat Agree or Strongly Agree drops.
    • For the HR_HO organization, the most likely response is Somewhat Agree followed by Neither Agree Nor Disagree and Strongly Agree.
  • In the Rating and Overhead treatment group:
    • For the LR_LO organization: the highest probability of Somewhat Agree (36.5%) is followed by Neither Agree Nor Disagree (32.3%) and Strongly Agree (24.6%)
    • For the HR_HO organization: the highest probability response is Neither Agree Nor Disagree (38.8%), followed by Somewhat Agree (32.6%), and Strongly Agree (13.7%).
  • OVERALL, when it comes to the perceptions of financial efficiency:
    • Charity ratings make a difference, but the low Overhead cost appears to make the greatest difference: information on a low Overhead adds a substantial amount of confidence in a nonprofit’s financial efficiency. Even presenting a low charity rating alongside the low overhead doesn’t completely offset the effect of the low-overhead indicator.
    • For a High-Rating-High-Overhead organization, both performance measures have implications for the perceptions of the financial efficiency but the magnitudes are smaller in comparison to the effects of the low rating and low overhead.



2.4. EFFECTS OF TREATMENT ON DONOR TRUST IN A NONPROFIT AGENCY

Outcome variable: TRUST IN A NONPROFIT AGENCY (trust_LR.LO / trust_HR.HO) - a composite indicator computed as the average of five variables.

Measures of internal consistency for the indicator:

library(psych)
psych::alpha(expData[, c("q3_LR_actbestinterest", "q3_LR_ethically", "q3_LR_usefundsappr", "q3_LR_notexploitdon", "q3_LR_ftechniques" )])$total$raw_alpha %>% round(digits = 2)
## [1] 0.94
psych::alpha(expData[, c("q5_HR_actbestinterest", "q5_HR_ethically", "q5_HR_usefundsappr", "q5_HR_notexploitdon", "q5_HR_ftechniques" )])$total$raw_alpha %>% round(digits = 2)
## [1] 0.95

Hypotheses:

H0b: Providing information about a public charity’s high (low) overhead costs will generate a higher (lower) degree of trust in that organization

H1b: Providing information about a public charity’s high (low) overall performance rating will generate a higher (lower) degree of trust in that organization

H2b: Providing information about a high (low) level of overhead cost weakens (strengthens) the effect of a high charity rating on the level of trust in the nonprofit

expData %>% dplyr::group_by(Treatment) %>% 
    dplyr::summarize(Low.Rating_Low.Overhead = round(mean(trust_LR.LO), digits = 3), 
                     High.Rating_High.Overhead = round(mean(trust_HR.HO), digits = 3)) %>% 
                        kable(caption = "Trust in a Nonprofit Agency") %>% kable_styling()
Trust in a Nonprofit Agency

Treatment   Low.Rating_Low.Overhead   High.Rating_High.Overhead
T1_NR.NO    3.799                     3.842
T2_O        4.107                     3.691
T3_R        3.328                     3.929
T4_R.O      3.676                     3.761
expData %>% dplyr::group_by(Treatment) %>% 
    dplyr::summarize(Low.Rating_Low.Overhead = mean(trust_LR.LO), 
                     High.Rating_High.Overhead = mean(trust_HR.HO))   %>%
    gather(org_type, perceived_trust, -Treatment ) %>%
    ggplot(aes(x=Treatment, y=perceived_trust, col=org_type, group=org_type)) + 
    geom_line() + geom_point() + 
    ylim(1,5) + theme(legend.position = "top") +
    geom_text(aes(label=round(perceived_trust, digits = 2)), size = 3.5, vjust = -0.75,  show.legend = F)

Pairwise comparisons with a conservative correction of p-values:

pwt1 <- pairwise.t.test(expData$trust_LR.LO, expData$Treatment, p.adj = "bonf")
pwt1$p.value %>% kable() %>% kable_styling()
         T1_NR.NO    T2_O    T3_R
T2_O     0.0012211   NA      NA
T3_R     0.0000001   0e+00   NA
T4_R.O   0.8158395   7e-07   0.0001381
pwt2 <- pairwise.t.test(expData$trust_HR.HO, expData$Treatment, p.adj = "bonf")
pwt2$p.value %>% kable() %>% kable_styling()
         T1_NR.NO    T2_O       T3_R
T2_O     0.3800324   NA         NA
T3_R     1.0000000   0.020006   NA
T4_R.O   1.0000000   1.000000   0.2290957

Regressions

T1_NR_NO is the reference group:

fit_LR <- lm(trust_LR.LO ~ Treatment, data = expData)
fit_HR <- lm(trust_HR.HO ~ Treatment, data = expData)
tab_model(fit_LR, fit_HR)
                   trust_LR.LO                         trust_HR.HO
Predictors         Estimates   CI              p       Estimates   CI              p
(Intercept)        3.80        3.68 – 3.92     <0.001   3.84       3.73 – 3.96     <0.001
TreatmentT2_O      0.31        0.15 – 0.47     <0.001  -0.15      -0.31 – 0.01     0.063
TreatmentT3_R     -0.47       -0.63 – -0.31    <0.001   0.09      -0.08 – 0.25     0.294
TreatmentT4_R.O   -0.12       -0.28 – 0.04     0.136   -0.08      -0.24 – 0.08     0.319
Observations       873                                  873
R2 / adjusted R2   0.096 / 0.093                        0.011 / 0.008

 

T3_R is the reference group:

fit_LR <- lm(trust_LR.LO ~ relevel(Treatment, ref = "T3_R"), data = expData)
fit_HR <- lm(trust_HR.HO ~ relevel(Treatment, ref = "T3_R"), data = expData)
tab_model(fit_LR, fit_HR)
                                           trust_LR.LO                        trust_HR.HO
Predictors                                 Estimates   CI             p       Estimates   CI              p
(Intercept)                                3.33        3.21 – 3.44    <0.001   3.93       3.82 – 4.04     <0.001
relevel(Treatment, ref = "T3_R")T1_NR.NO   0.47        0.31 – 0.63    <0.001  -0.09      -0.25 – 0.08     0.294
relevel(Treatment, ref = "T3_R")T2_O       0.78        0.62 – 0.94    <0.001  -0.24      -0.40 – -0.08    0.003
relevel(Treatment, ref = "T3_R")T4_R.O     0.35        0.19 – 0.51    <0.001  -0.17      -0.33 – -0.01    0.038
Observations                               873                                 873
R2 / adjusted R2                           0.096 / 0.093                       0.011 / 0.008

 

T2_O is the reference group:

fit_LR <- lm(trust_LR.LO ~ relevel(Treatment, ref = "T2_O"), data = expData)
fit_HR <- lm(trust_HR.HO ~ relevel(Treatment, ref = "T2_O"), data = expData)
tab_model(fit_LR, fit_HR)
                                           trust_LR.LO                        trust_HR.HO
Predictors                                 Estimates   CI             p       Estimates   CI             p
(Intercept)                                4.11        3.99 – 4.22    <0.001   3.69       3.58 – 3.80    <0.001
relevel(Treatment, ref = "T2_O")T1_NR.NO  -0.31       -0.47 – -0.15   <0.001   0.15      -0.01 – 0.31    0.063
relevel(Treatment, ref = "T2_O")T3_R      -0.78       -0.94 – -0.62   <0.001   0.24       0.08 – 0.40    0.003
relevel(Treatment, ref = "T2_O")T4_R.O    -0.43       -0.59 – -0.27   <0.001   0.07      -0.09 – 0.23    0.378
Observations                               873                                 873
R2 / adjusted R2                           0.096 / 0.093                       0.011 / 0.008
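
Refitting the model with a different reference group three times can be avoided: the emmeans package (an assumed alternative, not used in the original analysis) returns all pairwise treatment contrasts from a single fit:

# All pairwise treatment contrasts from one fit (assumed alternative to releveling)
library(emmeans)
fit_LR0 <- lm(trust_LR.LO ~ Treatment, data = expData)
emmeans(fit_LR0, pairwise ~ Treatment, adjust = "bonferroni")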

 

Takeaways:
  • Again, as with perceived performance, learning about a low overhead statistically significantly increases trust in an agency relative to the no-information condition, whereas a high overhead doesn’t make a significant difference for perceived trust;
  • A low rating, in turn, damages trust, while a high rating doesn’t add to it in comparison to the no-information condition
  • When the two performance measures are presented together, a low overhead and a high rating offset each other’s effects



WILLINGNESS TO DONATE

(LR_HPrExp / HR_LPrExp ~ Treatment)

Means across treatments in the sample (graphically):

results <- expData %>% group_by(Treatment) %>% 
                summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp), 
                          High.Rating_High.Overhead = mean(HR_LPrExp))


gather(results, Charity_Type, Dollars, -Treatment) %>%
    ggplot(aes(x = Treatment, y=Dollars, fill = Charity_Type)) + geom_col() + theme(legend.position = "top") +
    geom_text(aes(label=round(Dollars, digits = 2)), size = 3.5, show.legend = F, position = position_stack(vjust = 0.5))

Linear regressions:

 

T1_NR.NO = reference group

r1 <- lm(LR_HPrExp~Treatment+Pair, data = expData)
tab_model(r1)
                    LR_HPrExp
Predictors          Estimates   CI                p
(Intercept)         55.34       50.37 – 60.31     <0.001
TreatmentT2_O       6.25        1.06 – 11.45      0.018
TreatmentT3_R       -19.06      -24.29 – -13.83   <0.001
TreatmentT4_R.O     -8.92       -14.08 – -3.76    0.001
PairP2              3.44        -2.46 – 9.33      0.254
PairP3              -1.38       -6.51 – 3.74      0.596
PairP4              -3.15       -8.16 – 1.85      0.217
Observations        873
R2 / adjusted R2    0.112 / 0.106

 

T2_O = reference group

expData_refactor <- expData # Refactoring to change the reference group
expData_refactor$Treatment <- factor(expData_refactor$Treatment, levels = c("T2_O", "T1_NR.NO", "T3_R", "T4_R.O"))
#                     T2_O = reference group
r2 <- lm(LR_HPrExp~Treatment+Pair, data = expData_refactor)
tab_model(r2)
                    LR_HPrExp
Predictors          Estimates   CI                p
(Intercept)         61.60       56.50 – 66.69     <0.001
TreatmentT1_NR.NO   -6.25       -11.45 – -1.06    0.018
TreatmentT3_R       -25.32      -30.47 – -20.16   <0.001
TreatmentT4_R.O     -15.18      -20.24 – -10.11   <0.001
PairP2              3.44        -2.46 – 9.33      0.254
PairP3              -1.38       -6.51 – 3.74      0.596
PairP4              -3.15       -8.16 – 1.85      0.217
Observations        873
R2 / adjusted R2    0.112 / 0.106

 

T3_R = reference group

expData_refactor$Treatment <- factor(expData_refactor$Treatment, levels = c("T3_R", "T1_NR.NO", "T2_O", "T4_R.O"))
r3 <- lm(LR_HPrExp~Treatment+Pair, data = expData_refactor)
tab_model(r3)
                    LR_HPrExp
Predictors          Estimates   CI              p
(Intercept)         36.28       31.43 – 41.13   <0.001
TreatmentT1_NR.NO   19.06       13.83 – 24.29   <0.001
TreatmentT2_O       25.32       20.16 – 30.47   <0.001
TreatmentT4_R.O     10.14       5.01 – 15.26    <0.001
PairP2              3.44        -2.46 – 9.33    0.254
PairP3              -1.38       -6.51 – 3.74    0.596
PairP4              -3.15       -8.16 – 1.85    0.217
Observations        873
R2 / adjusted R2    0.112 / 0.106
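
The takeaways below read these coefficients as splits of a fixed $100 budget. If each respondent indeed allocates exactly $100 between the two charities, the two outcome variables are complementary, which is easy to verify (a sanity check, assuming the fixed-budget design):

# Sanity check: do the two allocations sum to $100 for every respondent?
all(expData$LR_HPrExp + expData$HR_LPrExp == 100)
# If TRUE, the HR_LPrExp regressions would simply mirror the ones above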
Takeaways:
  • In the no-information condition, the designated amount to be donated to the charities is split between the two organizations in nearly equal proportions (out of $100, $45 goes to the HR&HO agency and $55 to the LR&LO agency)
  • Learning about the overhead cost leads to a statistically significant redistribution of donations towards the more efficient charity by $6.25, so that the High-Overhead agency gets $40 and the Low-Overhead agency gets $60.
  • Presenting Ratings only has a statistically significant effect of larger magnitude, with a redistribution of funds towards the more highly rated nonprofit. Here, the organization with a high rating gets $65 and the low-rating agency gets only $35.
  • When both performance measures are presented, we observe an effect in the same direction but of lower magnitude than in the Rating-only condition. In comparison to the No-information condition, the budget is again redistributed in favor of the more highly rated agency, although the amount of the redistribution is smaller ($55 to the HR&HO agency, $45 to the LR&LO agency). Here, we can observe that the Overhead information moderates the relationship between the rating and the willingness to donate.



MISSION ATTACHMENT

H4: Higher mission commitment (valence) will moderate the relationship between charity ratings and giving behavior

Create the variable Dissimilar Cause (1=Dissimilar Cause 0=Similar Cause):

expData <- expData %>% mutate(Dissimilar_Cause = if_else(Pair == "P1" | Pair == "P2", 0, 1))
expData <- expData %>% mutate(Dissimilar_Cause_factor = if_else(Pair == "P1" | Pair == "P2", 0, 1))
expData$Dissimilar_Cause_factor <- factor(expData$Dissimilar_Cause_factor, labels = c("Similar Cause", "Dissimilar Cause")) 
expData <- expData %>% mutate(Similar_Cause = if_else(Pair == "P1" | Pair == "P2", 1, 0))
expData <- expData %>% mutate(Similar_Cause_factor = if_else(Pair == "P1" | Pair == "P2", 1, 0))
expData$Similar_Cause_factor <- factor(expData$Similar_Cause_factor, labels = c("Dissimilar Cause", "Similar Cause")) 

Sample differences:

#Print sample differences for Similar Cause
expData %>% group_by(Dissimilar_Cause_factor, Treatment) %>% 
    summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp), 
              High.Rating_High.Overhead = mean(HR_LPrExp)) %>% filter(Dissimilar_Cause_factor == "Similar Cause")
## # A tibble: 4 x 4
## # Groups:   Dissimilar_Cause_factor [1]
##   Dissimilar_Cause_fa~ Treatment Low.Rating_Low.Over~ High.Rating_High.Ove~
##   <fct>                <fct>                    <dbl>                 <dbl>
## 1 Similar Cause        T1_NR.NO                  52.5                  47.5
## 2 Similar Cause        T2_O                      64.7                  35.3
## 3 Similar Cause        T3_R                      39.4                  60.6
## 4 Similar Cause        T4_R.O                    49.3                  50.7
#Print sample differences for Dissimilar Cause
expData %>% group_by(Dissimilar_Cause_factor, Treatment) %>% 
    summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp), 
              High.Rating_High.Overhead = mean(HR_LPrExp)) %>% filter(Dissimilar_Cause_factor == "Dissimilar Cause")
## # A tibble: 4 x 4
## # Groups:   Dissimilar_Cause_factor [1]
##   Dissimilar_Cause_fa~ Treatment Low.Rating_Low.Over~ High.Rating_High.Ove~
##   <fct>                <fct>                    <dbl>                 <dbl>
## 1 Dissimilar Cause     T1_NR.NO                  56.5                  43.5
## 2 Dissimilar Cause     T2_O                      58.5                  41.5
## 3 Dissimilar Cause     T3_R                      32.6                  67.4
## 4 Dissimilar Cause     T4_R.O                    43.2                  56.8

Barplot of sample differences:

results_Mi <- expData %>% group_by(Dissimilar_Cause_factor, Treatment) %>% 
    summarize(Low.Rating_Low.Overhead = mean(LR_HPrExp), 
              High.Rating_High.Overhead = mean(HR_LPrExp))


gather(results_Mi, Charity_Type, Dollars, -Treatment, -Dissimilar_Cause_factor) %>%
    ggplot(aes(x = Treatment, y=Dollars, fill = Charity_Type)) + geom_col() +
    facet_grid(. ~ Dissimilar_Cause_factor) +
    geom_text(aes(label=round(Dollars, digits = 2)), size=3.5, show.legend = F, position = position_stack(vjust = 0.5)) +
    theme(legend.position = "top")

Does mission valence make a statistically significant difference?

Regression analysis

fit_Mission <- lm(LR_HPrExp~Treatment + Dissimilar_Cause*Treatment, data = expData)
summary(fit_Mission)
## 
## Call:
## lm(formula = LR_HPrExp ~ Treatment + Dissimilar_Cause * Treatment, 
##     data = expData)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -64.739 -19.253  -2.543  17.421  67.421 
## 
## Coefficients:
##                                  Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                        52.543      2.851  18.430  < 2e-16 ***
## TreatmentT2_O                      12.196      4.355   2.800  0.00522 ** 
## TreatmentT3_R                     -13.189      4.021  -3.280  0.00108 ** 
## TreatmentT4_R.O                    -3.291      4.089  -0.805  0.42124    
## Dissimilar_Cause                    3.965      3.818   1.039  0.29927    
## TreatmentT2_O:Dissimilar_Cause    -10.185      5.496  -1.853  0.06420 .  
## TreatmentT3_R:Dissimilar_Cause    -10.741      5.366  -2.002  0.04563 *  
## TreatmentT4_R.O:Dissimilar_Cause   -9.981      5.343  -1.868  0.06211 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 27.35 on 865 degrees of freedom
## Multiple R-squared:  0.1159, Adjusted R-squared:  0.1088 
## F-statistic:  16.2 on 7 and 865 DF,  p-value: < 2.2e-16
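
The split-sample comparisons below can also be recovered from the interaction model itself, e.g. with emmeans evaluated at each value of the dummy (an assumed companion step, mirroring the pairwise t-tests that follow):

# Treatment contrasts at each level of the Dissimilar_Cause dummy
library(emmeans)
emmeans(fit_Mission, pairwise ~ Treatment | Dissimilar_Cause,
        at = list(Dissimilar_Cause = c(0, 1)), adjust = "bonferroni")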

Multiple comparisons for each value of Dissimilar_Cause:

expData_DCause0 <- expData %>% filter(Dissimilar_Cause == 0) #Similar Cause
expData_DCause1 <- expData %>% filter(Dissimilar_Cause == 1) #Dissimilar Cause

#Similar Cause:
pairwise.t.test(expData_DCause0$LR_HPrExp, expData_DCause0$Treatment, p.adj="bonf")
## 
##  Pairwise comparisons using t tests with pooled SD 
## 
## data:  expData_DCause0$LR_HPrExp and expData_DCause0$Treatment 
## 
##        T1_NR.NO T2_O   T3_R  
## T2_O   0.0332   -      -     
## T3_R   0.0071   8e-08  -     
## T4_R.O 1.0000   0.0031 0.0964
## 
## P value adjustment method: bonferroni
#Dissimilar Cause:
pairwise.t.test(expData_DCause1$LR_HPrExp, expData_DCause1$Treatment, p.adj="bonf")
## 
##  Pairwise comparisons using t tests with pooled SD 
## 
## data:  expData_DCause1$LR_HPrExp and expData_DCause1$Treatment 
## 
##        T1_NR.NO T2_O    T3_R   
## T2_O   1.00000  -       -      
## T3_R   2.4e-10  1.4e-13 -      
## T4_R.O 0.00075  1.2e-05 0.01067
## 
## P value adjustment method: bonferroni
Takeaways:

There are four pairs (LRLO & HRHO) of organizations randomly assigned to experimental conditions. Two of those pairs represent nonprofits with similar missions (helping children or fighting disease in each pair), and the other two pairs are represented by nonprofits with dissimilar missions (helping children and fighting disease in one pair). If there is a stronger preference for a particular cause in the population of donors, we would expect the presentation of performance information to make greater differences across treatments among those who were considering nonprofits addressing similar causes. The sample differences show that:

  • There is a four-dollar difference in the allocations between the two organizations depending on the similarity of the cause, although the difference is not statistically significant
  • An interaction of performance information and mission valence seems to kick in in the overhead condition: for two nonprofits addressing a similar cause, the high-overhead charity gets significantly less money than the low-overhead entity, while between two dissimilar nonprofits the low performer did not get punished. It looks like learning about the overhead cost makes a statistically significant difference (at the 10 percent level) in the similar-cause case, whereas it does not in the case where the two nonprofits address different sorts of issues
  • An interaction of performance information and mission valence appears to be even more distinct in the rating condition, but unlike in the previous case, the low-rated entity gets punished more when the allocation is within the pair that addresses dissimilar issues.
  • In the rating and overhead treatment, the effect of the mission attachment is similar to that in the previous case.



ALTRUISM

H5: The effect of performance rating information will be stronger for more altruistic individuals

Measure of internal consistency for Altruism:

psych::alpha(expData[, c("q7_anticipateneeds", "q7_lovehelp", "q7_concerned", "q7_goodword", "q7_indifferent_Rev", "q7_uncomfortable_Rev", "q7_turnmyback_Rev", "q7_takenotime_Rev")])$total$raw_alpha %>% round(digits = 2)
## [1] 0.77

Interaction of Treatment and Altruism (binary measure):

fit_Altruism_Bi <- lm(HR_LPrExp~Treatment + Altruism_Bi*Treatment, data = expData)
summary(fit_Altruism_Bi)
## 
## Call:
## lm(formula = HR_LPrExp ~ Treatment + Altruism_Bi * Treatment, 
##     data = expData)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -68.579 -18.579   1.943  20.358  63.301 
## 
## Coefficients:
##                             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                   43.057      2.669  16.134  < 2e-16 ***
## TreatmentT2_O                 -6.358      3.894  -1.633  0.10286    
## TreatmentT3_R                 25.521      3.647   6.998 5.22e-12 ***
## TreatmentT4_R.O               11.585      3.739   3.098  0.00201 ** 
## Altruism_Bi                    4.419      3.792   1.165  0.24429    
## TreatmentT2_O:Altruism_Bi      0.481      5.300   0.091  0.92771    
## TreatmentT3_R:Altruism_Bi    -13.857      5.348  -2.591  0.00973 ** 
## TreatmentT4_R.O:Altruism_Bi   -4.796      5.257  -0.912  0.36186    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 27.35 on 865 degrees of freedom
## Multiple R-squared:  0.1159, Adjusted R-squared:  0.1088 
## F-statistic:  16.2 on 7 and 865 DF,  p-value: < 2.2e-16

Predicted donation intention across treatments interacted with Altruism (binary) for the HR&HO agency:

expData$Altruism_Bi <- factor(expData$Altruism_Bi, labels = c("Below_median", "At_or_above_median"))
fit_Altruism_Bi <- lm(HR_LPrExp~Treatment + Altruism_Bi*Treatment, data = expData)

predictDataT1 <- data.frame(Altruism_Bi = c("Below_median", "At_or_above_median"), Treatment = "T1_NR.NO")
predictDataT3 <- data.frame(Altruism_Bi = c("Below_median", "At_or_above_median"), Treatment = "T3_R")
predictDataT4 <- data.frame(Altruism_Bi = c("Below_median", "At_or_above_median"), Treatment = "T4_R.O")

pred_T1 <- predict.lm(fit_Altruism_Bi, predictDataT1)  # add.opts: interval = "confidence", level = 0.95
pred_T3 <- predict.lm(fit_Altruism_Bi, predictDataT3)
pred_T4 <- predict.lm(fit_Altruism_Bi, predictDataT4)
predict_Altruism <- as.data.frame(cbind(pred_T1, pred_T3, pred_T4))
colnames(predict_Altruism) <- c("T1_NR.NO", "T3_R", "T4_R.O" )
predict_Altruism <- predict_Altruism %>% mutate(Altruism = c("Below_median", "At_or_above_median"))

predict_Altruism %>% gather(Treatment, Dollars, -Altruism) %>% 
    ggplot(aes(x=Treatment, y=Dollars, group = Altruism, col = Altruism)) + geom_line() + geom_point() + ylim(0,100) +
    geom_text(aes(label=round(Dollars,digits = 2)), size = 3, vjust = -0.75,  show.legend = F)
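
The interval option noted in the comment on predict.lm() can add uncertainty to this figure; a sketch (an extension of the code above, not part of the original output):

# Same predictions with 95% confidence intervals (columns fit / lwr / upr)
predictData_all <- rbind(predictDataT1, predictDataT3, predictDataT4)
pred_ci <- predict.lm(fit_Altruism_Bi, predictData_all,
                      interval = "confidence", level = 0.95)
cbind(predictData_all, as.data.frame(pred_ci))
# The lwr/upr columns could feed geom_errorbar() in the plot above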

Takeaways:

The results of the analysis show an outcome that is opposite to the expectation formulated in the hypothesis:

  • Rating information interacts with Altruism
  • The moderating effect of Altruism is such that more altruistic individuals appear to be less responsive to rating information than those who are below the median on the measure of altruism