Setup
library(tidyr)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(readr)
library(stringr)
library(ggplot2)
library(lm.beta)
library(lme4)
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
library(lmerTest)
##
## Attaching package: 'lmerTest'
## The following object is masked from 'package:lme4':
##
## lmer
## The following object is masked from 'package:stats':
##
## step
library(stats)
setwd("~/Box/LAB/Research/Active/REVIEWS/Exp 2/Data/5.5.20")
Load data
data <- readr::read_csv("widedata.csv", col_names = TRUE)
## Parsed with column specification:
## cols(
## .default = col_double(),
## Review = col_character(),
## Warning = col_character()
## )
## See spec(...) for full column specifications.
datachoice <- data %>%
  gather(ChoiceTime, Choice, Choice1, Choice2)
datainterest1 <- data %>%
  gather(Interest1Time, Interest1, Option1Interest1, Option1Interest2)
datainterest2 <- data %>%
  gather(Interest2Time, Interest2, Option2Interest1, Option2Interest2)
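As an aside, gather() is superseded in current tidyr; a sketch of the same reshape with pivot_longer(), assuming the column names above (not run):
# equivalent long format for the choice data, using pivot_longer() instead of gather()
datachoice <- data %>%
  pivot_longer(c(Choice1, Choice2), names_to = "ChoiceTime", values_to = "Choice")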
Are people’s product choices affected by a single positive or negative review?
data %>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
glm(Choice ~ Review, data = .) %>%
summary()
##
## Call:
## glm(formula = Choice ~ Review, data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.7143 -0.4868 -0.1726 0.2857 0.8275
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.17255 0.02785 6.196 9.70e-10 ***
## ReviewNone 0.31429 0.04053 7.754 3.01e-14 ***
## ReviewPositive 0.54174 0.03978 13.618 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.1977495)
##
## Null deviance: 180.41 on 727 degrees of freedom
## Residual deviance: 143.37 on 725 degrees of freedom
## AIC: 891.06
##
## Number of Fisher Scoring iterations: 2
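Note that glm() above falls back to the default gaussian family, i.e. a linear probability model; for reference, a logistic sketch of the same comparison (not run):
# sketch (not run): logistic-regression version of the model above
data %>%
  mutate(Choice = ifelse(Choice1 == 1, 1, 0)) %>%
  glm(Choice ~ Review, data = ., family = binomial) %>%
  summary()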
data %>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
mutate(Choice = as.numeric(Choice)) %>%
group_by(Review) %>%
summarise(Prop = sum(Choice==1)/n(),
sd = sqrt((sum(Choice==1)/n())*(1-(sum(Choice==1)/n()))),
n = n(), # calculates the sample size per group
SE = sd/sqrt(n())) %>% # standard error of the proportion: sd/sqrt(n)
ggplot() + aes(x = Review, y = Prop, fill = Review) +
geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
xlab("Message Condition") + ylab("Proportion select option with review") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank(), axis.line = element_line(colour = "black")) +
geom_errorbar(aes(ymin=Prop-SE, ymax=Prop+SE), position=position_dodge(width=0.9), width=.1) +
geom_text(aes(label=round(Prop,digits=2)), position=position_dodge(width=.9), vjust=5) +
scale_fill_brewer(palette="Dark2")
#yes, very much so
Is the influence of reviews on product choice affected by individual differences?
Gender = no difference
# gender = no difference
data %>%
filter(Gender == 0 | Gender == 1) %>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
glm(Choice ~ Pos*Gender + Neg*Gender, data = .) %>%
summary()
##
## Call:
## glm(formula = Choice ~ Pos * Gender + Neg * Gender, data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.7339 -0.4528 -0.1339 0.3025 0.8661
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.525424 0.040893 12.849 < 2e-16 ***
## Pos 0.172055 0.057709 2.981 0.00297 **
## Gender -0.072594 0.059445 -1.221 0.22242
## Neg -0.321198 0.055333 -5.805 9.69e-09 ***
## Pos:Gender 0.108986 0.082360 1.323 0.18616
## Gender:Neg 0.002297 0.081762 0.028 0.97760
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.1973194)
##
## Null deviance: 178.79 on 720 degrees of freedom
## Residual deviance: 141.08 on 715 degrees of freedom
## AIC: 883.95
##
## Number of Fisher Scoring iterations: 2
Age = No difference
# age = no difference
data %>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
glm(Choice ~ Pos*Age + Neg*Age, data = .) %>%
summary()
##
## Call:
## glm(formula = Choice ~ Pos * Age + Neg * Age, data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.8173 -0.4648 -0.1700 0.3180 0.8358
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.45231 0.07660 5.905 5.44e-09 ***
## Pos 0.16200 0.10777 1.503 0.13322
## Age 0.01252 0.02563 0.488 0.62545
## Neg -0.27067 0.10411 -2.600 0.00952 **
## Pos:Age 0.02131 0.03496 0.610 0.54229
## Age:Neg -0.01543 0.03404 -0.453 0.65054
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.1981786)
##
## Null deviance: 180.21 on 726 degrees of freedom
## Residual deviance: 142.89 on 721 degrees of freedom
## (1 observation deleted due to missingness)
## AIC: 894.4
##
## Number of Fisher Scoring iterations: 2
Trust in reviews = no significant interaction for choice (trend only: more trust, somewhat larger effect of a positive review)
# trust in reviews: neither the positive nor the negative interaction reaches significance for choice
data %>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
glm(Choice ~ Pos*Trust_Reviews + Neg*Trust_Reviews, data = .) %>%
summary()
##
## Call:
## glm(formula = Choice ~ Pos * Trust_Reviews + Neg * Trust_Reviews,
## data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.8427 -0.4442 -0.1714 0.3220 0.8313
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.38791 0.12011 3.230 0.0013 **
## Pos 0.04299 0.16774 0.256 0.7978
## Trust_Reviews 0.02816 0.03315 0.849 0.3959
## Neg -0.20545 0.18098 -1.135 0.2567
## Pos:Trust_Reviews 0.05420 0.04679 1.158 0.2471
## Trust_Reviews:Neg -0.03091 0.04953 -0.624 0.5327
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.1966783)
##
## Null deviance: 180.41 on 727 degrees of freedom
## Residual deviance: 142.00 on 722 degrees of freedom
## AIC: 890.09
##
## Number of Fisher Scoring iterations: 2
# effect of reviews for those with high versus low trust in reviews
data %>%
  mutate(Choice = ifelse(Choice1 == 1, "Product 1 - with review", "Product 2 - control")) %>%
  ggplot() +
  aes(x = Trust_Reviews, color = factor(Review), group = Review, y = Choice) +
  labs(x = "How much do you trust product reviews?", y = "Product Choice") +
  geom_smooth(method = "lm") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
Use of reviews = yes for negative reviews: more use, greater effect of a negative review on choice (no significant interaction for positive reviews)
# use of reviews: significant interaction for negative reviews only
data %>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
glm(Choice ~ Pos*Use_Product_Reviews + Neg*Use_Product_Reviews, data = .) %>%
summary()
##
## Call:
## glm(formula = Choice ~ Pos * Use_Product_Reviews + Neg * Use_Product_Reviews,
## data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.7682 -0.3679 -0.1330 0.3271 0.8670
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.24470 0.14125 1.732 0.0836 .
## Pos 0.28510 0.19600 1.455 0.1462
## Use_Product_Reviews 0.06162 0.03516 1.753 0.0801 .
## Neg 0.08830 0.20029 0.441 0.6595
## Pos:Use_Product_Reviews -0.01393 0.04878 -0.286 0.7752
## Use_Product_Reviews:Neg -0.10161 0.04941 -2.057 0.0401 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for gaussian family taken to be 0.1963961)
##
## Null deviance: 180.21 on 726 degrees of freedom
## Residual deviance: 141.60 on 721 degrees of freedom
## (1 observation deleted due to missingness)
## AIC: 887.83
##
## Number of Fisher Scoring iterations: 2
# effect of reviews for those with high versus low use of reviews
data %>%
  mutate(Choice = ifelse(Choice1 == 1, "Product 1 - with review", "Product 2 - control")) %>%
  ggplot() +
  aes(x = Use_Product_Reviews, color = factor(Review), group = Review, y = Choice) +
  labs(x = "How much do you use product reviews?", y = "Product Choice") +
  geom_smooth(method = "lm") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## Warning: Removed 1 rows containing non-finite values (stat_smooth).
Are people’s product interests affected by a single positive or negative review?
data %>%
lm(Option1Interest1 ~ Review, data = .) %>%
summary()
##
## Call:
## lm(formula = Option1Interest1 ~ Review, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.49561 -0.49561 -0.01176 0.92653 2.98824
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2.01176 0.05728 35.12 <2e-16 ***
## ReviewNone 1.48385 0.08337 17.80 <2e-16 ***
## ReviewPositive 2.06170 0.08183 25.19 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9147 on 725 degrees of freedom
## Multiple R-squared: 0.4828, Adjusted R-squared: 0.4814
## F-statistic: 338.4 on 2 and 725 DF, p-value: < 2.2e-16
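lm.beta is loaded above but not used; a sketch of how it could report standardized coefficients for this model (not run):
# sketch (not run): standardized betas via lm.beta
data %>%
  lm(Option1Interest1 ~ Review, data = .) %>%
  lm.beta() %>%
  summary()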
data %>%
  group_by(Review) %>%
  summarise(mean = mean(Option1Interest1),
            sd = sd(Option1Interest1),
            n = n(),
            SE = sd(Option1Interest1)/sqrt(n())) %>%
  ggplot() + aes(x = Review, y = mean, fill = Review) +
  geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
  xlab("Review Type") + ylab("Interest in reviewed product") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  geom_errorbar(aes(ymin = mean-SE, ymax = mean+SE), position = position_dodge(width = 0.9), width = .1) +
  geom_text(aes(label = round(mean, digits = 2)), position = position_dodge(width = .9), vjust = -.5) +
  scale_fill_brewer(palette = "Dark2")
#yes, very much so
Is the influence of reviews on product interest affected by individual differences?
Gender = yes for positive reviews
data %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
lm(Option1Interest1 ~ Pos*Gender + Neg*Gender, data = .) %>%
summary()
##
## Call:
## lm(formula = Option1Interest1 ~ Pos * Gender + Neg * Gender,
## data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.5745 -0.5745 0.0356 0.8268 2.8965
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.57450 0.08299 43.073 < 2e-16 ***
## Pos 0.38990 0.11732 3.324 0.000934 ***
## Gender -0.15546 0.11510 -1.351 0.177249
## Neg -1.47102 0.11270 -13.053 < 2e-16 ***
## Pos:Gender 0.36422 0.16128 2.258 0.024226 *
## Gender:Neg -0.04968 0.16134 -0.308 0.758217
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9117 on 721 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.489, Adjusted R-squared: 0.4855
## F-statistic: 138 on 5 and 721 DF, p-value: < 2.2e-16
data %>%
  filter(Gender == 0 | Gender == 1) %>%
  mutate(Gender = ifelse(Gender == 0, "Male", "Female")) %>%
  group_by(Review, Gender) %>%
  summarise(mean = mean(Option1Interest1),
            sd = sd(Option1Interest1),
            n = n(),
            SE = sd(Option1Interest1)/sqrt(n())) %>%
  ggplot() + aes(x = Gender, y = mean, fill = Review) +
  geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
  xlab("Gender") + ylab("Interest in reviewed product") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  geom_errorbar(aes(ymin = mean-SE, ymax = mean+SE), position = position_dodge(width = 0.9), width = .1) +
  geom_text(aes(label = round(mean, digits = 2)), position = position_dodge(width = .9), vjust = -.5) +
  scale_fill_brewer(palette = "Dark2")
# females' interest is more influenced by a positive review than males' (similar direction for negative reviews, but not significant)
Age = no difference
data %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
lm(Option1Interest1 ~ Pos*Age + Neg*Age, data = .) %>%
summary()
##
## Call:
## lm(formula = Option1Interest1 ~ Pos * Age + Neg * Age, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.51421 -0.59129 -0.00287 0.63996 2.99713
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.28296 0.15702 20.907 < 2e-16 ***
## Pos 0.49705 0.22091 2.250 0.0248 *
## Age 0.07708 0.05253 1.467 0.1427
## Neg -1.29133 0.21341 -6.051 2.31e-09 ***
## Pos:Age 0.02222 0.07166 0.310 0.7566
## Age:Neg -0.07146 0.06977 -1.024 0.3061
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9125 on 721 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.4881, Adjusted R-squared: 0.4845
## F-statistic: 137.5 on 5 and 721 DF, p-value: < 2.2e-16
Trust in reviews = a non-significant trend for positive reviews, no difference for negative
data %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
lm(Option1Interest1 ~ Pos*Trust_Reviews + Neg*Trust_Reviews, data = .) %>%
summary()
##
## Call:
## lm(formula = Option1Interest1 ~ Pos * Trust_Reviews + Neg * Trust_Reviews,
## data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.50381 -0.50381 -0.00733 0.69260 3.05380
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.43646 0.24735 13.893 < 2e-16 ***
## Pos 0.12076 0.34544 0.350 0.726754
## Trust_Reviews 0.01684 0.06827 0.247 0.805268
## Neg -1.25610 0.37269 -3.370 0.000791 ***
## Pos:Trust_Reviews 0.13320 0.09636 1.382 0.167286
## Trust_Reviews:Neg -0.06367 0.10201 -0.624 0.532711
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9133 on 722 degrees of freedom
## Multiple R-squared: 0.4866, Adjusted R-squared: 0.483
## F-statistic: 136.8 on 5 and 722 DF, p-value: < 2.2e-16
data %>%
  ggplot() +
  aes(x = Trust_Reviews, color = factor(Review), group = Review, y = Option1Interest1) +
  labs(x = "How much do you trust product reviews?", y = "Interest in reviewed product") +
  geom_smooth(method = "lm") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
# for positive reviews, the interest of people with low trust in reviews is influenced somewhat less (a trend; the interaction is not significant)
Use of reviews = yes for positive reviews, no difference for negative
data %>%
mutate(Pos = ifelse(Review == "Positive",1,0)) %>%
mutate(Neg = ifelse(Review == "Negative",1,0)) %>%
lm(Option1Interest1 ~ Pos*Use_Product_Reviews + Neg*Use_Product_Reviews, data = .) %>%
summary()
##
## Call:
## lm(formula = Option1Interest1 ~ Pos * Use_Product_Reviews + Neg *
## Use_Product_Reviews, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.66652 -0.57796 -0.01357 0.69599 3.14005
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.84364 0.28899 13.300 < 2e-16 ***
## Pos -0.59669 0.40099 -1.488 0.13718
## Use_Product_Reviews -0.08856 0.07193 -1.231 0.21866
## Neg -1.21557 0.40979 -2.966 0.00311 **
## Pos:Use_Product_Reviews 0.29997 0.09979 3.006 0.00274 **
## Use_Product_Reviews:Neg -0.06506 0.10109 -0.644 0.52001
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9067 on 721 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.4946, Adjusted R-squared: 0.4911
## F-statistic: 141.1 on 5 and 721 DF, p-value: < 2.2e-16
data %>%
  ggplot() +
  aes(x = Use_Product_Reviews, color = factor(Review), group = Review, y = Option1Interest1) +
  labs(x = "How often do you use product reviews?", y = "Interest in reviewed product") +
  geom_smooth(method = "lm") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## Warning: Removed 1 rows containing non-finite values (stat_smooth).
# for positive reviews, the interest of people who use reviews more is influenced more by the review
Do reviews affect people’s strength of preferences?
# is there a difference in preference depending on whether there's a review vs no review?
data %>%
dplyr::mutate(ReviewPresent = ifelse(Review == "None",0,1)) %>%
lm(PrefStrength1 ~ ReviewPresent, data = .) %>%
summary()
##
## Call:
## lm(formula = PrefStrength1 ~ ReviewPresent, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.6020 -0.6020 0.3980 0.5395 1.5395
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.46053 0.06606 52.381 <2e-16 ***
## ReviewPresent 0.14147 0.07972 1.775 0.0764 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9975 on 726 degrees of freedom
## Multiple R-squared: 0.00432, Adjusted R-squared: 0.002948
## F-statistic: 3.15 on 1 and 726 DF, p-value: 0.07636
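For reference, the same review-present contrast as a two-sample t-test (pooled variance, to match the regression t; not run):
# sketch (not run): t-test equivalent of the ReviewPresent model
data %>%
  dplyr::mutate(ReviewPresent = ifelse(Review == "None", 0, 1)) %>%
  t.test(PrefStrength1 ~ ReviewPresent, data = ., var.equal = TRUE)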
data %>%
  dplyr::mutate(Review = ifelse(Review == "None", "No", "Yes")) %>%
  group_by(Review) %>%
  summarise(mean = mean(PrefStrength1),
            sd = sd(PrefStrength1),
            n = n(),
            SE = sd(PrefStrength1)/sqrt(n())) %>%
  ggplot() + aes(x = Review, y = mean, fill = Review) +
  geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
  xlab("Review present?") + ylab("Preference Strength") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  geom_text(aes(label = round(mean, digits = 2)), position = position_dodge(width = .9), vjust = -.5) +
  scale_fill_brewer(palette = "Dark2") +
  geom_errorbar(aes(ymin = mean-SE, ymax = mean+SE), position = position_dodge(width = 0.9), width = .1)
# marginal effect: somewhat stronger preferences when a review is present than when there is no review
# is there a difference in strength of preference between pos, neg, versus no review?
data %>%
lm(PrefStrength1 ~ Review, data = .) %>%
summary()
##
## Call:
## lm(formula = PrefStrength1 ~ Review, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.6980 -0.5098 0.3020 0.5395 1.5395
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3.50980 0.06232 56.319 <2e-16 ***
## ReviewNone -0.04928 0.09071 -0.543 0.5871
## ReviewPositive 0.18816 0.08903 2.113 0.0349 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.9952 on 725 degrees of freedom
## Multiple R-squared: 0.01042, Adjusted R-squared: 0.007686
## F-statistic: 3.816 on 2 and 725 DF, p-value: 0.02247
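To see which pairs of conditions differ, a Tukey HSD sketch on the same model (assuming Review is treated as a factor; not run):
# sketch (not run): pairwise comparisons among Negative, None, and Positive
data %>%
  mutate(Review = factor(Review)) %>%
  aov(PrefStrength1 ~ Review, data = .) %>%
  TukeyHSD()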
data %>%
group_by(Review) %>%
summarise(mean = mean(PrefStrength1),
sd = sd(PrefStrength1),
n = n(),
SE = sd(PrefStrength1)/sqrt(n())) %>%
ggplot() + aes(x = Review, y = mean, fill = Review) +
geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
xlab("Review Type") + ylab("Preference Strength") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) + geom_text(aes(label=round(mean,digits=2)), position=position_dodge(width=.9), vjust=-.5) +scale_fill_brewer(palette="Dark2") + geom_errorbar(aes(ymin=mean-SE, ymax=mean+SE), position=position_dodge(width=0.9), width=.1)
# yes, people have a stronger preference for products when there is a positive review as compared to a negative or no review
Do people with stronger preferences make review-consistent product choices?
data%>%
mutate(Choice = ifelse(Choice1 == 1,1,0)) %>%
mutate(Choice = as.numeric(Choice)) %>%
glm(Choice ~ Review*PrefStrength1, ., family = binomial) %>%
summary()
##
## Call:
## glm(formula = Choice ~ Review * PrefStrength1, family = binomial,
## data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -1.8226 -1.1156 -0.5956 0.9060 1.9658
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.0957 0.6212 -1.764 0.0777 .
## ReviewNone 0.8193 0.7768 1.055 0.2915
## ReviewPositive 0.6165 0.8102 0.761 0.4467
## PrefStrength1 -0.1360 0.1745 -0.780 0.4357
## ReviewNone:PrefStrength1 0.2006 0.2171 0.924 0.3554
## ReviewPositive:PrefStrength1 0.5219 0.2245 2.325 0.0201 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 1002.86 on 727 degrees of freedom
## Residual deviance: 835.13 on 722 degrees of freedom
## AIC: 847.13
##
## Number of Fisher Scoring iterations: 4
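Since this model is logistic, the coefficients can be read on the odds-ratio scale; a sketch using Wald intervals (the object name fit is just for illustration; not run):
# sketch (not run): odds ratios with Wald 95% CIs for the model above
fit <- data %>%
  mutate(Choice = ifelse(Choice1 == 1, 1, 0)) %>%
  glm(Choice ~ Review*PrefStrength1, data = ., family = binomial)
exp(cbind(OR = coef(fit), confint.default(fit)))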
data %>%
  mutate(Choice = ifelse(Choice1 == 1, "Product 1 - with review", "Product 2 - control")) %>%
  ggplot() +
  aes(x = PrefStrength1, color = factor(Review), group = Review, y = Choice) +
  labs(x = "Strength of preference", y = "Product Choice") +
  geom_smooth(method = "lm") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
# yes, a significant strength x review interaction: there is no difference in the no-review condition, but the stronger people's preferences, the more they tend to choose product 1 when it has a positive review and product 2 (the other product) when product 1 has a negative review
Are people more likely to switch their choice if they saw a warning versus a control message?
data %>%
mutate(Switch = ifelse(Choice1 == Choice2,0,1)) %>%
glm(Switch ~ Warning, ., family = binomial) %>%
summary()
##
## Call:
## glm(formula = Switch ~ Warning, family = binomial, data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.5533 -0.5533 -0.3211 -0.3211 2.4458
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -2.9394 0.2239 -13.128 < 2e-16 ***
## WarningWarning 1.1401 0.2768 4.119 3.81e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 438.08 on 727 degrees of freedom
## Residual deviance: 419.79 on 726 degrees of freedom
## AIC: 423.79
##
## Number of Fisher Scoring iterations: 5
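For a quick sanity check on the size of this effect, a raw cross-tab of switching by message condition (not run):
# sketch (not run): counts of switchers vs non-switchers per condition
data %>%
  mutate(Switch = ifelse(Choice1 == Choice2, 0, 1)) %>%
  count(Warning, Switch)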
data %>%
  mutate(Switch = ifelse(Choice1 == Choice2, 0, 1)) %>%
  group_by(Warning) %>%
  summarise(Prop = sum(Switch==1)/n(),
            sd = sqrt((sum(Switch==1)/n())*(1-(sum(Switch==1)/n()))), # sd of a 0/1 variable: sqrt(p*(1-p))
            n = n(), # calculates the sample size per group
            SE = sd/sqrt(n())) %>% # standard error of the proportion
  ggplot() + aes(x = Warning, y = Prop, fill = Warning) +
  geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
  xlab("Message condition") + ylab("Proportion switched choice from time 1 to time 2") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  geom_errorbar(aes(ymin = Prop-SE, ymax = Prop+SE), position = position_dodge(width = 0.9), width = .1) +
  geom_text(aes(label = round(Prop, digits = 2)), position = position_dodge(width = .9), vjust = 5) +
  scale_fill_brewer(palette = "Dark2")
# yes, massively: about 3x as many people switched when they saw a fake-review warning as when they did not
Are people more likely to switch their choice if their initial preference was weak?
data %>%
mutate(Switch = ifelse(Choice1 == Choice2,0,1)) %>%
glm(Switch ~ PrefStrength1, ., family = binomial) %>%
summary()
##
## Call:
## glm(formula = Switch ~ PrefStrength1, family = binomial, data = .)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.5689 -0.4555 -0.4068 -0.3629 2.3466
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.5023 0.4506 -3.334 0.000856 ***
## PrefStrength1 -0.2370 0.1283 -1.847 0.064723 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 438.08 on 727 degrees of freedom
## Residual deviance: 434.69 on 726 degrees of freedom
## AIC: 438.69
##
## Number of Fisher Scoring iterations: 5
data %>%
  mutate(PrefStrength1 = as.character(PrefStrength1)) %>%
  mutate(Switch = ifelse(Choice1 == Choice2, 0, 1)) %>%
  group_by(PrefStrength1) %>%
  summarise(Prop = sum(Switch==1)/n(),
            sd = sqrt((sum(Switch==1)/n())*(1-(sum(Switch==1)/n()))), # sd of a 0/1 variable: sqrt(p*(1-p))
            n = n(), # calculates the sample size per group
            SE = sd/sqrt(n())) %>% # standard error of the proportion
  ggplot() + aes(x = PrefStrength1, y = Prop, fill = PrefStrength1) +
  geom_bar(stat = "summary", fun.y = "mean", position = "dodge") +
  xlab("Preference strength at time 1") + ylab("Proportion switched choice from time 1 to time 2") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  geom_errorbar(aes(ymin = Prop-SE, ymax = Prop+SE), position = position_dodge(width = 0.9), width = .1) +
  geom_text(aes(label = round(Prop, digits = 2)), position = position_dodge(width = .9), vjust = 5) +
  scale_fill_brewer(palette = "Dark2")
# yes, marginally
Are warnings effective in getting people to switch their choices?
(models are failing to converge – still working on this)
datachoice %>%
mutate(Choice = ifelse(Choice == 1,1,0)) %>%
mutate(Choice = as.numeric(Choice)) %>%
glmer(Choice ~ ChoiceTime*Warning*Review + (1 | ID), data = ., family = binomial) %>%
summary()
## Warning in (function (fn, par, lower = rep.int(-Inf, n), upper = rep.int(Inf, :
## failure to converge in 10000 evaluations
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 4.39498 (tol = 0.001, component 1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Choice ~ ChoiceTime * Warning * Review + (1 | ID)
## Data: .
##
## AIC BIC logLik deviance df.resid
## 1292.5 1361.2 -633.2 1266.5 1443
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.8577 -0.1373 -0.0172 0.1581 4.3104
##
## Random effects:
## Groups Name Variance Std.Dev.
## ID (Intercept) 27.07 5.203
## Number of obs: 1456, groups: ID, 722
##
## Fixed effects:
## Estimate Std. Error z value
## (Intercept) -5.4342 0.6761 -8.037
## ChoiceTimeChoice2 -0.3268 0.5658 -0.578
## WarningWarning -2.1877 1.0245 -2.135
## ReviewNone 6.4519 1.1596 5.564
## ReviewPositive 9.7341 1.3486 7.218
## ChoiceTimeChoice2:WarningWarning 4.4875 0.9978 4.498
## ChoiceTimeChoice2:ReviewNone 1.0336 0.7220 1.432
## ChoiceTimeChoice2:ReviewPositive -0.6988 0.7433 -0.940
## WarningWarning:ReviewNone -1.5682 1.4570 -1.076
## WarningWarning:ReviewPositive 2.5104 1.4604 1.719
## ChoiceTimeChoice2:WarningWarning:ReviewNone -5.7627 1.2640 -4.559
## ChoiceTimeChoice2:WarningWarning:ReviewPositive -5.2739 1.2682 -4.159
## Pr(>|z|)
## (Intercept) 9.18e-16 ***
## ChoiceTimeChoice2 0.5635
## WarningWarning 0.0327 *
## ReviewNone 2.64e-08 ***
## ReviewPositive 5.27e-13 ***
## ChoiceTimeChoice2:WarningWarning 6.87e-06 ***
## ChoiceTimeChoice2:ReviewNone 0.1523
## ChoiceTimeChoice2:ReviewPositive 0.3471
## WarningWarning:ReviewNone 0.2818
## WarningWarning:ReviewPositive 0.0856 .
## ChoiceTimeChoice2:WarningWarning:ReviewNone 5.14e-06 ***
## ChoiceTimeChoice2:WarningWarning:ReviewPositive 3.20e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ChcTC2 WrnngW RevwNn RvwPst ChTC2:WW CTC2:RN CTC2:RP WrW:RN
## ChoicTmChc2 -0.309
## WarnngWrnng 0.021 0.279
## ReviewNone -0.769 0.160 -0.155
## ReviewPostv -0.824 0.119 -0.258 0.702
## ChcTmCh2:WW -0.134 -0.601 -0.705 0.210 0.296
## ChcTmCh2:RN 0.195 -0.789 -0.255 -0.200 -0.038 0.505
## ChcTmCh2:RP 0.314 -0.753 -0.152 -0.198 -0.324 0.402 0.586
## WrnngWrn:RN 0.463 -0.144 -0.339 -0.650 -0.382 0.158 0.187 0.146
## WrnngWrn:RP -0.099 -0.205 -0.766 0.190 0.090 0.555 0.194 0.230 0.199
## ChTC2:WW:RN 0.154 0.480 0.594 -0.144 -0.291 -0.824 -0.620 -0.313 -0.240
## ChTC2:WW:RP 0.147 0.478 0.587 -0.205 -0.199 -0.817 -0.405 -0.559 -0.106
## WrW:RP CTC2:WW:RN
## ChoicTmChc2
## WarnngWrnng
## ReviewNone
## ReviewPostv
## ChcTmCh2:WW
## ChcTmCh2:RN
## ChcTmCh2:RP
## WrnngWrn:RN
## WrnngWrn:RP
## ChTC2:WW:RN -0.472
## ChTC2:WW:RP -0.641 0.676
## convergence code: 0
## Model failed to converge with max|grad| = 4.39498 (tol = 0.001, component 1)
## failure to converge in 10000 evaluations
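A common first step for the convergence warnings above is to try a different optimizer and a larger evaluation budget via glmerControl (the very large ID variance also hints at near-separation); a sketch, not run:
# sketch (not run): refit with the bobyqa optimizer and more evaluations
datachoice %>%
  mutate(Choice = ifelse(Choice == 1, 1, 0)) %>%
  glmer(Choice ~ ChoiceTime*Warning*Review + (1 | ID), data = ., family = binomial,
        control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 2e5))) %>%
  summary()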
datachoice %>%
mutate(Choice = ifelse(Choice == 1,1,0)) %>%
mutate(Choice = as.numeric(Choice)) %>%
mutate(Positive = ifelse(Review == "Positive",1,0)) %>%
mutate(Negative = ifelse(Review == "Negative",1,0)) %>%
glmer(Choice ~ ChoiceTime*Warning*Positive + ChoiceTime*Warning*Negative + (1 | ID), ., family = binomial) %>%
summary()
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 4.13439 (tol = 0.001, component 1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: Choice ~ ChoiceTime * Warning * Positive + ChoiceTime * Warning *
## Negative + (1 | ID)
## Data: .
##
## AIC BIC logLik deviance df.resid
## 1271.0 1339.6 -622.5 1245.0 1443
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.1598 -0.0949 -0.0091 0.1198 4.8211
##
## Random effects:
## Groups Name Variance Std.Dev.
## ID (Intercept) 43.32 6.582
## Number of obs: 1456, groups: ID, 722
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.62153 1.13010 -0.550 0.58233
## ChoiceTimeChoice2 0.09927 0.46333 0.214 0.83036
## WarningWarning -5.27275 1.29993 -4.056 4.99e-05
## Positive 6.31262 1.50018 4.208 2.58e-05
## Negative -5.41861 1.14262 -4.742 2.11e-06
## ChoiceTimeChoice2:WarningWarning 1.33085 0.82734 1.609 0.10771
## ChoiceTimeChoice2:Positive 0.12283 0.72201 0.170 0.86491
## WarningWarning:Positive 4.90569 1.66811 2.941 0.00327
## ChoiceTimeChoice2:Negative -0.50869 0.76745 -0.663 0.50744
## WarningWarning:Negative 2.30618 1.61943 1.424 0.15443
## ChoiceTimeChoice2:WarningWarning:Positive -3.34931 1.18674 -2.822 0.00477
## ChoiceTimeChoice2:WarningWarning:Negative 3.76464 1.27162 2.960 0.00307
##
## (Intercept)
## ChoiceTimeChoice2
## WarningWarning ***
## Positive ***
## Negative ***
## ChoiceTimeChoice2:WarningWarning
## ChoiceTimeChoice2:Positive
## WarningWarning:Positive **
## ChoiceTimeChoice2:Negative
## WarningWarning:Negative
## ChoiceTimeChoice2:WarningWarning:Positive **
## ChoiceTimeChoice2:WarningWarning:Negative **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) ChcTC2 WrnngW Positv Negatv ChTC2:WW CTC2:P WrnW:P CTC2:N
## ChoicTmChc2 -0.210
## WarnngWrnng -0.623 0.176
## Positive -0.892 0.161 0.421
## Negative -0.846 0.204 0.666 0.677
## ChcTmCh2:WW 0.029 -0.558 -0.418 0.037 -0.089
## ChcTmChc2:P 0.119 -0.641 -0.118 -0.210 -0.126 0.365
## WrnngWrnn:P 0.415 -0.135 -0.804 -0.341 -0.500 0.355 0.213
## ChcTmChc2:N 0.151 -0.604 -0.098 -0.133 -0.305 0.326 0.386 0.066
## WrnngWrnn:N 0.645 -0.145 -0.753 -0.547 -0.720 0.274 0.086 0.556 0.222
## ChcTC2:WW:P 0.041 0.387 0.312 -0.035 0.044 -0.723 -0.615 -0.470 -0.219
## ChcTC2:WW:N -0.183 0.367 0.215 0.212 0.210 -0.581 -0.227 -0.129 -0.616
## WrnW:N CTC2:WW:P
## ChoicTmChc2
## WarnngWrnng
## Positive
## Negative
## ChcTmCh2:WW
## ChcTmChc2:P
## WrnngWrnn:P
## ChcTmChc2:N
## WrnngWrnn:N
## ChcTC2:WW:P -0.164
## ChcTC2:WW:N -0.511 0.373
## convergence code: 0
## Model failed to converge with max|grad| = 4.13439 (tol = 0.001, component 1)
# yes, warnings are effective at shifting choices, more so for negative reviews
Are warnings effective in changing people's interest in the products?
data %>%
  mutate(InterestChange = (Option1Interest2-Option1Interest1)) %>%
  mutate(InterestChange = as.numeric(InterestChange)) %>%
  lm(InterestChange ~ Review*Warning, data = .) %>%
  summary()
##
## Call:
## lm(formula = InterestChange ~ Review * Warning, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.18939 -0.18939 -0.09929 0.17699 3.08182
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.17241 0.06018 2.865 0.00429 **
## ReviewNone -0.07312 0.08571 -0.853 0.39388
## ReviewPositive 0.01698 0.08718 0.195 0.84563
## WarningWarning 0.74577 0.09163 8.139 1.75e-15 ***
## ReviewNone:WarningWarning -0.81058 0.13475 -6.015 2.85e-09 ***
## ReviewPositive:WarningWarning -1.11215 0.13047 -8.524 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.7247 on 722 degrees of freedom
## Multiple R-squared: 0.1683, Adjusted R-squared: 0.1625
## F-statistic: 29.22 on 5 and 722 DF, p-value: < 2.2e-16
data %>%
  mutate(InterestChange = (Option1Interest2-Option1Interest1)) %>%
  mutate(InterestChange = as.numeric(InterestChange)) %>%
  mutate(Positive = ifelse(Review == "Positive",1,0)) %>%
  mutate(Negative = ifelse(Review == "Negative",1,0)) %>%
  lm(InterestChange ~ Positive*Warning + Negative*Warning, data = .) %>%
  summary()
##
## Call:
## lm(formula = InterestChange ~ Positive * Warning + Negative *
##     Warning, data = .)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2.18939 -0.18939 -0.09929 0.17699 3.08182
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.09929 0.06103 1.627 0.1042
## Positive 0.09010 0.08777 1.027 0.3050
## WarningWarning -0.06481 0.09880 -0.656 0.5121
## Negative 0.07312 0.08571 0.853 0.3939
## Positive:WarningWarning -0.30158 0.13560 -2.224 0.0265 *
## WarningWarning:Negative 0.81058 0.13475 6.015 2.85e-09 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.7247 on 722 degrees of freedom
## Multiple R-squared: 0.1683, Adjusted R-squared: 0.1625
## F-statistic: 29.22 on 5 and 722 DF, p-value: < 2.2e-16
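To accompany these models, a sketch of the cell means for the change in interest, following the summarise pattern used above (not run):
# sketch (not run): mean change in interest by review and warning condition
data %>%
  mutate(InterestChange = Option1Interest2 - Option1Interest1) %>%
  group_by(Review, Warning) %>%
  summarise(mean = mean(InterestChange),
            SE = sd(InterestChange)/sqrt(n()),
            n = n())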