This analysis uses only data from before the first exam.
library(tidyverse)
library(car)
library(nlme)
library(haven)
recode_df <- read_csv('/Users/joshuarosenberg/Dropbox/1_Research/Engagement/yday.csv')
recode <- filter(recode_df, yday <= 48)
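# Bin days into twelve four-day waves: wave 12 is the earliest bin (yday 1-4) and wave 1 is the bin just before day 48 (yday 45-48)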
recode <- mutate(recode, wave = case_when(
  yday <= 4 ~ 12,
  yday > 4 & yday <= 8 ~ 11,
  yday > 8 & yday <= 12 ~ 10,
  yday > 12 & yday <= 16 ~ 9,
  yday > 16 & yday <= 20 ~ 8,
  yday > 20 & yday <= 24 ~ 7,
  yday > 24 & yday <= 28 ~ 6,
  yday > 28 & yday <= 32 ~ 5,
  yday > 32 & yday <= 36 ~ 4,
  yday > 36 & yday <= 40 ~ 3,
  yday > 40 & yday <= 44 ~ 2,
  yday > 44 & yday <= 48 ~ 1
))
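# Sum the minutes watched per student within each wave (note: despite its name, mean_time_watched_minutes holds a sum)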
recode <- recode %>%
  group_by(student_ID, wave) %>%
  summarize(mean_time_watched_minutes = sum(time_watched_minutes, na.rm = TRUE))
recode_df %>%
  group_by(yday) %>%
  summarize(sum_tw = sum(time_watched_minutes)) %>%
  ggplot(aes(x = yday, y = sum_tw)) +
  geom_point() +
  geom_line()
# library(lme4)
#
# m1 <- lmer(time_watched_minutes ~ wave + I(wave ^ 2) +
#              (wave + I(wave ^ 2) | student_ID), data = recode,
#            control = lmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))
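# Create an nlme grouped-data object (response ~ time covariate | student) and relax the optimizer limits before fitting the quadratic growth model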
recode.grouped <- groupedData(mean_time_watched_minutes ~ wave|student_ID, data = recode, order.groups = F)
ctrl <- lmeControl(opt='optim', maxIter=1e8, msMaxIter = 1e8)
model5.1a <- lme(mean_time_watched_minutes ~ wave + I(wave^2),
                 random = ~ wave + I(wave^2), method = "REML",
                 data = recode.grouped, na.action = na.omit, control = ctrl)
# model5.1b <- lme(time_watched_minutes ~ wave + I(wave^2) + I(wave ^ 3),
#                  random = ~ wave + I(wave^2) + I(wave ^ 3), method = "REML",
#                  data = recode.grouped, na.action = na.omit, control = ctrl)
#
# model5.1bb <- lme(time_watched_minutes ~ wave + I(wave^2) + I(wave ^ 3),
#                   random = ~ wave + I(wave^2) + I(wave ^ 3), method = "REML",
#                   data = recode.grouped, na.action = na.omit, control = ctrl)
# pick a model
model5.1 <- model5.1a
# model5.1 <- lme(View ~ TimeN_tilexam + I(TimeN_tilexam^2) + I(TimeN_tilexam^3)
#                 + I(TimeN_tilexam^4) + I(TimeN_tilexam^5),
#                 random = ~ TimeN_tilexam, method = "REML",
#                 data = recode.grouped, na.action = na.omit)
# summary(model5.1)
# model1a <- lme(View ~ TimeN_tilexam + I(TimeN_tilexam^2),
#                random = ~ TimeN_tilexam + I(TimeN_tilexam^2), method = "REML",
#                data = recode.grouped, na.action = na.omit, control = ctrl)
# summary(model1a)
#
# model5.1 <- model1b
broom::glance(model5.1)
## sigma logLik AIC BIC deviance
## 1 29.419 -15875.97 31771.94 31832.84 NA
dff <- rownames_to_column(random.effects(model5.1))
dff <- tbl_df(dff)
names(dff) <- c("ID", "intercept", "linear_slope", "quadratic_slope")
dff <- mutate(dff,
              intercept = intercept + fixed.effects(model5.1)[1],
              linear_slope = linear_slope + fixed.effects(model5.1)[2],
              quadratic_slope = quadratic_slope + fixed.effects(model5.1)[3])
# names(dff) <- c("ID", "intercept", "linear_slope", "quadratic_slope", "cubic_slope")
#
# dff <- mutate(dff,
#               intercept = intercept + 17.52,
#               linear_slope = linear_slope - 6.61,
#               quadratic_slope = quadratic_slope + .979,
#               cubic_slope = cubic_slope - .045)
dff$ID <- as.integer(dff$ID)
df_coef <- tbl_df(coef(model5.1))
names(df_coef) <- c("unadjusted_intercept", "unadjusted_linear_slope", "quadratic_slope")
# names(df_coef) <- c("unadjusted_intercept", "unadjusted_linear_slope", "quadratic_slope", "cubic_slope")
df_coef <- df_coef %>% rownames_to_column() %>% rename(ID = rowname) %>% mutate(ID = as.integer(ID))
dff <- left_join(dff, df_coef)
grades_data <- read_sav("~/Dropbox/1_research/Engagement/data/all_three_semesters.sav")
grades_data <- select(grades_data, ID, contains("Exam"), FinalGrade)
grades_data$ID <- as.integer(grades_data$ID)
IDs_data <- read_csv("~/Dropbox/1_Research/Engagement/Archive/ss15_key.csv")
IDs_data <- select(IDs_data, ID, username = Username)
demographic_data <- left_join(IDs_data, grades_data, by = "ID")
df <- left_join(dff, demographic_data)
all_three_semesters <- read_sav("~/Dropbox/1_Research/Engagement/Data/all_three_semesters.sav")
all_three_semesters$ID <- as.integer(all_three_semesters$ID)
dfm <- left_join(df, all_three_semesters, by = "ID")
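# Reverse-code negatively worded items (1 <-> 5, 2 <-> 4); note that a response of 3 is not matched by any branch and becomes NA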
dfm$T1Q003 <- ifelse(dfm$T1Q003 == 1, 5,
                     ifelse(dfm$T1Q003 == 2, 4,
                            ifelse(dfm$T1Q003 == 4, 2,
                                   ifelse(dfm$T1Q003 == 5, 1, NA))))
dfm$T1Q017 <- ifelse(dfm$T1Q017 == 1, 5,
                     ifelse(dfm$T1Q017 == 2, 4,
                            ifelse(dfm$T1Q017 == 4, 2,
                                   ifelse(dfm$T1Q017 == 5, 1, NA))))
dfm$T1Q042 <- ifelse(dfm$T1Q042 == 1, 5,
                     ifelse(dfm$T1Q042 == 2, 4,
                            ifelse(dfm$T1Q042 == 4, 2,
                                   ifelse(dfm$T1Q042 == 5, 1, NA))))
dfm$T1Q034 <- ifelse(dfm$T1Q034 == 1, 5,
                     ifelse(dfm$T1Q034 == 2, 4,
                            ifelse(dfm$T1Q034 == 4, 2,
                                   ifelse(dfm$T1Q034 == 5, 1, NA))))
dfm$T1Q025 <- ifelse(dfm$T1Q025 == 1, 5,
                     ifelse(dfm$T1Q025 == 2, 4,
                            ifelse(dfm$T1Q025 == 4, 2,
                                   ifelse(dfm$T1Q025 == 5, 1, NA))))
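# Build motivation composites as (row-wise) means of the listed survey items via jmRtools::composite_mean_maker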
dfm$cost_value <- jmRtools::composite_mean_maker(dfm, T1Q003, T1Q017, T1Q042, T1Q034, T1Q025)
dfm$perceived_competence <- jmRtools::composite_mean_maker(dfm, T1Q016, T1Q007, T1Q028, T1Q035, T1Q022)
dfm$utility_value <- jmRtools::composite_mean_maker(dfm, T1Q038, T1Q014, T1Q026, T1Q005, T1Q043)
dfm$interest_value <- jmRtools::composite_mean_maker(dfm, T1Q036, T1Q019, T1Q001, T1Q032, T1Q041)
dfm$attainment_value <- jmRtools::composite_mean_maker(dfm, T1Q024, T1Q009, T1Q045, T1Q030, T1Q012)
dfm$task_value <- jmRtools::composite_mean_maker(dfm, T1Q038, T1Q014, T1Q026, T1Q005, T1Q043, T1Q036, T1Q019, T1Q001, T1Q032, T1Q041, T1Q024, T1Q009, T1Q045, T1Q030, T1Q012)
dfm$mastery_approach <- jmRtools::composite_mean_maker(dfm, T1Q003, T1Q012, T1Q020, T1Q024, T1Q027)
dfm$mastery_avoid <- jmRtools::composite_mean_maker(dfm, T1Q036, T1Q015, T1Q006, T1Q022)
dfm$performance_approach <- jmRtools::composite_mean_maker(dfm, T1Q034, T1Q008, T1Q033, T1Q025, T1Q039)
dfm$performance_avoid <- jmRtools::composite_mean_maker(dfm, T1Q041, T1Q030, T1Q018, T1Q004)
dfm <- dfm %>%
  select(ID, cost_value, perceived_competence, utility_value,
         final_grade = Percent_FinalGrade, intercept, linear_slope, quadratic_slope,
         task_value, mastery_approach, mastery_avoid, performance_approach, performance_avoid)
# dfm <- dfm %>% select(ID, cost_value, perceived_competence, utility_value, final_grade = Percent_FinalGrade, intercept, linear_slope, quadratic_slope, cubic_slope, task_value)
library(prcr)
x <- create_profiles(dfm, mastery_approach, performance_approach, performance_avoid,
                     n_profiles = 3, to_center = TRUE, to_scale = TRUE)
plot(x)
For some reason, we get a different overall pattern when we use the population-level predictions (i.e., predictions from the fixed effects alone, not for individual students). There is also some commented-out code for plotting the fitted equation directly.
The plot below uses the student-level predictions.
In these models, each student's intercept and slopes are the fixed effects adjusted by that student's random effects.
Look out, there is a lot going on here.
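As a point of reference, here is a minimal sketch of how the population-level and student-level predictions could be pulled side by side; pred_check is just an illustrative name, not an object from the original analysis.
# Sketch: population-level (level = 0, fixed effects only) vs. student-level
# (level = 1, fixed plus random effects) predictions from the fitted model;
# pred_check is a hypothetical object name used only here
pred_check <- data.frame(as.data.frame(recode.grouped),
                         pred_population = predict(model5.1, recode.grouped, level = 0),
                         pred_student = predict(model5.1, recode.grouped, level = 1))
head(pred_check)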
dfm_b <- dfm
predictions <- predict(model5.1, recode.grouped, level = 1)
df_for_plot <- as.tibble(bind_cols(as.data.frame(recode.grouped), predictions = predictions))
df_for_plot$student_ID <- as.integer(as.character(df_for_plot$student_ID))
# x <- 0:5
# dat <- data.frame(x,
# y = 8.15 - (15.12 * x) + (14.78 * (x ^ 2)) - (6.12 * (x ^ 3)) + (1.15 * (x ^ 4)) - (.08 * (x ^ 5)))
#
# f <- function(x) 8.15 - (15.12 * x) + (14.78 * (x ^ 2)) - (6.12 * (x ^ 3)) + (1.15 * (x ^ 4)) - (.08 * (x ^ 5))
# ggplot(dat, aes(x,y)) +
# stat_function(fun=f, colour="black") +
# hrbrthemes::theme_ipsum() +
# ylab("Mean Minutes Watched / Day") +
# xlab("Weeks Before Exam")
dfm <- rename(df, student_ID = ID)
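# Note: df carries the SPSS column FinalGrade, not final_grade, which is why the next line triggers the warning below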
dfm$final_grade <- as.vector(dfm$final_grade)
## Warning: Unknown or uninitialised column: 'final_grade'.
df_for_plot %>%
  left_join(dfm) %>%
  filter(!is.na(FinalGrade) & !is.na(student_ID)) %>%
  select(student_ID, wave, predictions) %>%
  group_by(student_ID, wave) %>%
  summarize(predictions = sum(predictions, na.rm = TRUE)) %>%
  spread(wave, predictions) %>%
  gather(key, val, -student_ID) %>%
  ungroup() %>%
  mutate(ID = as.factor(student_ID),
         key = factor(key, levels = 0:12)) %>%
  ggplot(aes(x = key, y = val, group = ID)) +
  geom_point() +
  geom_line() +
  viridis::scale_color_viridis() +
  hrbrthemes::theme_ipsum() +
  xlab("Wave") +
  ylab("Mean Minutes Viewed")
## Joining, by = "student_ID"
Here are some models, first for antecedents of the student-specific intercepts and slopes.
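The chunk that fit these models is not echoed; based on the Call: lines in the output below, it presumably looked something like this (a sketch, not the original code):
# Sketch (inferred from the Call: lines below): antecedents of the growth parameters
summary(lm(intercept ~ task_value * perceived_competence, data = dfm_b))
summary(lm(linear_slope ~ task_value * perceived_competence, data = dfm_b))
summary(lm(quadratic_slope ~ task_value * perceived_competence, data = dfm_b))
# ... with parallel models substituting cost_value and utility_value for task_value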
##
## Call:
## lm(formula = intercept ~ task_value * perceived_competence, data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -47.50 -25.29 -10.17 19.14 169.86
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 6.656 101.287 0.066 0.948
## task_value 11.760 25.196 0.467 0.641
## perceived_competence 5.721 25.406 0.225 0.822
## task_value:perceived_competence -1.773 6.095 -0.291 0.771
##
## Residual standard error: 35.73 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.004037, Adjusted R-squared: -0.008011
## F-statistic: 0.3351 on 3 and 248 DF, p-value: 0.8
##
## Call:
## lm(formula = linear_slope ~ task_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -42.443 -4.837 2.935 6.665 12.714
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.8278 26.4521 0.031 0.975
## task_value -2.5231 6.5802 -0.383 0.702
## perceived_competence -1.1620 6.6352 -0.175 0.861
## task_value:perceived_competence 0.3540 1.5919 0.222 0.824
##
## Residual standard error: 9.332 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.00346, Adjusted R-squared: -0.008595
## F-statistic: 0.287 on 3 and 248 DF, p-value: 0.8347
##
## Call:
## lm(formula = quadratic_slope ~ task_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.7828 -0.4165 -0.1836 0.2943 2.5617
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.05343 1.62361 -0.033 0.974
## task_value 0.14405 0.40388 0.357 0.722
## perceived_competence 0.06513 0.40726 0.160 0.873
## task_value:perceived_competence -0.01960 0.09771 -0.201 0.841
##
## Residual standard error: 0.5728 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.003298, Adjusted R-squared: -0.008759
## F-statistic: 0.2735 on 3 and 248 DF, p-value: 0.8445
##
## Call:
## lm(formula = intercept ~ cost_value * perceived_competence, data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -48.880 -23.757 -9.145 19.901 163.033
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -66.742 69.299 -0.963 0.3364
## cost_value 30.535 20.016 1.526 0.1284
## perceived_competence 30.472 16.537 1.843 0.0666 .
## cost_value:perceived_competence -8.036 4.668 -1.721 0.0864 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 35.5 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.01681, Adjusted R-squared: 0.004913
## F-statistic: 1.413 on 3 and 248 DF, p-value: 0.2395
##
## Call:
## lm(formula = linear_slope ~ cost_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -40.451 -5.481 2.696 6.897 13.116
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 18.775 18.084 1.038 0.3002
## cost_value -6.919 5.223 -1.325 0.1865
## perceived_competence -7.471 4.315 -1.731 0.0846 .
## cost_value:perceived_competence 1.908 1.218 1.567 0.1185
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 9.265 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.01779, Adjusted R-squared: 0.005908
## F-statistic: 1.497 on 3 and 248 DF, p-value: 0.2159
##
## Call:
## lm(formula = quadratic_slope ~ cost_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.8079 -0.4283 -0.1695 0.3410 2.4354
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -1.13977 1.10958 -1.027 0.3053
## cost_value 0.40672 0.32049 1.269 0.2056
## perceived_competence 0.45104 0.26478 1.703 0.0897 .
## cost_value:perceived_competence -0.11397 0.07474 -1.525 0.1286
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.5684 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.01833, Adjusted R-squared: 0.00645
## F-statistic: 1.543 on 3 and 248 DF, p-value: 0.2039
##
## Call:
## lm(formula = intercept ~ utility_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -46.771 -25.237 -9.904 18.245 169.687
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 31.8942 98.2789 0.325 0.746
## utility_value 4.4745 23.2497 0.192 0.848
## perceived_competence 0.2288 25.3845 0.009 0.993
## utility_value:perceived_competence -0.2250 5.8125 -0.039 0.969
##
## Residual standard error: 35.76 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.002691, Adjusted R-squared: -0.009374
## F-statistic: 0.223 on 3 and 248 DF, p-value: 0.8804
##
## Call:
## lm(formula = linear_slope ~ utility_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -42.404 -4.804 2.755 6.733 12.560
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -3.66984 25.66687 -0.143 0.886
## utility_value -1.11282 6.07198 -0.183 0.855
## perceived_competence -0.32270 6.62951 -0.049 0.961
## utility_value:perceived_competence 0.08693 1.51802 0.057 0.954
##
## Residual standard error: 9.338 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.002094, Adjusted R-squared: -0.009977
## F-statistic: 0.1735 on 3 and 248 DF, p-value: 0.9143
##
## Call:
## lm(formula = quadratic_slope ~ utility_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.7741 -0.4260 -0.1802 0.2876 2.5593
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.178133 1.575389 0.113 0.910
## utility_value 0.068111 0.372688 0.183 0.855
## perceived_competence 0.025902 0.406908 0.064 0.949
## utility_value:perceived_competence -0.006081 0.093174 -0.065 0.948
##
## Residual standard error: 0.5732 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.001955, Adjusted R-squared: -0.01012
## F-statistic: 0.162 on 3 and 248 DF, p-value: 0.9219
Here are models with the antecedents predicting final grade.
m3 <- lm(final_grade ~ task_value * perceived_competence, data = dfm_b)
summary(m3)
##
## Call:
## lm(formula = final_grade ~ task_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -41.019 -7.050 1.993 7.841 20.421
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 11.364 29.427 0.386 0.6997
## task_value 14.918 7.320 2.038 0.0426 *
## perceived_competence 14.440 7.381 1.956 0.0515 .
## task_value:perceived_competence -2.836 1.771 -1.601 0.1106
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.38 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.09329, Adjusted R-squared: 0.08232
## F-statistic: 8.506 on 3 and 248 DF, p-value: 2.125e-05
konfound::konfound(m3, task_value)
## To invalidate the inference, 3.36 % of the estimate would have to be due to bias.
## To invalidate the inference, 8 observations would have to be replaced with cases for which there is no effect.
m4 <- lm(final_grade ~ mastery_approach + performance_approach + performance_avoid, data = dfm_b)
summary(m4)
##
## Call:
## lm(formula = final_grade ~ mastery_approach + performance_approach +
## performance_avoid, data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -40.111 -7.308 1.791 8.539 20.870
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 68.0625 5.6055 12.142 <2e-16 ***
## mastery_approach 0.5390 1.8857 0.286 0.7752
## performance_approach 0.2353 1.3403 0.176 0.8608
## performance_avoid 3.4975 1.7647 1.982 0.0486 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.67 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.04301, Adjusted R-squared: 0.03143
## F-statistic: 3.715 on 3 and 248 DF, p-value: 0.01214
m3 <- lm(final_grade ~ cost_value * perceived_competence, data = dfm_b)
summary(m3)
##
## Call:
## lm(formula = final_grade ~ cost_value * perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -41.581 -6.738 1.752 7.463 20.691
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 36.204 20.359 1.778 0.0766 .
## cost_value 8.766 5.881 1.491 0.1373
## perceived_competence 9.632 4.858 1.983 0.0485 *
## cost_value:perceived_competence -1.614 1.371 -1.177 0.2404
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.43 on 248 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.08473, Adjusted R-squared: 0.07366
## F-statistic: 7.653 on 3 and 248 DF, p-value: 6.522e-05
I also looked at outcomes. I adjusted the linear term so that it is associated with change per day rather than per wave, though I am still having a hard time interpreting these coefficients. That said, the intercept does not look (linearly) associated with lower final grades, which is somewhat surprising, though maybe not entirely, because quartile 3 demonstrated higher final grades.
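The chunk that fit these outcome models is not echoed either; based on the Call: lines in the output below, they were presumably along these lines (a sketch, not the original code):
# Sketch (inferred from the Call: lines below): growth parameters and motivation
# composites predicting final grade
summary(lm(final_grade ~ intercept + I(linear_slope * 4) + I(quadratic_slope * 4) +
             task_value + perceived_competence, data = dfm_b))
summary(lm(final_grade ~ intercept, data = dfm_b))
summary(lm(final_grade ~ linear_slope, data = dfm_b))
summary(lm(final_grade ~ quadratic_slope, data = dfm_b))
summary(lm(final_grade ~ intercept + I(linear_slope * 4) + I(quadratic_slope * 4) +
             cost_value + perceived_competence, data = dfm_b))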
##
## Call:
## lm(formula = final_grade ~ intercept + I(linear_slope * 4) +
## I(quadratic_slope * 4) + task_value + perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -33.153 -5.237 0.841 6.212 21.051
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 20.541 11.896 1.727 0.0855 .
## intercept 3.045 1.179 2.582 0.0104 *
## I(linear_slope * 4) 8.754 4.859 1.801 0.0729 .
## I(quadratic_slope * 4) 95.717 61.037 1.568 0.1181
## task_value 2.884 1.306 2.208 0.0282 *
## perceived_competence 3.276 1.344 2.438 0.0155 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 9.142 on 246 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.3026, Adjusted R-squared: 0.2884
## F-statistic: 21.35 on 5 and 246 DF, p-value: < 2.2e-16
##
## Call:
## lm(formula = final_grade ~ intercept, data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -39.648 -6.770 1.398 7.954 20.830
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 82.35151 1.12533 73.180 <2e-16 ***
## intercept 0.03416 0.01891 1.806 0.0721 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.75 on 254 degrees of freedom
## (16 observations deleted due to missingness)
## Multiple R-squared: 0.01268, Adjusted R-squared: 0.008789
## F-statistic: 3.261 on 1 and 254 DF, p-value: 0.07213
##
## Call:
## lm(formula = final_grade ~ linear_slope, data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -40.359 -6.918 1.693 8.017 20.565
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 83.41602 0.89905 92.782 <2e-16 ***
## linear_slope -0.06932 0.07277 -0.953 0.342
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.8 on 254 degrees of freedom
## (16 observations deleted due to missingness)
## Multiple R-squared: 0.00356, Adjusted R-squared: -0.0003628
## F-statistic: 0.9075 on 1 and 254 DF, p-value: 0.3417
##
## Call:
## lm(formula = final_grade ~ quadratic_slope, data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -40.537 -6.939 1.907 8.037 20.458
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 83.5936 0.8723 95.835 <2e-16 ***
## quadratic_slope 0.8348 1.1866 0.704 0.482
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 10.81 on 254 degrees of freedom
## (16 observations deleted due to missingness)
## Multiple R-squared: 0.001945, Adjusted R-squared: -0.001985
## F-statistic: 0.4949 on 1 and 254 DF, p-value: 0.4824
##
## Call:
## lm(formula = final_grade ~ intercept + I(linear_slope * 4) +
## I(quadratic_slope * 4) + cost_value + perceived_competence,
## data = dfm_b)
##
## Residuals:
## Min 1Q Median 3Q Max
## -33.656 -5.279 0.750 6.356 20.457
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 22.8493 11.9446 1.913 0.0569 .
## intercept 3.0694 1.1886 2.582 0.0104 *
## I(linear_slope * 4) 8.8874 4.9001 1.814 0.0709 .
## I(quadratic_slope * 4) 97.5715 61.5616 1.585 0.1143
## cost_value 0.9736 0.8502 1.145 0.2532
## perceived_competence 4.6189 1.1472 4.026 7.55e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 9.207 on 246 degrees of freedom
## (20 observations deleted due to missingness)
## Multiple R-squared: 0.2925, Adjusted R-squared: 0.2782
## F-statistic: 20.35 on 5 and 246 DF, p-value: < 2.2e-16
dfm_b %>% ggplot(aes(y = final_grade, x = intercept)) + geom_point() + geom_smooth(method = "lm")
## Warning: Removed 16 rows containing non-finite values (stat_smooth).
## Warning: Removed 16 rows containing missing values (geom_point).
dfm_b %>% ggplot(aes(y = final_grade, x = linear_slope)) + geom_point() + geom_smooth(method = 'lm')
## Warning: Removed 16 rows containing non-finite values (stat_smooth).
## Warning: Removed 16 rows containing missing values (geom_point).
dfm_b %>% ggplot(aes(y = final_grade, x = quadratic_slope)) + geom_point() + geom_smooth(method = 'lm')
## Warning: Removed 16 rows containing non-finite values (stat_smooth).
## Warning: Removed 16 rows containing missing values (geom_point).
dfm_b %>% ggplot(aes(y = final_grade, x = intercept)) + geom_point() + geom_smooth()
## `geom_smooth()` using method = 'loess' and formula 'y ~ x'
## Warning: Removed 16 rows containing non-finite values (stat_smooth).
## Warning: Removed 16 rows containing missing values (geom_point).
dfm_b %>% ggplot(aes(y = final_grade, x = linear_slope)) + geom_point() + geom_smooth()
## `geom_smooth()` using method = 'loess' and formula 'y ~ x'
## Warning: Removed 16 rows containing non-finite values (stat_smooth).
## Warning: Removed 16 rows containing missing values (geom_point).
dfm_b %>% ggplot(aes(y = final_grade, x = quadratic_slope)) + geom_point() + geom_smooth()
## `geom_smooth()` using method = 'loess' and formula 'y ~ x'
## Warning: Removed 16 rows containing non-finite values (stat_smooth).
## Warning: Removed 16 rows containing missing values (geom_point).