library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1
## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(ISLR2)
library(klaR)
## Loading required package: MASS
##
## Attaching package: 'MASS'
##
## The following object is masked from 'package:ISLR2':
##
## Boston
##
## The following object is masked from 'package:dplyr':
##
## select
library(caret)
## Loading required package: lattice
##
## Attaching package: 'caret'
##
## The following object is masked from 'package:purrr':
##
## lift
library(gam)
## Loading required package: splines
## Loading required package: foreach
##
## Attaching package: 'foreach'
##
## The following objects are masked from 'package:purrr':
##
## accumulate, when
##
## Loaded gam 1.22-5
In this exercise, you will further analyze the Wage data set considered throughout this chapter.
attach(Wage)
#####(a) Perform polynomial regression to predict wage using age. Use cross-validation to select the optimal degree d for the polynomial. What degree was chosen, and how does this compare to the results of hypothesis testing using ANOVA? Make a plot of the resulting polynomial fit to the data.
ctrl = trainControl(method = "cv", number = 10)
cv_rmse = c()
set.seed(23)
# Fit polynomials of degree 1 through 10 and record the 10-fold CV RMSE for each
for (i in 1:10) {
  model = train(y = Wage$wage,
                x = poly(Wage$age, i, raw = TRUE, simple = TRUE),
                trControl = ctrl,
                method = "lm",
                metric = "RMSE")
  cv_rmse[i] = model$results$RMSE
}
which.min(cv_rmse)
## [1] 10
According to the cross-validation we would pick a tenth-order polynomial; however, the RMSE values for the second- through tenth-order polynomials are so close together that a different seed could easily select a different degree.
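To see how flat the error curve is, here is a quick plot of the CV RMSE against polynomial degree (a sketch that reuses the cv_rmse vector from the loop above):
# CV RMSE barely changes beyond degree 2, so the CV choice of degree 10 is not decisive
data.frame(degree = 1:10, cv_rmse = cv_rmse) %>%
  ggplot(aes(x = degree, y = cv_rmse)) +
  geom_point() +
  geom_line() +
  scale_x_continuous(breaks = 1:10) +
  labs(title = "10-Fold CV RMSE by Polynomial Degree") +
  theme_minimal()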
fit.1 = lm(wage ~ age, data = Wage)
fit.2 = lm(wage ~ poly(age, 2, raw = T), data = Wage)
fit.3 = lm(wage ~ poly(age, 3, raw = T), data = Wage)
fit.4 = lm(wage ~ poly(age, 4, raw = T), data = Wage)
fit.5 = lm(wage ~ poly(age, 5, raw = T), data = Wage)
fit.6 = lm(wage ~ poly(age, 6, raw = T), data = Wage)
fit.7 = lm(wage ~ poly(age, 7, raw = T), data = Wage)
fit.8 = lm(wage ~ poly(age, 8, raw = T), data = Wage)
fit.9 = lm(wage ~ poly(age, 9, raw = T), data = Wage)
fit.10 = lm(wage ~ poly(age, 10, raw = T), data = Wage)
anova(fit.1, fit.2, fit.3, fit.4, fit.5, fit.6, fit.7, fit.8, fit.9, fit.10)
## Analysis of Variance Table
##
## Model 1: wage ~ age
## Model 2: wage ~ poly(age, 2, raw = T)
## Model 3: wage ~ poly(age, 3, raw = T)
## Model 4: wage ~ poly(age, 4, raw = T)
## Model 5: wage ~ poly(age, 5, raw = T)
## Model 6: wage ~ poly(age, 6, raw = T)
## Model 7: wage ~ poly(age, 7, raw = T)
## Model 8: wage ~ poly(age, 8, raw = T)
## Model 9: wage ~ poly(age, 9, raw = T)
## Model 10: wage ~ poly(age, 10, raw = T)
## Res.Df RSS Df Sum of Sq F Pr(>F)
## 1 2998 5022216
## 2 2997 4793430 1 228786 143.7638 < 2.2e-16 ***
## 3 2996 4777674 1 15756 9.9005 0.001669 **
## 4 2995 4771604 1 6070 3.8143 0.050909 .
## 5 2994 4770322 1 1283 0.8059 0.369398
## 6 2993 4766389 1 3932 2.4709 0.116074
## 7 2992 4763834 1 2555 1.6057 0.205199
## 8 2991 4763707 1 127 0.0796 0.777865
## 9 2990 4756703 1 7004 4.4014 0.035994 *
## 10 2989 4756701 1 3 0.0017 0.967529
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
According to the ANOVA, a third- or fourth-order polynomial is a sensible choice: the cubic term is clearly significant, the quartic term is borderline, and the higher-order terms add little.
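As a side check (a sketch): because poly() with the default raw = FALSE produces orthogonal polynomials, the coefficient t-tests from a single degree-10 fit carry the same information as the sequential ANOVA above; the squared t-statistics equal the F-statistics.
# With orthogonal polynomials, each coefficient's t-test matches the
# corresponding row of the sequential ANOVA (t^2 = F)
round(coef(summary(lm(wage ~ poly(age, 10), data = Wage))), 4)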
I will plot the third-order fit.
ggplot(Wage, aes(x = age, y = wage)) +
  geom_point(alpha = 0.3) +
  geom_smooth(method = "lm", formula = y ~ poly(x, 3, raw = TRUE)) +
  labs(title = "Predicting Wage from Age") +
  theme_minimal()
#####(b) Fit a step function to predict wage using age, and perform cross-validation to choose the optimal number of cuts. Make a plot of the fit obtained.
ctrl = trainControl(method = "cv", number = 10)
cv_rmse = c()
set.seed(23)
# Try step functions with 2 through 20 bins; cv_rmse[i - 1] stores the CV RMSE for i cuts
for (i in 2:20) {
  model = train(y = Wage$wage,
                x = data.frame(cut(Wage$age, i)),
                trControl = ctrl,
                method = "lm",
                metric = "RMSE")
  cv_rmse[i - 1] = model$results$RMSE
}
which.min(cv_rmse)
## [1] 10
data.frame(cuts = 2:20, cv_rmse = cv_rmse) %>%
  mutate(min_rmse = as.numeric(min(cv_rmse) == cv_rmse)) %>%
  ggplot(aes(x = cuts, y = cv_rmse)) +
  geom_point(aes(col = factor(min_rmse))) +
  geom_line() +
  theme(legend.position = "none")
The minimum is at index 10 of cv_rmse; because the loop starts at two cuts, that index corresponds to 11 cuts, so the optimal number of cuts suggested by this seed is 11.
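The exercise also asks for a plot of the fit itself. Here is a minimal sketch, assuming the 11 bins selected above:
# Fit the step function with 11 bins and overlay the piecewise-constant fit on the data
fit_step = lm(wage ~ cut(age, 11), data = Wage)
Wage %>%
  mutate(pred = fitted(fit_step)) %>%
  ggplot(aes(x = age, y = wage)) +
  geom_point(alpha = 0.3) +
  geom_line(aes(y = pred), colour = "blue", linewidth = 1) +
  labs(title = "Step-Function Fit of Wage on Age (11 cuts)") +
  theme_minimal()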
This question relates to the College data set.
#####(a) Split the data into a training set and a test set. Using out-of-state tuition as the response and the other variables as the predictors, perform forward stepwise selection on the training set in order to identify a satisfactory model that uses just a subset of the predictors.
set.seed(23)
split = 0.7
train_index = createDataPartition(College$Outstate, p = split, list = FALSE)
data_train = College[train_index, ]
data_test = College[-train_index, ]
ctrl = trainControl(method = "repeatedcv", number = 10, repeats = 1, selectionFunction = "oneSE")
set.seed(23)
model_fwd = train(Outstate ~ .,
                  data = data_train,
                  method = "leapForward",
                  metric = "RMSE",
                  maximize = FALSE,
                  trControl = ctrl,
                  tuneGrid = data.frame(nvmax = 1:17))
model_fwd
## Linear Regression with Forward Selection
##
## 546 samples
## 17 predictor
##
## No pre-processing
## Resampling: Cross-Validated (10 fold, repeated 1 times)
## Summary of sample sizes: 492, 491, 492, 490, 490, 493, ...
## Resampling results across tuning parameters:
##
## nvmax RMSE Rsquared MAE
## 1 2875.282 0.4982617 2313.522
## 2 2460.366 0.6228309 1870.279
## 3 2224.090 0.6865013 1724.053
## 4 2104.315 0.7167507 1660.630
## 5 2110.847 0.7140454 1659.339
## 6 2057.734 0.7288053 1603.378
## 7 2072.987 0.7262976 1616.131
## 8 2047.841 0.7336333 1605.737
## 9 2064.515 0.7289002 1619.793
## 10 2032.352 0.7360141 1603.916
## 11 2001.610 0.7431730 1579.785
## 12 1984.742 0.7475637 1565.904
## 13 1984.232 0.7476007 1564.288
## 14 1972.563 0.7510243 1553.099
## 15 1967.176 0.7521733 1549.432
## 16 1966.326 0.7524733 1544.208
## 17 1965.317 0.7527371 1542.906
##
## RMSE was used to select the optimal model using the one SE rule.
## The final value used for the model was nvmax = 11.
An 11-variable model was selected: the one-standard-error rule picks the smallest nvmax whose CV RMSE is within one standard error of the best value.
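The RMSE profile across nvmax can also be inspected directly from the caret object (a sketch; caret provides a ggplot method for train objects):
# CV RMSE as a function of the number of selected variables
ggplot(model_fwd) +
  theme_minimal()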
#####(b) Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings.
coef(model_fwd$finalModel, id = 11)
## (Intercept) PrivateYes Apps Accept F.Undergrad
## -1866.2830727 2018.3923381 -0.2280087 0.7872454 -0.2437704
## Room.Board Personal Terminal S.F.Ratio perc.alumni
## 0.8028060 -0.1399407 34.6515954 -40.3704251 48.1484652
## Expend Grad.Rate
## 0.2682705 24.9459292
model_gam = gam(Outstate ~ Private + s(Apps) + s(Accept) + s(F.Undergrad) +
                  s(Room.Board) + s(Personal) + s(Terminal) + s(S.F.Ratio) +
                  s(perc.alumni) + s(Expend) + s(Grad.Rate),
                data = data_train)
plot(model_gam, se = TRUE, col = "red")
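plot() draws the terms one at a time; to view them side by side, the panels can be arranged in a grid (a sketch using base graphics):
# Arrange the 11 term plots in a 3 x 4 grid, then reset the layout
par(mfrow = c(3, 4))
plot(model_gam, se = TRUE, col = "red")
par(mfrow = c(1, 1))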
The term plots suggest an approximately linear relationship between Outstate and Accept, Room.Board, and F.Undergrad, while the remaining predictors appear to have non-linear relationships with Outstate.
#####(c) Evaluate the model obtained on the test set, and explain the results obtained.
mean((predict(model_gam, newdata = data_test) - data_test$Outstate)^2)
## [1] 3871826
mean((predict(model_fwd, newdata = data_test) - data_test$Outstate)^2)
## [1] 4435419
According to the test MSE, the GAM outperforms the forward-selection linear model.
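For readability, the same comparison can be put on the original dollar scale (a sketch; test_rmse is a hypothetical helper, not part of the analysis above):
# Hypothetical helper: root-mean-squared test error in dollars of tuition
test_rmse = function(model, data) {
  sqrt(mean((predict(model, newdata = data) - data$Outstate)^2))
}
test_rmse(model_gam, data_test)  # sqrt(3871826), roughly 1968
test_rmse(model_fwd, data_test)  # sqrt(4435419), roughly 2106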
#####(d) For which variables, if any, is there evidence of a non-linear relationship with the response?
summary(model_gam)
##
## Call: gam(formula = Outstate ~ Private + s(Apps) + s(Accept) + s(F.Undergrad) +
## s(Room.Board) + s(Personal) + s(Terminal) + s(S.F.Ratio) +
## s(perc.alumni) + s(Expend) + s(Grad.Rate), data = data_train)
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -6574.75 -1031.37 89.46 1147.98 7619.39
##
## (Dispersion Parameter for gaussian family taken to be 3092029)
##
## Null Deviance: 8394827575 on 545 degrees of freedom
## Residual Deviance: 1558381570 on 503.9997 degrees of freedom
## AIC: 9751.386
##
## Number of Local Scoring Iterations: NA
##
## Anova for Parametric Effects
## Df Sum Sq Mean Sq F value Pr(>F)
## Private 1 2196286815 2196286815 710.3061 < 2.2e-16 ***
## s(Apps) 1 906801108 906801108 293.2706 < 2.2e-16 ***
## s(Accept) 1 186236849 186236849 60.2313 4.728e-14 ***
## s(F.Undergrad) 1 309860358 309860358 100.2126 < 2.2e-16 ***
## s(Room.Board) 1 689855812 689855812 223.1078 < 2.2e-16 ***
## s(Personal) 1 27058794 27058794 8.7511 0.003239 **
## s(Terminal) 1 381714849 381714849 123.4513 < 2.2e-16 ***
## s(S.F.Ratio) 1 204916134 204916134 66.2724 3.100e-15 ***
## s(perc.alumni) 1 231161321 231161321 74.7604 < 2.2e-16 ***
## s(Expend) 1 588408340 588408340 190.2985 < 2.2e-16 ***
## s(Grad.Rate) 1 54089497 54089497 17.4932 3.401e-05 ***
## Residuals 504 1558381570 3092029
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Anova for Nonparametric Effects
## Npar Df Npar F Pr(F)
## (Intercept)
## Private
## s(Apps) 3 1.6628 0.174090
## s(Accept) 3 5.0620 0.001836 **
## s(F.Undergrad) 3 2.4207 0.065283 .
## s(Room.Board) 3 1.8922 0.129911
## s(Personal) 3 2.3193 0.074587 .
## s(Terminal) 3 2.2654 0.080049 .
## s(S.F.Ratio) 3 4.3255 0.005031 **
## s(perc.alumni) 3 1.9523 0.120228
## s(Expend) 3 17.6269 6.775e-11 ***
## s(Grad.Rate) 3 3.4428 0.016676 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
According to the nonparametric ANOVA in the model summary, Accept, S.F.Ratio, Expend, and Grad.Rate show significant evidence of non-linear effects (p < 0.05), while the remaining smooth terms do not.
model_gam2 = gam(Outstate ~ Private + Apps + s(Accept) + F.Undergrad + Room.Board +
                   Personal + Terminal + s(S.F.Ratio) + perc.alumni + s(Expend) +
                   s(Grad.Rate),
                 data = data_train)
mean((predict(model_gam, newdata = data_test) - data_test$Outstate)^2)
## [1] 3871826
mean((predict(model_fwd, newdata = data_test) - data_test$Outstate)^2)
## [1] 4435419
mean((predict(model_gam2, newdata = data_test) - data_test$Outstate)^2)
## [1] 3505134
By smoothing only the predictors with evidence of a non-linear effect, we improve the test error while simplifying the model.
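As a final check (a sketch, not part of the original write-up), an approximate F test between the two nested GAM fits asks whether the extra smooth terms in the fully smoothed model are worth their additional degrees of freedom, mirroring the anova() comparison used for GAMs in the chapter lab:
# Compare the partly linear GAM against the fully smoothed GAM; a small
# p-value would favour keeping all of the smooth terms
anova(model_gam2, model_gam, test = "F")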