Problem 6:

In this exercise, you will further analyze the Wage data set considered throughout this chapter.

library(ISLR)
attach(Wage)

(a) Perform polynomial regression to predict wage using age. Use cross-validation to select the optimal degree d for the polynomial. What degree was chosen, and how does this compare to the results of hypothesis testing using ANOVA? Make a plot of the resulting polynomial fit to the data.

set.seed(42)
library(boot)
cv_err <- rep(NA, 10)
for (v in 1:10) {
  fit_glm <- glm(wage~poly(age, v), data=Wage)
  # delta[2] is the bias-corrected K-fold CV error estimate
  cv_err[v] <- cv.glm(Wage, fit_glm, K=10)$delta[2]
}
plot(1:10, cv_err, xlab="Degree", ylab="CV error", type="l", pch=20, lwd=2, ylim=c(1590, 1700))

which.min(cv_err)
## [1] 9

The CV-error plot shows that \(d=9\) gives the smallest cross-validation error, and the which.min function confirms the visual assessment.
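Cross-validation favours a surprisingly high degree, so as an informal check (my own addition, mirroring the 0.2-standard-deviation bands used in Problem 10 below) I can look for the smallest degree whose CV error is nearly as good as the minimum:

# Informal check: smallest degree whose CV error is within 0.2 sd of the minimum.
tol <- 0.2 * sd(cv_err)
min(which(cv_err <= min(cv_err) + tol))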

fit_1 = lm(wage~poly(age, 1), data=Wage)
fit_2 = lm(wage~poly(age, 2), data=Wage)
fit_3 = lm(wage~poly(age, 3), data=Wage)
fit_4 = lm(wage~poly(age, 4), data=Wage)
fit_5 = lm(wage~poly(age, 5), data=Wage)
fit_6 = lm(wage~poly(age, 6), data=Wage)
fit_7 = lm(wage~poly(age, 7), data=Wage)
fit_8 = lm(wage~poly(age, 8), data=Wage)
fit_9 = lm(wage~poly(age, 9), data=Wage)
fit_10 = lm(wage~poly(age, 10), data=Wage)
anova(fit_1, fit_2, fit_3, fit_4, fit_5, fit_6, fit_7, fit_8, fit_9, fit_10)
## Analysis of Variance Table
## 
## Model  1: wage ~ poly(age, 1)
## Model  2: wage ~ poly(age, 2)
## Model  3: wage ~ poly(age, 3)
## Model  4: wage ~ poly(age, 4)
## Model  5: wage ~ poly(age, 5)
## Model  6: wage ~ poly(age, 6)
## Model  7: wage ~ poly(age, 7)
## Model  8: wage ~ poly(age, 8)
## Model  9: wage ~ poly(age, 9)
## Model 10: wage ~ poly(age, 10)
##    Res.Df     RSS Df Sum of Sq        F    Pr(>F)    
## 1    2998 5022216                                    
## 2    2997 4793430  1    228786 143.7638 < 2.2e-16 ***
## 3    2996 4777674  1     15756   9.9005  0.001669 ** 
## 4    2995 4771604  1      6070   3.8143  0.050909 .  
## 5    2994 4770322  1      1283   0.8059  0.369398    
## 6    2993 4766389  1      3932   2.4709  0.116074    
## 7    2992 4763834  1      2555   1.6057  0.205199    
## 8    2991 4763707  1       127   0.0796  0.777865    
## 9    2990 4756703  1      7004   4.4014  0.035994 *  
## 10   2989 4756701  1         3   0.0017  0.967529    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

ANOVA suggests that polynomial terms above \(d=3\) (with \(d=4\) borderline at \(p \approx 0.05\)) are not significant. I will continue with \(d=3\), as it is the simplest adequate model.
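Because poly() generates orthogonal polynomials, the coefficient t-tests from a single larger fit tell essentially the same story as the sequential ANOVA (the chapter lab makes this point); for example, using the degree-5 fit already created above:

# The t-tests on the orthogonal polynomial terms mirror the ANOVA results above.
coef(summary(fit_5))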

plot(wage~age, data=Wage, col="steelblue")
agelims <- range(Wage$age)
age_grid <- seq(from=agelims[1], to=agelims[2])
# degree-3 polynomial, the simplest degree supported by the ANOVA above
fit_lm <- lm(wage~poly(age, 3), data=Wage)
pred_lm <- predict(fit_lm, data.frame(age=age_grid))
lines(age_grid, pred_lm, col="red", lwd=3)
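Optionally, approximate pointwise confidence bands can be added by asking predict() for standard errors; a short sketch that redraws the same degree-3 fit with ±2 SE bands:

# Redraw the degree-3 fit with approximate 95% pointwise confidence bands.
pred_se <- predict(fit_lm, data.frame(age=age_grid), se.fit=TRUE)
plot(wage~age, data=Wage, col="steelblue")
lines(age_grid, pred_se$fit, col="red", lwd=3)
matlines(age_grid, cbind(pred_se$fit + 2*pred_se$se.fit,
                         pred_se$fit - 2*pred_se$se.fit),
         col="red", lty=2, lwd=1)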

(b) Fit a step function to predict wage using age, and perform cross-validation to choose the optimal number of cuts. Make a plot of the fit obtained.

cvs <- rep(NA, 10)
for (v in 2:10) {
  # store the cut factor in the data frame so cv.glm can refit it on each fold
  Wage$age.cut <- cut(Wage$age, v)
  fit_lm <- glm(wage~age.cut, data=Wage)
  cvs[v] <- cv.glm(Wage, fit_lm, K=10)$delta[2]
}
plot(2:10, cvs[-1], xlab="Number of cuts", ylab="CV error", type="l", col="steelblue", pch=20, lwd=2)

which.min(cvs)
## [1] 8

In the CV-error plot we can see that \(k=8\) is the optimal number of cuts. Again, the which.min function confirms the visual assessment.
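To see what an 8-cut step function actually looks like, the interval boundaries and the number of observations falling in each bin can be inspected directly:

# Interval boundaries and counts for the 8 bins chosen by cross-validation.
table(cut(Wage$age, 8))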

# step function with the 8 cuts chosen by cross-validation
fit_glm <- glm(wage~cut(age, 8), data=Wage)
pred_glm <- predict(fit_glm, data.frame(age=age_grid))
plot(wage~age, data=Wage, col="steelblue")
lines(age_grid, pred_glm, col="red", lwd=2)

detach(Wage)

Problem 10:

This question relates to the College data set.

attach(College)

(a) Split the data into a training set and a test set. Using out-of-state tuition as the response and the other variables as the predictors, perform forward stepwise selection on the training set in order to identify a satisfactory model that uses just a subset of the predictors.

library(leaps)
## Warning: package 'leaps' was built under R version 3.6.3
set.seed(42)
train <- sample(length(Outstate), length(Outstate)/2)
test <- -train
C_train <- College[train, ]
C_test <- College[test, ]

fit_rss<- regsubsets(Outstate ~ ., data = C_train, nvmax = 17, method = "forward")
summary_rss<- summary(fit_rss)
par(mfrow = c(1, 3))
plot(summary_rss$cp, xlab = "Number of Predictors", ylab = "Cp", type = "l")
cp_min<- min(summary_rss$cp)
cp_std<- sd(summary_rss$cp)
abline(h = cp_min + 0.2 * cp_std, col = "steelblue", lty = 2)
abline(h = cp_min - 0.2 * cp_std, col = "steelblue", lty = 2)
plot(summary_rss$bic, xlab = "Number of Predictors", ylab = "BIC", type = "l")
bic_min<- min(summary_rss$bic)
bic_std<- sd(summary_rss$bic)
abline(h = bic_min + 0.2 * bic_std, col = "steelblue", lty = 2)
abline(h = bic_min - 0.2 * bic_std, col = "steelblue", lty = 2)
plot(summary_rss$adjr2, xlab = "Number of Predictors", ylab = "Adjusted R2", type = "l", ylim = c(0.4, 0.84))
adjr2_max<- max(summary_rss$adjr2)
adjr2_std<- sd(summary_rss$adjr2)
abline(h = adjr2_max + 0.2 * adjr2_std, col = "steelblue", lty = 2)
abline(h = adjr2_max - 0.2 * adjr2_std, col = "steelblue", lty = 2)

Mallows' \(C_p\), the Bayesian information criterion (BIC), and adjusted \(R^{2}\) all suggest that a model with \(6\) predictors is sufficient: larger subsets improve each criterion only marginally, staying within the \(0.2\) standard-deviation bands drawn above.
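The same band heuristic can be applied programmatically; a small sketch that reports, for each criterion, the smallest subset size falling within the 0.2-standard-deviation band drawn above:

# Smallest subset size within 0.2 sd of the best value for each criterion.
min(which(summary_rss$cp <= cp_min + 0.2 * cp_std))
min(which(summary_rss$bic <= bic_min + 0.2 * bic_std))
min(which(summary_rss$adjr2 >= adjr2_max - 0.2 * adjr2_std))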

# re-run forward selection (here on the full data set) to pull out the six selected variables
fit_rss <- regsubsets(Outstate ~ ., data = College, method = "forward")
coefi <- coef(fit_rss, id = 6)
names(coefi)
## [1] "(Intercept)" "PrivateYes"  "Room.Board"  "PhD"         "perc.alumni"
## [6] "Expend"      "Grad.Rate"

The six selected predictors are PrivateYes, Room.Board, PhD, perc.alumni, Expend, and Grad.Rate.

(b) Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings.

library(gam)
## Warning: package 'gam' was built under R version 3.6.3
## Loading required package: splines
## Loading required package: foreach
## Loaded gam 1.16.1
fit_gam <-  gam(Outstate ~ Private + s(Room.Board, df = 2) + s(PhD, df = 2) + s(perc.alumni, df = 2) + s(Expend, df = 5) + s(Grad.Rate, df = 2), data = C_train)
## Warning in model.matrix.default(mt, mf, contrasts): non-list contrasts argument
## ignored
par(mfrow = c(2, 3))
plot(fit_gam, se = T, col = "steelblue")

(c) Evaluate the model obtained on the test set, and explain the results obtained.

pred_gam <- predict(fit_gam, C_test)
gam_err <- mean((C_test$Outstate - pred_gam)^2)  # test MSE
gam_err
## [1] 3577219
tss_gam <- mean((C_test$Outstate - mean(C_test$Outstate))^2)
tss_gam
## [1] 16378233
r2_test <- 1 - gam_err/tss_gam  # test R^2
r2_test
## [1] 0.781587

The test \(R^{2}\) was \(0.78\) using the GAM with \(6\) predictors. This is a slight improvement over the test \(R^{2}\) of \(0.74\) obtained using OLS.
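For reference, the OLS figure quoted above can be reproduced along these lines (a sketch; the exact value depends on the random train/test split):

# Sketch: linear model on the same six predictors, scored on the same test set.
fit_ols <- lm(Outstate ~ Private + Room.Board + PhD + perc.alumni + Expend + Grad.Rate,
              data = C_train)
pred_ols <- predict(fit_ols, C_test)
ols_err <- mean((C_test$Outstate - pred_ols)^2)
1 - ols_err/tss_gam   # test R^2 for OLS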

(d) For which variables, if any, is there evidence of a non-linear relationship with the response?

summary(fit_gam)
## 
## Call: gam(formula = Outstate ~ Private + s(Room.Board, df = 2) + s(PhD, 
##     df = 2) + s(perc.alumni, df = 2) + s(Expend, df = 5) + s(Grad.Rate, 
##     df = 2), data = C_train)
## Deviance Residuals:
##      Min       1Q   Median       3Q      Max 
## -6924.00 -1181.49    35.14  1268.64  4760.81 
## 
## (Dispersion Parameter for gaussian family taken to be 3398023)
## 
##     Null Deviance: 6186894805 on 387 degrees of freedom
## Residual Deviance: 1267461695 on 372.9998 degrees of freedom
## AIC: 6952.816 
## 
## Number of Local Scoring Iterations: 2 
## 
## Anova for Parametric Effects
##                         Df     Sum Sq    Mean Sq F value    Pr(>F)    
## Private                  1 1701928660 1701928660 500.858 < 2.2e-16 ***
## s(Room.Board, df = 2)    1 1188212252 1188212252 349.678 < 2.2e-16 ***
## s(PhD, df = 2)           1  583329301  583329301 171.667 < 2.2e-16 ***
## s(perc.alumni, df = 2)   1  219279874  219279874  64.532 1.260e-14 ***
## s(Expend, df = 5)        1  567028405  567028405 166.870 < 2.2e-16 ***
## s(Grad.Rate, df = 2)     1   84038824   84038824  24.732 1.007e-06 ***
## Residuals              373 1267461695    3398023                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Anova for Nonparametric Effects
##                        Npar Df  Npar F     Pr(F)    
## (Intercept)                                         
## Private                                             
## s(Room.Board, df = 2)        1  1.5504   0.21385    
## s(PhD, df = 2)               1  1.9920   0.15897    
## s(perc.alumni, df = 2)       1  3.8967   0.04912 *  
## s(Expend, df = 5)            4 12.6286 1.199e-09 ***
## s(Grad.Rate, df = 2)         1  1.4752   0.22529    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

The ANOVA for nonparametric effects shows strong evidence of a non-linear relationship between Outstate and Expend, and mild evidence of non-linearity for perc.alumni (\(p \approx 0.05\)); Room.Board, PhD, and Grad.Rate show no significant evidence of non-linearity.
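One way to corroborate this is to refit the GAM with Expend entering linearly and compare the two fits with an F-test; a quick sketch using the models defined above:

# Sketch: compare a linear Expend term against the smooth term via an F-test.
fit_gam_lin <- gam(Outstate ~ Private + s(Room.Board, df=2) + s(PhD, df=2) +
                     s(perc.alumni, df=2) + Expend + s(Grad.Rate, df=2),
                   data=C_train)
anova(fit_gam_lin, fit_gam, test="F")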

detach(College)