6 (a) Polynomial Regression with Cross-Validation

library(ISLR)   # Wage and College data sets
library(boot)   # cv.glm()

set.seed(1)
cv.error = rep(0, 10)

for (d in 1:10) {
  fit = glm(wage ~ poly(age, d), data = Wage)
  cv.error[d] = cv.glm(Wage, fit, K = 10)$delta[1]  # delta[1] is the raw K-fold CV estimate
}

optimal.degree = which.min(cv.error)
print(optimal.degree)
## [1] 9
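
Neighboring degrees often have nearly identical CV errors, so it is worth inspecting the curve before committing to which.min()'s choice. A minimal sketch:

plot(1:10, cv.error, type = "b", xlab = "Degree", ylab = "CV error")
points(optimal.degree, cv.error[optimal.degree], col = "red", pch = 19)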

ANOVA Comparison

fit.1 = lm(wage ~ poly(age, 1), data = Wage)
fit.2 = lm(wage ~ poly(age, 2), data = Wage)
fit.3 = lm(wage ~ poly(age, 3), data = Wage)
fit.4 = lm(wage ~ poly(age, 4), data = Wage)
fit.5 = lm(wage ~ poly(age, 5), data = Wage)

anova(fit.1, fit.2, fit.3, fit.4, fit.5)
## Analysis of Variance Table
## 
## Model 1: wage ~ poly(age, 1)
## Model 2: wage ~ poly(age, 2)
## Model 3: wage ~ poly(age, 3)
## Model 4: wage ~ poly(age, 4)
## Model 5: wage ~ poly(age, 5)
##   Res.Df     RSS Df Sum of Sq        F    Pr(>F)    
## 1   2998 5022216                                    
## 2   2997 4793430  1    228786 143.5931 < 2.2e-16 ***
## 3   2996 4777674  1     15756   9.8888  0.001679 ** 
## 4   2995 4771604  1      6070   3.8098  0.051046 .  
## 5   2994 4770322  1      1283   0.8050  0.369682    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
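
The ANOVA tells a more parsimonious story than cross-validation: the quadratic and cubic terms are clearly significant, the quartic term is borderline (p ≈ 0.051), and the quintic term adds nothing. A cubic or quartic polynomial is therefore adequate in practice, even though CV selected a higher degree.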

Polynomial Fit Plot

age.grid = seq(from = min(Wage$age), to = max(Wage$age), length.out = 100)

fit.best = lm(wage ~ poly(age, optimal.degree), data = Wage)
preds = predict(fit.best, newdata = list(age = age.grid))

plot(Wage$age, Wage$wage, col = "gray", pch = 19)
lines(age.grid, preds, col = "blue", lwd = 2)
title("Polynomial Fit of Degree 4")

(b) Step Function Fit with Cross-Validation

set.seed(1)
cv.error.step = rep(NA, 10)

for (k in 2:10) {
  Wage$age.cut = cut(Wage$age, k)
  fit = glm(wage ~ age.cut, data = Wage)
  cv.error.step[k] = cv.glm(Wage, fit, K = 10)$delta[1]
}

optimal.cuts = which.min(cv.error.step)
print(optimal.cuts)
## [1] 8
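
As with the polynomial fit, the CV curve is worth a look; a minimal sketch (the entry for k = 1 is NA and is skipped):

plot(2:10, cv.error.step[2:10], type = "b",
     xlab = "Number of intervals", ylab = "CV error")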

Step Function Fit Plot

Wage$age.cut = cut(Wage$age, optimal.cuts)
fit.step = lm(wage ~ age.cut, data = Wage)


library(dplyr)  # for the group_by()/summarise() pipeline below
pred.data = Wage %>%
  group_by(age.cut) %>%
  summarise(age.mean = mean(age), wage.mean = mean(wage))

plot(Wage$age, Wage$wage, col = "gray", pch = 19)
# The levels of age.cut are labels such as "(17.9,25.8]", so coercing them
# to numeric yields NAs; parse the lower interval boundaries instead.
cut.points = as.numeric(sub("\\((.+),.*", "\\1", levels(Wage$age.cut)))[-1]
abline(v = cut.points, col = "red", lty = 2)
points(pred.data$age.mean, pred.data$wage.mean, col = "blue", pch = 19, cex = 2)
title("Step Function Fit (4 Cuts)")

10. (a) Split the data into a training set and a test set.

library(leaps)
set.seed(1)
install.packages("gam")
## 
## The downloaded binary packages are in
##  /var/folders/n6/kts7k_nx3v3208p01m5x0p_00000gn/T//RtmpJRSpWx/downloaded_packages
library(gam)
## Loading required package: foreach
## Loaded gam 1.22-5
train <- sample(nrow(College), nrow(College) / 2)
test <- -train
College.train <- College[train, ]
College.test <- College[test, ]
fit <- regsubsets(Outstate ~ ., data = College.train, nvmax = 17, method = "forward")
fit.summary <- summary(fit)
par(mfrow = c(1, 3))
plot(fit.summary$cp, xlab = "Number of variables", ylab = "Cp", type = "l")
min.cp <- min(fit.summary$cp)
std.cp <- sd(fit.summary$cp)
abline(h = min.cp + 0.2 * std.cp, col = "red", lty = 2)
abline(h = min.cp - 0.2 * std.cp, col = "red", lty = 2)
plot(fit.summary$bic, xlab = "Number of variables", ylab = "BIC", type = "l")
min.bic <- min(fit.summary$bic)
std.bic <- sd(fit.summary$bic)
abline(h = min.bic + 0.2 * std.bic, col = "red", lty = 2)
abline(h = min.bic - 0.2 * std.bic, col = "red", lty = 2)
plot(fit.summary$adjr2, xlab = "Number of variables", ylab = "Adjusted R2", type = "l", ylim = c(0.4, 0.84))
max.adjr2 <- max(fit.summary$adjr2)
std.adjr2 <- sd(fit.summary$adjr2)
abline(h = max.adjr2 + 0.2 * std.adjr2, col = "red", lty = 2)
abline(h = max.adjr2 - 0.2 * std.adjr2, col = "red", lty = 2)

Cp, BIC and adjusted R^2 all indicate that 6 is the smallest subset size whose score is within 0.2 standard deviations of the optimum.
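
This can also be read off programmatically; a minimal sketch using the three bands drawn above:

min(which(fit.summary$cp    <= min.cp    + 0.2 * std.cp))
min(which(fit.summary$bic   <= min.bic   + 0.2 * std.bic))
min(which(fit.summary$adjr2 >= max.adjr2 - 0.2 * std.adjr2))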

coeffs <- coef(fit, id = 6)
names(coeffs)
## [1] "(Intercept)" "PrivateYes"  "Room.Board"  "Terminal"    "perc.alumni"
## [6] "Expend"      "Grad.Rate"

(b) Fit a GAM on the training data, using out-of-state tuition as the response and the features selected in the previous step as the predictors. Plot the results, and explain your findings.

gam1 <- gam(Outstate ~ Private + s(Room.Board, df = 2) + s(PhD, df = 2) + s(perc.alumni, df = 2) + s(Expend, df = 5) + s(Grad.Rate, df = 2), data=College.train)
par(mfrow = c(2, 3))
plot(gam1, se = T, col = "blue")
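
Each panel shows the fitted smooth with pointwise standard-error bands. All six terms appear strongly associated with Outstate (see the parametric ANOVA in part (d)); Expend shows the clearest departure from linearity, which the nonparametric ANOVA confirms.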

(c) Evaluate the model obtained on the test set, and explain the results obtained.

# regsubsets objects have no predict() method, so define one: rebuild the
# model matrix from the formula stored in the call and multiply by the
# coefficients of the id-th model.
predict.regsubsets <- function(object, newdata, id, ...) {
  form <- as.formula(object$call[[2]])
  mat <- model.matrix(form, newdata)
  coeffs <- coef(object, id = id)
  mat[, names(coeffs), drop = FALSE] %*% coeffs
}

preds <- predict.regsubsets(fit, College.test, id = 6)

err <- mean((College.test$Outstate - preds)^2)
print(err)
## [1] 3844857
tss <- mean((College.test$Outstate - mean(College.test$Outstate))^2)
test.r2 <- 1 - err / tss   # proportion of test variance explained, i.e. R^2
print(test.r2)
## [1] 0.7313788

We obtain a test MSE of about 3.84 million and a test R^2 of about 0.73 for the 6-predictor linear model chosen by forward selection.
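
The model from part (b) is the GAM, so its test performance is worth checking as well. A minimal sketch (output not shown, since the numbers depend on the split):

gam.preds <- predict(gam1, newdata = College.test)
gam.err <- mean((College.test$Outstate - gam.preds)^2)  # test MSE
gam.r2 <- 1 - gam.err / tss                             # test R^2
c(MSE = gam.err, R2 = gam.r2)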

(d) For which variables, if any, is there evidence of a non-linear relationship with the response?

summary(gam1)
## 
## Call: gam(formula = Outstate ~ Private + s(Room.Board, df = 2) + s(PhD, 
##     df = 2) + s(perc.alumni, df = 2) + s(Expend, df = 5) + s(Grad.Rate, 
##     df = 2), data = College.train)
## Deviance Residuals:
##      Min       1Q   Median       3Q      Max 
## -7402.89 -1114.45   -12.67  1282.69  7470.60 
## 
## (Dispersion Parameter for gaussian family taken to be 3711182)
## 
##     Null Deviance: 6989966760 on 387 degrees of freedom
## Residual Deviance: 1384271126 on 373 degrees of freedom
## AIC: 6987.021 
## 
## Number of Local Scoring Iterations: NA 
## 
## Anova for Parametric Effects
##                         Df     Sum Sq    Mean Sq F value    Pr(>F)    
## Private                  1 1778718277 1778718277 479.286 < 2.2e-16 ***
## s(Room.Board, df = 2)    1 1577115244 1577115244 424.963 < 2.2e-16 ***
## s(PhD, df = 2)           1  322431195  322431195  86.881 < 2.2e-16 ***
## s(perc.alumni, df = 2)   1  336869281  336869281  90.771 < 2.2e-16 ***
## s(Expend, df = 5)        1  530538753  530538753 142.957 < 2.2e-16 ***
## s(Grad.Rate, df = 2)     1   86504998   86504998  23.309 2.016e-06 ***
## Residuals              373 1384271126    3711182                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Anova for Nonparametric Effects
##                        Npar Df  Npar F     Pr(F)    
## (Intercept)                                         
## Private                                             
## s(Room.Board, df = 2)        1  1.9157    0.1672    
## s(PhD, df = 2)               1  0.9699    0.3253    
## s(perc.alumni, df = 2)       1  0.1859    0.6666    
## s(Expend, df = 5)            4 20.5075 2.665e-15 ***
## s(Grad.Rate, df = 2)         1  0.5702    0.4506    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

The ANOVA for nonparametric effects gives strong evidence of a non-linear relationship between Outstate and Expend (p < 2.2e-16). None of the other smooth terms (Room.Board, PhD, perc.alumni, Grad.Rate) approach significance at the 0.05 level, so linear terms would likely suffice for them.