library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.2 ──
## ✔ ggplot2 3.4.1     ✔ purrr   1.0.1
## ✔ tibble  3.1.8     ✔ dplyr   1.1.0
## ✔ tidyr   1.3.0     ✔ stringr 1.5.0
## ✔ readr   2.1.4     ✔ forcats 1.0.0
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
library(caret)
## Loading required package: lattice
## 
## Attaching package: 'caret'
## 
## The following object is masked from 'package:purrr':
## 
##     lift
library(glmnet)
## Warning: package 'glmnet' was built under R version 4.2.3
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## 
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
## 
## Loaded glmnet 4.1-6
library(e1071)
## Warning: package 'e1071' was built under R version 4.2.3
library(knitr)
library(ISLR)

Question 5

We have seen that we can fit an SVM with a non-linear kernel in order to perform classification using a non-linear decision boundary. We will now see that we can also obtain a non-linear decision boundary by performing logistic regression using non-linear transformations of the features.
  (a) Generate a data set with n = 500 and p = 2, such that the observations belong to two classes with a quadratic decision boundary between them.
x1 <- runif(500) - 0.5
x2 <- runif(500) - 0.5
# quadratic decision boundary: class 1 whenever x1^2 > x2^2
y <- 1 * (x1^2 - x2^2 > 0)
  (b) Plot the observations, colored according to their class labels. Your plot should display X1 on the x-axis, and X2 on the y-axis.
plot(x1,x2, col=ifelse(y, 'red', 'blue'))
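Because y is defined by the sign of x1^2 - x2^2, the true boundary is the pair of lines x2 = x1 and x2 = -x1. As a quick sketch (not part of the original output), those lines can be overlaid on the same scatterplot to make the quadratic class structure visible:

plot(x1, x2, col = ifelse(y, 'red', 'blue'))
abline(0, 1, lty = 2)   # x2 =  x1
abline(0, -1, lty = 2)  # x2 = -x1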

  (c) Fit a logistic regression model to the data, using X1 and X2 as predictors.
glm.fit <- glm(y~., family = 'binomial', data = data.frame(x1,x2,y))
glm.fit
## 
## Call:  glm(formula = y ~ ., family = "binomial", data = data.frame(x1, 
##     x2, y))
## 
## Coefficients:
## (Intercept)           x1           x2  
##      0.1508       0.1616       0.5747  
## 
## Degrees of Freedom: 499 Total (i.e. Null);  497 Residual
## Null Deviance:       690.6 
## Residual Deviance: 687   AIC: 693
  (d) Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be linear.
# glm.preds is on the link (log-odds) scale, so > 0 means P(y = 1) > 0.5; pch = 4 marks misclassified points
glm.preds <- predict(glm.fit, data.frame(x1,x2))
plot(x1,x2, col=ifelse(glm.preds > 0, 'red', 'blue'), pch=ifelse(as.integer(glm.preds > 0) == y,1,4))
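To make the shape of this fitted boundary explicit, one option (sketched here; the grid object and its 200 x 200 resolution are choices made for illustration, not part of the original code) is to classify a fine grid of (x1, x2) values and shade it by predicted class; for this model the two shaded regions meet along a straight line.

grid <- expand.grid(x1 = seq(-0.5, 0.5, length.out = 200),
                    x2 = seq(-0.5, 0.5, length.out = 200))
grid$class <- as.integer(predict(glm.fit, grid) > 0)  # link scale: > 0 means P(y = 1) > 0.5
plot(grid$x1, grid$x2, col = ifelse(grid$class == 1, 'mistyrose', 'lightblue'),
     pch = 15, cex = 0.3)
points(x1, x2, col = ifelse(y, 'red', 'blue'))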

  (e) Now fit a logistic regression model to the data using non-linear functions of X1 and X2 as predictors (e.g. X1^2, X1×X2, log(X2), and so forth).
data <- data.frame(x1,x2,y)
log.fit <- glm(y~ poly(x1,2)+log(x2), family = 'binomial', data = data)
## Warning in log(x2): NaNs produced
log.fit
## 
## Call:  glm(formula = y ~ poly(x1, 2) + log(x2), family = "binomial", 
##     data = data)
## 
## Coefficients:
##  (Intercept)  poly(x1, 2)1  poly(x1, 2)2       log(x2)  
##       -8.428         8.372        88.517        -5.686  
## 
## Degrees of Freedom: 238 Total (i.e. Null);  235 Residual
##   (261 observations deleted due to missingness)
## Null Deviance:       328.3 
## Residual Deviance: 93.13     AIC: 101.1
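Because x2 is negative for roughly half of the observations, log(x2) is undefined there, which is why 261 observations were deleted above. A sketch of an alternative non-linear specification that keeps every observation (log.fit2 is a name introduced here for illustration; since these terms can separate the classes almost perfectly, glm() may warn about fitted probabilities of 0 or 1):

log.fit2 <- glm(y ~ poly(x1, 2) + poly(x2, 2), family = 'binomial', data = data)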
  (f) Apply this model to the training data in order to obtain a predicted class label for each training observation. Plot the observations, colored according to the predicted class labels. The decision boundary should be obviously non-linear. If it is not, then repeat (a)-(e) until you come up with an example in which the predicted class labels are obviously non-linear.
log.preds <- predict(log.fit, data.frame(x1,x2))
## Warning in log(x2): NaNs produced
plot(x1,x2,col = ifelse(log.preds > 0, 'red', 'blue'), pch = ifelse(as.integer(log.preds > 0) == y,1,4))

  (g) Fit a support vector classifier to the data with X1 and X2 as predictors. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels.
lin.svm <- svm(y~., data = data.frame(x1, x2, y = as.factor(y)), kernel = 'linear')

svm.preds <- predict(lin.svm, data.frame(x1,x2))
plot(x1,x2, col = ifelse(svm.preds != 0, 'red', 'blue'), pch = ifelse(svm.preds == y,1,4))

  (h) Fit an SVM using a non-linear kernel to the data. Obtain a class prediction for each training observation. Plot the observations, colored according to the predicted class labels.
poly.svm <- svm(y~., data = data.frame(x1, x2, y = as.factor(y)), kernel = 'polynomial', degree = 2)

poly.preds <- predict(poly.svm, data.frame(x1,x2))

plot(x1,x2, col = ifelse(poly.preds != 0, 'red', 'blue'), pch = ifelse(poly.preds == y,1,4))

  (i) Comment on your results.
kable(table(y,as.integer(glm.preds>0)))
|y  |   0|   1|
|:--|---:|---:|
|0  |  89| 143|
|1  |  27| 241|
kable(table(y,as.integer(log.preds>0)))
|y  |  0|   1|
|:--|--:|---:|
|0  | 96|  10|
|1  |  8| 125|
kable(table(y,poly.preds))
|y  |   0|   1|
|:--|---:|---:|
|0  | 217|  15|
|1  |   3| 265|
kable(table(y,svm.preds)) 
|y  |   0|   1|
|:--|---:|---:|
|0  | 217|  15|
|1  |   6| 262|
e.glm <- (143 + 27)/500                 # misclassified counts from the glm table above
e.log <- (10 + 8)/(96 + 10 + 8 + 125)   # only 239 observations have predictions (log(x2) undefined for x2 <= 0)
e.poly <- (15 + 3)/500
e.svm <- (15 + 6)/500

e.glm
## [1] 0.34
e.log
## [1] 0.07531381
e.poly
## [1] 0.036
e.svm
## [1] 0.042

The plain logistic regression produces a linear boundary and misclassifies about a third of the training observations, while the other three models fit the training data far better; the polynomial-kernel SVM has the lowest training error (about 0.036).
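These training error rates can also be computed directly instead of being read off the tables; a brief sketch (na.rm = TRUE handles the NA predictions produced by the log(x2) model):

mean(as.integer(glm.preds > 0) != y)
mean(as.integer(log.preds > 0) != y, na.rm = TRUE)
mean(poly.preds != y)
mean(svm.preds != y)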

Question 7

In this problem, you will use support vector approaches in order to predict whether a given car gets high or low gas mileage based on the Auto data set.
  (a) Create a binary variable that takes on a 1 for cars with gas mileage above the median, and a 0 for cars with gas mileage below the median.
# recode mpg as a factor so that svm() and tune() perform classification rather than regression
Auto$mpg <- as.factor(ifelse(Auto$mpg > median(Auto$mpg), 1, 0))

head(Auto)
##   mpg cylinders displacement horsepower weight acceleration year origin
## 1   0         8          307        130   3504         12.0   70      1
## 2   0         8          350        165   3693         11.5   70      1
## 3   0         8          318        150   3436         11.0   70      1
## 4   0         8          304        150   3433         12.0   70      1
## 5   0         8          302        140   3449         10.5   70      1
## 6   0         8          429        198   4341         10.0   70      1
##                        name
## 1 chevrolet chevelle malibu
## 2         buick skylark 320
## 3        plymouth satellite
## 4             amc rebel sst
## 5               ford torino
## 6          ford galaxie 500
  (b) Fit a support vector classifier to the data with various values of cost, in order to predict whether a car gets high or low gas mileage. Report the cross-validation errors associated with different values of this parameter. Comment on your results. Note you will need to fit the classifier without the gas mileage variable to produce sensible results.
cost <- data.frame(cost=seq(1,100,length.out=20))

cars.tune <- tune(svm, mpg~., data= Auto, ranges = cost, kernel = 'linear')
cars.tune
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost
##     1
## 
## - best performance: 0.09629424
cars.tune$best.parameters
##   cost
## 1    1

Cross-validation selects a cost of 1, with an estimated error of about 0.096.
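The full cross-validation table behind that choice is stored in the tune object; a quick way to inspect it (a sketch using the performances component that e1071's tune() returns):

head(cars.tune$performances)  # one row per candidate cost, with its CV error and dispersion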

  (c) Now repeat (b), this time using SVMs with radial and polynomial basis kernels, with different values of gamma and degree and cost. Comment on your results.
poly.p <- data.frame(cost = seq(.1, 100, length.out = 5), degree = 1:5)  # tune() expands this into a 5 x 5 grid

cars.poly <- tune(svm, mpg~., data  =Auto, ranges = poly.p, kernel = 'polynomial')

cars.poly
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost degree
##   100      1
## 
## - best performance: 0.09864835
radial.p <- data.frame(cost=seq(.1,100, length.out = 5), gamma = seq(.1, 100, length.out =5))

cars.radial <- tune(svm, mpg~., data = Auto, ranges = radial.p, kernel = 'radial')
cars.radial
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##    cost gamma
##  25.075   0.1
## 
## - best performance: 0.06749972
The radial kernel gives the lowest cross-validated error (about 0.067, at cost of roughly 25 and gamma = 0.1), compared with roughly 0.096 for the linear kernel and 0.099 for the polynomial kernel, whose selected degree of 1 makes it essentially linear.

  (d) Make some plots to back up your assertions in (b) and (c).
par(mfrow = c(1, 3))
plot(cars.tune$performances[, c("cost", "error")], type = 'l')
plot(cars.poly$performances[, c("cost", "error")])
plot(cars.radial$performances[, c("cost", "error")])

cars.tune$best.parameters
##   cost
## 1    1
cars.poly$best.parameters
##   cost degree
## 5  100      1
cars.radial$best.parameters
##     cost gamma
## 2 25.075   0.1
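For a side-by-side comparison, the best cross-validated error of each tuning run can also be collected into a single vector (the names below are labels chosen here):

c(linear = cars.tune$best.performance,
  polynomial = cars.poly$best.performance,
  radial = cars.radial$best.performance)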

Question 8

This problem involves the OJ data set, which is part of the ISLR2 package.
  (a) Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations.
set.seed(1)
Oj.part <- createDataPartition(OJ$Purchase, p = .746, list = FALSE, times = 1)  # ~800 of the 1,070 observations

OJtrain <- OJ[Oj.part,]
OJtest <- OJ[-Oj.part,]
  (b) Fit a support vector classifier to the training data using cost = 0.01, with Purchase as the response and the other variables as predictors. Use the summary() function to produce summary statistics, and describe the results obtained.
OJ.svm = svm(Purchase~., data = OJtrain, cost = .01, kernel = 'linear')
summary(OJ.svm)
## 
## Call:
## svm(formula = Purchase ~ ., data = OJtrain, cost = 0.01, kernel = "linear")
## 
## 
## Parameters:
##    SVM-Type:  C-classification 
##  SVM-Kernel:  linear 
##        cost:  0.01 
## 
## Number of Support Vectors:  446
## 
##  ( 223 223 )
## 
## 
## Number of Classes:  2 
## 
## Levels: 
##  CH MM
  (c) What are the training and test error rates?
train.oj.preds <- predict(OJ.svm, OJtrain)

kable(table(OJtrain$Purchase, train.oj.preds))
|Purchase |  CH|  MM|
|:--------|---:|---:|
|CH       | 436|  52|
|MM       |  82| 230|
trainerror = (82+52)/800

test.oj.preds <- predict(OJ.svm, OJtest)

kable(table(OJtest$Purchase, test.oj.preds))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 146| 19|
|MM       |  20| 85|
testerror <- (20+19)/270

trainerror
## [1] 0.1675
testerror
## [1] 0.1444444
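A small helper function, sketched here (misclass is a name introduced for illustration), gives the same error rates without counting table cells by hand:

misclass <- function(pred, truth) mean(pred != truth)
misclass(train.oj.preds, OJtrain$Purchase)  # training error
misclass(test.oj.preds, OJtest$Purchase)    # test error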
  (d) Use the tune() function to select an optimal cost. Consider values in the range 0.01 to 10.
OJ.tune <- tune(svm, Purchase~., data = OJtrain, ranges = data.frame(cost = seq(.01, 10, length.out = 25)), kernel = 'linear')
summary(OJ.tune)
## 
## Error estimation of 'svm' using 10-fold cross validation: 0.185
OJ.tune$best.parameters
##   cost
## 1 0.01
  (e) Compute the training and test error rates using this new value for cost.
train.oj.preds <- predict(OJ.tune$best.model, OJtrain)
kable(table(OJtrain$Purchase, train.oj.preds))
|Purchase |  CH|  MM|
|:--------|---:|---:|
|CH       | 436|  52|
|MM       |  82| 230|
tune.train.error <- (82+52)/800

test.oj.preds <- predict(OJ.tune$best.model, OJtest)
kable(table (OJtest$Purchase, test.oj.preds))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 146| 19|
|MM       |  20| 85|
tune.test.error <- (20+19)/270

tune.train.error
## [1] 0.1675
tune.test.error
## [1] 0.1444444
  (f) Repeat parts (b) through (e) using a support vector machine with a radial kernel. Use the default value for gamma.
OJ.radial <- svm(Purchase~., data = OJtrain, cost = .01, kernel = 'radial')
summary(OJ.radial)
## 
## Call:
## svm(formula = Purchase ~ ., data = OJtrain, cost = 0.01, kernel = "radial")
## 
## 
## Parameters:
##    SVM-Type:  C-classification 
##  SVM-Kernel:  radial 
##        cost:  0.01 
## 
## Number of Support Vectors:  627
## 
##  ( 315 312 )
## 
## 
## Number of Classes:  2 
## 
## Levels: 
##  CH MM
radial.train.preds <- predict(OJ.radial, OJtrain)
kable(table(OJtrain$Purchase, radial.train.preds))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 488|  0|
|MM       | 312|  0|
radial.train.error <- 312/800

radial.test.preds <- predict(OJ.radial, OJtest)
kable(table(OJtest$Purchase, radial.test.preds))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 165|  0|
|MM       | 105|  0|
radial.test.error <- 105/270

radial.train.error
## [1] 0.39
radial.test.error
## [1] 0.3888889
OJ.radial.tune <- tune(svm, Purchase~., data = OJtrain, ranges = data.frame(cost = seq(.01, 10, length.out = 25)), kernel = 'radial')
summary(OJ.radial.tune)
## 
## Error estimation of 'svm' using 10-fold cross validation: 0.39
train.oj.preds.radial <- predict(OJ.radial.tune$best.model, OJtrain)
kable(table(OJtrain$Purchase, train.oj.preds.radial))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 488|  0|
|MM       | 312|  0|
tune.train.error <- 312/800

test.oj.preds.radial <- predict(OJ.radial.tune$best.model, OJtest)
kable(table (OJtest$Purchase, test.oj.preds.radial))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 165|  0|
|MM       | 105|  0|
tune.test.error <- 105/270

tune.train.error
## [1] 0.39
tune.test.error
## [1] 0.3888889
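With cost = 0.01 the radial SVM predicts CH for every observation, so it never improves on the majority-class error rate of about 0.39. As a sketch (OJ.radial1 and cost = 1 are choices made here for illustration), refitting with a larger cost typically gives a non-degenerate classifier:

OJ.radial1 <- svm(Purchase ~ ., data = OJtrain, cost = 1, kernel = 'radial')
table(OJtrain$Purchase, predict(OJ.radial1, OJtrain))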
  (g) Repeat parts (b) through (e) using a support vector machine with a polynomial kernel. Set degree = 2.
OJ.poly <- svm(Purchase~., data = OJtrain, cost = .01, degree = 2, kernel = 'polynomial')
summary(OJ.poly)
## 
## Call:
## svm(formula = Purchase ~ ., data = OJtrain, cost = 0.01, degree = 2, 
##     kernel = "polynomial")
## 
## 
## Parameters:
##    SVM-Type:  C-classification 
##  SVM-Kernel:  polynomial 
##        cost:  0.01 
##      degree:  2 
##      coef.0:  0 
## 
## Number of Support Vectors:  628
## 
##  ( 316 312 )
## 
## 
## Number of Classes:  2 
## 
## Levels: 
##  CH MM
poly.train.preds <- predict(OJ.poly, OJtrain)
kable(table(OJtrain$Purchase, poly.train.preds))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 485|  3|
|MM       | 291| 21|
poly.train.error <- (3+291)/800

poly.test.preds <- predict(OJ.poly, OJtest)
kable(table(OJtest$Purchase, poly.test.preds))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 161|  4|
|MM       |  97|  8|
poly.test.error <- (97+4)/270

poly.train.error
## [1] 0.3675
poly.test.error
## [1] 0.3740741
OJ.tune.poly <- tune(svm, Purchase~., data = OJtrain, ranges = data.frame(cost = seq(.01, 10, length.out = 25)), kernel = 'polynomial', degree = 2)
summary(OJ.tune.poly)
## 
## Error estimation of 'svm' using 10-fold cross validation: 0.3675
train.oj.preds.poly <- predict(OJ.tune.poly$best.model, OJtrain)
kable(table(OJtrain$Purchase, train.oj.preds.poly))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 485|  3|
|MM       | 291| 21|
tune.train.error.poly <- (291+3)/800

test.oj.preds.poly <- predict(OJ.tune.poly$best.model, OJtest)
kable(table (OJtest$Purchase, test.oj.preds.poly))
|Purchase |  CH| MM|
|:--------|---:|--:|
|CH       | 161|  4|
|MM       |  97|  8|
tune.test.error.poly <- (97+4)/270

tune.train.error.poly
## [1] 0.3675
tune.test.error.poly
## [1] 0.3740741
  (h) Overall, which approach seems to give the best results on this data?

Based on the test error rates, the tuned linear support vector classifier performs best (test error of about 0.144); the radial and polynomial kernels fit with cost = 0.01 predict CH for nearly every observation and have test errors of roughly 0.39 and 0.37.
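Collecting the three test error rates side by side makes this explicit (testerror, radial.test.error and poly.test.error were computed above; tune.test.error was reused for both the linear and radial runs, so the cost = 0.01 linear value, which matched the tuned model, is used for the linear entry):

c(linear = testerror,
  radial = radial.test.error,
  polynomial = poly.test.error)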