5

We have seen that we can fit an SVM with a non-linear kernel in order to perform classification using a non-linear decision boundary. We will now see that we can also obtain a non-linear decision boundary by performing logistic regression using non-linear transformations of the features.

set.seed(10)
x1=runif(500)-.5
x2=runif(500)-.5
y= as.integer(x1^2-x2^2 > 0)
plot(x1[y==0], x2[y==0], xlab='X1', ylab='X2', col='green', pch= '+')
points(x1[y==1], x2[y==1],col='blue', pch=4)

+ The plot of the simulated data shows classes separated by a non-linear decision boundary.
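+ Since $y=1$ exactly when ${X_1}^2 - {X_2}^2 > 0$, the true boundary is the pair of lines $X_2 = \pm X_1$. A minimal sketch to overlay these reference lines on the plot above (assuming the plot is still the active graphics device):
# overlay the true boundary x1^2 = x2^2, i.e. the lines x2 = x1 and x2 = -x1
abline(a = 0, b = 1, lty = 2)
abline(a = 0, b = -1, lty = 2)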
datax = data.frame(x1=x1, x2=x2, y=y)
lm.fit<-glm(y~ ., data= datax,  family=binomial)
summary(lm.fit)
## 
## Call:
## glm(formula = y ~ ., family = binomial, data = datax)
## 
## Deviance Residuals: 
##    Min      1Q  Median      3Q     Max  
## -1.347  -1.152   1.024   1.166   1.297  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)  
## (Intercept)  0.02449    0.08988   0.272   0.7853  
## x1           0.23573    0.31281   0.754   0.4511  
## x2          -0.52372    0.30367  -1.725   0.0846 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 693.12  on 499  degrees of freedom
## Residual deviance: 689.57  on 497  degrees of freedom
## AIC: 695.57
## 
## Number of Fisher Scoring iterations: 3
+ Neither $X_1$ nor $X_2$ is a statistically significant predictor of y at the $\alpha$=0.05 level. This is consistent with the non-linear boundary seen in the plot, which a model that is linear in $X_1$ and $X_2$ cannot capture.
lm.prob <- predict(lm.fit, newdata = datax, type = 'response')
lm.pred <- ifelse(lm.prob > 0.5, 1, 0)
data.pos<-datax[lm.pred == 1, ]
data.neg<-datax[lm.pred == 0, ]
plot(data.pos$x1, data.pos$x2, col = "green", xlab = "X1", ylab = "X2", pch = "+")
points(data.neg$x1, data.neg$x2, col = "blue", pch = 4)

+ Thresholding the fitted probabilities at 0.5 produces a linear decision boundary, which does not resemble the true boundary.
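+ To quantify how poorly this purely linear fit does, one could compare its predicted labels with the true labels (a one-line sketch; output not shown here):
# training error rate of the linear logistic regression fit
mean(lm.pred != datax$y)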
lmx.fit<-glm(y~poly(x1,2)+log(x2)+I(x1*x2), data=datax, family='binomial')
## Warning in log(x2): NaNs produced
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
summary(lmx.fit)
## 
## Call:
## glm(formula = y ~ poly(x1, 2) + log(x2) + I(x1 * x2), family = "binomial", 
##     data = datax)
## 
## Deviance Residuals: 
##      Min        1Q    Median        3Q       Max  
## -2.50938  -0.08764  -0.01227   0.08100   1.64313  
## 
## Coefficients:
##              Estimate Std. Error z value Pr(>|z|)    
## (Intercept)   -10.400      1.841  -5.650 1.61e-08 ***
## poly(x1, 2)1   -3.904     20.766  -0.188    0.851    
## poly(x1, 2)2   94.416     16.435   5.745 9.20e-09 ***
## log(x2)        -6.882      1.224  -5.621 1.90e-08 ***
## I(x1 * x2)      4.533      8.865   0.511    0.609    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 351.347  on 253  degrees of freedom
## Residual deviance:  76.659  on 249  degrees of freedom
##   (246 observations deleted due to missingness)
## AIC: 86.659
## 
## Number of Fisher Scoring iterations: 8
+ In the new model with non-linear transformations, some terms are statistically significant, namely ${X_1}^2$ and $\log(X_2)$. Note that $\log(X_2)$ is undefined for observations with non-positive $X_2$, which is why 246 observations were dropped from this fit.
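+ Since the labels were generated from ${X_1}^2 - {X_2}^2$, the transformation that matches the true boundary is the pair of squared terms. A sketch of that fit (the object name lmsq.fit is only illustrative; this fit will likely trigger the 'fitted probabilities numerically 0 or 1' warning because the classes become perfectly separable):
# logistic regression on the squared terms that define the true boundary
lmsq.fit <- glm(y ~ I(x1^2) + I(x2^2), data = datax, family = binomial)
summary(lmsq.fit)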
lmx.prob <- predict(lmx.fit, newdata = datax, type = 'response')
## Warning in log(x2): NaNs produced
lmx.pred <- ifelse(lmx.prob > 0.5, 1, 0)
data.pos = datax[lmx.pred == 1, ]
data.neg = datax[lmx.pred == 0, ]
plot(data.pos$x1, data.pos$x2, col = "blue", xlab = "X1", ylab = "X2", pch = "+")
points(data.neg$x1, data.neg$x2, col = "red", pch = 4)

+ The new model produces a non-linear decision boundary, but one that differs from the true boundary; in particular, observations with non-positive $X_2$ receive no prediction because of the $\log(X_2)$ term.
library(e1071)
## Warning: package 'e1071' was built under R version 3.5.3
svm.fit<-svm(as.factor(y)~ x1+x2, data=datax, kernel='linear', cost=0.1)
svm.pred<-predict(svm.fit, newdata=datax)
data.pos = datax[svm.pred == 1, ]
data.neg = datax[svm.pred == 0, ]
plot(data.pos$x1, data.pos$x2, col = "blue", xlab = "X1", ylab = "X2", pch = "+")
points(data.neg$x1, data.neg$x2, col = "red", pch = 4)

+ The support vector classifier with a linear kernel produces a linear boundary: only a handful of observations are assigned to one class, while the large majority are classified into the other.
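+ Because the true boundary is not linear in $X_1$ and $X_2$, increasing the cost should not help much. A quick check (sketch; the object name svm.big is only illustrative, output not shown):
# linear-kernel SVM at a much larger cost; the training error should stay high
svm.big <- svm(as.factor(y) ~ x1 + x2, data = datax, kernel = 'linear', cost = 100)
mean(predict(svm.big, datax) != datax$y)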
nonsvm.fit<-svm(as.factor(y)~ x1+x2, data=datax, gamma=1)
nonsvm.pred<-predict(nonsvm.fit, newdata=datax)
data.pos = datax[nonsvm.pred == 1, ]
data.neg = datax[nonsvm.pred == 0, ]
plot(data.pos$x1, data.pos$x2, col = "blue", xlab = "X1", ylab = "X2", pch = "+")
points(data.neg$x1, data.neg$x2, col = "red", pch = 4)

+ The predicted labels from the radial kernel SVM are similar to the true classes, so its decision boundary closely resembles the original non-linear boundary.
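+ To confirm this numerically, one could compare the radial SVM's predicted labels with the true labels (sketch; output not shown):
# confusion matrix and training error rate for the radial-kernel SVM
table(datax$y, nonsvm.pred)
mean(nonsvm.pred != datax$y)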

7

In this problem, you will use support vector approaches in order to predict whether a given car gets high or low gas mileage based on the Auto data set.

library(ISLR)
library(e1071)
attach(Auto)
gas.med<-median(mpg)
mpg01<-ifelse(mpg> gas.med,1,0)
Auto$mpglevel<-as.factor(mpg01)
set.seed(100)
tune.auto<-tune(svm, mpglevel ~ ., data=Auto, kernel='linear', ranges = list(cost=c(0.01,.1, 1, 10, 100)))
summary(tune.auto)
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost
##     1
## 
## - best performance: 0.007628205 
## 
## - Detailed performance results:
##    cost       error dispersion
## 1 1e-02 0.074038462 0.03915012
## 2 1e-01 0.043333333 0.02708741
## 3 1e+00 0.007628205 0.01228382
## 4 1e+01 0.020384615 0.02353300
## 5 1e+02 0.033205128 0.02720447
+ The cross-validation error is minimized at cost = 1.
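+ By default, tune() also stores the model refit on the full data at the best cost, so its training error can be checked directly (sketch; the object name best.linear is only illustrative, output not shown):
# model refit at the best cost, kept inside the tune object
best.linear <- tune.auto$best.model
mean(predict(best.linear, Auto) != Auto$mpglevel)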
set.seed(15)
auto.tune=tune(svm, mpglevel ~ ., data=Auto, kernel='polynomial', ranges = list(cost=c(0.01,.1, 1, 10, 100),degree= c(2,3,4,5)))
summary(auto.tune)
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost degree
##   100      2
## 
## - best performance: 0.2985256 
## 
## - Detailed performance results:
##     cost degree     error dispersion
## 1  1e-02      2 0.5663462 0.04940035
## 2  1e-01      2 0.5663462 0.04940035
## 3  1e+00      2 0.5663462 0.04940035
## 4  1e+01      2 0.5304487 0.09679523
## 5  1e+02      2 0.2985256 0.08809643
## 6  1e-02      3 0.5663462 0.04940035
## 7  1e-01      3 0.5663462 0.04940035
## 8  1e+00      3 0.5663462 0.04940035
## 9  1e+01      3 0.5663462 0.04940035
## 10 1e+02      3 0.3445513 0.11008121
## 11 1e-02      4 0.5663462 0.04940035
## 12 1e-01      4 0.5663462 0.04940035
## 13 1e+00      4 0.5663462 0.04940035
## 14 1e+01      4 0.5663462 0.04940035
## 15 1e+02      4 0.5663462 0.04940035
## 16 1e-02      5 0.5663462 0.04940035
## 17 1e-01      5 0.5663462 0.04940035
## 18 1e+00      5 0.5663462 0.04940035
## 19 1e+01      5 0.5663462 0.04940035
## 20 1e+02      5 0.5663462 0.04940035
auto.tuner=tune(svm, mpglevel ~ ., data=Auto, kernel='radial', ranges = list(cost=c(0.01,.1, 1, 10, 100),gamma= c(0.001,.01,.1,1,10)))
summary(auto.tuner)
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost gamma
##   100  0.01
## 
## - best performance: 0.02288462 
## 
## - Detailed performance results:
##     cost gamma      error dispersion
## 1  1e-02 1e-03 0.55102564 0.04000260
## 2  1e-01 1e-03 0.52814103 0.05965057
## 3  1e+00 1e-03 0.08685897 0.05446434
## 4  1e+01 1e-03 0.07673077 0.05552436
## 5  1e+02 1e-03 0.02564103 0.02417459
## 6  1e-02 1e-02 0.55102564 0.04000260
## 7  1e-01 1e-02 0.08685897 0.05446434
## 8  1e+00 1e-02 0.07673077 0.05552436
## 9  1e+01 1e-02 0.03070513 0.02357884
## 10 1e+02 1e-02 0.02288462 0.01870128
## 11 1e-02 1e-01 0.18615385 0.07412233
## 12 1e-01 1e-01 0.07673077 0.05552436
## 13 1e+00 1e-01 0.05871795 0.03642670
## 14 1e+01 1e-01 0.02551282 0.02417610
## 15 1e+02 1e-01 0.03320513 0.02976888
## 16 1e-02 1e+00 0.55102564 0.04000260
## 17 1e-01 1e+00 0.55102564 0.04000260
## 18 1e+00 1e+00 0.06634615 0.03244101
## 19 1e+01 1e+00 0.06384615 0.03879626
## 20 1e+02 1e+00 0.06384615 0.03879626
## 21 1e-02 1e+01 0.55102564 0.04000260
## 22 1e-01 1e+01 0.55102564 0.04000260
## 23 1e+00 1e+01 0.52294872 0.06031079
## 24 1e+01 1e+01 0.51032051 0.05494774
## 25 1e+02 1e+01 0.51032051 0.05494774
+ The lowest cross-validation error for the polynomial kernel SVM is obtained with cost = 100 and degree = 2.
+ The lowest cross-validation error for the radial kernel SVM is obtained with cost = 100 and gamma = 0.01.
svm.linear = svm(mpglevel ~ ., data = Auto, kernel = "linear", cost = 1)
svm.poly = svm(mpglevel ~ ., data = Auto, kernel = "polynomial", cost = 100, 
    degree = 2)
svm.radial = svm(mpglevel ~ ., data = Auto, kernel = "radial", cost = 100, gamma = 0.01)
plotpairs = function(fit) {
    # plot the fitted SVM's decision regions in the (mpg, predictor) plane
    # for every predictor except the response-related and name columns
    for (name in names(Auto)[!(names(Auto) %in% c("mpg", "mpglevel", "name"))]) {
        plot(fit, Auto, as.formula(paste("mpg~", name, sep = "")))
    }
}
plotpairs(svm.linear)

plotpairs(svm.poly)

plotpairs(svm.radial)

detach(Auto)

8

This problem involves the OJ data set which is part of the ISLR package.

library(ISLR)
attach(OJ)
set.seed(53)
train<-sample(dim(OJ)[1], 800)
train.OJ<-OJ[train,]
test.OJ<-OJ[-train,]
library(e1071)
svm.linearOJ<- svm(Purchase ~ ., kernel='linear',  data=train.OJ, cost=0.01)
summary(svm.linearOJ)
## 
## Call:
## svm(formula = Purchase ~ ., data = train.OJ, kernel = "linear", 
##     cost = 0.01)
## 
## 
## Parameters:
##    SVM-Type:  C-classification 
##  SVM-Kernel:  linear 
##        cost:  0.01 
##       gamma:  0.05555556 
## 
## Number of Support Vectors:  444
## 
##  ( 222 222 )
## 
## 
## Number of Classes:  2 
## 
## Levels: 
##  CH MM
+ The support vector classifier uses 444 of the 800 training observations as support vectors, split evenly between Citrus Hill and Minute Maid with 222 each.
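+ The large number of support vectors reflects the very small cost (0.01), which allows a wide margin with many violations. As a quick check (sketch, not run here), refitting with a larger cost should use fewer support vectors:
# total number of support vectors at a larger cost
svm(Purchase ~ ., kernel = 'linear', data = train.OJ, cost = 10)$tot.nSV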
train.pred<-predict(svm.linearOJ, train.OJ)
table(train.OJ$Purchase, train.pred)
##     train.pred
##       CH  MM
##   CH 423  59
##   MM  82 236
mean(train.pred != train.OJ$Purchase)
## [1] 0.17625
+ The training error rate is ~ 17.63%
test.pred<-predict(svm.linearOJ, test.OJ)
table(test.OJ$Purchase, test.pred)
##     test.pred
##       CH  MM
##   CH 155  16
##   MM  21  78
mean(test.pred != test.OJ$Purchase)
## [1] 0.137037
+ The test error rate is ~ 13.7%
tune.OJ<-tune(svm, Purchase ~ ., data=train.OJ, kernel='linear', ranges=list(cost=10^seq(-2,1, by=.5)))
summary(tune.OJ)
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost
##    10
## 
## - best performance: 0.1825 
## 
## - Detailed performance results:
##          cost   error dispersion
## 1  0.01000000 0.18375 0.03120831
## 2  0.03162278 0.18250 0.02443813
## 3  0.10000000 0.19000 0.02266912
## 4  0.31622777 0.18375 0.02766993
## 5  1.00000000 0.18625 0.03251602
## 6  3.16227766 0.18875 0.03197764
## 7 10.00000000 0.18250 0.03129164
+ The tuning selected cost = 10 as the optimal value.
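+ The cross-validation error across the cost grid can also be inspected visually (sketch; output not shown):
# CV error as a function of cost for the tuned support vector classifier
plot(tune.OJ)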
newsvm.linearoj<-svm(Purchase ~ ., kernel='linear', data=train.OJ, cost=tune.OJ$best.parameters$cost)
train.pred<-predict(newsvm.linearoj, train.OJ)
table(train.OJ$Purchase, train.pred)
##     train.pred
##       CH  MM
##   CH 425  57
##   MM  78 240
mean(train.pred != train.OJ$Purchase)
## [1] 0.16875
+ With the optimal cost, the training error rate decreases to approximately 16.9%.
test.pred<-predict(newsvm.linearoj, test.OJ)
table(test.OJ$Purchase, test.pred)
##     test.pred
##       CH  MM
##   CH 152  19
##   MM  19  80
mean(test.pred != test.OJ$Purchase)
## [1] 0.1407407
+ Unlike the training error rate, the test error rate increases slightly with the optimal cost, to approximately 14.1%.
svm.radialoj<-svm(Purchase~ ., kernel='radial', data=train.OJ )
summary(svm.radialoj)
## 
## Call:
## svm(formula = Purchase ~ ., data = train.OJ, kernel = "radial")
## 
## 
## Parameters:
##    SVM-Type:  C-classification 
##  SVM-Kernel:  radial 
##        cost:  1 
##       gamma:  0.05555556 
## 
## Number of Support Vectors:  383
## 
##  ( 193 190 )
## 
## 
## Number of Classes:  2 
## 
## Levels: 
##  CH MM
+ The radial kernel with the default cost and gamma uses 383 of the 800 training observations as support vectors: 193 for Citrus Hill and 190 for Minute Maid.
train.pred<-predict(svm.radialoj, train.OJ)
table(train.OJ$Purchase, train.pred)
##     train.pred
##       CH  MM
##   CH 437  45
##   MM  76 242
mean(train.pred != train.OJ$Purchase)
## [1] 0.15125
+ The training error rate for the radial kernel is approximately 15.1%.
test.pred<-predict(svm.radialoj, test.OJ)
table(test.OJ$Purchase, test.pred)
##     test.pred
##       CH  MM
##   CH 152  19
##   MM  17  82
mean(test.pred != test.OJ$Purchase)
## [1] 0.1333333
+ The test error rate for the radial kernel is approximately 13.3%.
tunerad.OJ<-tune(svm, Purchase ~ ., data=train.OJ, kernel='radial', ranges=list(cost=10^seq(-2,1, by=.5)))
summary(tunerad.OJ)
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##      cost
##  3.162278
## 
## - best performance: 0.185 
## 
## - Detailed performance results:
##          cost   error dispersion
## 1  0.01000000 0.39750 0.04632314
## 2  0.03162278 0.33375 0.07242093
## 3  0.10000000 0.19125 0.05466120
## 4  0.31622777 0.18875 0.04581439
## 5  1.00000000 0.19000 0.03987829
## 6  3.16227766 0.18500 0.03855011
## 7 10.00000000 0.19375 0.03963812
+ The tuning selected cost = 3.162278 as the optimal value.
newsvm.radialoj<-svm(Purchase ~ ., kernel='radial', data=train.OJ, cost=tunerad.OJ$best.parameters$cost)
train.pred<-predict(newsvm.radialoj, train.OJ)
table(train.OJ$Purchase, train.pred)
##     train.pred
##       CH  MM
##   CH 437  45
##   MM  77 241
mean(train.pred != train.OJ$Purchase)
## [1] 0.1525
+ The training error rate for the radial kernel with the optimal cost is approximately 15.3%, a slight increase over the fit with the default cost.
test.pred<-predict(newsvm.radialoj, test.OJ)
table(test.OJ$Purchase, test.pred)
##     test.pred
##       CH  MM
##   CH 153  18
##   MM  22  77
mean(test.pred != test.OJ$Purchase)
## [1] 0.1481481
+ The test error rate for the radial kernel with the optimal cost is approximately 14.8%, an increase over the fit with the default cost.
svm.polyoj<-svm(Purchase~ ., kernel='polynomial', data=train.OJ, degree=2 )
summary(svm.polyoj)
## 
## Call:
## svm(formula = Purchase ~ ., data = train.OJ, kernel = "polynomial", 
##     degree = 2)
## 
## 
## Parameters:
##    SVM-Type:  C-classification 
##  SVM-Kernel:  polynomial 
##        cost:  1 
##      degree:  2 
##       gamma:  0.05555556 
##      coef.0:  0 
## 
## Number of Support Vectors:  455
## 
##  ( 230 225 )
## 
## 
## Number of Classes:  2 
## 
## Levels: 
##  CH MM
+ The degree-2 polynomial kernel uses 455 support vectors: 230 belong to Citrus Hill and 225 to Minute Maid.
train.pred<-predict(svm.polyoj, train.OJ)
table(train.OJ$Purchase, train.pred)
##     train.pred
##       CH  MM
##   CH 448  34
##   MM 115 203
mean(train.pred != train.OJ$Purchase)
## [1] 0.18625
+ The training error rate for the degree-2 polynomial kernel is approximately 18.6%.
test.pred<-predict(svm.polyoj, test.OJ)
table(test.OJ$Purchase, test.pred)
##     test.pred
##       CH  MM
##   CH 158  13
##   MM  32  67
mean(test.pred != test.OJ$Purchase)
## [1] 0.1666667
+ The test error rate for the degree-2 polynomial kernel is approximately 16.7%.
tunepoly.OJ<-tune(svm, Purchase ~ ., data=train.OJ, kernel='polynomial', degree=2, ranges=list(cost=10^seq(-2,1, by=.5)))
summary(tunepoly.OJ)
## 
## Parameter tuning of 'svm':
## 
## - sampling method: 10-fold cross validation 
## 
## - best parameters:
##  cost
##    10
## 
## - best performance: 0.1825 
## 
## - Detailed performance results:
##          cost   error dispersion
## 1  0.01000000 0.39250 0.05210833
## 2  0.03162278 0.36000 0.05163978
## 3  0.10000000 0.32875 0.05684103
## 4  0.31622777 0.22875 0.04678927
## 5  1.00000000 0.20750 0.04721405
## 6  3.16227766 0.18375 0.05304937
## 7 10.00000000 0.18250 0.03238227
+ The tuning selected cost = 10 as the optimal value.
newsvm.polyoj<-svm(Purchase ~ ., kernel='polynomial', data=train.OJ, degree=2, cost=tunepoly.OJ$best.parameters$cost)
train.pred<-predict(newsvm.polyoj, train.OJ)
table(train.OJ$Purchase, train.pred)
##     train.pred
##       CH  MM
##   CH 447  35
##   MM  94 224
mean(train.pred != train.OJ$Purchase)
## [1] 0.16125
+ The training error rate for the polynomial kernel with the optimal cost is approximately 16.1%, a substantial reduction compared with the original polynomial kernel.
test.pred<-predict(newsvm.polyoj, test.OJ)
table(test.OJ$Purchase, test.pred)
##     test.pred
##       CH  MM
##   CH 156  15
##   MM  24  75
mean(test.pred != test.OJ$Purchase)
## [1] 0.1444444
+ The test error rate for the polynomial kernel with the optimal cost is approximately 14.4%, a substantial reduction compared with the original polynomial kernel.
detach(OJ)
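+ To compare the three tuned kernels directly on the held-out data, one could collect their test error rates (sketch, reusing the fitted objects above; the name test.errs is only illustrative). Based on the rates reported above, the three models are close, with the tuned linear kernel slightly ahead.
# test error rates of the three tuned SVMs, side by side
test.errs <- c(linear = mean(predict(newsvm.linearoj, test.OJ) != test.OJ$Purchase),
               radial = mean(predict(newsvm.radialoj, test.OJ) != test.OJ$Purchase),
               polynomial = mean(predict(newsvm.polyoj, test.OJ) != test.OJ$Purchase))
test.errs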