Question 10

10. This question should be answered using the Weekly data set, which is part of the ISLR package. This data is similar in nature to the Smarket data from this chapter’s lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010.


(a) Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns?

data("Weekly")
attach(Weekly)
summary(Weekly)
##       Year           Lag1               Lag2               Lag3         
##  Min.   :1990   Min.   :-18.1950   Min.   :-18.1950   Min.   :-18.1950  
##  1st Qu.:1995   1st Qu.: -1.1540   1st Qu.: -1.1540   1st Qu.: -1.1580  
##  Median :2000   Median :  0.2410   Median :  0.2410   Median :  0.2410  
##  Mean   :2000   Mean   :  0.1506   Mean   :  0.1511   Mean   :  0.1472  
##  3rd Qu.:2005   3rd Qu.:  1.4050   3rd Qu.:  1.4090   3rd Qu.:  1.4090  
##  Max.   :2010   Max.   : 12.0260   Max.   : 12.0260   Max.   : 12.0260  
##       Lag4               Lag5              Volume            Today         
##  Min.   :-18.1950   Min.   :-18.1950   Min.   :0.08747   Min.   :-18.1950  
##  1st Qu.: -1.1580   1st Qu.: -1.1660   1st Qu.:0.33202   1st Qu.: -1.1540  
##  Median :  0.2380   Median :  0.2340   Median :1.00268   Median :  0.2410  
##  Mean   :  0.1458   Mean   :  0.1399   Mean   :1.57462   Mean   :  0.1499  
##  3rd Qu.:  1.4090   3rd Qu.:  1.4050   3rd Qu.:2.05373   3rd Qu.:  1.4050  
##  Max.   : 12.0260   Max.   : 12.0260   Max.   :9.32821   Max.   : 12.0260  
##  Direction 
##  Down:484  
##  Up  :605  
##            
##            
##            
## 
Weekly %>% keep(is.numeric) %>% 
  gather() %>% head()
##    key value
## 1 Year  1990
## 2 Year  1990
## 3 Year  1990
## 4 Year  1990
## 5 Year  1990
## 6 Year  1990
Weekly %>% keep(is.numeric) %>% 
  gather() %>% ggplot(aes(value)) +
  facet_wrap(~ key, scales = 'free') + geom_histogram(bins = 40)

pairs(Weekly[,-9])

cor(Weekly[,-9])
##               Year         Lag1        Lag2        Lag3         Lag4
## Year    1.00000000 -0.032289274 -0.03339001 -0.03000649 -0.031127923
## Lag1   -0.03228927  1.000000000 -0.07485305  0.05863568 -0.071273876
## Lag2   -0.03339001 -0.074853051  1.00000000 -0.07572091  0.058381535
## Lag3   -0.03000649  0.058635682 -0.07572091  1.00000000 -0.075395865
## Lag4   -0.03112792 -0.071273876  0.05838153 -0.07539587  1.000000000
## Lag5   -0.03051910 -0.008183096 -0.07249948  0.06065717 -0.075675027
## Volume  0.84194162 -0.064951313 -0.08551314 -0.06928771 -0.061074617
## Today  -0.03245989 -0.075031842  0.05916672 -0.07124364 -0.007825873
##                Lag5      Volume        Today
## Year   -0.030519101  0.84194162 -0.032459894
## Lag1   -0.008183096 -0.06495131 -0.075031842
## Lag2   -0.072499482 -0.08551314  0.059166717
## Lag3    0.060657175 -0.06928771 -0.071243639
## Lag4   -0.075675027 -0.06107462 -0.007825873
## Lag5    1.000000000 -0.05851741  0.011012698
## Volume -0.058517414  1.00000000 -0.033077783
## Today   0.011012698 -0.03307778  1.000000000

The summaries and histograms suggest that the lag variables are roughly symmetric and approximately normally distributed, while Volume is strongly right-skewed with outliers in the right tail. The scatterplot matrix and the correlation matrix show essentially no correlation among the lag variables or between the lags and Today, but there is a clear relationship between Year and Volume: trading volume trends upward over time, with a correlation of about 0.84.
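
To make the Year-Volume relationship noted above explicit, a quick plot of Volume against Year can be drawn (a minimal sketch, assuming ggplot2 is loaded as in the earlier chunks):

ggplot(Weekly, aes(x = Year, y = Volume)) +
  geom_point(alpha = 0.3) +   # weekly observations
  geom_smooth(se = FALSE)     # smoothed trend in trading volume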

(b) Use the full data set to perform a logistic regression with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones?

weekly.glm1 = glm(data = Weekly, formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, family = 'binomial' )
summary(weekly.glm1)
## 
## Call:
## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + 
##     Volume, family = "binomial", data = Weekly)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -1.6949  -1.2565   0.9913   1.0849   1.4579  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)   
## (Intercept)  0.26686    0.08593   3.106   0.0019 **
## Lag1        -0.04127    0.02641  -1.563   0.1181   
## Lag2         0.05844    0.02686   2.175   0.0296 * 
## Lag3        -0.01606    0.02666  -0.602   0.5469   
## Lag4        -0.02779    0.02646  -1.050   0.2937   
## Lag5        -0.01447    0.02638  -0.549   0.5833   
## Volume      -0.02274    0.03690  -0.616   0.5377   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 1496.2  on 1088  degrees of freedom
## Residual deviance: 1486.4  on 1082  degrees of freedom
## AIC: 1500.4
## 
## Number of Fisher Scoring iterations: 4

From the summary of the logistic model, only Lag2 appears to be statistically significant among the predictors, with a p-value of about 0.03 (below the 0.05 threshold).
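
As a quick follow-up, the Lag2 coefficient can be put on the odds scale (a small sketch using the fitted model above):

# exp(0.05844) is roughly 1.06, so a one-unit increase in Lag2 multiplies the
# odds of an Up week by about 6%, a statistically significant but weak effect
exp(coef(weekly.glm1)["Lag2"])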

(c) Compute the confusion matrix and overall fraction of correct predictions. Explain what the confusion matrix is telling you about the types of mistakes made by logistic regression.

weekly.glm1.preds = predict(weekly.glm1, type = 'response')
head(weekly.glm1.preds)
##         1         2         3         4         5         6 
## 0.6086249 0.6010314 0.5875699 0.4816416 0.6169013 0.5684190
weekly.glm1.class  = ifelse(weekly.glm1.preds > 0.5, 'Up','Down')
table(weekly.glm1.class, Direction)
##                  Direction
## weekly.glm1.class Down  Up
##              Down   54  48
##              Up    430 557
caret::confusionMatrix(as.factor(Direction), as.factor(weekly.glm1.class))
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction Down  Up
##       Down   54 430
##       Up     48 557
##                                          
##                Accuracy : 0.5611         
##                  95% CI : (0.531, 0.5908)
##     No Information Rate : 0.9063         
##     P-Value [Acc > NIR] : 1              
##                                          
##                   Kappa : 0.035          
##                                          
##  Mcnemar's Test P-Value : <2e-16         
##                                          
##             Sensitivity : 0.52941        
##             Specificity : 0.56434        
##          Pos Pred Value : 0.11157        
##          Neg Pred Value : 0.92066        
##              Prevalence : 0.09366        
##          Detection Rate : 0.04959        
##    Detection Prevalence : 0.44444        
##       Balanced Accuracy : 0.54687        
##                                          
##        'Positive' Class : Down           
## 

The overall fraction of correct predictions is (54 + 557)/1089, or about 56.1%. Reading the table with the predicted class in the rows and the true Direction in the columns, the model predicts Up for 430 weeks that are actually Down and predicts Down for 48 weeks that are actually Up. In other words, it identifies only 54 of the 484 Down weeks (about 11%), so it does a poor job of detecting downward movements of the market.
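
The same accuracy can be recovered directly from the predicted classes (a one-line check, assuming the objects above are still in the workspace):

# Overall fraction of correct predictions; should be about 0.561
mean(weekly.glm1.class == Direction)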

(d) Now fit the logistic regression model using a training data period from 1990 to 2008, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010).

train = Year >= 1990 & Year <= 2008
Weekly.train = Weekly[train,]
Weekly.test = Weekly[!train,]
(head(Weekly.train))
##   Year   Lag1   Lag2   Lag3   Lag4   Lag5    Volume  Today Direction
## 1 1990  0.816  1.572 -3.936 -0.229 -3.484 0.1549760 -0.270      Down
## 2 1990 -0.270  0.816  1.572 -3.936 -0.229 0.1485740 -2.576      Down
## 3 1990 -2.576 -0.270  0.816  1.572 -3.936 0.1598375  3.514        Up
## 4 1990  3.514 -2.576 -0.270  0.816  1.572 0.1616300  0.712        Up
## 5 1990  0.712  3.514 -2.576 -0.270  0.816 0.1537280  1.178        Up
## 6 1990  1.178  0.712  3.514 -2.576 -0.270 0.1544440 -1.372      Down
(head(Weekly.test))
##     Year   Lag1   Lag2   Lag3   Lag4   Lag5   Volume  Today Direction
## 986 2009  6.760 -1.698  0.926  0.418 -2.251 3.793110 -4.448      Down
## 987 2009 -4.448  6.760 -1.698  0.926  0.418 5.043904 -4.518      Down
## 988 2009 -4.518 -4.448  6.760 -1.698  0.926 5.948758 -2.137      Down
## 989 2009 -2.137 -4.518 -4.448  6.760 -1.698 6.129763 -0.730      Down
## 990 2009 -0.730 -2.137 -4.518 -4.448  6.760 5.602004  5.173        Up
## 991 2009  5.173 -0.730 -2.137 -4.518 -4.448 6.217632 -4.808      Down
weekly.glm2 = glm(data = Weekly.train, formula =  Direction ~ Lag2, family = 'binomial')
summary(weekly.glm2)
## 
## Call:
## glm(formula = Direction ~ Lag2, family = "binomial", data = Weekly.train)
## 
## Deviance Residuals: 
##    Min      1Q  Median      3Q     Max  
## -1.536  -1.264   1.021   1.091   1.368  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)   
## (Intercept)  0.20326    0.06428   3.162  0.00157 **
## Lag2         0.05810    0.02870   2.024  0.04298 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 1354.7  on 984  degrees of freedom
## Residual deviance: 1350.5  on 983  degrees of freedom
## AIC: 1354.5
## 
## Number of Fisher Scoring iterations: 4
weekly.glm2.preds = predict(weekly.glm2, newdata = Weekly.test, type = 'response')
weekly.glm2.class = ifelse(weekly.glm2.preds > 0.5, 'Up','Down')
caret::confusionMatrix(Weekly.test$Direction, as.factor(weekly.glm2.class))
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction Down Up
##       Down    9 34
##       Up      5 56
##                                          
##                Accuracy : 0.625          
##                  95% CI : (0.5247, 0.718)
##     No Information Rate : 0.8654         
##     P-Value [Acc > NIR] : 1              
##                                          
##                   Kappa : 0.1414         
##                                          
##  Mcnemar's Test P-Value : 7.34e-06       
##                                          
##             Sensitivity : 0.64286        
##             Specificity : 0.62222        
##          Pos Pred Value : 0.20930        
##          Neg Pred Value : 0.91803        
##              Prevalence : 0.13462        
##          Detection Rate : 0.08654        
##    Detection Prevalence : 0.41346        
##       Balanced Accuracy : 0.63254        
##                                          
##        'Positive' Class : Down           
## 

The overall fraction of correct predictions on the held-out data is (9 + 56)/104, i.e., 62.5%. The accuracy is higher than the 56.1% obtained earlier on the full data set, but the model still does poorly on the Down class: it identifies only 9 of the 43 Down weeks in the test set (about 20.9%).
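
Both of these numbers can be checked directly from the predictions (a small sketch using the test-set objects created above):

# Overall test accuracy (~0.625) and the fraction of actual Down weeks
# that are identified correctly (~0.209)
mean(weekly.glm2.class == Weekly.test$Direction)
mean(weekly.glm2.class[Weekly.test$Direction == "Down"] == "Down")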

(e) Repeat (d) using LDA.

weekly.lda = lda(data = Weekly.train, Direction ~ Lag2)
weekly.lda
## Call:
## lda(Direction ~ Lag2, data = Weekly.train)
## 
## Prior probabilities of groups:
##      Down        Up 
## 0.4477157 0.5522843 
## 
## Group means:
##             Lag2
## Down -0.03568254
## Up    0.26036581
## 
## Coefficients of linear discriminants:
##            LD1
## Lag2 0.4414162

We observe that about 44.8% of the training observations correspond to weeks when the market went down and about 55.2% to weeks when it went up. The group means show that Lag2 tends to be slightly negative before a Down week and positive before an Up week, and the positive coefficient of the linear discriminant on Lag2 is consistent with this: larger values of Lag2 push the classification toward Up.
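
The prior probabilities reported by lda() are simply the class proportions in the training data, which can be verified directly (a one-line check):

# Training-set class proportions, matching the LDA priors above
prop.table(table(Weekly.train$Direction))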

weekly.lda.preds = predict(weekly.lda, Weekly.test)
weekly.lda.class = weekly.lda.preds$class
mean(Weekly.test$Direction == weekly.lda.class)
## [1] 0.625
caret::confusionMatrix(Weekly.test$Direction , as.factor(weekly.lda.class))
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction Down Up
##       Down    9 34
##       Up      5 56
##                                          
##                Accuracy : 0.625          
##                  95% CI : (0.5247, 0.718)
##     No Information Rate : 0.8654         
##     P-Value [Acc > NIR] : 1              
##                                          
##                   Kappa : 0.1414         
##                                          
##  Mcnemar's Test P-Value : 7.34e-06       
##                                          
##             Sensitivity : 0.64286        
##             Specificity : 0.62222        
##          Pos Pred Value : 0.20930        
##          Neg Pred Value : 0.91803        
##              Prevalence : 0.13462        
##          Detection Rate : 0.08654        
##    Detection Prevalence : 0.41346        
##       Balanced Accuracy : 0.63254        
##                                          
##        'Positive' Class : Down           
## 

(f) Repeat (d) using QDA.

weekly.qda = qda(Direction ~ Lag2, Weekly.train)
weekly.qda
## Call:
## qda(Direction ~ Lag2, data = Weekly.train)
## 
## Prior probabilities of groups:
##      Down        Up 
## 0.4477157 0.5522843 
## 
## Group means:
##             Lag2
## Down -0.03568254
## Up    0.26036581
qda.class = predict(weekly.qda, newdata = Weekly.test)$class
mean(Weekly.test$Direction == qda.class)
## [1] 0.5865385
caret::confusionMatrix(Weekly.test$Direction, as.factor(qda.class))
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction Down Up
##       Down    0 43
##       Up      0 61
##                                           
##                Accuracy : 0.5865          
##                  95% CI : (0.4858, 0.6823)
##     No Information Rate : 1               
##     P-Value [Acc > NIR] : 1               
##                                           
##                   Kappa : 0               
##                                           
##  Mcnemar's Test P-Value : 1.504e-10       
##                                           
##             Sensitivity :     NA          
##             Specificity : 0.5865          
##          Pos Pred Value :     NA          
##          Neg Pred Value :     NA          
##              Prevalence : 0.0000          
##          Detection Rate : 0.0000          
##    Detection Prevalence : 0.4135          
##       Balanced Accuracy :     NA          
##                                           
##        'Positive' Class : Down            
## 

(g) Repeat (d) using KNN with K = 1.

train.X = cbind(Weekly[train, 3])    # column 3 is Lag2
test.X = cbind(Weekly[!train, 3])
train.Direction = Weekly[train, 9]   # column 9 is Direction
test.Direction = Weekly[!train, 9]
knn.pred = knn(train.X,test.X,train.Direction, k = 1)
table(test.Direction, knn.pred)
##               knn.pred
## test.Direction Down Up
##           Down   21 22
##           Up     29 32
mean(test.Direction == knn.pred)
## [1] 0.5096154

(h) Which of these methods appears to provide the best results on this data?

The highest accuracy among the models tried belongs to logistic regression and LDA, both at about 62.5%. QDA has the next highest accuracy at 58.65%, followed by KNN at about 51%. Logistic regression and LDA therefore appear best suited for modeling Direction in the Weekly data set.
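
For a side-by-side view, the held-out accuracies computed above can be collected into one small table (a sketch that assumes the prediction objects from parts (d) through (g) are still in the workspace):

data.frame(
  method   = c("Logistic (Lag2)", "LDA", "QDA", "KNN (k = 1)"),
  accuracy = c(mean(weekly.glm2.class == Weekly.test$Direction),
               mean(weekly.lda.class == Weekly.test$Direction),
               mean(qda.class == Weekly.test$Direction),
               mean(knn.pred == test.Direction)))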

(i) Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. Report the variables, method, and associated confusion matrix that appears to provide the best results on the held out data. Note that you should also experiment with values for K in the KNN classifier.

#Logistic regression with Lag2:Lag1
weekly.glm3 = glm(Direction ~ Lag2:Lag1, data = Weekly.train, family = "binomial")
summary(weekly.glm3)
## 
## Call:
## glm(formula = Direction ~ Lag2:Lag1, family = "binomial", data = Weekly.train)
## 
## Deviance Residuals: 
##    Min      1Q  Median      3Q     Max  
## -1.368  -1.269   1.077   1.089   1.353  
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  0.21333    0.06421   3.322 0.000893 ***
## Lag2:Lag1    0.00717    0.00697   1.029 0.303649    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 1354.7  on 984  degrees of freedom
## Residual deviance: 1353.6  on 983  degrees of freedom
## AIC: 1357.6
## 
## Number of Fisher Scoring iterations: 4
weekly.glm3.preds = predict(weekly.glm3, Weekly.test, type = "response")
weekly.glm3.class  = ifelse(weekly.glm3.preds > 0.5,"Up","Down")
table(weekly.glm3.class, Weekly.test$Direction)
##                  
## weekly.glm3.class Down Up
##              Down    1  1
##              Up     42 60
mean(weekly.glm3.class == Weekly.test$Direction)
## [1] 0.5865385
# LDA with main and interaction effects
weekly.lda2 = lda(Direction ~ Lag1*Lag2, data = Weekly.train)
weekly.lda2.class = predict(weekly.lda2, Weekly.test)$class
mean(weekly.lda2.class == Weekly.test$Direction)
## [1] 0.5769231
weekly.qda2 <- qda(Direction ~ Lag2 + sqrt(abs(Lag2)), data = Weekly.train)
weekly.qda2.class <- predict(weekly.qda2, Weekly.test)$class
table(weekly.qda2.class, Weekly.test$Direction)
##                  
## weekly.qda2.class Down Up
##              Down   12 13
##              Up     31 48
mean(weekly.qda2.class == Weekly.test$Direction)
## [1] 0.5769231
knn.pred2 = knn(train.X,test.X,train.Direction, k = 5)
table(test.Direction, knn.pred2)
##               knn.pred2
## test.Direction Down Up
##           Down   15 28
##           Up     20 41
mean(test.Direction == knn.pred2)
## [1] 0.5384615
knn.pred3 = knn(train.X,test.X,train.Direction, k = 10)
table(test.Direction, knn.pred3)
##               knn.pred3
## test.Direction Down Up
##           Down   17 26
##           Up     18 43
mean(test.Direction == knn.pred3)
## [1] 0.5769231
knn.pred4 = knn(train.X,test.X,train.Direction, k = 20)
table(test.Direction, knn.pred4)
##               knn.pred4
## test.Direction Down Up
##           Down   21 22
##           Up     21 40
mean(test.Direction == knn.pred4)
## [1] 0.5865385

Of all the models tried, the original logistic regression and LDA models without any transformations perform best on the held-out data, with an accuracy of 62.5%. Among the K values tried for KNN, K = 20 gives the highest accuracy at about 58.65%, which is still below the logistic regression and LDA models.
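
Instead of fitting each K by hand, a grid of K values can be swept in one call (a minimal sketch; the accuracies may differ slightly from the runs above because knn() breaks ties at random, hence the seed):

set.seed(1)   # arbitrary seed, only for reproducibility of tie-breaking
sapply(c(1, 5, 10, 20, 50, 100), function(k)
  mean(knn(train.X, test.X, train.Direction, k = k) == test.Direction))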

Question 11

In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set.

(a) Create a binary variable, mpg01, that contains a 1 if mpg contains a value above its median, and a 0 if mpg contains a value below its median. You can compute the median using the median() function. Note you may find it helpful to use the data.frame() function to create a single data set containing both mpg01 and the other Auto variables.

detach(Weekly)
attach(Auto)
## The following object is masked from package:ggplot2:
## 
##     mpg
mpg01 = ifelse(Auto$mpg > median(Auto$mpg), 1, 0)
str(Auto)
## 'data.frame':    392 obs. of  9 variables:
##  $ mpg         : num  18 15 18 16 17 15 14 14 14 15 ...
##  $ cylinders   : num  8 8 8 8 8 8 8 8 8 8 ...
##  $ displacement: num  307 350 318 304 302 429 454 440 455 390 ...
##  $ horsepower  : num  130 165 150 150 140 198 220 215 225 190 ...
##  $ weight      : num  3504 3693 3436 3433 3449 ...
##  $ acceleration: num  12 11.5 11 12 10.5 10 9 8.5 10 8.5 ...
##  $ year        : num  70 70 70 70 70 70 70 70 70 70 ...
##  $ origin      : num  1 1 1 1 1 1 1 1 1 1 ...
##  $ name        : Factor w/ 304 levels "amc ambassador brougham",..: 49 36 231 14 161 141 54 223 241 2 ...
Auto = data.frame(mpg01, Auto[, -1])
attach(Auto)
## The following object is masked _by_ .GlobalEnv:
## 
##     mpg01
## The following objects are masked from Auto (pos = 3):
## 
##     acceleration, cylinders, displacement, horsepower, name, origin,
##     weight, year
head(Auto)
##   mpg01 cylinders displacement horsepower weight acceleration year origin
## 1     0         8          307        130   3504         12.0   70      1
## 2     0         8          350        165   3693         11.5   70      1
## 3     0         8          318        150   3436         11.0   70      1
## 4     0         8          304        150   3433         12.0   70      1
## 5     0         8          302        140   3449         10.5   70      1
## 6     0         8          429        198   4341         10.0   70      1
##                        name
## 1 chevrolet chevelle malibu
## 2         buick skylark 320
## 3        plymouth satellite
## 4             amc rebel sst
## 5               ford torino
## 6          ford galaxie 500
(b) Explore the data graphically in order to investigate the association between mpg01 and the other features. Which of the other features seem most likely to be useful in predicting mpg01? Scatterplots and boxplots may be useful tools to answer this question. Describe your findings.

pairs(Auto)

From the scatterplot matrix, displacement, horsepower, weight, and acceleration appear to be associated with mpg01; we explore these variables, along with cylinders, further using boxplots.

boxplot(cylinders ~ mpg01, main = "Cylinders vs mpg01")

boxplot(horsepower ~ mpg01, main = "Horsepower vs mpg01")

boxplot(weight ~ mpg01, main = "Weight vs mpg01")

boxplot(displacement ~ mpg01, main = "Displacement vs mpg01")

boxplot(acceleration ~ mpg01, main = "Acceleration vs mpg01")

From the boxplots above, the following patterns stand out (a quick numerical check follows the list):

* Cars with 6 or more cylinders tend to have mpg below the median, whereas 4-cylinder cars tend to have mpg above the median.
* Cars with higher horsepower and higher weight tend to have mpg below the median.
* Cars with higher acceleration tend to have mpg above the median.
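
As promised, a quick numerical check: the correlation of each numeric column with mpg01 points to the same predictors (a small sketch; the name column is excluded because it is a factor):

round(cor(Auto[, sapply(Auto, is.numeric)])["mpg01", ], 2)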

(c) Split the data into a training set and a test set.

set.seed(468)
index = sample(nrow(Auto), 0.8*nrow(Auto), replace = F)
Auto.train = Auto[index,]
Auto.test = Auto[-index,]

(d) Perform LDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained?

auto.lda = lda(mpg01 ~ cylinders + acceleration + weight+ displacement+ horsepower, data = Auto.train)
auto.lda
## Call:
## lda(mpg01 ~ cylinders + acceleration + weight + displacement + 
##     horsepower, data = Auto.train)
## 
## Prior probabilities of groups:
##         0         1 
## 0.4920128 0.5079872 
## 
## Group means:
##   cylinders acceleration   weight displacement horsepower
## 0  6.818182     14.39610 3627.000     276.5584  130.58442
## 1  4.194969     16.47044 2340.956     115.9906   79.14465
## 
## Coefficients of linear discriminants:
##                        LD1
## cylinders    -0.3519850997
## acceleration  0.0098004725
## weight       -0.0009647283
## displacement -0.0035904724
## horsepower    0.0050694272
auto.lda.class = predict(auto.lda, Auto.test)$class
table(Auto.test$mpg01,auto.lda.class)
##    auto.lda.class
##      0  1
##   0 36  6
##   1  2 35
mean(Auto.test$mpg01 == auto.lda.class)
## [1] 0.8987342

The fraction of correct predictions on the test set is (36 + 35)/79, which is about 89.9%. The test error of the model is therefore (6 + 2)/79, or about 10.1%.
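
Equivalently, the test error is one minus the accuracy computed above (a one-line check):

1 - mean(Auto.test$mpg01 == auto.lda.class)   # (6 + 2) / 79, about 0.101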

(e) Perform QDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained?

auto.qda = qda(mpg01 ~ cylinders + acceleration + weight+ displacement+ horsepower, data = Auto.train)
auto.qda
## Call:
## qda(mpg01 ~ cylinders + acceleration + weight + displacement + 
##     horsepower, data = Auto.train)
## 
## Prior probabilities of groups:
##         0         1 
## 0.4920128 0.5079872 
## 
## Group means:
##   cylinders acceleration   weight displacement horsepower
## 0  6.818182     14.39610 3627.000     276.5584  130.58442
## 1  4.194969     16.47044 2340.956     115.9906   79.14465
auto.qda.class = predict(auto.qda, Auto.test)$class
table(Auto.test$mpg01, auto.qda.class)
##    auto.qda.class
##      0  1
##   0 37  5
##   1  3 34
mean(Auto.test$mpg01 != auto.qda.class)
## [1] 0.1012658

The test error for QDA is about 10.1%, essentially the same as for LDA.

(f) Perform logistic regression on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained?

auto.glm = glm(mpg01 ~ cylinders + acceleration + weight+ displacement+ horsepower, data = Auto.train, family = "binomial")
summary(auto.glm)
## 
## Call:
## glm(formula = mpg01 ~ cylinders + acceleration + weight + displacement + 
##     horsepower, family = "binomial", data = Auto.train)
## 
## Deviance Residuals: 
##     Min       1Q   Median       3Q      Max  
## -2.4416  -0.1492   0.1211   0.3497   3.3033  
## 
## Coefficients:
##                Estimate Std. Error z value Pr(>|z|)    
## (Intercept)  10.8076250  2.8748375   3.759  0.00017 ***
## cylinders     0.2170296  0.3724232   0.583  0.56006    
## acceleration  0.0142867  0.1331921   0.107  0.91458    
## weight       -0.0021721  0.0009849  -2.205  0.02742 *  
## displacement -0.0160832  0.0090490  -1.777  0.07551 .  
## horsepower   -0.0331511  0.0217150  -1.527  0.12685    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 433.83  on 312  degrees of freedom
## Residual deviance: 166.31  on 307  degrees of freedom
## AIC: 178.31
## 
## Number of Fisher Scoring iterations: 7
auto.glm.preds = predict(auto.glm, Auto.test, type = "response")
auto.glm.class = ifelse(auto.glm.preds > 0.5, 1, 0)
table(Auto.test$mpg01, auto.glm.class)
##    auto.glm.class
##      0  1
##   0 37  5
##   1  2 35
mean(Auto.test$mpg01 != auto.glm.class)
## [1] 0.08860759

The test error rate of the logistic regression model, about 8.86%, is the lowest so far.
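
The three test errors obtained so far can be placed side by side (a sketch using the class predictions computed above):

c(LDA      = mean(Auto.test$mpg01 != auto.lda.class),
  QDA      = mean(Auto.test$mpg01 != auto.qda.class),
  logistic = mean(Auto.test$mpg01 != auto.glm.class))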

(g) Perform KNN on the training data, with several values of K, in order to predict mpg01. Use only the variables that seemed most associated with mpg01 in (b). What test errors do you obtain? Which value of K seems to perform the best on this data set?

Auto.train.X = Auto.train[, c(2:6)]   # columns 2-6: cylinders, displacement, horsepower, weight, acceleration
Auto.test.X = Auto.test[, c(2:6)]

train.mpg01 = Auto.train[, 1]         # column 1 is mpg01
test.mpg01 = Auto.test[, 1]

auto.knn = knn(Auto.train.X, Auto.test.X,train.mpg01, k=1)
table(test.mpg01, auto.knn)
##           auto.knn
## test.mpg01  0  1
##          0 37  5
##          1  4 33
mean(test.mpg01 != auto.knn)
## [1] 0.1139241
auto.knn = knn(Auto.train.X, Auto.test.X,train.mpg01, k=3)
table(test.mpg01, auto.knn)
##           auto.knn
## test.mpg01  0  1
##          0 37  5
##          1  3 34
mean(test.mpg01 != auto.knn)
## [1] 0.1012658
auto.knn = knn(Auto.train.X, Auto.test.X,train.mpg01, k=5)
table(test.mpg01, auto.knn)
##           auto.knn
## test.mpg01  0  1
##          0 36  6
##          1  4 33
mean(test.mpg01 != auto.knn)
## [1] 0.1265823
auto.knn = knn(Auto.train.X, Auto.test.X,train.mpg01, k=10)
table(test.mpg01, auto.knn)
##           auto.knn
## test.mpg01  0  1
##          0 34  8
##          1  5 32
mean(test.mpg01 != auto.knn)
## [1] 0.164557

The value K = 3 appears to perform best on this data set, with a test error of about 10.1%.
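
The same comparison can be produced by sweeping K in one call (a sketch; the errors may differ slightly from the individual runs above because knn() breaks ties at random):

set.seed(468)   # seed only for reproducibility of tie-breaking
ks = c(1, 3, 5, 10, 20)
setNames(sapply(ks, function(k)
  mean(knn(Auto.train.X, Auto.test.X, train.mpg01, k = k) != test.mpg01)), ks)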

Question 13

13. Using the Boston data set, fit classification models in order to predict whether a given suburb has a crime rate above or below the median. Explore logistic regression, LDA, and KNN models using various subsets of the predictors. Describe your findings.

detach(Auto)
attach(Boston)
Boston$crim01 = ifelse(crim >= median(crim),1,0)

set.seed(456)
index2 = sample(nrow(Boston), 0.8*nrow(Boston), replace  = F)
boston.train = Boston[index2,]
boston.test = Boston[-index2,]

Logistic Model

boston.glm = glm(crim01 ~ .-crim, data = boston.train, family = 'binomial')
summary(boston.glm)
## 
## Call:
## glm(formula = crim01 ~ . - crim, family = "binomial", data = boston.train)
## 
## Deviance Residuals: 
##      Min        1Q    Median        3Q       Max  
## -2.30597  -0.07568  -0.00028   0.00009   2.31998  
## 
## Coefficients:
##               Estimate Std. Error z value Pr(>|z|)    
## (Intercept) -48.702193   9.795993  -4.972 6.64e-07 ***
## zn           -0.207673   0.094931  -2.188  0.02870 *  
## indus        -0.174343   0.079564  -2.191  0.02844 *  
## chas          1.858004   1.111015   1.672  0.09446 .  
## nox          71.758745  13.854743   5.179 2.23e-07 ***
## rm           -0.698593   1.012441  -0.690  0.49019    
## age           0.023278   0.016981   1.371  0.17042    
## dis           0.688442   0.319103   2.157  0.03097 *  
## rad           0.753532   0.268985   2.801  0.00509 ** 
## tax          -0.006325   0.005121  -1.235  0.21680    
## ptratio       0.407715   0.184206   2.213  0.02687 *  
## black        -0.008153   0.005485  -1.486  0.13717    
## lstat         0.120701   0.074802   1.614  0.10661    
## medv          0.204033   0.095391   2.139  0.03244 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 416.73  on 312  degrees of freedom
## Residual deviance: 121.90  on 299  degrees of freedom
## AIC: 149.9
## 
## Number of Fisher Scoring iterations: 10
boston.glm.preds = predict(boston.glm, boston.test, type = 'response')
boston.glm.class = ifelse(boston.glm.preds > 0.5, 1, 0)
table(boston.test$crim01, boston.glm.class)
##    boston.glm.class
##       0   1
##   0  52   8
##   1   9 124
mean(boston.test$crim01 == boston.glm.class)
## [1] 0.9119171

The accuracy of the logistic model is fairly high, about 91.2%, with zn, indus, nox, dis, rad, ptratio, and medv appearing as significant predictors (p < 0.05).
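
The significant predictors can also be pulled programmatically from the model summary (a small sketch; boston.coefs is just a temporary name introduced here):

boston.coefs = summary(boston.glm)$coefficients[-1, ]   # drop the intercept row
rownames(boston.coefs)[boston.coefs[, "Pr(>|z|)"] < 0.05]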

LDA Model

boston.lda = lda(crim01~.-crim, data = boston.train)
boston.lda
## Call:
## lda(crim01 ~ . - crim, data = boston.train)
## 
## Prior probabilities of groups:
##         0         1 
## 0.6166134 0.3833866 
## 
## Group means:
##          zn     indus       chas       nox       rm      age      dis      rad
## 0 23.453368  6.229637 0.04663212 0.4635269 6.443238 49.49948 5.307502 4.176166
## 1  1.666667 13.870500 0.15000000 0.6372333 6.208417 86.67250 2.567443 9.608333
##        tax  ptratio    black     lstat     medv
## 0 294.3575 17.71917 388.3409  9.061347 25.66321
## 1 421.9417 18.14417 360.8849 14.586583 23.11750
## 
## Coefficients of linear discriminants:
##                   LD1
## zn      -0.0006369673
## indus    0.0022710731
## chas     0.0279588563
## nox      8.5388571418
## rm       0.3103443583
## age      0.0138233489
## dis     -0.0175835072
## rad      0.0065367644
## tax      0.0011917268
## ptratio  0.1100456706
## black   -0.0033193141
## lstat    0.0332766630
## medv     0.0302342668
boston.lda.class = predict(boston.lda, boston.test)$class
table(boston.test$crim01, boston.lda.class)
##    boston.lda.class
##       0   1
##   0  44  16
##   1  17 116
mean(boston.test$crim01 == boston.lda.class)
## [1] 0.8290155

The LDA model yields an accuracy of about 82.9%, which is lower than that of the logistic model.

KNN Classification

boston.train.X = cbind(zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat, medv)[index2,]
boston.test.X <- cbind(zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat, medv)[-index2,]

train.crim01 <- Boston[index2, 15]   # column 15 is the crim01 indicator created above
test.crim01 <- Boston[-index2, 15]

boston.knn=knn(boston.train.X,boston.test.X,train.crim01,k=1)
table(boston.knn,test.crim01)
##           test.crim01
## boston.knn  0  1
##          0 41  4
##          1  5 52
mean(boston.knn == test.crim01)
## [1] 0.9117647
boston.knn=knn(boston.train.X,boston.test.X,train.crim01,k=5)
table(boston.knn,test.crim01)
##           test.crim01
## boston.knn  0  1
##          0 43  4
##          1  3 52
mean(boston.knn == test.crim01)
## [1] 0.9313725
boston.knn=knn(boston.train.X,boston.test.X,train.crim01,k=10)
table(boston.knn,test.crim01)
##           test.crim01
## boston.knn  0  1
##          0 43  9
##          1  3 47
mean(boston.knn == test.crim01)
## [1] 0.8823529
boston.knn=knn(boston.train.X,boston.test.X,train.crim01,k=20)
table(boston.knn,test.crim01)
##           test.crim01
## boston.knn  0  1
##          0 42  8
##          1  4 48
mean(boston.knn == test.crim01)
## [1] 0.8823529

Among the different values of K tried, the KNN classifier with K = 5 has the highest accuracy, about 93.1%.
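
As with the earlier problems, the different K values can also be compared in a single sweep (a sketch; the accuracies may differ slightly from the individual runs above because knn() breaks ties at random):

set.seed(456)   # seed only for reproducibility of tie-breaking
sapply(c(1, 5, 10, 20, 50), function(k)
  mean(knn(boston.train.X, boston.test.X, train.crim01, k = k) == test.crim01))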