#13. This question should be answered using the Weekly data set, which is part of the ISLR2 package. This data is similar in nature to the Smarket data from this chapter’s lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010.

##(a) Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns?

library("ISLR")
## Warning: package 'ISLR' was built under R version 4.4.2
library(MASS)
library(class)
library(corrplot)
## Warning: package 'corrplot' was built under R version 4.4.2
## corrplot 0.95 loaded
attach(Auto)
?Weekly
## starting httpd help server ...
##  done
weekly = Weekly
summary(weekly)
##       Year           Lag1               Lag2               Lag3         
##  Min.   :1990   Min.   :-18.1950   Min.   :-18.1950   Min.   :-18.1950  
##  1st Qu.:1995   1st Qu.: -1.1540   1st Qu.: -1.1540   1st Qu.: -1.1580  
##  Median :2000   Median :  0.2410   Median :  0.2410   Median :  0.2410  
##  Mean   :2000   Mean   :  0.1506   Mean   :  0.1511   Mean   :  0.1472  
##  3rd Qu.:2005   3rd Qu.:  1.4050   3rd Qu.:  1.4090   3rd Qu.:  1.4090  
##  Max.   :2010   Max.   : 12.0260   Max.   : 12.0260   Max.   : 12.0260  
##       Lag4               Lag5              Volume            Today         
##  Min.   :-18.1950   Min.   :-18.1950   Min.   :0.08747   Min.   :-18.1950  
##  1st Qu.: -1.1580   1st Qu.: -1.1660   1st Qu.:0.33202   1st Qu.: -1.1540  
##  Median :  0.2380   Median :  0.2340   Median :1.00268   Median :  0.2410  
##  Mean   :  0.1458   Mean   :  0.1399   Mean   :1.57462   Mean   :  0.1499  
##  3rd Qu.:  1.4090   3rd Qu.:  1.4050   3rd Qu.:2.05373   3rd Qu.:  1.4050  
##  Max.   : 12.0260   Max.   : 12.0260   Max.   :9.32821   Max.   : 12.0260  
##  Direction 
##  Down:484  
##  Up  :605  
pairs(weekly)

corrplot(cor(weekly[,-9]), method="number")

Volume and Year show a strong positive correlation (roughly 0.84); none of the lag variables appear meaningfully correlated with Today or with one another.
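
To confirm that pattern numerically (a quick check using the weekly data frame defined above):

cor(weekly$Year, weekly$Volume)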

##(b) Use the full data set to perform a logistic regression with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones?

weekly.fit = glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume, data = weekly, family = binomial)
summary(weekly.fit)
## 
## Call:
## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + 
##     Volume, family = binomial, data = weekly)
## 
## Coefficients:
##             Estimate Std. Error z value Pr(>|z|)   
## (Intercept)  0.26686    0.08593   3.106   0.0019 **
## Lag1        -0.04127    0.02641  -1.563   0.1181   
## Lag2         0.05844    0.02686   2.175   0.0296 * 
## Lag3        -0.01606    0.02666  -0.602   0.5469   
## Lag4        -0.02779    0.02646  -1.050   0.2937   
## Lag5        -0.01447    0.02638  -0.549   0.5833   
## Volume      -0.02274    0.03690  -0.616   0.5377   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 1496.2  on 1088  degrees of freedom
## Residual deviance: 1486.4  on 1082  degrees of freedom
## AIC: 1500.4
## 
## Number of Fisher Scoring iterations: 4

Lag2 is the only statistically significant predictor, with p = 0.0296 at the 0.05 significance level.

##(c) Compute the confusion matrix and overall fraction of correct predictions. Explain what the confusion matrix is telling you about the types of mistakes made by logistic regression.

weekly.prob =predict(weekly.fit, type="response")
weekly.prob[1:10]
##         1         2         3         4         5         6         7         8 
## 0.6086249 0.6010314 0.5875699 0.4816416 0.6169013 0.5684190 0.5786097 0.5151972 
##         9        10 
## 0.5715200 0.5554287
weekly.pred =rep("Down", length(weekly.prob))
weekly.pred[weekly.prob > 0.5] = "Up"
table(weekly.pred, weekly$Direction)
##            
## weekly.pred Down  Up
##        Down   54  48
##        Up    430 557
x = mean(weekly.pred==weekly$Direction)
round(x, digits = 4)
## [1] 0.5611

This model predicts the weekly market direction correctly only 56.1% of the time on the data it was trained on. The confusion matrix shows the mistakes are lopsided: the model almost always predicts Up, so it calls 557/605 = 92.1% of Up weeks correctly but only 54/484 = 11.2% of Down weeks.
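
Those class-specific rates can be read off the confusion matrix directly (a sketch using the objects defined above):

conf = table(weekly.pred, weekly$Direction)
conf["Up", "Up"] / sum(conf[, "Up"])       # fraction of Up weeks called correctly (about 92.1%)
conf["Down", "Down"] / sum(conf[, "Down"]) # fraction of Down weeks called correctly (about 11.2%)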

##(d) Now fit the logistic regression model using a training data period from 1990 to 2008, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held-out data (that is, the data from 2009 and 2010).

train = (weekly$Year < 2009)
weekly.0910 <- weekly[!train,]
weekly.fit<-glm( Direction~Lag2, data = weekly, family = binomial, subset = train)
weekly.prob = predict(weekly.fit, weekly.0910, type = "response")
weekly.pred = rep("Down", length(weekly.prob))
weekly.pred[weekly.prob > 0.5] = "Up"
Direction.0910 = weekly$Direction[!train]
table(weekly.pred,Direction.0910)
##            Direction.0910
## weekly.pred Down Up
##        Down    9  5
##        Up     34 56
x = mean(weekly.pred==Direction.0910)
round(x, digits = 4)
## [1] 0.625

The new model predicts the weekly market direction correctly 62.5% of the time on the held-out 2009-2010 data.

##(e) Repeat (d) using LDA.

weekly.fit<-lda( Direction~Lag2, data = weekly, family = binomial, subset = train)
weekly.pred = predict(weekly.fit, weekly.0910)$class
table(weekly.pred, Direction.0910)
##            Direction.0910
## weekly.pred Down Up
##        Down    9  5
##        Up     34 56
cat("\n")
mean(weekly.pred == Direction.0910)
## [1] 0.625

##(f) Repeat (d) using QDA.

weekly.fit<-qda( Direction~Lag2, data = weekly, subset = train)
weekly.pred = predict(weekly.fit, weekly.0910)$class
table(weekly.pred, Direction.0910)
##            Direction.0910
## weekly.pred Down Up
##        Down    0  0
##        Up     43 61
mean(weekly.pred == Direction.0910)
## [1] 0.5865385

##(g) Repeat (d) using KNN with K = 1.

train.X = as.matrix(weekly$Lag2[train])
test.X = as.matrix(weekly$Lag2[!train])
train.Direction = weekly$Direction[train]

set.seed(1)
knn.pred = knn(train.X, test.X, train.Direction, k = 1)

table(knn.pred, Direction.0910)
##         Direction.0910
## knn.pred Down Up
##     Down   21 30
##     Up     22 31
mean(knn.pred == Direction.0910)
## [1] 0.5

##(h) Repeat (d) using naive Bayes. (See the sketch after part (i) below.)

##(i) Which of these methods appears to provide the best results on this data?

Considering only overall prediction accuracy on the held-out 2009-2010 data, logistic regression and linear discriminant analysis tie for the best performance (62.5%). Quadratic discriminant analysis comes in third (58.7%), and k-nearest neighbors with k = 1 comes in fourth (50%).
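
For part (h), a minimal sketch using naiveBayes() from the e1071 package, on the same training split as in (d) (output not shown):

library(e1071)
nb.fit = naiveBayes(Direction ~ Lag2, data = weekly, subset = train)
nb.pred = predict(nb.fit, weekly.0910)
table(nb.pred, Direction.0910)
mean(nb.pred == Direction.0910)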

##(j) Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. Report the variables, method, and associated confusion matrix that appears to provide the best results on the held-out data. Note that you should also experiment with values for K in the KNN classifier.

weekly.fit<-glm( Direction~Lag2:Lag1+Lag2, data = weekly, family = binomial, subset = train)
weekly.prob = predict(weekly.fit, weekly.0910, type = "response")
weekly.pred = rep("Down", length(weekly.prob))
weekly.pred[weekly.prob > 0.5] = "Up"
Direction.0910 = weekly$Direction[!train]
table(weekly.pred,Direction.0910)
##            Direction.0910
## weekly.pred Down Up
##        Down    3  3
##        Up     40 58
mean(weekly.pred == Direction.0910)
## [1] 0.5865385
weekly.fit<-lda( Direction~Lag2:Lag1+Lag2, data = weekly, family = binomial, subset = train)
weekly.pred = predict(weekly.fit, weekly.0910)$class
table(weekly.pred, Direction.0910)
##            Direction.0910
## weekly.pred Down Up
##        Down    3  3
##        Up     40 58
mean(weekly.pred == Direction.0910)
## [1] 0.5865385
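
The exercise also suggests experimenting with K in the KNN classifier; a minimal sketch reusing train.X, test.X, and train.Direction from part (g) (output not shown):

set.seed(1)
for (k in c(1, 5, 10, 50, 100)) {
  knn.k = knn(train.X, test.X, train.Direction, k = k)
  cat("K =", k, " accuracy =", round(mean(knn.k == Direction.0910), 4), "\n")
}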

Logistic regression and LDA, trained on the 1990-2008 data, remain the best-performing models.

#14. In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set.

##(a) Create a binary variable, mpg01, that contains a 1 if mpg contains a value above its median, and a 0 if mpg contains a value below its median. You can compute the median using the median() function. Note you may find it helpful to use the data.frame() function to create a single data set containing both mpg01 and the other Auto variables.

mpg01 <- rep(0, length(mpg))
mpg01[mpg > median(mpg)] <- 1
Auto = data.frame(Auto, mpg01)

##(b) Explore the data graphically in order to investigate the association between mpg01 and the other features. Which of the other features seem most likely to be useful in predicting mpg01? Scatterplots and boxplots may be useful tools to answer this question. Describe your findings.

corrplot(cor(Auto[,-9]), method="number")  # drop column 9 (name), which is qualitative

pairs(Auto)

Cylinders, displacement, horsepower, and weight look like the most promising predictors of mpg01.
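
Boxplots make the separation between the two mpg01 classes easier to see (a sketch; plots not shown):

par(mfrow = c(2, 2))
boxplot(cylinders ~ mpg01, data = Auto, main = "cylinders vs mpg01")
boxplot(displacement ~ mpg01, data = Auto, main = "displacement vs mpg01")
boxplot(horsepower ~ mpg01, data = Auto, main = "horsepower vs mpg01")
boxplot(weight ~ mpg01, data = Auto, main = "weight vs mpg01")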

##(c) Split the data into a training set and a test set.

train = (year %% 2 == 0)  # even model years go to the training set, odd years to the test set
test = !train
Auto.train = Auto[train,]
Auto.test = Auto[test,]
mpg01.test = mpg01[test]

##(d) Perform LDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained?

Auto.fit = lda(mpg01~ cylinders+displacement+horsepower+weight, data=Auto, subset=train)
Auto.pred = predict(Auto.fit, Auto.test)
mean(Auto.pred$class != mpg01.test)
## [1] 0.1263736

The LDA model has a test error rate of about 12.6%.

##(e) Perform QDA on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained?

Auto.fit = qda(mpg01~ cylinders+displacement+horsepower+weight, data=Auto, subset=train)
Auto.pred = predict(Auto.fit, Auto.test)
mean(Auto.pred$class != mpg01.test)
## [1] 0.1318681

The QDA model has a test error rate of about 13.2%.

##(f) Perform logistic regression on the training data in order to predict mpg01 using the variables that seemed most associated with mpg01 in (b). What is the test error of the model obtained?

Auto.fit = glm(mpg01 ~ cylinders + displacement + horsepower + weight, data = Auto, family = binomial, subset = train)  # family = binomial for logistic regression
Auto.prob = predict(Auto.fit, Auto.test, type ="response")
Auto.pred = rep(0, length(Auto.prob))
Auto.pred[Auto.prob > 0.5]= 1
mean(Auto.pred != mpg01.test)
## [1] 0.1263736

The logistic regression model has a test error rate of about 12.6%.
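
##(g) Repeat (d) using naive Bayes.

A minimal sketch using naiveBayes() from the e1071 package; the 0/1 response is converted to a factor (output not shown):

library(e1071)
nb.fit = naiveBayes(as.factor(mpg01) ~ cylinders + displacement + horsepower + weight, data = Auto, subset = train)
nb.pred = predict(nb.fit, Auto.test)
mean(nb.pred != mpg01.test)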

##(h) Perform KNN on the training data, with several values of K, in order to predict mpg01. Use only the variables that seemed most associated with mpg01 in (b). What test errors do you obtain? Which value of K seems to perform the best on this data set?

train.X = cbind(cylinders, displacement, horsepower, weight)[train,]
test.X = cbind(cylinders, displacement, horsepower, weight)[test,]
train.mpg01 = mpg01[train]

set.seed(1)
knn.pred = knn(train.X, test.X, train.mpg01, k = 1)
mean(knn.pred != mpg01.test)
## [1] 0.1538462
set.seed(1)
knn.pred = knn(train.X, test.X, train.mpg01, k = 10)
mean(knn.pred != mpg01.test)
## [1] 0.1538462
set.seed(1)
knn.pred = knn(train.X, test.X, train.mpg01, k = 100)
mean(knn.pred != mpg01.test)
## [1] 0.1428571

Of the values tried, K = 100 gives the lowest test error rate, at about 14.3%.
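
To search over K more systematically, one could scan a grid of values (a sketch reusing the objects above; output not shown):

set.seed(1)
ks = c(1, 3, 5, 10, 25, 50, 100)
errs = sapply(ks, function(k) mean(knn(train.X, test.X, train.mpg01, k = k) != mpg01.test))
data.frame(K = ks, test.error = errs)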

#16. Using the Boston data set, fit classification models in order to predict whether a given census tract has a crime rate above or below the median. Explore logistic regression, LDA, naive Bayes, and KNN models using various subsets of the predictors. Describe your findings. Hint: You will have to create the response variable yourself, using the variables that are contained in the Boston data set.

attach(Boston)
crime01 = rep(0, length(crim))
crime01[crim > median(crim)] = 1
Boston = data.frame(Boston, crime01)
summary(Boston)
##       crim                zn             indus            chas        
##  Min.   : 0.00632   Min.   :  0.00   Min.   : 0.46   Min.   :0.00000  
##  1st Qu.: 0.08205   1st Qu.:  0.00   1st Qu.: 5.19   1st Qu.:0.00000  
##  Median : 0.25651   Median :  0.00   Median : 9.69   Median :0.00000  
##  Mean   : 3.61352   Mean   : 11.36   Mean   :11.14   Mean   :0.06917  
##  3rd Qu.: 3.67708   3rd Qu.: 12.50   3rd Qu.:18.10   3rd Qu.:0.00000  
##  Max.   :88.97620   Max.   :100.00   Max.   :27.74   Max.   :1.00000  
##       nox               rm             age              dis        
##  Min.   :0.3850   Min.   :3.561   Min.   :  2.90   Min.   : 1.130  
##  1st Qu.:0.4490   1st Qu.:5.886   1st Qu.: 45.02   1st Qu.: 2.100  
##  Median :0.5380   Median :6.208   Median : 77.50   Median : 3.207  
##  Mean   :0.5547   Mean   :6.285   Mean   : 68.57   Mean   : 3.795  
##  3rd Qu.:0.6240   3rd Qu.:6.623   3rd Qu.: 94.08   3rd Qu.: 5.188  
##  Max.   :0.8710   Max.   :8.780   Max.   :100.00   Max.   :12.127  
##       rad              tax           ptratio          black       
##  Min.   : 1.000   Min.   :187.0   Min.   :12.60   Min.   :  0.32  
##  1st Qu.: 4.000   1st Qu.:279.0   1st Qu.:17.40   1st Qu.:375.38  
##  Median : 5.000   Median :330.0   Median :19.05   Median :391.44  
##  Mean   : 9.549   Mean   :408.2   Mean   :18.46   Mean   :356.67  
##  3rd Qu.:24.000   3rd Qu.:666.0   3rd Qu.:20.20   3rd Qu.:396.23  
##  Max.   :24.000   Max.   :711.0   Max.   :22.00   Max.   :396.90  
##      lstat            medv          crime01   
##  Min.   : 1.73   Min.   : 5.00   Min.   :0.0  
##  1st Qu.: 6.95   1st Qu.:17.02   1st Qu.:0.0  
##  Median :11.36   Median :21.20   Median :0.5  
##  Mean   :12.65   Mean   :22.53   Mean   :0.5  
##  3rd Qu.:16.95   3rd Qu.:25.00   3rd Qu.:1.0  
##  Max.   :37.97   Max.   :50.00   Max.   :1.0
train = 1:(dim(Boston)[1]/2)
test = (dim(Boston)[1]/2 + 1):dim(Boston)[1]

Boston.train = Boston[train,]
Boston.test = Boston[test,]

crime01.test = crime01[test]
pairs(Boston)

corrplot(cor(Boston), method = "number")  # all Boston columns are numeric, so none need to be dropped

Boston.fit <-glm(crime01~ indus+nox+age+dis+rad+tax, data=Boston.train,family=binomial)
Boston.probs = predict(Boston.fit, Boston.test, type = "response")
Boston.pred = rep(0, length(Boston.probs))
Boston.pred[Boston.probs > 0.5] = 1
table(Boston.pred, crime01.test)
##            crime01.test
## Boston.pred   0   1
##           0  75   8
##           1  15 155
mean(Boston.pred != crime01.test)
## [1] 0.09090909
summary(Boston.fit)
## 
## Call:
## glm(formula = crime01 ~ indus + nox + age + dis + rad + tax, 
##     family = binomial, data = Boston.train)
## 
## Coefficients:
##               Estimate Std. Error z value Pr(>|z|)    
## (Intercept) -42.214032   7.617440  -5.542 2.99e-08 ***
## indus        -0.213126   0.073236  -2.910  0.00361 ** 
## nox          80.868029  16.066473   5.033 4.82e-07 ***
## age           0.003397   0.012032   0.282  0.77772    
## dis           0.307145   0.190502   1.612  0.10690    
## rad           0.847236   0.183767   4.610 4.02e-06 ***
## tax          -0.013760   0.004956  -2.777  0.00549 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 329.37  on 252  degrees of freedom
## Residual deviance: 144.44  on 246  degrees of freedom
## AIC: 158.44
## 
## Number of Fisher Scoring iterations: 8
Boston.fit <-qda(crime01~ indus+nox+age+dis+rad+tax, data=Boston.train,family=binomial)
Boston.pred = predict(Boston.fit, Boston.test)
table(Boston.pred$class, crime01.test)
##    crime01.test
##       0   1
##   0  79 146
##   1  11  17
mean(Boston.pred$class != crime01.test)
## [1] 0.6205534
train.X = cbind(indus, nox, age, dis, rad, tax)[train,]
test.X = cbind(indus, nox, age, dis, rad, tax)[test,]
crime01.train = crime01[train]  # training labels must come from the training rows, not the test set
set.seed(1)
Boston.pred = knn(train.X, test.X, crime01.train, k = 1)
table(Boston.pred, crime01.test)
mean(Boston.pred != crime01.test)

The logistic regression (GLM) model has the lowest test error rate, at about 9.1%. The predictor dis could probably be dropped, since it is not statistically significant (p = 0.107).
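
The exercise also asks for LDA and naive Bayes; a minimal sketch on the same split, assuming the e1071 package is available (output not shown):

library(e1071)
lda.fit = lda(crime01 ~ indus + nox + age + dis + rad + tax, data = Boston.train)
lda.pred = predict(lda.fit, Boston.test)$class
mean(lda.pred != crime01.test)

nb.fit = naiveBayes(as.factor(crime01) ~ indus + nox + age + dis + rad + tax, data = Boston.train)
nb.pred = predict(nb.fit, Boston.test)
mean(nb.pred != crime01.test)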