Q10. This question should be answered using the Weekly data set, which is part of the ISLR package. It is similar in nature to the Smarket data from this chapter's lab, except that it contains 1,089 weekly returns for 21 years, from the beginning of 1990 to the end of 2010.
library(ISLR)
library(corrplot)
## Warning: package 'corrplot' was built under R version 4.0.4
## corrplot 0.84 loaded
summary(Weekly)
## Year Lag1 Lag2 Lag3
## Min. :1990 Min. :-18.1950 Min. :-18.1950 Min. :-18.1950
## 1st Qu.:1995 1st Qu.: -1.1540 1st Qu.: -1.1540 1st Qu.: -1.1580
## Median :2000 Median : 0.2410 Median : 0.2410 Median : 0.2410
## Mean :2000 Mean : 0.1506 Mean : 0.1511 Mean : 0.1472
## 3rd Qu.:2005 3rd Qu.: 1.4050 3rd Qu.: 1.4090 3rd Qu.: 1.4090
## Max. :2010 Max. : 12.0260 Max. : 12.0260 Max. : 12.0260
## Lag4 Lag5 Volume Today
## Min. :-18.1950 Min. :-18.1950 Min. :0.08747 Min. :-18.1950
## 1st Qu.: -1.1580 1st Qu.: -1.1660 1st Qu.:0.33202 1st Qu.: -1.1540
## Median : 0.2380 Median : 0.2340 Median :1.00268 Median : 0.2410
## Mean : 0.1458 Mean : 0.1399 Mean :1.57462 Mean : 0.1499
## 3rd Qu.: 1.4090 3rd Qu.: 1.4050 3rd Qu.:2.05373 3rd Qu.: 1.4050
## Max. : 12.0260 Max. : 12.0260 Max. :9.32821 Max. : 12.0260
## Direction
## Down:484
## Up :605
##
##
##
##
corrplot(cor(Weekly[,-9]), method="square")
attach(Weekly)
Weekly.fit<-glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Weekly,family=binomial)
summary(Weekly.fit)
##
## Call:
## glm(formula = Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 +
## Volume, family = binomial, data = Weekly)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -1.6949 -1.2565 0.9913 1.0849 1.4579
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.26686 0.08593 3.106 0.0019 **
## Lag1 -0.04127 0.02641 -1.563 0.1181
## Lag2 0.05844 0.02686 2.175 0.0296 *
## Lag3 -0.01606 0.02666 -0.602 0.5469
## Lag4 -0.02779 0.02646 -1.050 0.2937
## Lag5 -0.01447 0.02638 -0.549 0.5833
## Volume -0.02274 0.03690 -0.616 0.5377
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 1496.2 on 1088 degrees of freedom
## Residual deviance: 1486.4 on 1082 degrees of freedom
## AIC: 1500.4
##
## Number of Fisher Scoring iterations: 4
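From the coefficient table above, Lag2 is the only predictor significant at the 5% level (p ≈ 0.03), which motivates the Lag2-only fits on the 1990-2008 training data further below. If desired, the p-values can also be pulled out programmatically:
# p-values from the full-data logistic fit (same values as the summary table above)
summary(Weekly.fit)$coefficients[, "Pr(>|z|)"]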
logWeekly.prob= predict(Weekly.fit, type='response')
logWeekly.pred =rep("Down", length(logWeekly.prob))
logWeekly.pred[logWeekly.prob > 0.5] = "Up"
table(logWeekly.pred, Direction)
## Direction
## logWeekly.pred Down Up
## Down 54 48
## Up 430 557
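For reference, the overall fraction of correct predictions on the training data follows directly from this confusion matrix:
# overall training accuracy: (54 + 557) / 1089, roughly 56%
mean(logWeekly.pred == Direction)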
train = (Year<2009)
Weekly.0910 <-Weekly[!train,]
Weekly.fit<-glm(Direction~Lag2, data=Weekly,family=binomial, subset=train)
logWeekly.prob= predict(Weekly.fit, Weekly.0910, type = "response")
logWeekly.pred = rep("Down", length(logWeekly.prob))
logWeekly.pred[logWeekly.prob > 0.5] = "Up"
Direction.0910 = Direction[!train]
table(logWeekly.pred, Direction.0910)
## Direction.0910
## logWeekly.pred Down Up
## Down 9 5
## Up 34 56
mean(logWeekly.pred == Direction.0910)
## [1] 0.625
library(MASS)
Weeklylda.fit <- lda(Direction ~ Lag2, data = Weekly, subset = train)
Weeklylda.pred<-predict(Weeklylda.fit, Weekly.0910)
table(Weeklylda.pred$class, Direction.0910)
## Direction.0910
## Down Up
## Down 9 5
## Up 34 56
mean(Weeklylda.pred$class==Direction.0910)
## [1] 0.625
Weeklyqda.fit = qda(Direction ~ Lag2, data = Weekly, subset = train)
Weeklyqda.pred = predict(Weeklyqda.fit, Weekly.0910)$class
table(Weeklyqda.pred, Direction.0910)
## Direction.0910
## Weeklyqda.pred Down Up
## Down 0 0
## Up 43 61
mean(Weeklyqda.pred==Direction.0910)
## [1] 0.5865385
library(class)
Week.train=as.matrix(Lag2[train])
Week.test=as.matrix(Lag2[!train])
train.Direction =Direction[train]
set.seed(1)
Weekknn.pred=knn(Week.train,Week.test,train.Direction,k=1)
table(Weekknn.pred,Direction.0910)
## Direction.0910
## Weekknn.pred Down Up
## Down 21 30
## Up 22 31
mean(Weekknn.pred == Direction.0910)
## [1] 0.5
Which of these methods appears to provide the best results on this data? The methods with the highest held-out accuracy are logistic regression and linear discriminant analysis, both at 62.5%.
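For convenience, the held-out accuracies can be collected in one place (numbers rounded from the outputs shown above):
# 2009-2010 test accuracy of each classifier, copied from the results above
test.acc <- c(Logistic = 0.625, LDA = 0.625, QDA = 0.5865, KNN1 = 0.5)
sort(test.acc, decreasing = TRUE)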
Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. Report the variables, method, and associated confusion matrix that appears to provide the best results on the held out data. Note that you should also experiment with values for K in the KNN classifier.
#Logistic Regression with Interaction Lag2:Lag4
Weekly.fit<-glm(Direction~Lag2:Lag4+Lag2, data=Weekly,family=binomial, subset=train)
logWeekly.prob= predict(Weekly.fit, Weekly.0910, type = "response")
logWeekly.pred = rep("Down", length(logWeekly.prob))
logWeekly.pred[logWeekly.prob > 0.5] = "Up"
Direction.0910 = Direction[!train]
table(logWeekly.pred, Direction.0910)
## Direction.0910
## logWeekly.pred Down Up
## Down 3 4
## Up 40 57
mean(logWeekly.pred == Direction.0910)
## [1] 0.5769231
#LDA with Interaction Lag2:Lag4
Weeklylda.fit <- lda(Direction ~ Lag2:Lag4 + Lag2, data = Weekly, subset = train)
Weeklylda.pred<-predict(Weeklylda.fit, Weekly.0910)
table(Weeklylda.pred$class, Direction.0910)
## Direction.0910
## Down Up
## Down 3 3
## Up 40 58
mean(Weeklylda.pred$class==Direction.0910)
## [1] 0.5865385
Weeklyqda.fit = qda(Direction ~ poly(Lag2,2), data = Weekly, subset = train)
Weeklyqda.pred = predict(Weeklyqda.fit, Weekly.0910)$class
table(Weeklyqda.pred, Direction.0910)
## Direction.0910
## Weeklyqda.pred Down Up
## Down 7 3
## Up 36 58
mean(Weeklyqda.pred==Direction.0910)
## [1] 0.625
#K=10
Week.train=as.matrix(Lag2[train])
Week.test=as.matrix(Lag2[!train])
train.Direction =Direction[train]
set.seed(1)
Weekknn.pred=knn(Week.train,Week.test,train.Direction,k=10)
table(Weekknn.pred,Direction.0910)
## Direction.0910
## Weekknn.pred Down Up
## Down 17 21
## Up 26 40
mean(Weekknn.pred == Direction.0910)
## [1] 0.5480769
#K=100
Week.train=as.matrix(Lag2[train])
Week.test=as.matrix(Lag2[!train])
train.Direction =Direction[train]
set.seed(1)
Weekknn.pred=knn(Week.train,Week.test,train.Direction,k=100)
table(Weekknn.pred,Direction.0910)
## Direction.0910
## Weekknn.pred Down Up
## Down 10 11
## Up 33 50
mean(Weekknn.pred == Direction.0910)
## [1] 0.5769231
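Rather than checking k = 1, 10, and 100 by hand, one could also loop over a grid of k values and record the held-out accuracy for each; a minimal sketch reusing the objects defined above:
# held-out accuracy of KNN for k = 1, ..., 100 (reuses Week.train, Week.test, etc.)
set.seed(1)
knn.acc <- sapply(1:100, function(k) {
  pred <- knn(Week.train, Week.test, train.Direction, k = k)
  mean(pred == Direction.0910)
})
which.max(knn.acc)   # value of k with the highest held-out accuracy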
Q11. In this problem, you will develop a model to predict whether a given car gets high or low gas mileage based on the Auto data set.
require(ISLR); require(tidyverse); require(ggthemes); require(GGally)
## Loading required package: tidyverse
## Warning: package 'tidyverse' was built under R version 4.0.4
## -- Attaching packages --------------------------------------- tidyverse 1.3.0 --
## v ggplot2 3.3.3 v purrr 0.3.4
## v tibble 3.0.6 v dplyr 1.0.3
## v tidyr 1.1.2 v stringr 1.4.0
## v readr 1.4.0 v forcats 0.5.1
## -- Conflicts ------------------------------------------ tidyverse_conflicts() --
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
## x dplyr::select() masks MASS::select()
## Loading required package: ggthemes
## Warning: package 'ggthemes' was built under R version 4.0.4
## Loading required package: GGally
## Warning: package 'GGally' was built under R version 4.0.4
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
require(knitr); require(kableExtra); require(broom)
## Loading required package: knitr
## Loading required package: kableExtra
## Warning: package 'kableExtra' was built under R version 4.0.4
##
## Attaching package: 'kableExtra'
## The following object is masked from 'package:dplyr':
##
## group_rows
## Loading required package: broom
theme_set(theme_tufte(base_size = 14))
set.seed(1)
data('Auto')
Auto <- Auto %>%
  filter(!cylinders %in% c(3, 5)) %>%
  mutate(mpg01 = factor(ifelse(mpg > median(mpg), 1, 0)),
         cylinders = factor(cylinders,
                            levels = c(4, 6, 8),
                            ordered = TRUE),
         origin = factor(origin,
                         levels = c(1, 2, 3),
                         labels = c('American', 'European', 'Asian')))
median(Auto$mpg)
## [1] 23
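Because mpg01 is defined by a median split, the two classes should be close to balanced, which can be checked directly:
table(Auto$mpg01)   # counts of low-mileage (0) vs. high-mileage (1) cars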
Auto %>%
  dplyr::select(mpg, mpg01) %>%
  sample_n(5)
## mpg mpg01
## 333 29.8 1
## 171 23.0 0
## 133 25.0 1
## 306 28.4 1
## 276 17.0 0
Auto %>%
  dplyr::select(-name, -mpg) %>%
  ggpairs(aes(col = mpg01, fill = mpg01, alpha = 0.6),
          upper = list(combo = 'box'),
          diag = list(discrete = wrap('barDiag', position = 'fill')),
          lower = list(combo = 'dot_no_facet')) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
Auto %>%
  dplyr::select(-name, -mpg, -origin, -cylinders) %>%
  gather(Variable, value, -mpg01) %>%
  mutate(Variable = str_to_title(Variable)) %>%
  ggplot(aes(mpg01, value, fill = mpg01)) +
  geom_boxplot(alpha = 0.6) +
  facet_wrap(~ Variable, scales = 'free', ncol = 1, strip.position = 'bottom') +
  coord_flip() +
  theme(legend.position = 'top') +
  labs(x = '', y = '', title = 'Variable Boxplots by mpg01')
set.seed(1)
num_train <- floor(nrow(Auto) * 0.75)
inTrain <- sample(nrow(Auto), size = num_train)
training <- Auto[inTrain,]
testing <- Auto[-inTrain,]
require(MASS)
fmla <- as.formula('mpg01 ~ displacement + horsepower + weight + year + cylinders')
lda_model <- lda(fmla, data = training)
pred <- predict(lda_model, testing)
table(pred$class, testing$mpg01)
##
## 0 1
## 0 48 5
## 1 5 39
mean(pred$class == testing$mpg01)
## [1] 0.8969072
qda_model <- qda(fmla, data = training)
pred <- predict(qda_model, testing)
table(pred$class, testing$mpg01)
##
## 0 1
## 0 48 5
## 1 5 39
mean(pred$class == testing$mpg01)
## [1] 0.8969072
log_reg <- glm(fmla, data = training, family = binomial)
pred <- predict(log_reg, testing, type = 'response')
pred_values <- round(pred)
table(pred_values, testing$mpg01)
##
## pred_values 0 1
## 0 49 3
## 1 4 41
mean(pred_values == testing$mpg01)
## [1] 0.9278351
require(class)
set.seed(1)
acc <- list()
x_train <- training[,c('cylinders', 'displacement', 'horsepower', 'weight', 'year')]
y_train <- training$mpg01
x_test <- testing[,c('cylinders', 'displacement', 'horsepower', 'weight', 'year')]
for (i in 1:20) {
  knn_pred <- knn(train = x_train, test = x_test, cl = y_train, k = i)
  acc[as.character(i)] <- mean(knn_pred == testing$mpg01)
}
acc <- unlist(acc)
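The stored accuracies can then be inspected to choose k, for example:
which.max(acc)   # value of k with the highest held-out accuracy
max(acc)         # that accuracy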
Q13. Using the Boston data set, fit classification models in order to predict whether a given suburb has a crime rate above or below the median. Explore logistic regression, LDA, and KNN models using various subsets of the predictors. Describe your findings.
library(MASS)
attach(Boston)
crim01 <- rep(0, length(crim)) # create binary variable for "crim"
crim01[crim > median(crim)] <- 1
Boston <- data.frame(Boston, crim01)
train <- 1:(length(crim) / 2) # split first half of data to train set
test <- (length(crim) / 2 + 1):length(crim)
Boston.train <- Boston[train, ]
Boston.test <- Boston[test, ]
crim01.test <- crim01[test]
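Note that this is a deterministic first-half/second-half split; since the rows of Boston are not in random order, a random split is a reasonable alternative to experiment with (not used in what follows):
# alternative: a random 50/50 train/test split instead of first vs. second half
set.seed(1)
train.random <- sample(nrow(Boston), nrow(Boston) / 2)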
# logistic regression
glm.fit <- glm(crim01 ~ . - crim01 - crim, data = Boston, family = binomial, subset = train)
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
summary(glm.fit)
##
## Call:
## glm(formula = crim01 ~ . - crim01 - crim, family = binomial,
## data = Boston, subset = train)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.83229 -0.06593 0.00000 0.06181 2.61513
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -91.319906 19.490273 -4.685 2.79e-06 ***
## zn -0.815573 0.193373 -4.218 2.47e-05 ***
## indus 0.354172 0.173862 2.037 0.04164 *
## chas 0.167396 0.991922 0.169 0.86599
## nox 93.706326 21.202008 4.420 9.88e-06 ***
## rm -4.719108 1.788765 -2.638 0.00833 **
## age 0.048634 0.024199 2.010 0.04446 *
## dis 4.301493 0.979996 4.389 1.14e-05 ***
## rad 3.039983 0.719592 4.225 2.39e-05 ***
## tax -0.006546 0.007855 -0.833 0.40461
## ptratio 1.430877 0.359572 3.979 6.91e-05 ***
## black -0.017552 0.006734 -2.606 0.00915 **
## lstat 0.190439 0.086722 2.196 0.02809 *
## medv 0.598533 0.185514 3.226 0.00125 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 329.367 on 252 degrees of freedom
## Residual deviance: 69.568 on 239 degrees of freedom
## AIC: 97.568
##
## Number of Fisher Scoring iterations: 10
glm.probs <- predict(glm.fit, Boston.test, type = "response")
glm.pred <- rep(0, length(glm.probs))
glm.pred[glm.probs > 0.5] <- 1
table(glm.pred, crim01.test)
## crim01.test
## glm.pred 0 1
## 0 68 24
## 1 22 139
mean(glm.pred != crim01.test)
## [1] 0.1818182
# logistic regression (-chas -nox -tax)
glm.fit <- glm(crim01 ~ . - crim01 - crim -chas -nox -tax, data = Boston, family = binomial, subset = train)
## Warning: glm.fit: fitted probabilities numerically 0 or 1 occurred
summary(glm.fit)
##
## Call:
## glm(formula = crim01 ~ . - crim01 - crim - chas - nox - tax,
## family = binomial, data = Boston, subset = train)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -3.04443 -0.24461 -0.00114 0.38919 2.72999
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -17.291707 6.019497 -2.873 0.004071 **
## zn -0.478891 0.104276 -4.593 4.38e-06 ***
## indus 0.362719 0.082969 4.372 1.23e-05 ***
## rm -2.364642 0.967625 -2.444 0.014535 *
## age 0.063371 0.015457 4.100 4.14e-05 ***
## dis 1.494535 0.397249 3.762 0.000168 ***
## rad 1.756498 0.357330 4.916 8.85e-07 ***
## ptratio 0.575045 0.161917 3.551 0.000383 ***
## black -0.018916 0.006754 -2.801 0.005102 **
## lstat 0.057632 0.053051 1.086 0.277326
## medv 0.237282 0.081326 2.918 0.003527 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 329.37 on 252 degrees of freedom
## Residual deviance: 139.59 on 242 degrees of freedom
## AIC: 161.59
##
## Number of Fisher Scoring iterations: 9
glm.probs <- predict(glm.fit, Boston.test, type = "response")
glm.pred <- rep(0, length(glm.probs))
glm.pred[glm.probs > 0.5] <- 1
table(glm.pred, crim01.test)
## crim01.test
## glm.pred 0 1
## 0 78 28
## 1 12 135
mean(glm.pred != crim01.test)
## [1] 0.1581028
For this reduced logistic regression (dropping chas, nox, and tax), the test error rate is about 15.8%, an improvement over the 18.2% obtained with all predictors.
train.X <- cbind(zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat, medv)[train, ]
test.X <- cbind(zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat, medv)[test, ]
train.crim01 <- crim01[train]
set.seed(1)
knn.pred <- knn(train.X, test.X, train.crim01, k = 1)
table(knn.pred, crim01.test)
## crim01.test
## knn.pred 0 1
## 0 85 111
## 1 5 52
For KNN with k = 1, the test error rate is about 45.8%.
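The error rate quoted here (and for the other values of k below) is computed the same way as for the logistic fits:
mean(knn.pred != crim01.test)   # test error rate for the current KNN fit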
knn.pred <- knn(train.X, test.X, train.crim01, k = 10)
table(knn.pred, crim01.test)
## crim01.test
## knn.pred 0 1
## 0 83 23
## 1 7 140
For KNN with k = 10, the test error rate is about 11.9%, the lowest of the models tried so far.
knn.pred <- knn(train.X, test.X, train.crim01, k = 100)
table(knn.pred, crim01.test)
## crim01.test
## knn.pred 0 1
## 0 86 120
## 1 4 43
For KNN with k = 100, the test error rate is about 49.0%. Of the models explored here, KNN with k = 10 gives the lowest test error rate (about 11.9%), followed by the reduced logistic regression (about 15.8%). We would proceed by picking the model with the lowest test error rate, though there is more exploration left to do, for example trying other predictor subsets, other values of k, and standardizing the predictors before running KNN (a sketch follows below).
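Because KNN is distance-based, predictors measured on very different scales (for example tax versus nox) can dominate the distance calculation, so standardizing the predictors is a natural next step. A minimal sketch, reusing the train/test objects above (results not shown):
# standardize using the training means and standard deviations, then rerun KNN
train.X.std <- scale(train.X)
test.X.std <- scale(test.X,
                    center = attr(train.X.std, "scaled:center"),
                    scale = attr(train.X.std, "scaled:scale"))
set.seed(1)
knn.pred.std <- knn(train.X.std, test.X.std, train.crim01, k = 10)
mean(knn.pred.std != crim01.test)   # test error rate with standardized predictors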