##HW3 (CH4: 10, 11, 13)
pdf('HW3.pdf')  # route all plots produced below into HW3.pdf
##Q10.
#a
library(ISLR)
summary(Weekly)
cor(Weekly[, -9])
attach(Weekly)
plot(Volume)
# The only pair of variables showing a substantial correlation is Year and Volume.
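# A quick numeric check of that claim (a sketch; the exact value depends on
# the ISLR release of the Weekly data):
cor(Weekly$Year, Weekly$Volume)  # roughly 0.84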
#b
fit.glm = glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
              data = Weekly, family = binomial)
summary(fit.glm)
# Lag2 is the only predictor with a p-value below 0.05, so it is the only
# statistically significant one.
#c
probs = predict(fit.glm, type = "response")
pred.glm = rep("Down", length(probs))
pred.glm[probs > 0.5] = "Up"
table(pred.glm, Direction)
# The confusion matrix shows where the model errs: of the 484 weeks that were
# actually Down, only 54 are predicted correctly (430 false positives), while
# of the 625 weeks that were actually Up, 557 are predicted correctly
# (48 false negatives).
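# The overall fraction of correct training predictions follows from the
# matrix as (54 + 557) / 1089; computed directly from the predictions:
mean(pred.glm == Direction)  # about 0.561, i.e. roughly 56% correct overall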
#d
train = (Year < 2009)
Weekly.20092010 = Weekly[!train, ]
Direction.20092010 = Direction[!train]
fit.glm2 = glm(Direction ~ Lag2, data = Weekly, family = binomial, subset = train)
summary(fit.glm2)
probs2 = predict(fit.glm2, Weekly.20092010, type = "response")
pred.glm2 = rep("Down", length(probs2))
pred.glm2[probs2 > 0.5] = "Up"
table(pred.glm2, Direction.20092010)
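# Part (d) also asks for the overall fraction of correct predictions on the
# held-out 2009-2010 weeks:
mean(pred.glm2 == Direction.20092010)  # about 0.625 on the standard Weekly data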
#e
library(MASS)  # for lda() and qda()
fit.lda = lda(Direction ~ Lag2, data = Weekly, subset = train)
fit.lda
pred.lda = predict(fit.lda, Weekly.20092010)
table(pred.lda$class, Direction.20092010)
#f
fit.qda = qda(Direction ~ Lag2, data = Weekly, subset = train)
fit.qda
pred.qda = predict(fit.qda, Weekly.20092010)
table(pred.qda$class, Direction.20092010)
#g
library(class)  # for knn()
train.X = as.matrix(Lag2[train])
test.X = as.matrix(Lag2[!train])
train.Direction = Direction[train]
set.seed(1)
pred.knn = knn(train.X, test.X, train.Direction, k = 1)
table(pred.knn, Direction.20092010)
#h
# Logistic regression and LDA give the best results: both reach 62.5% test
# accuracy, versus roughly 58.7% for QDA (which predicts Up every week) and
# 50% for KNN with K = 1.
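# Making that comparison explicit, each model's fraction of correct test
# predictions can be collected into one named vector (a sketch reusing the
# objects fitted above):
c(logistic = mean(pred.glm2 == Direction.20092010),
  lda      = mean(pred.lda$class == Direction.20092010),
  qda      = mean(pred.qda$class == Direction.20092010),
  knn1     = mean(pred.knn == Direction.20092010))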
#i
fit.glm3 = glm(Direction ~ Lag2:Lag1, data = Weekly, family = binomial, subset = train)
probs3 = predict(fit.glm3, Weekly.20092010, type = "response")
pred.glm3 = rep("Down", length(probs3))
pred.glm3[probs3 > 0.5] = "Up"
table(pred.glm3, Direction.20092010)
mean(pred.glm3 == Direction.20092010)

fit.lda2 = lda(Direction ~ Lag2:Lag1, data = Weekly, subset = train)
pred.lda2 = predict(fit.lda2, Weekly.20092010)
mean(pred.lda2$class == Direction.20092010)

fit.qda2 = qda(Direction ~ Lag2 + sqrt(abs(Lag2)), data = Weekly, subset = train)
pred.qda2 = predict(fit.qda2, Weekly.20092010)
table(pred.qda2$class, Direction.20092010)
mean(pred.qda2$class == Direction.20092010)

pred.knn2 = knn(train.X, test.X, train.Direction, k = 10)
table(pred.knn2, Direction.20092010)
mean(pred.knn2 == Direction.20092010)

pred.knn3 = knn(train.X, test.X, train.Direction, k = 100)
table(pred.knn3, Direction.20092010)
mean(pred.knn3 == Direction.20092010)
# None of these variants improves on the Lag2-only models: the logistic
# regression from (d) and the LDA from (e) still perform best.
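# The same side-by-side view for these variants (a sketch; the KNN numbers
# depend on random tie-breaking in knn(), seeded back in part (g)):
c(glm.interaction = mean(pred.glm3 == Direction.20092010),
  lda.interaction = mean(pred.lda2$class == Direction.20092010),
  qda.transformed = mean(pred.qda2$class == Direction.20092010),
  knn10           = mean(pred.knn2 == Direction.20092010),
  knn100          = mean(pred.knn3 == Direction.20092010))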
##Q11.
#a
attach(Auto)
mpg01 = rep(0, length(mpg))
mpg01[mpg > median(mpg)] = 1
Auto = data.frame(Auto, mpg01)
head(Auto)
#b
par(mfrow = c(2, 3))
boxplot(cylinders ~ mpg01, data = Auto, main = "Cylinders vs mpg01")
boxplot(displacement ~ mpg01, data = Auto, main = "Displacement vs mpg01")
boxplot(horsepower ~ mpg01, data = Auto, main = "Horsepower vs mpg01")
boxplot(weight ~ mpg01, data = Auto, main = "Weight vs mpg01")
boxplot(acceleration ~ mpg01, data = Auto, main = "Acceleration vs mpg01")
boxplot(year ~ mpg01, data = Auto, main = "Year vs mpg01")
cor(na.omit(Auto[, -9]))  # drop the non-numeric name column
# The variables most strongly associated with mpg01 are cylinders,
# displacement, horsepower, and weight.
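# The same associations can be read off numerically from the mpg01 column of
# the correlation matrix instead of scanning the whole table (a sketch):
round(cor(na.omit(Auto[, -9]))[, "mpg01"], 2)
# cylinders, displacement, horsepower, and weight all correlate strongly and
# negatively with mpg01 (on the order of -0.7 in the standard Auto data).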
#c
train = (year %% 2 == 0)  # even model years form the training set
Auto.train = Auto[train, ]
Auto.test = Auto[!train, ]
mpg01.test = mpg01[!train]
#d
lda.fit = lda(mpg01 ~ cylinders + displacement + horsepower + weight,
              data = Auto, subset = train)
lda.fit
lda.pred = predict(lda.fit, Auto.test)
lda.class = lda.pred$class
table(lda.class, mpg01.test)
mean(lda.class != mpg01.test)
# LDA test error rate: about 12.64%.
#e
qda.fit = qda(mpg01 ~ cylinders + displacement + horsepower + weight,
              data = Auto, subset = train)
qda.fit
qda.pred = predict(qda.fit, Auto.test)
qda.class = qda.pred$class
table(qda.class, mpg01.test)
mean(qda.class != mpg01.test)
# QDA test error rate: about 13.19%.
#f
glm.fit = glm(mpg01 ~ cylinders + displacement + horsepower + weight,
              data = Auto, subset = train, family = binomial)
summary(glm.fit)$coef
glm.probs = predict(glm.fit, Auto.test, type = "response")
glm.pred = rep(0, length(glm.probs))
glm.pred[glm.probs > 0.5] = 1
table(glm.pred, mpg01.test)
mean(glm.pred != mpg01.test)
# Logistic regression test error rate: about 12.09%.
#g
train.X = cbind(cylinders, displacement, horsepower, weight)[train, ]
test.X = cbind(cylinders, displacement, horsepower, weight)[!train, ]
train.mpg01 = mpg01[train]
set.seed(1)
knn.pred = knn(train.X, test.X, train.mpg01, k = 1)
table(knn.pred, mpg01.test)
mean(knn.pred != mpg01.test)
# Test error rate of about 15.38% for K = 1.
knn.pred = knn(train.X, test.X, train.mpg01, k = 10)
table(knn.pred, mpg01.test)
mean(knn.pred != mpg01.test)
# Test error rate of about 16.48% for K = 10.
knn.pred = knn(train.X, test.X, train.mpg01, k = 100)
table(knn.pred, mpg01.test)
mean(knn.pred != mpg01.test)
# Test error rate of about 14.29% for K = 100, the best of the three K values tried here.
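# Rather than repeating the fit by hand for each K, the error rate can be
# swept over a grid of K values in one pass (a sketch; the grid below is an
# arbitrary choice, and knn() breaks distance ties at random, so the seed
# keeps the run reproducible):
set.seed(1)
ks = c(1, 5, 10, 50, 100)
errs = sapply(ks, function(k) {
  pred = knn(train.X, test.X, train.mpg01, k = k)
  mean(pred != mpg01.test)  # test error rate for this K
})
data.frame(K = ks, error = errs)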
##Q13.
library(MASS)
attach(Boston)
crim01 = rep(0, length(crim))
crim01[crim > median(crim)] = 1
Boston = data.frame(Boston, crim01)
train = 1:(length(crim) / 2)
test = (length(crim) / 2 + 1):length(crim)
Boston.train = Boston[train, ]
Boston.test = Boston[test, ]
crim01.test = crim01[test]
glm.fit = glm(crim01 ~ . - crim01 - crim, data = Boston, family = binomial, subset = train)
summary(glm.fit)
glm.probs = predict(glm.fit, Boston.test, type = "response")
glm.pred = rep(0, length(glm.probs))
glm.pred[glm.probs > 0.5] = 1
table(glm.pred, crim01.test)
mean(glm.pred != crim01.test)
# Logistic regression (all predictors) has a test error rate of about 18.18%.
glm.fit = glm(crim01 ~ . - crim01 - crim - chas - nox - tax,
              data = Boston, family = binomial, subset = train)
summary(glm.fit)
glm.probs = predict(glm.fit, Boston.test, type = "response")
glm.pred = rep(0, length(glm.probs))
glm.pred[glm.probs > 0.5] = 1
table(glm.pred, crim01.test)
mean(glm.pred != crim01.test)
# Logistic regression (without chas, nox, and tax) has a test error rate of about 15.81%.
lda.fit = lda(crim01 ~ . - crim01 - crim, data = Boston, subset = train)
lda.pred = predict(lda.fit, Boston.test)
table(lda.pred$class, crim01.test)
mean(lda.pred$class != crim01.test)
# LDA (all predictors) has a test error rate of about 13.44%.
lda.fit = lda(crim01 ~ . - crim01 - crim - chas - nox - tax, data = Boston, subset = train)
lda.pred = predict(lda.fit, Boston.test)
table(lda.pred$class, crim01.test)
mean(lda.pred$class != crim01.test)
# LDA (without chas, nox, and tax) has a test error rate of about 13.83%.
train.X = cbind(zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat, medv)[train, ]
test.X = cbind(zn, indus, chas, nox, rm, age, dis, rad, tax, ptratio, black, lstat, medv)[test, ]
train.crim01 = crim01[train]
set.seed(1)
knn.pred = knn(train.X, test.X, train.crim01, k = 1)
table(knn.pred, crim01.test)
mean(knn.pred != crim01.test)
# KNN (K = 1) has a test error rate of about 45.85%.
knn.pred = knn(train.X, test.X, train.crim01, k = 10)
table(knn.pred, crim01.test)
mean(knn.pred != crim01.test)
# KNN (K = 10) has a test error rate of about 11.86%, the lowest of the
# models tried on this split.
dev.off()  # close the PDF device opened at the top of the script
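# KNN is distance-based, so predictors measured on large scales (e.g. tax,
# black) dominate the distance computation when left unscaled. A minimal
# sketch of the same K = 10 fit on standardized predictors (an addition
# beyond the original assignment; the test data are scaled using the
# training means and standard deviations):
means = colMeans(train.X)
sds = apply(train.X, 2, sd)
train.Xs = scale(train.X, center = means, scale = sds)
test.Xs = scale(test.X, center = means, scale = sds)
set.seed(1)
knn.pred.s = knn(train.Xs, test.Xs, train.crim01, k = 10)
mean(knn.pred.s != crim01.test)  # error rate with standardized predictors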