##Question 3: Consider the Gini index, classification error, and entropy in a simple classification setting with two classes. Create a single plot that displays each of these quantities as a function of p̂m1. The x-axis should display p̂m1, ranging from 0 to 1, and the y-axis should display the value of the Gini index, classification error, and entropy. Hint: In a setting with two classes, p̂m1 = 1 − p̂m2. You could make this plot by hand, but it will be much easier to make in R.

p = seq(0, 1, 0.01)
gini = 2 * p * (1 - p)
entropy = -(p * log(p) + (1 - p) * log(1 - p))
entropy[is.na(entropy)] = 0  # 0 * log(0) gives NaN at the endpoints; the limit is 0
class.err = 1 - pmax(p, 1 - p)
matplot(p, cbind(gini, entropy, class.err), type = "l", lty = 1,
    col = c("blue", "red", "purple"), xlab = expression(hat(p)[m1]), ylab = "Value")
legend("topleft", legend = c("Gini index", "Entropy", "Class. error"),
    col = c("blue", "red", "purple"), lty = 1)
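
#Quick sanity check (an added sketch, not part of the original answer): all three
#measures peak at p = 0.5, where Gini = 0.5, entropy = log(2), about 0.693, and
#classification error = 0.5.
p0 = 0.5
c(gini = 2 * p0 * (1 - p0), entropy = -2 * p0 * log(p0), class.err = 1 - p0)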

##Question 8: In the lab, a classification tree was applied to the Carseats data set after converting Sales into a qualitative response variable. Now we will seek to predict Sales using regression trees and related approaches, treating the response as a quantitative variable.

#(a): Split the data set into a training set and a test set.

library(ISLR)
attach(Carseats)
set.seed(1)

train = sample(dim(Carseats)[1], dim(Carseats)[1]/2)
Carseats.train = Carseats[train, ]
Carseats.test = Carseats[-train, ]
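
#Sanity check on the split (an added sketch): Carseats has 400 rows, so each
#half should contain 200 observations.
dim(Carseats.train)
dim(Carseats.test)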

#(b): Fit a regression tree to the training set. Plot the tree, and interpret the results. What test MSE do you obtain?

library(tree)
tree.carseats = tree(Sales ~ ., data = Carseats.train)
summary(tree.carseats)
## 
## Regression tree:
## tree(formula = Sales ~ ., data = Carseats.train)
## Variables actually used in tree construction:
## [1] "ShelveLoc"   "Price"       "Age"         "Advertising" "CompPrice"  
## [6] "US"         
## Number of terminal nodes:  18 
## Residual mean deviance:  2.167 = 394.3 / 182 
## Distribution of residuals:
##     Min.  1st Qu.   Median     Mean  3rd Qu.     Max. 
## -3.88200 -0.88200 -0.08712  0.00000  0.89590  4.09900
plot(tree.carseats)
text(tree.carseats, pretty = 0)

pred.carseats = predict(tree.carseats, Carseats.test)
mean((Carseats.test$Sales - pred.carseats)^2)
## [1] 4.922039

#The test MSE is about 4.92.
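
#To interpret the scale of this error (an added note, not part of the original
#answer): Sales is measured in thousands of units, so the root MSE is easier to read.
sqrt(mean((Carseats.test$Sales - pred.carseats)^2))
#This is about 2.22, i.e. predictions are off by roughly 2,220 units on average.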

#(c): Use cross-validation in order to determine the optimal level of tree complexity. Does pruning the tree improve the test MSE?

cv.carseats = cv.tree(tree.carseats, FUN = prune.tree)
par(mfrow = c(1, 2))
plot(cv.carseats$size, cv.carseats$dev, type = "b")
plot(cv.carseats$k, cv.carseats$dev, type = "b")
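
#Rather than reading the plots by eye, the size minimizing the CV deviance can
#be extracted programmatically (an added sketch):
cv.carseats$size[which.min(cv.carseats$dev)]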

# Best size = 9
pruned.carseats = prune.tree(tree.carseats, best = 9)
par(mfrow = c(1, 1))
plot(pruned.carseats)
text(pruned.carseats, pretty = 0)

pred.pruned = predict(pruned.carseats, Carseats.test)
mean((Carseats.test$Sales - pred.pruned)^2)
## [1] 4.918134

#Pruning leaves the test MSE essentially unchanged (4.918 vs. 4.922 for the full tree), so it does not meaningfully improve the test MSE here.

#(d): Use the bagging approach in order to analyze this data. What test MSE do you obtain? Use the importance() function to determine which variables are most important.

library(randomForest)
bag.carseats = randomForest(Sales ~ ., data = Carseats.train, mtry = 10, ntree = 500, 
    importance = T)
bag.pred = predict(bag.carseats, Carseats.test)
mean((Carseats.test$Sales - bag.pred)^2)
## [1] 2.657296
importance(bag.carseats)
##                 %IncMSE IncNodePurity
## CompPrice   23.07909904    171.185734
## Income       2.82081527     94.079825
## Advertising 11.43295625     99.098941
## Population  -3.92119532     59.818905
## Price       54.24314632    505.887016
## ShelveLoc   46.26912996    361.962753
## Age         14.24992212    159.740422
## Education   -0.07662320     46.738585
## Urban        0.08530119      8.453749
## US           4.34349223     15.157608

#Bagging improves the test MSE to about 2.66. From importance(), Price, ShelveLoc and Age are the three most important predictors of Sales.
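
#The same ranking can be seen graphically with varImpPlot() from randomForest
#(an added sketch, not part of the original answer):
varImpPlot(bag.carseats)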

#(e): Use random forests to analyze this data. What test MSE do you obtain? Use the importance() function to determine which variables are most important. Describe the effect of m, the number of variables considered at each split, on the error rate obtained

rf.carseats = randomForest(Sales ~ ., data = Carseats.train, mtry = 5, ntree = 500, 
    importance = T)
rf.pred = predict(rf.carseats, Carseats.test)
mean((Carseats.test$Sales - rf.pred)^2)
## [1] 2.701665
importance(rf.carseats)
##                %IncMSE IncNodePurity
## CompPrice   19.8160444     162.73603
## Income       2.8940268     106.96093
## Advertising 11.6799573     106.30923
## Population  -1.6998805      79.04937
## Price       46.3454015     448.33554
## ShelveLoc   40.4412189     334.33610
## Age         12.5440659     169.06125
## Education    1.0762096      55.87510
## Urban        0.5703583      13.21963
## US           5.8799999      25.59797

#With m = 5, the random forest gives a test MSE of about 2.70, slightly worse than bagging here. Varying m moves the test MSE between roughly 2.6 and 3.0. Price, ShelveLoc and Age remain the three most important predictors of Sales.
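
#A sketch of how the effect of m can be checked directly (added here; exact
#numbers vary with the random seed): refit the forest for each m and record the
#test MSE.
mse.by.m = sapply(1:10, function(m) {
    fit = randomForest(Sales ~ ., data = Carseats.train, mtry = m, ntree = 500)
    mean((Carseats.test$Sales - predict(fit, Carseats.test))^2)
})
plot(1:10, mse.by.m, type = "b", xlab = "m (mtry)", ylab = "Test MSE")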

##Question 9: This problem involves the OJ data set which is part of the ISLR package.

#(a): Create a training set containing a random sample of 800 observations, and a test set containing the remaining observations.

library(ISLR)
attach(OJ)
set.seed(1013)

train = sample(dim(OJ)[1], 800)
OJ.train = OJ[train, ]
OJ.test = OJ[-train, ]

#(b): Fit a tree to the training data, with Purchase as the response and the other variables as predictors. Use the summary() function to produce summary statistics about the tree, and describe the results obtained. What is the training error rate? How many terminal nodes does the tree have?

library(tree)
oj.tree = tree(Purchase ~ ., data = OJ.train)
summary(oj.tree)
## 
## Classification tree:
## tree(formula = Purchase ~ ., data = OJ.train)
## Variables actually used in tree construction:
## [1] "LoyalCH"       "PriceDiff"     "ListPriceDiff" "SalePriceMM"  
## Number of terminal nodes:  7 
## Residual mean deviance:  0.7564 = 599.8 / 793 
## Misclassification error rate: 0.1612 = 129 / 800

#The tree uses four variables: LoyalCH, PriceDiff, ListPriceDiff, and SalePriceMM. It has 7 terminal nodes. The training (misclassification) error rate is 0.1612.

#(c): Type in the name of the tree object in order to get a detailed text output. Pick one of the terminal nodes, and interpret the information displayed.

oj.tree
## node), split, n, deviance, yval, (yprob)
##       * denotes terminal node
## 
##  1) root 800 1069.00 CH ( 0.61125 0.38875 )  
##    2) LoyalCH < 0.5036 344  407.30 MM ( 0.27907 0.72093 )  
##      4) LoyalCH < 0.276142 163  121.40 MM ( 0.12270 0.87730 ) *
##      5) LoyalCH > 0.276142 181  246.30 MM ( 0.41989 0.58011 )  
##       10) PriceDiff < 0.065 75   75.06 MM ( 0.20000 0.80000 ) *
##       11) PriceDiff > 0.065 106  144.50 CH ( 0.57547 0.42453 ) *
##    3) LoyalCH > 0.5036 456  366.30 CH ( 0.86184 0.13816 )  
##      6) LoyalCH < 0.753545 189  224.30 CH ( 0.71958 0.28042 )  
##       12) ListPriceDiff < 0.235 79  109.40 MM ( 0.48101 0.51899 )  
##         24) SalePriceMM < 1.64 22   20.86 MM ( 0.18182 0.81818 ) *
##         25) SalePriceMM > 1.64 57   76.88 CH ( 0.59649 0.40351 ) *
##       13) ListPriceDiff > 0.235 110   75.81 CH ( 0.89091 0.10909 ) *
##      7) LoyalCH > 0.753545 267   85.31 CH ( 0.96255 0.03745 ) *

#Let’s pick the terminal node labeled “10)”. The splitting variable at this node is PriceDiff, and the splitting value is 0.065. There are 75 observations in this node, with a deviance of 75.06. The * at the end of the line denotes that this is a terminal node. The prediction at this node is Purchase = MM: about 20% of the observations in this node have Purchase = CH, and the remaining 80% have Purchase = MM.

#(d): Create a plot of the tree, and interpret the results

plot(oj.tree)
text(oj.tree, pretty = 0)

#LoyalCH is the most important variable in the tree; the top three splits all use LoyalCH. If LoyalCH < 0.28, the tree predicts MM. If LoyalCH > 0.75, the tree predicts CH. For intermediate values of LoyalCH, the decision also depends on PriceDiff (and, deeper in the tree, on ListPriceDiff and SalePriceMM).

#(e): Predict the response on the test data, and produce a confusion matrix comparing the test labels to the predicted test labels. What is the test error rate?

oj.pred = predict(oj.tree, OJ.test, type = "class")
table(OJ.test$Purchase, oj.pred)
##     oj.pred
##       CH  MM
##   CH 149  15
##   MM  30  76
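
#Computing the test error rate from the predictions (an added line, not in the
#original answer):
mean(OJ.test$Purchase != oj.pred)
#(15 + 30) / 270 is about 0.167, so the test error rate is roughly 16.7%.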

#(f): Apply the cv.tree() function to the training set in order to determine the optimal tree size.

cv.oj = cv.tree(oj.tree, FUN = prune.tree)
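
#Note: cv.tree() with FUN = prune.tree cross-validates on deviance. To guide
#pruning by the misclassification rate instead, prune.misclass can be used
#(an alternative sketch, not the original code):
cv.oj.class = cv.tree(oj.tree, FUN = prune.misclass)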

#(g): Produce a plot with tree size on the x-axis and cross-validated classification error rate on the y-axis.

plot(cv.oj$size, cv.oj$dev, type = "b", xlab = "Tree Size", ylab = "Deviance")

#(h): Which tree size corresponds to the lowest cross-validated classification error rate?

#A tree size of 6 gives the lowest cross-validation error.

#(i): Produce a pruned tree corresponding to the optimal tree size obtained using cross-validation. If cross-validation does not lead to selection of a pruned tree, then create a pruned tree with five terminal nodes.

oj.pruned = prune.tree(oj.tree, best = 6)
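
#Visualizing the pruned tree (added here, analogous to part (d)):
plot(oj.pruned)
text(oj.pruned, pretty = 0)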

#(j): Compare the training error rates between the pruned and unpruned trees. Which is higher?

summary(oj.pruned)
## 
## Classification tree:
## snip.tree(tree = oj.tree, nodes = 12L)
## Variables actually used in tree construction:
## [1] "LoyalCH"       "PriceDiff"     "ListPriceDiff"
## Number of terminal nodes:  6 
## Residual mean deviance:  0.7701 = 611.5 / 794 
## Misclassification error rate: 0.175 = 140 / 800

#The misclassification error rate of the pruned tree, 0.175, is higher than that of the unpruned tree, 0.1612.

#(k): Compare the test error rates between the pruned and unpruned trees. Which is higher?

pred.unpruned = predict(oj.tree, OJ.test, type = "class")
misclass.unpruned = sum(OJ.test$Purchase != pred.unpruned)
misclass.unpruned/length(pred.unpruned)
## [1] 0.1666667
pred.pruned = predict(oj.pruned, OJ.test, type = "class")
misclass.pruned = sum(OJ.test$Purchase != pred.pruned)
misclass.pruned/length(pred.pruned)
## [1] 0.2

#The pruned tree has a higher test error rate (0.2) than the unpruned tree (about 0.167).