Problem 1.

A.

library(ISLR2)
#splits data into test and train sets
set.seed(1)
train = sample(1:nrow(Carseats), nrow(Carseats) / 2) #split into train/test 50/50
test <- Carseats[-train, ]
carseats.test <- Carseats[-train, "Sales"]
B.

library(tree)

tree.carseats <- tree(Sales ~ ., data = Carseats, subset = train)
summary(tree.carseats)

Regression tree:
tree(formula = Sales ~ ., data = Carseats, subset = train)
Variables actually used in tree construction:
[1] "Price"       "Population"  "CompPrice"   "Income"     
[5] "Advertising" "Age"         "Education"  
Number of terminal nodes:  18 
Residual mean deviance:  2.726 = 496.2 / 182 
Distribution of residuals:
     Min.   1st Qu.    Median      Mean   3rd Qu.      Max. 
-4.361000 -1.092000  0.006545  0.000000  1.125000  4.989000 
plot(tree.carseats)
text(tree.carseats, pretty = 0)

yhat <- predict(tree.carseats, newdata = test)
mean((yhat - carseats.test)^2)
[1] 7.871697

The plotted tree is hard to read because the node labels overlap; the test MSE is 7.87.
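One way to make the plot legible is to shrink the labels; a sketch, assuming the extra cex argument is passed through to text():

plot(tree.carseats)
text(tree.carseats, pretty = 0, cex = 0.6) #smaller labels reduce overlap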

C.

set.seed(1)
cv.carseats <- cv.tree(tree.carseats)
cv.carseats
$size
 [1] 18 17 16 14 13 12 11 10  7  6  5  4  2  1

$dev
 [1] 1322.684 1354.387 1340.895 1293.154 1323.226 1322.193
 [7] 1270.718 1320.904 1327.335 1337.650 1323.785 1433.244
[13] 1473.722 1590.085

$k
 [1]      -Inf  15.99975  18.35626  20.65982  20.95202
 [6]  22.72688  29.47418  38.15698  40.27422  41.01065
[11]  44.44327  93.39728 160.12831 269.57228

$method
[1] "deviance"

attr(,"class")
[1] "prune"         "tree.sequence"
plot(cv.carseats$size, cv.carseats$dev, type = "b")

prune.carseats <- prune.tree(tree.carseats, best = 11) #prune.tree for regression; prune.misclass for classification
plot(prune.carseats)
text(prune.carseats, pretty = 0)

yhat <- predict(prune.carseats, newdata = test)
mean((yhat - carseats.test)^2) #test MSE after pruning to 11 terminal nodes
[1] 7.60365

Pruning lowers the test MSE from 7.87 to 7.60.
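Rather than reading the best size off the plot, it can be pulled from the cv.carseats object directly; a minimal sketch:

best.size <- cv.carseats$size[which.min(cv.carseats$dev)] #11 here
prune.carseats <- prune.tree(tree.carseats, best = best.size)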

D.

library(randomForest)
set.seed(1)

bag.carseats <- randomForest(Sales ~ ., data = Carseats,
    subset = train, mtry = 10, importance = TRUE)
bag.carseats

Call:
 randomForest(formula = Sales ~ ., data = Carseats, mtry = 10,      importance = TRUE, subset = train) 
               Type of random forest: regression
                     Number of trees: 500
No. of variables tried at each split: 10

          Mean of squared residuals: 3.195026
                    % Var explained: 59.37
set.seed(1)
yhat.bag <- predict(bag.carseats, newdata = test)
mean((yhat.bag - carseats.test)^2) #better test error with bagging
[1] 3.113233
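Bagging is the special case mtry = p. Hard-coding 10 works because Carseats has 10 predictors besides Sales, but the count can be derived from the data; a sketch (bag2 is just an illustrative name):

set.seed(1)
p <- ncol(Carseats) - 1 #every column except the response, Sales
bag2 <- randomForest(Sales ~ ., data = Carseats,
    subset = train, mtry = p, importance = TRUE) #same fit as bag.carseats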
importance(bag.carseats)
              %IncMSE IncNodePurity
CompPrice   24.264977     214.88031
Income       6.330426     109.78464
Advertising  8.366619      99.70299
Population   2.407187      85.28203
Price       51.679914     526.59906
ShelveLoc   26.392677     223.56530
Age         12.968903     181.57228
Education    1.509590      55.68405
Urban        2.003649      10.43385
US           3.559154      16.29080
varImpPlot(bag.carseats)

E.

set.seed(1)
rf.carseats <- randomForest(Sales ~ ., data = Carseats,
    subset = train, mtry = 6, importance = TRUE)

yhat.rf <- predict(rf.carseats, newdata = test)
mean((yhat.rf - carseats.test)^2)
[1] 2.995462

The best test MSE so far is 2.99, from a random forest with m = 6 predictors considered at each split.

importance(rf.carseats)
               %IncMSE IncNodePurity
CompPrice   21.6494396     192.25341
Income       4.5460176     122.37487
Advertising  9.7957496     102.45053
Population  -0.5380075      86.51329
Price       47.2200892     493.91143
ShelveLoc   28.3374685     245.16604
Age         13.6683556     177.28292
Education   -1.4358224      62.86011
Urban       -0.7109767      11.97869
US           4.1017980      24.78830
mse <- rep(NA, 10) #test MSE for each value of m
for (m in 1:10) {
  set.seed(1)
  rf.carseats <- randomForest(Sales ~ ., data = Carseats,
    subset = train, mtry = m, importance = TRUE)

  yhat.rf <- predict(rf.carseats, newdata = test)
  mse[m] <- mean((yhat.rf - carseats.test)^2)
}
plot(mse, ylab = "Test MSE", xlab = "m")
points(which.min(mse), min(mse), col = 'red') #mark the minimum

As the graph shows, the test MSE drops rapidly as m increases from 1 to 4 and then levels off, with the minimum at m = 6.
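The same sweep can be done without touching the test set by using out-of-bag error: for regression, randomForest stores the running OOB MSE in its mse component, so the last element is the OOB MSE of the full forest. A sketch:

oob <- sapply(1:10, function(m) {
  set.seed(1)
  rf <- randomForest(Sales ~ ., data = Carseats, subset = train, mtry = m)
  tail(rf$mse, 1) #OOB MSE at ntree = 500
})
which.min(oob) #m with the lowest OOB MSE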

Problem 2.

A.

set.seed(1)
train = sample(1:nrow(OJ), 800)
test = OJ[-train, ]
OJ.test <- OJ[-train, "Purchase"]
B.

set.seed(1)
tree.OJ <- tree(Purchase ~ ., OJ, subset = train)
summary(tree.OJ)

Classification tree:
tree(formula = Purchase ~ ., data = OJ, subset = train)
Variables actually used in tree construction:
[1] "LoyalCH"       "PriceDiff"     "SpecialCH"    
[4] "ListPriceDiff" "PctDiscMM"    
Number of terminal nodes:  9 
Residual mean deviance:  0.7432 = 587.8 / 791 
Misclassification error rate: 0.1588 = 127 / 800 

Only 5 variables are used in tree construction. The training misclassification error rate is 0.1588, and the tree has 9 terminal nodes.
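The residual mean deviance reported by summary() is the total deviance divided by n minus the number of terminal nodes, which can be checked directly:

587.8 / (800 - 9) #approximately 0.743, matching the summary up to rounding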

C.

tree.OJ
node), split, n, deviance, yval, (yprob)
      * denotes terminal node

 1) root 800 1073.00 CH ( 0.60625 0.39375 )  
   2) LoyalCH < 0.5036 365  441.60 MM ( 0.29315 0.70685 )  
     4) LoyalCH < 0.280875 177  140.50 MM ( 0.13559 0.86441 )  
       8) LoyalCH < 0.0356415 59   10.14 MM ( 0.01695 0.98305 ) *
       9) LoyalCH > 0.0356415 118  116.40 MM ( 0.19492 0.80508 ) *
     5) LoyalCH > 0.280875 188  258.00 MM ( 0.44149 0.55851 )  
      10) PriceDiff < 0.05 79   84.79 MM ( 0.22785 0.77215 )  
        20) SpecialCH < 0.5 64   51.98 MM ( 0.14062 0.85938 ) *
        21) SpecialCH > 0.5 15   20.19 CH ( 0.60000 0.40000 ) *
      11) PriceDiff > 0.05 109  147.00 CH ( 0.59633 0.40367 ) *
   3) LoyalCH > 0.5036 435  337.90 CH ( 0.86897 0.13103 )  
     6) LoyalCH < 0.764572 174  201.00 CH ( 0.73563 0.26437 )  
      12) ListPriceDiff < 0.235 72   99.81 MM ( 0.50000 0.50000 )  
        24) PctDiscMM < 0.196196 55   73.14 CH ( 0.61818 0.38182 ) *
        25) PctDiscMM > 0.196196 17   12.32 MM ( 0.11765 0.88235 ) *
      13) ListPriceDiff > 0.235 102   65.43 CH ( 0.90196 0.09804 ) *
     7) LoyalCH > 0.764572 261   91.20 CH ( 0.95785 0.04215 ) *

Terminal node 7: This node is reached when LoyalCH > 0.765. It contains 261 observations with a deviance of 91.20 and predicts CH. About 95.8% of the observations in the node are CH and the remaining 4.2% are MM.

D.

plot(tree.OJ)
text(tree.OJ, pretty = 0)

The tree is somewhat messy since the text overlaps at some points, but it can be interpreted with some effort. LoyalCH seems to be the most important variable, as it is used for the root split.

E.

tree.pred <- predict(tree.OJ, test,
    type = "class")
table(tree.pred, OJ.test)
         OJ.test
tree.pred  CH  MM
       CH 160  38
       MM   8  64
100-((160+64)/270)*100
[1] 17.03704

The test error rate is 17.04%.
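Computing the error rate straight from the confusion matrix avoids hand-typed counts; a minimal sketch using the objects above:

conf <- table(tree.pred, OJ.test)
(1 - sum(diag(conf)) / sum(conf)) * 100 #share of off-diagonal (misclassified) cases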

F.

set.seed(1)
cv.OJ <- cv.tree(tree.OJ, FUN = prune.misclass)
cv.OJ
$size
[1] 9 8 7 4 2 1

$dev
[1] 145 145 146 146 167 315

$k
[1]       -Inf   0.000000   3.000000   4.333333  10.500000
[6] 151.000000

$method
[1] "misclass"

attr(,"class")
[1] "prune"         "tree.sequence"
G.

plot(cv.OJ$size, cv.OJ$dev, type = "b")

H.

min.dev <- min(cv.OJ$dev) #avoid shadowing the built-in min()
plot(cv.OJ$size, cv.OJ$dev, type = "b")
points(cv.OJ$size[which.min(cv.OJ$dev)],
       min.dev, col = 'red')

Tree sizes of 9 and 8 terminal nodes tie for the smallest cross-validated error (145 misclassifications).
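Because of the tie, which.min() would return the first match (size 9); to prefer the smaller tree among tied sizes, a sketch:

best.size <- min(cv.OJ$size[cv.OJ$dev == min(cv.OJ$dev)]) #smallest size reaching the minimum, 8 here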

I.

prune.OJ <- prune.misclass(tree.OJ, best = 5) #prune.misclass for classification; prune.tree for regression
plot(prune.OJ)
text(prune.OJ, pretty = 0)

J.

tree.pred <- predict(prune.OJ, newdata = test,
    type = "class")
table(tree.pred, OJ.test)
         OJ.test
tree.pred  CH  MM
       CH 160  36
       MM   8  66
100-(((160+66)/270)*100)
[1] 16.2963

The test error rate for the pruned tree is 16.30%, slightly better than the 17.04% of the unpruned tree. The pruned tree is also easier to interpret.
