Questions 3, 8, 9
Gini Index, Classification Error, and Entropy Plot
p = seq(0, 1, 0.01)
gini = p * (1 - p) * 2                            # Gini index: 2 p (1 - p)
entropy = -(p * log(p) + (1 - p) * log(1 - p))    # entropy (NaN at p = 0 and 1; those points are skipped)
class.err = 1 - pmax(p, 1 - p)                    # classification error: 1 - max(p, 1 - p)
matplot(p, cbind(gini, entropy, class.err),
type = "l", lty = 1,
col = c("purple", "blue", "hotpink"),
ylab = "Impurity measure")
legend("topright", legend = c("Gini", "Entropy", "Classification error"),
col = c("purple", "blue", "hotpink"), lty = 1)
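For reference, with p denoting the proportion of observations in the first of the two classes, the quantities plotted above are

$$G = 2p(1 - p), \qquad D = -p \log p - (1 - p)\log(1 - p), \qquad E = 1 - \max(p,\ 1 - p),$$

with the natural logarithm used for the entropy, matching the code.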
Carseats Data Set
library(ISLR2)
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.2 ──
## ✔ ggplot2 3.3.6 ✔ purrr 0.3.5
## ✔ tibble 3.1.8 ✔ dplyr 1.0.10
## ✔ tidyr 1.2.1 ✔ stringr 1.4.1
## ✔ readr 2.1.3 ✔ forcats 0.5.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
library(tree)
## Warning: package 'tree' was built under R version 4.2.2
library(randomForest)
## Warning: package 'randomForest' was built under R version 4.2.2
## randomForest 4.7-1.1
## Type rfNews() to see new features/changes/bug fixes.
##
## Attaching package: 'randomForest'
##
## The following object is masked from 'package:dplyr':
##
## combine
##
## The following object is masked from 'package:ggplot2':
##
## margin
library(BART)
## Warning: package 'BART' was built under R version 4.2.2
## Loading required package: nlme
##
## Attaching package: 'nlme'
##
## The following object is masked from 'package:dplyr':
##
## collapse
##
## Loading required package: nnet
## Loading required package: survival
str(Carseats)
## 'data.frame': 400 obs. of 11 variables:
## $ Sales : num 9.5 11.22 10.06 7.4 4.15 ...
## $ CompPrice : num 138 111 113 117 141 124 115 136 132 132 ...
## $ Income : num 73 48 35 100 64 113 105 81 110 113 ...
## $ Advertising: num 11 16 10 4 3 13 0 15 0 0 ...
## $ Population : num 276 260 269 466 340 501 45 425 108 131 ...
## $ Price : num 120 83 80 97 128 72 108 120 124 124 ...
## $ ShelveLoc : Factor w/ 3 levels "Bad","Good","Medium": 1 2 3 3 1 1 3 2 3 3 ...
## $ Age : num 42 65 59 55 38 78 71 67 76 76 ...
## $ Education : num 17 10 12 14 13 16 15 10 10 17 ...
## $ Urban : Factor w/ 2 levels "No","Yes": 2 2 2 2 2 1 2 2 1 1 ...
## $ US : Factor w/ 2 levels "No","Yes": 2 2 2 2 1 2 1 2 1 2 ...
attach(Carseats)
set.seed(13)
train = sample(nrow(Carseats), nrow(Carseats) / 2)
test = (1:nrow(Carseats))[-train] # row indices of the held-out observations
Carseats.train = Carseats[train, ]
Carseats.test = Carseats[-train, ]
The regression tree's test MSE is 5.20.
tree.carseats = tree(Sales~.,
data = Carseats.train)
summary(tree.carseats)
##
## Regression tree:
## tree(formula = Sales ~ ., data = Carseats.train)
## Variables actually used in tree construction:
## [1] "ShelveLoc" "Price" "Age" "US" "Advertising"
## [6] "Income" "Education" "CompPrice"
## Number of terminal nodes: 18
## Residual mean deviance: 1.925 = 350.4 / 182
## Distribution of residuals:
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -3.58600 -0.91000 -0.05472 0.00000 0.89790 3.39300
plot(tree.carseats)
text(tree.carseats,
pretty = 0,
cex = 0.5)
pred.carseats = predict(tree.carseats,
Carseats.test)
mean((Carseats.test$Sales - pred.carseats)^2)
## [1] 5.20011
The pruned tree's test MSE is 5.07, slightly lower than the unpruned test MSE of 5.20.
cv.carseats = cv.tree(tree.carseats,
FUN = prune.tree)
par(mfrow=c(1, 2))
plot(cv.carseats$size,
cv.carseats$dev,
type="b")
plot(cv.carseats$k,
cv.carseats$dev,
type="b")
par(mfrow=c(1, 1))
pruned.carseats = prune.tree(tree.carseats,
best = cv.carseats$size[which.min(cv.carseats$dev)])
plot(pruned.carseats)
text(pruned.carseats,
pretty = 0,
cex = 0.5)
pruned.pred = predict(pruned.carseats,
Carseats[-train,])
mean((Carseats.test$Sales - pruned.pred)^2)
## [1] 5.070617
The bagging test MSE is 2.55. ShelveLoc, Price, and Advertising are the three most important predictors of Sales, with CompPrice and Age very close behind Advertising.
bag.carseats = randomForest(Sales~ . ,
data = Carseats.train,
mtry = 10,
ntree = 500,
importance = T)
bag.carseats
##
## Call:
## randomForest(formula = Sales ~ ., data = Carseats.train, mtry = 10, ntree = 500, importance = T)
## Type of random forest: regression
## Number of trees: 500
## No. of variables tried at each split: 10
##
## Mean of squared residuals: 2.682855
## % Var explained: 65.87
bag.pred = predict(bag.carseats,
Carseats.test)
mean((Carseats.test$Sales - bag.pred)^2)
## [1] 2.552914
importance(bag.carseats)
## %IncMSE IncNodePurity
## CompPrice 18.781855 120.392667
## Income 7.957625 81.100990
## Advertising 19.443155 181.749528
## Population -1.612994 51.633694
## Price 39.837359 375.449738
## ShelveLoc 59.511466 498.075210
## Age 18.564966 149.439259
## Education 2.344270 53.687163
## Urban -2.898076 6.863003
## US 3.987577 7.525822
varImpPlot(bag.carseats)
With mtry = 5 the test MSE is 2.82, and with mtry = 3 it rises to 3.11, so decreasing m below the bagging value of 10 increases the test error on this split. (A sketch sweeping the full range of mtry follows the two fits below.)
set.seed(13)
rf3.cs = randomForest(Sales~.,
data = Carseats,
mtry = 3,
subset = train,
importance = T)
rf3.cs
##
## Call:
## randomForest(formula = Sales ~ ., data = Carseats, mtry = 3, importance = T, subset = train)
## Type of random forest: regression
## Number of trees: 500
## No. of variables tried at each split: 3
##
## Mean of squared residuals: 3.04104
## % Var explained: 61.31
yhat3.rf = predict(rf3.cs,
newdata = Carseats.test)
mean((yhat3.rf - Carseats.test$Sales)^2)
## [1] 3.116101
importance(rf3.cs)
## %IncMSE IncNodePurity
## CompPrice 10.8606149 126.80587
## Income 4.0653258 112.37187
## Advertising 16.0861095 169.83726
## Population -1.3661221 96.40981
## Price 26.8233313 305.66725
## ShelveLoc 42.3662156 393.63513
## Age 16.3863741 172.83321
## Education 1.8358305 75.44495
## Urban -0.9296608 13.78110
## US 5.0078433 27.46319
rf5.cs = randomForest(Sales~.,
data = Carseats,
mtry = 5,
subset = train,
importance = T)
rf5.cs
##
## Call:
## randomForest(formula = Sales ~ ., data = Carseats, mtry = 5, importance = T, subset = train)
## Type of random forest: regression
## Number of trees: 500
## No. of variables tried at each split: 5
##
## Mean of squared residuals: 2.80131
## % Var explained: 64.36
yhat5.rf = predict(rf5.cs,
newdata = Carseats.test)
mean((yhat5.rf - Carseats.test$Sales)^2)
## [1] 2.822125
importance(rf5.cs)
## %IncMSE IncNodePurity
## CompPrice 15.141167 123.057203
## Income 7.039337 93.414493
## Advertising 16.805976 185.378840
## Population -2.468590 73.583879
## Price 36.384216 337.217243
## ShelveLoc 51.825201 441.881588
## Age 17.735338 165.979160
## Education 3.804356 65.282405
## Urban -1.083013 8.802437
## US 5.848327 20.676322
par(mfrow = c(2,2))
varImpPlot(rf5.cs)
varImpPlot(rf3.cs)
par(mfrow = c(1,1))
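To see the effect of m more systematically, one could sweep mtry across its full range and record the test MSE for each value. A sketch along the lines of the fits above (not evaluated here):
set.seed(13)
mtry.mse = sapply(1:10, function(m) {
# fit a random forest with m variables tried at each split and score it on the test set
rf = randomForest(Sales ~ ., data = Carseats.train, mtry = m, ntree = 500)
mean((predict(rf, Carseats.test) - Carseats.test$Sales)^2)
})
plot(1:10, mtry.mse, type = "b", xlab = "mtry", ylab = "Test MSE")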
The BART test MSE is 0.54.
x = Carseats[, 2:11]
y = Carseats[, "Sales"]
xtrain = x[train,]
ytrain = y[train]
xtest = x[test, ]
ytest = y[test]
set.seed(1)
bartfit = gbart(xtrain,
ytrain,
x.test = xtest)
## *****Calling gbart: type=1
## *****Data:
## data:n,p,np: 200, 14, 196
## y1,yn: -4.098800, 0.641200
## x1,x[n*p]: 108.000000, 1.000000
## xp1,xp[np*p]: 132.000000, 0.000000
## *****Number of Trees: 200
## *****Number of Cut Points: 61 ... 1
## *****burn,nd,thin: 100,1000,1
## *****Prior:beta,alpha,tau,nu,lambda,offset: 2,0.95,0.271529,3,0.198733,7.5688
## *****sigma: 1.010068
## *****w (weights): 1.000000 ... 1.000000
## *****Dirichlet:sparse,theta,omega,a,b,rho,augment: 0,0,1,0.5,1,14,0
## *****printevery: 100
##
## MCMC
## done 0 (out of 1100)
## done 100 (out of 1100)
## done 200 (out of 1100)
## done 300 (out of 1100)
## done 400 (out of 1100)
## done 500 (out of 1100)
## done 600 (out of 1100)
## done 700 (out of 1100)
## done 800 (out of 1100)
## done 900 (out of 1100)
## done 1000 (out of 1100)
## time: 3s
## trcnt,tecnt: 1000,1000
yhat.bart = bartfit$yhat.test.mean
mean((ytest - yhat.bart)^2)
## [1] 0.5364448
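The fitted BART object also records how often each predictor appears across the sampled trees, which can be read as a rough importance measure. A short sketch using the varcount.mean component of the gbart fit:
ord = order(bartfit$varcount.mean, decreasing = TRUE)
bartfit$varcount.mean[ord] # predictors that appear most often come first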
detach(Carseats)
rm(list = ls()) # clear all variables from this problem
OJ Data Set
oj = OJ
str(oj)
## 'data.frame': 1070 obs. of 18 variables:
## $ Purchase : Factor w/ 2 levels "CH","MM": 1 1 1 2 1 1 1 1 1 1 ...
## $ WeekofPurchase: num 237 239 245 227 228 230 232 234 235 238 ...
## $ StoreID : num 1 1 1 1 7 7 7 7 7 7 ...
## $ PriceCH : num 1.75 1.75 1.86 1.69 1.69 1.69 1.69 1.75 1.75 1.75 ...
## $ PriceMM : num 1.99 1.99 2.09 1.69 1.69 1.99 1.99 1.99 1.99 1.99 ...
## $ DiscCH : num 0 0 0.17 0 0 0 0 0 0 0 ...
## $ DiscMM : num 0 0.3 0 0 0 0 0.4 0.4 0.4 0.4 ...
## $ SpecialCH : num 0 0 0 0 0 0 1 1 0 0 ...
## $ SpecialMM : num 0 1 0 0 0 1 1 0 0 0 ...
## $ LoyalCH : num 0.5 0.6 0.68 0.4 0.957 ...
## $ SalePriceMM : num 1.99 1.69 2.09 1.69 1.69 1.99 1.59 1.59 1.59 1.59 ...
## $ SalePriceCH : num 1.75 1.75 1.69 1.69 1.69 1.69 1.69 1.75 1.75 1.75 ...
## $ PriceDiff : num 0.24 -0.06 0.4 0 0 0.3 -0.1 -0.16 -0.16 -0.16 ...
## $ Store7 : Factor w/ 2 levels "No","Yes": 1 1 1 1 2 2 2 2 2 2 ...
## $ PctDiscMM : num 0 0.151 0 0 0 ...
## $ PctDiscCH : num 0 0 0.0914 0 0 ...
## $ ListPriceDiff : num 0.24 0.24 0.23 0 0 0.3 0.3 0.24 0.24 0.24 ...
## $ STORE : num 1 1 1 1 0 0 0 0 0 0 ...
attach(oj)
set.seed(13)
train = sample(1:nrow(oj), 800)
oj.train = oj[train,]
oj.test = oj[-train,]
The tree uses five variables: “LoyalCH”, “PriceDiff”, “WeekofPurchase”, “ListPriceDiff”, and “DiscMM”. The training misclassification error rate is 0.156, and the tree has 9 terminal nodes.
oj.tree = tree(Purchase ~ .,
oj.train)
summary(oj.tree)
##
## Classification tree:
## tree(formula = Purchase ~ ., data = oj.train)
## Variables actually used in tree construction:
## [1] "LoyalCH" "PriceDiff" "WeekofPurchase" "ListPriceDiff"
## [5] "DiscMM"
## Number of terminal nodes: 9
## Residual mean deviance: 0.7313 = 578.5 / 791
## Misclassification error rate: 0.1562 = 125 / 800
Consider terminal node ‘4)’. It is reached when LoyalCH < 0.051325 (after the first split at LoyalCH < 0.5036). The node contains 57 observations, its deviance is 10.07, and its predicted class is MM; about 98% of the observations in the node bought MM. The asterisk at the end of the line marks it as a terminal node. (A quick numerical check of the reported deviance follows the printout below.)
oj.tree
## node), split, n, deviance, yval, (yprob)
## * denotes terminal node
##
## 1) root 800 1061.00 CH ( 0.62250 0.37750 )
## 2) LoyalCH < 0.5036 344 416.50 MM ( 0.29360 0.70640 )
## 4) LoyalCH < 0.051325 57 10.07 MM ( 0.01754 0.98246 ) *
## 5) LoyalCH > 0.051325 287 371.10 MM ( 0.34843 0.65157 )
## 10) PriceDiff < 0.065 117 110.10 MM ( 0.17949 0.82051 ) *
## 11) PriceDiff > 0.065 170 234.80 MM ( 0.46471 0.53529 )
## 22) WeekofPurchase < 249 73 89.35 MM ( 0.30137 0.69863 ) *
## 23) WeekofPurchase > 249 97 131.50 CH ( 0.58763 0.41237 )
## 46) LoyalCH < 0.430291 70 96.98 MM ( 0.48571 0.51429 ) *
## 47) LoyalCH > 0.430291 27 22.65 CH ( 0.85185 0.14815 ) *
## 3) LoyalCH > 0.5036 456 351.30 CH ( 0.87061 0.12939 )
## 6) LoyalCH < 0.764572 201 225.50 CH ( 0.75124 0.24876 )
## 12) ListPriceDiff < 0.235 78 108.10 CH ( 0.51282 0.48718 )
## 24) DiscMM < 0.15 40 47.05 CH ( 0.72500 0.27500 ) *
## 25) DiscMM > 0.15 38 45.73 MM ( 0.28947 0.71053 ) *
## 13) ListPriceDiff > 0.235 123 78.64 CH ( 0.90244 0.09756 ) *
## 7) LoyalCH > 0.764572 255 77.87 CH ( 0.96471 0.03529 ) *
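As a quick check of the deviance reported for node ‘4)’: the tree package computes a node's deviance as -2 times the multinomial log-likelihood, i.e. -2 * sum over classes of n_k * log(n_k / n). Recovering the class counts from n = 57 and the printed class proportions (roughly 1 CH and 56 MM):
nk = c(CH = 1, MM = 56) # class counts implied by n = 57 and yprob
-2 * sum(nk * log(nk / sum(nk))) # about 10.07, matching the printout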
plot(oj.tree)
text(oj.tree,
pretty = 0,
cex = 0.5)
oj.pred = predict(oj.tree,
oj.test,
type = "class")
table(oj.test$Purchase,
oj.pred)
## oj.pred
## CH MM
## CH 126 29
## MM 20 95
mean(oj.test$Purchase == oj.pred)
## [1] 0.8185185
1 - mean(oj.test$Purchase == oj.pred)
## [1] 0.1814815
oj.cv = cv.tree(oj.tree,
FUN = prune.misclass)
oj.cv
## $size
## [1] 9 8 5 2 1
##
## $dev
## [1] 149 149 158 158 302
##
## $k
## [1] -Inf 2.000000 5.333333 5.666667 142.000000
##
## $method
## [1] "misclass"
##
## attr(,"class")
## [1] "prune" "tree.sequence"
plot(oj.cv$size,
oj.cv$dev,
type = "b",
xlab = "Tree Size",
ylab = "CV Error Rate")
Tree sizes of 8 and 9 are tied for the lowest CV error (149 misclassifications), so the smaller tree with 8 terminal nodes is chosen.
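Since two sizes tie, the smallest size attaining the minimum CV error can also be picked programmatically; a small sketch:
min(oj.cv$size[oj.cv$dev == min(oj.cv$dev)]) # smallest tree size among those tied at the minimum (8)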
oj.pruned = prune.tree(oj.tree, best=8)
summary(oj.tree)
##
## Classification tree:
## tree(formula = Purchase ~ ., data = oj.train)
## Variables actually used in tree construction:
## [1] "LoyalCH" "PriceDiff" "WeekofPurchase" "ListPriceDiff"
## [5] "DiscMM"
## Number of terminal nodes: 9
## Residual mean deviance: 0.7313 = 578.5 / 791
## Misclassification error rate: 0.1562 = 125 / 800
summary(oj.pruned)
##
## Classification tree:
## snip.tree(tree = oj.tree, nodes = 23L)
## Variables actually used in tree construction:
## [1] "LoyalCH" "PriceDiff" "WeekofPurchase" "ListPriceDiff"
## [5] "DiscMM"
## Number of terminal nodes: 8
## Residual mean deviance: 0.7454 = 590.3 / 792
## Misclassification error rate: 0.1588 = 127 / 800
The pruned tree's training error rate (15.9%) is only slightly higher than the unpruned tree's (15.6%), and its test error rate is also higher (20.7% vs. 18.1%).
pred.unpruned = predict(oj.tree,
oj.test,
type="class")
misclass.unpruned = sum(oj.test$Purchase != pred.unpruned)
misclass.unpruned / length(pred.unpruned)
## [1] 0.1814815
pred.pruned = predict(oj.pruned,
oj.test,
type="class")
misclass.pruned = sum(oj.test$Purchase != pred.pruned)
misclass.pruned / length(pred.pruned)
## [1] 0.2074074