library (tree)
library (ISLR)

Attaching package: 'ISLR'

The following object is masked _by_ '.GlobalEnv':

    Carseats
attach(Carseats)
The following object is masked _by_ .GlobalEnv:

    High
High=ifelse(Sales <=8,"No","Yes")

# Fitting Classification Trees

We build a tree to predict High from all variables except Sales (High was derived from Sales, so keeping it would hand the model the answer). Note in the summary below that the deviance and misclassification rate are both 0: the tree splits only on High.1, a stale duplicate of the response that ended up in the data frame, so it trivially memorizes the labels.

Carseats = data.frame(Carseats, High)
tree.carseats = tree(High ~ . - Sales, Carseats)
summary(tree.carseats)

Classification tree:
tree(formula = High ~ . - Sales, data = Carseats)
Variables actually used in tree construction:
[1] "High.1"
Number of terminal nodes:  2 
Residual mean deviance:  0 = 0 / 398 
Misclassification error rate: 0 = 0 / 400 
plot(tree.carseats)
text(tree.carseats, pretty = 0)

Printing the tree object shows every branch with its split criterion, number of observations, deviance, predicted value, and class probabilities.

tree.carseats
node), split, n, deviance, yval, (yprob)
      * denotes terminal node

1) root 400 541.5 No ( 0.59 0.41 )  
  2) High.1: No 236   0.0 No ( 1.00 0.00 ) *
  3) High.1: Yes 164   0.0 Yes ( 0.00 1.00 ) *
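
The printed tree confirms the problem: both terminal nodes come from a single split on High.1, the duplicate copy of the response that was already present in Carseats before data.frame(Carseats, High) was called. A minimal sketch of a clean refit, assuming a fresh copy of the data (the exact tree obtained may differ):

```{r}
# Rebuild Carseats from the package copy so no stale High column remains
Carseats <- ISLR::Carseats
Carseats$High <- factor(ifelse(Carseats$Sales <= 8, "No", "Yes"))

# Refit excluding Sales, since High was derived from it
tree.carseats <- tree(High ~ . - Sales, data = Carseats)
summary(tree.carseats)  # should now split on ShelveLoc, Price, etc.
```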

Next we split the data into training and test sets and check the prediction accuracy on the held-out observations.

set.seed(2)
train = sample(1:nrow(Carseats), 200)
Carseats.test = Carseats[-train, ]
High.test = High[-train]
tree.carseats = tree(High ~ . - Sales, Carseats, subset = train)
tree.pred = predict(tree.carseats, Carseats.test, type = "class")
table(tree.pred, High.test)
         High.test
tree.pred  No Yes
      No  116   0
      Yes   0  84
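
The overall accuracy can also be computed directly from the predictions instead of summing the table by hand; a one-line check:

```{r}
# Fraction of test observations classified correctly
mean(tree.pred == High.test)
```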

We now prune the tree. Running cv.tree() with FUN = prune.misclass performs cross-validation to determine the optimal tree size (number of terminal nodes).

set.seed(3)
cv.carseats = cv.tree(tree.carseats, FUN = prune.misclass)
names(cv.carseats)
[1] "size"   "dev"    "k"      "method"
 cv.carseats
$size
[1] 2 1

$dev
[1]  0 80

$k
[1] -Inf   80

$method
[1] "misclass"

attr(,"class")
[1] "prune"         "tree.sequence"
par(mfrow = c(1, 2))
plot(cv.carseats$size, cv.carseats$dev, type = "b")
plot(cv.carseats$k, cv.carseats$dev, type = "b")
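
Rather than reading the optimal size off the plot, it can be extracted from the cv.tree() output directly; a small sketch (here the CV output is degenerate, so the lab's choice of best = 9 is used below):

```{r}
# Size (number of terminal nodes) with the lowest CV misclassification count
best.size <- cv.carseats$size[which.min(cv.carseats$dev)]
best.size
```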

prune.carseats = prune.misclass(tree.carseats, best = 9)
best is bigger than tree size
plot(prune.carseats)
text(prune.carseats, pretty = 0)

To verify the pruned tree, we predict on the Carseats test set once more.

tree.pred = predict(prune.carseats, Carseats.test, type = "class")
table(tree.pred, High.test)
         High.test
tree.pred  No Yes
      No  116   0
      Yes   0  84
(94+60) /200
[1] 0.77
prune.carseats = prune.misclass(tree.carseats, best = 15)
best is bigger than tree size
plot(prune.carseats)
text(prune.carseats, pretty = 0)

tree.pred = predict(prune.carseats, Carseats.test, type = "class")
table(tree.pred, High.test)
         High.test
tree.pred  No Yes
      No  116   0
      Yes   0  84
 (86+62) /200
[1] 0.74

As we can see, this pruning is not as good as the previous one: accuracy falls to 74% versus 77% before. (The counts (94+60)/200 and (86+62)/200 correspond to the original lab's non-degenerate tree; the confusion matrices printed above are perfect only because of the leaked High.1 column.)

# Fitting Regression Trees

We now work with the Boston data set from the MASS package. In this code segment we build the training set and fit a regression tree to medv.

library(MASS)
set.seed(1)
train = sample(1:nrow(Boston), nrow(Boston)/2)
tree.boston = tree(medv ~ ., Boston, subset = train)
summary(tree.boston)

Regression tree:
tree(formula = medv ~ ., data = Boston, subset = train)
Variables actually used in tree construction:
[1] "lstat" "rm"    "dis"  
Number of terminal nodes:  8 
Residual mean deviance:  12.65 = 3099 / 245 
Distribution of residuals:
     Min.   1st Qu.    Median      Mean   3rd Qu.      Max. 
-14.10000  -2.04200  -0.05357   0.00000   1.96000  12.60000 

Our tree has 4 levels and 8 terminal nodes, and uses only three variables: lstat, rm, and dis.

plot(tree.boston)
text(tree.boston, pretty = 0)

We run cross-validation and then prune the tree to the best 5 terminal nodes.

cv.boston=cv.tree(tree.boston)
cv.boston
$size
[1] 8 7 6 5 4 3 2 1

$dev
[1]  5226.322  5228.360  6462.626  6692.615  6397.438  7529.846 11958.691 21118.139

$k
[1]      -Inf  255.6581  451.9272  768.5087  818.8885 1559.1264 4276.5803 9665.3582

$method
[1] "deviance"

attr(,"class")
[1] "prune"         "tree.sequence"
plot(cv.boston$size,cv.boston$dev)

prune.boston = prune.tree(tree.boston, best = 5)
plot(prune.boston)
text(prune.boston, pretty = 0)

We evaluate our predictions on the test half of Boston held out from training.

yhat = predict(tree.boston, newdata = Boston[-train, ])
boston.test = Boston[-train, "medv"]
plot(yhat, boston.test)
abline(0, 1)

mean((yhat - boston.test)^2)
[1] 25.04559
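
Since medv is measured in thousands of dollars, the square root of the test MSE gives an interpretable error scale: the tree's predictions are off by roughly $5,000 on average.

```{r}
# RMSE in the units of medv (thousands of dollars)
sqrt(mean((yhat - boston.test)^2))  # about 5.005
```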

# Bagging and Random Forests

library(randomForest)
randomForest 4.6-14
Type rfNews() to see new features/changes/bug fixes.
set.seed(1)
bag.boston = randomForest(medv ~ ., data = Boston, subset = train,
                          mtry = 13, importance = TRUE)
bag.boston

Call:
 randomForest(formula = medv ~ ., data = Boston, mtry = 13, importance = TRUE,      subset = train) 
               Type of random forest: regression
                     Number of trees: 500
No. of variables tried at each split: 13

          Mean of squared residuals: 11.15723
                    % Var explained: 86.49

As we can see, mtry = 13 means all 13 predictors are considered at each split, which makes this bagging. In the following plot we compare the bagged predictions against the test values.

yhat.bag = predict(bag.boston, newdata = Boston[-train, ])
plot(yhat.bag, boston.test)
abline(0, 1)

mean((yhat.bag - boston.test)^2)
[1] 13.50808
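
Bagging is simply a random forest with mtry equal to the number of predictors, so rather than hard-coding 13 the value can be derived from the data; a sketch:

```{r}
# Bagging = random forest with mtry = p (all predictors tried at each split)
p <- ncol(Boston) - 1  # all columns except the response medv; 13 here
bag.boston <- randomForest(medv ~ ., data = Boston, subset = train,
                           mtry = p, importance = TRUE)
```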

The ntree argument controls how many trees randomForest grows; with only 25 trees the test error rises slightly, to about 13.95.

bag.boston = randomForest(medv ~ ., data = Boston, subset = train,
                          mtry = 13, ntree = 25)
yhat.bag = predict(bag.boston, newdata = Boston[-train, ])
mean((yhat.bag - boston.test)^2)
[1] 13.94835

Next we fit a random forest proper and evaluate variable importance, reducing mtry to 6 and leaving ntree at its default.

set.seed(1)
rf.boston = randomForest(medv ~ ., data = Boston, subset = train,
                         mtry = 6, importance = TRUE)
yhat.rf = predict(rf.boston, newdata = Boston[-train, ])
mean((yhat.rf - boston.test)^2)
[1] 11.66454
importance(rf.boston)
          %IncMSE IncNodePurity
crim    12.132320     986.50338
zn       1.955579      57.96945
indus    9.069302     882.78261
chas     2.210835      45.22941
nox     11.104823    1044.33776
rm      31.784033    6359.31971
age     10.962684     516.82969
dis     15.015236    1224.11605
rad      4.118011      95.94586
tax      8.587932     502.96719
ptratio 12.503896     830.77523
black    6.702609     341.30361
lstat   30.695224    7505.73936

As we can see, importance() reports two measures per variable: %IncMSE, the mean increase in prediction error when that variable's values are permuted, and IncNodePurity, the total decrease in node impurity from splits on that variable. Here rm and lstat stand out as by far the most important.

varImpPlot(rf.boston)
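
The same ranking shown in the plot can be obtained as a sorted table; a sketch:

```{r}
# Variables ordered by permutation importance (%IncMSE), largest first
imp <- importance(rf.boston)
imp[order(imp[, "%IncMSE"], decreasing = TRUE), ]
```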

# Boosting

We apply boosting to our data with a Gaussian distribution (squared-error loss), growing 5,000 trees of interaction depth 4.

library (gbm)
Loaded gbm 2.1.5
set.seed(1)
boost.boston = gbm(medv ~ ., data = Boston[train, ], distribution = "gaussian",
                   n.trees = 5000, interaction.depth = 4)
summary(boost.boston)
(relative influence table and plot omitted)

par(mfrow = c(1, 2))
plot(boost.boston, i = "rm")
plot(boost.boston, i = "lstat")

yhat.boost = predict(boost.boston, newdata = Boston[-train, ], n.trees = 5000)
mean((yhat.boost - boston.test)^2)
[1] 10.81479
boost.boston = gbm(medv ~ ., data = Boston[train, ], distribution = "gaussian",
                   n.trees = 5000, interaction.depth = 4, shrinkage = 0.2,
                   verbose = FALSE)
yhat.boost = predict(boost.boston, newdata = Boston[-train, ], n.trees = 5000)
mean((yhat.boost - boston.test)^2)
[1] 11.51109
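
The number of trees is fixed at 5,000 above. gbm can also cross-validate this choice internally via the cv.folds argument, with gbm.perf() returning the CV-optimal iteration; a sketch under those assumptions:

```{r}
# Fit with 5-fold cross-validation to select the number of trees
set.seed(1)
boost.cv <- gbm(medv ~ ., data = Boston[train, ], distribution = "gaussian",
                n.trees = 5000, interaction.depth = 4, shrinkage = 0.2,
                cv.folds = 5, verbose = FALSE)
best.iter <- gbm.perf(boost.cv, method = "cv")  # CV-chosen tree count
yhat.cv <- predict(boost.cv, newdata = Boston[-train, ], n.trees = best.iter)
mean((yhat.cv - boston.test)^2)
```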