library(caret)
## Loading required package: ggplot2
## Loading required package: lattice
datos <- read.csv("/cloud/project/breast-cancer-wisconsin.data", header=FALSE)  # V1 = sample id, V2-V10 = cytology features, V11 = class
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
glimpse(datos)
## Rows: 699
## Columns: 11
## $ V1 <int> 1000025, 1002945, 1015425, 1016277, 1017023, 1017122, 1018099, 101…
## $ V2 <int> 5, 5, 3, 6, 4, 8, 1, 2, 2, 4, 1, 2, 5, 1, 8, 7, 4, 4, 10, 6, 7, 10…
## $ V3 <int> 1, 4, 1, 8, 1, 10, 1, 1, 1, 2, 1, 1, 3, 1, 7, 4, 1, 1, 7, 1, 3, 5,…
## $ V4 <int> 1, 4, 1, 8, 1, 10, 1, 2, 1, 1, 1, 1, 3, 1, 5, 6, 1, 1, 7, 1, 2, 5,…
## $ V5 <int> 1, 5, 1, 1, 3, 8, 1, 1, 1, 1, 1, 1, 3, 1, 10, 4, 1, 1, 6, 1, 10, 3…
## $ V6 <int> 2, 7, 2, 3, 2, 7, 2, 2, 2, 2, 1, 2, 2, 2, 7, 6, 2, 2, 4, 2, 5, 6, …
## $ V7 <chr> "1", "10", "2", "4", "1", "10", "10", "1", "1", "1", "1", "1", "3"…
## $ V8 <int> 3, 3, 3, 3, 3, 9, 3, 3, 1, 2, 3, 2, 4, 3, 5, 4, 2, 3, 4, 3, 5, 7, …
## $ V9 <int> 1, 2, 1, 7, 1, 7, 1, 1, 1, 1, 1, 1, 4, 1, 5, 3, 1, 1, 1, 1, 4, 10,…
## $ V10 <int> 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, 4, 1, …
## $ V11 <int> 2, 2, 2, 2, 2, 4, 2, 2, 2, 2, 2, 2, 4, 2, 4, 4, 2, 2, 4, 2, 4, 4, …
# V7 (bare nuclei) uses '?' as its missing-value marker; recode it to '1' so the column can be coerced to integer
datos$V7 <- ifelse(datos$V7 == '?', '1', datos$V7)
datos$V11 <- as.factor(datos$V11)  # class label: 2 = benign, 4 = malignant
datos$V7 <- as.integer(datos$V7)
glimpse(datos)
## Rows: 699
## Columns: 11
## $ V1 <int> 1000025, 1002945, 1015425, 1016277, 1017023, 1017122, 1018099, 101…
## $ V2 <int> 5, 5, 3, 6, 4, 8, 1, 2, 2, 4, 1, 2, 5, 1, 8, 7, 4, 4, 10, 6, 7, 10…
## $ V3 <int> 1, 4, 1, 8, 1, 10, 1, 1, 1, 2, 1, 1, 3, 1, 7, 4, 1, 1, 7, 1, 3, 5,…
## $ V4 <int> 1, 4, 1, 8, 1, 10, 1, 2, 1, 1, 1, 1, 3, 1, 5, 6, 1, 1, 7, 1, 2, 5,…
## $ V5 <int> 1, 5, 1, 1, 3, 8, 1, 1, 1, 1, 1, 1, 3, 1, 10, 4, 1, 1, 6, 1, 10, 3…
## $ V6 <int> 2, 7, 2, 3, 2, 7, 2, 2, 2, 2, 1, 2, 2, 2, 7, 6, 2, 2, 4, 2, 5, 6, …
## $ V7 <int> 1, 10, 2, 4, 1, 10, 10, 1, 1, 1, 1, 1, 3, 3, 9, 1, 1, 1, 10, 1, 10…
## $ V8 <int> 3, 3, 3, 3, 3, 9, 3, 3, 1, 2, 3, 2, 4, 3, 5, 4, 2, 3, 4, 3, 5, 7, …
## $ V9 <int> 1, 2, 1, 7, 1, 7, 1, 1, 1, 1, 1, 1, 4, 1, 5, 3, 1, 1, 1, 1, 4, 10,…
## $ V10 <int> 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, 4, 1, …
## $ V11 <fct> 2, 2, 2, 2, 2, 4, 2, 2, 2, 2, 2, 2, 4, 2, 4, 4, 2, 2, 4, 2, 4, 4, …
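The recoding above hardcodes '?' to '1'. An alternative worth noting, sketched below under the assumption that the file layout is unchanged (datos2 is a hypothetical copy, not used elsewhere), is to read '?' as NA at load time and impute the column median:

# Alternative sketch: treat '?' as NA when reading, then impute the median
datos2 <- read.csv("/cloud/project/breast-cancer-wisconsin.data",
                   header = FALSE, na.strings = "?")
sum(is.na(datos2$V7))                                  # the former '?' entries are now NA
datos2$V7[is.na(datos2$V7)] <- median(datos2$V7, na.rm = TRUE)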
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.2 ──
## ✔ tibble 3.1.8 ✔ purrr 0.3.5
## ✔ tidyr 1.2.1 ✔ stringr 1.4.1
## ✔ readr 2.1.3 ✔ forcats 0.5.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ✖ purrr::lift() masks caret::lift()
sapply(datos, function(x) sum(is.na(x)))  # missing values per column (none remain: '?' was already recoded)
## V1 V2 V3 V4 V5 V6 V7 V8 V9 V10 V11
## 0 0 0 0 0 0 0 0 0 0 0
ggplot(data = datos, aes(x = V11, fill = V11)) +  # geom_bar counts by default, so y = ..count.. is unnecessary
  geom_bar() +
  scale_fill_manual(values = c("gray50", "orangered2")) +
  labs(title = "Tumor Type") +
  theme_bw() +
  theme(legend.position = "bottom")
table(datos$V11)
##
## 2 4
## 458 241
prop.table(table(datos$V11)) %>% round(digits = 3)
##
## 2 4
## 0.655 0.345
set.seed(100)
# Stratified partition on V11; p is the fraction assigned to the training set
IndicesEntrenamiento <- createDataPartition(y = datos$V11,
                                            p = 0.655,
                                            list = FALSE)
Entrenamiento <- datos[IndicesEntrenamiento, ]
Test <- datos[-IndicesEntrenamiento, ]
round(prop.table(table(datos$V11)),3)
##
## 2 4
## 0.655 0.345
round(prop.table(table(Entrenamiento$V11)),3)
##
## 2 4
## 0.655 0.345
round(prop.table(table(Test$V11)),3)
##
## 2 4
## 0.656 0.344
names(getModelInfo())  # every model method caret can train
## [1] "ada" "AdaBag" "AdaBoost.M1"
## [4] "adaboost" "amdai" "ANFIS"
## [7] "avNNet" "awnb" "awtan"
## [10] "bag" "bagEarth" "bagEarthGCV"
## [13] "bagFDA" "bagFDAGCV" "bam"
## [16] "bartMachine" "bayesglm" "binda"
## [19] "blackboost" "blasso" "blassoAveraged"
## [22] "bridge" "brnn" "BstLm"
## [25] "bstSm" "bstTree" "C5.0"
## [28] "C5.0Cost" "C5.0Rules" "C5.0Tree"
## [31] "cforest" "chaid" "CSimca"
## [34] "ctree" "ctree2" "cubist"
## [37] "dda" "deepboost" "DENFIS"
## [40] "dnn" "dwdLinear" "dwdPoly"
## [43] "dwdRadial" "earth" "elm"
## [46] "enet" "evtree" "extraTrees"
## [49] "fda" "FH.GBML" "FIR.DM"
## [52] "foba" "FRBCS.CHI" "FRBCS.W"
## [55] "FS.HGD" "gam" "gamboost"
## [58] "gamLoess" "gamSpline" "gaussprLinear"
## [61] "gaussprPoly" "gaussprRadial" "gbm_h2o"
## [64] "gbm" "gcvEarth" "GFS.FR.MOGUL"
## [67] "GFS.LT.RS" "GFS.THRIFT" "glm.nb"
## [70] "glm" "glmboost" "glmnet_h2o"
## [73] "glmnet" "glmStepAIC" "gpls"
## [76] "hda" "hdda" "hdrda"
## [79] "HYFIS" "icr" "J48"
## [82] "JRip" "kernelpls" "kknn"
## [85] "knn" "krlsPoly" "krlsRadial"
## [88] "lars" "lars2" "lasso"
## [91] "lda" "lda2" "leapBackward"
## [94] "leapForward" "leapSeq" "Linda"
## [97] "lm" "lmStepAIC" "LMT"
## [100] "loclda" "logicBag" "LogitBoost"
## [103] "logreg" "lssvmLinear" "lssvmPoly"
## [106] "lssvmRadial" "lvq" "M5"
## [109] "M5Rules" "manb" "mda"
## [112] "Mlda" "mlp" "mlpKerasDecay"
## [115] "mlpKerasDecayCost" "mlpKerasDropout" "mlpKerasDropoutCost"
## [118] "mlpML" "mlpSGD" "mlpWeightDecay"
## [121] "mlpWeightDecayML" "monmlp" "msaenet"
## [124] "multinom" "mxnet" "mxnetAdam"
## [127] "naive_bayes" "nb" "nbDiscrete"
## [130] "nbSearch" "neuralnet" "nnet"
## [133] "nnls" "nodeHarvest" "null"
## [136] "OneR" "ordinalNet" "ordinalRF"
## [139] "ORFlog" "ORFpls" "ORFridge"
## [142] "ORFsvm" "ownn" "pam"
## [145] "parRF" "PART" "partDSA"
## [148] "pcaNNet" "pcr" "pda"
## [151] "pda2" "penalized" "PenalizedLDA"
## [154] "plr" "pls" "plsRglm"
## [157] "polr" "ppr" "pre"
## [160] "PRIM" "protoclass" "qda"
## [163] "QdaCov" "qrf" "qrnn"
## [166] "randomGLM" "ranger" "rbf"
## [169] "rbfDDA" "Rborist" "rda"
## [172] "regLogistic" "relaxo" "rf"
## [175] "rFerns" "RFlda" "rfRules"
## [178] "ridge" "rlda" "rlm"
## [181] "rmda" "rocc" "rotationForest"
## [184] "rotationForestCp" "rpart" "rpart1SE"
## [187] "rpart2" "rpartCost" "rpartScore"
## [190] "rqlasso" "rqnc" "RRF"
## [193] "RRFglobal" "rrlda" "RSimca"
## [196] "rvmLinear" "rvmPoly" "rvmRadial"
## [199] "SBC" "sda" "sdwd"
## [202] "simpls" "SLAVE" "slda"
## [205] "smda" "snn" "sparseLDA"
## [208] "spikeslab" "spls" "stepLDA"
## [211] "stepQDA" "superpc" "svmBoundrangeString"
## [214] "svmExpoString" "svmLinear" "svmLinear2"
## [217] "svmLinear3" "svmLinearWeights" "svmLinearWeights2"
## [220] "svmPoly" "svmRadial" "svmRadialCost"
## [223] "svmRadialSigma" "svmRadialWeights" "svmSpectrumString"
## [226] "tan" "tanSearch" "treebag"
## [229] "vbmpRadial" "vglmAdjCat" "vglmContRatio"
## [232] "vglmCumulative" "widekernelpls" "WM"
## [235] "wsrf" "xgbDART" "xgbLinear"
## [238] "xgbTree" "xyf"
modelLookup(model="rpart") #Algoritmo Arbol Cart
## model parameter label forReg forClass probModel
## 1 rpart cp Complexity Parameter TRUE TRUE TRUE
modelLookup(model="knn") # k vecinos
## model parameter label forReg forClass probModel
## 1 knn k #Neighbors TRUE TRUE TRUE
modelLookup(model="rf") # R Forets
## model parameter label forReg forClass probModel
## 1 rf mtry #Randomly Selected Predictors TRUE TRUE TRUE
modelLookup(model="glm") # Regresión L
## model parameter label forReg forClass probModel
## 1 glm parameter parameter TRUE TRUE TRUE
modelLookup(model="rpart")
## model parameter label forReg forClass probModel
## 1 rpart cp Complexity Parameter TRUE TRUE TRUE
ctrl <- trainControl(method = "cv", number = 10)  # 10-fold cross-validation
set.seed(123)
modelo_cart <- train(V11 ~ .,
                     data = datos,  # note: fit on the full data set, not the Entrenamiento split
                     method = "rpart",
                     trControl = ctrl,
                     metric = "Accuracy",
                     tuneLength = 10)
modelo_cart
## CART
##
## 699 samples
## 10 predictor
## 2 classes: '2', '4'
##
## No pre-processing
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 629, 629, 629, 629, 629, 630, ...
## Resampling results across tuning parameters:
##
## cp Accuracy Kappa
## 0.00000000 0.9427927 0.8732828
## 0.08667589 0.9072388 0.7973820
## 0.17335178 0.9072388 0.7973820
## 0.26002766 0.9072388 0.7973820
## 0.34670355 0.9072388 0.7973820
## 0.43337944 0.9072388 0.7973820
## 0.52005533 0.9072388 0.7973820
## 0.60673121 0.9072388 0.7973820
## 0.69340710 0.9072388 0.7973820
## 0.78008299 0.7921249 0.4473301
##
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was cp = 0.
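With tuneLength = 10, caret chose the cp grid itself, and the best accuracy landed at cp = 0, so a finer hand-picked grid near zero could be worth exploring via tuneGrid. A minimal sketch (the grid values are illustrative, not from the original analysis):

# Sketch: explicit cp grid instead of tuneLength
set.seed(123)
modelo_cart2 <- train(V11 ~ .,
                      data = datos,
                      method = "rpart",
                      trControl = ctrl,
                      metric = "Accuracy",
                      tuneGrid = data.frame(cp = seq(0, 0.02, by = 0.002)))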
plot(modelo_cart)
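plot(modelo_cart) shows accuracy against cp; to inspect the tree itself, the final rpart fit is stored in modelo_cart$finalModel and can be drawn with the rpart.plot package (assumed installed):

library(rpart.plot)
rpart.plot(modelo_cart$finalModel)   # the selected CART tree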
### Class prediction
CLASE.CART <- predict(modelo_cart, newdata = Test)  # the argument must be newdata; data= is silently ignored by predict.train
head(CLASE.CART)
## [1] 2 4 2 4 2 4
## Levels: 2 4
PROBA.CART <- predict(modelo_cart, newdata = Test, type = "prob")
PROBA.CART <- PROBA.CART[, 2]  # column 2 is the probability of class '4' (malignant)
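With class predictions on Test in hand, caret's confusionMatrix() gives a test-set summary (accuracy, sensitivity, specificity); a sketch, taking class '4' (malignant) as the positive class:

confusionMatrix(data = CLASE.CART, reference = Test$V11, positive = "4")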
modelLookup("glm")
## model parameter label forReg forClass probModel
## 1 glm parameter parameter TRUE TRUE TRUE
ctrl <- trainControl(method = "cv", number = 10)
set.seed(123)
modelo_RL <- train(V11 ~ .,
                   data = datos,
                   method = "glm",
                   family = "binomial",
                   trControl = ctrl,
                   metric = "Accuracy",
                   tuneLength = 5)  # glm has no tuning parameter, so tuneLength has no effect here
modelo_RL
## Generalized Linear Model
##
## 699 samples
## 10 predictor
## 2 classes: '2', '4'
##
## No pre-processing
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 629, 629, 629, 629, 629, 630, ...
## Resampling results:
##
## Accuracy Kappa
## 0.9656096 0.9236944
varImp(modelo_RL)
## glm variable importance
##
## Overall
## V7 100.000
## V2 84.104
## V8 55.361
## V5 42.675
## V10 36.108
## V9 31.631
## V4 31.428
## V6 7.023
## V1 2.145
## V3 0.000
plot(varImp(modelo_RL))
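The logistic model can be checked on Test the same way; the ROC sketch below uses the pROC package (assumed installed), and CLASE.RL / PROBA.RL are names introduced here for illustration:

CLASE.RL <- predict(modelo_RL, newdata = Test)
confusionMatrix(data = CLASE.RL, reference = Test$V11, positive = "4")
PROBA.RL <- predict(modelo_RL, newdata = Test, type = "prob")[, "4"]
library(pROC)
roc_RL <- roc(response = Test$V11, predictor = PROBA.RL)
plot(roc_RL)   # ROC curve for the logistic model
auc(roc_RL)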
# PCA via caret: numeric columns are centered, scaled, and projected onto the first two components;
# the factor V11 passes through untouched (note that the numeric V1 id column is included in the projection)
parametros <- preProcess(datos, method=c('pca'), pcaComp = 2)
datos.pca <- predict(parametros, datos)
head(datos.pca)
## V11 PC1 PC2
## 1 2 1.450506 -0.17319005
## 2 2 -1.468439 -0.16709276
## 3 2 1.574232 -0.16115648
## 4 2 -1.505934 0.05178307
## 5 2 1.325875 -0.15311485
## 6 4 -5.050669 0.02838723
plot(datos.pca$PC1, datos.pca$PC2, col=datos.pca$V11)  # the two components, colored by tumor class
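preProcess() returns only the two requested components; to see how much variance they actually capture, prcomp() on the numeric predictors gives the full breakdown. A sketch that drops the V1 id and the V11 class (the exact proportions are not shown in the original analysis):

pca <- prcomp(datos[, 2:10], center = TRUE, scale. = TRUE)
summary(pca)   # proportion of variance explained per component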