Today, we will use a Random Forest classifier to predict the species of flowers in the classic iris dataset.
# load the required packages and the data
library(ggplot2)       # plotting
library(caret)         # data splitting, cross-validation, and model training
library(randomForest)  # backend for caret's method = "rf"
data("iris")
head(iris)
## Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1 5.1 3.5 1.4 0.2 setosa
## 2 4.9 3.0 1.4 0.2 setosa
## 3 4.7 3.2 1.3 0.2 setosa
## 4 4.6 3.1 1.5 0.2 setosa
## 5 5.0 3.6 1.4 0.2 setosa
## 6 5.4 3.9 1.7 0.4 setosa
str(iris)
## 'data.frame': 150 obs. of 5 variables:
## $ Sepal.Length: num 5.1 4.9 4.7 4.6 5 5.4 4.6 5 4.4 4.9 ...
## $ Sepal.Width : num 3.5 3 3.2 3.1 3.6 3.9 3.4 3.4 2.9 3.1 ...
## $ Petal.Length: num 1.4 1.4 1.3 1.5 1.4 1.7 1.4 1.5 1.4 1.5 ...
## $ Petal.Width : num 0.2 0.2 0.2 0.2 0.2 0.4 0.3 0.2 0.2 0.1 ...
## $ Species : Factor w/ 3 levels "setosa","versicolor",..: 1 1 1 1 1 1 1 1 1 1 ...
Now let’s get more info!
summary(iris)
## Sepal.Length Sepal.Width Petal.Length Petal.Width
## Min. :4.300 Min. :2.000 Min. :1.000 Min. :0.100
## 1st Qu.:5.100 1st Qu.:2.800 1st Qu.:1.600 1st Qu.:0.300
## Median :5.800 Median :3.000 Median :4.350 Median :1.300
## Mean :5.843 Mean :3.057 Mean :3.758 Mean :1.199
## 3rd Qu.:6.400 3rd Qu.:3.300 3rd Qu.:5.100 3rd Qu.:1.800
## Max. :7.900 Max. :4.400 Max. :6.900 Max. :2.500
## Species
## setosa :50
## versicolor:50
## virginica :50
##
##
##
dim(iris)
## [1] 150 5
names(iris)
## [1] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width" "Species"
Visualizing the data
# boxplot analysis: one boxplot per feature, grouped by species
x_names <- names(iris)[-5]  # feature column names (everything except Species)
# sepal length
plot_sepal_length <-
ggplot(iris, aes(x = Species, y = Sepal.Length, color = Species)) +
geom_boxplot(
outlier.color = "red",
outlier.shape = 8,
outlier.size = 4
) + labs(title = "Sepal Length by Species",
x = "Species",
y = "Sepal Length (cm)")
plot_sepal_length + scale_color_brewer(palette = "Dark2") + theme_classic()
# sepal width
plot_sepal_width <-
ggplot(iris, aes(x = Species, y = Sepal.Width, color = Species)) +
geom_boxplot(
outlier.color = "red",
outlier.shape = 8,
outlier.size = 4
) + labs(title = "Sepal Width by Species",
x = "Species",
y = "Sepal Width (cm)")
plot_sepal_width + scale_color_brewer(palette = "Dark2") + theme_classic()
# petal length
plot_petal_length <-
ggplot(iris, aes(x = Species, y = Petal.Length, color = Species)) +
geom_boxplot(
outlier.color = "red",
outlier.shape = 8,
outlier.size = 4
) + labs(title = "Petal Length by Species",
x = "Species",
y = "Petal Length (cm)")
plot_petal_length + scale_color_brewer(palette = "Dark2") + theme_classic()
# petal width
plot_petal_width <-
ggplot(iris, aes(x = Species, y = Petal.Width, color = Species)) +
geom_boxplot(
outlier.color = "red",
outlier.shape = 8,
outlier.size = 4
) + labs(title = "Petal Width by Species",
x = "Species",
y = "Petal Width (cm)")
plot_petal_width + scale_color_brewer(palette = "Dark2") + theme_classic()
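The four blocks above differ only in the y variable, so they can also be generated in a single loop over the x_names vector we defined earlier. A minimal sketch, using ggplot2's .data pronoun to select each column by name:
# sketch: generate all four boxplots in one loop over the feature names
for (v in x_names) {
  p <- ggplot(iris, aes(x = Species, y = .data[[v]], color = Species)) +
    geom_boxplot(outlier.color = "red", outlier.shape = 8, outlier.size = 4) +
    labs(title = paste(v, "by Species"), x = "Species", y = v) +
    scale_color_brewer(palette = "Dark2") +
    theme_classic()
  print(p)  # inside a loop, plots must be printed explicitly
}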
Here is what we will do next:
- encode the labels if needed (Species is already a factor, so no extra work here)
- split the data into training and test sets
- standardize the features
- train a Random Forest and evaluate it on the test set
We start with standardization: each feature is centered to mean 0 and scaled to unit variance.
# work on a copy so the original iris data stays untouched
dataset <- iris
# feature scaling on all columns except Species
dataset[,-5] <- scale(dataset[,-5])
Train/test split
createDataPartition stratifies on Species, so both sets keep the 1:1:1 class balance.
set.seed(42)  # make the split reproducible
validationIndex <- createDataPartition(dataset$Species, p = 0.70, list = FALSE)
train_sample <- dataset[validationIndex,]  # 70% of the data for training
test_sample <- dataset[-validationIndex,]  # remaining 30% for testing
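Note that we scaled the full dataset before splitting, which leaks test-set statistics into training. It makes little practical difference on iris, but the cleaner approach is to learn the scaling parameters from the training set only. A minimal sketch using caret's preProcess (raw_train, raw_test, and pp are names introduced here for illustration):
# sketch: leakage-free scaling with caret's preProcess
raw_train <- iris[validationIndex,]
raw_test <- iris[-validationIndex,]
pp <- preProcess(raw_train[,-5], method = c("center", "scale"))  # fit on training features only
raw_train[,-5] <- predict(pp, raw_train[,-5])  # transform the training set
raw_test[,-5] <- predict(pp, raw_test[,-5])    # transform the test set with the same parameters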
Building the model
For the full list of available models and their tuning parameters, see the caret documentation.
# 10-fold cross-validation, repeated 3 times
trainControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 3)
metric <- "Accuracy"
set.seed(42)
grid <- expand.grid(mtry = seq(1, 4, by = 1))  # mtry: number of predictors randomly sampled as split candidates
fit.rf <- train(Species ~ .,
data = train_sample,
method = "rf",
metric = metric,
tuneGrid = grid,
trControl = trainControl)
print(fit.rf)
## Random Forest
##
## 105 samples
## 4 predictor
## 3 classes: 'setosa', 'versicolor', 'virginica'
##
## No pre-processing
## Resampling: Cross-Validated (10 fold, repeated 3 times)
## Summary of sample sizes: 95, 95, 94, 96, 93, 96, ...
## Resampling results across tuning parameters:
##
## mtry Accuracy Kappa
## 1 0.9572727 0.9355280
## 2 0.9517172 0.9271947
## 3 0.9483838 0.9221441
## 4 0.9517172 0.9271947
##
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was mtry = 1.
Sampling \(1\) variable at each split works best here: the selected model reaches a cross-validated accuracy of about \(95.7\)%.
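As a quick extra check (not part of the pipeline above), caret's varImp reports which predictors the forest relies on most; expect the petal measurements to dominate:
# variable importance of the tuned forest
print(varImp(fit.rf))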
Confusion matrix
pred <- predict(fit.rf, newdata = test_sample)  # extra columns like Species are ignored; only the predictors are used
cf <- confusionMatrix(pred, test_sample$Species)
print(cf)
## Confusion Matrix and Statistics
##
## Reference
## Prediction setosa versicolor virginica
## setosa 15 0 0
## versicolor 0 14 1
## virginica 0 1 14
##
## Overall Statistics
##
## Accuracy : 0.9556
## 95% CI : (0.8485, 0.9946)
## No Information Rate : 0.3333
## P-Value [Acc > NIR] : < 2.2e-16
##
## Kappa : 0.9333
##
## Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
## Class: setosa Class: versicolor Class: virginica
## Sensitivity 1.0000 0.9333 0.9333
## Specificity 1.0000 0.9667 0.9667
## Pos Pred Value 1.0000 0.9333 0.9333
## Neg Pred Value 1.0000 0.9667 0.9667
## Prevalence 0.3333 0.3333 0.3333
## Detection Rate 0.3333 0.3111 0.3111
## Detection Prevalence 0.3333 0.3333 0.3333
## Balanced Accuracy 1.0000 0.9500 0.9500
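The numbers in this printout are also available programmatically: confusionMatrix returns an object whose overall and byClass components hold the summary and per-class statistics.
# overall accuracy with its 95% confidence interval
cf$overall[c("Accuracy", "AccuracyLower", "AccuracyUpper")]
# per-class sensitivity and specificity
cf$byClass[, c("Sensitivity", "Specificity")]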
Sample prediction
# predict a single observation: row 100 of the scaled dataset
test_prediction <- predict(fit.rf,
newdata = dataset[100,])
# prediction
print(test_prediction)
## [1] versicolor
## Levels: setosa versicolor virginica
# actual
print(dataset[100,]$Species)
## [1] versicolor
## Levels: setosa versicolor virginica
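Finally, to classify a completely new measurement, remember that the model was trained on standardized features: the raw values must be scaled with the same center and spread before calling predict. A sketch with a hypothetical flower (new_flower, centers, and sds are illustrative names):
# a hypothetical new flower, measured in cm (raw, unscaled values)
new_flower <- data.frame(Sepal.Length = 5.0, Sepal.Width = 3.4,
                         Petal.Length = 1.5, Petal.Width = 0.2)
# reuse the centering and scaling parameters of the original features
centers <- colMeans(iris[,-5])
sds <- apply(iris[,-5], 2, sd)
new_scaled <- as.data.frame(scale(new_flower, center = centers, scale = sds))
predict(fit.rf, newdata = new_scaled)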