R Markdown

In this post, I want to predict whether a mushroom is poisonous or not using the data from https://www.kaggle.com/uciml/mushroom-classification

Here we build a neural network model with Keras (a sequential, fully connected network).

library(dplyr)
library(keras)
library(caret)
library(recipes)
library(ROSE)
library(rsample)
library(tensorflow)

reticulate::use_python(python = "C:/Users/LENOVO/anaconda3/envs/r-tensorflow-gpu/python.exe", required = TRUE)

First, we read the CSV file:

data <- read.csv("dataset/mushrooms.csv")

glimpse(data)
## Rows: 8,124
## Columns: 23
## $ class                    <chr> "p", "e", "e", "p", "e", "e", "e", "e", "p", ~
## $ cap.shape                <chr> "x", "x", "b", "x", "x", "x", "b", "b", "x", ~
## $ cap.surface              <chr> "s", "s", "s", "y", "s", "y", "s", "y", "y", ~
## $ cap.color                <chr> "n", "y", "w", "w", "g", "y", "w", "w", "w", ~
## $ bruises                  <chr> "t", "t", "t", "t", "f", "t", "t", "t", "t", ~
## $ odor                     <chr> "p", "a", "l", "p", "n", "a", "a", "l", "p", ~
## $ gill.attachment          <chr> "f", "f", "f", "f", "f", "f", "f", "f", "f", ~
## $ gill.spacing             <chr> "c", "c", "c", "c", "w", "c", "c", "c", "c", ~
## $ gill.size                <chr> "n", "b", "b", "n", "b", "b", "b", "b", "n", ~
## $ gill.color               <chr> "k", "k", "n", "n", "k", "n", "g", "n", "p", ~
## $ stalk.shape              <chr> "e", "e", "e", "e", "t", "e", "e", "e", "e", ~
## $ stalk.root               <chr> "e", "c", "c", "e", "e", "c", "c", "c", "e", ~
## $ stalk.surface.above.ring <chr> "s", "s", "s", "s", "s", "s", "s", "s", "s", ~
## $ stalk.surface.below.ring <chr> "s", "s", "s", "s", "s", "s", "s", "s", "s", ~
## $ stalk.color.above.ring   <chr> "w", "w", "w", "w", "w", "w", "w", "w", "w", ~
## $ stalk.color.below.ring   <chr> "w", "w", "w", "w", "w", "w", "w", "w", "w", ~
## $ veil.type                <chr> "p", "p", "p", "p", "p", "p", "p", "p", "p", ~
## $ veil.color               <chr> "w", "w", "w", "w", "w", "w", "w", "w", "w", ~
## $ ring.number              <chr> "o", "o", "o", "o", "o", "o", "o", "o", "o", ~
## $ ring.type                <chr> "p", "p", "p", "p", "e", "p", "p", "p", "p", ~
## $ spore.print.color        <chr> "k", "n", "n", "k", "n", "k", "k", "n", "k", ~
## $ population               <chr> "s", "n", "n", "s", "a", "n", "n", "s", "v", ~
## $ habitat                  <chr> "u", "g", "m", "u", "g", "g", "m", "m", "g", ~

It turns out the data contains no NA values:

anyNA(data)
## [1] FALSE

Next, we fix the column types: every column is categorical, so we convert them all to factors. We also drop veil.type, which has only a single level (every row is "p") and therefore carries no information.

df <- data %>% 
  mutate_all(as.factor) %>% 
  select(-veil.type)
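
A quick check of that claim (illustrative, not part of the original run):

# veil.type has a single level in this dataset, so it carries no information
n_distinct(data$veil.type)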

With the data types in order, we build a recipe object that one-hot encodes all nominal predictors using the recipe function:

rec_obj <- recipe(class ~ ., data = df) %>% 
  step_dummy(all_nominal_predictors()) %>% 
  prep(df)

After creating the object, we transform the data according to the recipe using bake:

df_transform <- bake(rec_obj, df)

head(df_transform)
## # A tibble: 6 x 96
##   class cap.shape_c cap.shape_f cap.shape_k cap.shape_s cap.shape_x
##   <fct>       <dbl>       <dbl>       <dbl>       <dbl>       <dbl>
## 1 p               0           0           0           0           1
## 2 e               0           0           0           0           1
## 3 e               0           0           0           0           0
## 4 p               0           0           0           0           1
## 5 e               0           0           0           0           1
## 6 e               0           0           0           0           1
## # ... with 90 more variables: cap.surface_g <dbl>, cap.surface_s <dbl>,
## #   cap.surface_y <dbl>, cap.color_c <dbl>, cap.color_e <dbl>,
## #   cap.color_g <dbl>, cap.color_n <dbl>, cap.color_p <dbl>, cap.color_r <dbl>,
## #   cap.color_u <dbl>, cap.color_w <dbl>, cap.color_y <dbl>, bruises_t <dbl>,
## #   odor_c <dbl>, odor_f <dbl>, odor_l <dbl>, odor_m <dbl>, odor_n <dbl>,
## #   odor_p <dbl>, odor_s <dbl>, odor_y <dbl>, gill.attachment_f <dbl>,
## #   gill.spacing_w <dbl>, gill.size_n <dbl>, gill.color_e <dbl>,
## #   gill.color_g <dbl>, gill.color_h <dbl>, gill.color_k <dbl>,
## #   gill.color_n <dbl>, gill.color_o <dbl>, gill.color_p <dbl>,
## #   gill.color_r <dbl>, gill.color_u <dbl>, gill.color_w <dbl>,
## #   gill.color_y <dbl>, stalk.shape_t <dbl>, stalk.root_b <dbl>,
## #   stalk.root_c <dbl>, stalk.root_e <dbl>, stalk.root_r <dbl>,
## #   stalk.surface.above.ring_k <dbl>, stalk.surface.above.ring_s <dbl>,
## #   stalk.surface.above.ring_y <dbl>, stalk.surface.below.ring_k <dbl>,
## #   stalk.surface.below.ring_s <dbl>, stalk.surface.below.ring_y <dbl>,
## #   stalk.color.above.ring_c <dbl>, stalk.color.above.ring_e <dbl>,
## #   stalk.color.above.ring_g <dbl>, stalk.color.above.ring_n <dbl>,
## #   stalk.color.above.ring_o <dbl>, stalk.color.above.ring_p <dbl>,
## #   stalk.color.above.ring_w <dbl>, stalk.color.above.ring_y <dbl>,
## #   stalk.color.below.ring_c <dbl>, stalk.color.below.ring_e <dbl>,
## #   stalk.color.below.ring_g <dbl>, stalk.color.below.ring_n <dbl>,
## #   stalk.color.below.ring_o <dbl>, stalk.color.below.ring_p <dbl>,
## #   stalk.color.below.ring_w <dbl>, stalk.color.below.ring_y <dbl>,
## #   veil.color_o <dbl>, veil.color_w <dbl>, veil.color_y <dbl>,
## #   ring.number_o <dbl>, ring.number_t <dbl>, ring.type_f <dbl>,
## #   ring.type_l <dbl>, ring.type_n <dbl>, ring.type_p <dbl>,
## #   spore.print.color_h <dbl>, spore.print.color_k <dbl>,
## #   spore.print.color_n <dbl>, spore.print.color_o <dbl>,
## #   spore.print.color_r <dbl>, spore.print.color_u <dbl>,
## #   spore.print.color_w <dbl>, spore.print.color_y <dbl>, population_c <dbl>,
## #   population_n <dbl>, population_s <dbl>, population_v <dbl>,
## #   population_y <dbl>, habitat_g <dbl>, habitat_l <dbl>, habitat_m <dbl>,
## #   habitat_p <dbl>, habitat_u <dbl>, habitat_w <dbl>

Next, we split the data into training and testing sets with a proportion of 0.8 (80% training):

set.seed(2021)

split <- initial_split(data = df_transform, prop = 0.8)

train <- training(split)
test <- testing(split)

Here we check whether the target in the training data is balanced:

table(as.factor(train$class)) %>% prop.table()
## 
##         e         p 
## 0.5190029 0.4809971
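
The split is roughly 52/48, which is balanced enough. Had the classes been heavily skewed, the ROSE package loaded earlier could rebalance them; a minimal, hypothetical sketch (not run here), assuming we wanted a 50/50 mix:

# Hypothetical: over- and under-sample the training set toward p = 0.5
train_bal <- ovun.sample(class ~ ., data = train, method = "both",
                         p = 0.5, seed = 2021)$data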

Since no resampling is required, we write a pair of helper functions to encode the target label as a number and decode it back:

encode_label <- function(x){
  ifelse(x == "p", 1, 0)
}

decode_label <- function(x){
  ifelse(x == 1, "p", "e")
}
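
A quick sanity check of the pair (illustrative):

encode_label(c("p", "e"))  # 1 0
decode_label(c(1, 0))      # "p" "e"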

Next, we separate the predictors from the target and convert each into the form Keras expects:

# Predictor columns as numeric matrices
train_x <- train %>% select(-class) %>% as.matrix()
test_x <- test %>% select(-class) %>% as.matrix()

# Reshape in row-major (C-style) order, the layout TensorFlow expects
train_x <- array_reshape(train_x, dim(train_x))
test_x <- array_reshape(test_x, dim(test_x))

# One-hot encode the 0/1 target labels into two columns
train_y <- to_categorical(sapply(train$class, encode_label))
test_y <- to_categorical(sapply(test$class, encode_label))

n_input <- ncol(train_x)
n_output <- ncol(train_y)
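
A quick look at the resulting dimensions (illustrative): df_transform has 96 columns, so after dropping class we expect 95 predictors and 2 one-hot target columns.

n_input   # 95 dummy predictors
n_output  # 2 one-hot target columns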

Now we build the network architecture: three hidden layers and one output layer. Each hidden layer has 64 units with tanh activation, and the output layer uses a sigmoid activation over the two one-hot columns (softmax is the more conventional pairing with categorical cross-entropy, but sigmoid also works here).

For the optimizer we use Adam with a learning rate of 0.0001.

For training we use 10 epochs and a batch size of 64.

model_base <- keras_model_sequential(name = "model_base") %>% 
  layer_dense(units = 64,
              input_shape = n_input,
              activation = "tanh",
              name = "layer1") %>% 
  layer_dense(units = 64,
              activation = "tanh",
              name = "layer2") %>% 
  layer_dense(units = 64,
              activation = "tanh",
              name = "layer3") %>% 
  layer_dense(units = n_output,
              activation = "sigmoid",
              name = "output")

model_base %>% 
  compile(loss = "categorical_crossentropy",
          metrics = "accuracy",
          optimizer = optimizer_adam(learning_rate = 0.0001))

set.seed(2021)

history_base <- model_base %>% 
  fit(x = train_x,
      y = train_y,
      epochs = 10,
      batch_size = 64,
      validation_data = list(test_x, test_y))

plot(history_base)
## `geom_smooth()` using formula 'y ~ x'

Once the model has trained, we can use it to make predictions.

# Pick the class with the highest predicted probability (0 = e, 1 = p)
pred_test <- predict(model_base, test_x) %>% k_argmax() %>% as.array()

With the predictions converted back to labels (via decode_label), we run confusionMatrix to inspect accuracy, recall, and precision:

confusionMatrix(as.factor(sapply(pred_test, decode_label)), as.factor(test$class))
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   e   p
##          e 835   4
##          p   0 786
##                                           
##                Accuracy : 0.9975          
##                  95% CI : (0.9937, 0.9993)
##     No Information Rate : 0.5138          
##     P-Value [Acc > NIR] : <2e-16          
##                                           
##                   Kappa : 0.9951          
##                                           
##  Mcnemar's Test P-Value : 0.1336          
##                                           
##             Sensitivity : 1.0000          
##             Specificity : 0.9949          
##          Pos Pred Value : 0.9952          
##          Neg Pred Value : 1.0000          
##              Prevalence : 0.5138          
##          Detection Rate : 0.5138          
##    Detection Prevalence : 0.5163          
##       Balanced Accuracy : 0.9975          
##                                           
##        'Positive' Class : e               
## 

From the results above we get an accuracy above 99%, which is nearly perfect. In my view, a neural network is overkill for this problem; a logistic regression or decision tree would likely perform just as well.
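
As a rough illustration of that alternative, here is a minimal sketch of a decision tree fit on the same split (assumes the rpart package is installed; hypothetical, not part of the original analysis):

library(rpart)

# Fit a simple classification tree on the same one-hot features
model_tree <- rpart(class ~ ., data = train, method = "class")
pred_tree <- predict(model_tree, test, type = "class")

confusionMatrix(pred_tree, test$class)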