1. Naive Bayes

# Load libraries
library(e1071)
## Warning: package 'e1071' was built under R version 4.3.3
library(caret)
## Warning: package 'caret' was built under R version 4.3.3
## Loading required package: ggplot2
## Warning: package 'ggplot2' was built under R version 4.3.3
## Loading required package: lattice
library(MLmetrics)
## Warning: package 'MLmetrics' was built under R version 4.3.3
## 
## Attaching package: 'MLmetrics'
## The following objects are masked from 'package:caret':
## 
##     MAE, RMSE
## The following object is masked from 'package:base':
## 
##     Recall
library(class)
library(FSelector)
## Warning: package 'FSelector' was built under R version 4.3.3
library(randomForest)
## Warning: package 'randomForest' was built under R version 4.3.3
## randomForest 4.7-1.2
## Type rfNews() to see new features/changes/bug fixes.
## 
## Attaching package: 'randomForest'
## The following object is masked from 'package:ggplot2':
## 
##     margin
library(party)
## Warning: package 'party' was built under R version 4.3.3
## Loading required package: grid
## Loading required package: mvtnorm
## Warning: package 'mvtnorm' was built under R version 4.3.3
## Loading required package: modeltools
## Loading required package: stats4
## Loading required package: strucchange
## Warning: package 'strucchange' was built under R version 4.3.3
## Loading required package: zoo
## Warning: package 'zoo' was built under R version 4.3.3
## 
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
## Loading required package: sandwich
## Warning: package 'sandwich' was built under R version 4.3.3
# Load dataset
data <- read.csv("prepro_databank.csv", header = TRUE)

Data Preparation

# Ensure the target 'y' is a factor
data$y <- as.factor(data$y)

# Convert the categorical variables to factors
categorical_cols <- c("job", "marital", "education", "default", "housing",
                      "loan", "contact", "month", "poutcome")
data[categorical_cols] <- lapply(data[categorical_cols], as.factor)

# Feature selection using Information Gain
weights <- information.gain(y ~ ., data = data)
weights
##           attr_importance
## job          6.925136e-03
## marital      2.060405e-03
## education    1.639668e-03
## default      8.418705e-07
## balance      3.699592e-03
## housing      5.425481e-03
## loan         2.850844e-03
## contact      1.132256e-02
## day          0.000000e+00
## month        2.072607e-02
## duration     7.277524e-02
## campaign     0.000000e+00
## pdays        2.463002e-02
## previous     1.124728e-02
## poutcome     2.604928e-02
## Cluster      3.002471e-01
## age_group    7.798001e-03
selected_features <- cutoff.k(weights, 10)  # keep the 10 best-ranked features
selected_features
##  [1] "Cluster"   "duration"  "poutcome"  "pdays"     "month"     "contact"  
##  [7] "previous"  "age_group" "job"       "housing"
# Subset the data to the selected features
selected_formula <- as.formula(paste("y ~", paste(selected_features, collapse = "+")))
data_selected <- data[, c("y", selected_features)]

# Split the data into training and testing sets
set.seed(123)
trainIndex <- createDataPartition(data_selected$y, p = 0.75, list = FALSE)
trainData <- data_selected[trainIndex, ]
testData <- data_selected[-trainIndex, ]

Explanation: Classification models such as Naive Bayes generally expect the target (label) to be a factor rather than a numeric vector. In this dataset the target is still numeric (0, 1), so it is first converted to a factor so that it is treated as a categorical label rather than an ordinal value; the same conversion is applied to the other categorical variables. Next, feature selection is performed with Information Gain. This reduces the dimensionality of the data so that the model trains faster, is less prone to overfitting, and is easier to interpret, while focusing only on the features most informative for predicting y. Information Gain is a natural choice here because the target y is categorical (0 and 1). information.gain() measures how much each feature reduces the uncertainty (entropy) of the target y: the larger the Information Gain, the more relevant the feature. The 10 features with the highest Information Gain scores are therefore selected.
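As a rough illustration of what information.gain() computes, the sketch below hand-rolls the entropy-based score for a single categorical feature on toy vectors (entropy and info_gain are hypothetical helper names introduced here; FSelector additionally discretizes numeric columns before applying the same idea):

# A minimal sketch of information gain for one feature (toy data, not the bank dataset)
# IG(x) = H(y) - H(y | x)
entropy <- function(y) {
  p <- prop.table(table(y))
  p <- p[p > 0]                       # drop empty levels to avoid 0 * log(0)
  -sum(p * log2(p))
}
info_gain <- function(x, y) {
  entropy(y) - sum(prop.table(table(x)) * tapply(y, x, entropy))
}
info_gain(x = c("a", "a", "b", "b"), y = factor(c(0, 0, 0, 1)))  # ~0.311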

Building the Naive Bayes Model

# Naive Bayes Model
model_nb <- naiveBayes(selected_formula, data = trainData)
model_nb
## 
## Naive Bayes Classifier for Discrete Predictors
## 
## Call:
## naiveBayes.default(x = X, y = Y, laplace = laplace)
## 
## A-priori probabilities:
## Y
##         0         1 
## 0.8846948 0.1153052 
## 
## Conditional probabilities:
##    Cluster
## Y      [,1]      [,2]
##   0 4.06000 2.0389014
##   1 5.86445 0.5109465
## 
##    duration
## Y       [,1]     [,2]
##   0 224.8693 208.7637
##   1 536.6036 369.6727
## 
##    poutcome
## Y            1          2          3          4
##   0 0.83933333 0.10833333 0.04033333 0.01200000
##   1 0.63427110 0.12276215 0.07416880 0.16879795
## 
##    pdays
## Y       [,1]      [,2]
##   0 36.28833  96.14579
##   1 71.58312 126.36681
## 
##    month
## Y             1           2           3           4           5           6
##   0 0.034666667 0.046000000 0.006000000 0.063333333 0.326000000 0.114333333
##   1 0.035805627 0.071611253 0.043478261 0.107416880 0.194373402 0.092071611
##    month
## Y             7           8           9          10          11          12
##   0 0.159000000 0.138333333 0.008666667 0.011000000 0.089666667 0.003000000
##   1 0.125319693 0.140664962 0.030690537 0.066496164 0.071611253 0.020460358
## 
##    contact
## Y            1          2          3
##   0 0.62600000 0.30833333 0.06566667
##   1 0.80562660 0.10741688 0.08695652
## 
##    previous
## Y        [,1]     [,2]
##   0 0.4883333 1.679128
##   1 1.0869565 1.959228
## 
##    age_group
## Y        Adult Middle-aged     Senior      Young
##   0 0.52066667  0.43700000 0.01966667 0.02266667
##   1 0.47058824  0.37340153 0.10485934 0.05115090
## 
##    job
## Y            1          2          3          4          5          6
##   0 0.20533333 0.21600000 0.17133333 0.10933333 0.09600000 0.04500000
##   1 0.25319693 0.13299233 0.14833760 0.10485934 0.07672634 0.10230179
##    job
## Y            7          8          9         10         11         12
##   0 0.04033333 0.03700000 0.02766667 0.02633333 0.01666667 0.00900000
##   1 0.03580563 0.03324808 0.02813299 0.02813299 0.04092072 0.01534527
## 
##    housing
## Y           0         1
##   0 0.4153333 0.5846667
##   1 0.5754476 0.4245524

The fitted model is a Naive Bayes classifier for discrete predictors: categorical features are handled via class-conditional category frequencies, while numeric features are modeled with a simple Gaussian per class (the [,1] and [,2] columns above are the per-class mean and standard deviation). The model assumes conditional independence between features (the features are treated as not directly influencing each other). The output begins with the a-priori probabilities (the class proportions before observing the features): class 0 makes up 88.47% (majority) and class 1 only 11.53% (minority), so the dataset is heavily imbalanced toward class 0. Next come the conditional probabilities per feature, which show how each feature's distribution differs between the two classes. From these patterns the model learns to classify whether a customer will be granted a loan ("yes" = 1) or not ("no" = 0) when applying to the bank.
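To see how the priors and per-feature conditional probabilities combine into a prediction, the posterior class probabilities can be inspected directly; type = "raw" in e1071's predict method returns P(class | x) for each row:

# Posterior probabilities for the first few test rows: the class prior is
# multiplied by each feature's conditional likelihood and normalized per row
head(predict(model_nb, testData, type = "raw"))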

Next, predictions are generated for the training and testing data; they will then be compared against the actual labels.

Prediction with the Naive Bayes Model

# Predictions for the training and testing data
pred_train <- predict(model_nb, trainData)
pred_test <- predict(model_nb, testData)

The predictions are evaluated using a confusion matrix and several metrics: Accuracy, Recall (Sensitivity), F1-score, Balanced Accuracy, and Macro Average F1-score.

Evaluation of the Naive Bayes Predictions

For the Training Data

# === Training Set Evaluation ===
# Build the confusion matrix
conf_train <- confusionMatrix(pred_train, trainData$y)

# Extract the confusion matrix table
conf_matrix <- conf_train$table

# Read the per-class metrics directly from the confusion matrix object
recall_kelas_0 <- conf_train$byClass["Sensitivity"] # Recall for class 0 (caret's 'Positive' class)
recall_kelas_1 <- conf_train$byClass["Specificity"] # Recall for class 1
f1_kelas_0     <- conf_train$byClass["F1"]          # F1-score for class 0
# Compute the F1-score for class 1 manually
# Precision class 1 = TP_1 / (TP_1 + FP_1)
# TP_1 = pred = 1, true = 1 => conf_matrix[2,2]
# FP_1 = pred = 1, true = 0 => conf_matrix[2,1]
precision_1 <- conf_matrix[2,2] / (conf_matrix[2,2] + conf_matrix[2,1])
recall_1 <- recall_kelas_1
f1_kelas_1 <- 2 * (precision_1 * recall_1) / (precision_1 + recall_1) # F1-score for class 1
accuracy_train <- conf_train$overall["Accuracy"]

# Compute Balanced Accuracy and Macro F1
balanced_accuracy <- (recall_kelas_0 + recall_kelas_1) / 2
f1_macro <- (f1_kelas_0 + f1_kelas_1) / 2
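
# (Illustrative sketch, not used for the outputs below.) The same quantities
# can be derived generically from any 2x2 confusion table whose rows are
# predictions and whose columns are the reference labels:
macro_metrics <- function(cm) {
  recall <- diag(cm) / colSums(cm)     # per-class recall
  precision <- diag(cm) / rowSums(cm)  # per-class precision
  f1 <- 2 * precision * recall / (precision + recall)
  c(balanced_accuracy = mean(recall), macro_f1 = mean(f1))
}
# e.g. macro_metrics(conf_train$table)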

# Display the evaluation results for the training data
cat("\n=== Training Set Evaluation ===\n")
## 
## === Training Set Evaluation ===
print(conf_train)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction    0    1
##          0 2837  130
##          1  163  261
##                                           
##                Accuracy : 0.9136          
##                  95% CI : (0.9036, 0.9228)
##     No Information Rate : 0.8847          
##     P-Value [Acc > NIR] : 2.54e-08        
##                                           
##                   Kappa : 0.5915          
##                                           
##  Mcnemar's Test P-Value : 0.06156         
##                                           
##             Sensitivity : 0.9457          
##             Specificity : 0.6675          
##          Pos Pred Value : 0.9562          
##          Neg Pred Value : 0.6156          
##              Prevalence : 0.8847          
##          Detection Rate : 0.8366          
##    Detection Prevalence : 0.8750          
##       Balanced Accuracy : 0.8066          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (no): %.4f\n", recall_kelas_0))
## Recall kelas 0 (no): 0.9457
cat(sprintf("F1-score kelas 0 (no): %.4f\n", f1_kelas_0))
## F1-score kelas 0 (no): 0.9509
cat(sprintf("Recall kelas 1 (yes): %.4f\n", recall_kelas_1))
## Recall kelas 1 (yes): 0.6675
cat(sprintf("F1-score kelas 1 (yes): %.4f\n", f1_kelas_1))
## F1-score kelas 1 (yes): 0.6405
cat(sprintf("Accuracy: %.4f\n", accuracy_train))
## Accuracy: 0.9136
cat(sprintf("Balanced Accuracy: %.4f\n", balanced_accuracy))
## Balanced Accuracy: 0.8066
cat(sprintf("Macro Average F1-score: %.4f\n", f1_macro))
## Macro Average F1-score: 0.7957

Interpretation: Confusion matrix: of the training data, 2837 observations are correctly classified as class 0 (no) and 261 are correctly classified as class 1 (yes); 130 class-1 observations are misclassified as 0 (false negatives), and 163 class-0 observations are misclassified as 1 (false positives).

The Balanced Accuracy of 0.8066 is the average of the recalls of the two classes; it is used to avoid the bias that plain accuracy suffers under class imbalance.

The Macro F1-score of 0.7957 means that, averaged over both classes (0 and 1), the model balances precision and recall at roughly 79.6%.
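Both summary values follow directly from the per-class numbers reported above:

$$\text{Balanced Accuracy} = \frac{0.9457 + 0.6675}{2} = 0.8066, \qquad \text{Macro F1} = \frac{0.9509 + 0.6405}{2} = 0.7957$$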

For the Testing Data

# === Testing Set Evaluation ===
# Build the confusion matrix
conf_test <- confusionMatrix(pred_test, testData$y)
# Extract the confusion matrix table for the testing data
conf_matrix_test <- conf_test$table

# Read the per-class metrics directly from the confusion matrix object
recall_kelas_0_test <- conf_test$byClass["Sensitivity"]  # Recall for class 0 (caret's 'Positive' class)
recall_kelas_1_test <- conf_test$byClass["Specificity"]  # Recall for class 1
f1_kelas_0_test     <- conf_test$byClass["F1"]           # F1-score for class 0

# Compute the F1-score for class 1 manually
# Precision class 1 = TP_1 / (TP_1 + FP_1)
precision_1_test <- conf_matrix_test[2,2] / (conf_matrix_test[2,2] + conf_matrix_test[2,1])
recall_1_test <- recall_kelas_1_test
f1_kelas_1_test <- 2 * (precision_1_test * recall_1_test) / (precision_1_test + recall_1_test)
accuracy_test <- conf_test$overall["Accuracy"]

# Compute Balanced Accuracy and Macro Average F1-score
balanced_accuracy_test <- (recall_kelas_0_test + recall_kelas_1_test) / 2
f1_macro_test <- (f1_kelas_0_test + f1_kelas_1_test) / 2

# Display the evaluation results for the testing data
cat("\n=== Testing Set Evaluation ===\n")
## 
## === Testing Set Evaluation ===
print(conf_test)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   0   1
##          0 961  44
##          1  39  86
##                                           
##                Accuracy : 0.9265          
##                  95% CI : (0.9098, 0.9411)
##     No Information Rate : 0.885           
##     P-Value [Acc > NIR] : 2.202e-06       
##                                           
##                   Kappa : 0.6331          
##                                           
##  Mcnemar's Test P-Value : 0.6606          
##                                           
##             Sensitivity : 0.9610          
##             Specificity : 0.6615          
##          Pos Pred Value : 0.9562          
##          Neg Pred Value : 0.6880          
##              Prevalence : 0.8850          
##          Detection Rate : 0.8504          
##    Detection Prevalence : 0.8894          
##       Balanced Accuracy : 0.8113          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (no): %.4f\n", recall_kelas_0_test))
## Recall kelas 0 (no): 0.9610
cat(sprintf("F1-score kelas 0 (no): %.4f\n", f1_kelas_0_test))
## F1-score kelas 0 (no): 0.9586
cat(sprintf("Recall kelas 1 (yes): %.4f\n", recall_kelas_1_test))
## Recall kelas 1 (yes): 0.6615
cat(sprintf("F1-score kelas 1 (yes): %.4f\n", f1_kelas_1_test))
## F1-score kelas 1 (yes): 0.6745
cat(sprintf("Accuracy: %.4f\n", accuracy_test))
## Accuracy: 0.9265
cat(sprintf("Balanced Accuracy: %.4f\n", balanced_accuracy_test))
## Balanced Accuracy: 0.8113
cat(sprintf("Macro Average F1-score: %.4f\n", f1_macro_test))
## Macro Average F1-score: 0.8166

Interpretation: Confusion matrix: of the testing data, 961 observations are correctly classified as class 0 (no) and 86 are correctly classified as class 1 (yes); 44 class-1 observations are misclassified as 0 (false negatives), and 39 class-0 observations are misclassified as 1 (false positives).

The Balanced Accuracy of 0.8113 is the average recall of the two classes, used to avoid the bias caused by class imbalance; 81.13% indicates the model recognizes both classes fairly evenly.

The Macro F1-score of 0.8166 means that, averaged over both classes (0 and 1), the model balances precision and recall at roughly 81.66%; the model is relatively fair to both classes, although performance on class 1 (yes) remains lower than on class 0.

2. Random Forest Model

Basic principle: a Random Forest is an ensemble of many decision trees; it makes predictions by letting the trees "vote".

How it works: 1. Grow many decision trees on random subsets of the data and features. 2. Each tree makes its own prediction. 3. The final result is the majority vote (for classification) or the average (for regression), as the sketch below illustrates.
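A minimal sketch of the voting idea, using randomForest on the built-in iris data as a stand-in (illustrative only; rf_demo is not part of the pipeline below):

# Each of the 50 trees casts a vote; type = "vote" returns per-class vote shares
library(randomForest)
set.seed(123)
rf_demo <- randomForest(Species ~ ., data = iris, ntree = 50)
head(predict(rf_demo, iris, type = "vote"))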

# Load libraries
library(e1071)
library(caret)
library(MLmetrics)
library(class)
library(FSelector)
library(randomForest)
library(party)

# Load dataset
data <- read.csv("prepro_databank.csv", header = TRUE)

Data Preparation

# Ensure the target 'y' is a factor
data$y <- as.factor(data$y)

# Convert the categorical variables to factors
categorical_cols <- c("job", "marital", "education", "default", "housing",
                      "loan", "contact", "month", "poutcome")
data[categorical_cols] <- lapply(data[categorical_cols], as.factor)

# Feature selection using Information Gain
weights <- information.gain(y ~ ., data = data)
weights
##           attr_importance
## job          6.925136e-03
## marital      2.060405e-03
## education    1.639668e-03
## default      8.418705e-07
## balance      3.699592e-03
## housing      5.425481e-03
## loan         2.850844e-03
## contact      1.132256e-02
## day          0.000000e+00
## month        2.072607e-02
## duration     7.277524e-02
## campaign     0.000000e+00
## pdays        2.463002e-02
## previous     1.124728e-02
## poutcome     2.604928e-02
## Cluster      3.002471e-01
## age_group    7.798001e-03
selected_features <- cutoff.k(weights, 10)  # keep the 10 best-ranked features
selected_features
##  [1] "Cluster"   "duration"  "poutcome"  "pdays"     "month"     "contact"  
##  [7] "previous"  "age_group" "job"       "housing"
# Subset the data to the selected features
selected_formula <- as.formula(paste("y ~", paste(selected_features, collapse = "+")))
data_selected <- data[, c("y", selected_features)]

# Split the data into training and testing sets
set.seed(123)
trainIndex <- createDataPartition(data_selected$y, p = 0.75, list = FALSE)
trainData <- data_selected[trainIndex, ]
testData <- data_selected[-trainIndex, ]

Building the Random Forest Model

library(caret)

# === SMOTE for class imbalance (training set only) ===
trainData$y <- as.factor(trainData$y)  # SMOTE requires a factor target

kontrol_cv <- trainControl(method = "cv", number = 4, sampling = "smote")

# Light tuning grid for mtry (note: the train() call below passes its own grid)
grid_mtry <- expand.grid(mtry = c(2, 3, 4, 5))

set.seed(123)
model_rf <- train(
  y ~ .,
  data = trainData,
  method = "rf",
  trControl = kontrol_cv,
  tuneGrid = expand.grid(mtry = c(2, 3, 4)),
  ntree = 50,
  nodesize = 20, # a larger nodesize keeps each tree from growing too deep
  maxnodes = 25, # cap the number of terminal nodes to curb overfitting
  importance = TRUE
)
## Warning: package 'themis' was built under R version 4.3.3
## Loading required package: recipes
## Warning: package 'recipes' was built under R version 4.3.3
## Loading required package: dplyr
## Warning: package 'dplyr' was built under R version 4.3.3
## 
## Attaching package: 'dplyr'
## The following object is masked from 'package:party':
## 
##     where
## The following object is masked from 'package:randomForest':
## 
##     combine
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
## 
## Attaching package: 'recipes'
## The following object is masked from 'package:stats4':
## 
##     update
## The following object is masked from 'package:stats':
## 
##     step

4-fold cross-validation reduces the risk of overfitting because, in every fold, the model is evaluated on data that was not used to train it.
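The fold mechanics can be inspected directly (a quick illustration on the training labels; folds is introduced here only for the illustration, and SMOTE is applied by train() inside each fold):

# 4-fold CV: each fold holds out roughly a quarter of the training rows
set.seed(123)
folds <- createFolds(trainData$y, k = 4)
sapply(folds, length)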

Evaluation on the Training Data

# === Evaluation on the training set ===
pred_train <- predict(model_rf, trainData)
evaluasi_train <- confusionMatrix(pred_train, trainData$y)

# === Extract and display the metrics ===
tabel_konfusi <- evaluasi_train$table
recall_0 <- evaluasi_train$byClass["Sensitivity"]
recall_1 <- evaluasi_train$byClass["Specificity"]
f1_0     <- evaluasi_train$byClass["F1"]
precision_1 <- tabel_konfusi[2,2] / sum(tabel_konfusi[2,])
f1_1 <- 2 * (precision_1 * recall_1) / (precision_1 + recall_1)
akurasi <- evaluasi_train$overall["Accuracy"]
akurasi_avg <- (recall_0 + recall_1) / 2
f1_macro <- (f1_0 + f1_1) / 2

# === Print the evaluation results ===
cat("\n=== Model Evaluation on Training Data ===\n")
## 
## === Model Evaluation on Training Data ===
print(evaluasi_train)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction    0    1
##          0 2895   64
##          1  105  327
##                                           
##                Accuracy : 0.9502          
##                  95% CI : (0.9423, 0.9572)
##     No Information Rate : 0.8847          
##     P-Value [Acc > NIR] : < 2.2e-16       
##                                           
##                   Kappa : 0.7664          
##                                           
##  Mcnemar's Test P-Value : 0.002091        
##                                           
##             Sensitivity : 0.9650          
##             Specificity : 0.8363          
##          Pos Pred Value : 0.9784          
##          Neg Pred Value : 0.7569          
##              Prevalence : 0.8847          
##          Detection Rate : 0.8537          
##    Detection Prevalence : 0.8726          
##       Balanced Accuracy : 0.9007          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0))
## Recall kelas 0 (negatif): 0.9650
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0))
## F1-score kelas 0 (negatif): 0.9716
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1))
## Recall kelas 1 (positif): 0.8363
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1))
## F1-score kelas 1 (positif): 0.7947
cat(sprintf("Akurasi: %.4f\n", akurasi))
## Akurasi: 0.9502
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg))
## Balanced Accuracy: 0.9007
cat(sprintf("F1-score Macro: %.4f\n", f1_macro))
## F1-score Macro: 0.8831

Evaluation on the Testing Data

# === Evaluation on the testing set ===
pred_test <- predict(model_rf, testData)
evaluasi_test <- confusionMatrix(pred_test, testData$y)

# === Extract and display the metrics ===
tabel_konfusi <- evaluasi_test$table
recall_0 <- evaluasi_test$byClass["Sensitivity"]
recall_1 <- evaluasi_test$byClass["Specificity"]
f1_0     <- evaluasi_test$byClass["F1"]
precision_1 <- tabel_konfusi[2,2] / sum(tabel_konfusi[2,])
f1_1 <- 2 * (precision_1 * recall_1) / (precision_1 + recall_1)
akurasi <- evaluasi_test$overall["Accuracy"]
akurasi_avg <- (recall_0 + recall_1) / 2
f1_macro <- (f1_0 + f1_1) / 2

# === Print the evaluation results ===
cat("\n=== Model Evaluation on Testing Data ===\n")
## 
## === Model Evaluation on Testing Data ===
print(evaluasi_test)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   0   1
##          0 970  31
##          1  30  99
##                                           
##                Accuracy : 0.946           
##                  95% CI : (0.9312, 0.9585)
##     No Information Rate : 0.885           
##     P-Value [Acc > NIR] : 1.082e-12       
##                                           
##                   Kappa : 0.734           
##                                           
##  Mcnemar's Test P-Value : 1               
##                                           
##             Sensitivity : 0.9700          
##             Specificity : 0.7615          
##          Pos Pred Value : 0.9690          
##          Neg Pred Value : 0.7674          
##              Prevalence : 0.8850          
##          Detection Rate : 0.8584          
##    Detection Prevalence : 0.8858          
##       Balanced Accuracy : 0.8658          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0))
## Recall kelas 0 (negatif): 0.9700
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0))
## F1-score kelas 0 (negatif): 0.9695
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1))
## Recall kelas 1 (positif): 0.7615
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1))
## F1-score kelas 1 (positif): 0.7645
cat(sprintf("Akurasi: %.4f\n", akurasi))
## Akurasi: 0.9460
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg))
## Balanced Accuracy: 0.8658
cat(sprintf("F1-score Macro: %.4f\n", f1_macro))
## F1-score Macro: 0.8670

3. KNN Model

Basic principle: KNN classifies a new observation based on its distance to the nearest labeled neighbors; "K" is the number of neighbors consulted.

How it works:

  1. Compute the distance (usually Euclidean) between the new observation and every training observation.
  2. Take the K observations with the smallest distances.
  3. The majority class among those neighbors is the prediction.

Strengths: - Easy to understand. - No explicit training phase (lazy learning).

Weaknesses: - Slow on large datasets. - Sensitive to feature scales (normalization is needed). A toy illustration of the distance-and-vote rule follows.
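A minimal base-R sketch of the rule described above (toy vectors with hypothetical names, not the bank dataset):

# Step 1: Euclidean distances from a new point to four labeled training points
x_tr <- matrix(c(1, 1, 1, 2, 5, 5, 6, 5), ncol = 2, byrow = TRUE)
y_tr <- factor(c("no", "no", "yes", "yes"))
x_new <- c(2, 2)
d <- sqrt(rowSums((x_tr - matrix(x_new, nrow(x_tr), 2, byrow = TRUE))^2))
# Steps 2-3: take the k nearest neighbors and let them vote
k <- 3
nearest <- order(d)[1:k]
names(which.max(table(y_tr[nearest])))  # majority vote -> "no"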

# Load libraries
library(e1071)
library(caret)
library(MLmetrics)
library(class)
library(FSelector)
library(randomForest)
library(party)

# Load dataset
data <- read.csv("prepro_databank.csv", header = TRUE)

Data Preparation

# Ensure the target 'y' is a factor
data$y <- as.factor(data$y)

# Convert the categorical variables to factors
categorical_cols <- c("job", "marital", "education", "default", "housing",
                      "loan", "contact", "month", "poutcome")
data[categorical_cols] <- lapply(data[categorical_cols], as.factor)

# Feature selection using Information Gain
weights <- information.gain(y ~ ., data = data)
weights
##           attr_importance
## job          6.925136e-03
## marital      2.060405e-03
## education    1.639668e-03
## default      8.418705e-07
## balance      3.699592e-03
## housing      5.425481e-03
## loan         2.850844e-03
## contact      1.132256e-02
## day          0.000000e+00
## month        2.072607e-02
## duration     7.277524e-02
## campaign     0.000000e+00
## pdays        2.463002e-02
## previous     1.124728e-02
## poutcome     2.604928e-02
## Cluster      3.002471e-01
## age_group    7.798001e-03
selected_features <- cutoff.k(weights, 10)  # keep the 10 best-ranked features
selected_features
##  [1] "Cluster"   "duration"  "poutcome"  "pdays"     "month"     "contact"  
##  [7] "previous"  "age_group" "job"       "housing"
# Subset the data to the selected features
selected_formula <- as.formula(paste("y ~", paste(selected_features, collapse = "+")))
data_selected <- data[, c("y", selected_features)]

# Split the data into training and testing sets
set.seed(123)
trainIndex <- createDataPartition(data_selected$y, p = 0.75, list = FALSE)
trainData <- data_selected[trainIndex, ]
testData <- data_selected[-trainIndex, ]

Finding the Optimal K

## =====================
## kNN Model with Cross-Validation & Preprocessing
## =====================

# === SMOTE for class imbalance (training set only) ===
trainData$y <- as.factor(trainData$y)  # SMOTE requires a factor target

# 10-fold cross-validation
ctrl <- trainControl(method = "cv", number = 10, sampling = "smote")

# Train the kNN model and search for the best k, with automatic preprocessing
set.seed(123)
knn_fit <- train(
  y ~ .,
  data = trainData,
  method = "knn",
  trControl = ctrl,
  preProcess = c("center", "scale"),  # caret handles the scaling
  tuneLength = 25
)

# Inspect the best k and the CV performance
print(knn_fit)
## k-Nearest Neighbors 
## 
## 3391 samples
##   10 predictor
##    2 classes: '0', '1' 
## 
## Pre-processing: centered (35), scaled (35) 
## Resampling: Cross-Validated (10 fold) 
## Summary of sample sizes: 3052, 3052, 3051, 3052, 3052, 3052, ... 
## Addtional sampling using SMOTE prior to pre-processing
## 
## Resampling results across tuning parameters:
## 
##   k   Accuracy   Kappa    
##    5  0.8504902  0.4016002
##    7  0.8384019  0.3868677
##    9  0.8322054  0.3709745
##   11  0.8342634  0.3842933
##   13  0.8389884  0.3902729
##   15  0.8310246  0.3757812
##   17  0.8274866  0.3785580
##   19  0.8322054  0.3836027
##   21  0.8325030  0.3875498
##   23  0.8295523  0.3830672
##   25  0.8245393  0.3748604
##   27  0.8251249  0.3680730
##   29  0.8263049  0.3734873
##   31  0.8292478  0.3812176
##   33  0.8257132  0.3800156
##   35  0.8295462  0.3813649
##   37  0.8192226  0.3650877
##   39  0.8180392  0.3690391
##   41  0.8174518  0.3668668
##   43  0.8147952  0.3673494
##   45  0.8168619  0.3708661
##   47  0.8091983  0.3607834
##   49  0.8094898  0.3634133
##   51  0.8083177  0.3575371
##   53  0.8097857  0.3631431
## 
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was k = 5.
plot(knn_fit)

Building the KNN Model

# One-hot encoding
dummies <- dummyVars(" ~ .", data = data_selected)
data_numeric <- data.frame(predict(dummies, newdata = data_selected))
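# Note: data_selected still contains the target y, so dummyVars also creates
# y.0/y.1 indicator columns; only the column literally named "y" is removed
# when the features are separated below, so these indicators remain among the
# KNN inputs.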

# Re-attach the label
data_numeric$y <- data_selected$y

# Drop rows containing NA/NaN/Inf
data_numeric <- data_numeric[complete.cases(data_numeric), ]
data_numeric <- data_numeric[!apply(data_numeric, 1, function(x) any(is.infinite(x))), ]

# Split the data into train and test sets
set.seed(123)
trainIndex <- createDataPartition(data_numeric$y, p = 0.75, list = FALSE)
trainData <- data_numeric[trainIndex, ]
testData  <- data_numeric[-trainIndex, ]

# Separate features and labels
x_train <- trainData[, setdiff(names(trainData), "y")]
y_train <- trainData$y
x_test  <- testData[, setdiff(names(testData), "y")]
y_test  <- testData$y

# Run KNN (k = 35; note that the CV search above selected k = 5)
knn_train_pred <- knn(train = x_train, test = x_train, cl = y_train, k = 35)
knn_test_pred <- knn(train = x_train, test = x_test, cl = y_train, k = 35)
# === Evaluation on the Training Set ===
evaluasi_knn_train <- confusionMatrix(knn_train_pred, y_train)
tabel_konfusi_knn_train <- evaluasi_knn_train$table
recall_0_knn_train <- evaluasi_knn_train$byClass["Sensitivity"]
recall_1_knn_train <- evaluasi_knn_train$byClass["Specificity"]
f1_0_knn_train     <- evaluasi_knn_train$byClass["F1"]
precision_1_knn_train <- tabel_konfusi_knn_train[2,2] / sum(tabel_konfusi_knn_train[2,])
f1_1_knn_train <- 2 * (precision_1_knn_train * recall_1_knn_train) / (precision_1_knn_train + recall_1_knn_train)
akurasi_knn_train <- evaluasi_knn_train$overall["Accuracy"]
akurasi_avg_knn_train <- (recall_0_knn_train + recall_1_knn_train) / 2
f1_macro_knn_train <- (f1_0_knn_train + f1_1_knn_train) / 2

cat("\n=== Evaluasi Model KNN pada Data Training ===\n")
## 
## === Evaluasi Model KNN pada Data Training ===
print(evaluasi_knn_train)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction    0    1
##          0 2931  299
##          1   69   92
##                                           
##                Accuracy : 0.8915          
##                  95% CI : (0.8805, 0.9018)
##     No Information Rate : 0.8847          
##     P-Value [Acc > NIR] : 0.1125          
##                                           
##                   Kappa : 0.2853          
##                                           
##  Mcnemar's Test P-Value : <2e-16          
##                                           
##             Sensitivity : 0.9770          
##             Specificity : 0.2353          
##          Pos Pred Value : 0.9074          
##          Neg Pred Value : 0.5714          
##              Prevalence : 0.8847          
##          Detection Rate : 0.8643          
##    Detection Prevalence : 0.9525          
##       Balanced Accuracy : 0.6061          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0_knn_train))
## Recall kelas 0 (negatif): 0.9770
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0_knn_train))
## F1-score kelas 0 (negatif): 0.9409
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1_knn_train))
## Recall kelas 1 (positif): 0.2353
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1_knn_train))
## F1-score kelas 1 (positif): 0.3333
cat(sprintf("Akurasi: %.4f\n", akurasi_knn_train))
## Akurasi: 0.8915
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg_knn_train))
## Balanced Accuracy: 0.6061
cat(sprintf("F1-score Macro: %.4f\n", f1_macro_knn_train))
## F1-score Macro: 0.6371
# === Evaluasi pada Testing Set ===
evaluasi_knn_test <- confusionMatrix(knn_test_pred, y_test)
tabel_konfusi_knn_test <- evaluasi_knn_test$table
recall_0_knn_test <- evaluasi_knn_test$byClass["Sensitivity"]
recall_1_knn_test <- evaluasi_knn_test$byClass["Specificity"]
f1_0_knn_test     <- evaluasi_knn_test$byClass["F1"]
precision_1_knn_test <- tabel_konfusi_knn_test[2,2] / sum(tabel_konfusi_knn_test[2,])
f1_1_knn_test <- 2 * (precision_1_knn_test * recall_1_knn_test) / (precision_1_knn_test + recall_1_knn_test)
akurasi_knn_test <- evaluasi_knn_test$overall["Accuracy"]
akurasi_avg_knn_test <- (recall_0_knn_test + recall_1_knn_test) / 2
f1_macro_knn_test <- (f1_0_knn_test + f1_1_knn_test) / 2

cat("\n=== Evaluasi Model KNN pada Data Testing ===\n")
## 
## === Evaluasi Model KNN pada Data Testing ===
print(evaluasi_knn_test)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   0   1
##          0 971  91
##          1  29  39
##                                           
##                Accuracy : 0.8938          
##                  95% CI : (0.8744, 0.9112)
##     No Information Rate : 0.885           
##     P-Value [Acc > NIR] : 0.1886          
##                                           
##                   Kappa : 0.3419          
##                                           
##  Mcnemar's Test P-Value : 2.569e-08       
##                                           
##             Sensitivity : 0.9710          
##             Specificity : 0.3000          
##          Pos Pred Value : 0.9143          
##          Neg Pred Value : 0.5735          
##              Prevalence : 0.8850          
##          Detection Rate : 0.8593          
##    Detection Prevalence : 0.9398          
##       Balanced Accuracy : 0.6355          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0_knn_test))
## Recall kelas 0 (negatif): 0.9710
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0_knn_test))
## F1-score kelas 0 (negatif): 0.9418
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1_knn_test))
## Recall kelas 1 (positif): 0.3000
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1_knn_test))
## F1-score kelas 1 (positif): 0.3939
cat(sprintf("Akurasi: %.4f\n", akurasi_knn_test))
## Akurasi: 0.8938
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg_knn_test))
## Balanced Accuracy: 0.6355
cat(sprintf("F1-score Macro: %.4f\n", f1_macro_knn_test))
## F1-score Macro: 0.6679

4. Decision Tree Model

# Load libraries
library(e1071)
library(caret)
library(MLmetrics)
library(class)
library(FSelector)
library(randomForest)
library(party)

# Load dataset
data <- read.csv("prepro_databank.csv", header = TRUE)

Data Preparation

# Ensure the target 'y' is a factor
data$y <- as.factor(data$y)

# Convert the categorical variables to factors
categorical_cols <- c("job", "marital", "education", "default", "housing",
                      "loan", "contact", "month", "poutcome")
data[categorical_cols] <- lapply(data[categorical_cols], as.factor)

# Feature selection using Information Gain
weights <- information.gain(y ~ ., data = data)
weights
##           attr_importance
## job          6.925136e-03
## marital      2.060405e-03
## education    1.639668e-03
## default      8.418705e-07
## balance      3.699592e-03
## housing      5.425481e-03
## loan         2.850844e-03
## contact      1.132256e-02
## day          0.000000e+00
## month        2.072607e-02
## duration     7.277524e-02
## campaign     0.000000e+00
## pdays        2.463002e-02
## previous     1.124728e-02
## poutcome     2.604928e-02
## Cluster      3.002471e-01
## age_group    7.798001e-03
selected_features <- cutoff.k(weights, 10)  # keep the 10 best-ranked features
selected_features
##  [1] "Cluster"   "duration"  "poutcome"  "pdays"     "month"     "contact"  
##  [7] "previous"  "age_group" "job"       "housing"
# Subset the data to the selected features
selected_formula <- as.formula(paste("y ~", paste(selected_features, collapse = "+")))
data_selected <- data[, c("y", selected_features)]

# Split the data into training and testing sets
set.seed(123)
trainIndex <- createDataPartition(data_selected$y, p = 0.75, list = FALSE)
trainData <- data_selected[trainIndex, ]
testData <- data_selected[-trainIndex, ]
# Ensure all character variables are converted to factors
trainData[] <- lapply(trainData, function(x) {
  if (is.character(x)) factor(x) else x
})
testData[] <- lapply(testData, function(x) {
  if (is.character(x)) factor(x) else x
})

# Align the factor levels between the training and testing data
levels_train <- levels(trainData$y)
trainData$y <- factor(trainData$y, levels = levels_train)
testData$y <- factor(testData$y, levels = levels_train)
# -----------------------------------------
# Decision Tree Model using ctree
# -----------------------------------------

# Train the Decision Tree model using ctree (a conditional inference tree,
# which selects splits via permutation-based significance tests)
model_tree <- ctree(y ~ ., data = trainData)
print(model_tree)
## 
##   Conditional inference tree with 26 terminal nodes
## 
## Response:  y 
## Inputs:  Cluster, duration, poutcome, pdays, month, contact, previous, age_group, job, housing 
## Number of observations:  3391 
## 
## 1) duration <= 383; criterion = 1, statistic = 523.66
##   2) poutcome == {4}; criterion = 1, statistic = 432.766
##     3) Cluster <= 5; criterion = 1, statistic = 27.718
##       4)*  weights = 33 
##     3) Cluster > 5
##       5)*  weights = 40 
##   2) poutcome == {1, 2, 3}
##     6) month == {3, 9, 10}; criterion = 1, statistic = 228.702
##       7) duration <= 184; criterion = 1, statistic = 17.148
##         8)*  weights = 60 
##       7) duration > 184
##         9) Cluster <= 5; criterion = 0.986, statistic = 10.214
##           10)*  weights = 16 
##         9) Cluster > 5
##           11)*  weights = 22 
##     6) month == {1, 2, 4, 5, 6, 7, 8, 11, 12}
##       12) age_group == {Senior, Young}; criterion = 1, statistic = 81.432
##         13) Cluster <= 5; criterion = 1, statistic = 20.057
##           14)*  weights = 73 
##         13) Cluster > 5
##           15) Cluster <= 6; criterion = 1, statistic = 37
##             16)*  weights = 18 
##           15) Cluster > 6
##             17)*  weights = 20 
##       12) age_group == {Adult, Middle-aged}
##         18) duration <= 222; criterion = 1, statistic = 57.511
##           19) Cluster <= 5; criterion = 1, statistic = 36.503
##             20) poutcome == {3}; criterion = 0.967, statistic = 11.387
##               21)*  weights = 67 
##             20) poutcome == {1, 2}
##               22) pdays <= 366; criterion = 0.995, statistic = 15.39
##                 23)*  weights = 1248 
##               22) pdays > 366
##                 24)*  weights = 14 
##           19) Cluster > 5
##             25) Cluster <= 6; criterion = 1, statistic = 509
##               26)*  weights = 23 
##             25) Cluster > 6
##               27)*  weights = 487 
##         18) duration > 222
##           28) poutcome == {3}; criterion = 1, statistic = 39.538
##             29)*  weights = 25 
##           28) poutcome == {1, 2}
##             30) Cluster <= 3; criterion = 1, statistic = 29.366
##               31)*  weights = 312 
##             30) Cluster > 3
##               32) Cluster <= 6; criterion = 0.998, statistic = 19.071
##                 33) Cluster <= 5; criterion = 1, statistic = 49.222
##                   34) month == {1, 2, 4, 5, 6, 11, 12}; criterion = 1, statistic = 36.339
##                     35)*  weights = 54 
##                   34) month == {7, 8}
##                     36)*  weights = 7 
##                 33) Cluster > 5
##                   37)*  weights = 28 
##               32) Cluster > 6
##                 38)*  weights = 167 
## 1) duration > 383
##   39) Cluster <= 5; criterion = 1, statistic = 129.463
##     40) age_group == {Senior, Young}; criterion = 1, statistic = 54.157
##       41)*  weights = 11 
##     40) age_group == {Adult, Middle-aged}
##       42) pdays <= 364; criterion = 1, statistic = 37.685
##         43) previous <= 7; criterion = 1, statistic = 24.237
##           44)*  weights = 297 
##         43) previous > 7
##           45)*  weights = 7 
##       42) pdays > 364
##         46)*  weights = 10 
##   39) Cluster > 5
##     47) Cluster <= 6; criterion = 1, statistic = 314.661
##       48) duration <= 1309; criterion = 1, statistic = 47.296
##         49)*  weights = 201 
##       48) duration > 1309
##         50)*  weights = 24 
##     47) Cluster > 6
##       51)*  weights = 127
# Plot the Decision Tree
plot(model_tree)

# Predict on the training and testing data
pred_tree_train <- predict(model_tree, trainData)
pred_tree_test <- predict(model_tree, testData)
# Align the factor levels of the predictions with the actual labels
pred_tree_train <- factor(pred_tree_train, levels = levels_train)
pred_tree_test <- factor(pred_tree_test, levels = levels_train)
# === Evaluation on the Training Set ===
evaluasi_tree_train <- confusionMatrix(pred_tree_train, y_train)
tabel_konfusi_tree_train <- evaluasi_tree_train$table
recall_0_tree_train <- evaluasi_tree_train$byClass["Sensitivity"]
recall_1_tree_train <- evaluasi_tree_train$byClass["Specificity"]
f1_0_tree_train     <- evaluasi_tree_train$byClass["F1"]
precision_1_tree_train <- tabel_konfusi_tree_train[2,2] / sum(tabel_konfusi_tree_train[2,])
f1_1_tree_train <- 2 * (precision_1_tree_train * recall_1_tree_train) / (precision_1_tree_train + recall_1_tree_train)
akurasi_tree_train <- evaluasi_tree_train$overall["Accuracy"]
akurasi_avg_tree_train <- (recall_0_tree_train + recall_1_tree_train) / 2
f1_macro_tree_train <- (f1_0_tree_train + f1_1_tree_train) / 2

cat("\n=== Evaluasi Model Tree pada Data Training ===\n")
## 
## === Evaluasi Model Tree pada Data Training ===
print(evaluasi_tree_train)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction    0    1
##          0 2982   46
##          1   18  345
##                                          
##                Accuracy : 0.9811         
##                  95% CI : (0.976, 0.9854)
##     No Information Rate : 0.8847         
##     P-Value [Acc > NIR] : < 2.2e-16      
##                                          
##                   Kappa : 0.9045         
##                                          
##  Mcnemar's Test P-Value : 0.0007382      
##                                          
##             Sensitivity : 0.9940         
##             Specificity : 0.8824         
##          Pos Pred Value : 0.9848         
##          Neg Pred Value : 0.9504         
##              Prevalence : 0.8847         
##          Detection Rate : 0.8794         
##    Detection Prevalence : 0.8930         
##       Balanced Accuracy : 0.9382         
##                                          
##        'Positive' Class : 0              
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0_tree_train))
## Recall kelas 0 (negatif): 0.9940
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0_tree_train))
## F1-score kelas 0 (negatif): 0.9894
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1_tree_train))
## Recall kelas 1 (positif): 0.8824
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1_tree_train))
## F1-score kelas 1 (positif): 0.9151
cat(sprintf("Akurasi: %.4f\n", akurasi_tree_train))
## Akurasi: 0.9811
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg_tree_train))
## Balanced Accuracy: 0.9382
cat(sprintf("F1-score Macro: %.4f\n", f1_macro_tree_train))
## F1-score Macro: 0.9523
# === Evaluasi pada Testing Set ===
evaluasi_tree_test <- confusionMatrix(pred_tree_test, y_test)
tabel_konfusi_tree_test <- evaluasi_tree_test$table
recall_0_tree_test <- evaluasi_tree_test$byClass["Sensitivity"]
recall_1_tree_test <- evaluasi_tree_test$byClass["Specificity"]
f1_0_tree_test     <- evaluasi_tree_test$byClass["F1"]
precision_1_tree_test <- tabel_konfusi_tree_test[2,2] / sum(tabel_konfusi_tree_test[2,])
f1_1_tree_test <- 2 * (precision_1_tree_test * recall_1_tree_test) / (precision_1_tree_test + recall_1_tree_test)
akurasi_tree_test <- evaluasi_tree_test$overall["Accuracy"]
akurasi_avg_tree_test <- (recall_0_tree_test + recall_1_tree_test) / 2
f1_macro_tree_test <- (f1_0_tree_test + f1_1_tree_test) / 2

cat("\n=== Evaluasi Model Tree pada Data Testing ===\n")
## 
## === Evaluasi Model Tree pada Data Testing ===
print(evaluasi_tree_test)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   0   1
##          0 990  18
##          1  10 112
##                                           
##                Accuracy : 0.9752          
##                  95% CI : (0.9644, 0.9835)
##     No Information Rate : 0.885           
##     P-Value [Acc > NIR] : <2e-16          
##                                           
##                   Kappa : 0.875           
##                                           
##  Mcnemar's Test P-Value : 0.1859          
##                                           
##             Sensitivity : 0.9900          
##             Specificity : 0.8615          
##          Pos Pred Value : 0.9821          
##          Neg Pred Value : 0.9180          
##              Prevalence : 0.8850          
##          Detection Rate : 0.8761          
##    Detection Prevalence : 0.8920          
##       Balanced Accuracy : 0.9258          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0_tree_test))
## Recall kelas 0 (negatif): 0.9900
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0_tree_test))
## F1-score kelas 0 (negatif): 0.9861
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1_tree_test))
## Recall kelas 1 (positif): 0.8615
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1_tree_test))
## F1-score kelas 1 (positif): 0.8889
cat(sprintf("Akurasi: %.4f\n", akurasi_tree_test))
## Akurasi: 0.9752
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg_tree_test))
## Balanced Accuracy: 0.9258
cat(sprintf("F1-score Macro: %.4f\n", f1_macro_tree_test))
## F1-score Macro: 0.9375

5. ANN Model

# Load libraries
library(e1071)
library(caret)
library(MLmetrics)
library(class)
library(FSelector)
library(party)
library(nnet)
## Warning: package 'nnet' was built under R version 4.3.3
# Load dataset
data <- read.csv("prepro_databank.csv", header = TRUE)

Data Preparation

# Ensure the target 'y' is a factor
data$y <- as.factor(data$y)

# Convert the categorical variables to factors
categorical_cols <- c("job", "marital", "education", "default", "housing",
                      "loan", "contact", "month", "poutcome")
data[categorical_cols] <- lapply(data[categorical_cols], as.factor)

# Feature selection using Information Gain
weights <- information.gain(y ~ ., data = data)
weights
##           attr_importance
## job          6.925136e-03
## marital      2.060405e-03
## education    1.639668e-03
## default      8.418705e-07
## balance      3.699592e-03
## housing      5.425481e-03
## loan         2.850844e-03
## contact      1.132256e-02
## day          0.000000e+00
## month        2.072607e-02
## duration     7.277524e-02
## campaign     0.000000e+00
## pdays        2.463002e-02
## previous     1.124728e-02
## poutcome     2.604928e-02
## Cluster      3.002471e-01
## age_group    7.798001e-03
selected_features <- cutoff.k(weights, 10)  # keep the 10 best-ranked features
selected_features
##  [1] "Cluster"   "duration"  "poutcome"  "pdays"     "month"     "contact"  
##  [7] "previous"  "age_group" "job"       "housing"
# Subset the data to the selected features
selected_formula <- as.formula(paste("y ~", paste(selected_features, collapse = "+")))
data_selected <- data[, c("y", selected_features)]

# Split the data into training and testing sets
set.seed(123)
trainIndex <- createDataPartition(data_selected$y, p = 0.75, list = FALSE)
trainData <- data_selected[trainIndex, ]
testData <- data_selected[-trainIndex, ]
# One-hot encoding
dummies <- dummyVars(" ~ .", data = data_selected)
data_numeric <- data.frame(predict(dummies, newdata = data_selected))
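# Note: as in the KNN section, dummyVars also expands the target into y.0/y.1
# indicator columns, and these remain among the predictors passed to nnet
# (visible as "inputs: y.0 y.1 ..." in the printed model below).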

# Re-attach the label
data_numeric$y <- data_selected$y

# Drop rows containing NA/NaN/Inf
data_numeric <- data_numeric[complete.cases(data_numeric), ]
data_numeric <- data_numeric[!apply(data_numeric, 1, function(x) any(is.infinite(x))), ]

# Split the data into train and test sets
set.seed(123)
trainIndex <- createDataPartition(data_numeric$y, p = 0.75, list = FALSE)
trainData <- data_numeric[trainIndex, ]
testData  <- data_numeric[-trainIndex, ]
# -----------------------------------------
# Artificial Neural Network (ANN) Model
# -----------------------------------------

# Train the Neural Network model (single hidden layer with size = 5 units,
# weight-decay penalty decay = 0.05, at most maxit = 40 iterations;
# linout = FALSE keeps the logistic output used for classification)
model_ann <- nnet(y ~ ., data = trainData, size = 5, decay = 0.05, maxit = 40, linout = FALSE, trace = FALSE)
print(model_ann)
## a 43-5-1 network with 226 weights
## inputs: y.0 y.1 Cluster duration poutcome.1 poutcome.2 poutcome.3 poutcome.4 pdays month.1 month.2 month.3 month.4 month.5 month.6 month.7 month.8 month.9 month.10 month.11 month.12 contact.1 contact.2 contact.3 previous age_groupAdult age_groupMiddle.aged age_groupSenior age_groupYoung job.1 job.2 job.3 job.4 job.5 job.6 job.7 job.8 job.9 job.10 job.11 job.12 housing.0 housing.1 
## output(s): y 
## options were - entropy fitting  decay=0.05
# Predict on the training and testing data
pred_ann_train <- predict(model_ann, trainData, type = "class")
pred_ann_test <- predict(model_ann, testData, type = "class")
# Align the factor levels of the predictions with the actual labels
pred_ann_train <- factor(pred_ann_train, levels = levels_train)
pred_ann_test <- factor(pred_ann_test, levels = levels_train)
# === Evaluation on the Training Set ===
evaluasi_ann_train <- confusionMatrix(pred_ann_train, y_train)
tabel_konfusi_ann_train <- evaluasi_ann_train$table
recall_0_ann_train <- evaluasi_ann_train$byClass["Sensitivity"]
recall_1_ann_train <- evaluasi_ann_train$byClass["Specificity"]
f1_0_ann_train     <- evaluasi_ann_train$byClass["F1"]
precision_1_ann_train <- tabel_konfusi_ann_train[2,2] / sum(tabel_konfusi_ann_train[2,])
f1_1_ann_train <- 2 * (precision_1_ann_train * recall_1_ann_train) / (precision_1_ann_train + recall_1_ann_train)
akurasi_ann_train <- evaluasi_ann_train$overall["Accuracy"]
akurasi_avg_ann_train <- (recall_0_ann_train + recall_1_ann_train) / 2
f1_macro_ann_train <- (f1_0_ann_train + f1_1_ann_train) / 2

cat("\n=== Evaluasi Model ANN pada Data Training ===\n")
## 
## === Evaluasi Model ANN pada Data Training ===
print(evaluasi_ann_train)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction    0    1
##          0 2999  111
##          1    1  280
##                                           
##                Accuracy : 0.967           
##                  95% CI : (0.9604, 0.9727)
##     No Information Rate : 0.8847          
##     P-Value [Acc > NIR] : < 2.2e-16       
##                                           
##                   Kappa : 0.8155          
##                                           
##  Mcnemar's Test P-Value : < 2.2e-16       
##                                           
##             Sensitivity : 0.9997          
##             Specificity : 0.7161          
##          Pos Pred Value : 0.9643          
##          Neg Pred Value : 0.9964          
##              Prevalence : 0.8847          
##          Detection Rate : 0.8844          
##    Detection Prevalence : 0.9171          
##       Balanced Accuracy : 0.8579          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0_ann_train))
## Recall kelas 0 (negatif): 0.9997
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0_ann_train))
## F1-score kelas 0 (negatif): 0.9817
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1_ann_train))
## Recall kelas 1 (positif): 0.7161
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1_ann_train))
## F1-score kelas 1 (positif): 0.8333
cat(sprintf("Akurasi: %.4f\n", akurasi_ann_train))
## Akurasi: 0.9670
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg_ann_train))
## Balanced Accuracy: 0.8579
cat(sprintf("F1-score Macro: %.4f\n", f1_macro_ann_train))
## F1-score Macro: 0.9075
# === Evaluasi pada Testing Set ===
evaluasi_ann_test <- confusionMatrix(pred_ann_test, y_test)
tabel_konfusi_ann_test <- evaluasi_ann_test$table
recall_0_ann_test <- evaluasi_ann_test$byClass["Sensitivity"]
recall_1_ann_test <- evaluasi_ann_test$byClass["Specificity"]
f1_0_ann_test     <- evaluasi_ann_test$byClass["F1"]
precision_1_ann_test <- tabel_konfusi_ann_test[2,2] / sum(tabel_konfusi_ann_test[2,])
f1_1_ann_test <- 2 * (precision_1_ann_test * recall_1_ann_test) / (precision_1_ann_test + recall_1_ann_test)
akurasi_ann_test <- evaluasi_ann_test$overall["Accuracy"]
akurasi_avg_ann_test <- (recall_0_ann_test + recall_1_ann_test) / 2
f1_macro_ann_test <- (f1_0_ann_test + f1_1_ann_test) / 2

cat("\n=== Evaluasi Model ANN pada Data Testing ===\n")
## 
## === Evaluasi Model ANN pada Data Testing ===
print(evaluasi_ann_test)
## Confusion Matrix and Statistics
## 
##           Reference
## Prediction   0   1
##          0 999  36
##          1   1  94
##                                           
##                Accuracy : 0.9673          
##                  95% CI : (0.9551, 0.9768)
##     No Information Rate : 0.885           
##     P-Value [Acc > NIR] : < 2.2e-16       
##                                           
##                   Kappa : 0.8179          
##                                           
##  Mcnemar's Test P-Value : 2.276e-08       
##                                           
##             Sensitivity : 0.9990          
##             Specificity : 0.7231          
##          Pos Pred Value : 0.9652          
##          Neg Pred Value : 0.9895          
##              Prevalence : 0.8850          
##          Detection Rate : 0.8841          
##    Detection Prevalence : 0.9159          
##       Balanced Accuracy : 0.8610          
##                                           
##        'Positive' Class : 0               
## 
cat(sprintf("Recall kelas 0 (negatif): %.4f\n", recall_0_ann_test))
## Recall kelas 0 (negatif): 0.9990
cat(sprintf("F1-score kelas 0 (negatif): %.4f\n", f1_0_ann_test))
## F1-score kelas 0 (negatif): 0.9818
cat(sprintf("Recall kelas 1 (positif): %.4f\n", recall_1_ann_test))
## Recall kelas 1 (positif): 0.7231
cat(sprintf("F1-score kelas 1 (positif): %.4f\n", f1_1_ann_test))
## F1-score kelas 1 (positif): 0.8356
cat(sprintf("Akurasi: %.4f\n", akurasi_ann_test))
## Akurasi: 0.9673
cat(sprintf("Balanced Accuracy: %.4f\n", akurasi_avg_ann_test))
## Balanced Accuracy: 0.8610
cat(sprintf("F1-score Macro: %.4f\n", f1_macro_ann_test))
## F1-score Macro: 0.9087