Goal: Build a classification model to predict whether an email is spam (yesno). Click here for the data.
library(tidyverse)
library(correlationfunnel)
data <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2023/2023-08-15/spam.csv')
## Rows: 4601 Columns: 7
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (1): yesno
## dbl (6): crl.tot, dollar, bang, money, n000, make
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
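As the message suggests, the column types can be declared up front to silence it; a minimal sketch using the same URL:
data <- readr::read_csv(
  'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2023/2023-08-15/spam.csv',
  col_types = readr::cols(yesno = readr::col_character(), .default = readr::col_double())
)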
skimr::skim(data)
| Data summary | |
|---|---|
| Name | data |
| Number of rows | 4601 |
| Number of columns | 7 |
| Column type frequency: | |
| character | 1 |
| numeric | 6 |
| Group variables | None |
Variable type: character
| skim_variable | n_missing | complete_rate | min | max | empty | n_unique | whitespace |
|---|---|---|---|---|---|---|---|
| yesno | 0 | 1 | 1 | 1 | 0 | 2 | 0 |
Variable type: numeric
| skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
|---|---|---|---|---|---|---|---|---|---|---|
| crl.tot | 0 | 1 | 283.29 | 606.35 | 1 | 35 | 95 | 266.00 | 15841.00 | ▇▁▁▁▁ |
| dollar | 0 | 1 | 0.08 | 0.25 | 0 | 0 | 0 | 0.05 | 6.00 | ▇▁▁▁▁ |
| bang | 0 | 1 | 0.27 | 0.82 | 0 | 0 | 0 | 0.32 | 32.48 | ▇▁▁▁▁ |
| money | 0 | 1 | 0.09 | 0.44 | 0 | 0 | 0 | 0.00 | 12.50 | ▇▁▁▁▁ |
| n000 | 0 | 1 | 0.10 | 0.35 | 0 | 0 | 0 | 0.00 | 5.45 | ▇▁▁▁▁ |
| make | 0 | 1 | 0.10 | 0.31 | 0 | 0 | 0 | 0.00 | 4.54 | ▇▁▁▁▁ |
Issues with the data
data_clean <- data %>%
  # make yesno a factor with "y" (spam) as the first level, the event level for yardstick
  mutate(yesno = fct_relevel(yesno, "y"))
data_clean %>% count(yesno)
## # A tibble: 2 × 2
## yesno n
## <fct> <int>
## 1 y 1813
## 2 n 2788
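About 39% of the emails are spam (1813 of 4601), so the classes are imbalanced; we deal with this later via SMOTE. A quick sketch of the proportions:
data_clean %>%
  count(yesno) %>%
  mutate(prop = n / sum(n))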
data_clean %>%
  ggplot(aes(yesno)) +
  geom_bar()
yesno vs. money
data_clean %>%
  ggplot(aes(yesno, money)) +
  geom_boxplot()
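money is zero-inflated and heavily right-skewed, so the raw boxplot is dominated by outliers. A log1p-scaled y axis (a sketch; the "log1p" transform is provided by the scales package) can make any separation between classes easier to see:
data_clean %>%
  ggplot(aes(yesno, money)) +
  geom_boxplot() +
  scale_y_continuous(trans = "log1p")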
correlation plot
# step 1: binarize
data_binarized <- data_clean %>%
  select(-money) %>%
  binarize()
data_binarized %>% glimpse()
## Rows: 4,601
## Columns: 15
## $ `crl.tot__-Inf_35` <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0…
## $ crl.tot__35_95 <dbl> 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0…
## $ crl.tot__95_266 <dbl> 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1…
## $ crl.tot__266_Inf <dbl> 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0…
## $ `dollar__-Inf_0.052` <dbl> 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1…
## $ dollar__0.052_Inf <dbl> 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0…
## $ `bang__-Inf_0.315` <dbl> 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0…
## $ bang__0.315_Inf <dbl> 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1…
## $ n000__0 <dbl> 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1…
## $ `n000__-OTHER` <dbl> 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0…
## $ make__0 <dbl> 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1…
## $ make__0.1 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0…
## $ `make__-OTHER` <dbl> 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0…
## $ yesno__y <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1…
## $ yesno__n <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0…
# step 2: correlation
data_correlation <- data_binarized %>%
  correlate(yesno__n)
# step 3: plot
data_correlation %>%
  correlationfunnel::plot_correlation_funnel()
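The funnel is easiest to read visually, but correlate() also returns a plain tibble (feature, bin, correlation), so the strongest bins can be listed directly; a quick sketch:
data_correlation %>%
  arrange(desc(abs(correlation)))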
library(tidymodels)
set.seed(1234)
# work with a random 1,000-row sample to keep tuning fast
data_clean <- data_clean %>% sample_n(1000)
data_split <- initial_split(data_clean, strata = yesno)
data_train <- training(data_split)
data_test <- testing(data_split)
data_cv <- rsample::vfold_cv(data_train, strata = yesno)
data_cv
## # 10-fold cross-validation using stratification
## # A tibble: 10 × 2
## splits id
## <list> <chr>
## 1 <split [673/76]> Fold01
## 2 <split [673/76]> Fold02
## 3 <split [674/75]> Fold03
## 4 <split [674/75]> Fold04
## 5 <split [674/75]> Fold05
## 6 <split [674/75]> Fold06
## 7 <split [674/75]> Fold07
## 8 <split [675/74]> Fold08
## 9 <split [675/74]> Fold09
## 10 <split [675/74]> Fold10
library(themis)
## Warning: package 'themis' was built under R version 4.3.3
xgboost_rec <- recipes::recipe(yesno ~ ., data = data_train) %>%
  step_dummy(all_nominal_predictors()) %>%  # no-op here (all predictors are numeric), kept for safety
  step_smote(yesno)                         # oversample the minority class to balance yesno
xgboost_rec %>% prep() %>% juice() %>% glimpse()
## Rows: 914
## Columns: 7
## $ crl.tot <dbl> 50, 10, 8, 15, 15, 5, 17, 106, 146, 322, 5, 8, 9, 7, 59, 15, 1…
## $ dollar <dbl> 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.078, 0.000,…
## $ bang <dbl> 0.175, 0.000, 0.000, 0.000, 0.000, 3.260, 0.773, 0.000, 0.000,…
## $ money <dbl> 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.…
## $ n000 <dbl> 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.…
## $ make <dbl> 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.54, 0.00, 0.27, 0.…
## $ yesno <fct> n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n,…
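Since step_smote() oversamples the minority class, the juiced training data should now be roughly balanced between y and n; a quick check:
xgboost_rec %>% prep() %>% juice() %>% count(yesno)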
xgboost_spec <-
  boost_tree(trees = tune(), tree_depth = tune()) %>%
  set_mode("classification") %>%
  set_engine("xgboost")
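translate() shows how parsnip maps this specification onto the underlying xgboost training call, which is handy for seeing where the tuned parameters land; a quick sketch:
xgboost_spec %>% translate()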
xgboost_workflow <-
  workflow() %>%
  add_recipe(xgboost_rec) %>%
  add_model(xgboost_spec)
doParallel::registerDoParallel()
set.seed(65743)
xgboost_tune <-
  tune_grid(xgboost_workflow,
            resamples = data_cv,
            # grid = 5 asks tune to generate 5 candidate parameter sets
            grid = 5,
            control = control_grid(save_pred = TRUE))
collect_metrics(xgboost_tune)
## # A tibble: 10 × 8
## trees tree_depth .metric .estimator mean n std_err .config
## <int> <int> <chr> <chr> <dbl> <int> <dbl> <chr>
## 1 1741 3 accuracy binary 0.822 10 0.0171 Preprocessor1_Model1
## 2 1741 3 roc_auc binary 0.888 10 0.0125 Preprocessor1_Model1
## 3 885 5 accuracy binary 0.824 10 0.0163 Preprocessor1_Model2
## 4 885 5 roc_auc binary 0.890 10 0.0122 Preprocessor1_Model2
## 5 325 7 accuracy binary 0.828 10 0.0162 Preprocessor1_Model3
## 6 325 7 roc_auc binary 0.891 10 0.0116 Preprocessor1_Model3
## 7 1312 12 accuracy binary 0.828 10 0.0153 Preprocessor1_Model4
## 8 1312 12 roc_auc binary 0.894 10 0.0118 Preprocessor1_Model4
## 9 555 15 accuracy binary 0.829 10 0.0152 Preprocessor1_Model5
## 10 555 15 roc_auc binary 0.895 10 0.0112 Preprocessor1_Model5
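show_best() filters these same results down to the top configurations for a single metric; a quick sketch:
show_best(xgboost_tune, metric = "roc_auc")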
collect_predictions(xgboost_tune) %>%
  group_by(id) %>%
  roc_curve(yesno, .pred_y) %>%
  autoplot()
xgboost_last <- xgboost_workflow %>%
  finalize_workflow(select_best(xgboost_tune, metric = "accuracy")) %>%
  last_fit(data_split)
## Warning: package 'xgboost' was built under R version 4.3.3
collect_metrics(xgboost_last)
## # A tibble: 2 × 4
## .metric .estimator .estimate .config
## <chr> <chr> <dbl> <chr>
## 1 accuracy binary 0.865 Preprocessor1_Model1
## 2 roc_auc binary 0.920 Preprocessor1_Model1
collect_predictions(xgboost_last) %>%
  yardstick::conf_mat(yesno, .pred_class) %>%
  autoplot()
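Beyond the plot, summary() on a conf_mat returns a tibble of derived metrics (sensitivity, specificity, and friends); a quick sketch:
collect_predictions(xgboost_last) %>%
  yardstick::conf_mat(yesno, .pred_class) %>%
  summary()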
library(vip)
xgboost_last %>%
  workflows::extract_fit_engine() %>%
  vip()
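If a table is preferable to the plot, vip::vi() returns the same importances as a tibble; a quick sketch:
xgboost_last %>%
  workflows::extract_fit_engine() %>%
  vip::vi()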
The final model achieves an accuracy of 0.865 and a ROC AUC of 0.920 on the held-out test set.
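To score new emails later, the fitted workflow can be pulled out of the last_fit object and used with predict(); a minimal sketch (the .rds file name is illustrative):
final_wf <- tune::extract_workflow(xgboost_last)
predict(final_wf, new_data = head(data_test))
# saveRDS(final_wf, "spam_xgboost.rds")  # illustrative file name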