library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1
## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(correlationfunnel)
## ══ correlationfunnel Tip #3 ════════════════════════════════════════════════════
## Using `binarize()` with data containing many columns or many rows can increase dimensionality substantially.
## Try subsetting your data column-wise or row-wise to avoid creating too many columns.
## You can always make a big problem smaller by sampling. :)
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.2.0 ──
## ✔ broom 1.0.8 ✔ rsample 1.2.1
## ✔ dials 1.3.0 ✔ tune 1.2.1
## ✔ infer 1.0.7 ✔ workflows 1.1.4
## ✔ modeldata 1.4.0 ✔ workflowsets 1.1.0
## ✔ parsnip 1.2.1 ✔ yardstick 1.3.2
## ✔ recipes 1.1.0
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter() masks stats::filter()
## ✖ recipes::fixed() masks stringr::fixed()
## ✖ dplyr::lag() masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step() masks stats::step()
## • Dig deeper into tidy modeling with R at https://www.tmwr.org
library(themis)
library(vip)
##
## Attaching package: 'vip'
##
## The following object is masked from 'package:utils':
##
## vi
emails_raw <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/main/data/2023/2023-08-15/spam.csv")
## Rows: 4601 Columns: 7
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (1): yesno
## dbl (6): crl.tot, dollar, bang, money, n000, make
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
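readr's note can be silenced on re-runs by declaring the column types up front. A minimal sketch, using the types from the specification printed above (optional; the analysis does not depend on it):

emails_raw <- read_csv(
  "https://raw.githubusercontent.com/rfordatascience/tidytuesday/main/data/2023/2023-08-15/spam.csv",
  col_types = cols(yesno = col_character(), .default = col_double())  # types taken from the printed spec
)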
emails <- emails_raw %>%
  mutate(label = factor(yesno, levels = c("y", "n"))) %>%
  select(-yesno)  # drop the raw target column so it cannot leak into the predictors
emails %>% count(label)
## # A tibble: 2 × 2
## label n
## <fct> <int>
## 1 y 1813
## 2 n 2788
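The raw counts show a moderate imbalance. Converting them to proportions makes the ratio explicit, using only dplyr verbs already attached:

emails %>%
  count(label) %>%
  mutate(prop = n / sum(n))  # roughly 39% spam ("y") vs. 61% non-spam ("n")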
emails %>%
ggplot(aes(x = label)) +
geom_bar(fill = "darkblue") +
labs(title = "Email Class Breakdown", x = "Label")
emails %>%
ggplot(aes(x = label, y = crl.tot)) +
geom_boxplot(fill = "tomato") +
labs(title = "Boxplot: crl.tot by Label")
binarized <- emails %>% binarize()
funnel_corr <- binarized %>% correlate(label__y) %>% arrange(desc(correlation))
plot_correlation_funnel(funnel_corr)
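With many binned features the funnel plot can get crowded. Pulling the strongest bins into a table is a quick complement; this sketch assumes the feature/bin/correlation columns that correlate() returns:

funnel_corr %>%
  filter(feature != "label") %>%        # drop the target's own bins
  slice_max(abs(correlation), n = 10)   # ten bins most associated with spam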
set.seed(2024)
split_obj <- initial_split(emails, strata = label)
train_set <- training(split_obj)
test_set <- testing(split_obj)
cv_folds <- vfold_cv(train_set, strata = label)
email_recipe <- recipe(label ~ ., data = train_set) %>%
  step_YeoJohnson(all_numeric_predictors()) %>%  # reduce skew in the heavy-tailed counts
  step_normalize(all_numeric_predictors()) %>%   # center/scale so SMOTE's nearest-neighbor distances are sensible
  step_dummy(all_nominal_predictors()) %>%       # no-op here (all predictors are numeric), kept for safety
  step_smote(label)                              # oversample the minority class ("y") in the training data only
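Before tuning, it is worth confirming the recipe does what is intended. Prepping it on the training set and baking with new_data = NULL returns the processed training data, where step_smote() should have balanced the classes (a sanity check only, not part of the pipeline):

email_recipe %>%
  prep() %>%
  bake(new_data = NULL) %>%  # processed, SMOTE-balanced training data
  count(label)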
xgb_model <- boost_tree(trees = tune(), tree_depth = tune()) %>%
set_mode("classification") %>%
set_engine("xgboost")
rf_model <- rand_forest(trees = tune()) %>%
  set_mode("classification") %>%
  set_engine("ranger", importance = "impurity")  # impurity importance is required for vip()/vi() later
xgb_flow <- workflow() %>% add_recipe(email_recipe) %>% add_model(xgb_model)
rf_flow <- workflow() %>% add_recipe(email_recipe) %>% add_model(rf_model)
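A quick check that the intended parameters are marked for tuning: extract_parameter_set_dials(), re-exported by tidymodels, lists a workflow's tunable parameters (it should show trees and tree_depth for the boosted model):

xgb_flow %>% extract_parameter_set_dials()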
doParallel::registerDoParallel()
xgb_grid <- grid_regular(trees(), tree_depth(), levels = 5)  # 5 x 5 = 25 candidate combinations
rf_grid <- grid_regular(trees(), levels = 5)                 # 5 candidate tree counts
set.seed(2025)
xgb_tuned <- tune_grid(xgb_flow, resamples = cv_folds, grid = xgb_grid, control = control_grid(save_pred = TRUE))
rf_tuned <- tune_grid(rf_flow, resamples = cv_folds, grid = rf_grid, control = control_grid(save_pred = TRUE))
xgb_results <- collect_metrics(xgb_tuned)
rf_results <- collect_metrics(rf_tuned)
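collect_metrics() returns every grid point; show_best() condenses that to the top candidates for one metric. Shown here for ROC AUC, while the finalization below selects on accuracy:

show_best(xgb_tuned, metric = "roc_auc", n = 5)
show_best(rf_tuned, metric = "roc_auc", n = 5)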
xgb_roc_plot <- collect_predictions(xgb_tuned) %>% group_by(id) %>% roc_curve(label, .pred_y) %>% autoplot()
rf_roc_plot <- collect_predictions(rf_tuned) %>% group_by(id) %>% roc_curve(label, .pred_y) %>% autoplot()
list(xgb_roc_plot, rf_roc_plot)
## [[1]]
##
## [[2]]
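Beyond the per-fold ROC curves, autoplot() on a tune_grid result plots the metric profiles across the grid, which helps show whether the tuning ranges were wide enough:

autoplot(xgb_tuned)  # metric profiles across trees and tree_depth
autoplot(rf_tuned)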
final_xgb <- xgb_flow %>%
finalize_workflow(select_best(xgb_tuned, metric = "accuracy")) %>%
last_fit(split_obj)
final_rf <- rf_flow %>%
finalize_workflow(select_best(rf_tuned, metric = "accuracy")) %>%
last_fit(split_obj)
final_results <- bind_rows(
  collect_metrics(final_xgb) %>% mutate(model = "xgboost", .before = 1),
  collect_metrics(final_rf) %>% mutate(model = "ranger", .before = 1)
)
final_results
## # A tibble: 6 × 5
## model .metric .estimator .estimate .config
## <chr> <chr> <chr> <dbl> <chr>
## 1 xgboost accuracy binary 1 Preprocessor1_Model1
## 2 xgboost roc_auc binary 1 Preprocessor1_Model1
## 3 xgboost brier_class binary 0.126 Preprocessor1_Model1
## 4 ranger accuracy binary 1 Preprocessor1_Model1
## 5 ranger roc_auc binary 1 Preprocessor1_Model1
## 6 ranger brier_class binary 0.000383 Preprocessor1_Model1
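The headline metrics can be supplemented with test-set confusion matrices built from the last_fit predictions (conf_mat() is yardstick's, already attached):

collect_predictions(final_xgb) %>% conf_mat(truth = label, estimate = .pred_class)
collect_predictions(final_rf) %>% conf_mat(truth = label, estimate = .pred_class)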
final_xgb %>% workflows::extract_fit_engine() %>% vip()
final_rf %>% workflows::extract_fit_engine() %>% vip()
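For a tabular view instead of a plot, vip::vi() returns the same importance scores as a tibble, which is handy for reporting:

final_rf %>% workflows::extract_fit_engine() %>% vi()  # importance scores as a tibble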
The modeling pipeline combined feature transformations, class balancing, and hyperparameter tuning to improve predictions. Key highlights:

- A Yeo-Johnson transformation and normalization tame the heavy-tailed count predictors.
- step_smote() balances the classes inside the training data only, so the test split stays untouched.
- Both xgboost and ranger were tuned over regular grids with 10-fold cross-validation and finalized on the held-out split.

Performance results: both models reached an accuracy and ROC AUC of 1 on the test set, and the random forest produced far better-calibrated probabilities (Brier score 0.0004 versus 0.126 for xgboost). These outcomes show how systematic preprocessing and tuning come together in a tidymodels spam classifier built on tree ensembles.