Goal

Automate building and tuning a classification model that predicts employee attrition with h2o::h2o.automl, adjusting the AutoML settings (maximum number of models, excluded algorithms, and cross-validation folds) to explore alternative results.

Set up

Import data

library(h2o)
## Warning: package 'h2o' was built under R version 4.4.3
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## Attaching package: 'h2o'
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## The following objects are masked from 'package:base':
## 
##     %*%, %in%, &&, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ lubridate::day()   masks h2o::day()
## ✖ dplyr::filter()    masks stats::filter()
## ✖ lubridate::hour()  masks h2o::hour()
## ✖ dplyr::lag()       masks stats::lag()
## ✖ lubridate::month() masks h2o::month()
## ✖ lubridate::week()  masks h2o::week()
## ✖ lubridate::year()  masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## Warning: package 'tidymodels' was built under R version 4.4.2
## ── Attaching packages ────────────────────────────────────── tidymodels 1.2.0 ──
## ✔ broom        1.0.8     ✔ rsample      1.2.1
## ✔ dials        1.3.0     ✔ tune         1.2.1
## ✔ infer        1.0.7     ✔ workflows    1.1.4
## ✔ modeldata    1.4.0     ✔ workflowsets 1.1.0
## ✔ parsnip      1.2.1     ✔ yardstick    1.3.2
## ✔ recipes      1.1.0
## Warning: package 'broom' was built under R version 4.4.3
## Warning: package 'dials' was built under R version 4.4.2
## Warning: package 'infer' was built under R version 4.4.2
## Warning: package 'modeldata' was built under R version 4.4.2
## Warning: package 'parsnip' was built under R version 4.4.2
## Warning: package 'tune' was built under R version 4.4.2
## Warning: package 'workflows' was built under R version 4.4.2
## Warning: package 'workflowsets' was built under R version 4.4.2
## Warning: package 'yardstick' was built under R version 4.4.2
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Search for functions across packages at https://www.tidymodels.org/find/
library(tidyquant)
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo 
## ── Attaching core tidyquant packages ──────────────────────── tidyquant 1.0.9 ──
## ✔ PerformanceAnalytics 2.0.4      ✔ TTR                  0.24.4
## ✔ quantmod             0.4.26     ✔ xts                  0.14.0
## ── Conflicts ────────────────────────────────────────── tidyquant_conflicts() ──
## ✖ zoo::as.Date()                 masks base::as.Date()
## ✖ zoo::as.Date.numeric()         masks base::as.Date.numeric()
## ✖ scales::col_factor()           masks readr::col_factor()
## ✖ lubridate::day()               masks h2o::day()
## ✖ scales::discard()              masks purrr::discard()
## ✖ dplyr::filter()                masks stats::filter()
## ✖ xts::first()                   masks dplyr::first()
## ✖ recipes::fixed()               masks stringr::fixed()
## ✖ lubridate::hour()              masks h2o::hour()
## ✖ dplyr::lag()                   masks stats::lag()
## ✖ xts::last()                    masks dplyr::last()
## ✖ PerformanceAnalytics::legend() masks graphics::legend()
## ✖ TTR::momentum()                masks dials::momentum()
## ✖ lubridate::month()             masks h2o::month()
## ✖ yardstick::spec()              masks readr::spec()
## ✖ quantmod::summary()            masks h2o::summary(), base::summary()
## ✖ lubridate::week()              masks h2o::week()
## ✖ lubridate::year()              masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
data <- read_csv("../00_data/data_wrangled/data_clean.csv") %>%
  mutate(across(where(is.character), factor))
## Rows: 1470 Columns: 32
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr  (8): Attrition, BusinessTravel, Department, EducationField, Gender, Job...
## dbl (24): Age, DailyRate, DistanceFromHome, Education, EmployeeNumber, Envir...
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
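
Before splitting, it is worth checking how imbalanced the target is, since that context matters when reading the AUC and confusion matrix later. A minimal check using the Attrition column from the data above:

# Inspect the class balance of the target variable
data %>%
  count(Attrition) %>%
  mutate(prop = n / sum(n))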

Split data

set.seed(5678)

data_split <- initial_split(data, strata = "Attrition")
train_tbl <- training(data_split)
test_tbl  <- testing(data_split)
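
Because the split is stratified on Attrition, both partitions should carry roughly the same class proportions; a quick sanity check on the two tibbles:

# Confirm the stratified split kept similar Attrition proportions
bind_rows(
  train_tbl %>% count(Attrition) %>% mutate(set = "train"),
  test_tbl  %>% count(Attrition) %>% mutate(set = "test")
) %>%
  group_by(set) %>%
  mutate(prop = n / sum(n)) %>%
  ungroup()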

Preprocessing Recipe

recipe_obj <- recipe(Attrition ~ ., data = train_tbl) %>%
  step_zv(all_predictors()) %>%
  step_normalize(all_numeric_predictors())
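
Note that the recipe is only defined here; the raw train_tbl and test_tbl are what get converted to H2O frames below, so the zero-variance filter and normalization never reach the AutoML run. If they are meant to, the recipe has to be prepped on the training data and baked onto each split first. A minimal sketch (the *_prep object names are illustrative, not from the original code):

# Estimate the preprocessing steps on the training data only
recipe_prep <- prep(recipe_obj, training = train_tbl)

# Apply the same fitted transformations to both splits
train_prep_tbl <- bake(recipe_prep, new_data = train_tbl)
test_prep_tbl  <- bake(recipe_prep, new_data = test_tbl)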

Model

h2o.init()
## 
## H2O is not running yet, starting it now...
## 
## Note:  In case of errors look at the following log files:
##     C:\Users\adamc\AppData\Local\Temp\RtmpKiHoKC\file32a835c7c84/h2o_adamc_started_from_r.out
##     C:\Users\adamc\AppData\Local\Temp\RtmpKiHoKC\file32a8796363f4/h2o_adamc_started_from_r.err
## 
## 
## Starting H2O JVM and connecting:  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         4 seconds 45 milliseconds 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    1 year, 4 months and 3 days 
##     H2O cluster name:           H2O_started_from_R_adamc_dxc201 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   3.47 GB 
##     H2O cluster total cores:    12 
##     H2O cluster allowed cores:  12 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.4.1 (2024-06-14 ucrt)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (1 year, 4 months and 3 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
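
The text progress bars that H2O prints while converting frames and training clutter a knitted document; they can be turned off before the conversions below (and re-enabled later with h2o.show_progress()):

# Suppress H2O progress bars in the rendered output
h2o.no_progress()
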
split_h2o <- h2o.splitFrame(as.h2o(train_tbl), ratios = c(0.80), seed = 4321)
train_h2o <- split_h2o[[1]]
valid_h2o <- split_h2o[[2]]
test_h2o  <- as.h2o(test_tbl)
y <- "Attrition"
x <- setdiff(names(train_tbl), y)

start_time <- Sys.time()

models_h2o <- h2o.automl(
  x = x,
  y = y,
  training_frame    = train_h2o,
  validation_frame  = valid_h2o,
  leaderboard_frame = test_h2o,
  max_models        = 12,
  exclude_algos     = c("StackedEnsemble"),
  nfolds            = 3,
  seed              = 6543
)
## 20:57:32.643: User specified a validation frame with cross-validation still enabled. Please note that the models will still be validated using cross-validation only, the validation frame will be used to provide purely informative validation metrics on the trained models.
## 20:57:32.662: AutoML: XGBoost is not available; skipping it.
end_time <- Sys.time()
build_time <- difftime(end_time, start_time, units = "secs")  # force seconds so the unit reported below is correct

Leaderboard and Best Model

models_h2o@leaderboard
##                                      model_id       auc   logloss     aucpr
## 1              GLM_1_AutoML_1_20250423_205732 0.8084142 0.3449359 0.5606262
## 2              GBM_1_AutoML_1_20250423_205732 0.7852751 0.3586866 0.5420927
## 3 GBM_grid_1_AutoML_1_20250423_205732_model_1 0.7808522 0.3576500 0.5603714
## 4              GBM_3_AutoML_1_20250423_205732 0.7710895 0.3647481 0.5226752
## 5              GBM_4_AutoML_1_20250423_205732 0.7638619 0.3624771 0.5382197
## 6 GBM_grid_1_AutoML_1_20250423_205732_model_2 0.7607875 0.3722016 0.4772664
##   mean_per_class_error      rmse       mse
## 1            0.2721683 0.3184301 0.1013977
## 2            0.2716828 0.3272289 0.1070787
## 3            0.2902104 0.3234541 0.1046225
## 4            0.2816343 0.3281420 0.1076772
## 5            0.2816343 0.3246604 0.1054044
## 6            0.2758091 0.3322451 0.1103868
## 
## [12 rows x 7 columns]
best_model <- models_h2o@leader
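
Only the first rows of the leaderboard print by default. The full table can be pulled into R for inspection, and h2o.get_best_model() can select a leader by algorithm or metric if something other than the overall winner is wanted; the gbm/auc choice below is just an example:

# Full leaderboard as a tibble for further inspection or plotting
leaderboard_tbl <- models_h2o@leaderboard %>% as_tibble()

# Example: best GBM ranked by AUC, rather than the overall leader (a GLM)
best_gbm <- h2o.get_best_model(models_h2o, algorithm = "gbm", criterion = "auc")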

Save and Load Best Model

dir.create("h2o_models", showWarnings = FALSE)
best_model_path <- h2o.saveModel(best_model, path = "h2o_models/", force = TRUE)
best_model <- h2o.loadModel(best_model_path)
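
h2o.saveModel() writes a binary model that can only be reloaded by the same H2O version. For longer-term storage or deployment outside R, exporting a MOJO is the more portable route; a small sketch reusing the same directory:

# Optional: also export a version-independent MOJO artifact
best_model_mojo_path <- h2o.save_mojo(best_model, path = "h2o_models/", force = TRUE)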

Make Predictions

predictions <- h2o.predict(best_model, newdata = test_h2o) %>%
  as_tibble() %>%
  bind_cols(test_tbl)
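
With the H2O predictions bound back onto test_tbl, the yardstick functions already loaded with tidymodels can score the model directly in R. The sketch below assumes the positive class is "Yes", which sits second in the factor level order, and uses the Yes probability column that h2o.predict() returns for a binomial model:

# ROC AUC from the prediction tibble, treating "Yes" (attrition) as the event
predictions %>%
  roc_auc(truth = Attrition, Yes, event_level = "second")

# Accuracy of the default (max-F1) class predictions
predictions %>%
  accuracy(truth = Attrition, estimate = predict)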

Model Evaluation

performance_h2o <- h2o.performance(best_model, newdata = test_h2o)
h2o_auc <- h2o.auc(performance_h2o)
conf_mat <- h2o.confusionMatrix(performance_h2o)

h2o_auc
## [1] 0.8084142
conf_mat
## Confusion Matrix (vertical: actual; across: predicted)  for max f1 @ threshold = 0.372540744000785:
##         No Yes    Error     Rate
## No     285  24 0.077670  =24/309
## Yes     28  32 0.466667   =28/60
## Totals 313  56 0.140921  =52/369
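
The confusion matrix above is reported at the max-F1 threshold H2O selected (about 0.37), where almost half of the true attrition cases are missed. The same performance object exposes other cutoffs, which is useful when a missed leaver costs more than a false alarm; the 0.30 value below is just an illustration:

# Confusion matrix at a hand-picked threshold instead of max F1
h2o.confusionMatrix(performance_h2o, thresholds = 0.30)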

Comparison to Previous Model

previous_auc <- 0.81  # placeholder AUC for the previously developed model, used as a benchmark
cat("Previous Model AUC:", previous_auc, "\n")
## Previous Model AUC: 0.81
cat("New H2O Model AUC:", h2o_auc, "\n")
## New H2O Model AUC: 0.8084142

Conclusion

if (h2o_auc > previous_auc) {
  cat("The h2o model shows improvement with an AUC of", h2o_auc, 
      "over the previous model's AUC of", previous_auc, "\n")
} else {
  cat("The previous model outperformed the current model with an AUC of", previous_auc, 
      "compared to the h2o model's", h2o_auc, "\n")
}
## The previous model outperformed the current model with an AUC of 0.81 compared to the h2o model's 0.8084142
cat("Model training and selection using h2o.automl took", round(as.numeric(build_time), 2), "seconds.\n")
## Model training and selection using h2o.automl took 2.72 seconds.
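
Once the model artifacts are saved, the local H2O cluster can be shut down to release its memory; this is optional, since the JVM also stops when the R session ends:

# Optional clean-up: stop the local H2O cluster without a confirmation prompt
h2o.shutdown(prompt = FALSE)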