The goal is to automate building and tuning a classification model with h2o::h2o.automl, using the Himalayan expedition members data (TidyTuesday, 2020-09-22) to predict whether an expedition member died.

library(h2o)
## Warning: package 'h2o' was built under R version 4.2.3
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## Attaching package: 'h2o'
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## The following objects are masked from 'package:base':
## 
##     &&, %*%, %in%, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc
library(tidyverse)
## Warning: package 'readr' was built under R version 4.2.3
## Warning: package 'dplyr' was built under R version 4.2.3
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.0
## ✔ ggplot2   3.4.4     ✔ tibble    3.2.1
## ✔ lubridate 1.9.2     ✔ tidyr     1.3.0
## ✔ purrr     1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ lubridate::day()   masks h2o::day()
## ✖ dplyr::filter()    masks stats::filter()
## ✖ lubridate::hour()  masks h2o::hour()
## ✖ dplyr::lag()       masks stats::lag()
## ✖ lubridate::month() masks h2o::month()
## ✖ lubridate::week()  masks h2o::week()
## ✖ lubridate::year()  masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.1.1 ──
## ✔ broom        1.0.5     ✔ rsample      1.2.0
## ✔ dials        1.2.0     ✔ tune         1.1.2
## ✔ infer        1.0.6     ✔ workflows    1.1.3
## ✔ modeldata    1.3.0     ✔ workflowsets 1.0.1
## ✔ parsnip      1.1.1     ✔ yardstick    1.3.0
## ✔ recipes      1.0.9
## Warning: package 'infer' was built under R version 4.2.3
## Warning: package 'modeldata' was built under R version 4.2.3
## Warning: package 'recipes' was built under R version 4.2.3
## Warning: package 'yardstick' was built under R version 4.2.3
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Use tidymodels_prefer() to resolve common conflicts.
library(tidyquant)
## Loading required package: PerformanceAnalytics
## Loading required package: xts
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## 
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
## 
## 
## Attaching package: 'xts'
## 
## The following objects are masked from 'package:dplyr':
## 
##     first, last
## 
## 
## Attaching package: 'PerformanceAnalytics'
## 
## The following object is masked from 'package:graphics':
## 
##     legend
## 
## Loading required package: quantmod
## Loading required package: TTR
## 
## Attaching package: 'TTR'
## 
## The following object is masked from 'package:dials':
## 
##     momentum
## 
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo
expedition <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-22/members.csv')
## Rows: 76519 Columns: 21
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (10): expedition_id, member_id, peak_id, peak_name, season, sex, citizen...
## dbl  (5): year, age, highpoint_metres, death_height_metres, injury_height_me...
## lgl  (6): hired, success, solo, oxygen_used, died, injured
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
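
Several of the death- and injury-related columns are only populated for a small share of climbers, which motivates the drops in the cleaning step below. A quick per-column missing-value count (a sketch, output not shown):

expedition %>% 
    summarise(across(everything(), ~ sum(is.na(.x)))) %>% 
    glimpse()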

Cleaning Data

# Numeric columns, collected for reference (not used downstream)
numeric_vars_vec <- expedition %>% 
    select(year, age, highpoint_metres, death_height_metres, injury_height_metres) %>% 
    names()

data_clean <- expedition %>% 
    
    # Drop sparse death- and injury-detail columns
    select(-c(death_height_metres, injury_height_metres, death_cause, injury_type)) %>% 
    
    # Drop observations with missing values
    drop_na() %>% 
    
    # Recode the logical target as labels
    mutate(died = if_else(died, "died", "no")) %>% 
    
    # Convert the remaining logical variables to factors
    mutate(across(where(is.logical), factor))

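Before modeling it is worth checking how rare the positive class is. A quick tally (a sketch, output not shown) confirms that deaths make up only a small fraction of the records:

expedition %>% 
    count(died) %>% 
    mutate(prop = n / sum(n))
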
Split data

Note that the split below is taken from the raw expedition data; data_clean and recipe_obj are set up above for reference but are not carried into the H2O workflow, and H2O drops the unusable columns on its own during training (see the AutoML log below).

set.seed(1234)

data_split <- initial_split(expedition, strata = "died")
train_tbl <- training(data_split)
test_tbl <- testing(data_split)
recipe_obj <- recipe(died ~ ., data = train_tbl) %>%
    
    # Remove zero variance variables
    step_zv(all_predictors()) 

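If the recipe were carried forward, prepping and baking it would return the processed training set. A sketch, not part of the original run:

recipe_obj %>% 
    prep() %>% 
    bake(new_data = NULL) %>% 
    glimpse()
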
Model

# Initialize the H2O cluster
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         6 days 23 hours 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    4 months and 9 days 
##     H2O cluster name:           H2O_started_from_R_Reed_fyb567 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   0.04 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.2.2 (2022-10-31)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (4 months and 9 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
split_h2o <- h2o.splitFrame(as.h2o(train_tbl), ratios = c(0.85), seed = 2567)
train_h2o <- split_h2o[[1]]
valid_h2o <- split_h2o[[2]]
test_h2o  <- as.h2o(test_tbl)
y <- "died"
x <- setdiff(names(train_tbl), y)

# 30-second AutoML run: 5-fold CV, leaderboard scored on the held-out test frame
auto_ml_models_h2o <- h2o.automl(
  x = x, 
  y = y, 
  training_frame    = train_h2o,
  validation_frame  = valid_h2o,
  leaderboard_frame = test_h2o, 
  max_runtime_secs  = 30, 
  nfolds            = 5,
  seed              = 3456
)
## 10:48:49.611: User specified a validation frame with cross-validation still enabled. Please note that the models will still be validated using cross-validation only, the validation frame will be used to provide purely informative validation metrics on the trained models.
## 10:48:49.664: _train param, Dropping bad and constant columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
## 10:48:56.534: _train param, Dropping bad and constant columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
## 10:49:04.959: _train param, Dropping bad and constant columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
## 10:49:07.691: GBM_1_AutoML_23_20240430_104849 [GBM def_5] failed: water.exceptions.H2OModelBuilderIllegalArgumentException: Illegal argument(s) for GBM model: GBM_1_AutoML_23_20240430_104849_cv_1.  Details: ERRR on field: _ntrees: The tree model will not fit in the driver node's memory (1.0 KB per tree x 10000 > Zero  ) - try decreasing ntrees and/or max_depth or increasing min_rows!
## 10:49:07.716: _train param, Dropping unused columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
## 10:49:18.403: _train param, Dropping bad and constant columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
auto_ml_models_h2o@leaderboard
##                                                   model_id       auc
## 1 StackedEnsemble_BestOfFamily_1_AutoML_23_20240430_104849 0.9926976
## 2                      XGBoost_1_AutoML_23_20240430_104849 0.9926976
## 3                          GLM_1_AutoML_23_20240430_104849 0.6598395
##       logloss      aucpr mean_per_class_error       rmse          mse
## 1 0.003824179 0.98755542           0.01666667 0.02167596 0.0004698474
## 2 0.006979407 0.98755542           0.01666667 0.02218753 0.0004922865
## 3 0.072959431 0.04539119           0.41118966 0.11779778 0.0138763175
## 
## [3 rows x 7 columns]
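
Any model on the leaderboard can be retrieved by its model_id, for example the standalone XGBoost in second place. A side step, not used below:

xgb_id    <- as.data.frame(auto_ml_models_h2o@leaderboard)$model_id[2]
xgb_model <- h2o.getModel(xgb_id)
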
auto_ml_models_h2o@leader
## Model Details:
## ==============
## 
## H2OBinomialModel: stackedensemble
## Model ID:  StackedEnsemble_BestOfFamily_1_AutoML_23_20240430_104849 
## Model Summary for Stacked Ensemble: 
##                                    key            value
## 1                    Stacking strategy cross_validation
## 2 Number of base models (used / total)              1/2
## 3 # XGBoost base models (used / total)              1/1
## 4     # GLM base models (used / total)              0/1
## 5                Metalearner algorithm              GLM
## 6   Metalearner fold assignment scheme           Random
## 7                   Metalearner nfolds                5
## 8              Metalearner fold_column               NA
## 9   Custom metalearner hyperparameters             None
## 
## 
## H2OBinomialMetrics: stackedensemble
## ** Reported on training data. **
## 
## MSE:  0.0006041881
## RMSE:  0.02458024
## LogLoss:  0.004874646
## Mean Per-Class Error:  0.01986755
## AUC:  0.9920701
## AUCPR:  0.9780595
## Gini:  0.9841403
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##        FALSE TRUE    Error     Rate
## FALSE   9769    0 0.000000  =0/9769
## TRUE       6  145 0.039735   =6/151
## Totals  9775  145 0.000605  =6/9920
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold       value idx
## 1                       max f1  0.990079    0.979730  14
## 2                       max f2  0.990079    0.967957  14
## 3                 max f0point5  0.990079    0.991792  14
## 4                 max accuracy  0.990079    0.999395  14
## 5                max precision  0.999268    1.000000   0
## 6                   max recall  0.000530    1.000000  52
## 7              max specificity  0.999268    1.000000   0
## 8             max absolute_mcc  0.990079    0.979630  14
## 9   max min_per_class_accuracy  0.001192    0.980132  19
## 10 max mean_per_class_accuracy  0.001192    0.986125  19
## 11                     max tns  0.999268 9769.000000   0
## 12                     max fns  0.999268  136.000000   0
## 13                     max fps  0.000523 9769.000000  53
## 14                     max tps  0.000530  151.000000  52
## 15                     max tnr  0.999268    1.000000   0
## 16                     max fnr  0.999268    0.900662   0
## 17                     max fpr  0.000523    1.000000  53
## 18                     max tpr  0.000530    1.000000  52
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: stackedensemble
## ** Reported on validation data. **
## 
## MSE:  0.0004607463
## RMSE:  0.021465
## LogLoss:  0.003949776
## Mean Per-Class Error:  0.0141844
## AUC:  0.9922614
## AUCPR:  0.9790211
## Gini:  0.9845228
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##        FALSE TRUE    Error     Rate
## FALSE   8536    0 0.000000  =0/8536
## TRUE       4  137 0.028369   =4/141
## Totals  8540  137 0.000461  =4/8677
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold       value idx
## 1                       max f1  0.990079    0.985612  12
## 2                       max f2  0.990079    0.977175  12
## 3                 max f0point5  0.990079    0.994194  12
## 4                 max accuracy  0.990079    0.999539  12
## 5                max precision  0.999268    1.000000   0
## 6                   max recall  0.000530    1.000000  49
## 7              max specificity  0.999268    1.000000   0
## 8             max absolute_mcc  0.990079    0.985483  12
## 9   max min_per_class_accuracy  0.001427    0.978723  15
## 10 max mean_per_class_accuracy  0.001427    0.987546  15
## 11                     max tns  0.999268 8536.000000   0
## 12                     max fns  0.999268  122.000000   0
## 13                     max fps  0.000523 8536.000000  50
## 14                     max tps  0.000530  141.000000  49
## 15                     max tnr  0.999268    1.000000   0
## 16                     max fnr  0.999268    0.865248   0
## 17                     max fpr  0.000523    1.000000  50
## 18                     max tpr  0.000530    1.000000  49
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: stackedensemble
## ** Reported on cross-validation data. **
## ** 5-fold cross-validation on training data (Metrics computed for combined holdout predictions) **
## 
## MSE:  0.0005154047
## RMSE:  0.02270253
## LogLoss:  0.00451254
## Mean Per-Class Error:  0.01798561
## AUC:  0.9830846
## AUCPR:  0.9712374
## Gini:  0.9661691
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##        FALSE TRUE    Error       Rate
## FALSE  48017    0 0.000000   =0/48017
## TRUE      25  670 0.035971    =25/695
## Totals 48042  670 0.000513  =25/48712
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold        value idx
## 1                       max f1  0.961868     0.981685 216
## 2                       max f2  0.961868     0.971014 216
## 3                 max f0point5  0.961868     0.992593 216
## 4                 max accuracy  0.961868     0.999487 216
## 5                max precision  0.997918     1.000000   0
## 6                   max recall  0.000471     1.000000 399
## 7              max specificity  0.997918     1.000000   0
## 8             max absolute_mcc  0.961868     0.981594 216
## 9   max min_per_class_accuracy  0.001278     0.971223 256
## 10 max mean_per_class_accuracy  0.001662     0.982996 238
## 11                     max tns  0.997918 48017.000000   0
## 12                     max fns  0.997918   694.000000   0
## 13                     max fps  0.000471 48017.000000 399
## 14                     max tps  0.000471   695.000000 399
## 15                     max tnr  0.997918     1.000000   0
## 16                     max fnr  0.997918     0.998561   0
## 17                     max fpr  0.000471     1.000000 399
## 18                     max tpr  0.000471     1.000000 399
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## Cross-Validation Metrics Summary: 
##               mean       sd cv_1_valid cv_2_valid cv_3_valid cv_4_valid
## accuracy  0.999486 0.000282   0.999693   0.999380   0.999797   0.999483
## auc       0.985555 0.013253   0.991555   0.987596   0.999918   0.984481
## err       0.000514 0.000282   0.000307   0.000620   0.000203   0.000517
## err_count 5.000000 2.738613   3.000000   6.000000   2.000000   5.000000
## f0point5  0.992461 0.004409   0.995671   0.990415   0.997089   0.993103
##           cv_5_valid
## accuracy    0.999077
## auc         0.964224
## err         0.000923
## err_count   9.000000
## f0point5    0.986025
## 
## ---
##                        mean        sd cv_1_valid cv_2_valid cv_3_valid
## precision          1.000000  0.000000   1.000000   1.000000   1.000000
## r2                 0.963066  0.020856   0.978234   0.953183   0.985225
## recall             0.963689  0.020634   0.978723   0.953846   0.985611
## residual_deviance 87.436554 40.447500  60.203827  98.165150  43.186370
## rmse               0.022000  0.006274   0.017597   0.024906   0.014344
## specificity        1.000000  0.000000   1.000000   1.000000   1.000000
##                   cv_4_valid cv_5_valid
## precision           1.000000   1.000000
## r2                  0.965844   0.932844
## recall              0.966443   0.933824
## residual_deviance  87.124510 148.502910
## rmse                0.022760   0.030392
## specificity         1.000000   1.000000
best_model <- auto_ml_models_h2o@leader


Examine the output of h2o.automl

auto_ml_models_h2o %>% typeof()
## [1] "S4"
auto_ml_models_h2o %>% slotNames()
## [1] "project_name"   "leader"         "leaderboard"    "event_log"     
## [5] "modeling_steps" "training_info"

Make predictions

predictions <- h2o.predict(best_model, newdata = test_h2o)
predictions_tbl <- predictions %>%
  as_tibble()

predictions_tbl %>%
  bind_cols(test_tbl)
## # A tibble: 19,130 × 24
##    predict FALSE.   TRUE. expedition_id member_id peak_id peak_name  year season
##    <fct>    <dbl>   <dbl> <chr>         <chr>     <chr>   <chr>     <dbl> <chr> 
##  1 FALSE    0.999 6.48e-4 AMAD78301     AMAD7830… AMAD    Ama Dabl…  1978 Autumn
##  2 FALSE    0.999 6.48e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  3 FALSE    0.999 6.72e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  4 FALSE    0.999 6.72e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  5 FALSE    0.999 6.72e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  6 FALSE    0.999 6.48e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  7 FALSE    0.999 6.48e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  8 FALSE    0.999 6.48e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
##  9 FALSE    0.999 6.72e-4 AMAD79101     AMAD7910… AMAD    Ama Dabl…  1979 Spring
## 10 FALSE    0.999 6.48e-4 AMAD79301     AMAD7930… AMAD    Ama Dabl…  1979 Autumn
## # ℹ 19,120 more rows
## # ℹ 15 more variables: sex <chr>, age <dbl>, citizenship <chr>,
## #   expedition_role <chr>, hired <lgl>, highpoint_metres <dbl>, success <lgl>,
## #   solo <lgl>, oxygen_used <lgl>, died <lgl>, death_cause <chr>,
## #   death_height_metres <dbl>, injured <lgl>, injury_type <chr>,
## #   injury_height_metres <dbl>
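
To reuse the leader later without re-running AutoML, it can be saved to disk and reloaded. A sketch; the path is a placeholder:

# Returns the full path of the saved model
model_path <- h2o.saveModel(best_model, path = "h2o_models", force = TRUE)

# Later, or in a fresh session with a running cluster
best_model <- h2o.loadModel(model_path)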

Evaluate model

?h2o.performance
performance_h2o <- h2o.performance(best_model, newdata = test_h2o)
typeof(performance_h2o)
## [1] "S4"
slotNames(performance_h2o)
## [1] "algorithm" "on_train"  "on_valid"  "on_xval"   "metrics"
performance_h2o@metrics
## $model
## $model$`__meta`
## $model$`__meta`$schema_version
## [1] 3
## 
## $model$`__meta`$schema_name
## [1] "ModelKeyV3"
## 
## $model$`__meta`$schema_type
## [1] "Key<Model>"
## 
## 
## $model$name
## [1] "StackedEnsemble_BestOfFamily_1_AutoML_23_20240430_104849"
## 
## $model$type
## [1] "Key<Model>"
## 
## $model$URL
## [1] "/3/Models/StackedEnsemble_BestOfFamily_1_AutoML_23_20240430_104849"
## 
## 
## $model_checksum
## [1] "-995355060137004016"
## 
## $frame
## $frame$name
## [1] "test_tbl_sid_8238_3"
## 
## 
## $frame_checksum
## [1] "678340420273909232"
## 
## $description
## NULL
## 
## $scoring_time
## [1] 1.714489e+12
## 
## $predictions
## NULL
## 
## $MSE
## [1] 0.0004698474
## 
## $RMSE
## [1] 0.02167596
## 
## $nobs
## [1] 19130
## 
## $custom_metric_name
## NULL
## 
## $custom_metric_value
## [1] 0
## 
## $r2
## [1] 0.9662339
## 
## $logloss
## [1] 0.003824179
## 
## $AUC
## [1] 0.9926976
## 
## $pr_auc
## [1] 0.9875554
## 
## $Gini
## [1] 0.9853951
## 
## $mean_per_class_error
## [1] 0.01666667
## 
## $domain
## [1] "FALSE" "TRUE" 
## 
## $cm
## $cm$`__meta`
## $cm$`__meta`$schema_version
## [1] 3
## 
## $cm$`__meta`$schema_name
## [1] "ConfusionMatrixV3"
## 
## $cm$`__meta`$schema_type
## [1] "ConfusionMatrix"
## 
## 
## $cm$table
## Confusion Matrix: Row labels: Actual class; Column labels: Predicted class
##        FALSE TRUE  Error         Rate
## FALSE  18860    0 0.0000 = 0 / 18,860
## TRUE       9  261 0.0333 =    9 / 270
## Totals 18869  261 0.0005 = 9 / 19,130
## 
## 
## $thresholds_and_metric_scores
## Metrics for Thresholds: Binomial metrics as a function of classification thresholds
##   threshold       f1       f2 f0point5 accuracy precision   recall specificity
## 1  0.999268 0.205980 0.139514 0.393401 0.987507  1.000000 0.114815    1.000000
## 2  0.999240 0.235294 0.161290 0.434783 0.987768  1.000000 0.133333    1.000000
## 3  0.998555 0.383234 0.279720 0.608365 0.989232  1.000000 0.237037    1.000000
## 4  0.998499 0.402367 0.296167 0.627306 0.989441  1.000000 0.251852    1.000000
## 5  0.997793 0.872651 0.810706 0.944846 0.996811  1.000000 0.774074    1.000000
##   absolute_mcc min_per_class_accuracy mean_per_class_accuracy   tns fns fps tps
## 1     0.336717               0.114815                0.557407 18860 239   0  31
## 2     0.362904               0.133333                0.566667 18860 234   0  36
## 3     0.484227               0.237037                0.618519 18860 206   0  64
## 4     0.499182               0.251852                0.625926 18860 202   0  68
## 5     0.878395               0.774074                0.887037 18860  61   0 209
##        tnr      fnr      fpr      tpr idx
## 1 1.000000 0.885185 0.000000 0.114815   0
## 2 1.000000 0.866667 0.000000 0.133333   1
## 3 1.000000 0.762963 0.000000 0.237037   2
## 4 1.000000 0.748148 0.000000 0.251852   3
## 5 1.000000 0.225926 0.000000 0.774074   4
## 
## ---
##    threshold       f1       f2 f0point5 accuracy precision   recall specificity
## 56  0.000549 0.183442 0.358775 0.123223 0.875745  0.101098 0.988889    0.874125
## 57  0.000549 0.085797 0.189792 0.055426 0.702561  0.044844 0.988889    0.698462
## 58  0.000542 0.074095 0.166521 0.047648 0.651176  0.038489 0.988889    0.646341
## 59  0.000530 0.073422 0.165161 0.047203 0.647726  0.038127 0.988889    0.642842
## 60  0.000530 0.029189 0.069912 0.018445 0.061160  0.014811 1.000000    0.047720
## 61  0.000523 0.027835 0.066799 0.017580 0.014114  0.014114 1.000000    0.000000
##    absolute_mcc min_per_class_accuracy mean_per_class_accuracy   tns fns   fps
## 56     0.295114               0.874125                0.931507 16486   3  2374
## 57     0.175120               0.698462                0.843676 13173   3  5687
## 58     0.155863               0.646341                0.817615 12190   3  6670
## 59     0.154691               0.642842                0.815865 12124   3  6736
## 60     0.026585               0.047720                0.523860   900   0 17960
## 61     0.000000               0.000000                0.500000     0   0 18860
##    tps      tnr      fnr      fpr      tpr idx
## 56 267 0.874125 0.011111 0.125875 0.988889  55
## 57 267 0.698462 0.011111 0.301538 0.988889  56
## 58 267 0.646341 0.011111 0.353659 0.988889  57
## 59 267 0.642842 0.011111 0.357158 0.988889  58
## 60 270 0.047720 0.000000 0.952280 1.000000  59
## 61 270 0.000000 0.000000 1.000000 1.000000  60
## 
## $max_criteria_and_metric_scores
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold        value idx
## 1                       max f1  0.990079     0.983051  17
## 2                       max f2  0.001531     0.976419  19
## 3                 max f0point5  0.990079     0.993151  17
## 4                 max accuracy  0.990079     0.999530  17
## 5                max precision  0.999268     1.000000   0
## 6                   max recall  0.000530     1.000000  59
## 7              max specificity  0.999268     1.000000   0
## 8             max absolute_mcc  0.990079     0.982958  17
## 9   max min_per_class_accuracy  0.001192     0.988889  23
## 10 max mean_per_class_accuracy  0.001427     0.991691  20
## 11                     max tns  0.999268 18860.000000   0
## 12                     max fns  0.999268   239.000000   0
## 13                     max fps  0.000523 18860.000000  60
## 14                     max tps  0.000530   270.000000  59
## 15                     max tnr  0.999268     1.000000   0
## 16                     max fnr  0.999268     0.885185   0
## 17                     max fpr  0.000523     1.000000  60
## 18                     max tpr  0.000530     1.000000  59
## 
## $gains_lift_table
## Gains/Lift Table: Avg response rate:  1.41 %, avg score:  1.42 %
##   group cumulative_data_fraction lower_threshold      lift cumulative_lift
## 1     1               0.01092525        0.997793 70.851852       70.851852
## 2     2               0.02117094        0.001192 20.966364       46.709739
## 3     3               0.03193936        0.000991  0.000000       30.961448
## 4     4               0.04511239        0.000960  0.000000       21.920561
## 5     5               0.05101934        0.000790  0.000000       19.382628
## 6     6               0.12409827        0.000648  0.000000        7.968595
## 7     7               0.31123889        0.000549  0.000000        3.177266
## 8     8               0.95295348        0.000530  0.017315        1.049369
## 9     9               1.00000000        0.000523  0.000000        1.000000
##   response_rate    score cumulative_response_rate cumulative_score capture_rate
## 1      1.000000 0.998162                 1.000000         0.998162     0.774074
## 2      0.295918 0.264726                 0.659259         0.643215     0.214815
## 3      0.000000 0.001061                 0.436989         0.426711     0.000000
## 4      0.000000 0.000961                 0.309386         0.302390     0.000000
## 5      0.000000 0.000814                 0.273566         0.267474     0.000000
## 6      0.000000 0.000670                 0.112468         0.110358     0.000000
## 7      0.000000 0.000554                 0.044844         0.044335     0.000000
## 8      0.000244 0.000531                 0.014811         0.014837     0.011111
## 9      0.000000 0.000523                 0.014114         0.014164     0.000000
##   cumulative_capture_rate        gain cumulative_gain kolmogorov_smirnov
## 1                0.774074 6985.185185     6985.185185           0.774074
## 2                0.988889 1996.636432     4570.973937           0.981572
## 3                0.988889 -100.000000     2996.144754           0.970649
## 4                0.988889 -100.000000     2092.056135           0.957288
## 5                0.988889 -100.000000     1838.262750           0.951296
## 6                0.988889 -100.000000      696.859496           0.877171
## 7                0.988889 -100.000000      217.726645           0.687351
## 8                1.000000  -98.268528        4.936917           0.047720
## 9                1.000000 -100.000000        0.000000           0.000000
## 
## $residual_deviance
## [1] 146.3131
## 
## $null_deviance
## [1] 2836.923
## 
## $AIC
## [1] 150.3131
## 
## $loglikelihood
## [1] 0
## 
## $null_degrees_of_freedom
## [1] 19129
## 
## $residual_degrees_of_freedom
## [1] 19128
h2o.auc(performance_h2o)
## [1] 0.9926976
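
As a cross-check, the same AUC can be computed with yardstick on the prediction tibble. A sketch; it assumes the probability column is named TRUE. as in the printout above, and it should agree with h2o.auc():

predictions_tbl %>% 
    bind_cols(test_tbl %>% select(died)) %>% 
    # Make the positive class ("TRUE") the first factor level, yardstick's default event level
    mutate(died = factor(died, levels = c("TRUE", "FALSE"))) %>% 
    roc_auc(truth = died, `TRUE.`)
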
h2o.confusionMatrix(performance_h2o)
## Confusion Matrix (vertical: actual; across: predicted)  for max f1 @ threshold = 0.990079190848319:
##        FALSE TRUE    Error      Rate
## FALSE  18860    0 0.000000  =0/18860
## TRUE       9  261 0.033333    =9/270
## Totals 18869  261 0.000470  =9/19130
# No thresholds fall in this range for this model; see the sketch below for a range that does
h2o.metric(performance_h2o) %>% as_tibble() %>% filter(between(threshold, 0.41, 0.42))
## # A tibble: 0 × 20
## # ℹ 20 variables: threshold <dbl>, f1 <dbl>, f2 <dbl>, f0point5 <dbl>,
## #   accuracy <dbl>, precision <dbl>, recall <dbl>, specificity <dbl>,
## #   absolute_mcc <dbl>, min_per_class_accuracy <dbl>,
## #   mean_per_class_accuracy <dbl>, tns <dbl>, fns <dbl>, fps <dbl>, tps <dbl>,
## #   tnr <dbl>, fnr <dbl>, fpr <dbl>, tpr <dbl>, idx <int>
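
Because this model's informative thresholds sit near the extremes (the F1-optimal threshold above is roughly 0.99), a range that actually contains them is more useful to inspect. A sketch, with a column subset chosen for readability:

h2o.metric(performance_h2o) %>% 
    as_tibble() %>% 
    filter(between(threshold, 0.95, 1)) %>% 
    select(threshold, f1, precision, recall, tpr, fpr)

When the analysis is done, the cluster can be cleaned up and shut down. Optional housekeeping, not part of the run above:

# Remove all frames and models from the cluster, then stop it without prompting
h2o.removeAll()
h2o.shutdown(prompt = FALSE)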