The goal is to automate building and tuning a classification model to predict employee attrition using h2o::h2o.automl().

Set up

Import data

Import the cleaned data from Module 7.

library(h2o)
## Warning: package 'h2o' was built under R version 4.2.3
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## Attaching package: 'h2o'
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## The following objects are masked from 'package:base':
## 
##     %*%, %in%, &&, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc
library(tidyverse)
## Warning: package 'tidyverse' was built under R version 4.2.3
## Warning: package 'ggplot2' was built under R version 4.2.3
## Warning: package 'tibble' was built under R version 4.2.3
## Warning: package 'tidyr' was built under R version 4.2.3
## Warning: package 'readr' was built under R version 4.2.3
## Warning: package 'purrr' was built under R version 4.2.3
## Warning: package 'dplyr' was built under R version 4.2.3
## Warning: package 'stringr' was built under R version 4.2.3
## Warning: package 'forcats' was built under R version 4.2.3
## Warning: package 'lubridate' was built under R version 4.2.3
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.4.4     ✔ tibble    3.2.1
## ✔ lubridate 1.9.2     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ lubridate::day()   masks h2o::day()
## ✖ dplyr::filter()    masks stats::filter()
## ✖ lubridate::hour()  masks h2o::hour()
## ✖ dplyr::lag()       masks stats::lag()
## ✖ lubridate::month() masks h2o::month()
## ✖ lubridate::week()  masks h2o::week()
## ✖ lubridate::year()  masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## Warning: package 'tidymodels' was built under R version 4.2.3
## ── Attaching packages ────────────────────────────────────── tidymodels 1.1.1 ──
## ✔ broom        1.0.5     ✔ rsample      1.2.0
## ✔ dials        1.2.0     ✔ tune         1.1.2
## ✔ infer        1.0.6     ✔ workflows    1.1.3
## ✔ modeldata    1.3.0     ✔ workflowsets 1.0.1
## ✔ parsnip      1.1.1     ✔ yardstick    1.3.0
## ✔ recipes      1.0.9
## Warning: package 'broom' was built under R version 4.2.3
## Warning: package 'dials' was built under R version 4.2.3
## Warning: package 'infer' was built under R version 4.2.3
## Warning: package 'modeldata' was built under R version 4.2.3
## Warning: package 'parsnip' was built under R version 4.2.3
## Warning: package 'recipes' was built under R version 4.2.3
## Warning: package 'rsample' was built under R version 4.2.3
## Warning: package 'tune' was built under R version 4.2.3
## Warning: package 'workflows' was built under R version 4.2.3
## Warning: package 'workflowsets' was built under R version 4.2.3
## Warning: package 'yardstick' was built under R version 4.2.3
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Search for functions across packages at https://www.tidymodels.org/find/
library(tidyquant)
## Loading required package: PerformanceAnalytics
## Loading required package: xts
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## 
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
## 
## 
## Attaching package: 'xts'
## 
## The following objects are masked from 'package:dplyr':
## 
##     first, last
## 
## 
## Attaching package: 'PerformanceAnalytics'
## 
## The following object is masked from 'package:graphics':
## 
##     legend
## 
## Loading required package: quantmod
## Loading required package: TTR
## 
## Attaching package: 'TTR'
## 
## The following object is masked from 'package:dials':
## 
##     momentum
## 
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo
data <- read_csv("../00_data/data_wrangled/data_clean.csv") %>%
    
    # h2o requires all variables to be either numeric or factors
    mutate(across(where(is.character), factor))
## Rows: 1470 Columns: 32
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr  (8): Attrition, BusinessTravel, Department, EducationField, Gender, Job...
## dbl (24): Age, DailyRate, DistanceFromHome, Education, EmployeeNumber, Envir...
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
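
Before splitting, it is worth checking the balance of the target variable; attrition is a minority class (roughly 16% of employees), which matters when reading accuracy later. A quick check:

data %>% 
    count(Attrition) %>% 
    mutate(prop = n / sum(n))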

Split data

set.seed(1234)

data_split <- initial_split(data, strata = "Attrition")
train_tbl <- training(data_split)
test_tbl <- testing(data_split)
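
Because the split is stratified on Attrition, the train and test sets should carry roughly the same attrition rate. A small sanity check (a sketch, not evaluated above):

bind_rows(
    train_tbl %>% count(Attrition) %>% mutate(prop = n / sum(n), set = "train"),
    test_tbl  %>% count(Attrition) %>% mutate(prop = n / sum(n), set = "test")
)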

Recipes

recipe_obj <- recipe(Attrition ~ ., data = train_tbl) %>%
    
    # Remove zero variance variables
    step_zv(all_predictors()) 
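
As written, the recipe is only defined; the data passed to H2O below is the raw train_tbl and test_tbl. If you want the zero-variance filter actually applied first, a minimal sketch using the standard recipes workflow looks like this (the *_baked_tbl names are new to this sketch):

# Estimate the recipe on the training set, then apply it to both splits
recipe_prepped <- prep(recipe_obj, training = train_tbl)

train_baked_tbl <- bake(recipe_prepped, new_data = train_tbl)
test_baked_tbl  <- bake(recipe_prepped, new_data = test_tbl)

Swap the baked tibbles in for train_tbl and test_tbl in the H2O steps below if you take this route.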

Model

# Initialize h2o
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         33 minutes 433 milliseconds 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    4 months and 3 days 
##     H2O cluster name:           H2O_started_from_R_aesim_fhp551 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   3.31 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.2.2 (2022-10-31 ucrt)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (4 months and 3 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
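
One optional convenience when knitting: h2o prints text progress bars (visible throughout the output below), and h2o.no_progress() silences them in rendered documents. Shown commented out so the output below still matches what was actually run.

# h2o.no_progress()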
split_h2o <- h2o.splitFrame(as.h2o(train_tbl), ratios = c(0.85), seed = 2567)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |======================================================================| 100%
train_h2o <- split_h2o[[1]]
valid_h2o <- split_h2o[[2]]
test_h2o  <- as.h2o(test_tbl)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |======================================================================| 100%
y <- "Attrition"
x <- setdiff(names(train_tbl), y)

auto_ml_models_h2o <- h2o.automl(
    x = x, 
    y = y, 
    training_frame    = train_h2o, 
    validation_frame  = valid_h2o, 
    leaderboard_frame = test_h2o, 
    max_runtime_secs  = 30, 
    nfolds            = 5, 
    seed              = 3456)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |====                                                                  |   6%
## 18:48:56.927: User specified a validation frame with cross-validation still enabled. Please note that the models will still be validated using cross-validation only, the validation frame will be used to provide purely informative validation metrics on the trained models.
## 18:48:56.931: AutoML: XGBoost is not available; skipping it.
  |                                                                            
  |=============                                                         |  18%
  |                                                                            
  |====================                                                  |  28%
  |                                                                            
  |======================                                                |  31%
  |                                                                            
  |============================                                          |  40%
  |                                                                            
  |===================================                                   |  49%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |======================================================================| 100%
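
The 30-second budget above keeps the example fast. For a more thorough search, h2o.automl() also accepts max_models, exclude_algos, and sort_metric, among others. A sketch for reference (not evaluated here; the object name and argument values are illustrative):

auto_ml_longer_h2o <- h2o.automl(
    x = x,
    y = y,
    training_frame    = train_h2o,
    validation_frame  = valid_h2o,
    leaderboard_frame = test_h2o,
    max_runtime_secs  = 300,                 # larger time budget
    max_models        = 20,                  # or cap the number of models instead
    exclude_algos     = c("DeepLearning"),   # optionally skip slower algorithms
    sort_metric       = "AUC",               # metric used to rank the leaderboard
    nfolds            = 5,
    seed              = 3456
)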
auto_ml_models_h2o@leaderboard
##                                                  model_id       auc   logloss
## 1    DeepLearning_grid_1_AutoML_7_20240423_184856_model_1 0.8673139 0.3784866
## 2 StackedEnsemble_BestOfFamily_4_AutoML_7_20240423_184856 0.8640237 0.3079303
## 3            GBM_grid_1_AutoML_7_20240423_184856_model_14 0.8528587 0.3232253
## 4 StackedEnsemble_BestOfFamily_3_AutoML_7_20240423_184856 0.8515642 0.3030396
## 5 StackedEnsemble_BestOfFamily_2_AutoML_7_20240423_184856 0.8515642 0.3030082
## 6 StackedEnsemble_BestOfFamily_1_AutoML_7_20240423_184856 0.8513484 0.3028647
##       aucpr mean_per_class_error      rmse        mse
## 1 0.6842232            0.2307443 0.3144402 0.09887263
## 2 0.6915659            0.1920712 0.3018720 0.09112671
## 3 0.5962635            0.2474110 0.3120479 0.09737391
## 4 0.6703203            0.2004045 0.2948439 0.08693295
## 5 0.6703203            0.2004045 0.2948577 0.08694106
## 6 0.6696650            0.2004045 0.2949408 0.08699008
## 
## [40 rows x 7 columns]
auto_ml_models_h2o@leader
## Model Details:
## ==============
## 
## H2OBinomialModel: deeplearning
## Model ID:  DeepLearning_grid_1_AutoML_7_20240423_184856_model_1 
## Status of Neuron Layers: predicting Attrition, 2-class classification, bernoulli distribution, CrossEntropy loss, 6,202 weights/biases, 83.3 KB, 8,055 training samples, mini-batch size 1
##   layer units             type dropout       l1       l2 mean_rate rate_rms
## 1     1    59            Input  5.00 %       NA       NA        NA       NA
## 2     2   100 RectifierDropout 20.00 % 0.000000 0.000000  0.118115 0.316928
## 3     3     2          Softmax      NA 0.000000 0.000000  0.000426 0.000089
##   momentum mean_weight weight_rms mean_bias bias_rms
## 1       NA          NA         NA        NA       NA
## 2 0.000000    0.000493   0.110705  0.487810 0.020024
## 3 0.000000   -0.043842   0.559034 -0.000409 0.010533
## 
## 
## H2OBinomialMetrics: deeplearning
## ** Reported on training data. **
## ** Metrics reported on full training frame **
## 
## MSE:  0.0885245
## RMSE:  0.2975307
## LogLoss:  0.3354281
## Mean Per-Class Error:  0.1882202
## AUC:  0.8815982
## AUCPR:  0.7056277
## Gini:  0.7631963
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error     Rate
## No     729  52 0.066581  =52/781
## Yes     44  98 0.309859  =44/142
## Totals 773 150 0.104009  =96/923
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.118543   0.671233 142
## 2                       max f2  0.043056   0.696822 216
## 3                 max f0point5  0.204088   0.698925 100
## 4                 max accuracy  0.204088   0.902492 100
## 5                max precision  0.993111   1.000000   0
## 6                   max recall  0.000024   1.000000 399
## 7              max specificity  0.993111   1.000000   0
## 8             max absolute_mcc  0.141067   0.610621 128
## 9   max min_per_class_accuracy  0.043056   0.802817 216
## 10 max mean_per_class_accuracy  0.087788   0.819462 155
## 11                     max tns  0.993111 781.000000   0
## 12                     max fns  0.993111 141.000000   0
## 13                     max fps  0.000024 781.000000 399
## 14                     max tps  0.000024 142.000000 399
## 15                     max tnr  0.993111   1.000000   0
## 16                     max fnr  0.993111   0.992958   0
## 17                     max fpr  0.000024   1.000000 399
## 18                     max tpr  0.000024   1.000000 399
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: deeplearning
## ** Reported on validation data. **
## ** Metrics reported on full validation frame **
## 
## MSE:  0.1333056
## RMSE:  0.3651103
## LogLoss:  0.5772558
## Mean Per-Class Error:  0.281019
## AUC:  0.784016
## AUCPR:  0.5980084
## Gini:  0.568032
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error     Rate
## No     128  15 0.104895  =15/143
## Yes     16  19 0.457143   =16/35
## Totals 144  34 0.174157  =31/178
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.073982   0.550725  33
## 2                       max f2  0.005891   0.648536  98
## 3                 max f0point5  0.293960   0.659341  13
## 4                 max accuracy  0.293960   0.859551  13
## 5                max precision  0.970038   1.000000   0
## 6                   max recall  0.000438   1.000000 157
## 7              max specificity  0.970038   1.000000   0
## 8             max absolute_mcc  0.293960   0.485559  13
## 9   max min_per_class_accuracy  0.019448   0.685714  65
## 10 max mean_per_class_accuracy  0.073982   0.718981  33
## 11                     max tns  0.970038 143.000000   0
## 12                     max fns  0.970038  34.000000   0
## 13                     max fps  0.000003 143.000000 177
## 14                     max tps  0.000438  35.000000 157
## 15                     max tnr  0.970038   1.000000   0
## 16                     max fnr  0.970038   0.971429   0
## 17                     max fpr  0.000003   1.000000 177
## 18                     max tpr  0.000438   1.000000 157
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: deeplearning
## ** Reported on cross-validation data. **
## ** 5-fold cross-validation on training data (Metrics computed for combined holdout predictions) **
## 
## MSE:  0.1434243
## RMSE:  0.378714
## LogLoss:  0.9498462
## Mean Per-Class Error:  0.2291933
## AUC:  0.8277714
## AUCPR:  0.5620906
## Gini:  0.6555427
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error      Rate
## No     698  83 0.106274   =83/781
## Yes     50  92 0.352113   =50/142
## Totals 748 175 0.144095  =133/923
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.001488   0.580442 170
## 2                       max f2  0.000387   0.634638 252
## 3                 max f0point5  0.003443   0.559105 118
## 4                 max accuracy  0.017843   0.874323  43
## 5                max precision  0.846356   1.000000   0
## 6                   max recall  0.000000   1.000000 399
## 7              max specificity  0.846356   1.000000   0
## 8             max absolute_mcc  0.001488   0.498528 170
## 9   max min_per_class_accuracy  0.000387   0.753521 252
## 10 max mean_per_class_accuracy  0.001488   0.770807 170
## 11                     max tns  0.846356 781.000000   0
## 12                     max fns  0.846356 141.000000   0
## 13                     max fps  0.000000 781.000000 399
## 14                     max tps  0.000000 142.000000 399
## 15                     max tnr  0.846356   1.000000   0
## 16                     max fnr  0.846356   0.992958   0
## 17                     max fpr  0.000000   1.000000 399
## 18                     max tpr  0.000000   1.000000 399
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## Cross-Validation Metrics Summary: 
##                              mean        sd cv_1_valid cv_2_valid cv_3_valid
## accuracy                 0.814841  0.071526   0.702703   0.848649   0.794595
## auc                      0.790328  0.071853   0.666060   0.809682   0.803050
## err                      0.185159  0.071526   0.297297   0.151351   0.205405
## err_count               34.200000 13.274035  55.000000  28.000000  38.000000
## f0point5                 0.484620  0.124550   0.303030   0.522876   0.450644
## f1                       0.514321  0.086962   0.367816   0.533333   0.525000
## f2                       0.560100  0.061378   0.467836   0.544218   0.628742
## lift_top_group           5.863793  1.497568   6.607143   3.189655   6.379310
## logloss                  1.185119  0.174568   1.261851   1.237098   1.387543
## max_per_class_error      0.394828  0.078091   0.428571   0.448276   0.275862
## mcc                      0.421009  0.116009   0.228806   0.443481   0.432710
## mean_per_class_accuracy  0.729044  0.047098   0.648772   0.727785   0.765915
## mean_per_class_error     0.270956  0.047098   0.351228   0.272215   0.234085
## mse                      0.150494  0.006590   0.149274   0.156200   0.156519
## pr_auc                   0.504576  0.107795   0.320343   0.499387   0.560233
## precision                0.470917  0.149935   0.271186   0.516129   0.411765
## r2                      -0.155887  0.039403  -0.162174  -0.181687  -0.184098
## recall                   0.605172  0.078091   0.571429   0.551724   0.724138
## rmse                     0.387861  0.008550   0.386360   0.395222   0.395625
## specificity              0.852915  0.088771   0.726115   0.903846   0.807692
##                         cv_4_valid cv_5_valid
## accuracy                  0.891304   0.836956
## auc                       0.850962   0.821886
## err                       0.108696   0.163043
## err_count                20.000000  30.000000
## f0point5                  0.646552   0.500000
## f1                        0.600000   0.545455
## f2                        0.559701   0.600000
## lift_top_group            6.571429   6.571429
## logloss                   1.113108   0.925995
## max_per_class_error       0.464286   0.357143
## mcc                       0.543394   0.456653
## mean_per_class_accuracy   0.745421   0.757326
## mean_per_class_error      0.254579   0.242674
## mse                       0.150145   0.140334
## pr_auc                    0.586724   0.556194
## precision                 0.681818   0.473684
## r2                       -0.163760  -0.087715
## recall                    0.535714   0.642857
## rmse                      0.387485   0.374611
## specificity               0.955128   0.871795
best_model <- auto_ml_models_h2o@leader


Examine the output of h2o.automl

auto_ml_models_h2o %>% typeof()
## [1] "S4"
auto_ml_models_h2o %>% slotNames()
## [1] "project_name"   "leader"         "leaderboard"    "event_log"     
## [5] "modeling_steps" "training_info"
auto_ml_models_h2o@leaderboard
##                                                  model_id       auc   logloss
## 1    DeepLearning_grid_1_AutoML_7_20240423_184856_model_1 0.8673139 0.3784866
## 2 StackedEnsemble_BestOfFamily_4_AutoML_7_20240423_184856 0.8640237 0.3079303
## 3            GBM_grid_1_AutoML_7_20240423_184856_model_14 0.8528587 0.3232253
## 4 StackedEnsemble_BestOfFamily_3_AutoML_7_20240423_184856 0.8515642 0.3030396
## 5 StackedEnsemble_BestOfFamily_2_AutoML_7_20240423_184856 0.8515642 0.3030082
## 6 StackedEnsemble_BestOfFamily_1_AutoML_7_20240423_184856 0.8513484 0.3028647
##       aucpr mean_per_class_error      rmse        mse
## 1 0.6842232            0.2307443 0.3144402 0.09887263
## 2 0.6915659            0.1920712 0.3018720 0.09112671
## 3 0.5962635            0.2474110 0.3120479 0.09737391
## 4 0.6703203            0.2004045 0.2948439 0.08693295
## 5 0.6703203            0.2004045 0.2948577 0.08694106
## 6 0.6696650            0.2004045 0.2949408 0.08699008
## 
## [40 rows x 7 columns]
auto_ml_models_h2o@leader
## Model Details:
## ==============
## 
## H2OBinomialModel: deeplearning
## Model ID:  DeepLearning_grid_1_AutoML_7_20240423_184856_model_1 
## Status of Neuron Layers: predicting Attrition, 2-class classification, bernoulli distribution, CrossEntropy loss, 6,202 weights/biases, 83.3 KB, 8,055 training samples, mini-batch size 1
##   layer units             type dropout       l1       l2 mean_rate rate_rms
## 1     1    59            Input  5.00 %       NA       NA        NA       NA
## 2     2   100 RectifierDropout 20.00 % 0.000000 0.000000  0.118115 0.316928
## 3     3     2          Softmax      NA 0.000000 0.000000  0.000426 0.000089
##   momentum mean_weight weight_rms mean_bias bias_rms
## 1       NA          NA         NA        NA       NA
## 2 0.000000    0.000493   0.110705  0.487810 0.020024
## 3 0.000000   -0.043842   0.559034 -0.000409 0.010533
## 
## 
## H2OBinomialMetrics: deeplearning
## ** Reported on training data. **
## ** Metrics reported on full training frame **
## 
## MSE:  0.0885245
## RMSE:  0.2975307
## LogLoss:  0.3354281
## Mean Per-Class Error:  0.1882202
## AUC:  0.8815982
## AUCPR:  0.7056277
## Gini:  0.7631963
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error     Rate
## No     729  52 0.066581  =52/781
## Yes     44  98 0.309859  =44/142
## Totals 773 150 0.104009  =96/923
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.118543   0.671233 142
## 2                       max f2  0.043056   0.696822 216
## 3                 max f0point5  0.204088   0.698925 100
## 4                 max accuracy  0.204088   0.902492 100
## 5                max precision  0.993111   1.000000   0
## 6                   max recall  0.000024   1.000000 399
## 7              max specificity  0.993111   1.000000   0
## 8             max absolute_mcc  0.141067   0.610621 128
## 9   max min_per_class_accuracy  0.043056   0.802817 216
## 10 max mean_per_class_accuracy  0.087788   0.819462 155
## 11                     max tns  0.993111 781.000000   0
## 12                     max fns  0.993111 141.000000   0
## 13                     max fps  0.000024 781.000000 399
## 14                     max tps  0.000024 142.000000 399
## 15                     max tnr  0.993111   1.000000   0
## 16                     max fnr  0.993111   0.992958   0
## 17                     max fpr  0.000024   1.000000 399
## 18                     max tpr  0.000024   1.000000 399
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: deeplearning
## ** Reported on validation data. **
## ** Metrics reported on full validation frame **
## 
## MSE:  0.1333056
## RMSE:  0.3651103
## LogLoss:  0.5772558
## Mean Per-Class Error:  0.281019
## AUC:  0.784016
## AUCPR:  0.5980084
## Gini:  0.568032
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error     Rate
## No     128  15 0.104895  =15/143
## Yes     16  19 0.457143   =16/35
## Totals 144  34 0.174157  =31/178
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.073982   0.550725  33
## 2                       max f2  0.005891   0.648536  98
## 3                 max f0point5  0.293960   0.659341  13
## 4                 max accuracy  0.293960   0.859551  13
## 5                max precision  0.970038   1.000000   0
## 6                   max recall  0.000438   1.000000 157
## 7              max specificity  0.970038   1.000000   0
## 8             max absolute_mcc  0.293960   0.485559  13
## 9   max min_per_class_accuracy  0.019448   0.685714  65
## 10 max mean_per_class_accuracy  0.073982   0.718981  33
## 11                     max tns  0.970038 143.000000   0
## 12                     max fns  0.970038  34.000000   0
## 13                     max fps  0.000003 143.000000 177
## 14                     max tps  0.000438  35.000000 157
## 15                     max tnr  0.970038   1.000000   0
## 16                     max fnr  0.970038   0.971429   0
## 17                     max fpr  0.000003   1.000000 177
## 18                     max tpr  0.000438   1.000000 157
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: deeplearning
## ** Reported on cross-validation data. **
## ** 5-fold cross-validation on training data (Metrics computed for combined holdout predictions) **
## 
## MSE:  0.1434243
## RMSE:  0.378714
## LogLoss:  0.9498462
## Mean Per-Class Error:  0.2291933
## AUC:  0.8277714
## AUCPR:  0.5620906
## Gini:  0.6555427
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error      Rate
## No     698  83 0.106274   =83/781
## Yes     50  92 0.352113   =50/142
## Totals 748 175 0.144095  =133/923
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.001488   0.580442 170
## 2                       max f2  0.000387   0.634638 252
## 3                 max f0point5  0.003443   0.559105 118
## 4                 max accuracy  0.017843   0.874323  43
## 5                max precision  0.846356   1.000000   0
## 6                   max recall  0.000000   1.000000 399
## 7              max specificity  0.846356   1.000000   0
## 8             max absolute_mcc  0.001488   0.498528 170
## 9   max min_per_class_accuracy  0.000387   0.753521 252
## 10 max mean_per_class_accuracy  0.001488   0.770807 170
## 11                     max tns  0.846356 781.000000   0
## 12                     max fns  0.846356 141.000000   0
## 13                     max fps  0.000000 781.000000 399
## 14                     max tps  0.000000 142.000000 399
## 15                     max tnr  0.846356   1.000000   0
## 16                     max fnr  0.846356   0.992958   0
## 17                     max fpr  0.000000   1.000000 399
## 18                     max tpr  0.000000   1.000000 399
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## Cross-Validation Metrics Summary: 
##                              mean        sd cv_1_valid cv_2_valid cv_3_valid
## accuracy                 0.814841  0.071526   0.702703   0.848649   0.794595
## auc                      0.790328  0.071853   0.666060   0.809682   0.803050
## err                      0.185159  0.071526   0.297297   0.151351   0.205405
## err_count               34.200000 13.274035  55.000000  28.000000  38.000000
## f0point5                 0.484620  0.124550   0.303030   0.522876   0.450644
## f1                       0.514321  0.086962   0.367816   0.533333   0.525000
## f2                       0.560100  0.061378   0.467836   0.544218   0.628742
## lift_top_group           5.863793  1.497568   6.607143   3.189655   6.379310
## logloss                  1.185119  0.174568   1.261851   1.237098   1.387543
## max_per_class_error      0.394828  0.078091   0.428571   0.448276   0.275862
## mcc                      0.421009  0.116009   0.228806   0.443481   0.432710
## mean_per_class_accuracy  0.729044  0.047098   0.648772   0.727785   0.765915
## mean_per_class_error     0.270956  0.047098   0.351228   0.272215   0.234085
## mse                      0.150494  0.006590   0.149274   0.156200   0.156519
## pr_auc                   0.504576  0.107795   0.320343   0.499387   0.560233
## precision                0.470917  0.149935   0.271186   0.516129   0.411765
## r2                      -0.155887  0.039403  -0.162174  -0.181687  -0.184098
## recall                   0.605172  0.078091   0.571429   0.551724   0.724138
## rmse                     0.387861  0.008550   0.386360   0.395222   0.395625
## specificity              0.852915  0.088771   0.726115   0.903846   0.807692
##                         cv_4_valid cv_5_valid
## accuracy                  0.891304   0.836956
## auc                       0.850962   0.821886
## err                       0.108696   0.163043
## err_count                20.000000  30.000000
## f0point5                  0.646552   0.500000
## f1                        0.600000   0.545455
## f2                        0.559701   0.600000
## lift_top_group            6.571429   6.571429
## logloss                   1.113108   0.925995
## max_per_class_error       0.464286   0.357143
## mcc                       0.543394   0.456653
## mean_per_class_accuracy   0.745421   0.757326
## mean_per_class_error      0.254579   0.242674
## mse                       0.150145   0.140334
## pr_auc                    0.586724   0.556194
## precision                 0.681818   0.473684
## r2                       -0.163760  -0.087715
## recall                    0.535714   0.642857
## rmse                      0.387485   0.374611
## specificity               0.955128   0.871795

Save and Load

# Save the current AutoML leader (commented out so knitting does not re-save it)
# best_model %>% 
#     h2o.saveModel(path = "h2o_models/")

Make predictions

predictions <- h2o.predict(best_model, newdata = test_h2o)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |======================================================================| 100%
predictions_tbl <- predictions %>% 
    as_tibble()

predictions_tbl %>% 
    bind_cols(test_tbl)
## # A tibble: 369 × 35
##    predict    No      Yes   Age Attrition BusinessTravel    DailyRate Department
##    <fct>   <dbl>    <dbl> <dbl> <fct>     <fct>                 <dbl> <fct>     
##  1 No      0.999 0.00125     59 No        Travel_Rarely          1324 Research …
##  2 No      0.993 0.00721     35 No        Travel_Rarely           809 Research …
##  3 No      0.993 0.00653     34 No        Travel_Rarely          1346 Research …
##  4 No      0.985 0.0149      22 No        Non-Travel             1123 Research …
##  5 No      1.00  0.000164    53 No        Travel_Rarely          1219 Sales     
##  6 No      0.983 0.0173      24 No        Non-Travel              673 Research …
##  7 No      0.967 0.0334      21 No        Travel_Rarely           391 Research …
##  8 No      0.977 0.0229      34 Yes       Travel_Rarely           699 Research …
##  9 No      1.00  0.000340    53 No        Travel_Rarely          1282 Research …
## 10 Yes     0.190 0.810       32 Yes       Travel_Frequently      1125 Research …
## # ℹ 359 more rows
## # ℹ 27 more variables: DistanceFromHome <dbl>, Education <dbl>,
## #   EducationField <fct>, EmployeeNumber <dbl>, EnvironmentSatisfaction <dbl>,
## #   Gender <fct>, HourlyRate <dbl>, JobInvolvement <dbl>, JobLevel <dbl>,
## #   JobRole <fct>, JobSatisfaction <dbl>, MaritalStatus <fct>,
## #   MonthlyIncome <dbl>, MonthlyRate <dbl>, NumCompaniesWorked <dbl>,
## #   OverTime <fct>, PercentSalaryHike <dbl>, PerformanceRating <dbl>, …
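
Because tidymodels is already loaded, the joined predictions can also be scored with yardstick. A minimal sketch, assuming "Yes" is the event of interest (the factor levels are "No", "Yes", so event_level = "second" is needed); results_tbl is new to this sketch:

results_tbl <- predictions_tbl %>% 
    bind_cols(test_tbl %>% select(Attrition))

results_tbl %>% conf_mat(truth = Attrition, estimate = predict)
results_tbl %>% accuracy(truth = Attrition, estimate = predict)
results_tbl %>% roc_auc(truth = Attrition, Yes, event_level = "second")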

Evaluate model

performance_h2o <- h2o.performance(best_model, newdata = test_h2o)
typeof(performance_h2o)
## [1] "S4"
slotNames(performance_h2o) 
## [1] "algorithm" "on_train"  "on_valid"  "on_xval"   "metrics"
performance_h2o@metrics
## $model
## $model$`__meta`
## $model$`__meta`$schema_version
## [1] 3
## 
## $model$`__meta`$schema_name
## [1] "ModelKeyV3"
## 
## $model$`__meta`$schema_type
## [1] "Key<Model>"
## 
## 
## $model$name
## [1] "DeepLearning_grid_1_AutoML_7_20240423_184856_model_1"
## 
## $model$type
## [1] "Key<Model>"
## 
## $model$URL
## [1] "/3/Models/DeepLearning_grid_1_AutoML_7_20240423_184856_model_1"
## 
## 
## $model_checksum
## [1] "-2112073008793364328"
## 
## $frame
## $frame$name
## [1] "test_tbl_sid_b48e_3"
## 
## 
## $frame_checksum
## [1] "-54413681510283746"
## 
## $description
## NULL
## 
## $scoring_time
## [1] 1.713913e+12
## 
## $predictions
## NULL
## 
## $MSE
## [1] 0.09887263
## 
## $RMSE
## [1] 0.3144402
## 
## $nobs
## [1] 369
## 
## $custom_metric_name
## NULL
## 
## $custom_metric_value
## [1] 0
## 
## $r2
## [1] 0.2738621
## 
## $logloss
## [1] 0.3784866
## 
## $AUC
## [1] 0.8673139
## 
## $pr_auc
## [1] 0.6842232
## 
## $Gini
## [1] 0.7346278
## 
## $mean_per_class_error
## [1] 0.2307443
## 
## $domain
## [1] "No"  "Yes"
## 
## $cm
## $cm$`__meta`
## $cm$`__meta`$schema_version
## [1] 3
## 
## $cm$`__meta`$schema_name
## [1] "ConfusionMatrixV3"
## 
## $cm$`__meta`$schema_type
## [1] "ConfusionMatrix"
## 
## 
## $cm$table
## Confusion Matrix: Row labels: Actual class; Column labels: Predicted class
##         No Yes  Error       Rate
## No     290  19 0.0615 = 19 / 309
## Yes     24  36 0.4000 =  24 / 60
## Totals 314  55 0.1165 = 43 / 369
## 
## 
## $thresholds_and_metric_scores
## Metrics for Thresholds: Binomial metrics as a function of classification thresholds
##   threshold       f1       f2 f0point5 accuracy precision   recall specificity
## 1  0.978409 0.032787 0.020747 0.078125 0.840108  1.000000 0.016667    1.000000
## 2  0.962851 0.064516 0.041322 0.147059 0.842818  1.000000 0.033333    1.000000
## 3  0.931140 0.095238 0.061728 0.208333 0.845528  1.000000 0.050000    1.000000
## 4  0.851139 0.125000 0.081967 0.263158 0.848238  1.000000 0.066667    1.000000
## 5  0.850000 0.153846 0.102041 0.312500 0.850949  1.000000 0.083333    1.000000
##   absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns fns fps tps
## 1     0.118299               0.016667                0.508333 309  59   0   1
## 2     0.167527               0.033333                0.516667 309  58   0   2
## 3     0.205458               0.050000                0.525000 309  57   0   3
## 4     0.237568               0.066667                0.533333 309  56   0   4
## 5     0.265973               0.083333                0.541667 309  55   0   5
##        tnr      fnr      fpr      tpr idx
## 1 1.000000 0.983333 0.000000 0.016667   0
## 2 1.000000 0.966667 0.000000 0.033333   1
## 3 1.000000 0.950000 0.000000 0.050000   2
## 4 1.000000 0.933333 0.000000 0.066667   3
## 5 1.000000 0.916667 0.000000 0.083333   4
## 
## ---
##     threshold       f1       f2 f0point5 accuracy precision   recall
## 364  0.000020 0.283019 0.496689 0.197889 0.176152  0.164835 1.000000
## 365  0.000014 0.282353 0.495868 0.197368 0.173442  0.164384 1.000000
## 366  0.000012 0.281690 0.495050 0.196850 0.170732  0.163934 1.000000
## 367  0.000007 0.281030 0.494234 0.196335 0.168022  0.163488 1.000000
## 368  0.000003 0.280374 0.493421 0.195822 0.165312  0.163043 1.000000
## 369  0.000001 0.279720 0.492611 0.195313 0.162602  0.162602 1.000000
##     specificity absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns
## 364    0.016181     0.051645               0.016181                0.508091   5
## 365    0.012945     0.046130               0.012945                0.506472   4
## 366    0.009709     0.039895               0.009709                0.504854   3
## 367    0.006472     0.032530               0.006472                0.503236   2
## 368    0.003236     0.022971               0.003236                0.501618   1
## 369    0.000000     0.000000               0.000000                0.500000   0
##     fns fps tps      tnr      fnr      fpr      tpr idx
## 364   0 304  60 0.016181 0.000000 0.983819 1.000000 363
## 365   0 305  60 0.012945 0.000000 0.987055 1.000000 364
## 366   0 306  60 0.009709 0.000000 0.990291 1.000000 365
## 367   0 307  60 0.006472 0.000000 0.993528 1.000000 366
## 368   0 308  60 0.003236 0.000000 0.996764 1.000000 367
## 369   0 309  60 0.000000 0.000000 1.000000 1.000000 368
## 
## $max_criteria_and_metric_scores
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.124479   0.626087  54
## 2                       max f2  0.014039   0.669975 162
## 3                 max f0point5  0.344917   0.718750  24
## 4                 max accuracy  0.344917   0.894309  24
## 5                max precision  0.978409   1.000000   0
## 6                   max recall  0.000335   1.000000 328
## 7              max specificity  0.978409   1.000000   0
## 8             max absolute_mcc  0.228765   0.565158  38
## 9   max min_per_class_accuracy  0.025684   0.766667 117
## 10 max mean_per_class_accuracy  0.053030   0.786893  80
## 11                     max tns  0.978409 309.000000   0
## 12                     max fns  0.978409  59.000000   0
## 13                     max fps  0.000001 309.000000 368
## 14                     max tps  0.000335  60.000000 328
## 15                     max tnr  0.978409   1.000000   0
## 16                     max fnr  0.978409   0.983333   0
## 17                     max fpr  0.000001   1.000000 368
## 18                     max tpr  0.000335   1.000000 328
## 
## $gains_lift_table
## Gains/Lift Table: Avg response rate: 16.26 %, avg score:  7.32 %
##    group cumulative_data_fraction lower_threshold     lift cumulative_lift
## 1      1               0.01084011        0.850364 6.150000        6.150000
## 2      2               0.02168022        0.767188 6.150000        6.150000
## 3      3               0.03252033        0.652627 4.612500        5.637500
## 4      4               0.04065041        0.488102 6.150000        5.740000
## 5      5               0.05149051        0.444812 6.150000        5.826316
## 6      6               0.10027100        0.242952 3.416667        4.654054
## 7      7               0.15176152        0.123230 2.589474        3.953571
## 8      8               0.20054201        0.069858 1.366667        3.324324
## 9      9               0.30081301        0.029268 0.831081        2.493243
## 10    10               0.40108401        0.017165 0.997297        2.119257
## 11    11               0.50135501        0.009675 0.498649        1.795135
## 12    12               0.59891599        0.005170 0.512500        1.586199
## 13    13               0.69918699        0.002752 0.166216        1.382558
## 14    14               0.79945799        0.001224 0.000000        1.209153
## 15    15               0.89972900        0.000300 0.332432        1.111446
## 16    16               1.00000000        0.000001 0.000000        1.000000
##    response_rate    score cumulative_response_rate cumulative_score
## 1       1.000000 0.930885                 1.000000         0.930885
## 2       1.000000 0.808199                 1.000000         0.869542
## 3       0.750000 0.692175                 0.916667         0.810420
## 4       1.000000 0.540434                 0.933333         0.756422
## 5       1.000000 0.463258                 0.947368         0.694704
## 6       0.555556 0.311268                 0.756757         0.508167
## 7       0.421053 0.159296                 0.642857         0.389800
## 8       0.222222 0.097595                 0.540541         0.318723
## 9       0.135135 0.044030                 0.405405         0.227159
## 10      0.162162 0.022159                 0.344595         0.175909
## 11      0.081081 0.013239                 0.291892         0.143375
## 12      0.083333 0.006818                 0.257919         0.121130
## 13      0.027027 0.003879                 0.224806         0.104315
## 14      0.000000 0.001812                 0.196610         0.091459
## 15      0.054054 0.000683                 0.180723         0.081342
## 16      0.000000 0.000132                 0.162602         0.073199
##    capture_rate cumulative_capture_rate        gain cumulative_gain
## 1      0.066667                0.066667  515.000000      515.000000
## 2      0.066667                0.133333  515.000000      515.000000
## 3      0.050000                0.183333  361.250000      463.750000
## 4      0.050000                0.233333  515.000000      474.000000
## 5      0.066667                0.300000  515.000000      482.631579
## 6      0.166667                0.466667  241.666667      365.405405
## 7      0.133333                0.600000  158.947368      295.357143
## 8      0.066667                0.666667   36.666667      232.432432
## 9      0.083333                0.750000  -16.891892      149.324324
## 10     0.100000                0.850000   -0.270270      111.925676
## 11     0.050000                0.900000  -50.135135       79.513514
## 12     0.050000                0.950000  -48.750000       58.619910
## 13     0.016667                0.966667  -83.378378       38.255814
## 14     0.000000                0.966667 -100.000000       20.915254
## 15     0.033333                1.000000  -66.756757       11.144578
## 16     0.000000                1.000000 -100.000000        0.000000
##    kolmogorov_smirnov
## 1            0.066667
## 2            0.133333
## 3            0.180097
## 4            0.230097
## 5            0.296764
## 6            0.437540
## 7            0.535275
## 8            0.556634
## 9            0.536408
## 10           0.536084
## 11           0.476052
## 12           0.419256
## 13           0.319417
## 14           0.199676
## 15           0.119741
## 16           0.000000
h2o.auc(performance_h2o)
## [1] 0.8673139
h2o.confusionMatrix(performance_h2o)
## Confusion Matrix (vertical: actual; across: predicted)  for max f1 @ threshold = 0.12447934114241:
##         No Yes    Error     Rate
## No     290  19 0.061489  =19/309
## Yes     24  36 0.400000   =24/60
## Totals 314  55 0.116531  =43/369
h2o.metric(performance_h2o) %>% as_tibble() %>% filter(between(threshold, 0.41, 0.42))
## # A tibble: 2 × 20
##   threshold    f1    f2 f0point5 accuracy precision recall specificity
##       <dbl> <dbl> <dbl>    <dbl>    <dbl>     <dbl>  <dbl>       <dbl>
## 1     0.420 0.512 0.401    0.709    0.892     0.955   0.35       0.997
## 2     0.419 0.506 0.399    0.691    0.889     0.913   0.35       0.994
## # ℹ 12 more variables: absolute_mcc <dbl>, min_per_class_accuracy <dbl>,
## #   mean_per_class_accuracy <dbl>, tns <dbl>, fns <dbl>, fps <dbl>, tps <dbl>,
## #   tnr <dbl>, fnr <dbl>, fpr <dbl>, tpr <dbl>, idx <int>
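
The same threshold-level table can be used to draw the ROC curve for the test set; a small ggplot2 sketch (the AUC in the subtitle is the value reported by h2o.auc() above):

h2o.metric(performance_h2o) %>% 
    as_tibble() %>% 
    ggplot(aes(x = fpr, y = tpr)) +
    geom_line() +
    geom_abline(linetype = "dashed") +
    labs(
        title    = "ROC curve, AutoML leader on the test set",
        subtitle = "AUC = 0.867",
        x        = "False positive rate",
        y        = "True positive rate"
    )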