The goal is to automate building and tuning a classification model with h2o::h2o.automl(). The model predicts whether a Himalayan expedition member died (the died column of the TidyTuesday members dataset), so the workflow below imports that data, splits it, and hands it to H2O AutoML.
library(h2o)
##
## ----------------------------------------------------------------------
##
## Your next step is to start H2O:
## > h2o.init()
##
## For H2O package documentation, ask for help:
## > ??h2o
##
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
##
## ----------------------------------------------------------------------
##
## Attaching package: 'h2o'
## The following objects are masked from 'package:stats':
##
## cor, sd, var
## The following objects are masked from 'package:base':
##
## %*%, %in%, &&, ||, apply, as.factor, as.numeric, colnames,
## colnames<-, ifelse, is.character, is.factor, is.numeric, log,
## log10, log1p, log2, round, signif, trunc
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.4.4 ✔ tibble 3.2.1
## ✔ lubridate 1.9.2 ✔ tidyr 1.3.1
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ lubridate::day() masks h2o::day()
## ✖ dplyr::filter() masks stats::filter()
## ✖ lubridate::hour() masks h2o::hour()
## ✖ dplyr::lag() masks stats::lag()
## ✖ lubridate::month() masks h2o::month()
## ✖ lubridate::week() masks h2o::week()
## ✖ lubridate::year() masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.1.1 ──
## ✔ broom 1.0.5 ✔ rsample 1.2.0
## ✔ dials 1.2.0 ✔ tune 1.1.2
## ✔ infer 1.0.6 ✔ workflows 1.1.3
## ✔ modeldata 1.3.0 ✔ workflowsets 1.0.1
## ✔ parsnip 1.1.1 ✔ yardstick 1.3.0
## ✔ recipes 1.0.9
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter() masks stats::filter()
## ✖ recipes::fixed() masks stringr::fixed()
## ✖ dplyr::lag() masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step() masks stats::step()
## • Dig deeper into tidy modeling with R at https://www.tmwr.org
library(tidyquant)
## Loading required package: PerformanceAnalytics
## Loading required package: xts
## Loading required package: zoo
##
## Attaching package: 'zoo'
##
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
##
##
## Attaching package: 'xts'
##
## The following objects are masked from 'package:dplyr':
##
## first, last
##
##
## Attaching package: 'PerformanceAnalytics'
##
## The following object is masked from 'package:graphics':
##
## legend
##
## Loading required package: quantmod
## Loading required package: TTR
##
## Attaching package: 'TTR'
##
## The following object is masked from 'package:dials':
##
## momentum
##
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
members <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-22/members.csv')
## Rows: 76519 Columns: 21
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (10): expedition_id, member_id, peak_id, peak_name, season, sex, citizen...
## dbl (5): year, age, highpoint_metres, death_height_metres, injury_height_me...
## lgl (6): hired, success, solo, oxygen_used, died, injured
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
set.seed(1234)
data_split <- initial_split(members, strata = "died")
train_tbl <- training(data_split)
test_tbl <- testing(data_split)
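The outcome is heavily imbalanced (roughly 1.4% of members died, as the gains/lift table later shows), which is why the split above is stratified on died. A quick sketch of checking the class balance in the training set:
train_tbl %>%
  count(died) %>%               # rows per outcome class
  mutate(prop = n / sum(n))     # share of each class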
recipe_obj <- recipe(died ~ ., data = train_tbl) %>%
# Remove zero variance variables
step_zv(all_predictors())
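The recipe is only defined at this point; the raw train_tbl is what gets passed to H2O below. A minimal sketch of how the recipe could be prepped and baked first, if the preprocessed data were wanted instead (object names here are illustrative):
recipe_prepped    <- prep(recipe_obj, training = train_tbl)      # estimate the recipe on the training data
train_prepped_tbl <- bake(recipe_prepped, new_data = train_tbl)  # apply it to the training set
test_prepped_tbl  <- bake(recipe_prepped, new_data = test_tbl)   # and to the test set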
# Initialize h2o
h2o.init()
## Connection successful!
##
## R is connected to the H2O cluster:
## H2O cluster uptime: 2 minutes 18 seconds
## H2O cluster timezone: America/New_York
## H2O data parsing timezone: UTC
## H2O cluster version: 3.44.0.3
## H2O cluster version age: 4 months and 5 days
## H2O cluster name: H2O_started_from_R_aesim_zyd177
## H2O cluster total nodes: 1
## H2O cluster total memory: 3.84 GB
## H2O cluster total cores: 8
## H2O cluster allowed cores: 8
## H2O cluster healthy: TRUE
## H2O Connection ip: localhost
## H2O Connection port: 54321
## H2O Connection proxy: NA
## H2O Internal Security: FALSE
## R Version: R version 4.2.2 (2022-10-31 ucrt)
## Warning in h2o.clusterInfo():
## Your H2O cluster version is (4 months and 5 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
split.h2o <- h2o.splitFrame(as.h2o(train_tbl), ratios = c(0.85), seed = 2567)
train_h2o <- split.h2o[[1]]
valid_h2o <- split.h2o[[2]]
test_h2o <- as.h2o(test_tbl)
y <- "died"
x <- setdiff(names(train_tbl), y)
auto_ml_models_h2o <- h2o.automl(
x = x,
y = y,
training_frame = train_h2o,
validation_frame = valid_h2o,
leaderboard_frame = test_h2o,
max_runtime_secs = 30,
nfolds = 5,
seed = 3456)
## 19:47:17.855: User specified a validation frame with cross-validation still enabled. Please note that the models will still be validated using cross-validation only, the validation frame will be used to provide purely informative validation metrics on the trained models.
## 19:47:17.863: AutoML: XGBoost is not available; skipping it.
## 19:47:17.874: _train param, Dropping bad and constant columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
## 19:47:36.444: _train param, Dropping unused columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
auto_ml_models_h2o@leaderboard
## model_id auc logloss
## 1 StackedEnsemble_BestOfFamily_2_AutoML_2_20240425_194717 0.9948424 0.021773852
## 2 GBM_1_AutoML_2_20240425_194717 0.9948087 0.002921935
## 3 StackedEnsemble_BestOfFamily_1_AutoML_2_20240425_194717 0.9947989 0.003008795
## 4 DRF_1_AutoML_2_20240425_194717 0.9946281 0.004457692
## 5 GBM_3_AutoML_2_20240425_194717 0.9918184 0.013381651
## 6 GBM_2_AutoML_2_20240425_194717 0.9896567 0.011187667
## aucpr mean_per_class_error rmse mse
## 1 0.9858480 0.016666667 0.06556630 0.0042989393
## 2 0.9856255 0.009312282 0.02091913 0.0004376100
## 3 0.9856224 0.009312282 0.02104494 0.0004428894
## 4 0.9833868 0.016666667 0.02696857 0.0007273040
## 5 0.9790450 0.016666667 0.02458285 0.0006043165
## 6 0.9822232 0.016666667 0.02336205 0.0005457855
##
## [8 rows x 7 columns]
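The leaderboard print is truncated; a sketch of pulling the full leaderboard into a tibble for easier inspection (h2o.get_leaderboard() is available in recent h2o releases):
leaderboard_tbl <- h2o.get_leaderboard(auto_ml_models_h2o, extra_columns = "ALL") %>%
  as_tibble()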
auto_ml_models_h2o@leader
## Model Details:
## ==============
##
## H2OBinomialModel: stackedensemble
## Model ID: StackedEnsemble_BestOfFamily_2_AutoML_2_20240425_194717
## Model Summary for Stacked Ensemble:
## key value
## 1 Stacking strategy cross_validation
## 2 Number of base models (used / total) 2/3
## 3 # GBM base models (used / total) 1/1
## 4 # DRF base models (used / total) 1/1
## 5 # GLM base models (used / total) 0/1
## 6 Metalearner algorithm GLM
## 7 Metalearner fold assignment scheme Random
## 8 Metalearner nfolds 5
## 9 Metalearner fold_column NA
## 10 Custom metalearner hyperparameters None
##
##
## H2OBinomialMetrics: stackedensemble
## ** Reported on training data. **
##
## MSE: 0.00407101
## RMSE: 0.06380447
## LogLoss: 0.02131811
## Mean Per-Class Error: 0.01868477
## AUC: 0.9992293
## AUCPR: 0.987983
## Gini: 0.9984587
##
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
## FALSE TRUE Error Rate
## FALSE 9759 1 0.000102 =1/9760
## TRUE 6 155 0.037267 =6/161
## Totals 9765 156 0.000706 =7/9921
##
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.025260 0.977918 111
## 2 max f2 0.021330 0.971357 114
## 3 max f0point5 0.042401 0.988296 108
## 4 max accuracy 0.025260 0.999294 111
## 5 max precision 0.723764 1.000000 0
## 6 max recall 0.006293 1.000000 206
## 7 max specificity 0.723764 1.000000 0
## 8 max absolute_mcc 0.025260 0.977685 111
## 9 max min_per_class_accuracy 0.012572 0.987578 133
## 10 max mean_per_class_accuracy 0.012572 0.991688 133
## 11 max tns 0.723764 9760.000000 0
## 12 max fns 0.723764 160.000000 0
## 13 max fps 0.005408 9760.000000 399
## 14 max tps 0.006293 161.000000 206
## 15 max tnr 0.723764 1.000000 0
## 16 max fnr 0.723764 0.993789 0
## 17 max fpr 0.005408 1.000000 399
## 18 max tpr 0.006293 1.000000 206
##
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: stackedensemble
## ** Reported on validation data. **
##
## MSE: 0.003973735
## RMSE: 0.06303757
## LogLoss: 0.02098925
## Mean Per-Class Error: 0.0141844
## AUC: 0.9941931
## AUCPR: 0.9797678
## Gini: 0.9883863
##
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
## FALSE TRUE Error Rate
## FALSE 8536 0 0.000000 =0/8536
## TRUE 4 137 0.028369 =4/141
## Totals 8540 137 0.000461 =4/8677
##
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.037257 0.985612 101
## 2 max f2 0.037257 0.977175 101
## 3 max f0point5 0.037257 0.994194 101
## 4 max accuracy 0.037257 0.999539 101
## 5 max precision 0.724954 1.000000 0
## 6 max recall 0.005802 1.000000 307
## 7 max specificity 0.724954 1.000000 0
## 8 max absolute_mcc 0.037257 0.985483 101
## 9 max min_per_class_accuracy 0.012572 0.978723 123
## 10 max mean_per_class_accuracy 0.012572 0.987312 123
## 11 max tns 0.724954 8536.000000 0
## 12 max fns 0.724954 140.000000 0
## 13 max fps 0.005405 8536.000000 399
## 14 max tps 0.005802 141.000000 307
## 15 max tnr 0.724954 1.000000 0
## 16 max fnr 0.724954 0.992908 0
## 17 max fpr 0.005405 1.000000 399
## 18 max tpr 0.005802 1.000000 307
##
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: stackedensemble
## ** Reported on cross-validation data. **
## ** 5-fold cross-validation on training data (Metrics computed for combined holdout predictions) **
##
## MSE: 0.002666812
## RMSE: 0.05164119
## LogLoss: 0.01968123
## Mean Per-Class Error: 0.05755396
## AUC: 0.9870595
## AUCPR: 0.9328614
## Gini: 0.974119
##
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
## FALSE TRUE Error Rate
## FALSE 48017 0 0.000000 =0/48017
## TRUE 80 615 0.115108 =80/695
## Totals 48097 615 0.001642 =80/48712
##
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.303636 0.938931 80
## 2 max f2 0.303636 0.905744 80
## 3 max f0point5 0.303636 0.974643 80
## 4 max accuracy 0.303636 0.998358 80
## 5 max precision 0.996247 1.000000 0
## 6 max recall 0.005847 1.000000 399
## 7 max specificity 0.996247 1.000000 0
## 8 max absolute_mcc 0.303636 0.939904 80
## 9 max min_per_class_accuracy 0.022833 0.969386 227
## 10 max mean_per_class_accuracy 0.030719 0.973936 193
## 11 max tns 0.996247 48017.000000 0
## 12 max fns 0.996247 691.000000 0
## 13 max fps 0.005847 48017.000000 399
## 14 max tps 0.005847 695.000000 399
## 15 max tnr 0.996247 1.000000 0
## 16 max fnr 0.996247 0.994245 0
## 17 max fpr 0.005847 1.000000 399
## 18 max tpr 0.005847 1.000000 399
##
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## Cross-Validation Metrics Summary:
## mean sd cv_1_valid cv_2_valid cv_3_valid cv_4_valid
## accuracy 0.999486 0.000128 0.999386 0.999483 0.999695 0.999380
## auc 0.988085 0.003713 0.984202 0.991024 0.991949 0.989099
## err 0.000514 0.000128 0.000614 0.000517 0.000305 0.000620
## err_count 5.000000 1.224745 6.000000 5.000000 3.000000 6.000000
## f0point5 0.992533 0.001967 0.990566 0.991525 0.995671 0.991848
## cv_5_valid
## accuracy 0.999487
## auc 0.984153
## err 0.000513
## err_count 5.000000
## f0point5 0.993056
##
## ---
## mean sd cv_1_valid cv_2_valid cv_3_valid
## precision 1.000000 0.000000 1.000000 1.000000 1.000000
## r2 0.693159 0.020661 0.673253 0.672139 0.692413
## recall 0.963806 0.009325 0.954545 0.959016 0.978723
## residual_deviance 441.750460 9.672743 450.239170 426.167880 445.265870
## rmse 0.065536 0.000992 0.065995 0.063879 0.065915
## specificity 1.000000 0.000000 1.000000 1.000000 1.000000
## cv_4_valid cv_5_valid
## precision 1.000000 1.000000
## r2 0.714485 0.713506
## recall 0.960526 0.966216
## residual_deviance 448.045930 439.033500
## rmse 0.066446 0.065443
## specificity 1.000000 1.000000
best_model <- auto_ml_models_h2o@leader
Examine the object returned by h2o.automl(). It is an S4 object; its leaderboard and leader slots hold the results already printed above.
auto_ml_models_h2o %>% typeof()
## [1] "S4"
auto_ml_models_h2o %>% slotNames()
## [1] "project_name" "leader" "leaderboard" "event_log"
## [5] "modeling_steps" "training_info"
# h2o.getModel("GBM_grid_1_AutoML_2_20240423_113107_model_1") %>%
# h2o.saveModel("h2o_models/")
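Rather than saving a model by its run-specific ID (as sketched in the commented code above), the current leader can be persisted and reloaded directly; the "h2o_models/" directory is an assumption:
model_path <- h2o.saveModel(best_model, path = "h2o_models/", force = TRUE)  # returns the full saved path
# best_model_reloaded <- h2o.loadModel(model_path)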
predictions <- h2o.predict(best_model, newdata = test_h2o)
predictions_tbl <- predictions %>%
as_tibble()
predictions_tbl %>%
bind_cols(test_tbl)
## # A tibble: 19,130 × 24
## predict FALSE. TRUE. expedition_id member_id peak_id peak_name year season
## <fct> <dbl> <dbl> <chr> <chr> <chr> <chr> <dbl> <chr>
## 1 FALSE 0.994 0.00585 AMAD78301 AMAD7830… AMAD Ama Dabl… 1978 Autumn
## 2 FALSE 0.994 0.00583 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 3 FALSE 0.994 0.00592 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 4 FALSE 0.994 0.00598 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 5 FALSE 0.994 0.00594 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 6 FALSE 0.994 0.00580 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 7 FALSE 0.994 0.00581 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 8 FALSE 0.994 0.00581 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 9 FALSE 0.994 0.00592 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 10 FALSE 0.994 0.00581 AMAD79301 AMAD7930… AMAD Ama Dabl… 1979 Autumn
## # ℹ 19,120 more rows
## # ℹ 15 more variables: sex <chr>, age <dbl>, citizenship <chr>,
## # expedition_role <chr>, hired <lgl>, highpoint_metres <dbl>, success <lgl>,
## # solo <lgl>, oxygen_used <lgl>, died <lgl>, death_cause <chr>,
## # death_height_metres <dbl>, injured <lgl>, injury_type <chr>,
## # injury_height_metres <dbl>
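With the predictions joined to the test set, the usual yardstick metrics offer a cross-check of the h2o performance numbers below. A sketch, assuming the TRUE. column is the predicted probability that died is TRUE (as in the tibble above) and using illustrative object names:
results_tbl <- predictions_tbl %>%
  bind_cols(test_tbl) %>%
  mutate(
    died    = factor(died,                  levels = c("TRUE", "FALSE")),  # event level first
    predict = factor(as.character(predict), levels = c("TRUE", "FALSE"))
  )

results_tbl %>% yardstick::accuracy(truth = died, estimate = predict)
results_tbl %>% yardstick::roc_auc(truth = died, `TRUE.`)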
performance_h2o <- h2o.performance(best_model, newdata = test_h2o)
typeof(performance_h2o)
## [1] "S4"
slotNames(performance_h2o)
## [1] "algorithm" "on_train" "on_valid" "on_xval" "metrics"
performance_h2o@metrics
## $model
## $model$`__meta`
## $model$`__meta`$schema_version
## [1] 3
##
## $model$`__meta`$schema_name
## [1] "ModelKeyV3"
##
## $model$`__meta`$schema_type
## [1] "Key<Model>"
##
##
## $model$name
## [1] "StackedEnsemble_BestOfFamily_2_AutoML_2_20240425_194717"
##
## $model$type
## [1] "Key<Model>"
##
## $model$URL
## [1] "/3/Models/StackedEnsemble_BestOfFamily_2_AutoML_2_20240425_194717"
##
##
## $model_checksum
## [1] "3516566267451993616"
##
## $frame
## $frame$name
## [1] "test_tbl_sid_9c64_3"
##
##
## $frame_checksum
## [1] "678340420273909232"
##
## $description
## NULL
##
## $scoring_time
## [1] 1.714089e+12
##
## $predictions
## NULL
##
## $MSE
## [1] 0.004298939
##
## $RMSE
## [1] 0.0655663
##
## $nobs
## [1] 19130
##
## $custom_metric_name
## NULL
##
## $custom_metric_value
## [1] 0
##
## $r2
## [1] 0.6910517
##
## $logloss
## [1] 0.02177385
##
## $AUC
## [1] 0.9948424
##
## $pr_auc
## [1] 0.985848
##
## $Gini
## [1] 0.9896848
##
## $mean_per_class_error
## [1] 0.01666667
##
## $domain
## [1] "FALSE" "TRUE"
##
## $cm
## $cm$`__meta`
## $cm$`__meta`$schema_version
## [1] 3
##
## $cm$`__meta`$schema_name
## [1] "ConfusionMatrixV3"
##
## $cm$`__meta`$schema_type
## [1] "ConfusionMatrix"
##
##
## $cm$table
## Confusion Matrix: Row labels: Actual class; Column labels: Predicted class
## FALSE TRUE Error Rate
## FALSE 18860 0 0.0000 = 0 / 18,860
## TRUE 9 261 0.0333 = 9 / 270
## Totals 18869 261 0.0005 = 9 / 19,130
##
##
## $thresholds_and_metric_scores
## Metrics for Thresholds: Binomial metrics as a function of classification thresholds
## threshold f1 f2 f0point5 accuracy precision recall specificity
## 1 0.726341 0.007380 0.004625 0.018248 0.985938 1.000000 0.003704 1.000000
## 2 0.723408 0.014706 0.009242 0.035971 0.985991 1.000000 0.007407 1.000000
## 3 0.722831 0.021978 0.013850 0.053191 0.986043 1.000000 0.011111 1.000000
## 4 0.722788 0.029197 0.018450 0.069930 0.986095 1.000000 0.014815 1.000000
## 5 0.722702 0.036364 0.023041 0.086207 0.986147 1.000000 0.018519 1.000000
## absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns fns fps tps
## 1 0.060429 0.003704 0.501852 18860 269 0 1
## 2 0.085461 0.007407 0.503704 18860 268 0 2
## 3 0.104671 0.011111 0.505556 18860 267 0 3
## 4 0.120867 0.014815 0.507407 18860 266 0 4
## 5 0.135137 0.018519 0.509259 18860 265 0 5
## tnr fnr fpr tpr idx
## 1 1.000000 0.996296 0.000000 0.003704 0
## 2 1.000000 0.992593 0.000000 0.007407 1
## 3 1.000000 0.988889 0.000000 0.011111 2
## 4 1.000000 0.985185 0.000000 0.014815 3
## 5 1.000000 0.981481 0.000000 0.018519 4
##
## ---
## threshold f1 f2 f0point5 accuracy precision recall
## 395 0.005416 0.030037 0.071854 0.018987 0.088447 0.015247 1.000000
## 396 0.005414 0.029591 0.070833 0.018702 0.074281 0.015018 1.000000
## 397 0.005412 0.029074 0.069649 0.018372 0.057344 0.014752 1.000000
## 398 0.005411 0.028475 0.068271 0.017989 0.036905 0.014443 1.000000
## 399 0.005409 0.028075 0.067352 0.017734 0.022791 0.014238 1.000000
## 400 0.005408 0.027835 0.066799 0.017580 0.014114 0.014114 1.000000
## specificity absolute_mcc min_per_class_accuracy mean_per_class_accuracy
## 395 0.075398 0.033906 0.075398 0.537699
## 396 0.061029 0.030274 0.061029 0.530514
## 397 0.043849 0.025433 0.043849 0.521925
## 398 0.023118 0.018273 0.023118 0.511559
## 399 0.008802 0.011194 0.008802 0.504401
## 400 0.000000 0.000000 0.000000 0.500000
## tns fns fps tps tnr fnr fpr tpr idx
## 395 1422 0 17438 270 0.075398 0.000000 0.924602 1.000000 394
## 396 1151 0 17709 270 0.061029 0.000000 0.938971 1.000000 395
## 397 827 0 18033 270 0.043849 0.000000 0.956151 1.000000 396
## 398 436 0 18424 270 0.023118 0.000000 0.976882 1.000000 397
## 399 166 0 18694 270 0.008802 0.000000 0.991198 1.000000 398
## 400 0 0 18860 270 0.000000 0.000000 1.000000 1.000000 399
##
## $max_criteria_and_metric_scores
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.037257 0.983051 132
## 2 max f2 0.037257 0.973154 132
## 3 max f0point5 0.037257 0.993151 132
## 4 max accuracy 0.037257 0.999530 132
## 5 max precision 0.726341 1.000000 0
## 6 max recall 0.005454 1.000000 377
## 7 max specificity 0.726341 1.000000 0
## 8 max absolute_mcc 0.037257 0.982958 132
## 9 max min_per_class_accuracy 0.012225 0.988889 174
## 10 max mean_per_class_accuracy 0.012225 0.990786 174
## 11 max tns 0.726341 18860.000000 0
## 12 max fns 0.726341 269.000000 0
## 13 max fps 0.005408 18860.000000 399
## 14 max tps 0.005454 270.000000 377
## 15 max tnr 0.726341 1.000000 0
## 16 max fnr 0.726341 0.996296 0
## 17 max fpr 0.005408 1.000000 399
## 18 max tpr 0.005454 1.000000 377
##
## $gains_lift_table
## Gains/Lift Table: Avg response rate: 1.41 %, avg score: 1.35 %
## group cumulative_data_fraction lower_threshold lift cumulative_lift
## 1 1 0.01003659 0.715180 70.851852 70.851852
## 2 2 0.02070047 0.012227 26.048475 47.771324
## 3 3 0.03005750 0.011976 0.000000 32.899903
## 4 4 0.04004182 0.011850 0.000000 24.696403
## 5 5 0.05002614 0.011772 0.000000 19.767445
## 6 6 0.10026137 0.006223 0.000000 9.863110
## 7 7 0.15033978 0.006082 0.000000 6.577693
## 8 8 0.20000000 0.006068 0.000000 4.944444
## 9 9 0.30005227 0.005941 0.037018 3.308066
## 10 10 0.40000000 0.005866 0.000000 2.481481
## 11 11 0.50000000 0.005801 0.000000 1.985185
## 12 12 0.60000000 0.005599 0.037037 1.660494
## 13 13 0.70000000 0.005448 0.037037 1.428571
## 14 14 0.80000000 0.005434 0.000000 1.250000
## 15 15 0.90000000 0.005421 0.000000 1.111111
## 16 16 1.00000000 0.005404 0.000000 1.000000
## response_rate score cumulative_response_rate cumulative_score
## 1 1.000000 0.717382 1.000000 0.717382
## 2 0.367647 0.047284 0.674242 0.372180
## 3 0.000000 0.012108 0.464348 0.260088
## 4 0.000000 0.011912 0.348564 0.198206
## 5 0.000000 0.011806 0.278997 0.161004
## 6 0.000000 0.007288 0.139208 0.083986
## 7 0.000000 0.006141 0.092837 0.058055
## 8 0.000000 0.006074 0.069786 0.045148
## 9 0.000522 0.005985 0.046690 0.032089
## 10 0.000000 0.005889 0.035024 0.025543
## 11 0.000000 0.005830 0.028019 0.021600
## 12 0.000523 0.005694 0.023436 0.018949
## 13 0.000523 0.005499 0.020163 0.017028
## 14 0.000000 0.005439 0.017642 0.015579
## 15 0.000000 0.005430 0.015682 0.014451
## 16 0.000000 0.005413 0.014114 0.013548
## capture_rate cumulative_capture_rate gain cumulative_gain
## 1 0.711111 0.711111 6985.185185 6985.185185
## 2 0.277778 0.988889 2504.847495 4677.132435
## 3 0.000000 0.988889 -100.000000 3189.990338
## 4 0.000000 0.988889 -100.000000 2369.640267
## 5 0.000000 0.988889 -100.000000 1876.744456
## 6 0.000000 0.988889 -100.000000 886.310972
## 7 0.000000 0.988889 -100.000000 557.769278
## 8 0.000000 0.988889 -100.000000 394.444444
## 9 0.003704 0.992593 -96.298231 230.806556
## 10 0.000000 0.992593 -100.000000 148.148148
## 11 0.000000 0.992593 -100.000000 98.518519
## 12 0.003704 0.996296 -96.296296 66.049383
## 13 0.003704 1.000000 -96.296296 42.857143
## 14 0.000000 1.000000 -100.000000 25.000000
## 15 0.000000 1.000000 -100.000000 11.111111
## 16 0.000000 1.000000 -100.000000 0.000000
## kolmogorov_smirnov
## 1 0.711111
## 2 0.982049
## 3 0.972558
## 4 0.962431
## 5 0.952304
## 6 0.901349
## 7 0.850554
## 8 0.800183
## 9 0.702455
## 10 0.601076
## 11 0.499645
## 12 0.401970
## 13 0.304295
## 14 0.202863
## 15 0.101432
## 16 0.000000
##
## $residual_deviance
## [1] 833.0676
##
## $null_deviance
## [1] 2836.923
##
## $AIC
## [1] 839.0676
##
## $loglikelihood
## [1] 0
##
## $null_degrees_of_freedom
## [1] 19129
##
## $residual_degrees_of_freedom
## [1] 19127
h2o.auc(performance_h2o)
## [1] 0.9948424
h2o.confusionMatrix(performance_h2o)
## Confusion Matrix (vertical: actual; across: predicted) for max f1 @ threshold = 0.037257435626137:
## FALSE TRUE Error Rate
## FALSE 18860 0 0.000000 =0/18860
## TRUE 9 261 0.033333 =9/270
## Totals 18869 261 0.000470 =9/19130
h2o.metric(performance_h2o) %>% as_tibble() %>% filter(between(threshold, 0.41, 0.42))
## # A tibble: 0 × 20
## # ℹ 20 variables: threshold <dbl>, f1 <dbl>, f2 <dbl>, f0point5 <dbl>,
## # accuracy <dbl>, precision <dbl>, recall <dbl>, specificity <dbl>,
## # absolute_mcc <dbl>, min_per_class_accuracy <dbl>,
## # mean_per_class_accuracy <dbl>, tns <dbl>, fns <dbl>, fps <dbl>, tps <dbl>,
## # tnr <dbl>, fnr <dbl>, fpr <dbl>, tpr <dbl>, idx <int>
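The 0.41-0.42 window contains no thresholds for this model, which is why the filter above returns an empty tibble; a sketch of looking around the F1-optimal threshold (about 0.037, per the confusion matrix above) instead:
h2o.metric(performance_h2o) %>%
  as_tibble() %>%
  filter(between(threshold, 0.03, 0.05)) %>%          # window around the F1-optimal threshold
  select(threshold, f1, precision, recall, accuracy)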