The goal is to automate building and tuning a classification model that predicts whether a person died, using h2o::h2o.automl().

Set up

Import data

Import the cleaned data from Module 7.

library(h2o)
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## Attaching package: 'h2o'
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## The following objects are masked from 'package:base':
## 
##     &&, %*%, %in%, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ lubridate::day()   masks h2o::day()
## ✖ dplyr::filter()    masks stats::filter()
## ✖ lubridate::hour()  masks h2o::hour()
## ✖ dplyr::lag()       masks stats::lag()
## ✖ lubridate::month() masks h2o::month()
## ✖ lubridate::week()  masks h2o::week()
## ✖ lubridate::year()  masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.2.0 ──
## ✔ broom        1.0.8     ✔ rsample      1.2.1
## ✔ dials        1.3.0     ✔ tune         1.2.1
## ✔ infer        1.0.7     ✔ workflows    1.1.4
## ✔ modeldata    1.4.0     ✔ workflowsets 1.1.0
## ✔ parsnip      1.2.1     ✔ yardstick    1.3.2
## ✔ recipes      1.1.0     
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Learn how to get started at https://www.tidymodels.org/start/
library(tidyquant)
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo 
## ── Attaching core tidyquant packages ──────────────────────── tidyquant 1.0.9 ──
## ✔ PerformanceAnalytics 2.0.4      ✔ TTR                  0.24.4
## ✔ quantmod             0.4.26     ✔ xts                  0.14.0
## ── Conflicts ────────────────────────────────────────── tidyquant_conflicts() ──
## ✖ zoo::as.Date()                 masks base::as.Date()
## ✖ zoo::as.Date.numeric()         masks base::as.Date.numeric()
## ✖ scales::col_factor()           masks readr::col_factor()
## ✖ lubridate::day()               masks h2o::day()
## ✖ scales::discard()              masks purrr::discard()
## ✖ dplyr::filter()                masks stats::filter()
## ✖ xts::first()                   masks dplyr::first()
## ✖ recipes::fixed()               masks stringr::fixed()
## ✖ lubridate::hour()              masks h2o::hour()
## ✖ dplyr::lag()                   masks stats::lag()
## ✖ xts::last()                    masks dplyr::last()
## ✖ PerformanceAnalytics::legend() masks graphics::legend()
## ✖ TTR::momentum()                masks dials::momentum()
## ✖ lubridate::month()             masks h2o::month()
## ✖ yardstick::spec()              masks readr::spec()
## ✖ quantmod::summary()            masks h2o::summary(), base::summary()
## ✖ lubridate::week()              masks h2o::week()
## ✖ lubridate::year()              masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
data <- read_csv("../00_data/data_wrangled/data_clean.csv") %>%
    
    # h2o requires all variables to be either numeric or factors
    mutate(across(where(is.character), factor))
## Rows: 72984 Columns: 15
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (8): expedition_id, member_id, peak_name, season, sex, citizenship, expe...
## dbl (2): year, age
## lgl (5): hired, success, solo, oxygen_used, injured
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
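
Before splitting, a quick look at the outcome distribution helps put the accuracy numbers later on in context (a small sketch; output not shown here):

data %>% 
    count(died) %>% 
    mutate(prop = n / sum(n))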

Split data

set.seed(1234)

data_split <- initial_split(data, strata = "died")
train_tbl <- training(data_split)
test_tbl <- testing(data_split)

Recipes

recipe_obj <- recipe(died ~ ., data = train_tbl) %>%
    
    # Remove zero variance variables
    step_zv(all_predictors()) 
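
The recipe is defined here but not applied downstream, since the training tibble is handed to h2o directly. If it were applied, a minimal sketch using the standard recipes workflow would be:

recipe_prepped <- prep(recipe_obj, training = train_tbl)

train_processed <- bake(recipe_prepped, new_data = train_tbl)
test_processed  <- bake(recipe_prepped, new_data = test_tbl)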

Model

# Initialize h2o
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         2 days 1 hours 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    1 year, 4 months and 4 days 
##     H2O cluster name:           H2O_started_from_R_alyssadalessio_fyb567 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   3.09 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.4.1 (2024-06-14)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (1 year, 4 months and 4 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
split.h2o <- h2o.splitFrame(as.h2o(train_tbl), ratios = c(0.85), seed = 2345)
train_h2o <- split.h2o[[1]]
valid_h2o <- split.h2o[[2]]
test_h2o <- as.h2o(test_tbl)
y <- "died"
x <- setdiff(names(train_tbl), y)
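
The predictor set above includes the identifier columns expedition_id and member_id, which is why h2o.predict() later warns about factor levels it has not seen. An optional variant (not used in this run) would drop them from the predictors:

# x <- setdiff(names(train_tbl), c(y, "expedition_id", "member_id"))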

models_h2o <- h2o.automl(
    x = x,
    y = y, 
    training_frame = train_h2o, 
    validation_frame = valid_h2o,
    leaderboard_frame = test_h2o, 
    # max_runtime_secs = 30, 
    max_models = 10,
    exclude_algos = "DeepLearning",
    nfolds = 5, 
    seed = 3456
)
## 20:10:38.750: User specified a validation frame with cross-validation still enabled. Please note that the models will still be validated using cross-validation only, the validation frame will be used to provide purely informative validation metrics on the trained models.

Examine the output of h2o.automl:

models_h2o %>% typeof()
## [1] "S4"
models_h2o %>% slotNames()
## [1] "project_name"   "leader"         "leaderboard"    "event_log"     
## [5] "modeling_steps" "training_info"
models_h2o@leaderboard
##                                                   model_id       auc    logloss
## 1    StackedEnsemble_AllModels_1_AutoML_13_20250424_201038 0.8183652 0.05878677
## 2 StackedEnsemble_BestOfFamily_1_AutoML_13_20250424_201038 0.8121654 0.05975624
## 3                          GBM_1_AutoML_13_20250424_201038 0.7909844 0.06178741
## 4                      XGBoost_2_AutoML_13_20250424_201038 0.7861913 0.06347880
## 5                      XGBoost_3_AutoML_13_20250424_201038 0.7829949 0.06396848
## 6                          GBM_3_AutoML_13_20250424_201038 0.7799224 0.06284255
##       aucpr mean_per_class_error      rmse        mse
## 1 0.9962210            0.4640556 0.1102959 0.01216518
## 2 0.9960518            0.4503056 0.1114531 0.01242179
## 3 0.9950684            0.4701111 0.1118329 0.01250660
## 4 0.9957521            0.4940278 0.1137129 0.01293061
## 5 0.9955785            0.4940000 0.1141582 0.01303210
## 6 0.9949200            0.4661945 0.1124708 0.01264968
## 
## [12 rows x 7 columns]
models_h2o@leader
## Model Details:
## ==============
## 
## H2OBinomialModel: stackedensemble
## Model ID:  StackedEnsemble_AllModels_1_AutoML_13_20250424_201038 
## Model Summary for Stacked Ensemble: 
##                                     key            value
## 1                     Stacking strategy cross_validation
## 2  Number of base models (used / total)             8/10
## 3      # GBM base models (used / total)              2/4
## 4  # XGBoost base models (used / total)              3/3
## 5      # GLM base models (used / total)              1/1
## 6      # DRF base models (used / total)              2/2
## 7                 Metalearner algorithm              GLM
## 8    Metalearner fold assignment scheme           Random
## 9                    Metalearner nfolds                5
## 10              Metalearner fold_column               NA
## 11   Custom metalearner hyperparameters             None
## 
## 
## H2OBinomialMetrics: stackedensemble
## ** Reported on training data. **
## 
## MSE:  0.008440459
## RMSE:  0.09187197
## LogLoss:  0.03437751
## Mean Per-Class Error:  0.2233938
## AUC:  0.9867074
## AUCPR:  0.99979
## Gini:  0.9734147
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##        died   no    Error      Rate
## died     76   61 0.445255   =61/137
## no       15 9775 0.001532  =15/9790
## Totals   91 9836 0.007656  =76/9927
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold       value idx
## 1                       max f1  0.827024    0.996128 320
## 2                       max f2  0.755281    0.998103 339
## 3                 max f0point5  0.887288    0.995764 283
## 4                 max accuracy  0.834474    0.992344 317
## 5                max precision  0.999280    1.000000   0
## 6                   max recall  0.523702    1.000000 373
## 7              max specificity  0.999280    1.000000   0
## 8             max absolute_mcc  0.848209    0.686792 310
## 9   max min_per_class_accuracy  0.967345    0.948905 182
## 10 max mean_per_class_accuracy  0.963945    0.953053 189
## 11                     max tns  0.999280  137.000000   0
## 12                     max fns  0.999280 9785.000000   0
## 13                     max fps  0.119885  137.000000 399
## 14                     max tps  0.523702 9790.000000 373
## 15                     max tnr  0.999280    1.000000   0
## 16                     max fnr  0.999280    0.999489   0
## 17                     max fpr  0.119885    1.000000 399
## 18                     max tpr  0.523702    1.000000 373
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: stackedensemble
## ** Reported on validation data. **
## 
## MSE:  0.01152636
## RMSE:  0.1073609
## LogLoss:  0.05628264
## Mean Per-Class Error:  0.4713374
## AUC:  0.8080054
## AUCPR:  0.9965005
## Gini:  0.6160107
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##        died   no    Error       Rate
## died      6   98 0.942308    =98/104
## no        3 8167 0.000367    =3/8170
## Totals    9 8265 0.012207  =101/8274
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold       value idx
## 1                       max f1  0.566644    0.993855 390
## 2                       max f2  0.353244    0.997485 398
## 3                 max f0point5  0.848952    0.990727 360
## 4                 max accuracy  0.634075    0.987793 388
## 5                max precision  0.999144    1.000000   0
## 6                   max recall  0.353244    1.000000 398
## 7              max specificity  0.999144    1.000000   0
## 8             max absolute_mcc  0.848952    0.227665 360
## 9   max min_per_class_accuracy  0.989357    0.719706 105
## 10 max mean_per_class_accuracy  0.989357    0.725238 105
## 11                     max tns  0.999144  104.000000   0
## 12                     max fns  0.999144 8165.000000   0
## 13                     max fps  0.190828  104.000000 399
## 14                     max tps  0.353244 8170.000000 398
## 15                     max tnr  0.999144    1.000000   0
## 16                     max fnr  0.999144    0.999388   0
## 17                     max fpr  0.190828    1.000000 399
## 18                     max tpr  0.353244    1.000000 398
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: stackedensemble
## ** Reported on cross-validation data. **
## ** 5-fold cross-validation on training data (Metrics computed for combined holdout predictions) **
## 
## MSE:  0.01134589
## RMSE:  0.1065171
## LogLoss:  0.05642441
## Mean Per-Class Error:  0.4714787
## AUC:  0.7910615
## AUCPR:  0.995858
## Gini:  0.582123
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##        died    no    Error        Rate
## died     33   542 0.942609    =542/575
## no       16 45873 0.000349   =16/45889
## Totals   49 46415 0.012009  =558/46464
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold        value idx
## 1                       max f1  0.647224     0.993955 365
## 2                       max f2  0.432645     0.997517 386
## 3                 max f0point5  0.763507     0.990707 330
## 4                 max accuracy  0.665783     0.987991 360
## 5                max precision  0.999187     1.000000   0
## 6                   max recall  0.218575     1.000000 396
## 7              max specificity  0.999187     1.000000   0
## 8             max absolute_mcc  0.882565     0.217416 267
## 9   max min_per_class_accuracy  0.989828     0.709565  71
## 10 max mean_per_class_accuracy  0.989468     0.713642  73
## 11                     max tns  0.999187   575.000000   0
## 12                     max fns  0.999187 45794.000000   0
## 13                     max fps  0.051049   575.000000 399
## 14                     max tps  0.218575 45889.000000 396
## 15                     max tnr  0.999187     1.000000   0
## 16                     max fnr  0.999187     0.997930   0
## 17                     max fpr  0.051049     1.000000 399
## 18                     max tpr  0.218575     1.000000 396
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## Cross-Validation Metrics Summary: 
##                 mean       sd cv_1_valid cv_2_valid cv_3_valid cv_4_valid
## accuracy    0.988139 0.000573   0.989051   0.987867   0.988294   0.987558
## auc         0.791612 0.018429   0.787500   0.813290   0.795134   0.798913
## err         0.011861 0.000573   0.010949   0.012133   0.011706   0.012442
## err_count 110.200000 4.919350 102.000000 112.000000 110.000000 115.000000
## f0point5    0.990572 0.000461   0.991282   0.990268   0.990666   0.990076
##           cv_5_valid
## accuracy    0.987927
## auc         0.763224
## err         0.012073
## err_count 112.000000
## f0point5    0.990570
## 
## ---
##                          mean        sd cv_1_valid  cv_2_valid  cv_3_valid
## precision            0.988281  0.000579   0.989153    0.987864    0.988386
## r2                   0.070810  0.017777   0.064362    0.071709    0.085976
## recall               0.999848  0.000166   0.999891    1.000000    0.999892
## residual_deviance 1048.286900 49.902570 977.222500 1015.216900 1074.409700
## rmse                 0.106509  0.002702   0.102111    0.106407    0.107346
## specificity          0.052945  0.029402   0.038095    0.017544    0.091667
##                    cv_4_valid  cv_5_valid
## precision            0.987653    0.988347
## r2                   0.087743    0.044258
## recall               0.999890    0.999564
## residual_deviance 1094.074600 1080.510900
## rmse                 0.109445    0.107237
## specificity          0.073171    0.044248

Save and Load

?h2o.getModel
?h2o.saveModel
?h2o.loadModel

best_model <- models_h2o@leader
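
Saving and loading are not run in this document; a sketch using the helpers referenced above (the path is illustrative) would look like:

# h2o.saveModel() writes the model to disk and returns the full path
# model_path <- h2o.saveModel(best_model, path = "../00_models", force = TRUE)

# Reload it later into a running h2o cluster
# best_model <- h2o.loadModel(model_path)

# Any leaderboard model can also be retrieved by its id, e.g.
# h2o.getModel("GBM_1_AutoML_13_20250424_201038")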

Make predictions

predictions <- h2o.predict(best_model, newdata = test_h2o)
## Warning in doTryCatch(return(expr), name, parentenv, handler): Test/Validation
## dataset column 'expedition_id' has levels not trained on: ["AMAD00106",
## "AMAD02308", "AMAD03311", "AMAD04102", "AMAD04106", "AMAD04301", "AMAD04340",
## "AMAD05106", "AMAD05107", "AMAD05109", ...335 not listed..., "SPHN36101",
## "TANK15101", "TAWO09401", "TAWO74101", "TAWO87401", "THAM82301", "TILI86301",
## "TSAR13301", "TUKU16102", "YALU84101"]
## Warning in doTryCatch(return(expr), name, parentenv, handler): Test/Validation
## dataset column 'member_id' has levels not trained on: ["ACHN15301-01",
## "ACHN15301-04", "ACHN15302-01", "ACHN15302-03", "ACHN15302-04", "ACHN15302-09",
## "ACHN15302-10", "ACHN18301-01", "ACHN18301-02", "ACHN18301-08", ...18226 not
## listed..., "YALU89401-01", "YALU89401-03", "YALU91301-05", "YALU91301-06",
## "YANS03301-05", "YARA18301-03", "YAUP17101-03", "YAUP17101-04", "YAUP17101-06",
## "YAUP89301-01"]
## Warning in doTryCatch(return(expr), name, parentenv, handler): Test/Validation
## dataset column 'peak_name' has levels not trained on: ["Kabru Main", "Lhayul
## Peak", "Tankya I"]
## Warning in doTryCatch(return(expr), name, parentenv, handler): Test/Validation
## dataset column 'citizenship' has levels not trained on: ["Botswana",
## "Canada/Russia", "Egypt/UK", "Germany/Switzerland", "India/Nepal",
## "Macedonia/Australia", "N Korea", "Paraguay/Germany", "Poland/Canada",
## "Romania/USA", "Spain/Brazil", "Tanzania", "UK/Iceland", "USA/Israel",
## "USA/Jamaica"]
## Warning in doTryCatch(return(expr), name, parentenv, handler): Test/Validation
## dataset column 'expedition_role' has levels not trained on: ["2nd Sirdar", "ABC
## Cook", "Advance Team Leader", "BC Chef", "BC Manager (C1 only)", "BC Nurse",
## "BC Technician", "Base Camp Leader", "Cash Director", "Charge of publicity",
## ...59 not listed..., "President", "Press Correspondent", "Programme Director",
## "Scientific Coordinator", "Secretary/interpreter", "Survey Party Leader", "Team
## Mgr, Trainer", "Transport Manager", "Video Editor", "Wireless Operater"]
predictions_tbl <- predictions %>%
    as_tibble()

predictions_tbl %>% 
    bind_cols(test_tbl)
## New names:
## • `died` -> `died...2`
## • `died` -> `died...17`
## # A tibble: 18,246 × 18
##    predict died...2    no expedition_id member_id   peak_name  year season sex  
##    <fct>      <dbl> <dbl> <fct>         <fct>       <fct>     <dbl> <fct>  <fct>
##  1 no       0.0205  0.980 AMAD78301     AMAD78301-… Ama Dabl…  1978 Autumn M    
##  2 no       0.0119  0.988 AMAD78301     AMAD78301-… Ama Dabl…  1978 Autumn M    
##  3 no       0.00711 0.993 AMAD78301     AMAD78301-… Ama Dabl…  1978 Autumn M    
##  4 no       0.0115  0.988 AMAD78301     AMAD78301-… Ama Dabl…  1978 Autumn M    
##  5 no       0.00726 0.993 AMAD79101     AMAD79101-… Ama Dabl…  1979 Spring M    
##  6 no       0.00605 0.994 AMAD79101     AMAD79101-… Ama Dabl…  1979 Spring M    
##  7 no       0.00457 0.995 AMAD79101     AMAD79101-… Ama Dabl…  1979 Spring M    
##  8 no       0.00537 0.995 AMAD79101     AMAD79101-… Ama Dabl…  1979 Spring M    
##  9 no       0.00492 0.995 AMAD79101     AMAD79101-… Ama Dabl…  1979 Spring M    
## 10 no       0.00658 0.993 AMAD79301     AMAD79301-… Ama Dabl…  1979 Autumn F    
## # ℹ 18,236 more rows
## # ℹ 9 more variables: age <dbl>, citizenship <fct>, expedition_role <fct>,
## #   hired <lgl>, success <lgl>, solo <lgl>, oxygen_used <lgl>, died...17 <fct>,
## #   injured <lgl>
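
The "New names" messages above appear because died exists in both the prediction frame and the test set. A sketch that renames the h2o columns before binding (assuming the default h2o.predict() output columns predict, died, and no) keeps the names unambiguous:

predictions_tbl %>%
    rename(.pred_class = predict, .pred_died = died, .pred_no = no) %>%
    bind_cols(test_tbl)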

Evaluate model

?h2o.performance
performance_h2o <- h2o.performance(best_model, newdata = test_h2o)
typeof(performance_h2o)
## [1] "S4"
slotNames(performance_h2o)
## [1] "algorithm" "on_train"  "on_valid"  "on_xval"   "metrics"
performance_h2o@metrics
## $model
## $model$`__meta`
## $model$`__meta`$schema_version
## [1] 3
## 
## $model$`__meta`$schema_name
## [1] "ModelKeyV3"
## 
## $model$`__meta`$schema_type
## [1] "Key<Model>"
## 
## 
## $model$name
## [1] "StackedEnsemble_AllModels_1_AutoML_13_20250424_201038"
## 
## $model$type
## [1] "Key<Model>"
## 
## $model$URL
## [1] "/3/Models/StackedEnsemble_AllModels_1_AutoML_13_20250424_201038"
## 
## 
## $model_checksum
## [1] "6269474788221941022"
## 
## $frame
## $frame$name
## [1] "test_tbl_sid_9bac_3"
## 
## 
## $frame_checksum
## [1] "4100564097386666151"
## 
## $description
## NULL
## 
## $scoring_time
## [1] 1.74554e+12
## 
## $predictions
## NULL
## 
## $MSE
## [1] 0.01216518
## 
## $RMSE
## [1] 0.1102959
## 
## $nobs
## [1] 18246
## 
## $custom_metric_name
## NULL
## 
## $custom_metric_value
## [1] 0
## 
## $r2
## [1] 0.0998025
## 
## $logloss
## [1] 0.05878677
## 
## $AUC
## [1] 0.8183652
## 
## $pr_auc
## [1] 0.996221
## 
## $Gini
## [1] 0.6367304
## 
## $mean_per_class_error
## [1] 0.4640556
## 
## $domain
## [1] "died" "no"  
## 
## $cm
## $cm$`__meta`
## $cm$`__meta`$schema_version
## [1] 3
## 
## $cm$`__meta`$schema_name
## [1] "ConfusionMatrixV3"
## 
## $cm$`__meta`$schema_type
## [1] "ConfusionMatrix"
## 
## 
## $cm$table
## Confusion Matrix: Row labels: Actual class; Column labels: Predicted class
##        died    no  Error           Rate
## died     18   232 0.9280 =    232 / 250
## no        2 17994 0.0001 =   2 / 17,996
## Totals   20 18226 0.0128 = 234 / 18,246
## 
## 
## $thresholds_and_metric_scores
## Metrics for Thresholds: Binomial metrics as a function of classification thresholds
##   threshold       f1       f2 f0point5 accuracy precision   recall specificity
## 1  0.999102 0.001444 0.000903 0.003602 0.014414  1.000000 0.000722    1.000000
## 2  0.998890 0.004767 0.002985 0.011834 0.016058  1.000000 0.002389    1.000000
## 3  0.998669 0.012041 0.007560 0.029568 0.019676  1.000000 0.006057    1.000000
## 4  0.998553 0.016205 0.010190 0.039550 0.021758  1.000000 0.008168    1.000000
## 5  0.998452 0.028592 0.018064 0.068540 0.028006  1.000000 0.014503    1.000000
##   absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns   fns fps tps
## 1     0.003147               0.000722                0.500361 250 17983   0  13
## 2     0.005729               0.002389                0.501195 250 17953   0  43
## 3     0.009137               0.006057                0.503028 250 17887   0 109
## 4     0.010622               0.008168                0.504084 250 17849   0 147
## 5     0.014199               0.014503                0.507252 250 17735   0 261
##        tnr      fnr      fpr      tpr idx
## 1 1.000000 0.999278 0.000000 0.000722   0
## 2 1.000000 0.997611 0.000000 0.002389   1
## 3 1.000000 0.993943 0.000000 0.006057   2
## 4 1.000000 0.991832 0.000000 0.008168   3
## 5 1.000000 0.985497 0.000000 0.014503   4
## 
## ---
##     threshold       f1       f2 f0point5 accuracy precision   recall
## 395  0.372490 0.993239 0.997285 0.989226 0.986572  0.986569 1.000000
## 396  0.268320 0.993212 0.997274 0.989183 0.986518  0.986515 1.000000
## 397  0.253131 0.993184 0.997262 0.989139 0.986463  0.986461 1.000000
## 398  0.203811 0.993157 0.997251 0.989096 0.986408  0.986406 1.000000
## 399  0.180695 0.993129 0.997240 0.989052 0.986353  0.986352 1.000000
## 400  0.131746 0.993102 0.997229 0.989009 0.986298  0.986298 1.000000
##     specificity absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns
## 395    0.020000     0.140468               0.020000                0.510000   5
## 396    0.016000     0.125635               0.016000                0.508000   4
## 397    0.012000     0.108800               0.012000                0.506000   3
## 398    0.008000     0.088833               0.008000                0.504000   2
## 399    0.004000     0.062812               0.004000                0.502000   1
## 400    0.000000     0.000000               0.000000                0.500000   0
##     fns fps   tps      tnr      fnr      fpr      tpr idx
## 395   0 245 17996 0.020000 0.000000 0.980000 1.000000 394
## 396   0 246 17996 0.016000 0.000000 0.984000 1.000000 395
## 397   0 247 17996 0.012000 0.000000 0.988000 1.000000 396
## 398   0 248 17996 0.008000 0.000000 0.992000 1.000000 397
## 399   0 249 17996 0.004000 0.000000 0.996000 1.000000 398
## 400   0 250 17996 0.000000 0.000000 1.000000 1.000000 399
## 
## $max_criteria_and_metric_scores
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold        value idx
## 1                       max f1  0.637323     0.993540 379
## 2                       max f2  0.546045     0.997384 385
## 3                 max f0point5  0.869164     0.990212 319
## 4                 max accuracy  0.664179     0.987175 377
## 5                max precision  0.999102     1.000000   0
## 6                   max recall  0.546045     1.000000 385
## 7              max specificity  0.999102     1.000000   0
## 8             max absolute_mcc  0.828640     0.269258 341
## 9   max min_per_class_accuracy  0.988627     0.725939  93
## 10 max mean_per_class_accuracy  0.983206     0.736843 128
## 11                     max tns  0.999102   250.000000   0
## 12                     max fns  0.999102 17983.000000   0
## 13                     max fps  0.131746   250.000000 399
## 14                     max tps  0.546045 17996.000000 385
## 15                     max tnr  0.999102     1.000000   0
## 16                     max fnr  0.999102     0.999278   0
## 17                     max fpr  0.131746     1.000000 399
## 18                     max tpr  0.546045     1.000000 385
## 
## $gains_lift_table
## Gains/Lift Table: Avg response rate: 98.63 %, avg score: 98.77 %
##    group cumulative_data_fraction lower_threshold     lift cumulative_lift
## 1      1               0.01002960        0.998474 1.013892        1.013892
## 2      2               0.02000438        0.998279 1.013892        1.013892
## 3      3               0.03003398        0.998154 1.008352        1.012042
## 4      4               0.04006358        0.998036 1.008352        1.011118
## 5      5               0.05003836        0.997945 1.013892        1.011671
## 6      6               0.10002192        0.997517 1.012780        1.012225
## 7      7               0.15000548        0.997127 1.011669        1.012040
## 8      8               0.20004385        0.996732 1.012781        1.012225
## 9      9               0.30001096        0.995865 1.011669        1.012040
## 10    10               0.40003288        0.995008 1.009448        1.011392
## 11    11               0.50000000        0.993741 1.007777        1.010669
## 12    12               0.60002192        0.991958 1.005559        1.009817
## 13    13               0.69998904        0.989196 1.003886        1.008970
## 14    14               0.80001096        0.985096 1.001114        1.007988
## 15    15               0.89997808        0.976265 0.994993        1.006544
## 16    16               1.00000000        0.131746 0.941114        1.000000
##    response_rate    score cumulative_response_rate cumulative_score
## 1       1.000000 0.998685                 1.000000         0.998685
## 2       1.000000 0.998370                 1.000000         0.998528
## 3       0.994536 0.998214                 0.998175         0.998423
## 4       0.994536 0.998092                 0.997264         0.998340
## 5       1.000000 0.997987                 0.997809         0.998270
## 6       0.998904 0.997726                 0.998356         0.997998
## 7       0.997807 0.997320                 0.998173         0.997772
## 8       0.998905 0.996935                 0.998356         0.997563
## 9       0.997807 0.996297                 0.998173         0.997141
## 10      0.995616 0.995439                 0.997534         0.996716
## 11      0.993969 0.994402                 0.996821         0.996253
## 12      0.991781 0.992914                 0.995981         0.995696
## 13      0.990132 0.990637                 0.995146         0.994974
## 14      0.987397 0.987347                 0.994177         0.994020
## 15      0.981360 0.981405                 0.992753         0.992619
## 16      0.928219 0.943117                 0.986298         0.987668
##    capture_rate cumulative_capture_rate      gain cumulative_gain
## 1      0.010169                0.010169  1.389198        1.389198
## 2      0.010113                0.020282  1.389198        1.389198
## 3      0.010113                0.030396  0.835158        1.204181
## 4      0.010113                0.040509  0.835158        1.111799
## 5      0.010113                0.050622  1.389198        1.167096
## 6      0.050622                0.101245  1.278025        1.222530
## 7      0.050567                0.151812  1.166853        1.203978
## 8      0.050678                0.202489  1.278147        1.222530
## 9      0.101134                0.303623  1.166853        1.203978
## 10     0.100967                0.404590  0.944752        1.139163
## 11     0.100745                0.505335  0.777750        1.066904
## 12     0.100578                0.605912  0.555862        0.981715
## 13     0.100356                0.706268  0.388646        0.897017
## 14     0.100133                0.806401  0.111416        0.798797
## 15     0.099467                0.905868 -0.500733        0.654449
## 16     0.094132                1.000000 -5.888602        0.000000
##    kolmogorov_smirnov
## 1            0.010169
## 2            0.020282
## 3            0.026396
## 4            0.032509
## 5            0.042622
## 6            0.089245
## 7            0.131812
## 8            0.178489
## 9            0.263623
## 10           0.332590
## 11           0.389335
## 12           0.429912
## 13           0.458268
## 14           0.466401
## 15           0.429868
## 16           0.000000
## 
## $residual_deviance
## [1] 2145.247
## 
## $null_deviance
## [1] 2644.218
## 
## $AIC
## [1] 2163.247
## 
## $loglikelihood
## [1] 0
## 
## $null_degrees_of_freedom
## [1] 18245
## 
## $residual_degrees_of_freedom
## [1] 18237
h2o.auc(performance_h2o)
## [1] 0.8183652
h2o.confusionMatrix(performance_h2o)
## Confusion Matrix (vertical: actual; across: predicted)  for max f1 @ threshold = 0.637323151151124:
##        died    no    Error        Rate
## died     18   232 0.928000    =232/250
## no        2 17994 0.000111    =2/17996
## Totals   20 18226 0.012825  =234/18246
h2o.metric(performance_h2o)
## Metrics for Thresholds: Binomial metrics as a function of classification thresholds
##   threshold       f1       f2 f0point5 accuracy precision   recall specificity
## 1  0.999102 0.001444 0.000903 0.003602 0.014414  1.000000 0.000722    1.000000
## 2  0.998890 0.004767 0.002985 0.011834 0.016058  1.000000 0.002389    1.000000
## 3  0.998669 0.012041 0.007560 0.029568 0.019676  1.000000 0.006057    1.000000
## 4  0.998553 0.016205 0.010190 0.039550 0.021758  1.000000 0.008168    1.000000
## 5  0.998452 0.028592 0.018064 0.068540 0.028006  1.000000 0.014503    1.000000
##   absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns   fns fps tps
## 1     0.003147               0.000722                0.500361 250 17983   0  13
## 2     0.005729               0.002389                0.501195 250 17953   0  43
## 3     0.009137               0.006057                0.503028 250 17887   0 109
## 4     0.010622               0.008168                0.504084 250 17849   0 147
## 5     0.014199               0.014503                0.507252 250 17735   0 261
##        tnr      fnr      fpr      tpr idx
## 1 1.000000 0.999278 0.000000 0.000722   0
## 2 1.000000 0.997611 0.000000 0.002389   1
## 3 1.000000 0.993943 0.000000 0.006057   2
## 4 1.000000 0.991832 0.000000 0.008168   3
## 5 1.000000 0.985497 0.000000 0.014503   4
## 
## ---
##     threshold       f1       f2 f0point5 accuracy precision   recall
## 395  0.372490 0.993239 0.997285 0.989226 0.986572  0.986569 1.000000
## 396  0.268320 0.993212 0.997274 0.989183 0.986518  0.986515 1.000000
## 397  0.253131 0.993184 0.997262 0.989139 0.986463  0.986461 1.000000
## 398  0.203811 0.993157 0.997251 0.989096 0.986408  0.986406 1.000000
## 399  0.180695 0.993129 0.997240 0.989052 0.986353  0.986352 1.000000
## 400  0.131746 0.993102 0.997229 0.989009 0.986298  0.986298 1.000000
##     specificity absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns
## 395    0.020000     0.140468               0.020000                0.510000   5
## 396    0.016000     0.125635               0.016000                0.508000   4
## 397    0.012000     0.108800               0.012000                0.506000   3
## 398    0.008000     0.088833               0.008000                0.504000   2
## 399    0.004000     0.062812               0.004000                0.502000   1
## 400    0.000000     0.000000               0.000000                0.500000   0
##     fns fps   tps      tnr      fnr      fpr      tpr idx
## 395   0 245 17996 0.020000 0.000000 0.980000 1.000000 394
## 396   0 246 17996 0.016000 0.000000 0.984000 1.000000 395
## 397   0 247 17996 0.012000 0.000000 0.988000 1.000000 396
## 398   0 248 17996 0.008000 0.000000 0.992000 1.000000 397
## 399   0 249 17996 0.004000 0.000000 0.996000 1.000000 398
## 400   0 250 17996 0.000000 0.000000 1.000000 1.000000 399
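
As a cross-check outside of h2o, the test-set AUC can be recomputed with yardstick (loaded via tidymodels) from the tibbles built earlier. This is just a sketch; its result should agree with h2o.auc() above up to minor differences:

predictions_tbl %>%
    select(.pred_died = died) %>%
    bind_cols(test_tbl %>% select(actual = died)) %>%
    roc_auc(truth = actual, .pred_died)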

Conclusion: This h2o classification model reached a test-set AUC of 0.818, whereas my earlier model topped out at 0.747. The overall accuracy of the predictions was also higher, at roughly 98 percent, and the maximum F1 was about 0.99. Both of those numbers are driven largely by the dominant "no" class, though: the test-set confusion matrix shows that most actual deaths were still misclassified, so AUC is the more informative comparison here. Building the model with h2o also required far fewer steps and ran noticeably faster than my previous workflow, which took a long time to knit.