The goal is to automate building and tuning a classification model that predicts climber deaths, using h2o::h2o.automl().
Set up
Import data
Import the data used in Module 7 and repeat the cleaning steps.
library(h2o)
## Warning: package 'h2o' was built under R version 4.3.3
##
## ----------------------------------------------------------------------
##
## Your next step is to start H2O:
## > h2o.init()
##
## For H2O package documentation, ask for help:
## > ??h2o
##
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
##
## ----------------------------------------------------------------------
##
## Attaching package: 'h2o'
## The following objects are masked from 'package:stats':
##
## cor, sd, var
## The following objects are masked from 'package:base':
##
## %*%, %in%, &&, ||, apply, as.factor, as.numeric, colnames,
## colnames<-, ifelse, is.character, is.factor, is.numeric, log,
## log10, log1p, log2, round, signif, trunc
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.3 ✔ readr 2.1.4
## ✔ forcats 1.0.0 ✔ stringr 1.5.0
## ✔ ggplot2 3.4.3 ✔ tibble 3.2.1
## ✔ lubridate 1.9.2 ✔ tidyr 1.3.0
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ lubridate::day() masks h2o::day()
## ✖ dplyr::filter() masks stats::filter()
## ✖ lubridate::hour() masks h2o::hour()
## ✖ dplyr::lag() masks stats::lag()
## ✖ lubridate::month() masks h2o::month()
## ✖ lubridate::week() masks h2o::week()
## ✖ lubridate::year() masks h2o::year()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.1.1 ──
## ✔ broom 1.0.5 ✔ rsample 1.2.0
## ✔ dials 1.2.0 ✔ tune 1.1.2
## ✔ infer 1.0.5 ✔ workflows 1.1.3
## ✔ modeldata 1.2.0 ✔ workflowsets 1.0.1
## ✔ parsnip 1.1.1 ✔ yardstick 1.2.0
## ✔ recipes 1.0.8
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter() masks stats::filter()
## ✖ recipes::fixed() masks stringr::fixed()
## ✖ dplyr::lag() masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step() masks stats::step()
## • Dig deeper into tidy modeling with R at https://www.tmwr.org
library(tidyquant)
## Loading required package: PerformanceAnalytics
## Loading required package: xts
## Loading required package: zoo
##
## Attaching package: 'zoo'
##
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
##
##
## ######################### Warning from 'xts' package ##########################
## # #
## # The dplyr lag() function breaks how base R's lag() function is supposed to #
## # work, which breaks lag(my_xts). Calls to lag(my_xts) that you type or #
## # source() into this session won't work correctly. #
## # #
## # Use stats::lag() to make sure you're not using dplyr::lag(), or you can add #
## # conflictRules('dplyr', exclude = 'lag') to your .Rprofile to stop #
## # dplyr from breaking base R's lag() function. #
## # #
## # Code in packages is not affected. It's protected by R's namespace mechanism #
## # Set `options(xts.warn_dplyr_breaks_lag = FALSE)` to suppress this warning. #
## # #
## ###############################################################################
##
## Attaching package: 'xts'
##
## The following objects are masked from 'package:dplyr':
##
## first, last
##
##
## Attaching package: 'PerformanceAnalytics'
##
## The following object is masked from 'package:graphics':
##
## legend
##
## Loading required package: quantmod
## Loading required package: TTR
##
## Attaching package: 'TTR'
##
## The following object is masked from 'package:dials':
##
## momentum
##
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
data <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-22/members.csv')
## Rows: 76519 Columns: 21
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (10): expedition_id, member_id, peak_id, peak_name, season, sex, citizen...
## dbl (5): year, age, highpoint_metres, death_height_metres, injury_height_me...
## lgl (6): hired, success, solo, oxygen_used, died, injured
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# Numeric columns, kept for reference
numeric_vars <- data %>%
  select(year, age, highpoint_metres, death_height_metres, injury_height_metres) %>%
  names()
# Treat missing values and recode the outcome
data_clean <- data %>%
  select(-death_cause, -injury_type, -death_height_metres, -injury_height_metres) %>%
  drop_na() %>%
  # Recode the logical outcome with readable labels
  mutate(died = if_else(died, "died", "no")) %>%
  # Convert the remaining logical variables to factors
  mutate(across(where(is.logical), as.factor)) %>%
  mutate(died = as.factor(died))
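Because deaths are rare, it is worth checking the class balance of the outcome before modelling; a quick sketch using the cleaned data:
# Share of climbers who died vs. survived in the cleaned data
data_clean %>%
  count(died) %>%
  mutate(prop = n / sum(n))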
Split the Data
set.seed(1234)
data_split <- initial_split(data, strata = "died")
train_tbl <- training(data_split)
test_tbl <- testing(data_split)
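To confirm that the stratified split preserved the (very low) death rate, the outcome proportions in the two partitions can be compared; a small sketch:
# Compare the death rate in the training and test partitions
bind_rows(
  train_tbl %>% mutate(partition = "train"),
  test_tbl  %>% mutate(partition = "test")
) %>%
  group_by(partition) %>%
  summarise(death_rate = mean(died, na.rm = TRUE))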
Recipes
recipe_obj <- recipe(died ~ ., data = train_tbl) %>%
# Remove zero variance variables
step_zv(all_predictors())
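The recipe is defined here but not applied before the data are handed to h2o; if it were used, it would be prepped on the training set and baked on both partitions. A minimal sketch, assuming the recipe above:
# Prep the recipe on the training data and apply it to both partitions
recipe_prepped <- prep(recipe_obj, training = train_tbl)

train_processed_tbl <- bake(recipe_prepped, new_data = train_tbl)
test_processed_tbl  <- bake(recipe_prepped, new_data = test_tbl)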
Model
# Initialize h2o
h2o.init()
## Connection successful!
##
## R is connected to the H2O cluster:
## H2O cluster uptime: 9 days 3 hours
## H2O cluster timezone: America/New_York
## H2O data parsing timezone: UTC
## H2O cluster version: 3.44.0.3
## H2O cluster version age: 4 months and 18 days
## H2O cluster name: H2O_started_from_R_OPend_zez903
## H2O cluster total nodes: 1
## H2O cluster total memory: 3.79 GB
## H2O cluster total cores: 12
## H2O cluster allowed cores: 12
## H2O cluster healthy: TRUE
## H2O Connection ip: localhost
## H2O Connection port: 54321
## H2O Connection proxy: NA
## H2O Internal Security: FALSE
## R Version: R version 4.3.1 (2023-06-16 ucrt)
## Warning in h2o.clusterInfo():
## Your H2O cluster version is (4 months and 18 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
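h2o prints a text progress bar for every frame conversion and model run; when knitting a document, these can optionally be silenced:
# Optional: suppress h2o progress bars in the rendered output
# (re-enable later with h2o.show_progress())
h2o.no_progress()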
# Split the training data into h2o training and validation frames
split.h2o <- h2o.splitFrame(as.h2o(train_tbl), ratios = c(0.85), seed = 2567)
train_h2o <- split.h2o[[1]]
valid_h2o <- split.h2o[[2]]
test_h2o <- as.h2o(test_tbl)
y <- "died"
x <- setdiff(names(train_tbl), y)
models_h2o <- h2o.automl(
x = x,
y = y,
training_frame = train_h2o,
validation_frame = valid_h2o,
leaderboard_frame = test_h2o,
max_runtime_secs = 30,
nfolds = 5,
seed = 2345
)
## 19:00:28.705: User specified a validation frame with cross-validation still enabled. Please note that the models will still be validated using cross-validation only, the validation frame will be used to provide purely informative validation metrics on the trained models.
## 19:00:28.709: AutoML: XGBoost is not available; skipping it.
## 19:00:28.709: _train param, Dropping bad and constant columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
## 19:00:43.719: _train param, Dropping unused columns: [member_id, peak_name, death_cause, peak_id, sex, citizenship, expedition_role, season, expedition_id, injury_type]
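A 30-second run on a heavily imbalanced outcome is a fairly constrained search; h2o.automl() also accepts arguments such as max_models, exclude_algos, balance_classes, and sort_metric that could be used to steer it. An illustrative sketch (these settings were not used for the run above):
# Alternative AutoML call with illustrative settings (not the run shown above)
models_h2o_v2 <- h2o.automl(
  x = x,
  y = y,
  training_frame    = train_h2o,
  validation_frame  = valid_h2o,
  leaderboard_frame = test_h2o,
  max_models        = 20,                  # cap the number of models instead of runtime
  exclude_algos     = c("DeepLearning"),   # skip slow-to-train algorithms
  balance_classes   = TRUE,                # oversample the rare positive class
  sort_metric       = "AUCPR",             # rank models by precision-recall AUC
  nfolds            = 5,
  seed              = 2345
)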
Examine the output of h2o.automl
models_h2o %>% typeof()
## [1] "S4"
models_h2o %>% slotNames()
## [1] "project_name" "leader" "leaderboard" "event_log"
## [5] "modeling_steps" "training_info"
models_h2o@leaderboard
## model_id auc logloss
## 1 GBM_3_AutoML_7_20240508_190028 0.9975800 0.004680771
## 2 StackedEnsemble_BestOfFamily_1_AutoML_7_20240508_190028 0.9945110 0.002919614
## 3 GBM_1_AutoML_7_20240508_190028 0.9944712 0.002861552
## 4 DRF_1_AutoML_7_20240508_190028 0.9937626 0.003502198
## 5 StackedEnsemble_BestOfFamily_2_AutoML_7_20240508_190028 0.9937626 0.018274232
## 6 StackedEnsemble_AllModels_1_AutoML_7_20240508_190028 0.9935932 0.012946726
## aucpr mean_per_class_error rmse mse
## 1 0.9856455 0.009312282 0.02134694 0.0004556918
## 2 0.9845382 0.009285770 0.02024188 0.0004097338
## 3 0.9845538 0.009285770 0.02000074 0.0004000296
## 4 0.9859443 0.009285770 0.02172861 0.0004721324
## 5 0.9859443 0.009285770 0.06392251 0.0040860870
## 6 0.9857322 0.009285770 0.05460987 0.0029822376
##
## [9 rows x 7 columns]
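The full leaderboard, with extra columns such as per-model training time, can be pulled into R with h2o.get_leaderboard():
# Leaderboard with all extra columns, converted to a tibble
models_h2o %>%
  h2o.get_leaderboard(extra_columns = "ALL") %>%
  as_tibble()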
models_h2o@leader
## Model Details:
## ==============
##
## H2OBinomialModel: gbm
## Model ID: GBM_3_AutoML_7_20240508_190028
## Model Summary:
## number_of_trees number_of_internal_trees model_size_in_bytes min_depth
## 1 21 21 17880 1
## max_depth mean_depth min_leaves max_leaves mean_leaves
## 1 8 7.33333 2 163 63.57143
##
##
## H2OBinomialMetrics: gbm
## ** Reported on training data. **
##
## MSE: 0.0004970244
## RMSE: 0.02229404
## LogLoss: 0.004757673
## Mean Per-Class Error: 0.01440932
## AUC: 0.9995247
## AUCPR: 0.9918435
## Gini: 0.9990494
## R^2: 0.9646597
##
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
## FALSE TRUE Error Rate
## FALSE 48015 2 0.000042 =2/48017
## TRUE 20 675 0.028777 =20/695
## Totals 48035 677 0.000452 =22/48712
##
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.063413 0.983965 168
## 2 max f2 0.038868 0.976945 175
## 3 max f0point5 0.108224 0.992897 164
## 4 max accuracy 0.063413 0.999548 168
## 5 max precision 0.998186 1.000000 0
## 6 max recall 0.002479 1.000000 325
## 7 max specificity 0.998186 1.000000 0
## 8 max absolute_mcc 0.063413 0.983823 168
## 9 max min_per_class_accuracy 0.005034 0.992806 242
## 10 max mean_per_class_accuracy 0.005034 0.993529 242
## 11 max tns 0.998186 48017.000000 0
## 12 max fns 0.998186 694.000000 0
## 13 max fps 0.002006 48017.000000 399
## 14 max tps 0.002479 695.000000 325
## 15 max tnr 0.998186 1.000000 0
## 16 max fnr 0.998186 0.998561 0
## 17 max fpr 0.002006 1.000000 399
## 18 max tpr 0.002479 1.000000 325
##
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: gbm
## ** Reported on validation data. **
## ** Validation metrics **
##
## MSE: 0.0004679748
## RMSE: 0.02163273
## LogLoss: 0.005128165
## Mean Per-Class Error: 0.0141844
## AUC: 0.9924716
## AUCPR: 0.9832142
## Gini: 0.9849432
## R^2: 0.9707256
##
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
## FALSE TRUE Error Rate
## FALSE 8536 0 0.000000 =0/8536
## TRUE 4 137 0.028369 =4/141
## Totals 8540 137 0.000461 =4/8677
##
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.974702 0.985612 107
## 2 max f2 0.974702 0.977175 107
## 3 max f0point5 0.974702 0.994194 107
## 4 max accuracy 0.974702 0.999539 107
## 5 max precision 0.999014 1.000000 0
## 6 max recall 0.002123 1.000000 370
## 7 max specificity 0.999014 1.000000 0
## 8 max absolute_mcc 0.974702 0.985483 107
## 9 max min_per_class_accuracy 0.004077 0.985816 165
## 10 max mean_per_class_accuracy 0.019842 0.989010 111
## 11 max tns 0.999014 8536.000000 0
## 12 max fns 0.999014 140.000000 0
## 13 max fps 0.002002 8536.000000 399
## 14 max tps 0.002123 141.000000 370
## 15 max tnr 0.999014 1.000000 0
## 16 max fnr 0.999014 0.992908 0
## 17 max fpr 0.002002 1.000000 399
## 18 max tpr 0.002123 1.000000 370
##
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## H2OBinomialMetrics: gbm
## ** Reported on cross-validation data. **
## ** 5-fold cross-validation on training data (Metrics computed for combined holdout predictions) **
##
## MSE: 0.0005219622
## RMSE: 0.02284649
## LogLoss: 0.005476449
## Mean Per-Class Error: 0.01798561
## AUC: 0.9925264
## AUCPR: 0.9731345
## Gini: 0.9850529
## R^2: 0.9628866
##
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
## FALSE TRUE Error Rate
## FALSE 48017 0 0.000000 =0/48017
## TRUE 25 670 0.035971 =25/695
## Totals 48042 670 0.000513 =25/48712
##
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.972886 0.981685 152
## 2 max f2 0.972886 0.971014 152
## 3 max f0point5 0.972886 0.992593 152
## 4 max accuracy 0.972886 0.999487 152
## 5 max precision 0.997748 1.000000 0
## 6 max recall 0.001946 1.000000 380
## 7 max specificity 0.997748 1.000000 0
## 8 max absolute_mcc 0.972886 0.981594 152
## 9 max min_per_class_accuracy 0.003002 0.974613 288
## 10 max mean_per_class_accuracy 0.972886 0.982014 152
## 11 max tns 0.997748 48017.000000 0
## 12 max fns 0.997748 692.000000 0
## 13 max fps 0.001672 48017.000000 399
## 14 max tps 0.001946 695.000000 380
## 15 max tnr 0.997748 1.000000 0
## 16 max fnr 0.997748 0.995683 0
## 17 max fpr 0.001672 1.000000 399
## 18 max tpr 0.001946 1.000000 380
##
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
## Cross-Validation Metrics Summary:
## mean sd cv_1_valid cv_2_valid cv_3_valid
## accuracy 0.999487 0.000145 0.999589 0.999384 0.999281
## auc 0.993261 0.002335 0.995417 0.990461 0.995405
## err 0.000513 0.000145 0.000411 0.000616 0.000719
## err_count 5.000000 1.414214 4.000000 6.000000 7.000000
## f0point5 0.992611 0.001918 0.994437 0.990854 0.990237
## f1 0.981741 0.004687 0.986207 0.977444 0.975945
## f2 0.971119 0.007330 0.978112 0.964392 0.962060
## lift_top_group 70.294120 4.211651 66.278910 71.639710 65.382550
## logloss 0.005476 0.000914 0.004646 0.006173 0.006716
## max_per_class_error 0.035829 0.009026 0.027211 0.044118 0.046980
## mcc 0.981658 0.004669 0.986095 0.977387 0.975872
## mean_per_class_accuracy 0.982085 0.004513 0.986395 0.977941 0.976510
## mean_per_class_error 0.017915 0.004513 0.013605 0.022059 0.023490
## mse 0.000522 0.000144 0.000417 0.000624 0.000726
## pr_auc 0.973468 0.006299 0.980216 0.966722 0.966781
## precision 1.000000 0.000000 1.000000 1.000000 1.000000
## r2 0.963021 0.009121 0.971931 0.954648 0.951792
## recall 0.964171 0.009026 0.972789 0.955882 0.953020
## rmse 0.022680 0.003078 0.020423 0.024984 0.026945
## specificity 1.000000 0.000000 1.000000 1.000000 1.000000
## cv_4_valid cv_5_valid
## accuracy 0.999589 0.999589
## auc 0.991169 0.993853
## err 0.000411 0.000411
## err_count 4.000000 4.000000
## f0point5 0.993740 0.993789
## f1 0.984496 0.984615
## f2 0.975422 0.975610
## lift_top_group 74.366410 73.803030
## logloss 0.005014 0.004832
## max_per_class_error 0.030534 0.030303
## mcc 0.984410 0.984527
## mean_per_class_accuracy 0.984733 0.984849
## mean_per_class_error 0.015267 0.015152
## mse 0.000421 0.000421
## pr_auc 0.976271 0.977352
## precision 1.000000 1.000000
## r2 0.968230 0.968504
## recall 0.969466 0.969697
## rmse 0.020530 0.020518
## specificity 1.000000 1.000000
best_model <- models_h2o@leader
Save and Load
?h2o.getModel
## starting httpd help server ... done
?h2o.saveModel
?h2o.loadModel
# h2o.getModel("GLM_1_AutoML_4_20240423_111307") %>%
# h2o.saveModel("h2o_models/")
# best_model <- h2o.loadModel("h2o_models/GLM_1_AutoML_4_20240423_111307")
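Equivalently, the leader can be saved straight from the best_model handle; a minimal sketch, commented out like the block above (the path is illustrative):
# Save the leader model to disk and reload it later (illustrative path)
# model_path <- h2o.saveModel(best_model, path = "h2o_models/", force = TRUE)
# best_model <- h2o.loadModel(model_path)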
Make Predictions
predictions <- h2o.predict(best_model, newdata = test_h2o)
predictions_tbl <- predictions %>%
as_tibble()
predictions_tbl %>%
bind_cols(test_tbl)
## # A tibble: 19,130 × 24
## predict FALSE. TRUE. expedition_id member_id peak_id peak_name year season
## <fct> <dbl> <dbl> <chr> <chr> <chr> <chr> <dbl> <chr>
## 1 FALSE 0.998 0.00230 AMAD78301 AMAD7830… AMAD Ama Dabl… 1978 Autumn
## 2 FALSE 0.998 0.00216 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 3 FALSE 0.998 0.00223 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 4 FALSE 0.995 0.00471 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 5 FALSE 0.998 0.00247 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 6 FALSE 0.998 0.00214 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 7 FALSE 0.998 0.00216 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 8 FALSE 0.998 0.00220 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 9 FALSE 0.998 0.00247 AMAD79101 AMAD7910… AMAD Ama Dabl… 1979 Spring
## 10 FALSE 0.998 0.00220 AMAD79301 AMAD7930… AMAD Ama Dabl… 1979 Autumn
## # ℹ 19,120 more rows
## # ℹ 15 more variables: sex <chr>, age <dbl>, citizenship <chr>,
## # expedition_role <chr>, hired <lgl>, highpoint_metres <dbl>, success <lgl>,
## # solo <lgl>, oxygen_used <lgl>, died <lgl>, death_cause <chr>,
## # death_height_metres <dbl>, injured <lgl>, injury_type <chr>,
## # injury_height_metres <dbl>
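Since tidymodels is loaded, the bound predictions can also be scored with yardstick; a sketch assuming the column names produced by the conversion above:
# Score the h2o predictions with yardstick (truth is the logical `died` column)
eval_tbl <- predictions_tbl %>%
  bind_cols(test_tbl %>% select(died)) %>%
  mutate(
    truth    = factor(died,    levels = c("TRUE", "FALSE")),
    estimate = factor(predict, levels = c("TRUE", "FALSE"))
  )

eval_tbl %>% conf_mat(truth = truth, estimate = estimate)
eval_tbl %>% accuracy(truth = truth, estimate = estimate)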
Evaluate Model
?h2o.performance
performance_h2o <- h2o.performance(best_model, newdata = test_h2o)
typeof(performance_h2o)
## [1] "S4"
slotNames(performance_h2o)
## [1] "algorithm" "on_train" "on_valid" "on_xval" "metrics"
performance_h2o@metrics
## $model
## $model$`__meta`
## $model$`__meta`$schema_version
## [1] 3
##
## $model$`__meta`$schema_name
## [1] "ModelKeyV3"
##
## $model$`__meta`$schema_type
## [1] "Key<Model>"
##
##
## $model$name
## [1] "GBM_3_AutoML_7_20240508_190028"
##
## $model$type
## [1] "Key<Model>"
##
## $model$URL
## [1] "/3/Models/GBM_3_AutoML_7_20240508_190028"
##
##
## $model_checksum
## [1] "-9193747636028883112"
##
## $frame
## $frame$name
## [1] "test_tbl_sid_bb07_3"
##
##
## $frame_checksum
## [1] "678340420273909232"
##
## $description
## NULL
##
## $scoring_time
## [1] 1.715209e+12
##
## $predictions
## NULL
##
## $MSE
## [1] 0.0004556918
##
## $RMSE
## [1] 0.02134694
##
## $nobs
## [1] 19130
##
## $custom_metric_name
## NULL
##
## $custom_metric_value
## [1] 0
##
## $r2
## [1] 0.9672512
##
## $logloss
## [1] 0.004680771
##
## $AUC
## [1] 0.99758
##
## $pr_auc
## [1] 0.9856455
##
## $Gini
## [1] 0.99516
##
## $mean_per_class_error
## [1] 0.009312282
##
## $domain
## [1] "FALSE" "TRUE"
##
## $cm
## $cm$`__meta`
## $cm$`__meta`$schema_version
## [1] 3
##
## $cm$`__meta`$schema_name
## [1] "ConfusionMatrixV3"
##
## $cm$`__meta`$schema_type
## [1] "ConfusionMatrix"
##
##
## $cm$table
## Confusion Matrix: Row labels: Actual class; Column labels: Predicted class
## FALSE TRUE Error Rate
## FALSE 18858 2 0.0001 = 2 / 18,860
## TRUE 5 265 0.0185 = 5 / 270
## Totals 18863 267 0.0004 = 7 / 19,130
##
##
## $thresholds_and_metric_scores
## Metrics for Thresholds: Binomial metrics as a function of classification thresholds
## threshold f1 f2 f0point5 accuracy precision recall specificity
## 1 0.997889 0.007380 0.004625 0.018248 0.985938 1.000000 0.003704 1.000000
## 2 0.997068 0.021978 0.013850 0.053191 0.986043 1.000000 0.011111 1.000000
## 3 0.995201 0.036364 0.023041 0.086207 0.986147 1.000000 0.018519 1.000000
## 4 0.991214 0.050542 0.032199 0.117450 0.986252 1.000000 0.025926 1.000000
## 5 0.990583 0.057554 0.036765 0.132450 0.986304 1.000000 0.029630 1.000000
## absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns fns fps tps
## 1 0.060429 0.003704 0.501852 18860 269 0 1
## 2 0.104671 0.011111 0.505556 18860 267 0 3
## 3 0.135137 0.018519 0.509259 18860 265 0 5
## 4 0.159904 0.025926 0.512963 18860 263 0 7
## 5 0.170949 0.029630 0.514815 18860 262 0 8
## tnr fnr fpr tpr idx
## 1 1.000000 0.996296 0.000000 0.003704 0
## 2 1.000000 0.988889 0.000000 0.011111 1
## 3 1.000000 0.981481 0.000000 0.018519 2
## 4 1.000000 0.974074 0.000000 0.025926 3
## 5 1.000000 0.970370 0.000000 0.029630 4
##
## ---
## threshold f1 f2 f0point5 accuracy precision recall
## 395 0.002059 0.028115 0.067443 0.017759 0.024203 0.014258 1.000000
## 396 0.002047 0.028081 0.067365 0.017737 0.023001 0.014241 1.000000
## 397 0.002028 0.028064 0.067325 0.017726 0.022373 0.014231 1.000000
## 398 0.002019 0.027943 0.067047 0.017649 0.018035 0.014170 1.000000
## 399 0.002015 0.027911 0.066974 0.017629 0.016884 0.014153 1.000000
## 400 0.002006 0.027835 0.066799 0.017580 0.014114 0.014114 1.000000
## specificity absolute_mcc min_per_class_accuracy mean_per_class_accuracy tns
## 395 0.010233 0.012079 0.010233 0.505117 193
## 396 0.009014 0.011330 0.009014 0.504507 170
## 397 0.008378 0.010919 0.008378 0.504189 158
## 398 0.003977 0.007506 0.003977 0.501988 75
## 399 0.002810 0.006307 0.002810 0.501405 53
## 400 0.000000 0.000000 0.000000 0.500000 0
## fns fps tps tnr fnr fpr tpr idx
## 395 0 18667 270 0.010233 0.000000 0.989767 1.000000 394
## 396 0 18690 270 0.009014 0.000000 0.990986 1.000000 395
## 397 0 18702 270 0.008378 0.000000 0.991622 1.000000 396
## 398 0 18785 270 0.003977 0.000000 0.996023 1.000000 397
## 399 0 18807 270 0.002810 0.000000 0.997190 1.000000 398
## 400 0 18860 270 0.000000 0.000000 1.000000 1.000000 399
##
## $max_criteria_and_metric_scores
## Maximum Metrics: Maximum metrics at their respective thresholds
## metric threshold value idx
## 1 max f1 0.063832 0.986965 142
## 2 max f2 0.063832 0.983667 142
## 3 max f0point5 0.974248 0.993151 140
## 4 max accuracy 0.063832 0.999634 142
## 5 max precision 0.997889 1.000000 0
## 6 max recall 0.002295 1.000000 329
## 7 max specificity 0.997889 1.000000 0
## 8 max absolute_mcc 0.063832 0.986795 142
## 9 max min_per_class_accuracy 0.004928 0.985185 194
## 10 max mean_per_class_accuracy 0.063832 0.990688 142
## 11 max tns 0.997889 18860.000000 0
## 12 max fns 0.997889 269.000000 0
## 13 max fps 0.002006 18860.000000 399
## 14 max tps 0.002295 270.000000 329
## 15 max tnr 0.997889 1.000000 0
## 16 max fnr 0.997889 0.996296 0
## 17 max fpr 0.002006 1.000000 399
## 18 max tpr 0.002295 1.000000 329
##
## $gains_lift_table
## Gains/Lift Table: Avg response rate: 1.41 %, avg score: 1.57 %
## group cumulative_data_fraction lower_threshold lift cumulative_lift
## 1 1 0.01003659 0.976651 70.851852 70.851852
## 2 2 0.02002091 0.004817 27.450456 49.207814
## 3 3 0.03000523 0.003156 0.000000 32.833785
## 4 4 0.04004182 0.002911 0.000000 24.603907
## 5 5 0.05054888 0.002782 0.000000 19.489754
## 6 6 0.10000000 0.002564 0.074896 9.888889
## 7 7 0.15117616 0.002469 0.072372 6.565801
## 8 8 0.20057501 0.002401 0.074976 4.967200
## 9 9 0.30000000 0.002297 0.000000 3.320988
## 10 10 0.40454783 0.002238 0.035426 2.471896
## 11 11 0.50005227 0.002199 0.000000 1.999791
## 12 12 0.60010455 0.002168 0.000000 1.666376
## 13 13 0.70078411 0.002140 0.000000 1.426973
## 14 14 0.80684788 0.002112 0.000000 1.239391
## 15 15 0.90041819 0.002092 0.000000 1.110595
## 16 16 1.00000000 0.002001 0.000000 1.000000
## response_rate score cumulative_response_rate cumulative_score
## 1 1.000000 0.981018 1.000000 0.981018
## 2 0.387435 0.360406 0.694517 0.671522
## 3 0.000000 0.003763 0.463415 0.449324
## 4 0.000000 0.003028 0.347258 0.337458
## 5 0.000000 0.002825 0.275078 0.267902
## 6 0.001057 0.002652 0.139571 0.136732
## 7 0.001021 0.002511 0.092669 0.091296
## 8 0.001058 0.002430 0.070107 0.069410
## 9 0.000000 0.002348 0.046872 0.047184
## 10 0.000500 0.002264 0.034888 0.035575
## 11 0.000000 0.002218 0.028225 0.029204
## 12 0.000000 0.002183 0.023519 0.024699
## 13 0.000000 0.002154 0.020140 0.021460
## 14 0.000000 0.002125 0.017493 0.018918
## 15 0.000000 0.002104 0.015675 0.017171
## 16 0.000000 0.002075 0.014114 0.015668
## capture_rate cumulative_capture_rate gain cumulative_gain
## 1 0.711111 0.711111 6985.185185 6985.185185
## 2 0.274074 0.985185 2645.045569 4820.781356
## 3 0.000000 0.985185 -100.000000 3183.378500
## 4 0.000000 0.985185 -100.000000 2360.390678
## 5 0.000000 0.985185 -100.000000 1848.975449
## 6 0.003704 0.988889 -92.510375 888.888889
## 7 0.003704 0.992593 -92.762834 556.580093
## 8 0.003704 0.996296 -92.502450 396.720046
## 9 0.000000 0.996296 -100.000000 232.098765
## 10 0.003704 1.000000 -96.457407 147.189559
## 11 0.000000 1.000000 -100.000000 99.979093
## 12 0.000000 1.000000 -100.000000 66.637631
## 13 0.000000 1.000000 -100.000000 42.697300
## 14 0.000000 1.000000 -100.000000 23.939099
## 15 0.000000 1.000000 -100.000000 11.059507
## 16 0.000000 1.000000 -100.000000 0.000000
## kolmogorov_smirnov
## 1 0.711111
## 2 0.978982
## 3 0.968854
## 4 0.958674
## 5 0.948017
## 6 0.901614
## 7 0.853462
## 8 0.807113
## 9 0.706264
## 10 0.603977
## 11 0.507105
## 12 0.405620
## 13 0.303499
## 14 0.195917
## 15 0.101007
## 16 0.000000
# AUC of the leader model, reported on its training data by default
h2o.auc(best_model)
## [1] 0.9995247
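The same accessor works on the performance object, which returns the hold-out test value instead of the training one:
# AUC and log loss on the hold-out test frame
h2o.auc(performance_h2o)
h2o.logloss(performance_h2o)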
h2o.confusionMatrix(performance_h2o)
## Confusion Matrix (vertical: actual; across: predicted) for max f1 @ threshold = 0.0638320454487536:
## FALSE TRUE Error Rate
## FALSE 18858 2 0.000106 =2/18860
## TRUE 5 265 0.018519 =5/270
## Totals 18863 267 0.000366 =7/19130
performance_h2o %>%
  h2o.metric() %>%
  as_tibble() %>%
  filter(between(threshold, 0.98, 0.99))
## # A tibble: 48 × 20
## threshold f1 f2 f0point5 accuracy precision recall specificity
## <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 0.990 0.0783 0.0504 0.175 0.986 1 0.0407 1
## 2 0.989 0.0851 0.0549 0.189 0.987 1 0.0444 1
## 3 0.989 0.0919 0.0595 0.202 0.987 1 0.0481 1
## 4 0.989 0.0986 0.0640 0.215 0.987 1 0.0519 1
## 5 0.988 0.118 0.0775 0.251 0.987 1 0.0630 1
## 6 0.988 0.125 0.0820 0.263 0.987 1 0.0667 1
## 7 0.987 0.138 0.0909 0.286 0.987 1 0.0741 1
## 8 0.987 0.144 0.0954 0.297 0.987 1 0.0778 1
## 9 0.987 0.157 0.104 0.318 0.987 1 0.0852 1
## 10 0.987 0.169 0.113 0.338 0.987 1 0.0926 1
## # ℹ 38 more rows
## # ℹ 12 more variables: absolute_mcc <dbl>, min_per_class_accuracy <dbl>,
## # mean_per_class_accuracy <dbl>, tns <dbl>, fns <dbl>, fps <dbl>, tps <dbl>,
## # tnr <dbl>, fnr <dbl>, fpr <dbl>, tpr <dbl>, idx <int>
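The per-threshold metrics from h2o.metric() are handy for visualising how precision, recall, and F1 trade off as the classification threshold moves; a sketch with ggplot2:
# Precision, recall and F1 as a function of the classification threshold
performance_h2o %>%
  h2o.metric() %>%
  as_tibble() %>%
  select(threshold, precision, recall, f1) %>%
  pivot_longer(-threshold, names_to = "metric", values_to = "value") %>%
  ggplot(aes(x = threshold, y = value, colour = metric)) +
  geom_line() +
  labs(
    title = "Performance metrics across classification thresholds",
    x = "Threshold",
    y = "Metric value"
  )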