library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.3     ✔ readr     2.1.4
## ✔ forcats   1.0.0     ✔ stringr   1.5.0
## ✔ ggplot2   3.4.3     ✔ tibble    3.2.1
## ✔ lubridate 1.9.2     ✔ tidyr     1.3.0
## ✔ purrr     1.0.2     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.1.1 ──
## ✔ broom        1.0.5     ✔ rsample      1.2.0
## ✔ dials        1.2.0     ✔ tune         1.1.2
## ✔ infer        1.0.5     ✔ workflows    1.1.3
## ✔ modeldata    1.2.0     ✔ workflowsets 1.0.1
## ✔ parsnip      1.1.1     ✔ yardstick    1.2.0
## ✔ recipes      1.0.8     
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Use tidymodels_prefer() to resolve common conflicts.
library(h2o)
## Warning: package 'h2o' was built under R version 4.3.3
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## 
## Attaching package: 'h2o'
## 
## The following objects are masked from 'package:lubridate':
## 
##     day, hour, month, week, year
## 
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## 
## The following objects are masked from 'package:base':
## 
##     %*%, %in%, &&, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc

Prompt 1: I have a dataset called climbers_data that looks like this:

climbers_data <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-22/members.csv')
## Rows: 76519 Columns: 21
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (10): expedition_id, member_id, peak_id, peak_name, season, sex, citizen...
## dbl  (5): year, age, highpoint_metres, death_height_metres, injury_height_me...
## lgl  (6): hired, success, solo, oxygen_used, died, injured
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
climbers_data %>% glimpse() 
## Rows: 76,519
## Columns: 21
## $ expedition_id        <chr> "AMAD78301", "AMAD78301", "AMAD78301", "AMAD78301…
## $ member_id            <chr> "AMAD78301-01", "AMAD78301-02", "AMAD78301-03", "…
## $ peak_id              <chr> "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "…
## $ peak_name            <chr> "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Da…
## $ year                 <dbl> 1978, 1978, 1978, 1978, 1978, 1978, 1978, 1978, 1…
## $ season               <chr> "Autumn", "Autumn", "Autumn", "Autumn", "Autumn",…
## $ sex                  <chr> "M", "M", "M", "M", "M", "M", "M", "M", "M", "M",…
## $ age                  <dbl> 40, 41, 27, 40, 34, 25, 41, 29, 35, 37, 23, 44, 2…
## $ citizenship          <chr> "France", "France", "France", "France", "France",…
## $ expedition_role      <chr> "Leader", "Deputy Leader", "Climber", "Exp Doctor…
## $ hired                <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ highpoint_metres     <dbl> NA, 6000, NA, 6000, NA, 6000, 6000, 6000, NA, 681…
## $ success              <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ solo                 <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ oxygen_used          <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ died                 <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ death_cause          <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
## $ death_height_metres  <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
## $ injured              <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ injury_type          <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
## $ injury_height_metres <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…

The goal is to predict died for expedition members.

Please write R code to create a classification model that predicts the probability of died.

# Keep an untouched copy of the raw data (not used in the modelling below)
climbers_data_clean <- climbers_data

# Preprocess data
climbers_data_processed <- climbers_data %>%
  select(-expedition_id, -member_id, -peak_id, -peak_name) %>%  # Drop identifier columns
  mutate(across(c(success, solo, oxygen_used, died, injured),
                as.factor))  # Convert logical flags (including the outcome, died) to factors
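
Before modelling, it is worth flagging that several of the remaining columns (death_cause, death_height_metres, injured, injury_type, injury_height_metres) are only recorded once the expedition outcome is already known, so leaving them in the predictor set can leak the target. A stricter feature set is sketched below; the name climbers_data_no_leak is illustrative only and is not used in the run that follows.

# Optional, stricter feature set (illustrative; not used below): drop columns
# that are only recorded after the outcome is known, to avoid target leakage
climbers_data_no_leak <- climbers_data_processed %>%
  select(-death_cause, -death_height_metres,
         -injured, -injury_type, -injury_height_metres)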

# Set seed for reproducibility
set.seed(123)

# Split data into training (80%) and testing (20%) sets
data_split <- initial_split(climbers_data_processed, prop = 0.8, strata = died)
train_data <- training(data_split)
test_data <- testing(data_split)
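
Deaths are rare in this data, so it helps to confirm that the stratified split preserved the class balance. A quick, optional check:

# Compare the proportion of deaths in the training and test sets
train_data %>% count(died) %>% mutate(prop = n / sum(n))
test_data  %>% count(died) %>% mutate(prop = n / sum(n))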

# Initialize h2o
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         4 hours 21 minutes 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    4 months and 18 days 
##     H2O cluster name:           H2O_started_from_R_OPend_eji420 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   3.61 GB 
##     H2O cluster total cores:    12 
##     H2O cluster allowed cores:  12 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.3.1 (2023-06-16 ucrt)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (4 months and 18 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
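
h2o.init() attaches to an already-running local cluster if one exists (as it did here) or starts a new one. When starting a fresh cluster, resource limits can be set explicitly; the values below are only an example and were not used for this run:

# Example of starting H2O with explicit resource limits (values illustrative)
# h2o.init(nthreads = -1, max_mem_size = "4G")
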
# Convert data to h2o format
train_h2o <- as.h2o(train_data)
test_h2o <- as.h2o(test_data)
# Define predictors and response variable
predictors <- setdiff(colnames(train_h2o), c("died"))
response <- "died"
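
Because died was converted to a factor before as.h2o(), it is stored as an enum column in the H2O frame, which is what makes AutoML treat this as a binary classification problem rather than a regression. An optional sanity check:

# Confirm the response column is categorical (enum) in the H2O frame
h2o.levels(train_h2o[, response])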

# Train AutoML model
aml <- h2o.automl(x = predictors,
                  y = response,
                  training_frame = train_h2o,
                  leaderboard_frame = test_h2o,
                  max_runtime_secs = 30,  # Set maximum runtime
                  seed = 123)
## 16:18:58.126: AutoML: XGBoost is not available; skipping it.
## 16:18:58.126: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
# View AutoML leaderboard
print(aml@leaderboard)
##                                       model_id       auc     logloss     aucpr
## 1              GBM_5_AutoML_10_20240509_161858 0.9991879 0.002078241 0.9921059
## 2 GBM_grid_1_AutoML_10_20240509_161858_model_9 0.9991712 0.001838259 0.9913093
## 3 GBM_grid_1_AutoML_10_20240509_161858_model_4 0.9991279 0.009240130 0.9297628
## 4              GBM_4_AutoML_10_20240509_161858 0.9989083 0.001733668 0.9920909
## 5 GBM_grid_1_AutoML_10_20240509_161858_model_3 0.9987784 0.019474249 0.8867014
## 6              GBM_1_AutoML_10_20240509_161858 0.9987706 0.001942950 0.9908809
##   mean_per_class_error       rmse          mse
## 1          0.012254902 0.01878381 0.0003528315
## 2          0.007352941 0.01548552 0.0002398015
## 3          0.007518504 0.02477269 0.0006136862
## 4          0.007386054 0.01592136 0.0002534897
## 5          0.015070121 0.03925796 0.0015411871
## 6          0.007386054 0.01695634 0.0002875174
## 
## [29 rows x 7 columns]
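
The near-perfect AUC values are consistent with the target-leakage caveat above (columns such as death_height_metres are only populated for members who died), so the leaderboard should be read with that in mind. To pull the full leaderboard into a regular data frame with extra columns such as training time, one option, assuming a recent h2o release where this helper is available, is:

# Full leaderboard as a data frame (h2o.get_leaderboard assumed available)
lb <- h2o.get_leaderboard(aml, extra_columns = "ALL")
as.data.frame(lb)
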
# Get best model from AutoML
best_model <- aml@leader

# Make predictions on test data
predictions <- h2o.predict(best_model, test_h2o)
# View predictions
head(predictions)
##   predict     FALSE         TRUE
## 1   FALSE 0.9996404 0.0003596237
## 2   FALSE 0.9992763 0.0007237478
## 3   FALSE 0.9992132 0.0007868431
## 4   FALSE 0.9992132 0.0007868431
## 5   FALSE 0.9996660 0.0003339674
## 6   FALSE 0.9997021 0.0002979313
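
The prediction frame gives a class label plus per-class probabilities for each test-set member; the TRUE column is the predicted probability of death. To summarise how the leading model performs on the held-out test frame, h2o's performance helpers can be used. A minimal sketch:

# Evaluate the leading model on the held-out test data
perf <- h2o.performance(best_model, newdata = test_h2o)
h2o.auc(perf)              # area under the ROC curve
h2o.confusionMatrix(perf)  # confusion matrix at the default threshold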