Prompt 1

I have a dataset called climbers_data that looks like this.

climbers_data %>% glimpse() 
## Rows: 76,519
## Columns: 21
## $ expedition_id        <chr> "AMAD78301", "AMAD78301", "AMAD78301", "AMAD78301…
## $ member_id            <chr> "AMAD78301-01", "AMAD78301-02", "AMAD78301-03", "…
## $ peak_id              <chr> "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "…
## $ peak_name            <chr> "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Da…
## $ year                 <dbl> 1978, 1978, 1978, 1978, 1978, 1978, 1978, 1978, 1…
## $ season               <chr> "Autumn", "Autumn", "Autumn", "Autumn", "Autumn",…
## $ sex                  <chr> "M", "M", "M", "M", "M", "M", "M", "M", "M", "M",…
## $ age                  <dbl> 40, 41, 27, 40, 34, 25, 41, 29, 35, 37, 23, 44, 2…
## $ citizenship          <chr> "France", "France", "France", "France", "France",…
## $ expedition_role      <chr> "Leader", "Deputy Leader", "Climber", "Exp Doctor…
## $ hired                <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ highpoint_metres     <dbl> NA, 6000, NA, 6000, NA, 6000, 6000, 6000, NA, 681…
## $ success              <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ solo                 <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ oxygen_used          <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ died                 <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ death_cause          <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
## $ death_height_metres  <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
## $ injured              <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, …
## $ injury_type          <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…
## $ injury_height_metres <dbl> NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, N…

Rows: 52,383
Columns: 17
$ expedition_id    <chr> "AMAD78301", "AMAD78301", "AMAD78301", "AMAD78301", "AMAD78301", "AMAD79101", "AMAD79101", "AMAD79101", "AMAD7…
$ member_id        <chr> "AMAD78301-02", "AMAD78301-04", "AMAD78301-06", "AMAD78301-07", "AMAD78301-08", "AMAD79101-04", "AMAD79101-05"…
$ peak_id          <chr> "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD", "AMAD"…
$ peak_name        <chr> "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Dablam", "Ama Dablam"…
$ year             <dbl> 1978, 1978, 1978, 1978, 1978, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 1979, 19…
$ season           <chr> "Autumn", "Autumn", "Autumn", "Autumn", "Autumn", "Spring", "Spring", "Spring", "Spring", "Spring", "Spring", …
$ sex              <chr> "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", "M", …
$ age              <dbl> 41, 40, 25, 41, 29, 37, 23, 42, 30, 28, 35, 33, 29, 26, 23, 34, 35, 31, 28, 37, 28, 31, 44, 29, 34, 36, 25, 29…
$ citizenship      <chr> "France", "France", "France", "France", "France", "W Germany", "USA", "USA", "USA", "USA", "USA", "USA", "USA"…
$ expedition_role  <chr> "Deputy Leader", "Exp Doctor", "Climber", "Climber", "Climber", "Climber", "Climber", "Climbing Leader", "Film…
$ hired            <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE,…
$ highpoint_metres <dbl> 6000, 6000, 6000, 6000, 6000, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 6814, 68…
$ success          <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRU…
$ solo             <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE…
$ oxygen_used      <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE…
$ died             <chr> "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "no", "n…
$ injured          <lgl> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE…

The goal is to predict died for each expedition member.

Please write R code to create a classification model that predicts the probability of died.

# Load libraries
library(tidyverse)
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.2.0 ──
## ✔ broom        1.0.5      ✔ rsample      1.2.1 
## ✔ dials        1.2.1      ✔ tune         1.2.1 
## ✔ infer        1.0.6      ✔ workflows    1.1.4 
## ✔ modeldata    1.3.0      ✔ workflowsets 1.1.0 
## ✔ parsnip      1.2.1      ✔ yardstick    1.3.1 
## ✔ recipes      1.0.10
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Use tidymodels_prefer() to resolve common conflicts.
library(h2o)
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## Attaching package: 'h2o'
## The following objects are masked from 'package:lubridate':
## 
##     day, hour, month, week, year
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## The following objects are masked from 'package:base':
## 
##     &&, %*%, %in%, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc
# Keep a copy of the original data (climbers_data is already loaded)
climbers_data_clean <- climbers_data

# Preprocess data
climbers_data_processed <- climbers_data %>%
  select(-expedition_id, -member_id, -peak_id, -peak_name) %>%  # Remove unnecessary columns
  mutate(success = as.factor(success),  # Convert success to factor
         solo = as.factor(solo),        # Convert solo to factor
         oxygen_used = as.factor(oxygen_used),  # Convert oxygen_used to factor
         died = as.factor(died),        # Convert died to factor
         injured = as.factor(injured))  # Convert injured to factor
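
A more compact alternative (a sketch, not part of the generated answer) is to do the conversion with across(). Converting the character columns to factors as well means they arrive in H2O as categorical rather than string columns, and the commented-out select() line is an optional guard against target leakage, since the death- and injury-related columns are only recorded once the outcome is known. The object name climbers_data_processed_alt is just illustrative.

# Alternative preprocessing sketch
climbers_data_processed_alt <- climbers_data %>%
  select(-expedition_id, -member_id, -peak_id, -peak_name) %>%   # drop identifier columns
  # select(-death_cause, -death_height_metres,
  #        -injury_type, -injury_height_metres, -injured) %>%    # optional: avoid leaking the outcome
  mutate(across(where(is.logical), as.factor),                   # logical -> factor
         across(where(is.character), as.factor))                 # character -> factor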

# Set seed for reproducibility
set.seed(123)

# Split data into training (80%) and testing (20%) sets
data_split <- initial_split(climbers_data_processed, prop = 0.8, strata = died)
train_data <- training(data_split)
test_data <- testing(data_split)
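
Deaths are rare in this data, so it is worth checking how imbalanced the outcome is in the training split before modelling; a quick check might look like this (sketch):

# Inspect the class balance of the outcome in the training data
train_data %>%
  count(died) %>%
  mutate(prop = n / sum(n))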

# Initialize h2o
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         21 hours 46 minutes 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    4 months and 11 days 
##     H2O cluster name:           H2O_started_from_R_Vanessa_zyd177 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   1.30 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.3.1 (2023-06-16)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (4 months and 11 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
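
The cluster above reports only 1.30 GB of total memory. If H2O struggles with larger frames, you can request more resources when starting the cluster, for example (sketch):

# Start (or connect to) H2O with more memory and all available cores
h2o.init(max_mem_size = "4g", nthreads = -1)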
# Convert data to h2o format
train_h2o <- as.h2o(train_data)
test_h2o <- as.h2o(test_data)
# Define predictors and response variable
predictors <- setdiff(colnames(train_h2o), c("died"))
response <- "died"
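
h2o.automl() also accepts arguments that can help with a rare, imbalanced outcome like died. A sketch of some optional settings follows; the object name aml_tuned is just illustrative.

# Optional AutoML settings (sketch): rank by AUCPR, up-sample the minority
# class, allow more runtime, and exclude algorithms you don't want to try
aml_tuned <- h2o.automl(x = predictors,
                        y = response,
                        training_frame = train_h2o,
                        leaderboard_frame = test_h2o,
                        max_runtime_secs = 120,
                        sort_metric = "AUCPR",
                        balance_classes = TRUE,
                        exclude_algos = c("DeepLearning"),
                        seed = 123)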

# Train AutoML model
aml <- h2o.automl(x = predictors,
                  y = response,
                  training_frame = train_h2o,
                  leaderboard_frame = test_h2o,
                  max_runtime_secs = 30,  # Set maximum runtime
                  seed = 123)
## 11:23:51.705: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:23:53.246: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:23:54.651: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:23:56.659: _train param, Dropping unused columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:23:59.75: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:01.680: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:02.968: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:05.363: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:06.820: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:08.380: _train param, Dropping unused columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:10.832: _train param, Dropping unused columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:13.455: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:14.966: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:16.97: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:17.939: _train param, Dropping bad and constant columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:19.422: _train param, Dropping unused columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
## 11:24:21.88: _train param, Dropping unused columns: [expedition_role, season, death_cause, sex, citizenship, injury_type]
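
The messages above show that the character columns (expedition_role, season, sex, citizenship, death_cause, injury_type) were dropped, most likely because they arrived in H2O as string rather than categorical columns. If you want AutoML to consider them as predictors, one option is to convert them to factors on the H2O frames and rerun the training (sketch):

# Convert string columns to categorical (enum) columns on the H2O frames
for (col in c("season", "sex", "citizenship", "expedition_role")) {
  train_h2o[[col]] <- h2o.asfactor(train_h2o[[col]])
  test_h2o[[col]]  <- h2o.asfactor(test_h2o[[col]])
}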
# View AutoML leaderboard
print(aml@leaderboard)
##                                                  model_id       auc     logloss
## 1                          DRF_1_AutoML_6_20240502_112351 0.9977190 0.003732542
## 2                          GBM_3_AutoML_6_20240502_112351 0.9974373 0.001686459
## 3                          GBM_1_AutoML_6_20240502_112351 0.9973831 0.001986849
## 4                          GBM_2_AutoML_6_20240502_112351 0.9972340 0.001831792
## 5 StackedEnsemble_BestOfFamily_1_AutoML_6_20240502_112351 0.9972195 0.002022247
## 6    StackedEnsemble_AllModels_2_AutoML_6_20240502_112351 0.9971773 0.001735483
##       aucpr mean_per_class_error       rmse          mse
## 1 0.9905834          0.007352941 0.02209069 0.0004879984
## 2 0.9911733          0.007352941 0.01427139 0.0002036727
## 3 0.9905591          0.007352941 0.01616443 0.0002612889
## 4 0.9912935          0.007352941 0.01557386 0.0002425453
## 5 0.9906157          0.007352941 0.01686741 0.0002845094
## 6 0.9910759          0.007352941 0.01506485 0.0002269496
## 
## [17 rows x 7 columns]
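
The default print shows only the head of the leaderboard. To inspect all 17 models, and optionally extra columns such as training time and algorithm, you can pull the full leaderboard (sketch):

# Retrieve the full leaderboard with extra metadata columns
lb <- h2o.get_leaderboard(aml, extra_columns = "ALL")
print(lb, n = nrow(lb))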
# Get best model from AutoML
best_model <- aml@leader

# Make predictions on test data
predictions <- h2o.predict(best_model, test_h2o)
# View predictions
head(predictions)
##   predict     FALSE         TRUE
## 1   FALSE 0.9996317 0.0003683484
## 2   FALSE 0.9994714 0.0005285704
## 3   FALSE 0.9989233 0.0010767376
## 4   FALSE 0.9989572 0.0010427701
## 5   FALSE 0.9997627 0.0002373469
## 6   FALSE 0.9996466 0.0003533924
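
To summarise how well the leading model does on the held-out data beyond a few example rows, you can ask H2O for the test-set performance, or pull the probabilities back into R for further analysis (sketch):

# Evaluate the leading model on the held-out test set
perf <- h2o.performance(best_model, newdata = test_h2o)
h2o.auc(perf)              # area under the ROC curve
h2o.aucpr(perf)            # area under the precision-recall curve
h2o.confusionMatrix(perf)  # confusion matrix at the default threshold

# Bring the predicted probabilities back into R alongside the true outcome
pred_df <- as.data.frame(predictions) %>%
  bind_cols(test_data %>% select(died))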