Goal: Predict whether Himalayan climbers died on their expeditions, using the Tidy Tuesday Himalayan climbing expeditions data (2020-09-22)

Import Data

library(tidyverse)
library(tidymodels)
library(correlationfunnel)

members <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/main/data/2020/2020-09-22/members.csv')
## Rows: 76519 Columns: 21
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (10): expedition_id, member_id, peak_id, peak_name, season, sex, citizen...
## dbl  (5): year, age, highpoint_metres, death_height_metres, injury_height_me...
## lgl  (6): hired, success, solo, oxygen_used, died, injured
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
skimr::skim(members)
Data summary

Name                    members
Number of rows          76519
Number of columns       21
_______________________
Column type frequency:
  character             10
  logical               6
  numeric               5
________________________
Group variables         None

Variable type: character

skim_variable        n_missing  complete_rate  min  max  empty  n_unique  whitespace
expedition_id                0           1.00    9    9      0     10350           0
member_id                    0           1.00   12   12      0     76518           0
peak_id                      0           1.00    4    4      0       391           0
peak_name                   15           1.00    4   25      0       390           0
season                       0           1.00    6    7      0         5           0
sex                          2           1.00    1    1      0         2           0
citizenship                 10           1.00    2   23      0       212           0
expedition_role             21           1.00    4   25      0       524           0
death_cause              75413           0.01    3   27      0        12           0
injury_type              74807           0.02    3   27      0        11           0

Variable type: logical

skim_variable  n_missing  complete_rate  mean  count
hired                  0              1  0.21  FAL: 60788, TRU: 15731
success                0              1  0.38  FAL: 47320, TRU: 29199
solo                   0              1  0.00  FAL: 76398, TRU: 121
oxygen_used            0              1  0.24  FAL: 58286, TRU: 18233
died                   0              1  0.01  FAL: 75413, TRU: 1106
injured                0              1  0.02  FAL: 74806, TRU: 1713

Variable type: numeric

skim_variable         n_missing  complete_rate     mean       sd    p0   p25   p50   p75  p100  hist
year                          0           1.00  2000.36    14.78  1905  1991  2004  2012  2019  ▁▁▁▃▇
age                        3497           0.95    37.33    10.40     7    29    36    44    85  ▁▇▅▁▁
highpoint_metres          21833           0.71  7470.68  1040.06  3800  6700  7400  8400  8850  ▁▁▆▃▇
death_height_metres       75451           0.01  6592.85  1308.19   400  5800  6600  7550  8830  ▁▁▂▇▆
injury_height_metres      75510           0.01  7049.91  1214.24   400  6200  7100  8000  8880  ▁▁▂▇▇
# Remove variables with too many missing values
members_clean <- members %>%
  select(-c(highpoint_metres, death_height_metres, death_cause, injured,
            injury_type, injury_height_metres, age)) %>%
  # Remove irrelevant variables
  select(-oxygen_used, -solo, -hired) %>%
  # Remove redundant variables (peak_id duplicates peak_name)
  select(-peak_id) %>%
  # Remove duplicates in member_id
  distinct(member_id, .keep_all = TRUE) %>%
  # Drop remaining high-cardinality or unused predictors
  select(-expedition_role, -peak_name, -citizenship, -sex) %>%
  na.omit() %>%
  # Recode the outcome while it is still logical, then convert the
  # remaining logical and character columns to factors
  mutate(died = if_else(died, "died", "not_died")) %>%
  mutate(across(where(is.logical), as.factor)) %>%
  mutate(across(where(is.character), as.factor))

Explore Data

members_clean %>% count(died)
## # A tibble: 2 × 2
##   died         n
##   <fct>    <int>
## 1 died      1106
## 2 not_died 75412
members_clean %>%
  ggplot(aes(died)) +
  geom_bar()

Death vs Success Rate

ggplot(members_clean, aes(x = died, fill = success)) +
  geom_bar(position = "fill") +
  labs(title = "Proportion of Success by Died Status",
       x = "Died",
       y = "Proportion",
       fill = "Success")
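
To put numbers behind the plot, a quick cross-tabulation (a sketch, not part of the original analysis) shows the success rate within each outcome group:

members_clean %>%
  count(died, success) %>%        # joint counts of outcome and summit success
  group_by(died) %>%
  mutate(prop = n / sum(n)) %>%   # success rate within each outcome group
  ungroup()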

Correlation Plot

# Step 1: Binarize
member_binarized <- members_clean %>%
  select(-member_id) %>%
  binarize()

member_binarized %>% glimpse()
## Rows: 76,518
## Columns: 14
## $ expedition_id__EVER88101 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `expedition_id__-OTHER`  <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ `year__-Inf_1991`        <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ year__1991_2004          <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ year__2004_2012          <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ year__2012_Inf           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ season__Autumn           <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, …
## $ season__Spring           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, …
## $ season__Winter           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `season__-OTHER`         <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ success__FALSE           <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, …
## $ success__TRUE            <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, …
## $ died__died               <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ died__not_died           <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
# Step 2: correlation
member_correlation <- member_binarized %>%
  correlate(died__died)
## Warning: correlate(): [Data Imbalance Detected] Consider sampling to balance the classes more than 5%
##   Column with imbalance: died__died
member_correlation
## # A tibble: 14 × 3
##    feature       bin       correlation
##    <fct>         <chr>           <dbl>
##  1 died          died         1       
##  2 died          not_died    -1       
##  3 year          -Inf_1991    0.0616  
##  4 success       FALSE        0.0415  
##  5 success       TRUE        -0.0415  
##  6 year          2012_Inf    -0.0260  
##  7 year          2004_2012   -0.0254  
##  8 season        Winter       0.0111  
##  9 year          1991_2004   -0.0109  
## 10 season        Autumn      -0.00567 
## 11 season        Spring       0.00195 
## 12 expedition_id -OTHER       0.00131 
## 13 expedition_id EVER88101   -0.00131 
## 14 season        -OTHER       0.000324
# Step 3: Plot
member_correlation %>%
  correlationfunnel::plot_correlation_funnel()
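
The correlate() step warned about class imbalance. As an optional check (not in the original analysis), the funnel can be rerun on a class-balanced sample; the 1106 below matches the number of deaths counted above:

set.seed(123)
members_clean %>%
  group_by(died) %>%
  slice_sample(n = 1106) %>%   # draw as many survivors as there are deaths
  ungroup() %>%
  select(-member_id) %>%
  binarize() %>%
  correlate(died__died) %>%
  plot_correlation_funnel()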

Split Data

# Downsample to 50 rows per class to balance the outcome
set.seed(1234)
members_clean <- members_clean %>%
  group_by(died) %>%
  sample_n(50) %>%
  ungroup()

members_split <- initial_split(members_clean, strata = died)
members_train <- training(members_split)
members_test <- testing(members_split)
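
As a quick sanity check (not shown in the original), the balanced sample and the default 3/4 train/test split can be confirmed directly:

members_clean %>% count(died)   # 50 rows per class after downsampling
dim(members_train)              # about three quarters of the 100 rows
dim(members_test)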

members_cv <- rsample::vfold_cv(members_train, strata = died)
members_cv
## #  10-fold cross-validation using stratification 
## # A tibble: 10 × 2
##    splits         id    
##    <list>         <chr> 
##  1 <split [66/8]> Fold01
##  2 <split [66/8]> Fold02
##  3 <split [66/8]> Fold03
##  4 <split [66/8]> Fold04
##  5 <split [66/8]> Fold05
##  6 <split [66/8]> Fold06
##  7 <split [66/8]> Fold07
##  8 <split [68/6]> Fold08
##  9 <split [68/6]> Fold09
## 10 <split [68/6]> Fold10

Preprocess Data

library(themis)
## Warning: package 'themis' was built under R version 4.4.3
# An alternative recipe using themis::step_smote() to oversample the minority
# class, kept commented out for reference (SMOTE is unnecessary here because
# the classes were already balanced by downsampling):
# xgboost_recipe <- recipes::recipe(died ~ ., data = members_train) %>%
#   update_role(member_id, new_role = "id") %>%
#   step_other(expedition_id, threshold = 0.05) %>%
#   step_dummy(expedition_id, season, success, one_hot = TRUE) %>%
#   step_zv(all_predictors()) %>%
#   step_normalize(all_numeric_predictors()) %>%
#   step_YeoJohnson(year) %>%
#   step_pca(all_numeric_predictors(), threshold = .99) %>%
#   step_smote(died)

xgboost_recipe <- recipes::recipe(died ~ ., data = members_train) %>%
  update_role(member_id, new_role = "id") %>%
  step_other(expedition_id, threshold = 0.05) %>%
  step_dummy(expedition_id, season, success, one_hot = TRUE) %>%
  step_zv(all_predictors()) %>%
  step_normalize(all_numeric_predictors()) %>%
  step_YeoJohnson(year) %>%
  step_pca(all_numeric_predictors(), threshold = .99)

xgboost_recipe %>% prep() %>% juice() %>% glimpse()
## Rows: 74
## Columns: 8
## $ member_id <fct> EVER15161-02, DHA114301-11, EVER96302-10, EVER52301-17, EVER…
## $ died      <fct> died, died, died, died, died, died, died, died, died, died, …
## $ PC1       <dbl> 0.08238175, 0.91988628, 1.08967503, 1.26588003, -1.94457728,…
## $ PC2       <dbl> 1.42919275, -1.00259882, -0.95565859, -0.90694450, -0.094888…
## $ PC3       <dbl> 0.58069949, -0.55577516, -0.84401798, -1.14315335, 2.0210838…
## $ PC4       <dbl> 0.9365735, 1.2025975, 0.5543082, -0.1184797, -0.1732750, 0.7…
## $ PC5       <dbl> 0.1774627, 0.2446289, 0.2637229, 0.2835384, -0.2092048, 0.18…
## $ PC6       <dbl> -1.08713891, -1.14592880, -0.05965581, 1.06766690, -0.651910…
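
Because step_pca() keeps enough components to cover 99% of the variance, it can be worth checking how that variance is spread across the six components. This sketch assumes step_pca() is the sixth step in the recipe:

xgboost_recipe %>%
  prep() %>%
  tidy(number = 6, type = "variance")   # variance explained per component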

Specify Model

xgboost_spec <- 
  boost_tree(trees = tune(), min_n = tune(), tree_depth = tune(), learn_rate = tune(), 
    loss_reduction = tune(), sample_size = tune()) %>% 
  set_mode("classification") %>% 
  set_engine("xgboost") 

xgboost_workflow <- 
  workflow() %>% 
  add_recipe(xgboost_recipe) %>% 
  add_model(xgboost_spec) 
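
As an optional check (not in the original), extract_parameter_set_dials() lists the six hyperparameters marked with tune() that the grid will cover:

xgboost_workflow %>%
  extract_parameter_set_dials()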

Tune Hyperparameters

doParallel::registerDoParallel()

set.seed(65447)
xgboost_tune <-
  tune_grid(xgboost_workflow, 
            resamples = members_cv,
            grid = 5,
            control = control_grid(save_pred = TRUE))

Model Evaluation

Identify Optimal Values for Hyperparameters

collect_metrics(xgboost_tune)
## # A tibble: 15 × 12
##    trees min_n tree_depth learn_rate loss_reduction sample_size .metric    
##    <int> <int>      <int>      <dbl>          <dbl>       <dbl> <chr>      
##  1   959     7         15    0.0120        4.77e- 2       0.769 accuracy   
##  2   959     7         15    0.0120        4.77e- 2       0.769 brier_class
##  3   959     7         15    0.0120        4.77e- 2       0.769 roc_auc    
##  4   619    13          2    0.0336        4.64e- 1       0.496 accuracy   
##  5   619    13          2    0.0336        4.64e- 1       0.496 brier_class
##  6   619    13          2    0.0336        4.64e- 1       0.496 roc_auc    
##  7  1646    19          7    0.219         1.00e-10       0.293 accuracy   
##  8  1646    19          7    0.219         1.00e-10       0.293 brier_class
##  9  1646    19          7    0.219         1.00e-10       0.293 roc_auc    
## 10    90    30         10    0.00123       1.37e- 7       0.996 accuracy   
## 11    90    30         10    0.00123       1.37e- 7       0.996 brier_class
## 12    90    30         10    0.00123       1.37e- 7       0.996 roc_auc    
## 13  1598    39          5    0.00661       5.51e- 6       0.210 accuracy   
## 14  1598    39          5    0.00661       5.51e- 6       0.210 brier_class
## 15  1598    39          5    0.00661       5.51e- 6       0.210 roc_auc    
## # ℹ 5 more variables: .estimator <chr>, mean <dbl>, n <int>, std_err <dbl>,
## #   .config <chr>
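
show_best() gives a more compact view of the same results; this optional call (not in the original output) ranks configurations by ROC AUC:

show_best(xgboost_tune, metric = "roc_auc", n = 3)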
collect_predictions(xgboost_tune) %>%
    group_by(id) %>%
    roc_curve(died, .pred_died) %>%
    autoplot()

xgboost_last <- xgboost_workflow %>%
    finalize_workflow(select_best(xgboost_tune, metric = "accuracy")) %>%
    last_fit(members_split)

collect_metrics(xgboost_last) 
## # A tibble: 3 × 4
##   .metric     .estimator .estimate .config             
##   <chr>       <chr>          <dbl> <chr>               
## 1 accuracy    binary         0.385 Preprocessor1_Model1
## 2 roc_auc     binary         0.432 Preprocessor1_Model1
## 3 brier_class binary         0.279 Preprocessor1_Model1
collect_predictions(xgboost_last) %>%
    yardstick::conf_mat(died, .pred_class) %>%
    autoplot()

library(vip)
## Warning: package 'vip' was built under R version 4.4.3
## 
## Attaching package: 'vip'
## The following object is masked from 'package:utils':
## 
##     vi
xgboost_last %>%
    workflows::extract_fit_engine() %>%
    vip()

Conclusion

The final model reached an accuracy of 0.385 and an ROC AUC of 0.432 on the held-out test set, both worse than chance on this balanced sample. That is not surprising: after downsampling, only 100 observations remain, and the surviving predictors (expedition_id, season, success, and year, compressed into principal components) likely carry little signal about deaths. A natural next step would be to train on the full data and handle the class imbalance with the SMOTE recipe sketched in the preprocessing section.