Import Data

members <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-22/members.csv')
## Rows: 76519 Columns: 21
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (10): expedition_id, member_id, peak_id, peak_name, season, sex, citizen...
## dbl  (5): year, age, highpoint_metres, death_height_metres, injury_height_me...
## lgl  (6): hired, success, solo, oxygen_used, died, injured
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
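The guessed column types are fine here; the message can be silenced on later runs as it suggests (a minimal sketch):

# Re-reading with show_col_types = FALSE suppresses the column-spec message
members <- readr::read_csv(
  'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-22/members.csv',
  show_col_types = FALSE
)
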
skimr::skim(members)
Data summary

Name                       members
Number of rows             76519
Number of columns          21
Column type frequency:
  character                10
  logical                  6
  numeric                  5
Group variables            None

Variable type: character

skim_variable    n_missing  complete_rate  min  max  empty  n_unique  whitespace
expedition_id            0           1.00    9    9      0     10350           0
member_id                0           1.00   12   12      0     76518           0
peak_id                  0           1.00    4    4      0       391           0
peak_name               15           1.00    4   25      0       390           0
season                   0           1.00    6    7      0         5           0
sex                      2           1.00    1    1      0         2           0
citizenship             10           1.00    2   23      0       212           0
expedition_role         21           1.00    4   25      0       524           0
death_cause          75413           0.01    3   27      0        12           0
injury_type          74807           0.02    3   27      0        11           0

Variable type: logical

skim_variable  n_missing  complete_rate  mean  count
hired                  0              1  0.21  FAL: 60788, TRU: 15731
success                0              1  0.38  FAL: 47320, TRU: 29199
solo                   0              1  0.00  FAL: 76398, TRU: 121
oxygen_used            0              1  0.24  FAL: 58286, TRU: 18233
died                   0              1  0.01  FAL: 75413, TRU: 1106
injured                0              1  0.02  FAL: 74806, TRU: 1713

Variable type: numeric

skim_variable         n_missing  complete_rate     mean       sd    p0   p25   p50   p75  p100  hist
year                          0           1.00  2000.36    14.78  1905  1991  2004  2012  2019  ▁▁▁▃▇
age                        3497           0.95    37.33    10.40     7    29    36    44    85  ▁▇▅▁▁
highpoint_metres          21833           0.71  7470.68  1040.06  3800  6700  7400  8400  8850  ▁▁▆▃▇
death_height_metres       75451           0.01  6592.85  1308.19   400  5800  6600  7550  8830  ▁▁▂▇▆
injury_height_metres      75510           0.01  7049.91  1214.24   400  6200  7100  8000  8880  ▁▁▂▇▇
data_clean <- members %>%

  # Handle missing values: drop the sparse columns, then drop incomplete rows
  select(-death_cause, -injury_type, -highpoint_metres,
         -death_height_metres, -injury_height_metres, -peak_id) %>%
  na.omit() %>%

  # Convert logical variables to factors for modeling
  mutate(across(where(is.logical), as.factor))
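A quick sanity check (illustrative) that the cleaning leaves a complete dataset:

# No NAs should remain after na.omit(); the row count matches the
# binarized output further down
sum(is.na(data_clean))   # expect 0
nrow(data_clean)         # 72985
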
# Step 1: Prepare data (binarize() is from the correlationfunnel package)
library(correlationfunnel)

data_binarized_tbl <- data_clean %>%
  select(-peak_name, -expedition_id) %>%
  binarize()

data_binarized_tbl %>% glimpse()
## Rows: 72,985
## Columns: 55
## $ `member_id__KANG10101-01`      <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `member_id__-OTHER`            <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ `year__-Inf_1992`              <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ year__1992_2004                <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ year__2004_2012                <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ year__2012_Inf                 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ season__Autumn                 <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, …
## $ season__Spring                 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, …
## $ season__Winter                 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `season__-OTHER`               <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ sex__F                         <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ sex__M                         <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ `age__-Inf_29`                 <dbl> 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, …
## $ age__29_36                     <dbl> 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, …
## $ age__36_44                     <dbl> 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, …
## $ age__44_Inf                    <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Australia         <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Austria           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Canada            <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__China             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__France            <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, …
## $ citizenship__Germany           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__India             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Italy             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Japan             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Nepal             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Netherlands       <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__New_Zealand       <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Poland            <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Russia            <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__S_Korea           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Spain             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__Switzerland       <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__UK                <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ citizenship__USA               <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, …
## $ citizenship__W_Germany         <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, …
## $ `citizenship__-OTHER`          <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ expedition_role__Climber       <dbl> 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, …
## $ expedition_role__Deputy_Leader <dbl> 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ expedition_role__Exp_Doctor    <dbl> 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `expedition_role__H-A_Worker`  <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ expedition_role__Leader        <dbl> 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `expedition_role__-OTHER`      <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, …
## $ hired__FALSE                   <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ hired__TRUE                    <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ success__FALSE                 <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, …
## $ success__TRUE                  <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, …
## $ solo__FALSE                    <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ `solo__-OTHER`                 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ oxygen_used__FALSE             <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ oxygen_used__TRUE              <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ died__FALSE                    <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ died__TRUE                     <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ injured__FALSE                 <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ injured__TRUE                  <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
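One thing visible above: member_id is nearly unique (76,518 distinct values in the skim output), so its two binarized columns carry essentially no signal. A variant that drops it before binarizing (a sketch):

# member_id is an identifier, not a feature, so it can be dropped
# before binarizing along with peak_name and expedition_id
data_binarized_tbl <- data_clean %>%
  select(-peak_name, -expedition_id, -member_id) %>%
  binarize()
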
# Step 2: Correlate 
data_corr_tbl <- data_binarized_tbl %>%
  correlate(died__TRUE)
## Warning: correlate(): [Data Imbalance Detected] Consider sampling to balance the classes more than 5%
##   Column with imbalance: died__TRUE
data_corr_tbl
## # A tibble: 55 × 3
##    feature     bin       correlation
##    <fct>       <chr>           <dbl>
##  1 died        FALSE         -1     
##  2 died        TRUE           1     
##  3 year        -Inf_1992      0.0519
##  4 success     FALSE          0.0332
##  5 success     TRUE          -0.0332
##  6 year        2004_2012     -0.0211
##  7 year        2012_Inf      -0.0208
##  8 sex         F             -0.0168
##  9 sex         M              0.0168
## 10 citizenship USA           -0.0154
## # ℹ 45 more rows
# Step 3: Plot 
data_corr_tbl %>%
  plot_correlation_funnel()
## Warning: ggrepel: 27 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
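The ggrepel warning just means some funnel labels were dropped to avoid overplotting; if every label is wanted, the global ggrepel option can be raised before plotting (a minimal sketch):

# Allow ggrepel to draw every label, at the cost of a busier plot
options(ggrepel.max.overlaps = Inf)
data_corr_tbl %>% plot_correlation_funnel()
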

Build Model

Split Data

library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.1.1 ──
## ✔ broom        1.0.5     ✔ rsample      1.2.0
## ✔ dials        1.2.0     ✔ tune         1.1.2
## ✔ infer        1.0.6     ✔ workflows    1.1.3
## ✔ modeldata    1.3.0     ✔ workflowsets 1.0.1
## ✔ parsnip      1.1.1     ✔ yardstick    1.3.0
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ xgboost::slice()  masks dplyr::slice()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Search for functions across packages at https://www.tidymodels.org/find/
set.seed(2543)

# Downsample to 50 deaths and 50 non-deaths so the document knits quickly;
# this also balances the classes flagged by the correlate() warning above
data_clean <- data_clean %>% group_by(died) %>% sample_n(50) %>% ungroup()

data_split <- initial_split(data_clean, strata = died)
data_train <- training(data_split)
data_test  <- testing(data_split)

data_cv <- rsample::vfold_cv(data_train, strata = died)
data_cv
## #  10-fold cross-validation using stratification 
## # A tibble: 10 × 2
##    splits         id    
##    <list>         <chr> 
##  1 <split [66/8]> Fold01
##  2 <split [66/8]> Fold02
##  3 <split [66/8]> Fold03
##  4 <split [66/8]> Fold04
##  5 <split [66/8]> Fold05
##  6 <split [66/8]> Fold06
##  7 <split [66/8]> Fold07
##  8 <split [68/6]> Fold08
##  9 <split [68/6]> Fold09
## 10 <split [68/6]> Fold10

Preprocess Data

library(themis)

xgboost_rec <- recipes::recipe(died ~ ., data = data_train) %>%
  update_role(member_id, new_role = "ID") %>%
  step_other(expedition_id, threshold = 0.1) %>%
  step_dummy(all_nominal_predictors()) %>%
  step_zv(all_predictors()) %>%
  step_normalize(all_numeric_predictors()) %>%
  step_pca(all_numeric_predictors(), threshold = .75)
  # Also tried: step_YeoJohnson(year, age), applied before step_normalize()

# Earlier variant using SMOTE from themis (oversampling) instead of PCA:
# xgboost_rec <- recipes::recipe(died ~ ., data = data_train) %>%
#   update_role(member_id, new_role = "ID") %>%
#   step_other(expedition_id, threshold = 0.1) %>%
#   step_dummy(all_nominal_predictors()) %>%
#   step_zv(all_predictors()) %>%
#   step_normalize(all_numeric_predictors()) %>%
#   step_smote(died)

xgboost_rec %>% prep() %>% juice() %>% glimpse()
## Rows: 74
## Columns: 29
## $ member_id <fct> EVER17173-08, EVER09152-21, MANA12305-02, PUTH16303-15, YALU…
## $ died      <fct> FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALS…
## $ PC01      <dbl> 1.1917918, 4.9247713, -0.7508168, 4.2451967, -1.3720092, 3.7…
## $ PC02      <dbl> 2.6227831461, 0.1794178936, 0.6704350353, -3.0455124853, 0.9…
## $ PC03      <dbl> 1.1066087, 0.5024598, -1.3796486, -4.4819844, -0.5430749, -1…
## $ PC04      <dbl> 2.49428185, -0.02633459, 1.06237836, -1.73309436, 0.05334953…
## $ PC05      <dbl> -0.511942488, 0.306959955, 0.312012047, 0.004162768, 1.19067…
## $ PC06      <dbl> 0.32775301, -0.12802154, 0.32733826, 0.13007511, -0.26506268…
## $ PC07      <dbl> 0.97451592, 0.39155758, 0.20584857, -1.49753032, 0.87942168,…
## $ PC08      <dbl> -0.15589491, -1.03336614, 0.25703791, 3.09556807, -1.8949514…
## $ PC09      <dbl> 0.57819696, -0.64706463, 0.42055340, 1.63345434, -0.03661938…
## $ PC10      <dbl> -0.07587130, 0.56938865, 0.65145277, -0.76589173, -3.4639428…
## $ PC11      <dbl> 0.14225868, -0.17454054, 0.09118009, 1.02907014, -5.67275255…
## $ PC12      <dbl> -0.267189815, -0.564960327, 0.049369371, 2.126969730, 4.7503…
## $ PC13      <dbl> 0.4101704, -0.8023939, -0.5512226, 3.3322538, 2.4232247, -1.…
## $ PC14      <dbl> 1.01569877, -0.62271605, 0.61575619, 1.75830743, -2.73338854…
## $ PC15      <dbl> 0.257731000, -0.128445731, 0.003187602, 0.593966387, -0.2565…
## $ PC16      <dbl> 0.157323677, 0.274696657, 0.200957042, -0.925130465, 1.57346…
## $ PC17      <dbl> -0.17599295, -0.08107509, 0.41229455, 0.61452536, 0.05715916…
## $ PC18      <dbl> -1.89760533, -0.18203465, 0.73815630, -0.65152428, 0.2355835…
## $ PC19      <dbl> 2.57743078, 0.03659445, -0.20412580, 0.07289396, -2.29671486…
## $ PC20      <dbl> -0.36503814, -0.42565575, 0.17474887, 0.41643640, 0.36039129…
## $ PC21      <dbl> 0.44104024, -0.19612196, -0.10569248, -0.49855039, -0.240458…
## $ PC22      <dbl> 0.6978934, 0.7473159, 0.1465296, 0.4224369, 1.9637685, 0.563…
## $ PC23      <dbl> 0.14817309, -0.40092809, 0.32203388, -0.16256206, 0.06633671…
## $ PC24      <dbl> -0.003574022, 0.101180727, 0.326208544, 0.425199380, 1.89185…
## $ PC25      <dbl> -0.620854928, -0.165379660, -0.004880421, 0.988190588, -1.49…
## $ PC26      <dbl> -0.05685536, 0.12074624, 0.01336233, 1.44586781, -1.30939826…
## $ PC27      <dbl> 0.77774044, -0.51980472, 0.17337405, 0.04556243, 0.90155952,…
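Because step_pca(threshold = .75) keeps as many components as are needed to explain 75% of the variance, the retained variance can be checked with tidy(). A sketch, assuming step_pca is the fifth step in the recipe above:

# Inspect variance explained by the PCA step (fifth step in this recipe)
xgboost_rec %>%
  prep() %>%
  tidy(number = 5, type = "variance")
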

Specify Model

library(usemodels)
usemodels::use_xgboost(died ~ ., data = data_train)
## xgboost_recipe <- 
##   recipe(formula = died ~ ., data = data_train) %>% 
##   step_zv(all_predictors()) 
## 
## xgboost_spec <- 
##   boost_tree(trees = tune(), min_n = tune(), tree_depth = tune(), learn_rate = tune(), 
##     loss_reduction = tune(), sample_size = tune()) %>% 
##   set_mode("classification") %>% 
##   set_engine("xgboost") 
## 
## xgboost_workflow <- 
##   workflow() %>% 
##   add_recipe(xgboost_recipe) %>% 
##   add_model(xgboost_spec) 
## 
## set.seed(55703)
## xgboost_tune <-
##   tune_grid(xgboost_workflow, resamples = stop("add your rsample object"), grid = stop("add number of candidate points"))
xgboost_spec <- 
  boost_tree(trees = tune()) %>%   # tree_depth = tune() was also tried
  set_mode("classification") %>% 
  set_engine("xgboost") 

xgboost_workflow <- 
  workflow() %>% 
  add_recipe(xgboost_rec) %>% 
  add_model(xgboost_spec) 

Tune Hyperparameters

# A regular grid was also tried:
# tree_grid <- grid_regular(trees(), tree_depth(), levels = 5)

doParallel::registerDoParallel()

set.seed(24817)
xgboost_tune <-
  tune_grid(xgboost_workflow,
            resamples = data_cv, 
            grid = 5,    # 5 candidate parameter sets chosen automatically
            control = control_grid(save_pred = TRUE))

Evaluate Models

tune::show_best(xgboost_tune, metric = "roc_auc")
## # A tibble: 5 × 7
##   trees .metric .estimator  mean     n std_err .config             
##   <int> <chr>   <chr>      <dbl> <int>   <dbl> <chr>               
## 1   232 roc_auc binary     0.45     10  0.0399 Preprocessor1_Model1
## 2   784 roc_auc binary     0.425    10  0.0377 Preprocessor1_Model2
## 3  1191 roc_auc binary     0.425    10  0.0377 Preprocessor1_Model3
## 4  1209 roc_auc binary     0.425    10  0.0377 Preprocessor1_Model4
## 5  1975 roc_auc binary     0.425    10  0.0377 Preprocessor1_Model5
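The grid results can also be inspected visually; tune registers an autoplot() method for tuning results (a minimal sketch):

# Plot performance metrics against the candidate values of `trees`
ggplot2::autoplot(xgboost_tune)
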
# Finalize the workflow with the best hyperparameters found above
xgboost_fw <- tune::finalize_workflow(xgboost_workflow,
                        tune::select_best(xgboost_tune, metric = "roc_auc"))

# Fit the model on the entire training data and test it on the test data
data_fit <- tune::last_fit(xgboost_fw, data_split)
## → A | warning: ! There are new levels in a factor: Khatung Khang, Kangchenjunga, Annapurna
##                  III, Dhaulagiri IV, Annapurna II, and Langtang Lirung, ! There are new levels in a factor: Norway, Estonia, Belarus, Canada, Austria,
##                  and Slovenia, ! There are new levels in a factor: Member, Porter, and Film Team
## 
## There were issues with some computations   A: x1
tune::collect_metrics(data_fit)
## # A tibble: 2 × 4
##   .metric  .estimator .estimate .config             
##   <chr>    <chr>          <dbl> <chr>               
## 1 accuracy binary         0.577 Preprocessor1_Model1
## 2 roc_auc  binary         0.559 Preprocessor1_Model1
tune::collect_predictions(data_fit) %>%
  ggplot(aes(died, .pred_TRUE)) +
  geom_point(alpha = 0.3, colour = "midnightblue") +
  geom_abline(lty = 2, color = "gray50") +
  coord_fixed() 
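
Since died is binary, a confusion matrix is a useful complement to the scatter plot above (a minimal sketch using yardstick):

# Cross-tabulate observed vs. predicted class on the held-out test set
tune::collect_predictions(data_fit) %>%
  yardstick::conf_mat(truth = died, estimate = .pred_class)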

Conclusion

Note: my full model has almost 80,000 observations and took several hours to knit in the last apply assignment, so to try all of these methods for this apply I had to work with a sample.

* Feature transformation: normalized numeric data. I used step_normalize in my previous model, so this is the baseline I am trying to beat: accuracy of 0.462 and AUC of 0.414.

* Feature transformation: Yeo-Johnson transformation (see the sketch after this list). It did not improve the model; accuracy stayed the same and AUC improved slightly to 0.420.

* Feature selection: PCA improved the model. Accuracy increased to 0.577 and AUC increased to 0.559.

* Algorithm tuning: did not improve my model.
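
For reference, a minimal sketch of the Yeo-Johnson variant mentioned above (the object name xgboost_rec_yj is mine; in the recipe earlier the step_YeoJohnson() line is commented out, and here it is applied before normalization):

# Sketch: Yeo-Johnson recipe variant, same steps as the baseline but with the
# transformation applied to the skewed numeric columns before normalization
xgboost_rec_yj <- recipes::recipe(died ~ ., data = data_train) %>%
  update_role(member_id, new_role = "ID") %>%
  step_other(expedition_id, threshold = 0.1) %>%
  step_dummy(all_nominal_predictors()) %>%
  step_zv(all_predictors()) %>%
  step_YeoJohnson(year, age) %>%
  step_normalize(all_numeric_predictors())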