library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1
## ✔ lubridate 1.9.3 ✔ tidyr 1.3.1
## ✔ purrr 1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(correlationfunnel)
## ══ correlationfunnel Tip #1 ════════════════════════════════════════════════════
## Make sure your data is not overly imbalanced prior to using `correlate()`.
## If less than 5% imbalance, consider sampling. :)
data <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-04-27/departures.csv')
## Rows: 9423 Columns: 19
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (8): coname, exec_fullname, interim_coceo, still_there, notes, sources...
## dbl (10): dismissal_dataset_id, gvkey, fyear, co_per_rol, departure_code, c...
## dttm (1): leftofc
##
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
skimr::skim(data)
Name | data |
Number of rows | 9423 |
Number of columns | 19 |
_______________________ | |
Column type frequency: | |
character | 8 |
numeric | 10 |
POSIXct | 1 |
________________________ | |
Group variables | None |
Variable type: character
skim_variable | n_missing | complete_rate | min | max | empty | n_unique | whitespace |
---|---|---|---|---|---|---|---|
coname | 0 | 1.00 | 2 | 30 | 0 | 3860 | 0 |
exec_fullname | 0 | 1.00 | 5 | 790 | 0 | 8701 | 0 |
interim_coceo | 9105 | 0.03 | 6 | 7 | 0 | 6 | 0 |
still_there | 7311 | 0.22 | 3 | 10 | 0 | 77 | 0 |
notes | 1644 | 0.83 | 5 | 3117 | 0 | 7755 | 0 |
sources | 1475 | 0.84 | 18 | 1843 | 0 | 7915 | 0 |
eight_ks | 4499 | 0.52 | 69 | 3884 | 0 | 4914 | 0 |
_merge | 0 | 1.00 | 11 | 11 | 0 | 1 | 0 |
Variable type: numeric
skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
---|---|---|---|---|---|---|---|---|---|---|
dismissal_dataset_id | 0 | 1.00 | 5684.10 | 25005.46 | 1 | 2305.5 | 4593 | 6812.5 | 559044 | ▇▁▁▁▁ |
gvkey | 0 | 1.00 | 40132.48 | 53921.34 | 1004 | 7337.0 | 14385 | 60900.5 | 328795 | ▇▁▁▁▁ |
fyear | 0 | 1.00 | 2007.74 | 8.19 | 1987 | 2000.0 | 2008 | 2016.0 | 2020 | ▁▆▅▅▇ |
co_per_rol | 0 | 1.00 | 25580.22 | 18202.38 | -1 | 8555.5 | 22980 | 39275.5 | 64602 | ▇▆▅▃▃ |
departure_code | 1667 | 0.82 | 5.20 | 1.53 | 1 | 5.0 | 5 | 7.0 | 9 | ▁▃▇▅▁ |
ceo_dismissal | 1813 | 0.81 | 0.20 | 0.40 | 0 | 0.0 | 0 | 0.0 | 1 | ▇▁▁▁▂ |
tenure_no_ceodb | 0 | 1.00 | 1.03 | 0.17 | 0 | 1.0 | 1 | 1.0 | 3 | ▁▇▁▁▁ |
max_tenure_ceodb | 0 | 1.00 | 1.05 | 0.24 | 1 | 1.0 | 1 | 1.0 | 4 | ▇▁▁▁▁ |
fyear_gone | 1802 | 0.81 | 2006.64 | 13.63 | 1980 | 2000.0 | 2007 | 2013.0 | 2997 | ▇▁▁▁▁ |
cik | 245 | 0.97 | 741469.17 | 486551.43 | 1750 | 106413.0 | 857323 | 1050375.8 | 1808065 | ▆▁▇▂▁ |
Variable type: POSIXct
skim_variable | n_missing | complete_rate | min | max | median | n_unique |
---|---|---|---|---|---|---|
leftofc | 1802 | 0.81 | 1981-01-01 | 2998-04-27 | 2006-12-31 | 3627 |
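Two values in the skim above stand out as data-entry errors: fyear_gone tops out at 2997 and leftofc runs to 2998-04-27. A quick way to eyeball the affected rows before cleaning (a sketch; the 2025 cutoff is an arbitrary choice of mine, and these rows are not removed below):
# Inspect rows with implausible departure years (cutoff of 2025 is arbitrary)
data %>%
  filter(fyear_gone > 2025) %>%
  select(dismissal_dataset_id, coname, fyear, fyear_gone, leftofc)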
factors_vec <- data %>% select(leftofc) %>% names()
data_clean <- data %>%
  # Convert leftofc to a factor (it is dropped again below)
  mutate(across(all_of(factors_vec), as.factor)) %>%
  # Remove rows with missing values in key columns
  drop_na(ceo_dismissal, tenure_no_ceodb, fyear_gone, leftofc) %>%
  # Drop identifier, free-text, and near-zero-variance columns
  select(-c(interim_coceo, eight_ks, gvkey, cik, coname, exec_fullname, sources, notes, "_merge", leftofc, departure_code)) %>%
  # Drop still_there due to high missingness
  select(-still_there) %>%
  # Treat dismissal_dataset_id as a character ID and de-duplicate on it
  mutate(dismissal_dataset_id = as.character(dismissal_dataset_id)) %>%
  distinct(dismissal_dataset_id, .keep_all = TRUE) %>%
  # Make the outcome a factor for classification
  mutate(ceo_dismissal = as.factor(ceo_dismissal))
skimr::skim(data_clean)
Name | data_clean |
Number of rows | 7476 |
Number of columns | 7 |
_______________________ | |
Column type frequency: | |
character | 1 |
factor | 1 |
numeric | 5 |
________________________ | |
Group variables | None |
Variable type: character
skim_variable | n_missing | complete_rate | min | max | empty | n_unique | whitespace |
---|---|---|---|---|---|---|---|
dismissal_dataset_id | 0 | 1 | 1 | 6 | 0 | 7476 | 0 |
Variable type: factor
skim_variable | n_missing | complete_rate | ordered | n_unique | top_counts |
---|---|---|---|---|---|
ceo_dismissal | 0 | 1 | FALSE | 2 | 0: 5993, 1: 1483 |
Variable type: numeric
skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
---|---|---|---|---|---|---|---|---|---|---|
fyear | 0 | 1 | 2005.61 | 7.45 | 1987 | 1999.0 | 2006.0 | 2012.00 | 2020 | ▁▇▆▇▆ |
co_per_rol | 0 | 1 | 21444.46 | 16350.23 | -1 | 6975.5 | 18267.5 | 33414.25 | 64601 | ▇▅▅▂▁ |
tenure_no_ceodb | 0 | 1 | 1.03 | 0.16 | 1 | 1.0 | 1.0 | 1.00 | 3 | ▇▁▁▁▁ |
max_tenure_ceodb | 0 | 1 | 1.05 | 0.23 | 1 | 1.0 | 1.0 | 1.00 | 4 | ▇▁▁▁▁ |
fyear_gone | 0 | 1 | 2006.54 | 13.69 | 1980 | 2000.0 | 2006.0 | 2013.00 | 2997 | ▇▁▁▁▁ |
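Before binarizing, it is worth checking how imbalanced the outcome is, since the correlationfunnel start-up tip warns against running correlate() on heavily imbalanced data. A minimal check based on the counts in the skim above:
# Roughly 20% dismissals vs 80% other departures: imbalanced, but well above
# the ~5% level the correlationfunnel tip warns about
data_clean %>%
  count(ceo_dismissal) %>%
  mutate(prop = n / sum(n))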
# Step 1: Binarize
data_binarized <- data_clean %>%
binarize()
data_binarized %>% glimpse()
## Rows: 7,476
## Columns: 22
## $ dismissal_dataset_id__1 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `dismissal_dataset_id__-OTHER` <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ `fyear__-Inf_1999` <dbl> 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, …
## $ fyear__1999_2006 <dbl> 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, …
## $ fyear__2006_2012 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, …
## $ fyear__2012_Inf <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `co_per_rol__-Inf_6975.5` <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ co_per_rol__6975.5_18267.5 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ co_per_rol__18267.5_33414.25 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ co_per_rol__33414.25_Inf <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ ceo_dismissal__0 <dbl> 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, …
## $ ceo_dismissal__1 <dbl> 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, …
## $ tenure_no_ceodb__1 <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ tenure_no_ceodb__2 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `tenure_no_ceodb__-OTHER` <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ max_tenure_ceodb__1 <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ max_tenure_ceodb__2 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `max_tenure_ceodb__-OTHER` <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `fyear_gone__-Inf_2000` <dbl> 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, …
## $ fyear_gone__2000_2006 <dbl> 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, …
## $ fyear_gone__2006_2013 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, …
## $ fyear_gone__2013_Inf <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
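binarize() is called with its defaults here. Spelled out (treat the argument values as my reading of the package defaults, not something verified above), it cuts each numeric column into four bins and one-hot encodes everything else, lumping rare levels into an "-OTHER" bin:
# Equivalent call with the defaults written out: 4 quantile bins per numeric
# column, rare categorical levels collapsed into "-OTHER"
data_clean %>%
  binarize(n_bins = 4, thresh_infreq = 0.01, name_infreq = "-OTHER", one_hot = TRUE)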
# Step 2: Correlation
data_correlation <- data_binarized %>%
correlate(ceo_dismissal__0)
data_correlation
## # A tibble: 22 × 3
## feature bin correlation
## <fct> <chr> <dbl>
## 1 ceo_dismissal 0 1
## 2 ceo_dismissal 1 -1
## 3 fyear -Inf_1999 0.0775
## 4 co_per_rol -Inf_6975.5 0.0595
## 5 fyear_gone -Inf_2000 0.0585
## 6 max_tenure_ceodb 1 -0.0580
## 7 co_per_rol 33414.25_Inf -0.0560
## 8 max_tenure_ceodb 2 0.0536
## 9 fyear 1999_2006 -0.0346
## 10 fyear 2006_2012 -0.0301
## # ℹ 12 more rows
# Step 3: Plot
data_correlation %>%
correlationfunnel::plot_correlation_funnel()
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.2.0 ──
## ✔ broom 1.0.5 ✔ rsample 1.2.1
## ✔ dials 1.3.0 ✔ tune 1.2.1
## ✔ infer 1.0.7 ✔ workflows 1.1.4
## ✔ modeldata 1.4.0 ✔ workflowsets 1.1.0
## ✔ parsnip 1.2.1 ✔ yardstick 1.3.1
## ✔ recipes 1.1.0
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter() masks stats::filter()
## ✖ recipes::fixed() masks stringr::fixed()
## ✖ dplyr::lag() masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step() masks stats::step()
## • Learn how to get started at https://www.tidymodels.org/start/
set.seed(1234)
data_split <- initial_split(data_clean, strata = ceo_dismissal)
data_train <- training(data_split)
data_test <- testing(data_split)
data_cv <- rsample::vfold_cv(data_train, strata = ceo_dismissal)
data_cv
## # 10-fold cross-validation using stratification
## # A tibble: 10 × 2
## splits id
## <list> <chr>
## 1 <split [5044/562]> Fold01
## 2 <split [5044/562]> Fold02
## 3 <split [5045/561]> Fold03
## 4 <split [5045/561]> Fold04
## 5 <split [5046/560]> Fold05
## 6 <split [5046/560]> Fold06
## 7 <split [5046/560]> Fold07
## 8 <split [5046/560]> Fold08
## 9 <split [5046/560]> Fold09
## 10 <split [5046/560]> Fold10
library(themis)
xgboost_rec <- recipes::recipe(ceo_dismissal ~ ., data = data_train) %>%
update_role(dismissal_dataset_id, new_role = "ID") %>%
step_dummy(all_nominal_predictors()) %>%
# step_normalize(all_numeric_predictors()) %>%
# step_pca(all_numeric_predictors(), threshold = .99) %>%
step_smote(ceo_dismissal)
xgboost_rec %>%
prep() %>%
juice() %>%
glimpse()
## Rows: 8,988
## Columns: 7
## $ dismissal_dataset_id <fct> 12, 31, 43, 51, 63, 75, 76, 80, 99, 109, 110, 112…
## $ fyear <dbl> 1997, 1998, 2001, 1997, 1997, 1993, 2007, 1993, 1…
## $ co_per_rol <dbl> 1, 6, 11, 16, 22, 33, 34, 43, 60, 66, 68, 71, 73,…
## $ tenure_no_ceodb <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1…
## $ max_tenure_ceodb <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1…
## $ fyear_gone <dbl> 1998, 1998, 2002, 1997, 1998, 1995, 2007, 1993, 2…
## $ ceo_dismissal <fct> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0…
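The 8,988 juiced rows reflect the SMOTE upsampling; a quick count confirms that both levels of ceo_dismissal are now equally represented in the prepped training data:
# Confirm the SMOTE step balanced the outcome classes
xgboost_rec %>%
  prep() %>%
  juice() %>%
  count(ceo_dismissal)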
xgboost_spec <-
boost_tree(trees = tune()) %>%
set_mode("classification") %>%
set_engine("xgboost")
xgboost_workflow <-
workflow() %>%
add_recipe(xgboost_rec) %>%
add_model(xgboost_spec)
doParallel::registerDoParallel()
set.seed(65743)
xgboost_tune <-
tune_grid(xgboost_workflow,
resamples = data_cv,
grid = 5,
control = control_grid(save_pred = TRUE))
collect_metrics(xgboost_tune)
## # A tibble: 15 × 7
## trees .metric .estimator mean n std_err .config
## <int> <chr> <chr> <dbl> <int> <dbl> <chr>
## 1 155 accuracy binary 0.767 10 0.00347 Preprocessor1_Model1
## 2 155 brier_class binary 0.174 10 0.00169 Preprocessor1_Model1
## 3 155 roc_auc binary 0.583 10 0.00881 Preprocessor1_Model1
## 4 485 accuracy binary 0.741 10 0.00438 Preprocessor1_Model2
## 5 485 brier_class binary 0.190 10 0.00208 Preprocessor1_Model2
## 6 485 roc_auc binary 0.578 10 0.00775 Preprocessor1_Model2
## 7 1125 accuracy binary 0.726 10 0.00415 Preprocessor1_Model3
## 8 1125 brier_class binary 0.207 10 0.00263 Preprocessor1_Model3
## 9 1125 roc_auc binary 0.569 10 0.00828 Preprocessor1_Model3
## 10 1312 accuracy binary 0.725 10 0.00412 Preprocessor1_Model4
## 11 1312 brier_class binary 0.210 10 0.00253 Preprocessor1_Model4
## 12 1312 roc_auc binary 0.569 10 0.00789 Preprocessor1_Model4
## 13 1741 accuracy binary 0.724 10 0.00425 Preprocessor1_Model5
## 14 1741 brier_class binary 0.216 10 0.00241 Preprocessor1_Model5
## 15 1741 roc_auc binary 0.568 10 0.00792 Preprocessor1_Model5
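Before settling on a configuration, the tuning results can be summarised as a whole; a minimal sketch using show_best() and autoplot() from tune:
# Top configurations by ROC AUC, and metrics plotted against the number of trees
show_best(xgboost_tune, metric = "roc_auc")
autoplot(xgboost_tune)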
collect_predictions(xgboost_tune) %>%
group_by(id)
## # A tibble: 28,030 × 8
## # Groups: id [10]
## .pred_class .pred_0 .pred_1 id .row trees ceo_dismissal .config
## <fct> <dbl> <dbl> <chr> <int> <int> <fct> <chr>
## 1 0 0.940 0.0598 Fold01 5 155 0 Preprocessor1_M…
## 2 0 0.898 0.102 Fold01 10 155 0 Preprocessor1_M…
## 3 0 0.975 0.0249 Fold01 47 155 0 Preprocessor1_M…
## 4 0 0.585 0.415 Fold01 48 155 0 Preprocessor1_M…
## 5 0 0.996 0.00422 Fold01 63 155 0 Preprocessor1_M…
## 6 0 0.932 0.0682 Fold01 64 155 0 Preprocessor1_M…
## 7 0 0.943 0.0568 Fold01 69 155 0 Preprocessor1_M…
## 8 0 0.927 0.0726 Fold01 78 155 0 Preprocessor1_M…
## 9 0 0.957 0.0433 Fold01 82 155 0 Preprocessor1_M…
## 10 0 0.749 0.251 Fold01 95 155 0 Preprocessor1_M…
## # ℹ 28,020 more rows
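Grouping the out-of-fold predictions by resample is mainly useful for diagnostics such as per-fold ROC curves. A sketch, restricted to the best configuration by accuracy:
# One ROC curve per resample for the selected number of trees
collect_predictions(xgboost_tune,
                    parameters = select_best(xgboost_tune, metric = "accuracy")) %>%
  group_by(id) %>%
  roc_curve(ceo_dismissal, .pred_0) %>%
  autoplot()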
xgboost_last <- xgboost_workflow %>%
finalize_workflow(select_best(xgboost_tune, metric = "accuracy")) %>%
last_fit(data_split)
collect_metrics(xgboost_last)
## # A tibble: 3 × 4
## .metric .estimator .estimate .config
## <chr> <chr> <dbl> <chr>
## 1 accuracy binary 0.764 Preprocessor1_Model1
## 2 roc_auc binary 0.578 Preprocessor1_Model1
## 3 brier_class binary 0.173 Preprocessor1_Model1
collect_predictions(xgboost_last) %>%
yardstick::conf_mat(ceo_dismissal, .pred_class)
## Truth
## Prediction 0 1
## 0 1397 339
## 1 102 32
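The confusion matrix shows the weak spot: only 32 of the 371 actual dismissals in the test set are flagged as dismissals. Per-class metrics make this explicit (event_level = "second" treats the dismissal level "1" as the event of interest):
# Sensitivity and specificity with the dismissal class ("1") as the event
collect_predictions(xgboost_last) %>%
  yardstick::sens(ceo_dismissal, .pred_class, event_level = "second")
collect_predictions(xgboost_last) %>%
  yardstick::spec(ceo_dismissal, .pred_class, event_level = "second")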
library(vip)
##
## Attaching package: 'vip'
## The following object is masked from 'package:utils':
##
## vi
xgboost_last %>%
workflows::extract_fit_engine() %>%
vip()
A previous run of this model had an accuracy of 0.762 and a roc_auc of 0.577.

- Adding step_normalize(all_numeric_predictors()) dropped the accuracy to 0.682 but increased the roc_auc to 0.588.
- Adding step_pca(all_numeric_predictors(), threshold = .99) on top of the normalization step lowered the accuracy to 0.627 and the roc_auc to 0.571.
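For reference, a sketch of the recipe variant behind those comparison numbers, with the normalization and PCA steps switched on (the object names xgboost_rec_pca and xgboost_workflow_pca are mine, not part of the workflow above):
# Variant recipe: normalize numeric predictors, compress with PCA, then SMOTE
xgboost_rec_pca <- recipe(ceo_dismissal ~ ., data = data_train) %>%
  update_role(dismissal_dataset_id, new_role = "ID") %>%
  step_dummy(all_nominal_predictors()) %>%
  step_normalize(all_numeric_predictors()) %>%
  step_pca(all_numeric_predictors(), threshold = .99) %>%
  step_smote(ceo_dismissal)

# Swap the variant recipe into the existing workflow and re-tune to reproduce
xgboost_workflow_pca <- xgboost_workflow %>%
  update_recipe(xgboost_rec_pca)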