library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.4     ✔ tidyr     1.3.1
## ✔ purrr     1.0.4     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
attrition_raw_tbl <- read_csv("../00_data/WA_Fn-UseC_-HR-Employee-Attrition.csv")
## Rows: 1470 Columns: 35
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr  (9): Attrition, BusinessTravel, Department, EducationField, Gender, Job...
## dbl (26): Age, DailyRate, DistanceFromHome, Education, EmployeeCount, Employ...
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# If data is not sensitive:
attrition_raw_tbl %>% glimpse()
## Rows: 1,470
## Columns: 35
## $ Age                      <dbl> 41, 49, 37, 33, 27, 32, 59, 30, 38, 36, 35, 2…
## $ Attrition                <chr> "Yes", "No", "Yes", "No", "No", "No", "No", "…
## $ BusinessTravel           <chr> "Travel_Rarely", "Travel_Frequently", "Travel…
## $ DailyRate                <dbl> 1102, 279, 1373, 1392, 591, 1005, 1324, 1358,…
## $ Department               <chr> "Sales", "Research & Development", "Research …
## $ DistanceFromHome         <dbl> 1, 8, 2, 3, 2, 2, 3, 24, 23, 27, 16, 15, 26, …
## $ Education                <dbl> 2, 1, 2, 4, 1, 2, 3, 1, 3, 3, 3, 2, 1, 2, 3, …
## $ EducationField           <chr> "Life Sciences", "Life Sciences", "Other", "L…
## $ EmployeeCount            <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, …
## $ EmployeeNumber           <dbl> 1, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14, 15, 16,…
## $ EnvironmentSatisfaction  <dbl> 2, 3, 4, 4, 1, 4, 3, 4, 4, 3, 1, 4, 1, 2, 3, …
## $ Gender                   <chr> "Female", "Male", "Male", "Female", "Male", "…
## $ HourlyRate               <dbl> 94, 61, 92, 56, 40, 79, 81, 67, 44, 94, 84, 4…
## $ JobInvolvement           <dbl> 3, 2, 2, 3, 3, 3, 4, 3, 2, 3, 4, 2, 3, 3, 2, …
## $ JobLevel                 <dbl> 2, 2, 1, 1, 1, 1, 1, 1, 3, 2, 1, 2, 1, 1, 1, …
## $ JobRole                  <chr> "Sales Executive", "Research Scientist", "Lab…
## $ JobSatisfaction          <dbl> 4, 2, 3, 3, 2, 4, 1, 3, 3, 3, 2, 3, 3, 4, 3, …
## $ MaritalStatus            <chr> "Single", "Married", "Single", "Married", "Ma…
## $ MonthlyIncome            <dbl> 5993, 5130, 2090, 2909, 3468, 3068, 2670, 269…
## $ MonthlyRate              <dbl> 19479, 24907, 2396, 23159, 16632, 11864, 9964…
## $ NumCompaniesWorked       <dbl> 8, 1, 6, 1, 9, 0, 4, 1, 0, 6, 0, 0, 1, 0, 5, …
## $ Over18                   <chr> "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", …
## $ OverTime                 <chr> "Yes", "No", "Yes", "Yes", "No", "No", "Yes",…
## $ PercentSalaryHike        <dbl> 11, 23, 15, 11, 12, 13, 20, 22, 21, 13, 13, 1…
## $ PerformanceRating        <dbl> 3, 4, 3, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 3, 3, …
## $ RelationshipSatisfaction <dbl> 1, 4, 2, 3, 4, 3, 1, 2, 2, 2, 3, 4, 4, 3, 2, …
## $ StandardHours            <dbl> 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 8…
## $ StockOptionLevel         <dbl> 0, 1, 0, 0, 1, 0, 3, 1, 0, 2, 1, 0, 1, 1, 0, …
## $ TotalWorkingYears        <dbl> 8, 10, 7, 8, 6, 8, 12, 1, 10, 17, 6, 10, 5, 3…
## $ TrainingTimesLastYear    <dbl> 0, 3, 3, 3, 3, 2, 3, 2, 2, 3, 5, 3, 1, 2, 4, …
## $ WorkLifeBalance          <dbl> 1, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3, 2, 3, 3, …
## $ YearsAtCompany           <dbl> 6, 10, 0, 8, 2, 7, 1, 1, 9, 7, 5, 9, 5, 2, 4,…
## $ YearsInCurrentRole       <dbl> 4, 7, 0, 7, 2, 7, 0, 0, 7, 7, 4, 5, 2, 2, 2, …
## $ YearsSinceLastPromotion  <dbl> 0, 1, 0, 3, 2, 3, 0, 0, 1, 7, 0, 0, 4, 1, 0, …
## $ YearsWithCurrManager     <dbl> 5, 7, 0, 0, 2, 6, 0, 0, 8, 7, 3, 8, 3, 2, 3, …
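A quick class-balance check (not part of the original run) helps motivate the stratified split and class balancing used below:

# the classes are imbalanced (~16% "Yes"), which strata = attrition and
# balance_classes address later
attrition_raw_tbl %>%
  count(Attrition) %>%
  mutate(prop = n / sum(n))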

Prompt 1:

The goal is to help predict attrition for employees.

Please write R code to create a predictive model that predicts the probability of attrition.

# Load libraries
library(tidymodels)
## ── Attaching packages ────────────────────────────────────── tidymodels 1.2.0 ──
## ✔ broom        1.0.7     ✔ rsample      1.2.1
## ✔ dials        1.4.0     ✔ tune         1.2.1
## ✔ infer        1.0.7     ✔ workflows    1.1.4
## ✔ modeldata    1.4.0     ✔ workflowsets 1.1.0
## ✔ parsnip      1.3.0     ✔ yardstick    1.3.2
## ✔ recipes      1.1.1
## ── Conflicts ───────────────────────────────────────── tidymodels_conflicts() ──
## ✖ scales::discard() masks purrr::discard()
## ✖ dplyr::filter()   masks stats::filter()
## ✖ recipes::fixed()  masks stringr::fixed()
## ✖ dplyr::lag()      masks stats::lag()
## ✖ yardstick::spec() masks readr::spec()
## ✖ recipes::step()   masks stats::step()
## • Learn how to get started at https://www.tidymodels.org/start/
library(dplyr)
library(janitor)
## Warning: package 'janitor' was built under R version 4.4.3
## 
## Attaching package: 'janitor'
## The following objects are masked from 'package:stats':
## 
##     chisq.test, fisher.test
# Step 1: Clean and prepare data
attrition_tbl <- attrition_raw_tbl %>%
  clean_names() %>%
  mutate(attrition = factor(attrition, levels = c("No", "Yes")))

# Step 2: Split data
set.seed(123)
attrition_split <- initial_split(attrition_tbl, prop = 0.8, strata = attrition)
attrition_train <- training(attrition_split)
attrition_test  <- testing(attrition_split)

# Step 3: Preprocessing recipe
attrition_recipe <- recipe(attrition ~ ., data = attrition_train) %>%
  update_role(employee_number, new_role = "ID") %>%
  step_rm(employee_count, over18, standard_hours) %>%
  step_zv(all_predictors()) %>%
  step_dummy(all_nominal_predictors()) %>%
  step_normalize(all_numeric_predictors())

# Step 4: Define logistic regression model
logistic_model <- logistic_reg(mode = "classification", engine = "glm")

# Step 5: Create workflow
attrition_wf <- workflow() %>%
  add_model(logistic_model) %>%
  add_recipe(attrition_recipe)

# Step 6: Fit model
attrition_fit <- attrition_wf %>%
  fit(data = attrition_train)
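An optional check, not in the original: the fitted glm coefficients can be pulled out of the workflow for inspection.

# inspect the fitted logistic regression coefficients
attrition_fit %>%
  extract_fit_parsnip() %>%
  tidy()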

# Step 7: Make predictions on test data
attrition_preds <- predict(attrition_fit, new_data = attrition_test, type = "prob") %>%
  bind_cols(attrition_test %>% select(attrition)) %>%
  bind_cols(predict(attrition_fit, new_data = attrition_test, type = "class"))

# Step 8: Evaluate performance
metrics(attrition_preds, truth = attrition, estimate = .pred_class)
## # A tibble: 2 × 3
##   .metric  .estimator .estimate
##   <chr>    <chr>          <dbl>
## 1 accuracy binary         0.871
## 2 kap      binary         0.464
roc_auc(attrition_preds, truth = attrition, .pred_Yes)
## # A tibble: 1 × 3
##   .metric .estimator .estimate
##   <chr>   <chr>          <dbl>
## 1 roc_auc binary         0.112
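A note on the AUC above: with factor levels c("No", "Yes"), yardstick's default event_level = "first" treats "No" as the event, while .pred_Yes was supplied, so the reported value is roughly 1 minus the true AUC. A minimal correction (not re-run here) is to declare the second level as the event, or equivalently to pass .pred_No:

# mark "Yes" (the second factor level) as the event of interest
roc_auc(attrition_preds, truth = attrition, .pred_Yes, event_level = "second")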

Prompt 2:

Please update the code to use tidymodels instead of caret and to use the h2o model instead of glmnet.

# Load libraries
library(tidymodels)
library(h2o)
## Warning: package 'h2o' was built under R version 4.4.3
## 
## ----------------------------------------------------------------------
## 
## Your next step is to start H2O:
##     > h2o.init()
## 
## For H2O package documentation, ask for help:
##     > ??h2o
## 
## After starting H2O, you can use the Web UI at http://localhost:54321
## For more information visit https://docs.h2o.ai
## 
## ----------------------------------------------------------------------
## 
## Attaching package: 'h2o'
## The following objects are masked from 'package:lubridate':
## 
##     day, hour, month, week, year
## The following objects are masked from 'package:stats':
## 
##     cor, sd, var
## The following objects are masked from 'package:base':
## 
##     %*%, %in%, &&, ||, apply, as.factor, as.numeric, colnames,
##     colnames<-, ifelse, is.character, is.factor, is.numeric, log,
##     log10, log1p, log2, round, signif, trunc
library(janitor)
library(dplyr)

# Initialize H2O
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         47 minutes 10 seconds 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    1 year, 4 months and 10 days 
##     H2O cluster name:           H2O_started_from_R_sheac_ggl822 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   1.42 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.4.2 (2024-10-31 ucrt)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (1 year, 4 months and 10 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
# Step 1: Read data
attrition_raw_tbl <- readr::read_csv("C:/Users/sheac/OneDrive/Documents/PSU_DAT3100/00_data/WA_Fn-UseC_-HR-Employee-Attrition.csv")
## Rows: 1470 Columns: 35
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr  (9): Attrition, BusinessTravel, Department, EducationField, Gender, Job...
## dbl (26): Age, DailyRate, DistanceFromHome, Education, EmployeeCount, Employ...
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# Step 2: Clean and prepare
attrition_tbl <- attrition_raw_tbl %>%
  clean_names() %>%
  mutate(attrition = factor(attrition, levels = c("No", "Yes")))

# Step 3: Split data
set.seed(123)
attrition_split <- initial_split(attrition_tbl, prop = 0.8, strata = attrition)
attrition_train <- training(attrition_split)
attrition_test  <- testing(attrition_split)

# Step 4: Recipe
attrition_recipe <- recipe(attrition ~ ., data = attrition_train) %>%
  update_role(employee_number, new_role = "ID") %>%
  step_rm(employee_count, over18, standard_hours) %>%
  step_zv(all_predictors()) %>%
  step_dummy(all_nominal_predictors()) %>%
  step_normalize(all_numeric_predictors())

# Step 5: Prep and juice the data
prepped_recipe <- prep(attrition_recipe)
train_juiced   <- juice(prepped_recipe)
test_baked     <- bake(prepped_recipe, new_data = attrition_test)
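A small aside: juice() still works but is superseded in current recipes releases; the equivalent call is bake() with new_data = NULL.

# equivalent to juice(prepped_recipe) on recent recipes versions
train_juiced <- bake(prepped_recipe, new_data = NULL)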

# Step 6: Convert to H2O frame
train_h2o <- as.h2o(train_juiced)
test_h2o  <- as.h2o(test_baked)
# Step 7: H2O model training (AutoML or GBM as example)
automl_model <- h2o.automl(
  x = setdiff(names(train_h2o), c("attrition", "employee_number")),
  y = "attrition",
  training_frame = train_h2o,
  max_runtime_secs = 60,
  balance_classes = TRUE,
  seed = 123
)
## 21:21:06.814: AutoML: XGBoost is not available; skipping it.
## 21:21:24.23: GBM_grid_1_AutoML_6_20250430_212106 [GBM Grid Search] failed: java.lang.AssertionError: Coldata -0.759627153880504 out of range hourly_rate:-1.69101314961767-1.5933479932449601 step=0.16421805714313148 nbins=20 actNBins=20 isInt=0
## cnt=3.000000, [-1.691013 - -1.526795], mean/var=  0.10/  0.00,
## cnt=1.000000, [-1.526795 - -1.362577], mean/var=  0.08/  0.00,
## cnt=0.000000, [-1.362577 - -1.198359], mean/var=  0.00/  0.00,
## cnt=1.000000, [-1.198359 - -1.034141], mean/var= -0.24/  0.00,
## cnt=1.000000, [-1.034141 - -0.869923], mean/var=  0.07/  0.00,
## cnt=0.000000, [-0.869923 - -0.705705], mean/var=  0.00/  0.00,
## cnt=0.000000, [-0.705705 - -0.541487], mean/var=  0.00/  0.00,
## cnt=0.000000, [-0.541487 - -0.377269], mean/var=  0.00/  0.00,
## cnt=0.000000, [-0.377269 - -0.213051], mean/var=  0.00/  0.00,
## cnt=4.000000, [-0.213051 - -0.048833], mean/var=  0.13/  0.00,
## cnt=3.000000, [-0.048833 - 0.115385], mean/var=  0.10/  0.00,
## cnt=3.000000, [0.115385 - 0.279604], mean/var=  0.07/  0.00,
## cnt=0.000000, [0.279604 - 0.443822], mean/var=  0.00/  0.00,
## cnt=0.000000, [0.443822 - 0.608040], mean/var=  0.00/  0.00,
## cnt=3.000000, [0.608040 - 0.772258], mean/var=  0.08/  0.00,
## cnt=3.000000, [0.772258 - 0.936476], mean/var=  0.07/  0.00,
## cnt=0.000000, [0.936476 - 1.100694], mean/var=  0.00/  0.00,
## cnt=0.000000, [1.100694 - 1.264912], mean/var=  0.00/  0.00,
## cnt=0.000000, [1.264912 - 1.429130], mean/var=  0.00/  0.00,
## cnt=2.000000, [1.429130 - 1.593348], mean/var=  0.08/  0.00,
## 21:21:42.80: Model `GBM_grid_1_AutoML_6_20250430_212106_model_1` has unexpectedly been deleted from H2O: ignoring the model and/or removing it from the leaderboard.
## 21:21:42.80: Model `GBM_grid_1_AutoML_6_20250430_212106_model_3` has unexpectedly been deleted from H2O: ignoring the model and/or removing it from the leaderboard.
## 21:21:42.80: Model `GBM_grid_1_AutoML_6_20250430_212106_model_2` has unexpectedly been deleted from H2O: ignoring the model and/or removing it from the leaderboard.
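The GBM grid failure logged above is an H2O-side assertion raised while binning a numeric column; whether the standardized, dummy-encoded inputs trigger it is not verified here. H2O's tree-based models do not need centered/scaled predictors or dummy variables, so a common alternative (a sketch, not what was run above) is to hand AutoML the cleaned tibble and let it encode the factors itself:

# Sketch (assumption): skip step_dummy()/step_normalize() entirely and
# pass the factor/character columns straight to H2O
train_h2o_raw <- attrition_train %>%
  select(-employee_count, -over18, -standard_hours) %>%
  mutate(across(where(is.character), as.factor)) %>%
  as.h2o()

automl_raw <- h2o.automl(
  x = setdiff(names(train_h2o_raw), c("attrition", "employee_number")),
  y = "attrition",
  training_frame = train_h2o_raw,
  max_runtime_secs = 60,
  balance_classes = TRUE,
  seed = 123
)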
# Step 8: Make predictions
preds <- h2o.predict(automl_model@leader, test_h2o)
preds_df <- as.data.frame(preds)

# Step 9: Combine with actuals
results <- bind_cols(
  attrition = test_baked$attrition,
  predicted_class = preds_df$predict,
  prob_no = preds_df$No,
  prob_yes = preds_df$Yes
)
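bind_cols() accepts named vectors here, but tibble() states the intent a bit more directly (an equivalent sketch):

# equivalent construction of the results table
results <- tibble(
  attrition       = test_baked$attrition,
  predicted_class = preds_df$predict,
  prob_no         = preds_df$No,
  prob_yes        = preds_df$Yes
)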

# Accuracy and other metrics
results_metrics <- metrics(results, truth = attrition, estimate = predicted_class)

# AUC
auc_value <- roc_auc(results, truth = attrition, prob_yes)

# Print
print(results_metrics)
## # A tibble: 2 × 3
##   .metric  .estimator .estimate
##   <chr>    <chr>          <dbl>
## 1 accuracy binary         0.861
## 2 kap      binary         0.547
print(auc_value)
## # A tibble: 1 × 3
##   .metric .estimator .estimate
##   <chr>   <chr>          <dbl>
## 1 roc_auc binary        0.0973
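As in Prompt 1, this AUC is inverted: yardstick's default event_level = "first" scores "No" as the event while prob_yes was supplied. A hedged correction (not re-run here):

# score "Yes" as the event of interest
roc_auc(results, truth = attrition, prob_yes, event_level = "second")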

Prompt 3:

Error in .h2o.doSafeREST(h2oRestApiVersion = h2oRestApiVersion, urlSuffix = page, :

# 1. Confirm H2O is running
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         52 minutes 40 seconds 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    1 year, 4 months and 10 days 
##     H2O cluster name:           H2O_started_from_R_sheac_ggl822 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   1.74 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.4.2 (2024-10-31 ucrt)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (1 year, 4 months and 10 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
# 2. Check Java version
system("java -version")
## [1] 0
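The [1] 0 above is only the exit status; java -version writes its banner to stderr, so to see it inside R the output has to be captured (a sketch, not in the original):

# capture the Java version banner (written to stderr) as a character vector
system2("java", args = "-version", stdout = TRUE, stderr = TRUE)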
# 3. Inspect your H2O frame
h2o.describe(train_h2o)
##                                Label Type Missing Zeros PosInf NegInf
## 1                                age real       0     0      0      0
## 2                         daily_rate real       0     0      0      0
## 3                 distance_from_home real       0     0      0      0
## 4                          education real       0     0      0      0
## 5                    employee_number  int       0     0      0      0
## 6           environment_satisfaction real       0     0      0      0
## 7                        hourly_rate real       0     0      0      0
## 8                    job_involvement real       0     0      0      0
## 9                          job_level real       0     0      0      0
## 10                  job_satisfaction real       0     0      0      0
## 11                    monthly_income real       0     0      0      0
## 12                      monthly_rate real       0     0      0      0
## 13              num_companies_worked real       0     0      0      0
## 14               percent_salary_hike real       0     0      0      0
## 15                performance_rating real       0     0      0      0
## 16         relationship_satisfaction real       0     0      0      0
## 17                stock_option_level real       0     0      0      0
## 18               total_working_years real       0     0      0      0
## 19          training_times_last_year real       0     0      0      0
## 20                 work_life_balance real       0     0      0      0
## 21                  years_at_company real       0     0      0      0
## 22             years_in_current_role real       0     0      0      0
## 23        years_since_last_promotion real       0     0      0      0
## 24           years_with_curr_manager real       0     0      0      0
## 25                         attrition enum       0   986      0      0
## 26 business_travel_Travel_Frequently real       0     0      0      0
## 27     business_travel_Travel_Rarely real       0     0      0      0
## 28 department_Research...Development real       0     0      0      0
## 29                  department_Sales real       0     0      0      0
## 30     education_field_Life.Sciences real       0     0      0      0
## 31         education_field_Marketing real       0     0      0      0
## 32           education_field_Medical real       0     0      0      0
## 33             education_field_Other real       0     0      0      0
## 34  education_field_Technical.Degree real       0     0      0      0
## 35                       gender_Male real       0     0      0      0
## 36          job_role_Human.Resources real       0     0      0      0
## 37    job_role_Laboratory.Technician real       0     0      0      0
## 38                  job_role_Manager real       0     0      0      0
## 39   job_role_Manufacturing.Director real       0     0      0      0
## 40        job_role_Research.Director real       0     0      0      0
## 41       job_role_Research.Scientist real       0     0      0      0
## 42          job_role_Sales.Executive real       0     0      0      0
## 43     job_role_Sales.Representative real       0     0      0      0
## 44            marital_status_Married real       0     0      0      0
## 45             marital_status_Single real       0     0      0      0
## 46                     over_time_Yes real       0     0      0      0
##           Min          Max          Mean     Sigma Cardinality
## 1  -2.0850036    2.5710861 -2.558717e-17   1.00000          NA
## 2  -1.7378056    1.7296501  9.662952e-17   1.00000          NA
## 3  -1.0108389    2.4715276 -6.084543e-16   1.00000          NA
## 4  -1.8685813    2.0468588 -5.117434e-16   1.00000          NA
## 5   1.0000000 2068.0000000  1.023358e+03 605.62206          NA
## 6  -1.6089143    1.1482189  7.977560e-16   1.00000          NA
## 7  -1.7400335    1.6913886 -2.435118e-16   1.00000          NA
## 8  -2.4311929    1.7904598 -7.620315e-16   1.00000          NA
## 9  -0.9574690    2.6775103  3.563298e-16   1.00000          NA
## 10 -1.5787506    1.1532553 -6.608483e-16   1.00000          NA
## 11 -1.1693596    2.8802210 -4.384920e-17   1.00000          NA
## 12 -1.7184862    1.7900028  6.650056e-18   1.00000          NA
## 13 -1.0721293    2.5374750  1.973248e-17   1.00000          NA
## 14 -1.1512279    2.6461852 -1.139496e-15   1.00000          NA
## 15 -0.4362500    2.2903126 -7.685909e-16   1.00000          NA
## 16 -1.5782605    1.1923413  1.031727e-15   1.00000          NA
## 17 -0.9382129    2.5763466  2.818926e-17   1.00000          NA
## 18 -1.4425561    3.6945964 -6.093487e-16   1.00000          NA
## 19 -2.1663889    2.4815240  4.068198e-16   1.00000          NA
## 20 -2.5052487    1.7445115  1.880007e-16   1.00000          NA
## 21 -1.1471747    5.4305013 -1.369889e-16   1.00000          NA
## 22 -1.1785978    3.8480898 -1.099652e-15   1.00000          NA
## 23 -0.6897883    4.0315780  1.075529e-16   1.00000          NA
## 24 -1.1631352    3.6421647 -5.448116e-16   1.00000          NA
## 25  0.0000000    1.0000000  1.608511e-01   0.36755           2
## 26 -0.4730409    2.1121828  8.918647e-16   1.00000          NA
## 27 -1.5961450    0.6259763  1.127137e-15   1.00000          NA
## 28 -1.3756732    0.7262982 -1.172456e-15   1.00000          NA
## 29 -0.6523867    1.5315288  4.547144e-16   1.00000          NA
## 30 -0.8365632    1.1943496 -1.012645e-15   1.00000          NA
## 31 -0.3433402    2.9100847 -1.088539e-16   1.00000          NA
## 32 -0.6749976    1.4802258  9.384854e-16   1.00000          NA
## 33 -0.2572680    3.8836893  2.016074e-16   1.00000          NA
## 34 -0.3081877    3.2420140  2.321819e-16   1.00000          NA
## 35 -1.2177359    0.8204972  2.146395e-15   1.00000          NA
## 36 -0.2017656    4.9520289 -4.867797e-16   1.00000          NA
## 37 -0.4676466    2.1365468 -3.416321e-16   1.00000          NA
## 38 -0.2647033    3.7745999  4.653396e-16   1.00000          NA
## 39 -0.3324032    3.0058342  5.721877e-16   1.00000          NA
## 40 -0.2458004    4.0648787  6.936997e-16   1.00000          NA
## 41 -0.4917986    2.0316222 -1.097375e-15   1.00000          NA
## 42 -0.5341488    1.8705441 -1.298007e-15   1.00000          NA
## 43 -0.2379211    4.1994966  1.163078e-16   1.00000          NA
## 44 -0.9312925    1.0728626  7.286923e-16   1.00000          NA
## 45 -0.6763333    1.4773026  5.431853e-16   1.00000          NA
## 46 -0.6312449    1.5828230  3.150692e-16   1.00000          NA

Prompt 4:

Please update the code to use h2o.performance in Step 5, instead of mean.

# Load libraries
library(tidymodels)
library(h2o)
library(janitor)
library(dplyr)

# Initialize H2O
h2o.init()
##  Connection successful!
## 
## R is connected to the H2O cluster: 
##     H2O cluster uptime:         52 minutes 44 seconds 
##     H2O cluster timezone:       America/New_York 
##     H2O data parsing timezone:  UTC 
##     H2O cluster version:        3.44.0.3 
##     H2O cluster version age:    1 year, 4 months and 10 days 
##     H2O cluster name:           H2O_started_from_R_sheac_ggl822 
##     H2O cluster total nodes:    1 
##     H2O cluster total memory:   1.74 GB 
##     H2O cluster total cores:    8 
##     H2O cluster allowed cores:  8 
##     H2O cluster healthy:        TRUE 
##     H2O Connection ip:          localhost 
##     H2O Connection port:        54321 
##     H2O Connection proxy:       NA 
##     H2O Internal Security:      FALSE 
##     R Version:                  R version 4.4.2 (2024-10-31 ucrt)
## Warning in h2o.clusterInfo(): 
## Your H2O cluster version is (1 year, 4 months and 10 days) old. There may be a newer version available.
## Please download and install the latest version from: https://h2o-release.s3.amazonaws.com/h2o/latest_stable.html
# Step 1: Read and prepare data
attrition_raw_tbl <- readr::read_csv("C:/Users/sheac/OneDrive/Documents/PSU_DAT3100/00_data/WA_Fn-UseC_-HR-Employee-Attrition.csv")
## Rows: 1470 Columns: 35
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr  (9): Attrition, BusinessTravel, Department, EducationField, Gender, Job...
## dbl (26): Age, DailyRate, DistanceFromHome, Education, EmployeeCount, Employ...
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
attrition_tbl <- attrition_raw_tbl %>%
  clean_names() %>%
  mutate(attrition = factor(attrition, levels = c("No", "Yes")))

# Step 2: Split data
set.seed(123)
attrition_split <- initial_split(attrition_tbl, prop = 0.8, strata = attrition)
attrition_train <- training(attrition_split)
attrition_test  <- testing(attrition_split)

# Step 3: Preprocessing with recipe
attrition_recipe <- recipe(attrition ~ ., data = attrition_train) %>%
  update_role(employee_number, new_role = "ID") %>%
  step_rm(employee_count, over18, standard_hours) %>%
  step_zv(all_predictors()) %>%
  step_dummy(all_nominal_predictors()) %>%
  step_normalize(all_numeric_predictors())

# Step 4: Prepare data for H2O
prepped_recipe <- prep(attrition_recipe)
train_juiced   <- juice(prepped_recipe)
test_baked     <- bake(prepped_recipe, new_data = attrition_test)

train_h2o <- as.h2o(train_juiced)
test_h2o  <- as.h2o(test_baked)
# Set x and y
x <- setdiff(names(train_h2o), c("attrition", "employee_number"))
y <- "attrition"

# Step 5: Train model with H2O AutoML
automl_model <- h2o.automl(
  x = x,
  y = y,
  training_frame = train_h2o,
  max_runtime_secs = 60,
  balance_classes = TRUE,
  seed = 123
)
## 21:26:44.113: AutoML: XGBoost is not available; skipping it.
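Before scoring, it can help to see which model AutoML selected (this check was not part of the original run):

# top of the AutoML leaderboard; the first row is automl_model@leader
head(automl_model@leaderboard)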
# Step 6: Evaluate performance using h2o.performance
perf <- h2o.performance(automl_model@leader, newdata = test_h2o)

# Print performance metrics
print(perf)
## H2OBinomialMetrics: stackedensemble
## 
## MSE:  0.0824071
## RMSE:  0.2870664
## LogLoss:  0.2760028
## Mean Per-Class Error:  0.1698296
## AUC:  0.9042679
## AUCPR:  0.7041058
## Gini:  0.8085358
## 
## Confusion Matrix (vertical: actual; across: predicted) for F1-optimal threshold:
##         No Yes    Error     Rate
## No     230  17 0.068826  =17/247
## Yes     13  35 0.270833   =13/48
## Totals 243  52 0.101695  =30/295
## 
## Maximum Metrics: Maximum metrics at their respective thresholds
##                         metric threshold      value idx
## 1                       max f1  0.339026   0.700000  51
## 2                       max f2  0.177814   0.742049  90
## 3                 max f0point5  0.444626   0.706522  33
## 4                 max accuracy  0.444626   0.898305  33
## 5                max precision  0.952680   1.000000   0
## 6                   max recall  0.038098   1.000000 212
## 7              max specificity  0.952680   1.000000   0
## 8             max absolute_mcc  0.339026   0.639635  51
## 9   max min_per_class_accuracy  0.230490   0.833333  78
## 10 max mean_per_class_accuracy  0.177814   0.838310  90
## 11                     max tns  0.952680 247.000000   0
## 12                     max fns  0.952680  47.000000   0
## 13                     max fps  0.000741 247.000000 294
## 14                     max tps  0.038098  48.000000 212
## 15                     max tnr  0.952680   1.000000   0
## 16                     max fnr  0.952680   0.979167   0
## 17                     max fpr  0.000741   1.000000 294
## 18                     max tpr  0.038098   1.000000 212
## 
## Gains/Lift Table: Extract with `h2o.gainsLift(<model>, <data>)` or `h2o.gainsLift(<model>, valid=<T/F>, xval=<T/F>)`
h2o.auc(perf)
## [1] 0.9042679
h2o.confusionMatrix(perf)
## Confusion Matrix (vertical: actual; across: predicted)  for max f1 @ threshold = 0.339025778684586:
##         No Yes    Error     Rate
## No     230  17 0.068826  =17/247
## Yes     13  35 0.270833   =13/48
## Totals 243  52 0.101695  =30/295
h2o.logloss(perf)
## [1] 0.2760028
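For reference, the earlier near-zero ROC AUC values were inverted scores (see the notes under Prompts 1 and 2); h2o.performance() reports the AUC directly, about 0.904 here. A hedged cross-check with yardstick on this run's leader (not part of the original output):

# score the Prompt 4 leader with yardstick, declaring "Yes" as the event
preds4 <- as.data.frame(h2o.predict(automl_model@leader, test_h2o))
roc_auc(
  tibble(attrition = test_baked$attrition, prob_yes = preds4$Yes),
  truth = attrition, prob_yes, event_level = "second"
)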