Goal: build a regression model to predict furniture prices at IKEA. The data comes from the TidyTuesday project (see the URL in the code below).
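The code below assumes these packages are attached (a minimal setup; skimr, rsample, recipes, and tune are also called with explicit namespaces where needed):
library(tidyverse)         # wrangling and ggplot2
library(tidytext)          # unnest_tokens()
library(correlationfunnel) # binarize(), correlate(), plot_correlation_funnel()
library(tidymodels)        # rsample, recipes, parsnip, workflows, tune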
ikea <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-11-03/ikea.csv')
## New names:
## • `` -> `...1`
## Rows: 3694 Columns: 14
## ── Column specification ────────────────────────────────────────────────
## Delimiter: ","
## chr (7): name, category, old_price, link, other_colors, short_description, d...
## dbl (6): ...1, item_id, price, depth, height, width
## lgl (1): sellable_online
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
skimr::skim(ikea)
| Data summary | |
|:---|:---|
| Name | ikea |
| Number of rows | 3694 |
| Number of columns | 14 |
| Column type frequency: | |
| character | 7 |
| logical | 1 |
| numeric | 6 |
| Group variables | None |
Variable type: character
| skim_variable | n_missing | complete_rate | min | max | empty | n_unique | whitespace |
|---|---|---|---|---|---|---|---|
| name | 0 | 1 | 3 | 27 | 0 | 607 | 0 |
| category | 0 | 1 | 4 | 36 | 0 | 17 | 0 |
| old_price | 0 | 1 | 4 | 13 | 0 | 365 | 0 |
| link | 0 | 1 | 52 | 163 | 0 | 2962 | 0 |
| other_colors | 0 | 1 | 2 | 3 | 0 | 2 | 0 |
| short_description | 0 | 1 | 3 | 63 | 0 | 1706 | 0 |
| designer | 0 | 1 | 3 | 1261 | 0 | 381 | 0 |
Variable type: logical
| skim_variable | n_missing | complete_rate | mean | count |
|---|---|---|---|---|
| sellable_online | 0 | 1 | 0.99 | TRU: 3666, FAL: 28 |
Variable type: numeric
| skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
|---|---|---|---|---|---|---|---|---|---|---|
| ...1 | 0 | 1.00 | 1846.50 | 1066.51 | 0 | 923.25 | 1846.5 | 2769.75 | 3693 | ▇▇▇▇▇ |
| item_id | 0 | 1.00 | 48632396.79 | 28887094.10 | 58487 | 20390574.00 | 49288078.0 | 70403572.75 | 99932615 | ▇▇▇▇▇ |
| price | 0 | 1.00 | 1078.21 | 1374.65 | 3 | 180.90 | 544.7 | 1429.50 | 9585 | ▇▁▁▁▁ |
| depth | 1463 | 0.60 | 54.38 | 29.96 | 1 | 38.00 | 47.0 | 60.00 | 257 | ▇▃▁▁▁ |
| height | 988 | 0.73 | 101.68 | 61.10 | 1 | 67.00 | 83.0 | 124.00 | 700 | ▇▂▁▁▁ |
| width | 589 | 0.84 | 104.47 | 71.13 | 1 | 60.00 | 80.0 | 140.00 | 420 | ▇▅▂▁▁ |
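The skim output shows heavy missingness in the dimension columns; a quick check of the missing share per column (a small dplyr sketch) confirms that depth is the worst offender:
# Proportion of missing values in each dimension column
ikea %>%
  summarise(across(c(depth, height, width), ~ mean(is.na(.x))))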
data <- ikea %>%
  # Keep the ID, the outcome, and candidate predictors; depth is dropped
  # because roughly 40% of its values are missing
  select(item_id, price, height, width, designer) %>%
  # Drop rows with missing values
  na.omit() %>%
  # Log-transform the positively skewed outcome
  mutate(price = log(price)) %>%
  # Convert any remaining logical columns to factors
  mutate(across(where(is.logical), factor))
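As a sanity check (a sketch, assuming ggplot2 is attached), the log transform should pull the long right tail of price into a roughly symmetric shape:
# Price distribution before and after the log transform
ikea %>% ggplot(aes(price)) + geom_histogram(bins = 40)
data %>% ggplot(aes(price)) + geom_histogram(bins = 40)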
Identify good predictors
width
data %>%
ggplot(aes(price, width)) +
scale_y_log10() +
geom_point()
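The scatterplot suggests wider items command higher prices; a quick numeric check (a sketch, taking the correlation on the log scale) backs this up:
# Correlation between log price and log width
cor(data$price, log(data$width))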
category
# category was dropped from `data`, so plot from the raw ikea table
ikea %>%
  ggplot(aes(log(price), category)) +
  geom_boxplot()
short_description
ikea %>%
  # Tokenize the short description
  unnest_tokens(output = word, input = short_description) %>%
  # Calculate the average log price per word
  group_by(word) %>%
  summarise(price = mean(log(price)),
            n = n()) %>%
  ungroup() %>%
  # Keep frequent words and drop numeric tokens
  filter(n > 10, !str_detect(word, "\\d")) %>%
  slice_max(order_by = price, n = 20) %>%
  # Plot the 20 words with the highest average log price
  ggplot(aes(price, fct_reorder(word, price))) +
  geom_point() +
  labs(x = "Average log price", y = "Words in short description")
EDA shortcut: the correlationfunnel package
# Step 1: Prepare data
data_binarized_tbl <- data %>%
select(-item_id) %>%
binarize()
data_binarized_tbl %>% glimpse()
## Rows: 2,591
## Columns: 30
## $ `price__-Inf_5.57972982598622` <dbl> 1, 0, 1, 1, 0, 1, 1, 1, 0, …
## $ price__5.57972982598622_6.45362499889269 <dbl> 0, 0, 0, 0, 1, 0, 0, 0, 1, …
## $ price__6.45362499889269_7.34277918933185 <dbl> 0, 1, 0, 0, 0, 0, 0, 0, 0, …
## $ price__7.34277918933185_Inf <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `height__-Inf_70` <dbl> 0, 0, 0, 1, 0, 0, 0, 0, 0, …
## $ height__70_83 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ height__83_127 <dbl> 1, 1, 1, 0, 1, 1, 1, 1, 1, …
## $ height__127_Inf <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `width__-Inf_60` <dbl> 1, 0, 1, 0, 1, 1, 1, 1, 1, …
## $ width__60_80 <dbl> 0, 1, 0, 1, 0, 0, 0, 0, 0, …
## $ width__80_150 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ width__150_Inf <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Carina_Bengs <dbl> 0, 0, 0, 0, 1, 0, 0, 0, 1, …
## $ designer__Ebba_Strandmark <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Ehlén_Johansson <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `designer__Ehlén_Johansson/IKEA_of_Sweden` <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Francis_Cayouette <dbl> 0, 1, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Henrik_Preutz <dbl> 0, 0, 1, 0, 0, 0, 0, 0, 0, …
## $ designer__IKEA_of_Sweden <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `designer__IKEA_of_Sweden/Ehlén_Johansson` <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `designer__IKEA_of_Sweden/Jon_Karlsson` <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Johan_Kroon <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Jon_Karlsson <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `designer__K_Hagberg/M_Hagberg` <dbl> 0, 0, 0, 0, 0, 1, 1, 1, 0, …
## $ designer__Marcus_Arvonen <dbl> 0, 0, 0, 1, 0, 0, 0, 0, 0, …
## $ designer__Nike_Karlsson <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Ola_Wihlborg <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Studio_Copenhagen <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ designer__Tord_Björklund <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, …
## $ `designer__-OTHER` <dbl> 1, 0, 0, 0, 0, 0, 0, 0, 0, …
# Step 2: Correlate every binary feature with the top price bin
data_corr_tbl <- data_binarized_tbl %>%
correlate(price__7.34277918933185_Inf)
data_corr_tbl
## # A tibble: 30 × 3
## feature bin correlation
## <fct> <chr> <dbl>
## 1 price 7.34277918933185_Inf 1
## 2 width 150_Inf 0.546
## 3 width -Inf_60 -0.389
## 4 price -Inf_5.57972982598622 -0.334
## 5 price 6.45362499889269_7.34277918933185 -0.333
## 6 price 5.57972982598622_6.45362499889269 -0.332
## 7 height -Inf_70 -0.299
## 8 width 60_80 -0.215
## 9 designer Ehlén_Johansson/IKEA_of_Sweden 0.204
## 10 designer IKEA_of_Sweden/Ehlén_Johansson 0.190
## # ℹ 20 more rows
# Step 3: Plot
data_corr_tbl %>%
plot_correlation_funnel()
## Warning: ggrepel: 18 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
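The warning only means some funnel labels were dropped; ggrepel exposes a global option to allow more overlaps (a sketch; 30 is an arbitrary value):
# Allow more overlapping labels before ggrepel gives up
options(ggrepel.max.overlaps = 30)
data_corr_tbl %>% plot_correlation_funnel()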
Split data
# Split into train and test dataset
set.seed(1234)
data_split <- rsample::initial_split(data)
data_train <- training(data_split)
data_test <- testing(data_split)
# Further split training dataset for cross-validation
set.seed(2345)
data_cv <- rsample::vfold_cv(data_train)
data_cv
## # 10-fold cross-validation
## # A tibble: 10 × 2
## splits id
## <list> <chr>
## 1 <split [1748/195]> Fold01
## 2 <split [1748/195]> Fold02
## 3 <split [1748/195]> Fold03
## 4 <split [1749/194]> Fold04
## 5 <split [1749/194]> Fold05
## 6 <split [1749/194]> Fold06
## 7 <split [1749/194]> Fold07
## 8 <split [1749/194]> Fold08
## 9 <split [1749/194]> Fold09
## 10 <split [1749/194]> Fold10
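Each fold fits on roughly 90% of the training rows and assesses on the remaining 10%; to inspect one fold (a sketch using rsample accessors):
# Row counts for the analysis and assessment sets of the first fold
nrow(rsample::analysis(data_cv$splits[[1]]))
nrow(rsample::assessment(data_cv$splits[[1]]))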
The usemodels package prints scaffolding for the recipe, model specification, and workflow; note that the generated spec uses classification mode and placeholder tuning arguments, both of which we correct below.
library(usemodels)
usemodels::use_xgboost(price ~ ., data = data_train)
## xgboost_recipe <-
## recipe(formula = price ~ ., data = data_train) %>%
## step_zv(all_predictors())
##
## xgboost_spec <-
## boost_tree(trees = tune(), min_n = tune(), tree_depth = tune(), learn_rate = tune(),
## loss_reduction = tune(), sample_size = tune()) %>%
## set_mode("classification") %>%
## set_engine("xgboost")
##
## xgboost_workflow <-
## workflow() %>%
## add_recipe(xgboost_recipe) %>%
## add_model(xgboost_spec)
##
## set.seed(42752)
## xgboost_tune <-
## tune_grid(xgboost_workflow, resamples = stop("add your rsample object"), grid = stop("add number of candidate points"))
# Specify recipe
xgboost_recipe <-
  recipe(formula = price ~ ., data = data_train) %>%
  # Keep item_id as an identifier rather than a predictor
  recipes::update_role(item_id, new_role = "ID") %>%
  # Pool infrequent designers into an "other" level
  step_other(designer) %>%
  # One-hot encode the designer factor
  step_dummy(designer, one_hot = TRUE) %>%
  # Log-transform the skewed dimension columns
  step_log(width, height)
xgboost_recipe %>% prep() %>% juice() %>% glimpse()
## Rows: 1,943
## Columns: 6
## $ item_id <dbl> 39251926, 59248867, 30346986, 40299885, 893239…
## $ height <dbl> 4.499810, 4.094345, 3.688879, 4.158883, 4.3174…
## $ width <dbl> 4.584967, 4.941642, 3.091042, 4.787492, 4.2484…
## $ price <dbl> 7.106606, 6.363028, 3.912023, 6.163315, 6.1092…
## $ designer_IKEA.of.Sweden <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1…
## $ designer_other <dbl> 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0…
# Specify model
xgboost_spec <-
boost_tree(trees = tune(), min_n = tune(), mtry = tune(), learn_rate = tune()) %>%
set_mode("regression") %>%
set_engine("xgboost")
# Combine recipe and model using workflow
xgboost_workflow <-
workflow() %>%
add_recipe(xgboost_recipe) %>%
add_model(xgboost_spec)
# Tune hyperparameters
set.seed(344)
xgboost_tune <-
tune_grid(xgboost_workflow,
resamples = data_cv,
grid = 5)
## i Creating pre-processing data to finalize unknown parameter: mtry
## Warning: package 'xgboost' was built under R version 4.3.3
tune::show_best(xgboost_tune, metric = "rmse")
## # A tibble: 5 × 10
## mtry trees min_n learn_rate .metric .estimator mean n std_err .config
## <int> <int> <int> <dbl> <chr> <chr> <dbl> <int> <dbl> <chr>
## 1 3 1524 23 0.0836 rmse standard 0.551 10 0.0128 Preproces…
## 2 3 768 12 0.112 rmse standard 0.553 10 0.0141 Preproces…
## 3 1 1613 36 0.0290 rmse standard 0.613 10 0.0130 Preproces…
## 4 2 1104 28 0.00484 rmse standard 0.632 10 0.0113 Preproces…
## 5 4 162 7 0.00108 rmse standard 5.09 10 0.0266 Preproces…
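To see how RMSE varies across the sampled hyperparameter values, tune provides an autoplot method (a quick sketch):
# Plot RMSE against each tuned hyperparameter
autoplot(xgboost_tune, metric = "rmse")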
# Update the model by selecting the best hyperparameters.
xgboost_fw <- tune::finalize_workflow(xgboost_workflow,
tune::select_best(xgboost_tune, metric = "rmse"))
# Fit the model on the entire training data and test it on the test data.
data_fit <- tune::last_fit(xgboost_fw, data_split)
tune::collect_metrics(data_fit)
## # A tibble: 2 × 4
## .metric .estimator .estimate .config
## <chr> <chr> <dbl> <chr>
## 1 rmse standard 0.565 Preprocessor1_Model1
## 2 rsq standard 0.808 Preprocessor1_Model1
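To check which predictors drive the model, one option is the vip package (a sketch, assuming vip is installed):
library(vip)
# Pull the fitted parsnip model out of the last_fit result and plot importance
data_fit %>%
  tune::extract_workflow() %>%
  workflows::extract_fit_parsnip() %>%
  vip(num_features = 10)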
tune::collect_predictions(data_fit) %>%
  ggplot(aes(price, .pred)) +
  geom_point(alpha = 0.3, color = "midnightblue") +
  geom_abline(lty = 2, color = "gray50") +
  coord_fixed()
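Since the model predicts log price, predictions can be mapped back to the original currency scale with exp() (a sketch):
# Back-transform observed and predicted prices to the original scale
tune::collect_predictions(data_fit) %>%
  mutate(price = exp(price), .pred = exp(.pred)) %>%
  select(price, .pred) %>%
  head()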