Goal: Split our data and build a model. Click [here for the data](https://github.com/rfordatascience/tidytuesday/tree/master/data/2022/2022-11-01).
library(tidyverse)   # dplyr, tidyr, ggplot2, readr
library(tidymodels)  # rsample, recipes, parsnip, workflows, tune, yardstick

horror_movies <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-11-01/horror_movies.csv')
skimr::skim(horror_movies)
| Name | horror_movies |
|---|---|
| Number of rows | 32540 |
| Number of columns | 20 |
| Column type frequency: | |
| character | 10 |
| Date | 1 |
| logical | 1 |
| numeric | 8 |
| Group variables | None |
Variable type: character
| skim_variable | n_missing | complete_rate | min | max | empty | n_unique | whitespace |
|---|---|---|---|---|---|---|---|
| original_title | 0 | 1.00 | 1 | 191 | 0 | 30296 | 0 |
| title | 0 | 1.00 | 1 | 191 | 0 | 29563 | 0 |
| original_language | 0 | 1.00 | 2 | 2 | 0 | 97 | 0 |
| overview | 1286 | 0.96 | 1 | 1000 | 0 | 31020 | 0 |
| tagline | 19835 | 0.39 | 1 | 237 | 0 | 12513 | 0 |
| poster_path | 4474 | 0.86 | 30 | 32 | 0 | 28048 | 0 |
| status | 0 | 1.00 | 7 | 15 | 0 | 4 | 0 |
| backdrop_path | 18995 | 0.42 | 29 | 32 | 0 | 13536 | 0 |
| genre_names | 0 | 1.00 | 6 | 144 | 0 | 772 | 0 |
| collection_name | 30234 | 0.07 | 4 | 56 | 0 | 815 | 0 |
Variable type: Date
| skim_variable | n_missing | complete_rate | min | max | median | n_unique |
|---|---|---|---|---|---|---|
| release_date | 0 | 1 | 1950-01-01 | 2022-12-31 | 2012-12-09 | 10999 |
Variable type: logical
| skim_variable | n_missing | complete_rate | mean | count |
|---|---|---|---|---|
| adult | 0 | 1 | 0 | FAL: 32540 |
Variable type: numeric
| skim_variable | n_missing | complete_rate | mean | sd | p0 | p25 | p50 | p75 | p100 | hist |
|---|---|---|---|---|---|---|---|---|---|---|
| id | 0 | 1.00 | 445910.83 | 305744.67 | 17 | 146494.8 | 426521.00 | 707534.00 | 1033095.00 | ▇▆▆▅▅ |
| popularity | 0 | 1.00 | 4.01 | 37.51 | 0 | 0.6 | 0.84 | 2.24 | 5088.58 | ▇▁▁▁▁ |
| vote_count | 0 | 1.00 | 62.69 | 420.89 | 0 | 0.0 | 2.00 | 11.00 | 16900.00 | ▇▁▁▁▁ |
| vote_average | 0 | 1.00 | 3.34 | 2.88 | 0 | 0.0 | 4.00 | 5.70 | 10.00 | ▇▂▆▃▁ |
| budget | 0 | 1.00 | 543126.59 | 4542667.81 | 0 | 0.0 | 0.00 | 0.00 | 200000000.00 | ▇▁▁▁▁ |
| revenue | 0 | 1.00 | 1349746.73 | 14430479.15 | 0 | 0.0 | 0.00 | 0.00 | 701842551.00 | ▇▁▁▁▁ |
| runtime | 0 | 1.00 | 62.14 | 41.00 | 0 | 14.0 | 80.00 | 91.00 | 683.00 | ▇▁▁▁▁ |
| collection | 30234 | 0.07 | 481534.88 | 324498.16 | 656 | 155421.0 | 471259.00 | 759067.25 | 1033032.00 | ▇▅▅▅▅ |
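Before transforming anything, note that genre_names packs several genres into one comma-separated string, which is what motivates the separate_rows() call below. A quick count makes this visible (a minimal sketch using dplyr's count()):

# The most common genre combinations; note the comma-separated multi-genre strings
horror_movies %>%
  count(genre_names, sort = TRUE) %>%
  head()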
data <- horror_movies %>%
  # log1p(x) computes log(1 + x), so zero vote averages map to 0 instead of -Inf
  mutate(vote_average = log1p(vote_average)) %>%
  # genre_names holds comma-separated genres; give each genre its own row
  separate_rows(genre_names, sep = ", ") %>%
  filter(status == "Released") %>%
  select(id, vote_average, genre_names, overview, runtime) %>%
  na.omit()
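A quick sanity check on the transform (a sketch using base R only):

# log1p() keeps zero-vote movies finite: log(0) is -Inf, but log1p(0) is 0
log1p(c(0, 4, 10))
log(c(0, 4, 10) + 1)  # same values; expm1() inverts the transform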
data %>%
ggplot(aes(runtime, vote_average)) +
geom_point()
# Mean log-vote per runtime value (grouping by vote_average as well would
# make the mean a no-op)
data2 <- data %>%
  group_by(runtime) %>%
  summarise(mean_group = mean(vote_average))

data2 %>%
  ggplot(aes(x = runtime, y = mean_group, color = runtime)) +
  geom_point()
library(correlationfunnel)  # binarize(), correlate(), plot_correlation_funnel()

data_binarized_tbl <- data %>%
  select(-overview, -genre_names) %>%
  binarize()
data_binarized_tbl %>% glimpse()
## Rows: 62,252
## Columns: 11
## $ `id__-Inf_105927` <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ id__105927_387814 <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ id__387814_654747 <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ id__654747_Inf <dbl> 1, 1, 1, 1, 1, 1, 1, 1…
## $ `vote_average__-Inf_1.70474809223843` <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ vote_average__1.70474809223843_1.93152141160321 <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ vote_average__1.93152141160321_Inf <dbl> 1, 1, 1, 1, 1, 1, 1, 1…
## $ `runtime__-Inf_24` <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ runtime__24_84 <dbl> 0, 0, 0, 0, 0, 0, 0, 0…
## $ runtime__84_93 <dbl> 0, 0, 1, 1, 1, 0, 0, 0…
## $ runtime__93_Inf <dbl> 1, 1, 0, 0, 0, 1, 1, 1…
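binarize() quantile-bins each numeric column and one-hot encodes the bins, so for every feature exactly one bin should be active per row. A quick check on the runtime bins (a sketch):

# Each row should have exactly one active runtime bin
data_binarized_tbl %>%
  select(starts_with("runtime")) %>%
  rowSums() %>%
  unique()  # expect a single value: 1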
data_corr_tbl <- data_binarized_tbl %>%
  correlate(`vote_average__-Inf_1.70474809223843`)
data_corr_tbl
## # A tibble: 11 × 3
## feature bin correlation
## <fct> <chr> <dbl>
## 1 vote_average -Inf_1.70474809223843 1
## 2 vote_average 1.70474809223843_1.93152141160321 -0.585
## 3 vote_average 1.93152141160321_Inf -0.579
## 4 id 654747_Inf 0.237
## 5 id -Inf_105927 -0.233
## 6 runtime -Inf_24 0.188
## 7 runtime 93_Inf -0.178
## 8 runtime 84_93 -0.0929
## 9 runtime 24_84 0.0778
## 10 id 105927_387814 -0.0419
## 11 id 387814_654747 0.0386
data_corr_tbl %>%
plot_correlation_funnel()
Split Data
# Downsample to 100 rows so tuning runs quickly for this walkthrough;
# set the seed first so the sample and split are reproducible
set.seed(1234)
data <- sample_n(data, 100)

# Split into train and test sets
data_split <- rsample::initial_split(data)
data_train <- training(data_split)
data_test <- testing(data_split)
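initial_split() defaults to a 75/25 split. Since vote_average is skewed, a stratified split is worth considering; this sketch adds the strata argument (an option, not part of the split used above):

# Stratify on the outcome so train and test see similar vote distributions
data_split_strat <- rsample::initial_split(data, prop = 0.75, strata = vote_average)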
# Further split training data set for cross-validation
set.seed(2345)
data_cv <- rsample::vfold_cv(data_train)
data_cv
## # 10-fold cross-validation
## # A tibble: 10 × 2
## splits id
## <list> <chr>
## 1 <split [67/8]> Fold01
## 2 <split [67/8]> Fold02
## 3 <split [67/8]> Fold03
## 4 <split [67/8]> Fold04
## 5 <split [67/8]> Fold05
## 6 <split [68/7]> Fold06
## 7 <split [68/7]> Fold07
## 8 <split [68/7]> Fold08
## 9 <split [68/7]> Fold09
## 10 <split [68/7]> Fold10
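Each row of data_cv holds one resample; rsample::analysis() and rsample::assessment() pull out the modeling and held-out pieces. A quick look at the first fold (a sketch):

first_fold <- data_cv$splits[[1]]
dim(rsample::analysis(first_fold))    # ~9/10 of the training rows
dim(rsample::assessment(first_fold))  # the held-out tenth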
library(usemodels)
## Warning: package 'usemodels' was built under R version 4.4.1
usemodels::use_xgboost(vote_average ~., data = data_train)
## xgboost_recipe <-
## recipe(formula = vote_average ~ ., data = data_train) %>%
## step_zv(all_predictors())
##
## xgboost_spec <-
## boost_tree(trees = tune(), min_n = tune(), tree_depth = tune(), learn_rate = tune(),
## loss_reduction = tune(), sample_size = tune()) %>%
## set_mode("classification") %>%
## set_engine("xgboost")
##
## xgboost_workflow <-
## workflow() %>%
## add_recipe(xgboost_recipe) %>%
## add_model(xgboost_spec)
##
## set.seed(6804)
## xgboost_tune <-
## tune_grid(xgboost_workflow, resamples = stop("add your rsample object"), grid = stop("add number of candidate points"))
xgboost_recipe <-
  recipe(formula = vote_average ~ ., data = data_train) %>%
  update_role(id, new_role = "id") %>%            # id is a key, not a predictor
  step_rm(overview) %>%                           # free text can't be dummy-encoded sensibly
  step_other(genre_names, threshold = 0.05) %>%   # pool rare genres into "other"
  step_dummy(all_nominal_predictors(), one_hot = TRUE) %>%
  step_YeoJohnson(all_numeric_predictors())
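Before tuning, it's worth confirming the recipe produces the columns we expect: prep() estimates the steps on the training data, and bake(new_data = NULL) returns the processed training set. A sketch:

xgboost_recipe %>%
  prep() %>%
  bake(new_data = NULL) %>%
  glimpse()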
xgboost_spec <-
boost_tree(
trees = tune(),
min_n = tune(),
tree_depth = tune(),
learn_rate = tune(),
loss_reduction = tune(),
sample_size = tune()
) %>%
set_mode("regression") %>%
set_engine("xgboost")
xgboost_workflow <-
workflow() %>%
add_recipe(xgboost_recipe) %>%
add_model(xgboost_spec)
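The workflow bundles the recipe and model spec so they travel through tuning together. Printing its tunable parameters confirms what tune_grid() will search over (a sketch; extract_parameter_set_dials() ships with tidymodels):

xgboost_workflow %>%
  extract_parameter_set_dials()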
# Tune the model
set.seed(6804)
xgboost_tune <- tune_grid(
xgboost_workflow,
resamples = data_cv,
grid = 5
)
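To scan all tuning candidates rather than just the best ones, collect_metrics() returns one row per parameter set and metric, and autoplot() charts them (a sketch):

tune::collect_metrics(xgboost_tune)
autoplot(xgboost_tune)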
tune::show_best(xgboost_tune, metric = "rmse")
## # A tibble: 5 × 12
## trees min_n tree_depth learn_rate loss_reduction sample_size .metric
## <int> <int> <int> <dbl> <dbl> <dbl> <chr>
## 1 1020 11 5 0.00253 4.65e- 5 0.672 rmse
## 2 431 9 10 0.0100 7.91e-10 0.427 rmse
## 3 1309 24 2 0.00694 2.65e+ 0 0.905 rmse
## 4 1926 32 7 0.0768 5.00e- 8 0.553 rmse
## 5 121 34 15 0.212 1.08e- 1 0.110 rmse
## # ℹ 5 more variables: .estimator <chr>, mean <dbl>, n <int>, std_err <dbl>,
## # .config <chr>
xgboost_fw <- tune::finalize_workflow(xgboost_workflow,
tune::select_best(xgboost_tune, metric = "rmse"))
data_fit <- tune::last_fit(xgboost_fw, data_split)
tune::collect_metrics(data_fit)
## # A tibble: 2 × 4
## .metric .estimator .estimate .config
## <chr> <chr> <dbl> <chr>
## 1 rmse standard 0.750 Preprocessor1_Model1
## 2 rsq standard 0.177 Preprocessor1_Model1
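Because vote_average was log1p-transformed at the start, the RMSE above is on the log scale. expm1() undoes the transform if you want the error in raw vote units (a sketch):

tune::collect_predictions(data_fit) %>%
  mutate(across(c(vote_average, .pred), expm1)) %>%  # back to the original 0-10 scale
  yardstick::rmse(truth = vote_average, estimate = .pred)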
tune::collect_predictions(data_fit) %>%
  ggplot(aes(vote_average, .pred)) +
  geom_point(alpha = 0.3, color = "midnightblue") +  # points take colour, not fill
  geom_abline(lty = 2, color = "gray50") +
  coord_fixed()
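As a final check, variable importance from the fitted booster shows which predictors drove the model. This sketch assumes the vip package is installed:

library(vip)
data_fit %>%
  extract_workflow() %>%      # the finalized, fitted workflow
  extract_fit_engine() %>%    # the underlying xgb.Booster
  vip(num_features = 10)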