### Load necessary libraries
```r
options(warnPartialMatchArgs = FALSE)  # don't want these warnings
library(magrittr)  # pipes
library(tibble)    # tibbles
library(dplyr)     # data wrangling
library(boot)      # for `cv.glm`
library(purrr)     # iteration
library(ggplot2)   # tidy plotting
library(ISLR)      # Auto data set
```
# The `Auto` Data Set
```r
Auto %<>% as_tibble()  # Convert -> tibble
class(Auto)            # Confirm Auto is now a tibble
```
```
[1] "tbl_df"     "tbl"        "data.frame"
```
```r
names(Auto)            # List variables
```
```
[1] "mpg"          "cylinders"    "displacement" "horsepower"   "weight"       "acceleration"
[7] "year"         "origin"       "name"
```
```r
Auto                   # Print tibble; first 10 rows shown by default
Auto %>%
  ggplot(aes(x = horsepower, y = mpg)) +  # Plot hp vs mpg
  geom_point(alpha = 0.5) +               # Create a scatter plot
  geom_smooth(method = 'loess')           # Add a LOESS (local regression) smoother
```

# Cross-validation (by hand)
```r
# Create a random partition into training and test sets
set.seed(1)
n     <- nrow(Auto)
train <- sample(1:n, n/2)  # Randomly select half of the numbers 1 to n

# Fit linear model -> mpg as a function of hp
stats::lm(mpg ~ horsepower, data = Auto, subset = train)  # linear in horsepower
```
```
Call:
stats::lm(formula = mpg ~ horsepower, data = Auto, subset = train)

Coefficients:
(Intercept)   horsepower
    40.3404      -0.1617
```
```r
# Loop over polynomial power and calculate MSE on test set (-train)
purrr::map_dbl(1:5,
  ~stats::lm(mpg ~ stats::poly(horsepower, .x),  # fit lm
             data = Auto, subset = train) %>%
    predict(., Auto) %>%                 # Predict for all samples
    magrittr::subtract(Auto$mpg) %>%     # Obtain differences from actual values
    magrittr::raise_to_power(2) %>%      # Square differences
    magrittr::extract(-train) %>%        # Ignore training predictions
    mean()                               # MSE
)
```
```
[1] 26.14142 19.82259 19.78252 19.99969 20.18225
```
```r
# Set a different random seed to create a different random train/test partition
set.seed(2)
train <- sample(1:n, n/2)  # Randomly select half of the numbers 1 to n
purrr::map_dbl(1:5,
  ~stats::lm(mpg ~ stats::poly(horsepower, .x),
             data = Auto, subset = train) %>%  # fit lm
    predict(., Auto) %>%                 # Predict for all samples
    magrittr::subtract(Auto$mpg) %>%     # Obtain differences from actual values
    magrittr::raise_to_power(2) %>%      # Square differences
    magrittr::extract(-train) %>%        # Ignore training predictions
    mean()                               # MSE
)
```
```
[1] 23.29559 18.90124 19.25740 20.38538 20.29775
```
# Leave-One-Out CV (`boot::cv.glm`)
```r
# Linear regression using glm
glm_fit <- stats::glm(mpg ~ horsepower, data = Auto)
cv_err  <- boot::cv.glm(Auto, glm_fit)
cv_err$K            # Default K = n; i.e. LOOCV
```
```
[1] 392
```
```r
cv_err$delta
```
```
[1] 24.23151 24.23114
```
```r
# Loop over polynomial power, using raw (non-orthogonal) polynomials via `raw = TRUE`
purrr::map_dbl(1:5, function(.x) {
  fit <- stats::glm(mpg ~ stats::poly(horsepower, degree = .x, raw = TRUE),
                    data = Auto)
  boot::cv.glm(Auto, fit) %>%
    purrr::pluck("delta") %>%   # Retrieve the `delta` element
    purrr::pluck(1)             # Extract the first component of `delta`
})
```
```
[1] 24.23151 19.24821 19.33498 19.42443 19.03321
```
# K-fold CV (`boot::cv.glm`)
```r
set.seed(17)
# K-folds chosen at random; here K = 10
# Loop over polynomial power
# Default: `raw = FALSE`
purrr::map_dbl(1:10, function(.x) {
  fit <- stats::glm(mpg ~ stats::poly(horsepower, degree = .x), data = Auto)
  boot::cv.glm(Auto, fit, K = 10) %>%
    purrr::pluck("delta") %>%
    purrr::pluck(1)
})
```
```
 [1] 169.3269 190.3656 194.8383 195.6951 192.9770 196.7003 197.5728 197.6055 195.7763
[10] 203.9586
```
# Bootstrap (`boot::boot`)

Here we estimate the accuracy of a linear regression model via the bootstrap. We will first define a function to fit a linear model of `mpg ~ horsepower` given a dataset and a subset index (rows).

## Sampling with replacement
```r
# What does sample with replacement do?
# Sampling with replacement is what the bootstrap is all about!
set.seed(3)
sample(1:10, 10, replace = FALSE)
```
```
 [1]  2  8  4  3  9  6  1  5 10  7
```
```r
sample(1:10, 10, replace = TRUE)
```
```
 [1] 6 6 6 6 9 9 2 8 9 3
```
## Create a boot function
```r
boot_fn <- function(data, index) {
  lm(mpg ~ horsepower, data = data, subset = index) %>%
    purrr::pluck("coefficients")
}
```

## Use `boot::boot()`
```r
# 1000 bootstrap samples; doing this by hand gets tedious, so use `boot::boot`
boot::boot(Auto, boot_fn, R = 1000)
```
```
ORDINARY NONPARAMETRIC BOOTSTRAP

Call:
boot::boot(data = Auto, statistic = boot_fn, R = 1000)

Bootstrap Statistics :
       original        bias    std. error
t1*  39.9358610  0.02972191 0.860007896
t2*  -0.1578447 -0.00030823 0.007404467
```
```r
# Compare with linear regression
# (differences in the standard errors suggest some model assumptions may be violated)
lm(mpg ~ horsepower, data = Auto) %>%
  summary() %>%
  purrr::pluck("coefficients")
```
```
              Estimate  Std. Error   t value      Pr(>|t|)
(Intercept) 39.9358610 0.717498656  55.65984 1.220362e-187
horsepower  -0.1578447 0.006445501 -24.48914  7.031989e-81
```
## Repeat with a quadratic model
```r
# Redefine `boot_fn()`
boot_fn2 <- function(data, index) {
  lm(mpg ~ horsepower + I(horsepower^2), data = data, subset = index) %>%
    purrr::pluck("coefficients")
}

# Make the simulation reproducible
set.seed(1)
boot(Auto, boot_fn2, 1000)
```
```
ORDINARY NONPARAMETRIC BOOTSTRAP

Call:
boot(data = Auto, statistic = boot_fn2, R = 1000)

Bootstrap Statistics :
        original        bias    std. error
t1* 56.900099702  6.098115e-03 2.0944855842
t2* -0.466189630 -1.777108e-04 0.0334123802
t3*  0.001230536  1.324315e-06 0.0001208339
```
```r
# Compare with standard error from linear regression
lm(mpg ~ horsepower + I(horsepower^2), data = Auto) %>%
  summary() %>%
  purrr::pluck("coefficients")
```
```
                    Estimate   Std. Error   t value      Pr(>|t|)
(Intercept)     56.900099702 1.8004268063  31.60367 1.740911e-109
horsepower      -0.466189630 0.0311246171 -14.97816  2.289429e-40
I(horsepower^2)  0.001230536 0.0001220759  10.08009  2.196340e-21
```

Created on 2020-01-30 by [Rmarkdown](https://github.com/rstudio/rmarkdown) (v1.10) and R version 3.5.0 (2018-04-23).

-------------------------
---
title: 'STAA 577: Laboratory Five </br> Resampling methods (`tidyverse`)'
author: 'Adapted by Tavener & Field </br> From: James, Witten, Hastie and Tibshirani'
date: "`r format(Sys.Date(), '%e %B %Y')`"
output:
  html_notebook:
    code_folding: show
    number_sections: yes
    toc: yes
    toc_float:
      collapsed: no
ratio: '9:16'
tables: yes
fontsize: 12pt
---


### Load necessary libraries {-}
```{r setup, message = FALSE, warning = FALSE}
options(warnPartialMatchArgs = FALSE)  # don't want these warnings
library(magrittr)     # pipes
library(tibble)       # tibbles
library(dplyr)        # data wrangling
library(boot)         # for `cv.glm`
library(purrr)        # iteration
library(ggplot2)      # tidy plotting
library(ISLR)         # Auto data set
```

---------------------------

# The `Auto` Data Set
```{r data, include = TRUE}
Auto %<>% as_tibble()                     # Convert -> tibble
class(Auto)                               # Confirm Auto is now a tibble
names(Auto)                               # List variables
Auto                                      # Print tibble; first 10 rows shown by default
Auto %>%                                  
  ggplot(aes(x = horsepower, y = mpg)) +  # Plot hp vs mpg
  geom_point(alpha = 0.5) +               # Create a scatter plot
  geom_smooth(method = 'loess')           # Add a LOESS (local regression) smoother
```
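
The LOESS smooth suggests the `horsepower`-`mpg` relationship is nonlinear, which is what motivates the polynomial fits compared below. As an optional visual check (a sketch added here, not part of the original lab), a straight-line and a quadratic least-squares fit can be overlaid on the same scatter plot:

```{r linear_vs_quadratic_fit}
# Added sketch: overlay linear and quadratic least-squares fits on the scatter plot
Auto %>%
  ggplot(aes(x = horsepower, y = mpg)) +
  geom_point(alpha = 0.5) +
  geom_smooth(method = "lm", formula = y ~ x, se = FALSE, colour = "red") +          # straight line
  geom_smooth(method = "lm", formula = y ~ poly(x, 2), se = FALSE, colour = "blue")  # quadratic
```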


------------------------------


# Cross-validation (by hand)

```{r cv}
# Create a random partition into training and test sets
set.seed(1)
n     <- nrow(Auto)
train <- sample(1:n, n/2)         # Randomly select half of the numbers 1 to n

# Fit linear model -> mpg as a function of hp
stats::lm(mpg ~ horsepower, data = Auto, subset = train)  # linear in horsepower

# Loop over polynomial power and calculate MSE on test set (-train)
purrr::map_dbl(1:5,
  ~stats::lm(mpg ~ stats::poly(horsepower, .x),  # fit lm
             data = Auto, subset = train) %>%  
    predict(., Auto) %>%                 # Predict for all samples
    magrittr::subtract(Auto$mpg) %>%     # Obtain differences from actual values
    magrittr::raise_to_power(2) %>%      # Square differences
    magrittr::extract(-train) %>%        # Ignore training predictions
    mean()                               # MSE
)

# Set a different random seed to create a different random train/test partition
set.seed(2)
train <- sample(1:n, n/2)                # Randomly select half of the numbers 1 to n
purrr::map_dbl(1:5,
  ~stats::lm(mpg ~ stats::poly(horsepower, .x),
             data = Auto, subset = train) %>%  # fit lm
    predict(., Auto) %>%                 # Predict for all samples
    magrittr::subtract(Auto$mpg) %>%     # Obtain differences from actual values
    magrittr::raise_to_power(2) %>%      # Square differences
    magrittr::extract(-train) %>%        # Ignore training predictions
    mean()                               # MSE
)
```
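
Because the validation-set estimate depends on which half of the data ends up in `train`, it is worth seeing how much it moves across different random splits. The sketch below is an addition, not part of the original lab; the helper `val_set_mse()` is a name introduced here purely for illustration.

```{r validation_set_variability}
# Added sketch: validation-set MSE for a degree-`deg` polynomial, for a given random split
# (reuses `n <- nrow(Auto)` from the chunk above)
val_set_mse <- function(seed, deg) {
  set.seed(seed)
  idx <- sample(1:n, n/2)                        # random training half
  fit <- stats::lm(mpg ~ stats::poly(horsepower, deg),
                   data = Auto, subset = idx)
  mean((Auto$mpg - predict(fit, Auto))[-idx]^2)  # MSE on the held-out half
}

# Degree-2 test MSE across five different random splits
purrr::map_dbl(1:5, val_set_mse, deg = 2)
```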



# Leave-One-Out CV (`boot::cv.glm`)

```{r LOOCV}
# Linear regression using glm
glm_fit <- stats::glm(mpg ~ horsepower, data = Auto)
cv_err  <- boot::cv.glm(Auto, glm_fit)
cv_err$K            # Default K = n; i.e. LOOCV
cv_err$delta
```
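
`cv.glm()` returns `delta` as a length-two vector: the first component is the raw cross-validation estimate of prediction error and the second is a bias-adjusted version (the two are essentially identical for LOOCV). For a least-squares fit, the LOOCV error can also be obtained without refitting, using the leverage (hat-value) shortcut; a minimal check added here for illustration:

```{r loocv_shortcut}
# Added check: LOOCV for a least-squares fit via the leverage shortcut
# CV_(n) = mean( ((y - fitted) / (1 - h))^2 )
h <- stats::hatvalues(glm_fit)                          # leverages
mean(((Auto$mpg - stats::fitted(glm_fit)) / (1 - h))^2) # should match cv_err$delta[1]
```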


```{r LOOCV_poly, eval = FALSE, echo = FALSE}
# Loop over polynomial power
# This chunk not currently run
purrr::map_dbl(1:5, function(.x) {
  fit <- stats::glm(mpg ~ stats::poly(horsepower, degree = .x),
                    data = Auto)
  boot::cv.glm(Auto, fit) %>%
  purrr::pluck("delta") %>%     # Retrieve the `delta` element
  purrr::pluck(1)               # Extract the first component of `delta`
})
```


```{r LOOCV_poly_2}
# Loop over polynomial power
# `raw = TRUE`: use raw (non-orthogonal) polynomials
purrr::map_dbl(1:5, function(.x) {
  fit <- stats::glm(mpg ~ stats::poly(horsepower, degree = .x, raw = TRUE),
                    data = Auto)
  boot::cv.glm(Auto, fit) %>%
  purrr::pluck("delta") %>%     # Retrieve the `delta` element
  purrr::pluck(1)               # Extract the first component of `delta`
})
```
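
Note that orthogonal (the default) and raw polynomials of the same degree parameterise the same model, so the fitted values, and hence the LOOCV errors, should agree up to numerical precision. A quick sanity check, added here and not part of the original lab:

```{r raw_vs_orthogonal_check}
# Added check: same model space, different parameterisation -> identical fitted values
fit_orth <- stats::glm(mpg ~ stats::poly(horsepower, 2),             data = Auto)
fit_raw  <- stats::glm(mpg ~ stats::poly(horsepower, 2, raw = TRUE), data = Auto)
all.equal(stats::fitted(fit_orth), stats::fitted(fit_raw))
```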


-------------------------


# K-fold CV (`boot::cv.glm`)

```{r k-fold_cross_validation_orthogonal_polynomials}
set.seed(17)
# K-folds chosen at random; here K = 10
# Loop over polynomial power
# Default: `raw = FALSE`
purrr::map_dbl(1:10, function(.x) {
  fit <- stats::glm(mpg ~ stats::poly(horsepower, degree = .x), data = Auto)
  boot::cv.glm(Auto, fit, K = 10) %>%
    purrr::pluck("delta") %>%
    purrr::pluck(1)
})
```
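
To make explicit what `cv.glm(..., K = 10)` is doing, here is a by-hand version added for illustration: rows are assigned to ten folds at random, each fold is held out in turn, and the fold-wise test MSEs are averaged (shown for the quadratic model only).

```{r kfold_by_hand}
# Added sketch: 10-fold CV "by hand" for the quadratic model
set.seed(17)
folds <- sample(rep(1:10, length.out = nrow(Auto)))    # random fold label for each row
fold_mse <- purrr::map_dbl(1:10, function(k) {
  fit  <- stats::glm(mpg ~ stats::poly(horsepower, 2),
                     data = Auto[folds != k, ])        # fit on the other 9 folds
  test <- Auto[folds == k, ]
  mean((test$mpg - predict(fit, test))^2)              # MSE on the held-out fold
})
mean(fold_mse)   # (approximate) 10-fold CV estimate
```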

--------------------

# Bootstrap (`boot::boot`)

Here we estimate the accuracy of a linear regression model via the bootstrap.
We will first define a function to fit a linear model of `mpg ~ horsepower`
given a dataset and a subset index (rows).

## Sampling with replacement
```{r Sampling_with_replacement}
# What does sample with replacement do?
# Sampling with replacement is what the bootstrap is all about!
set.seed(3)
sample(1:10, 10, replace = FALSE)
sample(1:10, 10, replace = TRUE)
```
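
One consequence of sampling with replacement is that each bootstrap sample repeats some observations and leaves others out; on average only about 63% (roughly 1 - 1/e) of the original observations appear in any given sample. A small added check:

```{r bootstrap_sample_coverage}
# Added check: how many distinct observations does one bootstrap sample contain?
set.seed(3)
idx <- sample(1:392, 392, replace = TRUE)   # one bootstrap sample of the Auto row indices
mean(1:392 %in% idx)                        # proportion of rows appearing at least once (~0.63)
```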

## Create a boot function
```{r bootstrap_function}
boot_fn <- function(data, index) {
  lm(mpg ~ horsepower, data = data, subset = index) %>%
    purrr::pluck("coefficients")
}
```

## Perform bootstrap fits (by hand)
```{r bootstrap}
# Using all the data
boot_fn(Auto, 1:392)

# Make the simulation reproducible
set.seed(1)

# First sample
sample(1:392, 392, replace = TRUE) %>% boot_fn(Auto, .)

# Second sample
sample(1:392, 392, replace = TRUE) %>% boot_fn(Auto, .)
```
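
Repeating the by-hand resampling many times and taking the standard deviation of each coefficient across resamples gives bootstrap standard errors directly; this is what `boot::boot()` automates below. A minimal sketch of that loop (an addition; `boot_coefs` is just an illustrative name):

```{r bootstrap_by_hand_1000}
# Added sketch: 1000 bootstrap resamples "by hand"
set.seed(1)
boot_coefs <- purrr::map(1:1000, ~ boot_fn(Auto, sample(1:392, 392, replace = TRUE))) %>%
  do.call(rbind, .)          # 1000 x 2 matrix: one row of coefficients per resample
apply(boot_coefs, 2, sd)     # bootstrap standard errors for intercept and slope
```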


## Use `boot::boot()`
```{r bootstrap_boot}
# 1000 bootstrap samples; doing this by hand gets tedious, so use `boot::boot`
boot::boot(Auto, boot_fn, R = 1000)

# Compare with the standard errors from linear regression
# (differences from the bootstrap SEs suggest some model assumptions,
#  e.g. a correct linear mean function and constant error variance, may not hold)
lm(mpg ~ horsepower, data = Auto) %>%
  summary() %>%
  purrr::pluck("coefficients")
```
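
Keeping the returned `boot` object also makes it easy to get bootstrap confidence intervals with `boot::boot.ci()`. The sketch below is an addition (with `boot_out` as an illustrative name) showing a percentile interval for the `horsepower` coefficient (`index = 2`):

```{r boot_ci}
# Added sketch: percentile bootstrap CI for the slope via boot::boot.ci()
set.seed(1)
boot_out <- boot::boot(Auto, boot_fn, R = 1000)
boot::boot.ci(boot_out, type = "perc", index = 2)
```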

## Repeat with a quadratic model

```{r boot2}
# Redefine `boot_fn()`
boot_fn2 <- function(data, index) {
  lm(mpg ~ horsepower + I(horsepower^2), data = data, subset = index) %>%
    purrr::pluck("coefficients")
}

# Make the simulation reproducible
set.seed(1)
boot(Auto, boot_fn2, 1000)

# Compare with standard error from linear regression
lm(mpg ~ horsepower + I(horsepower^2), data = Auto) %>%
  summary() %>%
  purrr::pluck("coefficients")
```
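
The `boot` object stores the individual bootstrap estimates in its `t` matrix (one column per coefficient), so the sampling distribution of, say, the quadratic term can be inspected directly. A brief added sketch (`boot_out2` is an illustrative name):

```{r boot2_distribution}
# Added sketch: bootstrap sampling distribution of the quadratic coefficient
set.seed(1)
boot_out2 <- boot(Auto, boot_fn2, 1000)
ggplot(tibble(quad = boot_out2$t[, 3]), aes(x = quad)) +        # column 3 = I(horsepower^2)
  geom_histogram(bins = 30) +
  geom_vline(xintercept = boot_out2$t0[3], colour = "red")      # full-data estimate
```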


-------------------------

Created on `r Sys.Date()` by [Rmarkdown](https://github.com/rstudio/rmarkdown)
(v`r utils::packageVersion("rmarkdown")`) and `r R.version$version.string`.
