suppressWarnings({
  library(fpp3)
  library(dplyr)
  library(tidyr)
  library(fable)
  library(fabletools)
})
## Registered S3 method overwritten by 'tsibble':
##   method               from 
##   as_tibble.grouped_df dplyr
## ── Attaching packages ──────────────────────────────────────────── fpp3 1.0.0 ──
## ✔ tibble      3.2.1     ✔ tsibble     1.1.5
## ✔ dplyr       1.1.2     ✔ tsibbledata 0.4.1
## ✔ tidyr       1.3.0     ✔ feasts      0.3.2
## ✔ lubridate   1.9.2     ✔ fable       0.3.4
## ✔ ggplot2     3.5.1     ✔ fabletools  0.4.2
## ── Conflicts ───────────────────────────────────────────────── fpp3_conflicts ──
## ✖ lubridate::date()    masks base::date()
## ✖ dplyr::filter()      masks stats::filter()
## ✖ tsibble::intersect() masks base::intersect()
## ✖ tsibble::interval()  masks lubridate::interval()
## ✖ dplyr::lag()         masks stats::lag()
## ✖ tsibble::setdiff()   masks base::setdiff()
## ✖ tsibble::union()     masks base::union()

1.

Figure 9.32 shows the ACFs for 36 random numbers, 360 random numbers and 1,000 random numbers.

knitr::include_graphics("/Users/mollysiebecker/Desktop/Screenshot 2024-10-20 at 11.06.03 AM.png")

a.)

Explain the differences among these figures. Do they all indicate that the data are white noise?

Yes, all three figures indicate that the data are white noise, which is expected since they are random numbers. In each plot the autocorrelations fluctuate around zero rather than decaying slowly, as they would for non-stationary data, and every autocorrelation falls within the significance bounds. The figures differ in the width of those bounds and in the size of the individual autocorrelations because the sample sizes differ. As the sample size increases, both the significance bounds and the sample autocorrelations shrink towards zero, which again makes sense: with more random numbers, spurious correlations among them become smaller.

b.)

Why are the critical values at different distances from the mean of zero? Why are the autocorrelations different in each figure when they each refer to white noise?

The critical values are at different distances from the mean, and the autocorrelations differ, because the size of each set is different.

For 36 random numbers, we get a significance threshold of \(\pm \frac{1.96}{\sqrt{36}} \approx \pm 0.33\). For 360 random numbers, we get a significance threshold of \(\pm \frac{1.96}{\sqrt{360}} \approx \pm 0.10\). For 1,000 random numbers, we get a significance threshold of \(\pm \frac{1.96}{\sqrt{1000}} \approx \pm 0.06\).
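These thresholds are easy to verify directly in R; a minimal sketch (not part of the original output):

1.96 / sqrt(c(36, 360, 1000))
# approximately 0.327, 0.103 and 0.062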

2.

A classic example of a non-stationary series is stock prices. Plot the daily closing prices for Amazon stock (contained in gafa_stock), along with the ACF and PACF. Explain how each plot shows that the series is non-stationary and should be differenced.

amazon_stock <- gafa_stock %>%
  filter(Symbol == "AMZN")

autoplot(amazon_stock, Close) +
  labs(x = "Date", y = "Closing Price", title = "Daily Closing Prices of Amazon Stock, 2014-2019")

We can tell that the series is non-stationary because there is a clear upward trend: the data do not fluctuate around a constant, roughly horizontal level.

suppressWarnings({  # the irregular trading-day index of gafa_stock triggers a warning from ACF()
  ACF(amazon_stock, Close) %>%
    autoplot()
})

The ACF plot shows that the series is non-stationary for several reasons: a.) the autocorrelations extend well beyond the significance threshold, b.) the ACF decreases slowly rather than dropping quickly to zero, and c.) \(r_1\) is large and positive.

suppressWarnings({  # again suppressing the irregular-interval warning
  PACF(amazon_stock, Close) %>%
    autoplot()
})

The PACF shows the series is non-stationary because there is a single very large spike at lag 1, close to 1: almost all of the correlation with earlier observations is carried by the immediately preceding value, as in a random walk. Together with the slowly decaying ACF, this indicates the series should be differenced.
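As a quick numerical cross-check (a sketch, not part of the original output), the unitroot_ndiffs feature estimates how many first differences are needed; for a series like this we would expect it to return 1:

amazon_stock %>%
  features(Close, unitroot_ndiffs)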

3.

For the following series, find an appropriate Box-Cox transformation and order of differencing in order to obtain stationary data.

a.) Turkish GDP from global_economy.

turkish_economy <- global_economy %>%
  filter(Country == "Turkey")

lambda_1 <- turkish_economy |>
  features(GDP, features = guerrero) |>
  pull(lambda_guerrero)

turkish_economy <- turkish_economy %>%
  mutate(GDP_bc = box_cox(GDP, lambda_1))
autoplot(turkish_economy, GDP_bc) +
  labs(x = "Year", y = "Box-Cox transformed GDP", title = "Box-Cox Transformed Turkish GDP, 1960-2017")

turkish_economy %>%
  features(GDP_bc, unitroot_nsdiffs)
## # A tibble: 1 × 2
##   Country nsdiffs
##   <fct>     <int>
## 1 Turkey        0
turkish_economy %>%
  features(GDP_bc, unitroot_ndiffs)
## # A tibble: 1 × 2
##   Country ndiffs
##   <fct>    <int>
## 1 Turkey       1
turkish_economy <- turkish_economy %>%
  mutate(difference = difference(GDP_bc))

autoplot(turkish_economy, difference) +
  labs(x = "Year", y = "First Difference", title = "First Difference of Box-Cox Transformed Turkish GDP")
## Warning: Removed 1 row containing missing values or values outside the scale range
## (`geom_line()`).

A Box-Cox transformation with \(\lambda \approx 0.1572\) followed by a first difference will produce stationary data.
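As an extra check on the differenced series (a sketch, not part of the original output), a KPSS test can be run with the unitroot_kpss feature; a p-value above 0.05 would mean we cannot reject stationarity:

turkish_economy %>%
  features(difference, unitroot_kpss)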

b.) Accommodation takings in the state of Tasmania from aus_accommodation.

tasmanian_accommodation <- aus_accommodation %>%
  filter(State == "Tasmania")

lambda_2 <- tasmanian_accommodation |>
  features(Takings, features = guerrero) |>
  pull(lambda_guerrero)

tasmanian_accommodation <- tasmanian_accommodation %>%
  mutate(Takings_bc = box_cox(Takings, lambda_2))
autoplot(tasmanian_accommodation, Takings_bc) +
  labs(x = "Quarter", y = "Box-Cox transformed Takings", title = "Box-Cox Transformed Tasmanian Accommodation Takings, 1998-2016")

tasmanian_accommodation %>%
  features(Takings_bc, unitroot_nsdiffs)
## # A tibble: 1 × 2
##   State    nsdiffs
##   <chr>      <int>
## 1 Tasmania       1
tasmanian_accommodation <- tasmanian_accommodation %>%
  mutate(s_difference = difference(Takings_bc, 4))
autoplot(tasmanian_accommodation, s_difference) +
  labs(x = "Quarter", y = "Seasonal Difference", title = "Seasonal Difference of Box-Cox Transformed Tasmanian Accommodation Takings")
## Warning: Removed 4 rows containing missing values or values outside the scale range
## (`geom_line()`).

tasmanian_accommodation %>%
  features(s_difference, unitroot_ndiffs)
## # A tibble: 1 × 2
##   State    ndiffs
##   <chr>     <int>
## 1 Tasmania      0

A Box-Cox transformation with \(\lambda \approx 0.0018\) (essentially a log transformation, since \(\lambda \approx 0\)) and a single seasonal difference will produce stationary data.
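Since \(\lambda \approx 0\), the Box-Cox transform here is effectively a log; box_cox(x, 0) is defined as log(x). A minimal sketch illustrating the near-equivalence:

head(data.frame(
  box_cox = box_cox(tasmanian_accommodation$Takings, lambda_2),
  log     = log(tasmanian_accommodation$Takings)
))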

c.) Monthly sales from souvenirs.

lambda_3 <- souvenirs |>
  features(Sales, features = guerrero) |>
  pull(lambda_guerrero)

souvenirs <- souvenirs %>%
  mutate(Sales_bc = box_cox(Sales, lambda_3))
autoplot(souvenirs, Sales_bc) +
  labs(x = "Month", y = "Box-Cox transformed Sales", title = "Box-Cox Transformed Souvenir Sales, 1987-1993")

souvenirs %>%
  features(Sales_bc, unitroot_nsdiffs)
## # A tibble: 1 × 1
##   nsdiffs
##     <int>
## 1       1
souvenirs <- souvenirs %>%
  mutate(s_difference = difference(Sales_bc, 12))
autoplot(souvenirs, s_difference) +
  labs(x = "Month", y = "Seasonal Difference", title = "Seasonal Difference of Box-Cox Transformed Souvenir Sales")
## Warning: Removed 12 rows containing missing values or values outside the scale range
## (`geom_line()`).

souvenirs %>%
  features(s_difference, unitroot_ndiffs)
## # A tibble: 1 × 1
##   ndiffs
##    <int>
## 1      0

A Box-Cox transformation with \(\lambda \approx 0.0021\) (again essentially a log transformation) and a single seasonal difference will produce stationary data.
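To visually confirm the result, a combined display of the differenced series with its ACF and PACF could be produced (a sketch, not part of the original output):

souvenirs %>%
  gg_tsdisplay(s_difference, plot_type = "partial")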

5.

For your retail data (from Exercise 7 in Section 2.10), find the appropriate order of differencing (after transformation if necessary) to obtain stationary data.

set.seed(1989)
my_retail_series <- aus_retail %>%
  filter(`Series ID` == sample(aus_retail$`Series ID`,1))
autoplot(my_retail_series, Turnover) +
  labs(x= "Month", y= "Turnover (Millions of AUD)", title = "Turnover at Australian Retail Stores")

lambda_4 <- my_retail_series |>
  features(Turnover, features = guerrero) |>
  pull(lambda_guerrero)

my_retail_series <- my_retail_series %>%
  mutate(Turnover_bc = box_cox(Turnover, lambda_4))

autoplot(my_retail_series, Turnover_bc) +
  labs(x = "Month", y = "Box-Cox Transformed Turnover", title = "Box-Cox Transformed Australian Retail Turnover")

my_retail_series %>%
  features(Turnover_bc, unitroot_nsdiffs)
## # A tibble: 1 × 3
##   State              Industry       nsdiffs
##   <chr>              <chr>            <int>
## 1 Northern Territory Food retailing       1
my_retail_series <- my_retail_series %>%
  mutate(s_difference = difference(Turnover_bc, 12))
my_retail_series %>%
  features(s_difference, unitroot_ndiffs)
## # A tibble: 1 × 3
##   State              Industry       ndiffs
##   <chr>              <chr>           <int>
## 1 Northern Territory Food retailing      0
autoplot(my_retail_series, s_difference) +
  labs(x = "Month", y = "Seasonal Difference", title = "Seasonal Difference of Box-Cox Transformed Australian Retail Turnover")
## Warning: Removed 12 rows containing missing values or values outside the scale range
## (`geom_line()`).

A Box-Cox transformation with \(\lambda \approx -0.0628\) and a single seasonal difference will produce stationary data.

6.

Simulate and plot some data from simple ARIMA models.

a.)

Use the following R code to generate data from an AR(1) model with \(\phi_1=0.6\) and \(\sigma^2=1\). The process starts with \(y_1=0\).

y <- numeric(100)
e <- rnorm(100)
for(i in 2:100)
  y[i] <- 0.6*y[i-1] + e[i]
sim <- tsibble(idx = seq_len(100), y = y, index = idx)
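For comparison, the same model can be simulated without an explicit loop using base R's stats::arima.sim(); note that it uses its own burn-in period rather than the fixed start \(y_1=0\), so the resulting series will not match the loop above exactly:

sim_base <- tsibble(
  idx = seq_len(100),
  y = as.numeric(arima.sim(model = list(ar = 0.6), n = 100)),
  index = idx
)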

b.)

Produce a time plot for the series. How does the plot change as you change \(\phi_1\)?

autoplot(sim, y)

y_1 <- numeric(100)
y_2 <- numeric(100)
y_3 <- numeric(100)
y_4 <- numeric(100)
y_5 <- numeric(100)

for(i in 2:100) {
  y_1[i] <- -0.3*y_1[i-1] + e[i]
  y_2[i] <- 0.3*y_2[i-1] + e[i]
  y_3[i] <- 0.6*y_3[i-1] + e[i]
  y_4[i] <- 0.9*y_4[i-1] + e[i]
  y_5[i] <- 0.99*y_5[i-1] + e[i]
}

sim_ar_1 <- tsibble(idx = seq_len(100),
                    phi_neg_0.3 = y_1, phi_0.3 = y_2, phi_0.6 = y_3,
                    phi_0.9 = y_4, phi_0.99 = y_5, index = idx)

sim_long_ar_1 <- sim_ar_1 %>%
  pivot_longer(!idx, names_to = 'phi', values_to = 'y')

autoplot(sim_long_ar_1, y)

For moderate values of \(\phi_1\), whether positive or negative, the series shows limited variability and stays roughly horizontal, consistent with stationarity; with negative \(\phi_1\) it also tends to oscillate around the mean. As \(\phi_1\) approaches 1, the series wanders increasingly like a random walk, with larger and longer-lasting departures from the mean (only at \(\phi_1 = 1\) would it actually be a non-stationary random walk).
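This behaviour matches the theoretical variance of a stationary AR(1) process, which grows without bound as \(\phi_1\) approaches \(\pm 1\):

\[
\text{Var}(y_t) = \frac{\sigma^2}{1-\phi_1^2}
\]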

c.)

Write your own code to generate data from an MA(1) model with \(\theta_1=0.6\) and \(\sigma^2=1\).

y <- numeric(100)
e <- rnorm(100)
y[1] <- e[1]  # the first observation has no lagged error term
for(i in 2:100)
  y[i] <- e[i] + 0.6*e[i-1]
sim_ma_1 <- tsibble(idx = seq_len(100), y = y, index = idx)

d.)

Produce a time plot for the series. How does the plot change as you change \(\theta_1\)?

autoplot(sim_ma_1, y)

y_1 <- numeric(100)
y_2 <- numeric(100)
y_3 <- numeric(100)
y_4 <- numeric(100)
y_5 <- numeric(100)

for(i in 2:100) {
  y_1[i] <- e[i] - 0.3*e[i-1]
  y_2[i] <- e[i] + 0.3*e[i-1]
  y_3[i] <- e[i] + 0.6*e[i-1]
  y_4[i] <- e[i] + 0.9*e[i-1]
  y_5[i] <- e[i] + 0.99*e[i-1]
}

sim_ma_1 <- tsibble(idx = seq_len(100),
                    theta_neg_0.3 = y_1, theta_0.3 = y_2, theta_0.6 = y_3,
                    theta_0.9 = y_4, theta_0.99 = y_5, index = idx)

sim_long_ma_1 <- sim_ma_1 %>%
  pivot_longer(!idx, names_to = 'theta', values_to = 'y')

autoplot(sim_long_ma_1, y)

Regardless of the value of \(\theta_1\), the series follows a similar pattern and remains stationary, but values of \(\theta_1\) farther from 0 produce greater variance around the mean, and a negative \(\theta_1\) makes consecutive values more likely to alternate in sign.
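This matches the theoretical variance of an MA(1) process, which increases with \(|\theta_1|\) but remains finite, so the series is stationary for any \(\theta_1\):

\[
\text{Var}(y_t) = (1+\theta_1^2)\sigma^2
\]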

e.)

Generate data from an ARMA(1,1) model with \(\phi_1=0.6\), \(\theta_1=0.6\) and \(\sigma^2=1\).

y <- numeric(100)
e <- rnorm(100)
for(i in 2:100)
  y[i] <- e[i] + 0.6*e[i-1] + 0.6*y[i-1]
sim_arma_1_1 <- tsibble(idx = seq_len(100), y = y, index = idx)

f.)

Generate data from an AR(2) model with \(\phi_1=-0.8\), \(\phi_2=0.3\) and \(\sigma^2=1\). (Note that these parameters will give a non-stationary series.)

y <- numeric(100)
e <- rnorm(100)
for(i in 3:100)
  y[i] <- -0.8*y[i-1] + 0.3*y[i-2] + e[i]
sim_ar_2 <- tsibble(idx = seq_len(100), y = y, index = idx)
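We can verify that these parameters give a non-stationary series by checking the AR(2) stationarity conditions:

\[
-1 < \phi_2 < 1, \qquad \phi_1+\phi_2 < 1, \qquad \phi_2-\phi_1 < 1
\]

Here \(\phi_2 - \phi_1 = 0.3-(-0.8) = 1.1 > 1\), so the third condition is violated.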

g.)

Graph the latter two series and compare them.

sim_arma_1_1 <- sim_arma_1_1 %>%
  rename(y_arma_1_1 = y)

sim_ar_2 <- sim_ar_2 %>%
  rename(y_ar_2 = y)

combined_data <- sim_arma_1_1 %>%
  left_join(sim_ar_2, by = "idx")  # joining on idx avoids the duplicated index columns bind_cols() creates
autoplot(combined_data, vars(y_arma_1_1, y_ar_2))

The ARMA(1,1) model produces roughly stationary data, while the AR(2) model produces oscillations whose amplitude grows exponentially: as shown above, its parameters violate the stationarity conditions.

7.

Consider aus_airpassengers, the total number of passengers (in millions) from Australian air carriers for the period 1970-2011.

aus_air_2011 <- aus_airpassengers %>%
  filter(Year <= 2011)
autoplot(aus_air_2011, Passengers) +
  labs(x = "Year", y = "Number of Passengers", title = "Number of Australian Air Passengers, 1970-2011")

a.)

Use ARIMA() to find an appropriate ARIMA model. What model was selected? Check that the residuals look like white noise. Plot forecasts for the next 10 periods.

aus_air_fit <- aus_air_2011 %>%
  model(ARIMA(Passengers))

report(aus_air_fit)
## Series: Passengers 
## Model: ARIMA(0,2,1) 
## 
## Coefficients:
##           ma1
##       -0.8756
## s.e.   0.0722
## 
## sigma^2 estimated as 4.671:  log likelihood=-87.8
## AIC=179.61   AICc=179.93   BIC=182.99

The selected model is an ARIMA(0,2,1) model.

aug_aus_air <- aus_air_fit %>%
  augment()

autoplot(aug_aus_air, .innov)

The residuals look similar to white noise.
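A more formal check (a sketch, not part of the original output) is a Ljung-Box test on the innovation residuals, with dof equal to the number of estimated coefficients (one here, the MA term); a large p-value would support the white-noise conclusion:

augment(aus_air_fit) %>%
  features(.innov, ljung_box, lag = 10, dof = 1)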

aus_air_forecast <- aus_air_fit %>%
  forecast(h = 10)

aus_air_forecast %>%
  autoplot(aus_air_2011) +
  geom_line(aes(y = .fitted), col="#D55E00",
            data = augment(aus_air_fit)) +
  labs(x = "Year", y = "Number of Passengers", title = "ARIMA(0,2,1) Forecast of Australian Air Passengers")+
  guides(colour = "none")

b.)

Write the model in terms of the backshift operator.

\[
(1-\phi_1B-\cdots-\phi_pB^p)(1-B)^d y_t = c+(1+\theta_1B+\cdots+\theta_qB^q)\epsilon_t
\]

With \(p=0\), \(d=2\), \(q=1\), \(c=0\) and \(\theta_1=-0.8756\), this becomes

\[
(1-B)^2 y_t = (1-0.8756B)\epsilon_t
\]

c.)

Plot forecasts from an ARIMA(0,1,0) model with drift and compare these to part a.

aus_air_fit <- aus_air_2011 %>%
  model(arima010_w_drift = ARIMA(Passengers ~ 1 + pdq(p=0, d=1, q=0)),
        arima021 = ARIMA(Passengers))
  
aus_air_forecast <- aus_air_fit %>%
  forecast(h = 10)

aus_air_forecast %>%
  autoplot(aus_air_2011, level = NULL) +
  labs(x = "Year", y = "Number of Passengers", title = "Comparing ARIMA Forecasts of Australian Air Passengers")

glance(aus_air_fit) %>% arrange(AICc)
## # A tibble: 2 × 8
##   .model           sigma2 log_lik   AIC  AICc   BIC ar_roots  ma_roots 
##   <chr>             <dbl>   <dbl> <dbl> <dbl> <dbl> <list>    <list>   
## 1 arima021           4.67   -87.8  180.  180.  183. <cpl [0]> <cpl [1]>
## 2 arima010_w_drift   4.63   -89.1  182.  182.  186. <cpl [0]> <cpl [0]>

The ARIMA(0,1,0) model with drift produces lower forecasts than the automatically selected ARIMA(0,2,1) model without drift: its drift term extrapolates the average historical growth, while the twice-differenced model follows the steeper recent trend. The original ARIMA(0,2,1) model is preferable, given its lower AICc value.

d.)

Plot forecasts from an ARIMA(2,1,2) model with drift and compare these to parts a and c. Remove the constant and see what happens.

aus_air_fit <- aus_air_2011 %>%
  model(arima010_w_drift = ARIMA(Passengers ~ 1 + pdq(p=0, d=1, q=0)),
        arima021 = ARIMA(Passengers),
        arima212_w_drift = ARIMA(Passengers ~ 1 + pdq(p=2, d=1, q=2)))
  
aus_air_forecast <- aus_air_fit %>%
  forecast(h = 10)

aus_air_forecast %>%
  autoplot(aus_air_2011, level = NULL) +
  labs(x = "Year", y = "Number of Passengers", title = "Comparing ARIMA Forecasts of Australian Air Passengers")

glance(aus_air_fit) %>% arrange(AICc)
## # A tibble: 3 × 8
##   .model           sigma2 log_lik   AIC  AICc   BIC ar_roots  ma_roots 
##   <chr>             <dbl>   <dbl> <dbl> <dbl> <dbl> <list>    <list>   
## 1 arima021           4.67   -87.8  180.  180.  183. <cpl [0]> <cpl [1]>
## 2 arima010_w_drift   4.63   -89.1  182.  182.  186. <cpl [0]> <cpl [0]>
## 3 arima212_w_drift   4.75   -87.7  187.  190.  198. <cpl [2]> <cpl [2]>

The ARIMA(2,1,2) model with drift produces only slightly lower forecasts than the automatically selected ARIMA(0,2,1) model without drift. The original automatically selected model is still preferable, given it has the lowest AICc value.

aus_air_fit <- aus_air_2011 %>%
  model(arima010_w_drift = ARIMA(Passengers ~ 1 + pdq(p=0, d=1, q=0)),
        arima021 = ARIMA(Passengers),
        arima212 = ARIMA(Passengers ~ 0 + pdq(p=2, d=1, q=2)))
## Warning: 1 error encountered for arima212
## [1] non-stationary AR part from CSS

Removing the constant from the ARIMA(2,1,2) model produces an error: the AR part estimated by the default CSS method is non-stationary, so fable returns a NULL model for that specification.

e.)

Plot forecasts from an ARIMA(0,2,1) model with a constant. What happens?

aus_air_fit <- aus_air_2011 %>%
  model(arima021_w_drift = ARIMA(Passengers ~ 1 + pdq(p=0, d=2, q=1)))
## Warning: Model specification induces a quadratic or higher order polynomial trend. 
## This is generally discouraged, consider removing the constant or reducing the number of differences.
aus_air_forecast <- aus_air_fit %>%
  forecast(h = 10)

aus_air_forecast %>%
  autoplot(aus_air_2011) +
  labs(x = "Year", y = "Number of Passengers", title = "ARIMA(0,2,1) Forecast, with Constant, of Australian Air Passengers")

report(aus_air_fit)
## Series: Passengers 
## Model: ARIMA(0,2,1) w/ poly 
## 
## Coefficients:
##           ma1  constant
##       -1.0000    0.0721
## s.e.   0.0709    0.0260
## 
## sigma^2 estimated as 4.086:  log likelihood=-85.74
## AIC=177.48   AICc=178.15   BIC=182.55

The model produces a warning, since a constant combined with two orders of differencing induces a quadratic trend in the forecasts, which is generally discouraged. The estimate of \(\theta_1\) is pushed to the boundary value of \(-1\), and we can see on the graph that the forecast curves upward rather than following a straight line.

8.

For the United States GDP series (from global_economy):

us_economy <- global_economy %>%
  filter(Country == "United States")

autoplot(us_economy, GDP) +
  labs(x = "Year", y = "GDP", title = "United States GDP, 1960-2017")

a.)

if necessary, find a suitable Box-Cox transformation for the data;

lambda_8 <- us_economy |>
  features(GDP, features = guerrero) |>
  pull(lambda_guerrero)

us_economy <- us_economy %>%
  mutate(GDP_bc = box_cox(GDP, lambda_8))

autoplot(us_economy, GDP_bc) +
  labs(x = "Year", y = "Box-Cox Transformed US GDP", title = "Box-Cox Transformed US GDP")

b.)

fit a suitable ARIMA model to the transformed data using ARIMA();

us_gdp_fit <- us_economy %>%
  model(ARIMA(GDP_bc))

report(us_gdp_fit)
## Series: GDP_bc 
## Model: ARIMA(1,1,0) w/ drift 
## 
## Coefficients:
##          ar1  constant
##       0.4586  118.1822
## s.e.  0.1198    9.5047
## 
## sigma^2 estimated as 5479:  log likelihood=-325.32
## AIC=656.65   AICc=657.1   BIC=662.78

The selected model is an ARIMA(1,1,0) model with drift, which can be written as \((1-0.4586B)(1-B)w_t=118.1822+\epsilon_t\), where \(w_t\) denotes the Box-Cox transformed GDP.

c.)

try some other plausible models by experimenting with the orders chosen;

us_gdp_fit <- us_economy %>%
  model(arima110_w_drift = ARIMA(GDP_bc),
        arima210_w_drift = ARIMA(GDP_bc ~ 1 + pdq(2,1,0)),
        arima111_w_drift = ARIMA(GDP_bc ~ 1 + pdq(1,1,1)),
        arima110 = ARIMA(GDP_bc ~ 0 + pdq(1,1,0)))
        
glance(us_gdp_fit) %>% arrange(AICc)
## # A tibble: 4 × 9
##   Country       .model        sigma2 log_lik   AIC  AICc   BIC ar_roots ma_roots
##   <fct>         <chr>          <dbl>   <dbl> <dbl> <dbl> <dbl> <list>   <list>  
## 1 United States arima110_w_d…  5479.   -325.  657.  657.  663. <cpl>    <cpl>   
## 2 United States arima111_w_d…  5580.   -325.  659.  659.  667. <cpl>    <cpl>   
## 3 United States arima210_w_d…  5580.   -325.  659.  659.  667. <cpl>    <cpl>   
## 4 United States arima110       7038.   -334.  672.  672.  676. <cpl>    <cpl>

The original model is still preferable because it has the lowest AICc.

d.)

choose what you think is the best model and check the residual diagnostics;

us_gdp_fit <- us_economy %>%
  model(arima110_w_drift = ARIMA(GDP_bc))

us_gdp_fit %>%
  gg_tsresiduals()

augment(us_gdp_fit) %>%
  features(.innov, ljung_box, lag = 10, dof = 3)
## # A tibble: 1 × 4
##   Country       .model           lb_stat lb_pvalue
##   <fct>         <chr>              <dbl>     <dbl>
## 1 United States arima110_w_drift    3.81     0.801

The residual plots, together with the large p-value from the Ljung-Box (portmanteau) test, are consistent with the residuals being white noise.

e.)

produce forecasts of your fitted model. Do the forecasts look reasonable?

us_gdp_forecast <- us_gdp_fit %>%
  forecast(h = 10)

us_gdp_forecast %>%
  autoplot(us_economy) +
  geom_line(aes(y = .fitted), col="#D55E00",
            data = augment(us_gdp_fit)) +
  labs(x = "Year", y = "Box-Cox Transformed GDP", title = "ARIMA Forecast of Box-Cox Transformed US GDP") +
  guides(colour = "none")

Yes, the forecasts look reasonable: they continue the steady upward trend of the transformed series.

f.)

compare the results with what you would obtain using ETS() (with no transformation).

us_economy %>%
  slice(-(n()-4):-n()) %>%
  stretch_tsibble(.init = 10) %>%
  model(
    ETS(GDP),
    ARIMA(GDP)
  ) %>%
  forecast(h = 5) %>%
  accuracy(us_economy) %>%
  select(.model, RMSE:MAPE)
## # A tibble: 2 × 5
##   .model              RMSE           MAE   MPE  MAPE
##   <chr>              <dbl>         <dbl> <dbl> <dbl>
## 1 ARIMA(GDP) 794911020348. 420606689328.  4.71  6.89
## 2 ETS(GDP)   594154114481. 365653621312.  2.59  4.80

Under time-series cross-validation, the ETS model has better accuracy measures on the untransformed data across RMSE, MAE, MPE and MAPE.

us_economy %>%
  model(ETS(GDP)) %>%
  forecast(h = 5) %>%
  autoplot(us_economy %>% filter(Year >= 2000)) +
  labs(title = "ETS Forecast of United States GDP",
       y = "GDP")