#bring in the data
library(readr)
wmt <- read_csv("WMT For Homework.csv")
Parsed with column specification:
cols(
  Date = col_character(),
  Open = col_double(),
  High = col_double(),
  Low = col_double(),
  Close = col_double(),
  `Adj Close` = col_double(),
  Volume = col_double()
)
#load packages needed
library(readr)
library(ggplot2)
library(ggfortify)
library(vars)
library(forecast)
library(psych)
#Data Exploration / Plots
#convert the monthly close prices to a time series starting January 2013
wmtTS <- ts(wmt$Close, frequency=12, start=c(2013,1))
describe(wmtTS)
summary(wmtTS)
Min. 1st Qu. Median Mean 3rd Qu. Max.
57.24 71.31 75.59 77.76 82.89 106.60
#plot
autoplot(wmtTS) + xlab("Month") + ylab("Monthly Close Price")

acf(wmtTS)

pacf(wmtTS)

#model 1 - ETS
## Autoselection is used in the model argument using "Z" values
model1 <- ets(wmtTS, model="ZZZ", damped=NULL, alpha=NULL, beta=NULL,
              gamma=NULL, phi=NULL, lambda=NULL, biasadj=FALSE,
              additive.only=FALSE, restrict=TRUE,
              allow.multiplicative.trend=FALSE)
model1
ETS(M,N,N)

Call:
 ets(y = wmtTS, model = "ZZZ", damped = NULL, alpha = NULL, beta = NULL,
     gamma = NULL, phi = NULL, additive.only = FALSE, lambda = NULL,
     biasadj = FALSE, restrict = TRUE, allow.multiplicative.trend = FALSE)
Smoothing parameters:
alpha = 0.9999
Initial states:
l = 69.7594
sigma: 0.0521
AIC AICc BIC
511.7460 512.0989 518.5760
#plot model1
autoplot(model1)

## Residuals of the Model
cbind('Residuals' = residuals(model1),
      'Forecast errors' = residuals(model1, type='response')) %>%
  autoplot(facets=TRUE) + xlab("Year") + ylab("")

checkresiduals(model1)
Ljung-Box test
data: Residuals from ETS(M,N,N)
Q* = 6.7193, df = 12, p-value = 0.8756
Model df: 2. Total lags used: 14

#Model 1 Overview
#For the first model I used the autoselection feature of the ETS function ("ZZZ" for each component). This lets R fit the best possible ETS specification to the time series and avoids a lot of the guess-and-check of choosing one by hand. An ETS model looks at three components: error, trend, and seasonality. The residual ACF chart for this model shows every value falling inside the critical bounds, which means there is no significant autocorrelation at any lag: the residuals behave like white noise, which is what a well-fitting model should leave behind.
#To evaluate this model I looked at the AIC, AICc, and BIC values. The model had an AIC of 511.75, an AICc of 512.10, and a BIC of 518.58
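#As a quick sanity check on the autoselection (a minimal sketch, assuming the wmtTS and model1 objects from above), a manually specified ETS variant can be fit and compared on AICc to confirm the "ZZZ" search is earning its keep:
manualETS <- ets(wmtTS, model="ANN")  #additive errors, no trend, no seasonality
c(auto = model1$aicc, manual = manualETS$aicc)  #lower AICc is better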
#Model 2
#Auto Arima
auto.arima(wmtTS)
Series: wmtTS
ARIMA(0,1,0)
sigma^2 estimated as 18.08: log likelihood=-203.5
AIC=409.01 AICc=409.07 BIC=411.27
#refit the auto-selected non-seasonal order with an added seasonal difference, (0,1,0)[12]
model2 <- Arima(wmtTS, order=c(0,1,0), seasonal=c(0,1,0))
checkresiduals(model2)
Ljung-Box test
data: Residuals from ARIMA(0,1,0)(0,1,0)[12]
Q* = 22.255, df = 14, p-value = 0.07352
Model df: 0. Total lags used: 14

summary(model2)
Series: wmtTS
ARIMA(0,1,0)(0,1,0)[12]
sigma^2 estimated as 34.41: log likelihood=-188.1
AIC=378.2 AICc=378.27 BIC=380.28
Training set error measures:
ME RMSE MAE MPE MAPE MASE ACF1
Training set -0.1454743 5.31032 3.618168 -0.2757981 4.620008 0.3823691 0.06752075
#forecast forward 12 months
model2F <- forecast(model2, h=12)
model2F
#plot Auto Arima
autoplot(model2F) + xlab("Month") + ylab("Monthly Close Price")

summary(model2F)
Forecast method: ARIMA(0,1,0)(0,1,0)[12]
Model Information:
Series: wmtTS
ARIMA(0,1,0)(0,1,0)[12]
sigma^2 estimated as 34.41: log likelihood=-188.1
AIC=378.2 AICc=378.27 BIC=380.28
Error measures:
ME RMSE MAE MPE MAPE MASE ACF1
Training set -0.1454743 5.31032 3.618168 -0.2757981 4.620008 0.3823691 0.06752075
Forecasts:
#Model 2 Discussion
#Like model 1, I used a model with an autoselection option. The auto.arima function chose ARIMA(0,1,0) as the best non-seasonal model for the data, and I refit that order with a seasonal difference as ARIMA(0,1,0)(0,1,0)[12]. Non-seasonal ARIMA models work best on data with little to no seasonality. For this data set the residual ACF did not show strong correlation at any lag, and the Ljung-Box p-value of 0.074 suggests the residuals are close to white noise. Again, the ARIMA model produced AIC, AICc, and BIC values for comparison.
# The fitted model's AIC was 378.20, its AICc was 378.27, and its BIC was 380.28
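#A short check of why auto.arima lands on d = 1 (a sketch, assuming wmtTS from above): ndiffs() estimates how many regular differences the series needs to be stationary, and nsdiffs() estimates the seasonal differences.
ndiffs(wmtTS)   #number of regular differences suggested by a unit-root test
nsdiffs(wmtTS)  #number of seasonal differences suggested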
#Model 3 & 4
#Holt-Winters
model3 <- hw(wmtTS, seasonal="multiplicative", damped=FALSE, h=12)
model3
model4 <- hw(wmtTS, seasonal="additive", damped=FALSE, h=12)
model4
summary(model3)
Forecast method: Holt-Winters' multiplicative method
Model Information:
Holt-Winters' multiplicative method
Call:
hw(y = wmtTS, h = 12, seasonal = "multiplicative", damped = FALSE)
Smoothing parameters:
alpha = 0.9706
beta = 1e-04
gamma = 3e-04
Initial states:
l = 71.7697
b = 0.0938
s = 1.0243 1.046 0.992 0.9697 0.9717 0.9906
0.9749 0.9879 1.0103 1.0025 0.9976 1.0325
sigma: 0.0548
AIC AICc BIC
531.0589 542.3923 569.7623
Error measures:
ME RMSE MAE MPE MAPE MASE ACF1
Training set 0.1754923 3.855351 2.993427 0.09080314 3.836737 0.3163463 0.03482605
Forecasts:
summary(model4)
Forecast method: Holt-Winters' additive method
Model Information:
Holt-Winters' additive method
Call:
hw(y = wmtTS, h = 12, seasonal = "additive", damped = FALSE)
Smoothing parameters:
alpha = 0.9978
beta = 0.0114
gamma = 1e-04
Initial states:
l = 71.6379
b = 0.0112
s = 1.9301 3.2594 -1.0069 -2.2947 -2.503 -0.0194
-2.028 -1.2186 0.5313 0.8513 0.3609 2.1376
sigma: 4.4892
AIC AICc BIC
540.0666 551.4000 578.7699
Error measures:
ME RMSE MAE MPE MAPE MASE ACF1
Training set 0.2100296 3.959107 2.998012 0.1496457 3.832977 0.3168309 0.00200536
Forecasts:
checkresiduals(model3)
Ljung-Box test
data: Residuals from Holt-Winters' multiplicative method
Q* = 14.007, df = 3, p-value = 0.002896
Model df: 16. Total lags used: 19

checkresiduals(model4)
Ljung-Box test
data: Residuals from Holt-Winters' additive method
Q* = 14.658, df = 3, p-value = 0.002134
Model df: 16. Total lags used: 19

#plot model 3 and 4
#model 3
autoplot(model3) + xlab("Month") + ylab("Monthly Close Price")

#model 4
autoplot(model4) + xlab("Month") + ylab("Monthly Close Price")

#Model 3/4 Discussion
#The first thing I noticed about these two models was how wide the forecast intervals ended up being compared to the other models. The multiplicative model came out slightly ahead in the comparisons, which makes sense: given the series' upward trend and seasonality, multiplicative seasonality is the more appropriate choice.
#The additive model produced an AIC of 540.07, an AICc of 551.40, and a BIC of 578.77
#The multiplicative model produced an AIC of 531.06, an AICc of 542.39, and a BIC of 569.76
#Overall, the multiplicative model produced the better scores, though the two were close on the error measures.
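#To put the two Holt-Winters fits side by side (a sketch, assuming the model3 and model4 objects from above), the training-set error measures can be stacked into one table:
hwAcc <- rbind(accuracy(model3), accuracy(model4))
rownames(hwAcc) <- c("HW multiplicative", "HW additive")
hwAcc  #lower RMSE/MAE/MASE indicates the better in-sample fit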
#Best Model
#When looking at the models and their overall scores, one thing should be mentioned: AIC values cannot be compared between model classes. So just because the ARIMA had a lower score than the ETS does not automatically mean it was better; the AIC is only useful for ranking candidates within the same class. Looking at this data set, the best model was the auto-selected ARIMA. Its forecast graph captured the trend and seasonality well while keeping the prediction intervals narrower than the other models'. The residual diagnostics backed the model up: the ACF showed little correlation between lags, meaning the residuals behave like white noise, and the residual histogram looked roughly normal. Overall, the ARIMA method was the best fit for the data.
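#Since AIC cannot compare model classes, a fairer tiebreaker is out-of-sample accuracy. A hedged sketch (assuming the series extends past 2017; adjust the split dates to the data): hold out the last 12 months, refit each model on the training window, and compare forecasts on the holdout.
train <- window(wmtTS, end=c(2017,12))
test <- window(wmtTS, start=c(2018,1))
fc1 <- forecast(ets(train, model="ZZZ"), h=length(test))
fc2 <- forecast(Arima(train, order=c(0,1,0), seasonal=c(0,1,0)), h=length(test))
fc3 <- hw(train, seasonal="multiplicative", h=length(test))
rbind(ETS = accuracy(fc1, test)["Test set",],
      ARIMA = accuracy(fc2, test)["Test set",],
      HW = accuracy(fc3, test)["Test set",])  #compare RMSE/MAPE on the holdout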