#load dataset

library(readr)
dj <- read_csv("^DJI.csv")
Parsed with column specification:
cols(
  Week = col_character(),
  Close = col_double()
)
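#the parsing message above can be silenced by supplying the column
#specification explicitly (same two columns as read_csv detected):
dj <- read_csv("^DJI.csv",
               col_types = cols(Week = col_character(),
                                Close = col_double()))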
#load packages
library(forecast)
library(xts)
library(TTR)
library(tseries)
#explore data
names(dj)
[1] "Week"  "Close"
names(dj)[2]<-"close.price"
head(dj)

plot(dj$close.price,type="l",xlab="Week",ylab="closing price",main="historical data")

#convert the series to ts objects with quarterly and monthly frequency
dj.ts.q<-ts(dj[,2],frequency = 4) #numeric column only; passing the whole data frame coerces Week to NA
dj.ts.m<-ts(dj[,2],frequency = 12)
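#xts is loaded but unused so far; a date-indexed alternative is a one-liner
#(a sketch, assuming the Week column parses as a "yyyy-mm-dd" date string):
dj.xts <- xts(dj$close.price, order.by = as.Date(dj$Week))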
#decompose the monthly series
plot(decompose(dj.ts.m))

#the decomposition doesn't look like an additive trend, so I will go with multiplicative ETS from here forward
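#to sanity-check the multiplicative choice, decompose() also supports a
#multiplicative type; its seasonal panel should hover around 1 rather than 0:
plot(decompose(dj.ts.m, type = "multiplicative"))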
#ETS models

ets1<-ets(dj.ts.m, model="ANN")
out_ets1<-forecast(ets1,h=24)
e1<-summary(ets1)
ETS(A,N,N) 

Call:
 ets(y = dj.ts.m, model = "ANN") 

  Smoothing parameters:
    alpha = 0.9999 

  Initial states:
    l = 20598.5831 

  sigma:  673.3649

     AIC     AICc      BIC 
2842.676 2842.833 2851.845 

Training set error measures:
                    ME     RMSE      MAE         MPE     MAPE      MASE
Training set -9.072149 669.0622 402.7747 -0.08508425 1.642472 0.3198343
                   ACF1
Training set 0.08826075
e1
                    ME     RMSE      MAE         MPE     MAPE      MASE
Training set -9.072149 669.0622 402.7747 -0.08508425 1.642472 0.3198343
                   ACF1
Training set 0.08826075
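#with alpha = 0.9999, ETS(A,N,N) is essentially a naive (last-value) forecast;
#a quick check against forecast::naive(), whose point forecasts should nearly coincide:
out_naive <- naive(dj.ts.m, h = 24)
cbind(ets = out_ets1$mean[1:3], naive = out_naive$mean[1:3])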
ets2<-ets(dj.ts.m, model="MAM")
out_ets2<-forecast(ets2,h=24)
e2<-summary(ets2)
ETS(M,Ad,M) 

Call:
 ets(y = dj.ts.m, model = "MAM") 

  Smoothing parameters:
    alpha = 0.9412 
    beta  = 0.14 
    gamma = 1e-04 
    phi   = 0.9672 

  Initial states:
    l = 20498.4185 
    b = 137.5281 
    s = 1.0024 1.0011 0.9972 1.0061 1.0014 0.9971
           0.9959 0.9992 0.9995 1.0021 0.9999 0.998

  sigma:  0.0272

     AIC     AICc      BIC 
2858.002 2862.959 2913.015 

Training set error measures:
                    ME    RMSE      MAE        MPE     MAPE      MASE       ACF1
Training set -48.18525 645.276 408.9003 -0.2341012 1.662035 0.3246985 0.03830966
 
e2
                    ME    RMSE      MAE        MPE     MAPE      MASE       ACF1
Training set -48.18525 645.276 408.9003 -0.2341012 1.662035 0.3246985 0.03830966
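#note the summary reports ETS(M,Ad,M): with damped left at its default of NULL,
#ets() chose a damped trend (phi = 0.9672); to force an undamped MAM fit for
#comparison (a sketch):
ets2.nd <- ets(dj.ts.m, model = "MAM", damped = FALSE)
summary(ets2.nd)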
#auto.arima

#fit on the first 138 observations, holding back the last 24 as a test set
arho<-auto.arima(dj.ts.m[1:138], stepwise=FALSE, approximation=FALSE)
out_arho<-forecast(arho,h=24)
a2<-summary(arho)
Series: dj.ts.m[1:138] 
ARIMA(0,1,0) 

sigma^2 estimated as 231460:  log likelihood=-1040.52
AIC=2083.03   AICc=2083.06   BIC=2085.95

Training set error measures:
                   ME     RMSE      MAE       MPE     MAPE     MASE       ACF1
Training set 51.48635 479.3564 346.8576 0.1962078 1.401475 0.993181 -0.1064762
a2
                   ME     RMSE      MAE       MPE     MAPE     MASE       ACF1
Training set 51.48635 479.3564 346.8576 0.1962078 1.401475 0.993181 -0.1064762
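#caveat: dj.ts.m[1:138] drops the ts attributes, so auto.arima() sees a plain
#numeric vector and cannot consider seasonal terms; a sketch that keeps the
#seasonal structure (start defaults to c(1,1), so observation 138 is c(12,6)):
dj.train <- window(dj.ts.m, end = c(12, 6))
arho.s <- auto.arima(dj.train, stepwise = FALSE, approximation = FALSE)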
#graphs of all models

par(mfrow=c(2,2))
plot(ets1)
plot(ets2)
plot(arho) #ARIMA(0,1,0) has no AR or MA terms, hence the message below
No roots to plot

#evaluate the ARIMA forecasts against the held-out observations 139-162
accuracy(out_arho,dj.ts.m[139:162])
                     ME      RMSE       MAE        MPE     MAPE     MASE
Training set   51.48635  479.3564  346.8576  0.1962078 1.401475 0.993181
Test set     -188.50554 2480.3749 1610.5705 -1.7311321 6.660710 4.611656
                   ACF1
Training set -0.1064762
Test set             NA
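#out_ets1 and out_ets2 were produced from models fit on the full series, so
#scoring them on the holdout would not be a fair comparison; a sketch that
#refits both on the first 138 observations and scores the held-out 24:
dj.train <- window(dj.ts.m, end = c(12, 6))
dj.test  <- window(dj.ts.m, start = c(12, 7))
accuracy(forecast(ets(dj.train, model = "ANN"), h = 24), dj.test)
accuracy(forecast(ets(dj.train, model = "MAM"), h = 24), dj.test)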
#comparison of models

comparison=matrix(c(AIC(ets1),BIC(ets1),e1[,"RMSE"],e1[,"MAE"],
                    AIC(ets2),BIC(ets2),e2[,"RMSE"],e2[,"MAE"],
                    AIC(arho),BIC(arho),a2[,"RMSE"],a2[,"MAE"])
                  ,ncol=4,byrow=TRUE)
colnames(comparison)=c("AIC","BIC","RMSE","MAE")
rownames(comparison)=c("model1-ANN","model2-MAM","model3-auto.arima")
comparison.table<-as.table(comparison)
comparison.table
                        AIC       BIC      RMSE       MAE
model1-ANN        2842.6760 2851.8447  669.0622  402.7747
model2-MAM        2858.0021 2913.0145  645.2760  408.9003
model3-auto.arima 2083.0335 2085.9535  479.3564  346.8576
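#caveat: the AIC/BIC columns are not comparable across the three rows, since
#the ETS models were fit on all 162 observations while auto.arima saw only the
#first 138; information criteria only rank models fit to the same data.
#residual diagnostics are a useful complement:
checkresiduals(arho)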