QUESTION ONE

This question refers to the data set that you are using for your project. For your data set, determine the best forecasting method (average, naïve, seasonal naïve, exponential smoothing, Holt-Winters' method, ETS, ARIMA). Use 80% of the data for training and 20% of the data for testing. Using the best model, forecast your data set for 6 periods into the future.

Load Data and Library Packages
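The library and import calls themselves are not shown in this write-up; a minimal sketch of what is assumed to be loaded (the forecast, tseries, vars, and knitr packages, plus a CSV file whose name indigo_data.csv is hypothetical):
library(forecast)   # meanf, naive, snaive, ses, holt, hw, ets, auto.arima, accuracy
library(tseries)    # adf.test, kpss.test
library(vars)       # VAR estimation used in Question Two
library(knitr)      # kable
indigo_data <- read.csv("indigo_data.csv")   # hypothetical file name; adjust to your data path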
Define the data as a time series and name the variable
indigo_data = ts(indigo_data, start=2002, frequency=4)
data = indigo_data[,3]
Set training data, test data, out of sample
train <- window(data, start=c(2002, 1), end=c(2014, 2))
test <- window(data, start=c(2014, 3), end=c(2017, 3))  # test window begins one quarter after the training window ends
both <- window(data,start=c(2002, 1))
h=length(test)
Forecast using Average, Naïve, and Seasonal Naïve Method
Indigofit1 <- meanf(train, h=h)
Indigofit2 <- naive(train, h=h)
Indigofit3 <- snaive(train, h=h)
Plot forecasts for Average, Naïve, and Seasonal Naïve Method
plot(Indigofit1, PI=FALSE,
     main="Forecasts for quarterly Indigo sales")
lines(Indigofit2$mean,col=2)
lines(Indigofit3$mean,col=3)
legend("topleft",lty=1,col=c(4,2,3),
       legend=c("Mean method","Naive method","Seasonal naive method"),bty="n")

Forecast using simple exponential smoothing (SES)
Indigofit4 <- ses(train, h = h)
plot(Indigofit4)

Forecast using Holt’s linear trend method
Indigofit5 <- holt(train,  h=h) 
plot(Indigofit5)

Forecast using Holt’s exponential trend method
Indigofit6 <- holt(train, exponential=TRUE, h=h) 
plot(Indigofit6)

Forecast using Holt’s damped trend method
Indigofit7 <- holt(train, damped=TRUE, h=h) 
plot(Indigofit7)

Forecast using Holt-Winters smoothing methods
Indigofit8 <- hw(train, seasonal="multiplicative", h=h) 
plot(Indigofit8)

Indigofit9 <- hw(train, seasonal="additive", h=h) 
plot(Indigofit9)

plot(Indigofit6, ylab="Indigo sales",
     PI=FALSE, type="o", fcol="white", xlab="Year", main="Holt-Winters Seasonal Smoothing Forecasts")
lines(fitted(Indigofit8), col="red", lty=2)
lines(fitted(Indigofit9), col="green", lty=2)
lines(Indigofit8$mean, type="o", col="red")
lines(Indigofit9$mean, type="o", col="green")
legend("topleft",lty=1, pch=1, col=1:3, 
       c("data","Holt Winters' Additive","Holt Winters' Multiplicative"))

Forecast using an automatically selected ETS (exponential smoothing state space) model
y.ets <- ets(train, model="ZZZ") 
Indigofit10 <- forecast(y.ets, h=h)
plot(Indigofit10)
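To see which error, trend, and seasonal components ets() selected on the training data, the fitted model can be inspected; a minimal sketch (output not shown here):
summary(y.ets)   # reports the selected ETS specification, smoothing parameters, and information criteria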

Forecasting using Seasonal Decomposition of Time Series by Loess
y.stl <- stl(train, t.window=15, s.window="periodic", robust=TRUE)
summary(y.stl)
##  Call:
##  stl(x = train, s.window = "periodic", t.window = 15, robust = TRUE)
## 
##  Time.series components:
##     seasonal             trend            remainder       
##  Min.   :-40.43530   Min.   :181.9106   Min.   :-9.66224  
##  1st Qu.:-37.32012   1st Qu.:199.2795   1st Qu.:-3.26187  
##  Median :-27.97458   Median :216.5959   Median :-0.26496  
##  Mean   : -1.36820   Mean   :215.3320   Mean   : 2.43617  
##  3rd Qu.:-22.84995   3rd Qu.:231.3167   3rd Qu.: 6.55513  
##  Max.   : 91.25982   Max.   :240.8440   Max.   :38.38922  
##  IQR:
##      STL.seasonal STL.trend STL.remainder data  
##      14.470       32.037     9.817        50.000
##    %  28.9         64.1      19.6         100.0 
## 
##  Weights:
##      Min.   1st Qu.    Median      Mean   3rd Qu.      Max. 
## 0.0000000 0.8507417 0.9451216 0.8629910 0.9945073 0.9999257 
## 
##  Other components: List of 5
##  $ win  : Named num [1:3] 501 15 5
##  $ deg  : Named int [1:3] 0 1 1
##  $ jump : Named num [1:3] 51 2 1
##  $ inner: int 1
##  $ outer: int 15
Indigofit11 <- forecast(y.stl, method="rwdrift",h=h)
plot(Indigofit11)

Forecasting using a linear regression on trend plus seasonal dummies (tslm)
tps <- tslm(train ~ trend + season)
Indigofit12 = forecast(tps,h=h)
plot(Indigofit12)

Forecasting using ARIMA modelling
adf.test(train, alternative = "stationary")
kpss.test(train)
ndiffs(train)

y.arima <- auto.arima(train)
Indigofit13 <- forecast(y.arima, h=h)
plot(Indigofit13)

Forecast using ARIMA on log-transformed data (Box-Cox lambda = 0) to reduce variability
y.arima.lambda <- auto.arima(train, lambda=0)
Indigofit14 <- forecast(y.arima.lambda, h=h)
plot(Indigofit14)

tsdiag(y.arima.lambda)
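As an additional residual diagnostic (assuming a reasonably recent version of the forecast package), checkresiduals() combines a residual plot, the residual ACF, and a Ljung-Box test in one call; a minimal sketch:
checkresiduals(y.arima.lambda)   # Ljung-Box test plus residual and ACF plots for the log-ARIMA fit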

Accuracy Measures
a1Indigofit1 = accuracy(Indigofit1, test)
a2Indigofit2 = accuracy(Indigofit2, test)
a3Indigofit3 = accuracy(Indigofit3, test)
a4Indigofit4 = accuracy(Indigofit4, test)
a5Indigofit5 = accuracy(Indigofit5, test)
a6Indigofit6 = accuracy(Indigofit6, test)
a7Indigofit7 = accuracy(Indigofit7, test)
a8Indigofit8 = accuracy(Indigofit8, test)
a9Indigofit9 = accuracy(Indigofit9, test)
a10Indigofit10 = accuracy(Indigofit10, test)
a11Indigofit11 = accuracy(Indigofit11, test)
a12Indigofit12 = accuracy(Indigofit12, test)
a13Indigofit13 = accuracy(Indigofit13, test)
a14Indigofit14 = accuracy(Indigofit14, test)
Combining forecast summary statistics into a table with row names
a.table <- rbind(a1Indigofit1, a2Indigofit2, a3Indigofit3, a4Indigofit4, a5Indigofit5, a6Indigofit6, a7Indigofit7, a8Indigofit8, a9Indigofit9, a10Indigofit10, a11Indigofit11, a12Indigofit12, a13Indigofit13,a14Indigofit14)

row.names(a.table)<-c('Mean training','Mean test', 'Naive training', 'Naive test', 'S. Naive training', 'S. Naive test' , 'ES training','ES test', 'Holt linear training', 'Holt linear test', 'Holt ES training', 'Holt ES test' , 'Holt damped training','Holt damped test', 'Holt Winters multiplicative training', 'Holt Winters multiplicative test', 'Holt Winters additive training', 'Holt Winters additive test', 'ETS training', 'ETS test','STL training','STL test','linear trend training','linear trend test' , 'ARIMA training', 'ARIMA test', 'ARIMA training (log)', 'ARIMA test (log)')

a.table<-as.data.frame(a.table)

kable(a.table, caption="Forecast accuracy",digits = 4 )
Forecast accuracy

| | ME | RMSE | MAE | MPE | MAPE | MASE | ACF1 | Theil's U |
|---|---|---|---|---|---|---|---|---|
| Mean training | 0.0000 | 59.5876 | 47.8720 | -6.4317 | 21.4118 | 4.8186 | -0.1012 | NA |
| Mean test | 30.9077 | 85.7059 | 60.2615 | 4.6263 | 20.3423 | 6.0657 | -0.1527 | 0.8400 |
| Naive training | 0.8980 | 88.4072 | 68.3265 | -5.8694 | 29.3440 | 6.8775 | -0.3815 | NA |
| Naive test | 68.3077 | 105.1482 | 68.3077 | 21.1096 | 21.1096 | 6.8756 | -0.1527 | 1.0195 |
| S. Naive training | 2.8913 | 12.1145 | 9.9348 | 1.2172 | 4.8370 | 1.0000 | 0.4454 | NA |
| S. Naive test | 20.2308 | 28.4483 | 21.0000 | 7.8957 | 8.1647 | 2.1138 | 0.5471 | 0.2896 |
| ES training | -0.0242 | 59.5906 | 47.8863 | -6.4440 | 21.4208 | 4.8201 | -0.1012 | NA |
| ES test | 30.8634 | 85.6899 | 60.2649 | 4.6068 | 20.3480 | 6.0661 | -0.1527 | 0.8399 |
| Holt linear training | -9.8360 | 58.6791 | 52.9862 | -10.5803 | 24.2308 | 5.3334 | -0.1772 | NA |
| Holt linear test | 14.8023 | 82.7412 | 65.8098 | -2.6189 | 24.4579 | 6.6242 | -0.1215 | 0.8260 |
| Holt ES training | -5.8522 | 58.8101 | 51.4181 | -8.3263 | 22.8648 | 5.1756 | -0.1768 | NA |
| Holt ES test | 48.8698 | 97.7867 | 67.7618 | 12.1629 | 22.4092 | 6.8207 | -0.0533 | 0.9787 |
| Holt damped training | 3.1409 | 58.9391 | 46.6316 | -4.2784 | 20.0658 | 4.6938 | -0.1525 | NA |
| Holt damped test | 16.5625 | 82.2135 | 64.9485 | -1.7567 | 23.8837 | 6.5375 | -0.1367 | 0.8178 |
| Holt Winters multiplicative training | -2.0446 | 9.7421 | 8.0996 | -1.1484 | 3.8516 | 0.8153 | 0.0627 | NA |
| Holt Winters multiplicative test | 44.6692 | 57.7153 | 44.6692 | 16.6514 | 16.6514 | 4.4962 | 0.3261 | 0.5898 |
| Holt Winters additive training | -1.6080 | 9.4452 | 7.3524 | -1.0898 | 3.5492 | 0.7401 | 0.0468 | NA |
| Holt Winters additive test | 40.5063 | 49.4899 | 40.5063 | 16.2902 | 16.2902 | 4.0772 | 0.5929 | 0.5025 |
| ETS training | 1.1397 | 9.6818 | 7.4391 | 0.1935 | 3.5230 | 0.7488 | -0.0961 | NA |
| ETS test | 23.1353 | 30.6775 | 23.1353 | 9.0231 | 9.0231 | 2.3287 | 0.5078 | 0.3101 |
| STL training | 0.0000 | 14.5414 | 9.7270 | -0.4313 | 4.4992 | 0.9791 | -0.5520 | NA |
| STL test | 28.8075 | 39.7259 | 29.3304 | 9.9664 | 10.2487 | 2.9523 | 0.0588 | 0.4012 |
| linear trend training | 0.0000 | 14.0213 | 11.3459 | -0.3877 | 5.3824 | 1.1420 | 0.4640 | NA |
| linear trend test | -10.4310 | 25.8653 | 22.5494 | -6.8549 | 9.9407 | 2.2697 | 0.1024 | 0.2455 |
| ARIMA training | -2.0014 | 9.4166 | 7.0925 | -1.2689 | 3.3782 | 0.7139 | -0.0196 | NA |
| ARIMA test | 38.4741 | 47.3226 | 38.4741 | 15.4844 | 15.4844 | 3.8727 | 0.5966 | 0.4808 |
| ARIMA training (log) | -2.5397 | 10.3375 | 7.8001 | -1.3528 | 3.5980 | 0.7851 | -0.0869 | NA |
| ARIMA test (log) | 40.0507 | 51.1623 | 40.0507 | 15.0811 | 15.0811 | 4.0314 | 0.3572 | 0.5220 |
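To make the comparison easier to scan, the test-set rows of the accuracy table can be pulled out and ordered by MASE; a minimal sketch reusing a.table from above:
test.rows <- a.table[grep("test", rownames(a.table)), c("RMSE", "MAE", "MAPE", "MASE")]
kable(test.rows[order(test.rows$MASE), ], caption="Test-set accuracy, ordered by MASE", digits = 4)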

To determine which method (average, naïve, seasonal naïve, exponential smoothing, Holt-Winters' method, ETS, ARIMA) forecasts best, I compare the test-set RMSE, MAE, MAPE, and MASE. The seasonal naïve model produces the least error overall: it has the lowest test MAE, MAPE, and MASE. Although the trend-plus-seasonal regression has the lowest test RMSE and is close on the other measures, the seasonal naïve method wins on the majority of the measures, so it is the best forecasting method for this data set. Using that model, the forecasts for the next 6 periods (see the sketch after the list for reproducing them) are:

2017 Q4: 220
2018 Q1: 193
2018 Q2: 217
2018 Q3: 400
2018 Q4: 220
2019 Q1: 193
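A minimal sketch for producing these forecasts, refitting the seasonal naïve model on the full both series defined earlier:
Indigofinal <- snaive(both, h = 6)   # seasonal naive model refit on all observed data
Indigofinal$mean                     # point forecasts for 2017 Q4 through 2019 Q1
plot(Indigofinal, main = "Seasonal naive forecasts for the next 6 quarters")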

QUESTION TWO

In this question we investigate the dynamic interaction between Home Depot sales (HD), building permits (PERMIT), and consumer sentiment (CS). All data are for the United States. HD is measured in millions of dollars and PERMIT in thousands of units. The data are in as3_data.csv.

Estimate an appropriate vector autoregression (VAR) for this data set. Use natural logs of each variable. Include seasonal dummy variables. This can be done by adding an option to the VAR estimation command: VAR(vardata, p = lag, seasonal = 4). Use your preferred specification to investigate causality, impulse response functions, and forecast error decompositions. How do building permits and consumer sentiment affect HD sales? Compute forecasts for 2017:3 to 2018:2. 20 points.

Load Data and define as a time-series
as3_data <- read.csv("~/Dropbox (Personal)/ECON4210/Assignment Three/as3_data.csv")
as3_data = ts(as3_data, start=2000, frequency=4)
Extract and Plot HD Sales
y = as3_data[,"HD"]
plot(y, main="Quarterly Home Depot sales data", col= "blue", lwd=2, xlab="Time", ylab="Sales")

tsdisplay(y, main="Quarterly Home Depot sales data", col= "blue", lwd=2, xlab="Time", ylab="Sales")

Conduct a Vector Autoregression
cor(as3_data[,3:5])
##                 HD         CS      PERMIT
## HD      1.00000000 0.01730134 -0.09430523
## CS      0.01730134 1.00000000  0.67336928
## PERMIT -0.09430523 0.67336928  1.00000000
vardata = log(as3_data[,c(5,4,3)])  # PERMIT, CS, HD in natural logs
nrow(vardata)
## [1] 70
plot(vardata, main = "VAR data", xlab = "")

VARselect(vardata, lag.max = 16, type = "both", season=4)
## $selection
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##     15     15     15     16 
## 
## $criteria
##                    1             2             3             4
## AIC(n) -1.701542e+01 -1.715894e+01 -1.718719e+01 -1.716519e+01
## HQ(n)  -1.667450e+01 -1.669017e+01 -1.659058e+01 -1.644074e+01
## SC(n)  -1.613143e+01 -1.594345e+01 -1.564020e+01 -1.528671e+01
## FPE(n)  4.103526e-08  3.593329e-08  3.560209e-08  3.750478e-08
##                    5             6             7             8
## AIC(n) -1.693596e+01 -1.705351e+01 -1.691086e+01 -1.670522e+01
## HQ(n)  -1.608365e+01 -1.607336e+01 -1.580286e+01 -1.546938e+01
## SC(n)  -1.472597e+01 -1.451203e+01 -1.403788e+01 -1.350075e+01
## FPE(n)  4.931369e-08  4.669594e-08  5.873198e-08  8.110284e-08
##                    9            10            11            12
## AIC(n) -1.686221e+01 -1.775738e+01 -1.868321e+01 -1.972018e+01
## HQ(n)  -1.549852e+01 -1.626585e+01 -1.706384e+01 -1.797296e+01
## SC(n)  -1.332624e+01 -1.388991e+01 -1.448425e+01 -1.518972e+01
## FPE(n)  8.107832e-08  4.083692e-08  2.144368e-08  1.118208e-08
##                   13            14            15            16
## AIC(n) -2.092515e+01 -2.156585e+01 -2.256495e+01           NaN
## HQ(n)  -1.905008e+01 -1.956294e+01 -2.043419e+01           NaN
## SC(n)  -1.606319e+01 -1.637240e+01 -1.703999e+01           NaN
## FPE(n)  5.791391e-09  6.978407e-09  1.077310e-08 -1.687835e-39
VARselect(vardata, lag.max = 16, type = "const", season=4)
## $selection
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##     15     15     15     16 
## 
## $criteria
##                    1             2             3             4
## AIC(n) -1.664695e+01 -1.700237e+01 -1.698864e+01 -1.699619e+01
## HQ(n)  -1.634864e+01 -1.657622e+01 -1.643465e+01 -1.631435e+01
## SC(n)  -1.587346e+01 -1.589738e+01 -1.555216e+01 -1.522820e+01
## FPE(n)  5.918788e-08  4.184057e-08  4.310066e-08  4.390495e-08
##                    5             6             7             8
## AIC(n) -1.681308e+01 -1.690001e+01 -1.675048e+01 -1.657267e+01
## HQ(n)  -1.600339e+01 -1.596248e+01 -1.568510e+01 -1.537945e+01
## SC(n)  -1.471360e+01 -1.446903e+01 -1.398800e+01 -1.347870e+01
## FPE(n)  5.483934e-08  5.318620e-08  6.678391e-08  8.870798e-08
##                    9            10            11            12
## AIC(n) -1.665447e+01 -1.691601e+01 -1.780071e+01 -1.852145e+01
## HQ(n)  -1.533340e+01 -1.546709e+01 -1.622395e+01 -1.681685e+01
## SC(n)  -1.322899e+01 -1.315904e+01 -1.371225e+01 -1.410149e+01
## FPE(n)  9.423828e-08  8.773284e-08  4.672974e-08  3.213930e-08
##                   13            14            15            16
## AIC(n) -2.005809e+01 -2.099944e+01 -2.154108e+01           NaN
## HQ(n)  -1.822564e+01 -1.903914e+01 -1.945293e+01           NaN
## SC(n)  -1.530663e+01 -1.591648e+01 -1.612662e+01           NaN
## FPE(n)  1.122166e-08  8.934368e-09  1.667013e-08 -3.791961e-25
The information criteria above point to very long lag orders, but the model actually estimated below is a first-order VAR with quarterly seasonal dummies: VAR() ignores the ic argument when lag.max is not supplied, so the default p = 1 is used.
var.1 = VAR(vardata, type = "const", season = 4, ic = "AIC")  # ic is ignored without lag.max, so p = 1
summary(var.1)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: PERMIT, CS, HD 
## Deterministic variables: const 
## Sample size: 69 
## Log Likelihood: 307.246 
## Roots of the characteristic polynomial:
## 0.9719 0.9609 0.6418
## Call:
## VAR(y = vardata, type = "const", season = 4L, ic = "AIC")
## 
## 
## Estimation results for equation PERMIT: 
## ======================================= 
## PERMIT = PERMIT.l1 + CS.l1 + HD.l1 + const + sd1 + sd2 + sd3 
## 
##            Estimate Std. Error t value Pr(>|t|)    
## PERMIT.l1  0.926381   0.030859  30.020   <2e-16 ***
## CS.l1      0.214482   0.091597   2.342   0.0224 *  
## HD.l1     -0.023872   0.049097  -0.486   0.6285    
## const     -0.197302   0.571489  -0.345   0.7311    
## sd1       -0.014237   0.025913  -0.549   0.5847    
## sd2       -0.002457   0.025211  -0.097   0.9227    
## sd3       -0.019577   0.026355  -0.743   0.4604    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.0742 on 62 degrees of freedom
## Multiple R-Squared: 0.9729,  Adjusted R-squared: 0.9703 
## F-statistic: 370.9 on 6 and 62 DF,  p-value: < 2.2e-16 
## 
## 
## Estimation results for equation CS: 
## =================================== 
## CS = PERMIT.l1 + CS.l1 + HD.l1 + const + sd1 + sd2 + sd3 
## 
##            Estimate Std. Error t value Pr(>|t|)    
## PERMIT.l1  0.058245   0.033816   1.722    0.090 .  
## CS.l1      0.690356   0.100371   6.878 3.49e-09 ***
## HD.l1      0.033513   0.053801   0.623    0.536    
## const      0.625959   0.626236   1.000    0.321    
## sd1       -0.037479   0.028396  -1.320    0.192    
## sd2       -0.008986   0.027626  -0.325    0.746    
## sd3       -0.061907   0.028880  -2.144    0.036 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.08131 on 62 degrees of freedom
## Multiple R-Squared: 0.7083,  Adjusted R-squared:  0.68 
## F-statistic: 25.09 on 6 and 62 DF,  p-value: 7.067e-15 
## 
## 
## Estimation results for equation HD: 
## =================================== 
## HD = PERMIT.l1 + CS.l1 + HD.l1 + const + sd1 + sd2 + sd3 
## 
##            Estimate Std. Error t value Pr(>|t|)    
## PERMIT.l1 -0.001123   0.015871  -0.071   0.9438    
## CS.l1      0.037374   0.047108   0.793   0.4306    
## HD.l1      0.957927   0.025251  37.937   <2e-16 ***
## const      0.265839   0.293918   0.904   0.3692    
## sd1        0.165032   0.013327  12.383   <2e-16 ***
## sd2        0.238039   0.012966  18.359   <2e-16 ***
## sd3       -0.041296   0.013555  -3.047   0.0034 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.03816 on 62 degrees of freedom
## Multiple R-Squared: 0.9659,  Adjusted R-squared: 0.9626 
## F-statistic:   293 on 6 and 62 DF,  p-value: < 2.2e-16 
## 
## 
## 
## Covariance matrix of residuals:
##          PERMIT        CS        HD
## PERMIT 0.005506 0.0020376 0.0012687
## CS     0.002038 0.0066109 0.0004281
## HD     0.001269 0.0004281 0.0014562
## 
## Correlation matrix of residuals:
##        PERMIT     CS     HD
## PERMIT 1.0000 0.3377 0.4481
## CS     0.3377 1.0000 0.1380
## HD     0.4481 0.1380 1.0000
roots(var.1)
## [1] 0.9719023 0.9609497 0.6418125
Plot the VAR
plot(var.1, names ="HS")
## Warning in plot.varest(var.1, names = "HS"): 
## Invalid variable name(s) supplied, using first variable.

plot(var.1, names ="CS")

plot(var.1, names = "PERMIT")

serial.test(var.1, lags.pt = 16, type = "PT.adjusted")
## 
##  Portmanteau Test (adjusted)
## 
## data:  Residuals of VAR object var.1
## Chi-squared = 208.16, df = 135, p-value = 5.372e-05
acf(residuals(var.1), type="partial", lag.max=10)

Look at Granger Causality
causality(var.1, cause= c("PERMIT", "CS"))
## $Granger
## 
##  Granger causality H0: PERMIT CS do not Granger-cause HD
## 
## data:  VAR object var.1
## F-Test = 0.60141, df1 = 2, df2 = 186, p-value = 0.5491
## 
## 
## $Instant
## 
##  H0: No instantaneous causality between: PERMIT CS and HD
## 
## data:  VAR object var.1
## Chi-squared = 11.546, df = 2, p-value = 0.00311

The test of the null hypothesis that PERMIT and CS do not Granger-cause HD has a large p-value (0.5491 > 0.05), so we fail to reject it: there is no evidence that building permits and consumer sentiment Granger-cause HD sales. The null hypothesis of no instantaneous causality, however, is rejected (p-value 0.00311 ≤ 0.05), which is strong evidence of a contemporaneous relationship between building permits, consumer sentiment, and HD sales.
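To see whether either driver matters on its own, the same causality() test can be run for each variable separately; a minimal sketch (output not shown):
causality(var.1, cause = "PERMIT")   # does PERMIT Granger-cause the remaining variables (CS and HD)?
causality(var.1, cause = "CS")       # does CS Granger-cause the remaining variables (PERMIT and HD)?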

Look at Impulse Responses
var1a.irf <- irf(var.1,n.ahead = 16, boot = TRUE,runs=500, seed=99, cumulative=FALSE)

par(mfrow=c(3,3))
plot(var1a.irf, plot.type = "single")

par(mfrow=c(1,1))

par(mfrow=c(2,2))
plot( irf(var.1, response = "HD", n.ahead = 24, boot = TRUE,runs=500) , plot.type = "single")

par(mfrow=c(1,1))

A shock to building permits does not appear to move HD sales significantly. A shock to consumer sentiment, however, does appear to raise HD sales significantly from roughly the 1st to the 10th period, as the response rises following the CS innovation.
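To isolate the response discussed above, the consumer sentiment shock can be plotted on its own; a minimal sketch reusing var.1 with the same bootstrap settings as before:
plot(irf(var.1, impulse = "CS", response = "HD", n.ahead = 16, boot = TRUE, runs = 500, seed = 99), plot.type = "single")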

Look at Forecast Error Decompositions
fevd(var.1, n.ahead = 16)
## $PERMIT
##          PERMIT         CS           HD
##  [1,] 1.0000000 0.00000000 0.000000e+00
##  [2,] 0.9760279 0.02391339 5.876238e-05
##  [3,] 0.9439781 0.05588818 1.337567e-04
##  [4,] 0.9141332 0.08566711 1.996656e-04
##  [5,] 0.8890894 0.11065954 2.510912e-04
##  [6,] 0.8688177 0.13089321 2.890773e-04
##  [7,] 0.8525746 0.14710924 3.161947e-04
##  [8,] 0.8395437 0.16012137 3.349707e-04
##  [9,] 0.8290217 0.17063083 3.474859e-04
## [10,] 0.8204495 0.17919513 3.553469e-04
## [11,] 0.8133965 0.18624375 3.597629e-04
## [12,] 0.8075346 0.19210375 3.616354e-04
## [13,] 0.8026146 0.19702374 3.616352e-04
## [14,] 0.7984464 0.20119333 3.602627e-04
## [15,] 0.7948841 0.20475803 3.578927e-04
## [16,] 0.7918149 0.20783033 3.548072e-04
## 
## $CS
##          PERMIT        CS           HD
##  [1,] 0.1140729 0.8859271 0.0000000000
##  [2,] 0.1326991 0.8671698 0.0001310705
##  [3,] 0.1503622 0.8492424 0.0003954556
##  [4,] 0.1666752 0.8325768 0.0007480061
##  [5,] 0.1815391 0.8173137 0.0011471674
##  [6,] 0.1950139 0.8034247 0.0015613983
##  [7,] 0.2072243 0.7908060 0.0019696879
##  [8,] 0.2183082 0.7793323 0.0023595026
##  [9,] 0.2283947 0.7688810 0.0027242732
## [10,] 0.2375976 0.7593411 0.0030612882
## [11,] 0.2460149 0.7506149 0.0033701767
## [12,] 0.2537310 0.7426171 0.0036518979
## [13,] 0.2608189 0.7352730 0.0039081031
## [14,] 0.2673419 0.7285173 0.0041407472
## [15,] 0.2733556 0.7222926 0.0043518616
## [16,] 0.2789086 0.7165479 0.0045434264
## 
## $HD
##          PERMIT           CS        HD
##  [1,] 0.2007619 0.0002011183 0.7990370
##  [2,] 0.2093690 0.0020416687 0.7885894
##  [3,] 0.2166316 0.0056639810 0.7777045
##  [4,] 0.2230106 0.0097769326 0.7672125
##  [5,] 0.2287847 0.0138239013 0.7573914
##  [6,] 0.2341246 0.0175926984 0.7482827
##  [7,] 0.2391371 0.0210265353 0.7398364
##  [8,] 0.2438909 0.0241344834 0.7319746
##  [9,] 0.2484309 0.0269499900 0.7246191
## [10,] 0.2527877 0.0295123153 0.7177000
## [11,] 0.2569821 0.0318587726 0.7111591
## [12,] 0.2610292 0.0340219626 0.7049488
## [13,] 0.2649399 0.0360292312 0.6990309
## [14,] 0.2687222 0.0379030444 0.6933747
## [15,] 0.2723826 0.0396616719 0.6879557
## [16,] 0.2759261 0.0413199151 0.6827540

Looking at the forecast error variance decomposition for HD sales, aside from HD sales itself, building permits contribute the most to the forecast uncertainty of HD sales (rising to roughly 28% at horizon 16), while consumer sentiment contributes relatively little.
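The same decomposition can also be shown graphically; a minimal sketch:
plot(fevd(var.1, n.ahead = 16))   # variance shares of each shock, per variable and horizon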

Forecasts for 2017:3 to 2018:2
var.fc = predict(var.1, n.ahead= 4)
plot(var.fc)

summary(var.fc)
##          Length Class  Mode   
## fcst       3    -none- list   
## endog    210    mts    numeric
## model     10    varest list   
## exo.fcst   0    -none- NULL
var.fc$fcst$HD
##          fcst     lower    upper         CI
## [1,] 10.10918 10.034383 10.18397 0.07479374
## [2,] 10.01919  9.914934 10.12345 0.10425696
## [3,] 10.09856  9.972568 10.22456 0.12599518
## [4,] 10.24649 10.102871 10.39011 0.14362188
exp (var.fc$fcst$HD[,1])
## [1] 24567.42 22453.26 24308.06 28183.53

Therefore, the forecasts of HD sales for 2017:3 to 2018:2 (in millions of dollars) are as follows (see the interval sketch after the list):

2017:3 - 24567.42
2017:4 - 22453.26
2018:1 - 24308.06
2018:2 - 28183.53

Building permits and consumer sentiment therefore do affect HD sales, but mainly contemporaneously: the instantaneous causality test is significant even though the Granger causality test is not, the impulse responses show consumer sentiment shocks raising HD sales, and the forecast error decomposition attributes a meaningful share of the uncertainty in HD sales forecasts to building permits.