library("zoo")
library("forecast")
library("tseries")
library("urca")
library ("Quandl")
library("vars")
library("Quandl")
Quandl.api_key("DLk9RQrfTVkD4UTKc7op")
library("VARsignR")
library("stargazer")

Problem 1

Obtain quarterly time series for real GDP FRED/GDPC1, GDP deflator FRED/GDPDEF and the quarterly closing value of the S&P 500 Index YAHOO/INDEX_GSPC. Use them to construct two time series: the log change in GDP \(y_{1,t} = \Delta \log GDP_t\) (which approximates the growth rate of GDP) and the real log return for the S&P 500 \(y_{2,t} = \Delta \log SP500_t - \Delta \log P^{GDP}_t\) (which approximates the inflation-adjusted return of the S&P 500).
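To see why these log changes approximate growth rates (a standard first-order approximation, added here for clarity):

\[
\Delta \log GDP_t = \log\left(\frac{GDP_t}{GDP_{t-1}}\right) = \log\left(1 + \frac{GDP_t - GDP_{t-1}}{GDP_{t-1}}\right) \approx \frac{GDP_t - GDP_{t-1}}{GDP_{t-1}},
\]

and analogously \(\Delta \log SP500_t - \Delta \log P^{GDP}_t\) approximates the nominal S&P 500 return net of inflation.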

Introduction and Data:

In this problem we analyze quarterly data on real GDP, the GDP deflator, and the quarterly closing value of the S&P 500 Index from 1950 to 2014. The data are obtained from Quandl.

rGDP_d<- Quandl("FRED/GDPC1",  type="zoo")
rGDP<- window(rGDP_d[,2], start="1950 Q1", end="2014 Q4")

str(rGDP)
## 'zooreg' series from 1950 Q1 to 2014 Q4
##   Data: num [1:260] 2085 2148 2230 2273 2304 ...
##   Index: Class 'yearqtr'  num [1:260] 1950 1950 1950 1951 1951 ...
##   Frequency: 4
head(rGDP)
## 1950 Q1 1950 Q2 1950 Q3 1950 Q4 1951 Q1 1951 Q2 
##  2084.6  2147.6  2230.4  2273.4  2304.5  2344.5
tail(rGDP)
## 2013 Q3 2013 Q4 2014 Q1 2014 Q2 2014 Q3 2014 Q4 
## 15614.4 15761.5 15724.9 15901.5 16068.8 16151.4
tsdisplay(rGDP, lag.max=100, xlab="Years 1950-2014", ylab="Billions of Chained 2005 Dollars, Seasonally Adjusted Annual Rate", main="Real gross domestic product (inflation adjusted), 1950-2014, quarterly")

l.rGDP <- log(rGDP)
diff.l.rGDP <- diff(l.rGDP)


GDP_defl_d<-Quandl("FRED/GDPDEF",  type="zoo")
GDP_defl<- window(GDP_defl_d[,2], start="1950 Q1", end="2014 Q4")

str(GDP_defl)
## 'zooreg' series from 1950 Q1 to 2014 Q4
##   Data: num [1:260] 13.5 13.5 13.8 14.1 14.6 ...
##   Index: Class 'yearqtr'  num [1:260] 1950 1950 1950 1951 1951 ...
##   Frequency: 4
head(GDP_defl)
## 1950 Q1 1950 Q2 1950 Q3 1950 Q4 1951 Q1 1951 Q2 
##  13.490  13.538  13.832  14.090  14.596  14.692
tail(GDP_defl)
## 2013 Q3 2013 Q4 2014 Q1 2014 Q2 2014 Q3 2014 Q4 
## 107.128 107.589 108.009 108.606 109.044 109.067
tsdisplay(GDP_defl, lag.max=100, xlab="Years 1950-2014", ylab="Index 2005=100, Seasonally Adjusted", main="GDP implicit price deflator, 1950-2014, quarterly")

l.GDP_defl <- log(GDP_defl)
diff.l.GDP_defl <- diff(l.GDP_defl)


SP500_close_d<-Quandl("YAHOO/INDEX_GSPC", collapse="quarterly", type="zoo")
SP500_close <- window(SP500_close_d[,4], end="2014 Q4")
a<-data.frame(SP500_close)
str(SP500_close)
## 'zooreg' series from 1950 Q1 to 2014 Q4
##   Data: num [1:260] 17.3 17.7 19.5 20.4 21.5 ...
##   Index: Class 'yearqtr'  num [1:260] 1950 1950 1950 1951 1951 ...
##   Frequency: 4
head(SP500_close)
## 1950 Q1 1950 Q2 1950 Q3 1950 Q4 1951 Q1 1951 Q2 
##   17.29   17.69   19.45   20.43   21.48   20.96
tail(SP500_close)
## 2013 Q3 2013 Q4 2014 Q1 2014 Q2 2014 Q3 2014 Q4 
## 1681.55 1848.36 1872.34 1960.23 1972.29 2058.90
tsdisplay(SP500_close, lag.max=100, xlab="Years 1950-2014", ylab="S&P 500 Index, Closing", main="S&P 500 Index (500 large US companies listed on the NYSE or NASDAQ), quarterly closing value, 1950-2014")

l.SP500_close <- log(SP500_close)
diff.l.SP500_close <- diff(l.SP500_close)

Real GDP increases over the sample period, with only a few short episodes of decline. Its PACF shows no notable peaks, while its ACF decays exponentially.
The GDP deflator likewise increases in value over most of the sample. Its PACF also shows no notable peaks, while its ACF decays exponentially.
The quarterly closing value of the S&P 500 shows two distinct regimes: there is little change over roughly the first 150 quarters, after which both the level and the volatility increase. Its PACF shows no notable peaks, while its ACF decays slowly.
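As a quick cross-check of these visual impressions (a hedged addition, not part of the original assignment; results not shown), ADF unit-root tests on the log levels and log differences can be run with tseries::adf.test:

# Hedged check: ADF tests on log levels vs. log differences (tseries is loaded above).
# Expectation: fail to reject a unit root in the levels, reject it in the differences.
adf.test(coredata(l.rGDP))              # log real GDP, level
adf.test(coredata(diff.l.rGDP))         # log real GDP, first difference
adf.test(coredata(l.SP500_close))       # log S&P 500, level
adf.test(coredata(diff.l.SP500_close))  # log S&P 500, first difference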

(a) Estimate a bivariate reduced form VAR for \(y_t = (y_{1,t}, y_{2,t})'\) for the period 1951 Q1-2014 Q4, use information criteria to select number of lags. How large is the correlation of residuals in the model, and what are the implications?

# y1: log change in real GDP (approximate GDP growth rate)
y1 <- diff.l.rGDP
# y2: real log return on the S&P 500 (log return minus GDP deflator inflation)
y2 <- diff.l.SP500_close - diff.l.GDP_defl
y.Q <- cbind(y1, y2)

par(mfrow=c(1,2), cex=0.8)
plot(y1, xlab="", ylab="", main="Growth Rate of GDP", col="black")
plot(y2, xlab="", ylab="", main="Inflation Adjusted Return of SP500", col="red")

y.q.corr <-cor(y1, y2)
y.q.corr
## [1] 0.1297444
VARselect(y.Q, lag.max=8, type="const")
## $selection
## AIC(n)  HQ(n)  SC(n) FPE(n) 
##      2      2      1      2 
## 
## $criteria
##                    1             2             3             4
## AIC(n) -1.463005e+01 -1.468243e+01 -1.466877e+01 -1.464260e+01
## HQ(n)  -1.459614e+01 -1.462591e+01 -1.458964e+01 -1.454086e+01
## SC(n)  -1.454578e+01 -1.454197e+01 -1.447213e+01 -1.438978e+01
## FPE(n)  4.428443e-07  4.202486e-07  4.260368e-07  4.373472e-07
##                    5             6             7             8
## AIC(n) -1.462359e+01 -1.460373e+01 -1.459563e+01 -1.456650e+01
## HQ(n)  -1.449923e+01 -1.445677e+01 -1.442606e+01 -1.437432e+01
## SC(n)  -1.431458e+01 -1.423855e+01 -1.417426e+01 -1.408895e+01
## FPE(n)  4.457653e-07  4.547371e-07  4.584816e-07  4.720945e-07
var1 <- VAR(y.Q, p=2, type="const")
summary(var1)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: y1, y2 
## Deterministic variables: const 
## Sample size: 257 
## Log Likelihood: 1170.529 
## Roots of the characteristic polynomial:
## 0.4387 0.4387 0.2998 0.2998
## Call:
## VAR(y = y.Q, p = 2, type = "const")
## 
## 
## Estimation results for equation y1: 
## =================================== 
## y1 = y1.l1 + y2.l1 + y1.l2 + y2.l2 + const 
## 
##        Estimate Std. Error t value Pr(>|t|)    
## y1.l1 0.2463539  0.0603812   4.080 6.05e-05 ***
## y2.l1 0.0231050  0.0063715   3.626 0.000348 ***
## y1.l2 0.0809585  0.0579149   1.398 0.163377    
## y2.l2 0.0262690  0.0065313   4.022 7.63e-05 ***
## const 0.0046278  0.0007134   6.487 4.60e-10 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.008001 on 252 degrees of freedom
## Multiple R-Squared: 0.2381,  Adjusted R-squared: 0.226 
## F-statistic: 19.69 on 4 and 252 DF,  p-value: 4.095e-14 
## 
## 
## Estimation results for equation y2: 
## =================================== 
## y2 = y1.l1 + y2.l1 + y1.l2 + y2.l2 + const 
## 
##        Estimate Std. Error t value Pr(>|t|)  
## y1.l1  0.281599   0.597857   0.471   0.6380  
## y2.l1  0.107349   0.063087   1.702   0.0901 .
## y1.l2 -0.785554   0.573438  -1.370   0.1719  
## y2.l2 -0.041183   0.064669  -0.637   0.5248  
## const  0.013437   0.007063   1.902   0.0583 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.07922 on 252 degrees of freedom
## Multiple R-Squared: 0.02071, Adjusted R-squared: 0.005163 
## F-statistic: 1.332 on 4 and 252 DF,  p-value: 0.2584 
## 
## 
## 
## Covariance matrix of residuals:
##           y1       y2
## y1 6.401e-05 0.000085
## y2 8.500e-05 0.006276
## 
## Correlation matrix of residuals:
##        y1     y2
## y1 1.0000 0.1341
## y2 0.1341 1.0000

The correlation between \(y_1\) and \(y_2\) is 0.1297. Based on AIC, HQ and FPE the most suitable lag order is 2 (SC prefers 1), so a VAR(2) is estimated.
The correlation of the residuals is 0.1341. This nonzero contemporaneous correlation means that structural shocks recovered from the reduced-form VAR (e.g. via a Choleski decomposition) will depend on the ordering of the variables, although the dependence should be mild since the correlation is small. The equation for \(y_1\) has several significant coefficients, while none of the coefficients in the equation for \(y_2\) are significant at the 5% level.
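As a small illustration (a sketch, not part of the original output), the residual correlation reported above can be recovered directly from the fitted VAR:

# Hedged sketch: recover the residual correlation from var1
res <- resid(var1)    # reduced-form residuals, one column per equation
cor(res)              # off-diagonal element is the 0.1341 reported above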

(b) Run the Granger causality tests for both variables. What do the results suggest about the predictive power of the two variables?

causality(var1, cause="y1")
## $Granger
## 
##  Granger causality H0: y1 do not Granger-cause y2
## 
## data:  VAR object var1
## F-Test = 0.93921, df1 = 2, df2 = 504, p-value = 0.3916
## 
## 
## $Instant
## 
##  H0: No instantaneous causality between: y1 and y2
## 
## data:  VAR object var1
## Chi-squared = 4.5407, df = 1, p-value = 0.0331
causality(var1, cause="y2")
## $Granger
## 
##  Granger causality H0: y2 do not Granger-cause y1
## 
## data:  VAR object var1
## F-Test = 15.909, df1 = 2, df2 = 504, p-value = 1.997e-07
## 
## 
## $Instant
## 
##  H0: No instantaneous causality between: y2 and y1
## 
## data:  VAR object var1
## Chi-squared = 4.5407, df = 1, p-value = 0.0331
var1.f <- predict(var1, n.ahead=12)
var1.f
## $y1
##              fcst        lower      upper         CI
##  [1,] 0.007781723 -0.007899626 0.02346307 0.01568135
##  [2,] 0.008341288 -0.008314514 0.02499709 0.01665580
##  [3,] 0.007860986 -0.009879999 0.02560197 0.01774098
##  [4,] 0.007769889 -0.010106267 0.02564605 0.01787616
##  [5,] 0.007677010 -0.010215618 0.02556964 0.01789263
##  [6,] 0.007637136 -0.010256629 0.02553090 0.01789377
##  [7,] 0.007630348 -0.010263597 0.02552429 0.01789394
##  [8,] 0.007629595 -0.010264467 0.02552366 0.01789406
##  [9,] 0.007631172 -0.010262929 0.02552527 0.01789410
## [10,] 0.007632468 -0.010261641 0.02552658 0.01789411
## [11,] 0.007633082 -0.010261027 0.02552719 0.01789411
## [12,] 0.007633323 -0.010260786 0.02552743 0.01789411
## 
## $y2
##              fcst      lower     upper        CI
##  [1,] 0.011163292 -0.1441037 0.1664303 0.1552670
##  [2,] 0.011037886 -0.1452468 0.1673226 0.1562847
##  [3,] 0.010398217 -0.1463305 0.1671270 0.1567287
##  [4,] 0.009759891 -0.1470386 0.1665584 0.1567985
##  [5,] 0.010069361 -0.1467870 0.1669258 0.1568564
##  [6,] 0.010174278 -0.1466926 0.1670412 0.1568669
##  [7,] 0.010234528 -0.1466334 0.1671025 0.1568679
##  [8,] 0.010266087 -0.1466019 0.1671341 0.1568680
##  [9,] 0.010272115 -0.1465959 0.1671401 0.1568680
## [10,] 0.010272497 -0.1465955 0.1671405 0.1568680
## [11,] 0.010271416 -0.1465966 0.1671394 0.1568680
## [12,] 0.010270439 -0.1465976 0.1671385 0.1568680
plot(var1.f, lwd=2)

fanchart(var1.f, lwd=2)

The p-value for the test that \(y_2\) does not Granger-cause \(y_1\) is below 0.05, while the p-value for the test that \(y_1\) does not Granger-cause \(y_2\) is above 0.05. This suggests that \(y_2\) Granger-causes \(y_1\) but not vice versa: lagged real S&P 500 returns help predict GDP growth, but lagged GDP growth does not help predict returns. The hypothesis of no instantaneous causality between \(y_1\) and \(y_2\) is also rejected at the 5% level, pointing to a contemporaneous relationship. Even so, the model has little predictive power for \(y_2\): the forecasts quickly revert to the unconditional mean with wide prediction intervals, as the forecast plot shows.
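As a rough, hedged cross-check of the test that \(y_2\) does not Granger-cause \(y_1\) (not part of the original assignment), the same exclusion restriction can be tested equation by equation with lm() and anova(); the F statistic differs slightly from the system-based test above because of the different degrees of freedom.

# Hedged sketch: single-equation Granger test of "y2 does not Granger-cause y1"
Z <- as.data.frame(embed(as.matrix(coredata(y.Q)), 3))   # 257 rows, matching the VAR(2) sample
colnames(Z) <- c("y1", "y2", "y1.l1", "y2.l1", "y1.l2", "y2.l2")
unrestricted <- lm(y1 ~ y1.l1 + y2.l1 + y1.l2 + y2.l2, data = Z)
restricted   <- lm(y1 ~ y1.l1 + y1.l2, data = Z)
anova(restricted, unrestricted)   # F-test of the joint exclusion of y2.l1 and y2.l2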

(c) Estimate two restricted VAR models - in first remove lags based on the Granger causality test from (b), in the second one remove all variables which have t-statistics lower than 2.

# estimate first restricted VAR - manually remove the second lags (y1.l2 and y2.l2) from both equations
# (resmat columns follow var1: y1.l1, y2.l1, y1.l2, y2.l2, const)

mat.r <- matrix(1, nrow=2, ncol=5)
mat.r[1, c(3,4)] <- 0
mat.r[2, c(3,4)] <- 0
mat.r
##      [,1] [,2] [,3] [,4] [,5]
## [1,]    1    1    0    0    1
## [2,]    1    1    0    0    1
varp.r <- restrict(var1, method="manual", resmat=mat.r)
varp.r
## 
## VAR Estimation Results:
## ======================= 
## 
## Estimated coefficients for equation y1: 
## ======================================= 
## Call:
## y1 = y1.l1 + y2.l1 + const 
## 
##       y1.l1       y2.l1       const 
## 0.333913680 0.024651145 0.004836728 
## 
## 
## Estimated coefficients for equation y2: 
## ======================================= 
## Call:
## y2 = y1.l1 + y2.l1 + const 
## 
##        y1.l1        y2.l1        const 
## -0.117455677  0.108357033  0.009917336
summary(varp.r)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: y1, y2 
## Deterministic variables: const 
## Sample size: 257 
## Log Likelihood: 1159.441 
## Roots of the characteristic polynomial:
## 0.3202 0.122     0     0
## Call:
## VAR(y = y.Q, p = 2, type = "const")
## 
## 
## Estimation results for equation y1: 
## =================================== 
## y1 = y1.l1 + y2.l1 + const 
## 
##        Estimate Std. Error t value Pr(>|t|)    
## y1.l1 0.3339137  0.0560698   5.955 8.60e-09 ***
## y2.l1 0.0246511  0.0065486   3.764 0.000207 ***
## const 0.0048367  0.0006745   7.171 8.12e-12 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.008258 on 254 degrees of freedom
## Multiple R-Squared: 0.5244,  Adjusted R-squared: 0.5188 
## F-statistic: 93.37 on 3 and 254 DF,  p-value: < 2.2e-16 
## 
## 
## Estimation results for equation y2: 
## =================================== 
## y2 = y1.l1 + y2.l1 + const 
## 
##        Estimate Std. Error t value Pr(>|t|)  
## y1.l1 -0.117456   0.538249  -0.218    0.827  
## y2.l1  0.108357   0.062864   1.724    0.086 .
## const  0.009917   0.006475   1.532    0.127  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.07927 on 254 degrees of freedom
## Multiple R-Squared: 0.02737, Adjusted R-squared: 0.01588 
## F-statistic: 2.382 on 3 and 254 DF,  p-value: 0.06994 
## 
## 
## 
## Covariance matrix of residuals:
##           y1        y2
## y1 6.874e-05 0.0000731
## y2 7.310e-05 0.0063343
## 
## Correlation matrix of residuals:
##        y1     y2
## y1 1.0000 0.1108
## y2 0.1108 1.0000
varp.r$restrictions
##    y1.l1 y2.l1 y1.l2 y2.l2 const
## y1     1     1     0     0     1
## y2     1     1     0     0     1
Acoef(varp.r)
## [[1]]
##         y1.l1      y2.l1
## y1  0.3339137 0.02465114
## y2 -0.1174557 0.10835703
## 
## [[2]]
##    y1.l2 y2.l2
## y1     0     0
## y2     0     0
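For comparison (a hedged sketch, not re-estimated here), the restriction implied directly by the Granger test in (b), namely dropping the lags of \(y_1\) from the \(y_2\) equation while keeping everything else, would be specified as follows.

# Hedged alternative restriction: exclude y1.l1 and y1.l2 from the y2 equation only
mat.gc <- matrix(1, nrow=2, ncol=5)
mat.gc[2, c(1,3)] <- 0                  # columns 1 and 3 are y1.l1 and y1.l2
varp.gc <- restrict(var1, method="manual", resmat=mat.gc)
summary(varp.gc)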
# estimate restricted VAR - keep only variables with t-value larger than 2.0
varp.r.ser <- restrict(var1, method="ser", thresh=2.0)
varp.r.ser
## 
## VAR Estimation Results:
## ======================= 
## 
## Estimated coefficients for equation y1: 
## ======================================= 
## Call:
## y1 = y1.l1 + y2.l1 + y2.l2 + const 
## 
##       y1.l1       y2.l1       y2.l2       const 
## 0.277784962 0.022656641 0.026623429 0.005022662 
## 
## 
## Estimated coefficients for equation y2: 
## ======================================= 
## Call:
## y2 = const 
## 
##      const 
## 0.01010546
summary(varp.r.ser)
## 
## VAR Estimation Results:
## ========================= 
## Endogenous variables: y1, y2 
## Deterministic variables: const 
## Sample size: 257 
## Log Likelihood: 1166.53 
## Roots of the characteristic polynomial:
## 0.2778     0     0     0
## Call:
## VAR(y = y.Q, p = 2, type = "const")
## 
## 
## Estimation results for equation y1: 
## =================================== 
## y1 = y1.l1 + y2.l1 + y2.l2 + const 
## 
##        Estimate Std. Error t value Pr(>|t|)    
## y1.l1 0.2777850  0.0561442   4.948 1.37e-06 ***
## y2.l1 0.0226566  0.0063754   3.554 0.000453 ***
## y2.l2 0.0266234  0.0065386   4.072 6.25e-05 ***
## const 0.0050227  0.0006563   7.653 4.13e-13 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.008016 on 253 degrees of freedom
## Multiple R-Squared: 0.5537,  Adjusted R-squared: 0.5466 
## F-statistic: 78.47 on 4 and 253 DF,  p-value: < 2.2e-16 
## 
## 
## Estimation results for equation y2: 
## =================================== 
## y2 = const 
## 
##       Estimate Std. Error t value Pr(>|t|)  
## const 0.010105   0.004954    2.04   0.0424 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## 
## Residual standard error: 0.07942 on 256 degrees of freedom
## Multiple R-Squared: 0.01599, Adjusted R-squared: 0.01215 
## F-statistic:  4.16 on 1 and 256 DF,  p-value: 0.04241 
## 
## 
## 
## Covariance matrix of residuals:
##           y1        y2
## y1 6.451e-05 8.019e-05
## y2 8.019e-05 6.408e-03
## 
## Correlation matrix of residuals:
##        y1     y2
## y1 1.0000 0.1247
## y2 0.1247 1.0000
varp.r.ser$restrictions
##    y1.l1 y2.l1 y1.l2 y2.l2 const
## y1     1     1     0     1     1
## y2     0     0     0     0     1
Acoef(varp.r.ser)
## [[1]]
##       y1.l1      y2.l1
## y1 0.277785 0.02265664
## y2 0.000000 0.00000000
## 
## [[2]]
##    y1.l2      y2.l2
## y1     0 0.02662343
## y2     0 0.00000000

(d) Use stargazer package to show estimation results from all three models in (a) and (c).

# using stargazer package to report results of VAR estimation
lm1 <- var1$varresult
lmp.r <- varp.r$varresult
lmp.r.se <- varp.r.ser$varresult

stargazer(lm1$y1, lm1$y2, lmp.r$y1, lmp.r$y2,lmp.r.se$y1, lmp.r.se$y2, type="text", column.labels=rep(colnames(y.Q), 3), dep.var.labels.include=FALSE)
## 
## ==========================================================================================================================================================
##                                                                              Dependent variable:                                                          
##                     --------------------------------------------------------------------------------------------------------------------------------------
##                               y1                    y2                    y1                     y2                    y1                     y2          
##                               (1)                   (2)                   (3)                   (4)                    (5)                    (6)         
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## y1.l1                      0.246***                0.282               0.334***                -0.117               0.278***                              
##                             (0.060)               (0.598)               (0.056)               (0.538)                (0.056)                              
##                                                                                                                                                           
## y2.l1                      0.023***               0.107*               0.025***                0.108*               0.023***                              
##                             (0.006)               (0.063)               (0.007)               (0.063)                (0.006)                              
##                                                                                                                                                           
## y1.l2                        0.081                -0.786                                                                                                  
##                             (0.058)               (0.573)                                                                                                 
##                                                                                                                                                           
## y2.l2                      0.026***               -0.041                                                            0.027***                              
##                             (0.007)               (0.065)                                                            (0.007)                              
##                                                                                                                                                           
## const                      0.005***               0.013*               0.005***                0.010                0.005***                0.010**       
##                             (0.001)               (0.007)               (0.001)               (0.006)                (0.001)                (0.005)       
##                                                                                                                                                           
## ----------------------------------------------------------------------------------------------------------------------------------------------------------
## Observations                  257                   257                   257                   257                    257                    257         
## R2                           0.238                 0.021                 0.524                 0.027                  0.554                  0.016        
## Adjusted R2                  0.226                 0.005                 0.519                 0.016                  0.547                  0.012        
## Residual Std. Error    0.008 (df = 252)      0.079 (df = 252)      0.008 (df = 254)       0.079 (df = 254)      0.008 (df = 253)       0.079 (df = 256)   
## F Statistic         19.685*** (df = 4; 252) 1.332 (df = 4; 252) 93.368*** (df = 3; 254) 2.382* (df = 3; 254) 78.466*** (df = 4; 253) 4.160** (df = 1; 256)
## ==========================================================================================================================================================
## Note:                                                                                                                          *p<0.1; **p<0.05; ***p<0.01

(e) Plot IRFs and FEVD for the VAR model based on Choleski decomposition. Afterwards reverse the order of the variables in the VAR and plot IRFs and FEVD based on Choleski decomposition for the alternative order. Does the order matter much in this particular case?

# IRFs - based on Choleski decomposition of var(e)
var1.irfs <- irf(var1, n.ahead=10)
par(mfcol=c(2,2), cex=0.6)
plot(var1.irfs, plot.type="single")

# FEVD - based on Choleski decomposition of var(e)
var1.fevd <- fevd(var1, n.ahead=10)
var1.fevd[[1]][c(1,3,6,10),]
##             y1        y2
## [1,] 1.0000000 0.0000000
## [2,] 0.8706221 0.1293779
## [3,] 0.8590837 0.1409163
## [4,] 0.8590767 0.1409233
var1.fevd[[2]][c(1,3,6,10),]
##              y1        y2
## [1,] 0.01798606 0.9820139
## [2,] 0.02459518 0.9754048
## [3,] 0.02541634 0.9745837
## [4,] 0.02541661 0.9745834
plot(var1.fevd)

plot(var1.fevd, addbars=8)

# note: ordering of variables matters for IRF and FEVD

# ordering 1: y1 (GDP growth) before y2 (real S&P 500 return)
y.Q.ord1 <- cbind(y1, y2)
y.Q.ord1 <- window(y.Q.ord1, start="1950 Q1", end="2014 Q4")
# ordering 2: y2 before y1
y.Q.ord2 <- cbind(y2, y1)
y.Q.ord2 <- window(y.Q.ord2, start="1950 Q1", end="2014 Q4")

# reduced form VAR(2) under each ordering
var1.ord1 <- VAR(y.Q.ord1, p=2, type="const")
var1.ord2 <- VAR(y.Q.ord2, p=2, type="const")

# IRF based on Choleski decomposition of var(e)
var1.irfs.ord1 <- irf(var1.ord1, n.ahead=10)
var1.irfs.ord2 <- irf(var1.ord2, n.ahead=10)
par(mfcol=c(2,2), cex=0.6)
plot(var1.irfs.ord1, plot.type="single")

plot(var1.irfs.ord2, plot.type="single")

# FEVD based on Choleski decomposition of var(e)
var1.fevd.ord1 <- fevd(var1.ord1, n.ahead=10)
var1.fevd.ord2 <- fevd(var1.ord2, n.ahead=10)
plot(var1.fevd.ord1)

plot(var1.fevd.ord2)

There is a change in both the impulse response functions and the FEVD, which tells us that the ordering does matter when identifying the VAR model via the Choleski decomposition. Under this identification, \(y_{i,t}\) is affected contemporaneously only by the shocks \(e_{1,t}, \ldots, e_{i,t}\); the remaining shocks \(e_{i+1,t}, \ldots, e_{k,t}\) have no contemporaneous effect on \(y_{i,t}\) and affect \(y_{i,t'}\) for \(t' > t\) only indirectly, through their effect on \(y_{i+1,t}, \ldots, y_{k,t}\).
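A minimal sketch of why the ordering matters (assuming var1 from above; output not shown): the lower-triangular Choleski factor of the residual covariance matrix serves as the contemporaneous impact matrix, and it changes when the variables are reordered.

# Hedged sketch: Choleski factor of the residual covariance under both orderings
Sigma <- summary(var1)$covres                 # reduced-form residual covariance
t(chol(Sigma))                                # ordering y1 before y2
t(chol(Sigma[c("y2","y1"), c("y2","y1")]))    # ordering y2 before y1
# With a residual correlation of only about 0.13, the off-diagonal elements of
# these factors are small relative to the diagonal.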