Packages

library(lmtest)
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
library(forecast)
## Registered S3 method overwritten by 'quantmod':
##   method            from
##   as.zoo.data.frame zoo
library(tseries)

Load the data

library(readxl)
Inflacion <- read_excel("C:/Users/57310/OneDrive/Escritorio/Tasa de inflacion Mensual.xlsx")
Desempleo <- read_excel("C:/Users/57310/OneDrive/Escritorio/Tasa de Desempleo Mensual.xlsx")
## New names:
## • `` -> `...1`
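
The "New names" message just means the first column of the unemployment file has no header, so readxl auto-names it `...1`. A quick sanity check (a sketch; the name `Fecha` and the assumption that column 1 holds the dates are ours, not from the original files):

str(Desempleo)                   # inspect the imported columns
names(Desempleo)[1] <- "Fecha"   # hypothetical name for the unnamed date column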

We convert the data into time series objects

Inflacion.s = ts(Inflacion[, 2], start = 2001, frequency = 12)
Desempleo.s = ts(Desempleo[, 2], start = 2001, frequency = 12)
Desempleo.l = log(Desempleo.s)

We seasonally adjust the inflation series

library(seasonal)
ajuste = seas(Inflacion.s, x11 = "")
infajust = data.frame(ajuste)
infdes = infajust[, 2]
inflacion = ts(infdes, start = 2001, frequency = 12)  # same start as the original series
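
An equivalent, more direct way to pull the adjusted series is the seasonal package's final() accessor, which returns a ts that keeps the original start and frequency (a sketch; `inflacion2` is just a name we introduce here):

inflacion2 <- final(ajuste)   # seasonally adjusted inflation as a ts
plot(inflacion2)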

We take a first look at the series

plot(Desempleo.l)

plot(inflacion)
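
To compare both series in one window, a minimal sketch with base graphics:

par(mfrow = c(2, 1))
plot(Desempleo.l, main = "Log unemployment rate")
plot(inflacion, main = "Seasonally adjusted inflation")
par(mfrow = c(1, 1))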

## Checking for stationarity

library(tseries)
adf.test(inflacion)
## 
##  Augmented Dickey-Fuller Test
## 
## data:  inflacion
## Dickey-Fuller = -3.3306, Lag order = 6, p-value = 0.06707
## alternative hypothesis: stationary
adf.test(Desempleo.l)
## 
##  Augmented Dickey-Fuller Test
## 
## data:  Desempleo.l
## Dickey-Fuller = -1.0239, Lag order = 6, p-value = 0.9328
## alternative hypothesis: stationary
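
As a complementary check, the KPSS test (also in tseries) reverses the null hypothesis to stationarity; a sketch:

kpss.test(inflacion)     # null: level stationarity
kpss.test(Desempleo.l)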

Neither level series rejects the unit-root null at the 5% level (p = 0.067 for inflation, 0.93 for log unemployment), so we take first differences

Des.d=diff(Desempleo.l)
Inf.d=diff(inflacion)                    
adf.test(Des.d)
## Warning in adf.test(Des.d): p-value smaller than printed p-value
## 
##  Augmented Dickey-Fuller Test
## 
## data:  Des.d
## Dickey-Fuller = -5.8795, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
adf.test(Inf.d)
## Warning in adf.test(Inf.d): p-value smaller than printed p-value
## 
##  Augmented Dickey-Fuller Test
## 
## data:  Inf.d
## Dickey-Fuller = -4.6858, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
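
forecast's ndiffs() gives a quick cross-check of how many differences each series needs; a sketch:

ndiffs(inflacion)      # suggested number of differences for inflation
ndiffs(Desempleo.l)    # and for log unemployment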

Both differenced series are stationary (ADF p-values of 0.01). We now test for cointegration

Phillips-Ouliaris test

po.test(cbind(Des.d,Inf.d))
## Warning in po.test(cbind(Des.d, Inf.d)): p-value smaller than printed p-value
## 
##  Phillips-Ouliaris Cointegration Test
## 
## data:  cbind(Des.d, Inf.d)
## Phillips-Ouliaris demeaned = -146.29, Truncation lag parameter = 1,
## p-value = 0.01
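
For a multivariate confirmation one could also run a Johansen test (a sketch; assumes the urca package is installed, and the lag order K = 2 is an arbitrary choice):

library(urca)
johansen <- ca.jo(cbind(Des.d, Inf.d), type = "trace", ecdet = "const", K = 2)
summary(johansen)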

The Phillips-Ouliaris test rejects the null of no cointegration (p-value of 0.01). As a second check we run the Engle-Granger two-step: an OLS regression followed by an ADF test on its residuals

phill.ols= lm(Inf.d~Des.d)
summary(phill.ols)
## 
## Call:
## lm(formula = Inf.d ~ Des.d)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.01546 -0.19426  0.01602  0.19541  0.76225 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)
## (Intercept)  -0.0196     0.0214  -0.916    0.361
## Des.d         0.2658     0.4513   0.589    0.556
## 
## Residual standard error: 0.3223 on 225 degrees of freedom
## Multiple R-squared:  0.001539,   Adjusted R-squared:  -0.002898 
## F-statistic: 0.3469 on 1 and 225 DF,  p-value: 0.5565
adf.test(phill.ols$residuals)
## Warning in adf.test(phill.ols$residuals): p-value smaller than printed p-value
## 
##  Augmented Dickey-Fuller Test
## 
## data:  phill.ols$residuals
## Dickey-Fuller = -4.6539, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary

The regression residuals are stationary (ADF p-value of 0.01), consistent with cointegration. We now build the ARIMAX model

We inspect the autocorrelation and partial autocorrelation functions

acf(Des.d)

pacf(Des.d)

acf(Inf.d)

pacf(Inf.d)

The ACF and PACF suggest an ARIMAX(1,0,3) for inflation. We compare candidate models to see which one fits best, starting with auto.arima:

auto.arima(Inf.d,xreg=Des.d)
## Series: Inf.d 
## Regression with ARIMA(1,0,0)(0,0,1)[12] errors 
## 
## Coefficients:
##          ar1     sma1      TD
##       0.4462  -0.4204  0.3461
## s.e.  0.0599   0.0598  0.2985
## 
## sigma^2 = 0.0697:  log likelihood = -19.55
## AIC=47.09   AICc=47.27   BIC=60.79

Using Box-Jenkins

arima(Inf.d,c(1,0,3),xreg=Des.d)$aic
## [1] 88.35389
arima(Inf.d,c(1,0,2),xreg=Des.d)$aic
## [1] 87.24429
arima(Inf.d,c(1,0,1),xreg=Des.d)$aic
## [1] 85.78646
arima(Inf.d,c(1,0,0),xreg=Des.d)$aic
## [1] 83.78932
arima(Inf.d,c(0,0,3),xreg=Des.d)$aic
## [1] 88.28839
arima(Inf.d,c(0,0,2),xreg=Des.d)$aic
## [1] 86.29035
arima(Inf.d,c(0,0,1),xreg=Des.d)$aic
## [1] 93.12159
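
The same comparison can be written as a small loop (a sketch) so the AICs come out as one table:

ordenes <- list(c(1,0,3), c(1,0,2), c(1,0,1), c(1,0,0), c(0,0,3), c(0,0,2), c(0,0,1))
aics <- sapply(ordenes, function(o) arima(Inf.d, order = o, xreg = Des.d)$aic)
names(aics) <- sapply(ordenes, paste, collapse = ",")
round(aics, 2)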

Model 4, the ARIMA(1,0,0), has the lowest AIC (83.79), so we fit it and check its residuals:

mod4=arima(Inf.d,c(1,0,0),xreg=Des.d)
Box.test(mod4$residuals,type="Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  mod4$residuals
## X-squared = 1.9623e-06, df = 1, p-value = 0.9989
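
forecast's checkresiduals() bundles the residual plot, the ACF and a Ljung-Box test in a single call; a sketch:

checkresiduals(mod4)   # residual plot, ACF and Ljung-Box in one step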

The Ljung-Box test does not reject white-noise residuals (p ≈ 0.999). We also inspect the residual autocorrelations:

acf(mod4$residuals, main = "Residual ACF for the ARIMA(1,0,0) model")

pacf(mod4$residuals, main = "Residual PACF for the ARIMA(1,0,0) model")

### Normality test

jarque.bera.test(mod4$residuals)
## 
##  Jarque Bera Test
## 
## data:  mod4$residuals
## X-squared = 16.174, df = 2, p-value = 0.0003074
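
The Jarque-Bera test rejects normality of the residuals (p ≈ 0.0003). A quick visual check with a normal Q-Q plot (a sketch):

qqnorm(mod4$residuals, main = "Q-Q plot of ARIMA(1,0,0) residuals")
qqline(mod4$residuals)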

ARDL

We return to the linear regression estimated above and examine its residuals

resid.ols = phill.ols$residuals
summary(phill.ols)
## 
## Call:
## lm(formula = Inf.d ~ Des.d)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.01546 -0.19426  0.01602  0.19541  0.76225 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)
## (Intercept)  -0.0196     0.0214  -0.916    0.361
## Des.d         0.2658     0.4513   0.589    0.556
## 
## Residual standard error: 0.3223 on 225 degrees of freedom
## Multiple R-squared:  0.001539,   Adjusted R-squared:  -0.002898 
## F-statistic: 0.3469 on 1 and 225 DF,  p-value: 0.5565
acf(resid.ols)

ARDL estimation

phill1.gen<-lm(Inf.d~(lag(Inf.d,-1))+Des.d+lag(Inf.d,-1))
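# Note: lag(Inf.d, -1) only shifts the ts time stamps; inside lm() it is treated
# as a plain vector with the same values as Inf.d, so this model effectively
# regresses Inf.d on itself (hence the "essentially perfect fit" warning below).
# The lag term is also written twice. A corrected sketch follows the output.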
summary(phill1.gen)
## Warning in summary.lm(phill1.gen): essentially perfect fit: summary may be
## unreliable
## 
## Call:
## lm(formula = Inf.d ~ (lag(Inf.d, -1)) + Des.d + lag(Inf.d, -1))
## 
## Residuals:
##        Min         1Q     Median         3Q        Max 
## -1.008e-16 -1.080e-17 -3.840e-18  1.930e-18  9.094e-16 
## 
## Coefficients:
##                  Estimate Std. Error    t value Pr(>|t|)    
## (Intercept)    -7.369e-18  4.256e-18 -1.731e+00   0.0848 .  
## lag(Inf.d, -1)  1.000e+00  1.324e-17  7.555e+16   <2e-16 ***
## Des.d           3.927e-17  8.966e-17  4.380e-01   0.6619    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 6.399e-17 on 224 degrees of freedom
## Multiple R-squared:      1,  Adjusted R-squared:      1 
## F-statistic: 2.859e+33 on 2 and 224 DF,  p-value: < 2.2e-16
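
To apply the lags correctly, they have to be built before calling lm(), for example with ts.intersect() so all series are aligned on the same time window. A minimal ARDL(1,1) sketch under that approach (the choice of one lag of each variable, and the names datos.ardl / phill.ardl, are ours, not the original specification):

datos.ardl <- ts.intersect(
  y    = Inf.d,
  y.l1 = stats::lag(Inf.d, -1),   # inflation, lagged one month
  x    = Des.d,
  x.l1 = stats::lag(Des.d, -1)    # log-differenced unemployment, lagged one month
)
phill.ardl <- lm(y ~ y.l1 + x + x.l1, data = data.frame(datos.ardl))
summary(phill.ardl)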