# Packages used below: tseries (unit-root, BDS, Jarque-Bera tests), pastecs (stat.desc),
# lmtest (coeftest), FinTS (ArchTest), forecast (forecast, accuracy, ARIMA root plots)
library(tseries); library(pastecs); library(lmtest); library(FinTS); library(forecast)

ts.plot(examen)
boxplot(examen)
hist(examen)
acf.examen<-acf(examen, main="ACF of the series")
pacf.examen<-pacf(examen, main="PACF of the generated series")
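#### The ACF/PACF above guide the ARMA orders tried later on. As a hedged cross-check (not
#### part of the original analysis), forecast::auto.arima can search the orders automatically:
auto.arima(examen, seasonal = FALSE)   # automated order search; compare with the models fitted below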
stat.desc(examen,norm = T)
## x
## nbr.val 700.00000000
## nbr.null 0.00000000
## nbr.na 0.00000000
## min -4.19803815
## max 3.63624802
## range 7.83428617
## sum -54.33256127
## median -0.06708635
## mean -0.07761794
## SE.mean 0.05170340
## CI.mean.0.95 0.10151257
## var 1.87126905
## std.dev 1.36794337
## coef.var -17.62406067
## skewness -0.08378120
## skew.2SE -0.45343778
## kurtosis -0.07756026
## kurt.2SE -0.21018114
## normtest.W 0.99767494
## normtest.p 0.44324165
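#### With norm = T, stat.desc reports the Shapiro-Wilk statistic and p-value as normtest.W and
#### normtest.p. A minimal sketch running the same test directly:
shapiro.test(examen)   # should reproduce normtest.W and normtest.p shown above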
adf.test(examen)
## Warning in adf.test(examen): p-value smaller than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: examen
## Dickey-Fuller = -7.409, Lag order = 8, p-value = 0.01
## alternative hypothesis: stationary
pp.test(examen)
## Warning in pp.test(examen): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: examen
## Dickey-Fuller Z(alpha) = -282.71, Truncation lag parameter = 6, p-value
## = 0.01
## alternative hypothesis: stationary
kpss.test(examen)
##
## KPSS Test for Level Stationarity
##
## data: examen
## KPSS Level = 0.3899, Truncation lag parameter = 6, p-value = 0.08151
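#### Taken together: ADF and PP reject the unit-root null (p <= 0.01) and KPSS does not reject
#### level stationarity at the 5% level (p = 0.082), so the series is treated as stationary.
#### A minimal sketch that collects the three p-values for a side-by-side look:
c(ADF  = adf.test(examen)$p.value,
  PP   = pp.test(examen)$p.value,
  KPSS = kpss.test(examen)$p.value)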
modelo1<-arima(examen,order=c(3,0,2),include.mean=T)
coeftest(modelo1)
##
## z test of coefficients:
##
## Estimate Std. Error z value Pr(>|z|)
## ar1 0.492356 0.105715 4.6574 3.203e-06 ***
## ar2 -0.322405 0.089698 -3.5943 0.0003252 ***
## ar3 0.385068 0.060596 6.3546 2.089e-10 ***
## ma1 0.275494 0.111939 2.4611 0.0138511 *
## ma2 0.124480 0.108720 1.1450 0.2522243
## intercept -0.080976 0.118835 -0.6814 0.4956062
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#### ma2 and the intercept are not significant, so the model is re-estimated as an
#### ARIMA(3,0,1) without a mean.
modelo1<-arima(examen,order=c(3,0,1),include.mean=F)
coeftest(modelo1)
##
## z test of coefficients:
##
## Estimate Std. Error z value Pr(>|z|)
## ar1 0.554846 0.087845 6.3162 2.68e-10 ***
## ar2 -0.259068 0.071535 -3.6216 0.0002928 ***
## ar3 0.330200 0.039678 8.3219 < 2.2e-16 ***
## ma1 0.214300 0.091654 2.3382 0.0193792 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Sum of the AR coefficients; a value well below 1 is consistent with a stationary AR part
modelo1$coef[1]+modelo1$coef[2]+modelo1$coef[3]
## ar1
## 0.6259779
plot(modelo1)
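#### The plot above shows the inverse roots inside the unit circle. A numeric version of the
#### same check (a minimal sketch): the AR polynomial 1 - ar1*z - ar2*z^2 - ar3*z^3 must have
#### all of its roots outside the unit circle for stationarity.
Mod(polyroot(c(1, -modelo1$coef[1:3])))   # all moduli should exceed 1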
h <- numeric(length(examen) - 1)    # Ljung-Box p-values for the residuals at each lag
for (i in 1:(length(examen) - 1)) {
  Q <- Box.test(modelo1$residuals, lag = i, type = 'Ljung-Box')
  h[i] <- Q$p.value
}
head(h)
## [1] 0.9272350 0.9283267 0.9787708 0.9952582 0.9914426 0.9185487
sum(h<.05)
## [1] 0
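#### The loop above finds no Ljung-Box p-value below 0.05, so the residuals behave like white
#### noise. A more compact alternative (a sketch using forecast, not part of the original run):
checkresiduals(modelo1, plot = FALSE)   # Ljung-Box test on the residuals at a default lag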
ArchTest(modelo1$residuals,lags=1)
##
## ARCH LM-test; Null hypothesis: no ARCH effects
##
## data: modelo1$residuals
## Chi-squared = 0.5592, df = 1, p-value = 0.4546
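#### With lags = 1 the ARCH LM statistic is (T-1)*R^2 from regressing the squared residuals on
#### their first lag. A minimal sketch that should reproduce (up to implementation details) the
#### Chi-squared value above; e2 and fit are names introduced only for this illustration:
e2  <- as.numeric(modelo1$residuals)^2
fit <- lm(e2[-1] ~ e2[-length(e2)])
length(e2[-1]) * summary(fit)$r.squared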
bds.test(modelo1$residuals)
##
## BDS Test
##
## data: modelo1$residuals
##
## Embedding dimension = 2 3
##
## Epsilon for close points = 0.5024 1.0047 1.5071 2.0094
##
## Standard Normal =
## [ 0.5024 ] [ 1.0047 ] [ 1.5071 ] [ 2.0094 ]
## [ 2 ] -1.7861 -1.8569 -1.6694 -1.5402
## [ 3 ] -2.2659 -2.1744 -2.0855 -2.0920
##
## p-value =
## [ 0.5024 ] [ 1.0047 ] [ 1.5071 ] [ 2.0094 ]
## [ 2 ] 0.0741 0.0633 0.095 0.1235
## [ 3 ] 0.0235 0.0297 0.037 0.0364
#### At embedding dimension 2 the BDS test does not reject the iid null at the 5% level, but at
#### dimension 3 it does for every epsilon, which may point to some remaining nonlinear
#### dependence in the residuals.
jarque.bera.test(modelo1$residuals)
##
## Jarque Bera Test
##
## data: modelo1$residuals
## X-squared = 1.2199, df = 2, p-value = 0.5434
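#### The Jarque-Bera statistic combines skewness and excess kurtosis, JB = n*(S^2/6 + (K-3)^2/24).
#### A minimal sketch reproducing the X-squared value above from modelo1's residuals (r, m2, S, K
#### are names introduced only for this illustration):
r  <- as.numeric(modelo1$residuals)
m2 <- mean((r - mean(r))^2)
S  <- mean((r - mean(r))^3) / m2^1.5
K  <- mean((r - mean(r))^4) / m2^2
length(r) * (S^2/6 + (K - 3)^2/24)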
modelo2<-arima(examen,order=c(1,0,4),include.mean=T)
coeftest(modelo2)
##
## z test of coefficients:
##
## Estimate Std. Error z value Pr(>|z|)
## ar1 0.506029 0.125660 4.0270 5.650e-05 ***
## ma1 0.261832 0.123036 2.1281 0.033329 *
## ma2 -0.199883 0.100302 -1.9928 0.046282 *
## ma3 0.150562 0.050781 2.9649 0.003028 **
## ma4 0.193123 0.042614 4.5319 5.846e-06 ***
## intercept -0.080187 0.108049 -0.7421 0.458006
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#### The intercept is not significant, so the model is re-estimated without a mean.
modelo2<-arima(examen,order=c(1,0,4),include.mean=F)
coeftest(modelo2)
##
## z test of coefficients:
##
## Estimate Std. Error z value Pr(>|z|)
## ar1 0.510028 0.125684 4.0580 4.949e-05 ***
## ma1 0.258727 0.123102 2.1017 0.035578 *
## ma2 -0.202042 0.100509 -2.0102 0.044410 *
## ma3 0.150272 0.050982 2.9476 0.003203 **
## ma4 0.192897 0.042777 4.5094 6.501e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# AR(1) coefficient; |ar1| < 1 is consistent with a stationary AR part
modelo2$coef[1]
## ar1
## 0.5100281
plot(modelo2)
#### Once again the points lie inside the unit circle, so the invertibility condition on the
#### roots is satisfied.
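#### A numeric counterpart to the plot (a minimal sketch): the MA polynomial
#### 1 + ma1*z + ... + ma4*z^4 must have all of its roots outside the unit circle for
#### invertibility.
Mod(polyroot(c(1, modelo2$coef[c("ma1","ma2","ma3","ma4")])))   # all moduli should exceed 1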
h <- numeric(length(examen) - 1)    # Ljung-Box p-values for the residuals at each lag
for (i in 1:(length(examen) - 1)) {
  Q <- Box.test(modelo2$residuals, lag = i, type = "Ljung-Box")
  h[i] <- Q$p.value
}
sum(h<.05)
## [1] 0
head(h)
## [1] 0.8362662 0.8362662 0.8362662 0.8362662 0.8362662 0.8362662
ArchTest(modelo2$residuals, lags = 1)   # ARCH LM test on modelo2's residuals
bds.test(modelo2$residuals)
##
## BDS Test
##
## data: modelo2$residuals
##
## Embedding dimension = 2 3
##
## Epsilon for close points = 0.5040 1.0079 1.5119 2.0158
##
## Standard Normal =
## [ 0.504 ] [ 1.0079 ] [ 1.5119 ] [ 2.0158 ]
## [ 2 ] -1.6713 -1.7917 -1.588 -1.4090
## [ 3 ] -1.3798 -2.1213 -2.056 -1.9821
##
## p-value =
## [ 0.504 ] [ 1.0079 ] [ 1.5119 ] [ 2.0158 ]
## [ 2 ] 0.0947 0.0732 0.1123 0.1588
## [ 3 ] 0.1676 0.0339 0.0398 0.0475
stat.desc(modelo2$residuals, norm=T)
## x
## nbr.val 700.00000000
## nbr.null 0.00000000
## nbr.na 0.00000000
## min -3.43368777
## max 3.26502216
## range 6.69870992
## sum -19.21363395
## median -0.01469015
## mean -0.02744805
## SE.mean 0.03809545
## CI.mean.0.95 0.07479521
## var 1.01588408
## std.dev 1.00791075
## coef.var -36.72067074
## skewness -0.01377546
## skew.2SE -0.07455510
## kurtosis 0.25549389
## kurt.2SE 0.69236488
## normtest.W 0.99725088
## normtest.p 0.29065117
jarque.bera.test(modelo2$residuals)
##
## Jarque Bera Test
##
## data: modelo2$residuals
## X-squared = 2.0676, df = 2, p-value = 0.3557
BIC(modelo1)
## [1] 2026.18
BIC(modelo2)
## [1] 2037.258
#### By BIC, modelo1 (ARIMA(3,0,1)) is preferred over modelo2 (ARIMA(1,0,4)).
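#### As a hedged cross-check of the BIC comparison (not part of the original output), AIC can
#### be compared the same way, since both models are fitted to the same series:
AIC(modelo1)
AIC(modelo2)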
real <- window(examen, start = 560)   # hold-out stretch of the series, used to evaluate the forecasts
ts.plot(real)
#### The following plot shows the forecast produced by the ARIMA(3,0,1) model for the hold-out
#### period, together with the values it suggests and their different confidence bands.
#### MODEL 1
modelo1.train<-arima(examen[1:560], order=c(3,0,1),include.mean=F)   # fit on the first 560 observations (training sample)
prueba.modelo1 <- forecast(modelo1.train, h = 140, level = c(90,95,99))
plot(prueba.modelo1)
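#### For comparability with MODEL 2 below, the same hold-out accuracy measures can be obtained
#### for MODEL 1 (a sketch, analogous to the accuracy call used for MODEL 2):
accuracy(prueba.modelo1, real)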
#### MODEL 2
modelo2.train<-arima(examen[1:560], order=c(1,0,4),include.mean=F)
prueba.modelo2 <- forecast(modelo2.train, h = 140, level = c(90,95,99))
plot(prueba.modelo2)
accuracy(prueba.modelo2,real)
## ME RMSE MAE MPE MAPE MASE
## Training set -0.009497745 1.022149 0.805749 116.81306 239.54713 0.8168982
## Test set -0.273507187 1.199356 0.960393 99.83772 99.83772 0.9736820
## ACF1 Theil's U
## Training set 0.004382661 NA
## Test set 0.530228157 0.9932497
modelo1.train<-arima(examen, order=c(3,0,1),include.mean=F)   # refit the chosen model on the full series for the final forecast
pronostico.modelo1 <- forecast(modelo1.train, h = 300, level = c(90,95,99))
plot(pronostico.modelo1)
accuracy(pronostico.modelo1)
## ME RMSE MAE MPE MAPE MASE
## Training set -0.02418051 1.004283 0.7951675 92.47113 223.1619 0.8180625
## ACF1
## Training set 0.003444345