1 Measures of dependence in time

1.1 Time series

\[\boldsymbol{x}=\{x_{t_1},x_{t_2},\ldots,x_{t_k},x_{t_1+h},x_{t_2+h},\ldots,x_{t_k+h},\ldots,x_{t_n}\}\]

1.1.1 Joint distribution function

\[ F(c_1,c_2,\ldots,c_n)=\mathcal{P}(x_{t_1}{\leq}c_1,x_{t_2}{\leq}c_2,\ldots,x_{t_n}{\leq}c_n) \]

1.1.2 Under multivariate normality with independent components

\[ \begin{align} F(c_1,c_2,\ldots,c_n)&=\prod_{t=1}^n\Phi(c_t)\\ &=\prod_{t=1}^n\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{c_t}\exp{\left\{-\frac{z^2}{2}\right\}}dz \end{align} \]

1.1.3 Univariate distribution function

\[ F_t(x)=\mathcal{P}\left\{x_t{\leq}x\right\} \]

\[ f_t(x)=\frac{{\partial}F_t(x)}{{\partial}x} \]

1.1.4 Mean (expected value) function

\[ \begin{align} \mu_{x_t}&=E(x_t)\\ &=\int_{-\infty}^{\infty}xf_t(x)dx \end{align} \]

1.1.5 Autocovariance function

\[ \gamma_x(s,t)=E[(x_s-\mu_s)(x_t-\mu_t)] \]

\[ \gamma_x(t,t)=E[(x_t-\mu_t)^2] \]

1.1.6 Autocorrelation function

\[ \begin{align} \rho_x(s,t)&=\frac{E[(x_s-\mu_s)(x_t-\mu_t)]}{\sqrt{E[(x_s-\mu_s)^2]}\sqrt{E[(x_t-\mu_t)^2]}}\\ &=\frac{\gamma_x(s,t)}{\sqrt{\gamma_x(s,s)}\sqrt{\gamma_x(t,t)}} \end{align} \]

1.1.7 Cross-covariance function

\[ \gamma_{xy}(s,t)=E[(x_s-\mu_{xs})(y_t-\mu_{yt})] \]

1.1.8 Cross-correlation function

\[ \begin{align} \rho_{xy}(s,t)&=\frac{E[(x_s-\mu_{xs})(y_t-\mu_{yt})]}{\sqrt{E[(x_s-\mu_{xs})^2]}\sqrt{E[(y_t-\mu_{yt})^2]}}\\ &=\frac{\gamma_{xy}(s,t)}{\sqrt{\gamma_x(s,s)}\sqrt{\gamma_y(t,t)}} \end{align} \]

2 Stationary time series

2.1 Stabilizing the variance

\[\sigma_t=f(\mu_t)\]

\[x_t=e^{\left(x_{t-1}+\omega_t\right)}\]

x <- rep(NA,100)                                           # container for the simulated series
x[1] <- rnorm(n = 1, mean = 0, sd = .009)                  # starting value
for(i in 2:100){x[i] <- expm1(x[i-1] + rnorm(n = 1, mean = 0, sd= .001))}  # x_t = exp(x_{t-1} + w_t) - 1, so log1p(x_t) = x_{t-1} + w_t
plot(x)

\[ \begin{align} \ln{x_t}&= \ln{\left[e^{\left(x_{t-1}+\omega_t\right)}\right]}\\ &=x_{t-1} + \omega_t \end{align} \]

\[\omega_t{\sim}N(\mu_{\omega}=0,\sigma_{\omega}^2=0.001^2)\]

plot(log1p(x))

library(astsa)
acf2(log1p(x))

##      [,1]  [,2]  [,3]  [,4] [,5]  [,6]  [,7] [,8] [,9] [,10] [,11] [,12] [,13]
## ACF  0.95  0.89  0.84  0.79 0.73  0.68  0.63 0.60 0.57  0.54  0.52  0.50  0.46
## PACF 0.95 -0.02 -0.01 -0.09 0.01 -0.01 -0.02 0.12 0.04  0.01  0.00 -0.02 -0.15
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20]
## ACF   0.43  0.39  0.35  0.31  0.27  0.23  0.19
## PACF -0.01 -0.04 -0.06  0.03 -0.05  0.03 -0.13

2.2 Stabilizing the mean

\[x_t=x_{t-1}+\omega_t\]

x <- rep(NA,100)                                           # container for the simulated random walk
x[1] <- rnorm(n = 1, mean = 2, sd = .9)                    # starting value
for(i in 2:100){x[i]<-x[i-1] + rnorm(n = 1, mean = 0, sd= 1)}  # x_t = x_{t-1} + w_t
plot(x)

\[x_t-x_{t-1}=\omega_t\]

\[\omega_t{\sim}N(\mu_{\omega}=0,\sigma_{\omega}^2=1)\]

plot(diff(x))

library(astsa)
acf2(diff(x))

##       [,1]  [,2]  [,3] [,4]  [,5] [,6]  [,7]  [,8] [,9] [,10] [,11] [,12] [,13]
## ACF  -0.09  0.00 -0.01 0.07 -0.05 0.07 -0.01 -0.12 0.07  0.12 -0.05 -0.12  0.02
## PACF -0.09 -0.01 -0.01 0.07 -0.04 0.07  0.01 -0.12 0.06  0.12 -0.03 -0.12 -0.02
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20]
## ACF  -0.03 -0.11 -0.01  0.05 -0.14 -0.04 -0.01
## PACF -0.02 -0.12 -0.05  0.07 -0.10 -0.09 -0.05

\[x_t-x_{t-1}=\omega_t\]

\[x_{t-1}-x_{t-2}=\omega_{t-1}\]

\[ \begin{align} \omega_t-\omega_{t-1}&=(x_t-x_{t-1})-(x_{t-1}-x_{t-2})\\ &=x_t-x_{t-1}-x_{t-1}+x_{t-2}\\ &=x_t-2x_{t-1}+x_{t-2} \end{align} \]

\[ \begin{align} x_t&=2x_{t-1}-x_{t-2}+\omega_t-\omega_{t-1} \end{align} \]

plot(diff(diff(x)))

library(astsa)
acf2(diff(diff(x)))

##       [,1]  [,2]  [,3]  [,4]  [,5]  [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  -0.54  0.04 -0.04  0.09 -0.11  0.09  0.01 -0.13  0.06  0.10 -0.04 -0.10
## PACF -0.54 -0.36 -0.33 -0.17 -0.23 -0.14 -0.01 -0.18 -0.21 -0.06  0.03 -0.07
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20]
## ACF   0.08  0.02 -0.08  0.02  0.12 -0.13  0.03  0.06
## PACF -0.06  0.04 -0.04 -0.14  0.03  0.02 -0.02  0.01

2.3 Strict stationarity

\[\mathcal{P}(x_{t_1}{\leq}c_1,x_{t_2}{\leq}c_2,\ldots,x_{t_k}{\leq}c_k)=\mathcal{P}(x_{t_1+h}{\leq}c_1,x_{t_2+h}{\leq}c_2,\ldots,x_{t_k+h}{\leq}c_k)\]

2.3.1 For \(k=1\)

\[\forall_{s,t}\mathcal{P}(x_{s}{\leq}c)=\mathcal{P}(x_{t}{\leq}c)\]

2.3.2 For \(k=2\)

\[\forall_{s,t,h}\mathcal{P}(x_{s}{\leq}c_1,x_{t}{\leq}c_2)=\mathcal{P}(x_{s+h}{\leq}c_1,x_{t+h}{\leq}c_2)\]

\[\forall_{s,t,h}\gamma_x(s,t)=\gamma_x(s+h,t+h)\]

2.4 Weak stationarity

2.4.1 Mean (expected value) function

\[ \begin{align} \mu_t&=E[x_t]\\ &=\mu \end{align} \]

2.4.2 Autocovariance function

\[ \begin{align} \gamma_x(t+h,t)&=E[(x_{t+h}-\mu)(x_{t}-\mu)]\\ &\stackrel{t=0}{=}E[(x_{h}-\mu)(x_{0}-\mu)]\\ &=\gamma_x(h,0)\\ &=\gamma_x(h) \end{align} \]

\[\gamma_x(0,0)<\infty\]

2.4.3 Autocorrelation function

\[ \begin{align} \rho_x(t+h,t)&=\frac{E[(x_{t+h}-\mu)(x_{t}-\mu)]}{\sqrt{E[(x_{t+h}-\mu)^2]}\sqrt{E[(x_{t}-\mu)^2]}}\\ &=\frac{\gamma_x(t+h,t)}{\sqrt{\gamma_x(t+h,t+h)\gamma_x(t,t)}}\\ &=\frac{\gamma_x(h)}{\gamma_x(0)}\\ &=\rho(h) \end{align} \]

\[-1{\leq}\rho(h){\leq}+1\]

3 Prediction using the cross-correlation function

\[y_t=Ax_{t-l}+\omega_t\]

\[\mu_x=0=\mu_y\]

\[ \begin{align} \gamma_{yx}(h)&=E[y_{t+h}x_t]\\ &=E[(Ax_{t+h-l}+\omega_{t+h})x_t]\\ &=AE[x_{t+h-l}x_t]+E[\omega_{t+h}x_t]\\ &=A\gamma_x(h-l) \end{align} \]
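
A minimal numerical sketch of this result (all values illustrative): taking \(x_t\) as white noise makes \(\gamma_x(h-l)\) vanish except at \(h=l\), so the sample cross-correlation of \(y\) against \(x\) should spike near lag \(l\).

set.seed(1)
n <- 200; A <- 2; l <- 5                         # illustrative amplitude and lead
x <- rnorm(n)                                    # x_t: white noise
y <- A*c(rep(0,l), x[1:(n-l)]) + rnorm(n)        # y_t = A x_{t-l} + w_t
ccf(y, x)                                        # sample CCF; the spike appears near lag +l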

3.1 A linear process

\[ x_t=\mu+\sum_{j=-\infty}^{\infty}\psi_j\omega_{t-j} \]

\[ \sum_{j=-\infty}^{\infty}|\psi_j|<\infty \]

\[ \begin{align} \gamma_x(t+h,t)&=E[(x_{t+h}-\mu)(x_{t}-\mu)]\\ &=E\left[\left(\mu+\sum_{j=-\infty}^{\infty}\psi_{j}\omega_{t+h-j}-\mu\right)\left(\mu+\sum_{k=-\infty}^{\infty}\psi_k\omega_{t-k}-\mu\right)\right]\\ &=E\left[\sum_{j=-\infty}^{\infty}\psi_{j}\omega_{t+h-j}\sum_{k=-\infty}^{\infty}\psi_k\omega_{t-k}\right]\\ &=\sum_{j=-\infty}^{\infty}\sum_{k=-\infty}^{\infty}\psi_{j}\psi_kE\left[\omega_{t+h-j}\omega_{t-k}\right]\\ &=\sum_{k=-\infty}^{\infty}\psi_{k+h}\psi_kE\left[\omega_{t-k}\omega_{t-k}\right]\\ &=\sum_{k=-\infty}^{\infty}\psi_{k+h}\psi_k\sigma_{\omega}^{2}\\ &=\sigma_{\omega}^{2}\sum_{j=-\infty}^{\infty}\psi_{j+h}\psi_j \end{align} \]

3.2 A Gaussian process

\[f(\boldsymbol{x})=(2\pi)^{-\frac{n}{2}}|\Gamma|^{-\frac{1}{2}}\exp{\left\{{-\frac{1}{2}\left(\boldsymbol{x}-\boldsymbol{\mu}\right)^{t}\Gamma^{-1}\left(\boldsymbol{x}-\boldsymbol{\mu}\right)}\right\}}\]

4 Estimating the correlation

\[\bar{x}=\frac{1}{n}\sum_{t=1}^{n}x_t\]

4.1 Sample autocovariance function

\[\widehat{\gamma}(h)=n^{-1}\sum_{t=1}^{n-h}{(x_{t+h}-\bar{x})(x_{t}-\bar{x})}\]

4.2 Sample autocorrelation function

\[ \begin{align} \widehat{\rho}(h)&=\frac{n^{-1}\sum_{t=1}^{n-h}{(x_{t+h}-\bar{x})(x_{t}-\bar{x})}}{n^{-1}\sum_{t=1}^{n}{(x_{t}-\bar{x})(x_{t}-\bar{x})}}\\ &=\frac{\widehat{\gamma}(h)}{\widehat{\gamma}(0)} \end{align} \]

\[\widehat{\rho}(h)\stackrel{approx}{\sim}N\left(0,\frac{1}{n}\right)\]

\[\mathcal{P}\left(-Z_{1-\frac{\alpha}{2}}\frac{1}{\sqrt{n}}<\widehat{\rho}(h)<Z_{1-\frac{\alpha}{2}}\frac{1}{\sqrt{n}}\right)=1-\alpha\]
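
A minimal sketch (simulated series, illustrative \(n\)): the sample ACF computed directly from the definitions above, compared with acf(), together with the half-width of the approximate band.

set.seed(1)
n <- 200
x <- rnorm(n)
xbar <- mean(x)
gamma.hat <- function(h) sum((x[(1+h):n]-xbar)*(x[1:(n-h)]-xbar))/n   # sample autocovariance
rho.hat <- sapply(1:10, gamma.hat)/gamma.hat(0)                        # sample autocorrelation
rho.hat
acf(x, lag.max=10, plot=FALSE)$acf[2:11]                               # same values from acf()
qnorm(0.975)/sqrt(n)                                                   # half-width of the 95% band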

4.3 Sample cross-covariance function

\[\widehat{\gamma}_{xy}(h)=n^{-1}\sum_{t=1}^{n-h}{(x_{t+h}-\bar{x})(y_{t}-\bar{y})}\]

4.4 Sample cross-correlation function

\[ \begin{align} \widehat{\rho}_{xy}(h)&=\frac{n^{-1}\sum_{t=1}^{n-h}{(x_{t+h}-\bar{x})(y_{t}-\bar{y})}}{\sqrt{n^{-1}\sum_{t=1}^{n}{(x_{t}-\bar{x})(x_{t}-\bar{x})}n^{-1}\sum_{t=1}^{n}{(y_{t}-\bar{y})(y_{t}-\bar{y})}}}\\ &=\frac{\widehat{\gamma}_{xy}(h)}{\sqrt{\widehat{\gamma}_{x}(0)\widehat{\gamma}_{y}(0)}} \end{align} \]

\[\widehat{\rho}_{xy}(h)\stackrel{approx}{\sim}N\left(0,\frac{1}{n}\right)\]

5 Multidimensional time series

\[\boldsymbol{x_{t}}=\{\boldsymbol{x}_{t_1},\boldsymbol{x}_{t_2},\ldots,\boldsymbol{x}_{t_k},\boldsymbol{x}_{t_1+h},\boldsymbol{x}_{t_2+h},\ldots,\boldsymbol{x}_{t_k+h},\ldots,\boldsymbol{x}_{t_n}\}\]

\[\boldsymbol{x_{t}}\in\mathbb{R}^p\]

\[\boldsymbol{\mu}=E[\boldsymbol{x_{t}}]\]

\[\Gamma(h)=E[\left(\boldsymbol{x_{t+h}}-\boldsymbol{\mu}\right)\left(\boldsymbol{x_{t}}-\boldsymbol{\mu}\right)^t]\]

\[\gamma_{ij}(h)=E[(x_{t+h,i}-\mu_i)(x_{t,j}-\mu_j)]\]

\[\gamma_{ij}(h)=\gamma_{ji}(-h)\]

\[\Gamma(-h)=\Gamma^{t}(h)\]

\[\widehat{\Gamma}(h)=n^{-1}\sum_{t=1}^{n-h}\left(\boldsymbol{x_{t+h}}-\boldsymbol{\bar{x}}\right)\left(\boldsymbol{x_{t}}-\boldsymbol{\bar{x}}\right)^t\]

\[\boldsymbol{\bar{x}}=n^{-1}\sum_{t=1}^{n}\boldsymbol{x_{t}}\]

\[\widehat{\Gamma}(-h)=\widehat{\Gamma}^{t}(h)\]
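
A minimal sketch (simulated bivariate white noise): the sample cross-covariance matrix \(\widehat{\Gamma}(h)\) computed from the definition above; acf() on a multivariate series returns the same quantities arranged by pair of components.

set.seed(1)
n <- 300
X <- cbind(x1=rnorm(n), x2=rnorm(n))
xbar <- colMeans(X)
Gamma.hat <- function(h){
  Xc <- sweep(X, 2, xbar)                        # center each component
  t(Xc[(1+h):n,]) %*% Xc[1:(n-h),]/n             # (1/n) sum (x_{t+h}-xbar)(x_t-xbar)'
}
Gamma.hat(1)
acf(X, type="covariance", plot=FALSE)            # same quantities, arranged by pair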

6 Classical regression in the time series context

\[{x}_{t}={\beta}_{1}z_{t1}+{\beta}_{2}z_{t2}+\cdots+{\beta}_{q}z_{tq}+{\omega}_{t}\]

6.1 Trend estimation

\[{x}_{t}={\beta}_{1}+{\beta}_{2}t+{\omega}_{t}\]

6.2 Matrix notation

\[x_t=\boldsymbol{\beta}^t\boldsymbol{z}_t+\omega_t\]

\[\omega_t{\sim}iid(0,\sigma_\omega^2)\]

6.3 Residual sum of squares (RSS)

\[RSS=\sum_{t=1}^{n}\left(x_t-\boldsymbol{\beta}^t\boldsymbol{z}_t\right)^2\]

6.4 Normal equations

\[ \begin{align} \left(\sum_{t=1}^{n}\boldsymbol{z}_t\boldsymbol{z}_t^t\right)\widehat{\boldsymbol{\beta}}&=\sum_{t=1}^{n}\boldsymbol{z}_t\boldsymbol{x}_t\\ \left(\boldsymbol{Z}^{t}\boldsymbol{Z}\right)\widehat{\boldsymbol{\beta}}&=\boldsymbol{Z}^{t}\boldsymbol{x}\\ \widehat{\boldsymbol{\beta}}&=\left(\boldsymbol{Z}^{t}\boldsymbol{Z}\right)^{-1}\boldsymbol{Z}^{t}\boldsymbol{x} \end{align} \]
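
A minimal sketch of the normal equations for a simulated linear-trend series (illustrative values): solving \(\left(\boldsymbol{Z}^{t}\boldsymbol{Z}\right)\widehat{\boldsymbol{\beta}}=\boldsymbol{Z}^{t}\boldsymbol{x}\) directly reproduces the lm() coefficients.

set.seed(1)
n <- 100; tt <- 1:n
x <- 2 + 0.5*tt + rnorm(n, sd=3)                 # x_t = beta1 + beta2 t + w_t
Z <- cbind(1, tt)                                # design matrix
solve(t(Z) %*% Z, t(Z) %*% x)                    # (Z'Z)^{-1} Z'x
coef(lm(x ~ tt))                                 # lm() solves the same equations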

6.5 Residual sum of squares (RSS)

\[ \begin{align} {\text{rank}(\boldsymbol{Z}^{t}\boldsymbol{Z})=q}{\implies}RSS&=\left(\boldsymbol{x}-\boldsymbol{Z}\widehat{\boldsymbol{\beta}}\right)^{t}\left(\boldsymbol{x}-\boldsymbol{Z}\widehat{\boldsymbol{\beta}}\right)\\ &=\boldsymbol{x}^{t}\boldsymbol{x}-\widehat{\boldsymbol{\beta}}^{t}\boldsymbol{Z}^{t}\boldsymbol{x}\\ &=\boldsymbol{x}^{t}\boldsymbol{x}-\boldsymbol{x}^{t}\boldsymbol{Z}\left(\boldsymbol{Z}^{t}\boldsymbol{Z}\right)^{-1} \boldsymbol{Z}^{t}\boldsymbol{x}\\ \end{align} \]

\[\text{Using }OLS{\implies}E\left[\widehat{\boldsymbol{\beta}}\right]=\boldsymbol{\beta}\text{ and its variance is minimal among all linear unbiased estimators}\]

6.6 Covariance of the estimated parameters

\[ \begin{align} C\left[\widehat{\boldsymbol{\beta}}\right]&=\sigma_{\omega}^2\left(\sum_{t=1}^{n}\boldsymbol{z}_t\boldsymbol{z}_t^t\right)^{-1}\\ &=\sigma_{\omega}^2\left(\boldsymbol{Z}^{t}\boldsymbol{Z}\right)^{-1} \end{align} \]

\[ \begin{align} s_{\omega}^2&=\frac{RSS}{n-q}\\ \widehat{C}\left[\widehat{\boldsymbol{\beta}}\right]&=s_{\omega}^2\left(\boldsymbol{Z}^{t}\boldsymbol{Z}\right)^{-1} \end{align} \]

6.7 Estimator of the error variance

\[\widehat{\sigma}_{\omega}^2=\frac{RSS}{n}\]

\[ \begin{align} E\left[s_{\omega}^2\right]&={\sigma}_{\omega}^2 \end{align} \]

Source | df | Sum of Squares | Mean Square
\({z}_{t,q_1+1},\ldots,z_{t,q}\) | \(q-{q}_{1}\) | \({SS}_{reg}={RSS}_{1}-RSS\) | \({MS}_{reg}=\frac{{SS}_{reg}}{q-q_1}\)
\(Error\) | \(n-q\) | \(RSS\) | \({s}_{\omega}^{2}=\frac{RSS}{n-q}\)
\(Total\) | \(n-{q}_{1}\) | \({RSS}_{1}\) |

\[\frac{RSS}{\sigma_{\omega}^{2}}\stackrel{\omega_t{\sim}N(0,\sigma_{\omega}^2)}{\sim}\chi_{n-q}^2\]

\(s_{\omega}^2\) and \(\widehat{\boldsymbol{\beta}}\) are independent

\[\frac{\widehat{\beta}_{i}-{\beta}_{i}}{s_{\omega}\sqrt{{c}_{ii}}}{\sim}t_{n-q}\]

\[{C}_{(q{\times}q)}=\{{c}_{ij}\}_{i,j=1,\ldots,q}\]

\[\boldsymbol{z}_{1t}=\left({z}_{t1},{z}_{t2},\ldots,{z}_{tq_1}\right)^{t}_{q_{1}<q}\]

\[x_t=\boldsymbol{\beta}_{1}^{t}\boldsymbol{z}_{1t}+\omega_t\]

\[\boldsymbol{\beta}_{1}=\left(\beta_1,\beta_2,\ldots,\beta_{q_{1}}\right)^{t}_{q_{1}<q}\]

\[ F_{q-q_1,n-q}=\frac{\frac{{RSS}_{1}-{RSS}}{q-q_{1}}}{\frac{RSS}{n-q}}=\frac{{MS}_{reg}}{s_{\omega}^2}{\sim}{\mathcal{F}}_{q-q_1,n-q} \]

6.7.1 Special case: \(q_1=1\), \(z_{1t}=1\)

\[x_t=\beta_1+\omega_t\]

\[{R}_{xx}^{2}=\frac{{RSS}_{0}-{RSS}}{{RSS}_{0}}\]

\[{RSS}_{0}={\sum}_{t=1}^n\left(x_t-\bar{x}\right)^{2}\]

\[\widehat{\sigma}_{k}^{2}=\frac{{RSS}_{k}}{n}\]

6.8 Akaike information criterion \((AIC)\)

\[AIC=\ln{\widehat{\sigma}_{k}^{2}}+\frac{n+2k}{n}\]

6.8.1 Bias-corrected \(AIC\) \((AIC_c)\)

\[AIC_c=\ln{\widehat{\sigma}_{k}^{2}}+\frac{n+k}{n-k-2}\]

6.9 Schwarz information criterion \((SIC)\)

\[SIC=\ln{\widehat{\sigma}_{k}^{2}}+\frac{k\ln{n}}{n}\]
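
A minimal sketch of the three criteria using the definitions above, with \(\widehat{\sigma}_{k}^{2}=\frac{{RSS}_{k}}{n}\) and \(k\) taken as the number of regression coefficients (simulated, illustrative data).

set.seed(1)
n <- 100; tt <- 1:n
x <- 2 + 0.5*tt + rnorm(n, sd=3)
crit <- function(fit, k){
  sigma2.k <- sum(resid(fit)^2)/n
  c(AIC  = log(sigma2.k) + (n+2*k)/n,
    AICc = log(sigma2.k) + (n+k)/(n-k-2),
    SIC  = log(sigma2.k) + k*log(n)/n)
}
crit(lm(x ~ tt), k=2)                            # linear trend: two regression coefficients
crit(lm(x ~ tt + I(tt^2)), k=3)                  # quadratic trend: one extra coefficient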

6.10 Exploratory data analysis

\[x_t=\mu_t+y_t\]

\[\widehat{y}_t=x_t-\widehat{\mu}_t\]

6.10.1 When the variance is not constant

\[ {y}_{t}= \begin{cases} \frac{\left({x}_{t}^{\lambda}-1\right)}{\lambda}&\text{ if }\lambda{\neq}0\\ \ln{{x}_{t}}&\text{ if }\lambda{=}0 \end{cases} \]
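
A minimal sketch of this power transformation; the series and the values of \(\lambda\) are illustrative.

boxcox.y <- function(x, lambda){
  if(lambda != 0) (x^lambda - 1)/lambda else log(x)
}
x <- rlnorm(100)                                 # a positive, right-skewed series
par(mfrow=c(1,3))
hist(x); hist(boxcox.y(x, 0.5)); hist(boxcox.y(x, 0))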

6.10.2 Remove the trend or difference the series?

\[x_t=\mu_t+y_t\]

6.10.2.1 \({\mu}_{t}={\delta}+{\mu}_{t-1}+{\omega}_{t}\)

\[{\mu}_{t}-{\mu}_{t-1}={\delta}+{\omega}_{t}\]

\[ \begin{align} {x}_{t}-{x}_{t-1}&={\mu}_{t}+{y}_{t}-\left({\mu}_{t-1}+{y}_{t-1}\right)\\ &={\mu}_{t}-{\mu}_{t-1}+{y}_{t}-{y}_{t-1}\\ &={\delta}+{\omega}_{t}+{y}_{t}-{y}_{t-1} \end{align} \]

6.10.2.2 \(\mu_t=\beta_1+\beta_2t\)

\[\widehat{\mu}_t=\widehat{\beta}_1+\widehat{\beta}_2t\]

\[ \begin{align} y_t&=x_t-\mu_t\\ &=x_t-\beta_1-\beta_2t \end{align} \]

\[ \begin{align} \widehat{y}_t&=x_t-\widehat{\mu}_t\\ &=x_t-\widehat{\beta}_1-\widehat{\beta}_2t \end{align} \]

6.10.2.3 \(\mu_t=\beta_1+\beta_2t\)

\[ \begin{align} {\mu}_{t}-{\mu}_{t-1}&=\beta_1+\beta_2t-\beta_1-\beta_2(t-1)\\ &=\beta_1+\beta_2t-\beta_1-\beta_2t+\beta_2\\ &=\beta_2 \end{align} \]

\[ \begin{align} {x}_{t}-{x}_{t-1}&={\mu}_{t}+{y}_{t}-\left({\mu}_{t-1}+{y}_{t-1}\right)\\ &={\mu}_{t}-{\mu}_{t-1}+{y}_{t}-{y}_{t-1}\\ &=\beta_2+{y}_{t}-{y}_{t-1} \end{align} \]

\[{\nabla}{x}_{t}={x}_{t}-{x}_{t-1}\]
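
A minimal sketch contrasting the two options on a simulated series with a linear trend plus AR(1) noise (illustrative parameters): detrending estimates \(y_t\), while differencing yields \(\beta_2+y_t-y_{t-1}\).

set.seed(1)
n <- 200; tt <- 1:n
x <- 1 + 0.2*tt + arima.sim(list(ar=0.7), n)     # mu_t = beta1 + beta2 t plus AR(1) noise
detrended   <- resid(lm(x ~ tt))                 # hat(y)_t = x_t - hat(mu)_t
differenced <- diff(x)                           # nabla x_t = beta2 + y_t - y_{t-1}
par(mfrow=c(1,2)); plot.ts(detrended); plot.ts(differenced)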

6.11 The backshift operator

\[{B}{x}_{t}={x}_{t-1}\]

\[ \begin{align} {B}^{2}{x}_{t}&={B}({B}{x}_{t})\\ &={B}{x}_{t-1}\\ &={x}_{t-2} \end{align} \]

\[ \begin{align} {B}^{k}{x}_{t}&={B}({B}^{k-1}{x}_{t})\\ &={B}{x}_{t-(k-1)}\\ &={x}_{t-k} \end{align} \]

6.12 The difference operator

\[{\nabla}{x}_{t}=\left(1-{B}\right){x}_{t}\]

\[ \begin{align} {\nabla}^{2}{x}_{t}&=\left(1-{B}\right)^{2}{x}_{t}\\ &=\left(1-{2}{B}+{B}^{2}\right){x}_{t}\\ &={x}_{t}-2{x}_{t-1}+{x}_{t-2} \end{align} \]

\[ \begin{align} {\nabla}^{2}{x}_{t}&={\nabla}\left({\nabla}{x}_{t}\right)\\ &={\nabla}\left({x}_{t}-{x}_{t-1}\right)\\ &=\left({x}_{t}-{x}_{t-1}\right)-\left({x}_{t-1}-{x}_{t-2}\right)\\ &={x}_{t}-2{x}_{t-1}+{x}_{t-2} \end{align} \]

\[{\nabla}^{d}=\left(1-{B}\right)^{d}\]
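
In R, diff() implements this operator; a minimal check on an arbitrary series:

x <- cumsum(rnorm(50))
d2 <- diff(x, differences=2)                     # nabla^2 x_t
head(d2)
head(x[3:50] - 2*x[2:49] + x[1:48])              # x_t - 2 x_{t-1} + x_{t-2}, same values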

6.13 Signal in noise

\[x_t=A\cos{(2{\pi}{w}t+\phi)}+{\omega}_t\]

XT <- function(A,w,t,phi,omega){A*cos(2*pi*w*t+phi)+omega}             # signal A*cos(2*pi*w*t + phi) plus noise
xt <- XT(A=2,w=1/50,t=1:500,phi=.6*pi,omega=rnorm(n=500,mean=0,sd=5))  # one cycle every 50 observations
plot(xt,type="l",lty=2);lines(2*cos(2*pi*1/50*1:500+.6*pi),lwd=3)      # data (dashed) and the underlying signal (thick)

\[ \begin{align} A\cos{(2{\pi}{w}t+\phi)}&=A\cos{(\phi)}\cos{(2{\pi}{w}t)}-A\sin{(2{\pi}{w}t)}\sin{(\phi)}\\ &=A\cos{(\phi)}\cos{(2{\pi}{w}t)}-A\sin{(\phi)}\sin{(2{\pi}{w}t)}\\ &={\beta}_{1}\cos{(2{\pi}{w}t)}+{\beta}_{2}\sin{(2{\pi}{w}t)} \end{align} \]

beta1 <- 2*cos(.6*pi)
beta1
## [1] -0.618034
beta2 <- -2*sin(.6*pi)
beta2
## [1] -1.902113

\[ \begin{align} {x}_t&={\beta}_{1}\cos{(2{\pi}{w}t)}+{\beta}_{2}\sin{(2{\pi}{w}t)}+{\omega}_{t} \end{align} \]

data <- data.frame("xt1"=cos((2*pi*1:500)/50),"xt2"=sin((2*pi*1:500)/50),"xt"=xt)
model <- lm(xt~xt1+xt2-1,data=data)
summary(model)
## 
## Call:
## lm(formula = xt ~ xt1 + xt2 - 1, data = data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -17.1686  -3.0347   0.0848   3.0172  15.6676 
## 
## Coefficients:
##     Estimate Std. Error t value Pr(>|t|)    
## xt1  -0.6688     0.3010  -2.221   0.0268 *  
## xt2  -1.9279     0.3010  -6.404 3.51e-10 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 4.76 on 498 degrees of freedom
## Multiple R-squared:  0.08447,    Adjusted R-squared:  0.08079 
## F-statistic: 22.97 on 2 and 498 DF,  p-value: 2.863e-10

\[ \begin{align} \widehat{x}_t&=-0.6688_{(0.301)}\cos{(2{\pi}{w}t)}-1.9279_{(0.301)}\sin{(2{\pi}{w}t)} \end{align} \]

\[ \begin{align} \widehat{\beta}_{1}&=\frac{{\sum}_{t=1}^{n}x_t{\cos{\left(\frac{2{\pi}t}{50}\right)}}}{{\sum}_{t=1}^{n}{\cos^{2}{\left(\frac{2{\pi}t}{50}\right)}}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\cos{\left(\frac{2{\pi}t}{50}\right)}} \end{align} \]

\[ \begin{align} \widehat{\beta}_{2}&=\frac{{\sum}_{t=1}^{n}x_t{\sin{\left(\frac{2{\pi}t}{50}\right)}}}{{\sum}_{t=1}^{n}{\sin^{2}{\left(\frac{2{\pi}t}{50}\right)}}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\sin{\left(\frac{2{\pi}t}{50}\right)}} \end{align} \]
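
A minimal check of these closed forms against the lm() estimates reported above, using the series xt simulated earlier (\(n=500\), frequency \(1/50\)):

n <- 500; tt <- 1:n
(2/n)*sum(xt*cos(2*pi*tt/50))                    # matches the coefficient of xt1
(2/n)*sum(xt*sin(2*pi*tt/50))                    # matches the coefficient of xt2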

\[ \begin{align} \widehat{\beta}_{1}{\left(\frac{j}{n}\right)}&=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\cos{\left(2{\pi}t\frac{j}{n}\right)}}\text{; }n=500\text{, }j=1,\ldots,\frac{n}{2}-1 \end{align} \]

\[ \begin{align} \widehat{\beta}_{2}{\left(\frac{j}{n}\right)}&=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\sin{\left(2{\pi}t\frac{j}{n}\right)}}\text{; }n=500\text{, }j=1,\ldots,\frac{n}{2}-1 \end{align} \]

\[ \begin{align} \widehat{\beta}_{1}{\left(0\right)}&=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\cos{\left(0\right)}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t \end{align} \]

\[ \begin{align} \widehat{\beta}_{1}{\left(\frac{\frac{n}{2}}{n}\right)}&=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\cos{\left(2{\pi}t\frac{\frac{n}{2}}{n}\right)}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\cos{\left(2{\pi}t\frac{\frac{n}{2}}{\frac{n}{1}}\right)}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\cos{\left({\pi}t\right)}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}{\left(-1\right)}^{t}x_t \end{align} \]

\[ \begin{align} \widehat{\beta}_{2}{\left(0\right)}&=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\sin{\left(0\right)}}\\ &=0 \end{align} \]

\[ \begin{align} \widehat{\beta}_{2}{\left(\frac{\frac{n}{2}}{n}\right)}&=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\sin{\left(2{\pi}t\frac{\frac{n}{2}}{n}\right)}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\sin{\left(2{\pi}t\frac{\frac{n}{2}}{\frac{n}{1}}\right)}}\\ &=\frac{2}{n}{\sum}_{t=1}^{n}x_t{\sin{\left({\pi}t\right)}}\\ &=0 \end{align} \]

\[P{\left(\frac{j}{n}\right)}=\widehat{\beta}_{1}^{2}{\left(\frac{j}{n}\right)}+\widehat{\beta}_{2}^{2}{\left(\frac{j}{n}\right)}\]

\[ \begin{align} {x}_t&={\sum}_{j=0}^{\frac{n}{2}}{\left[{\beta}_{1}\left(\frac{j}{n}\right)\cos{\left(2{\pi}t\frac{j}{n}\right)}+{\beta}_{2}\left(\frac{j}{n}\right)\sin{\left(2{\pi}t\frac{j}{n}\right)}\right]} \end{align} \]

6.13.1 Discrete Fourier transform

\[d{\left(\frac{j}{n}\right)}=\frac{1}{\sqrt{n}}{\sum}_{t=1}^{n}{{x}_{t}\exp{\left(-2{\pi}it\frac{j}{n}\right)}}\]

\[\left|{d\left(\frac{j}{n}\right)}\right|^{2}=\frac{1}{n}\left[{\sum}_{t=1}^{n}{{x}_{t}\cos{\left(\frac{2{\pi}tj}{n}\right)}}\right]^{2}+\frac{1}{n}\left[{\sum}_{t=1}^{n}{{x}_{t}\sin{\left(\frac{2{\pi}tj}{n}\right)}}\right]^{2}\]

\[{I\left(\frac{j}{n}\right)}=\left|{d\left(\frac{j}{n}\right)}\right|^{2}\]

\[{P\left(\frac{j}{n}\right)}=\frac{4}{n}{I\left(\frac{j}{n}\right)}\]

6.13.1.1 Periodogram and scaled periodogram

I <- abs(fft(xt)/sqrt(500))**2                   # periodogram I(j/n)
P <- (4/500)*abs(fft(xt)/sqrt(500))**2           # scaled periodogram P(j/n) = (4/n) I(j/n)
f <- 0:250/500                                   # Fourier frequencies j/n up to 1/2
plot(f,P[1:251],type="l",xlab="Frequency",ylab="");abline(v=seq(0,.5,.02),lty=3)

6.13.1.2 The periodogram as a signal detector

6.13.1.2.1 \({x}_{t}=\cos{\left(2{\pi}t\frac{1}{100}\right)}+{\omega}_{t}\)
xt <- XT(A=2,w=1/100,t=1:100,phi=0*pi,omega=rnorm(n=100,mean=0,sd=5))
plot(xt,type="l",lty=2);lines(2*cos(2*pi*1/100*1:100+0*pi),lwd=3)

6.13.1.2.2 \({x}_{t}=\cos{\left(2{\pi}t\frac{2}{100}\right)}+{\omega}_{t}\)
xt <- XT(A=2,w=2/100,t=1:100,phi=0*pi,omega=rnorm(n=100,mean=0,sd=5))
plot(xt,type="l",lty=2);lines(2*cos(2*pi*2/100*1:100+0*pi),lwd=3)

6.13.1.2.3 \({x}_{t}=\cos{\left(2{\pi}t\frac{3}{100}\right)}+{\omega}_{t}\)
xt <- XT(A=2,w=3/100,t=1:100,phi=0*pi,omega=rnorm(n=100,mean=0,sd=5))
plot(xt,type="l",lty=2);lines(2*cos(2*pi*3/100*1:100+0*pi),lwd=3)

6.13.1.2.4 \({x}_{t}=\cos{\left(2{\pi}t\frac{4}{100}\right)}+{\omega}_{t}\)
xt <- XT(A=2,w=4/100,t=1:100,phi=0*pi,omega=rnorm(n=100,mean=0,sd=5))
plot(xt,type="l",lty=2);lines(2*cos(2*pi*4/100*1:100+0*pi),lwd=3)

6.13.1.2.5 \({x}_{t}=\cos{\left(2{\pi}t\frac{5}{100}\right)}+{\omega}_{t}\)
xt <- XT(A=2,w=5/100,t=1:100,phi=0*pi,omega=rnorm(n=100,mean=0,sd=5))
plot(xt,type="l",lty=2);lines(2*cos(2*pi*5/100*1:100+0*pi),lwd=3)

6.14 Smoothing in time series

\[m_t={\sum}_{j=-k}^{k}{a}_{j}{x}_{t-j}\text{; with }{a}_{j}={a}_{-j}\text{ and }{\sum}_{j=-k}^{k}{a}_{j}=1\]

6.14.1 Periodic and polynomial regression smoothing

\[{x}_{t}={f}_{t}+{y}_{t}\]

6.14.2 Polynomial regression smoothing

\[{f}_{t}={\beta}_{0}+{\beta}_{1}t+\cdots+{\beta}_{p}t^{p}\]

6.14.3 Periodic regression smoothing

\[{f}_{t}={\alpha}_{0}+{\alpha}_{1}\cos{\left(2{\pi}{w}_{1}t\right)}+{\gamma}_{1}\sin{\left(2{\pi}{w}_{1}t\right)}+{\alpha}_{2}\cos{\left(2{\pi}{w}_{2}t\right)}+{\gamma}_{2}\sin{\left(2{\pi}{w}_{2}t\right)}+\cdots+{\alpha}_{p}\cos{\left(2{\pi}{w}_{p}t\right)}+{\gamma}_{p}\sin{\left(2{\pi}{w}_{p}t\right)}\]

6.14.4 Periodic and polynomial regression smoothing

\[ \begin{align} {f}_{t}&={\beta}_{0}+\cdots+{\beta}_{p}t^{p}+{\alpha}_{0}+{\alpha}_{1}\cos{\left(2{\pi}{w}_{1}t\right)}+{\gamma}_{1}\sin{\left(2{\pi}{w}_{1}t\right)}+\cdots+{\alpha}_{p}\cos{\left(2{\pi}{w}_{p}t\right)}+{\gamma}_{p}\sin{\left(2{\pi}{w}_{p}t\right)}\\ &={\delta}_{0}+{\beta}_{1}t+\cdots+{\beta}_{p}t^{p}+{\alpha}_{1}\cos{\left(2{\pi}{w}_{1}t\right)}+{\gamma}_{1}\sin{\left(2{\pi}{w}_{1}t\right)}+\cdots+{\alpha}_{p}\cos{\left(2{\pi}{w}_{p}t\right)}+{\gamma}_{p}\sin{\left(2{\pi}{w}_{p}t\right)}\text{, }{\delta}_{0}={\alpha}_{0}+{\beta}_{0} \end{align} \]

6.14.4.1 Estimation via periodic and polynomial regression smoothing

\[\widehat{f}_{t}=\widehat{\delta}_{0}+\widehat{\beta}_{1}t+\cdots+\widehat{\beta}_{p}t^{p}+\widehat{\alpha}_{1}\cos{\left(2{\pi}{w}_{1}t\right)}+\widehat{\gamma}_{1}\sin{\left(2{\pi}{w}_{1}t\right)}+\cdots+\widehat{\alpha}_{p}\cos{\left(2{\pi}{w}_{p}t\right)}+\widehat{\gamma}_{p}\sin{\left(2{\pi}{w}_{p}t\right)}\]

YT <- function(delta0,beta1,J,beta2,beta3,alpha1,N,gamma1,omega){
  # cubic trend plus one periodic component of period N, plus noise
  delta0+beta1*J+beta2*J**2+beta3*J**3+alpha1*cos(2*pi*J/N)+gamma1*sin(2*pi*J/N)+omega
  }

\[{x}_{t}={\delta}_{0}+{\beta}_{1}t+\cdots+{\beta}_{p}t^{p}+{\alpha}_{1}\cos{\left(2{\pi}{w}_{1}t\right)}+{\gamma}_{1}\sin{\left(2{\pi}{w}_{1}t\right)}+\cdots+{\alpha}_{p}\cos{\left(2{\pi}{w}_{p}t\right)}+{\gamma}_{p}\sin{\left(2{\pi}{w}_{p}t\right)}+{\omega}_{t}\]

\[{\omega}_{t}{\sim}N\left(0,{\sigma}_{\omega}^{2}\right)\]

delta0 <- rnorm(n=1,mean=0,sd=runif(n=1,min=10**0,max=10**1));delta0
## [1] 2.773789
beta1 <- rnorm(n=1,mean=0,sd=runif(n=1,min=10**1,max=10**3));beta1
## [1] 1391.147
J <- 1:108;J
##   [1]   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18
##  [19]  19  20  21  22  23  24  25  26  27  28  29  30  31  32  33  34  35  36
##  [37]  37  38  39  40  41  42  43  44  45  46  47  48  49  50  51  52  53  54
##  [55]  55  56  57  58  59  60  61  62  63  64  65  66  67  68  69  70  71  72
##  [73]  73  74  75  76  77  78  79  80  81  82  83  84  85  86  87  88  89  90
##  [91]  91  92  93  94  95  96  97  98  99 100 101 102 103 104 105 106 107 108
beta2 <- rnorm(n=1,mean=0,sd=runif(n=1,min=10**1,max=10**3));beta2
## [1] -497.532
beta3 <- rnorm(n=1,mean=0,sd=runif(n=1,min=10**1,max=10**3));beta3
## [1] 164.6063
alpha1 <- rnorm(n=1,mean=0,sd=runif(n=1,min=10**3,max=10**5));alpha1
## [1] -39098.81
N <- 52;N
## [1] 52
gamma1 <- rnorm(n=1,mean=0,sd=runif(n=1,min=10**3,max=10**5));gamma1
## [1] 33022.52
omega <- rnorm(n=J,mean=0,sd=runif(n=1,min=10**5,max=10**7));boxplot(omega,horizontal=TRUE,col="#48D1CC")

yt <- YT(delta0,beta1,J,beta2,beta3,alpha1,N,gamma1,omega)
t1=J**1;t2=J**2;t3=J**3;model0=lm(yt~t1+t2+t3);summary(model0)
## 
## Call:
## lm(formula = yt ~ t1 + t2 + t3)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -20283000  -3369158   -173828   3343749  22374207 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 1714916.68 2722818.73   0.630    0.530    
## t1          -113484.77  215338.04  -0.527    0.599    
## t2             1997.13    4579.22   0.436    0.664    
## t3              149.01      27.62   5.394 4.34e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 6830000 on 104 degrees of freedom
## Multiple R-squared:  0.9867, Adjusted R-squared:  0.9863 
## F-statistic:  2564 on 3 and 104 DF,  p-value: < 2.2e-16
plot(yt);lines(model0$fitted.values)

c=cos(2*pi*J/N);s=sin(2*pi*J/N);model1=lm(yt~t1+t2+t3+c+s);summary(model1)
## 
## Call:
## lm(formula = yt ~ t1 + t2 + t3 + c + s)
## 
## Residuals:
##       Min        1Q    Median        3Q       Max 
## -20182834  -3272173   -181164   3337480  21576466 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept) 1990300.27 2957195.71   0.673    0.502    
## t1          -135676.66  227450.65  -0.597    0.552    
## t2             2596.88    4781.80   0.543    0.588    
## t3              144.49      28.83   5.012 2.28e-06 ***
## c            689999.77  979171.87   0.705    0.483    
## s           -441589.91 1050371.55  -0.420    0.675    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 6875000 on 102 degrees of freedom
## Multiple R-squared:  0.9867, Adjusted R-squared:  0.9861 
## F-statistic:  1518 on 5 and 102 DF,  p-value: < 2.2e-16
plot(yt);lines(model1$fitted.values)
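
As a cross-check, the \(F\) statistic of section 6.7 for the reduced fit model0 (cubic trend only) against the full fit model1 (cubic trend plus the periodic terms) is returned by anova():

anova(model0, model1)                            # F = ((RSS_1 - RSS)/(q - q_1)) / (RSS/(n - q)), here q - q_1 = 2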

6.14.4.2 Kernel smoothing

\[\widehat{f}_{t}={\sum}_{i=1}^{n}{{\omega}_{t}{\left(i\right)}{x}_{i}}\]

6.14.4.2.1 Nadaraya-Watson estimator

\[{\omega}_{t}{\left(i\right)}=\frac{K\left(\frac{t-i}{b}\right)}{{\sum}_{j=1}^{n}{K\left(\frac{t-j}{b}\right)}}\]

6.14.4.2.2 Normal kernel

\[K\left(z\right)=\frac{1}{\sqrt{2\pi}}\exp{\left(-\frac{{z}^{2}}{2}\right)}\]

plot(yt);lines(ksmooth(J,yt,"normal",bandwidth=5))

plot(yt);lines(ksmooth(J,yt,"normal",bandwidth=108))

6.14.5 Smoothing splines

\[{\sum}_{t=1}^{n}{\left({x}_{t}-{f}_{t}\right)}^{2}+\lambda\int{{\left({f}_{t}^{''}\right)}^{2}dt}\]

plot(yt);lines(smooth.spline(J,yt,spar=10**-7))

plot(yt);lines(smooth.spline(J,yt,spar=10**-11))

7 Box & Jenkins

\[V\left({x}_{t}\right)={\sigma}_{x}^{2}<{\infty}\text{ and constant}\]

\[E\left({x}_{t}\right)={\mu}_{x}<{\infty}\text{ and constant}\]

\[\dot{x}_{t}={x}_{t}-{\mu}_{x}=f\left({x}_{t-1}-{\mu}_{x},{x}_{t-2}-{\mu}_{x},{x}_{t-3}-{\mu}_{x},\ldots\right)+{\omega}_{t}\]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]

7.1 \({AR}(p)\)

\[ \begin{align} \dot{x}_{t}&={\phi}_{1}\dot{x}_{t-1}+{\cdots}+{\phi}_{p}\dot{x}_{t-p}+{\omega}_{t}\\ {x}_{t}-{\mu}_{x}&={\phi}_{1}\left({x}_{t-1}-{\mu}_{x}\right)+{\cdots}+{\phi}_{p}\left({x}_{t-p}-{\mu}_{x}\right)+{\omega}_{t}\\ {x}_{t}&={\mu}_{x}+{\phi}_{1}{x}_{t-1}-{\phi}_{1}{\mu}_{x}+{\cdots}+{\phi}_{p}{x}_{t-p}-{\phi}_{p}{\mu}_{x}+{\omega}_{t}\\ &={\mu}_{x}-{\phi}_{1}{\mu}_{x}-{\cdots}-{\phi}_{p}{\mu}_{x}+{\phi}_{1}{x}_{t-1}+\cdots+{\phi}_{p}{x}_{t-p}+{\omega}_{t}\\ &={\mu}_{x}\left(1-{\phi}_{1}-{\cdots}-{\phi}_{p}\right)+{\phi}_{1}{x}_{t-1}+\cdots+{\phi}_{p}{x}_{t-p}+{\omega}_{t} \end{align} \]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]

\[{\alpha}={\mu}_{x}\left(1-{\phi}_{1}-{\cdots}-{\phi}_{p}\right)\]

\[ \begin{align} {x}_{t}&={\alpha}+{\phi}_{1}{x}_{t-1}+\cdots+{\phi}_{p}{x}_{t-p}+{\omega}_{t} \end{align} \]

\[ \begin{align} \dot{x}_{t}-{\phi}_{1}\dot{x}_{t-1}-{\cdots}-{\phi}_{p}\dot{x}_{t-p}&={\omega}_{t}\\ \dot{x}_{t}-{\phi}_{1}{B}^{1}\dot{x}_{t}-{\cdots}-{\phi}_{p}{B}^{p}\dot{x}_{t}&={\omega}_{t}\\ \left(1-{\phi}_{1}{B}^{1}-{\cdots}-{\phi}_{p}{B}^{p}\right)\dot{x}_{t}&={\omega}_{t}\\ {\phi}{\left(B\right)}\dot{x}_{t}&={\omega}_{t} \end{align} \]

7.1.1 \({AR}(1)\)

\[ \begin{align} \dot{x}_{t}&={\phi}_{1}\dot{x}_{t-1}+{\omega}_{t}\\ &={\phi}_{1}\left({\phi}_{1}\dot{x}_{t-2}+{\omega}_{t-1}\right)+{\omega}_{t}\\ &={\phi}_{1}^{2}\dot{x}_{t-2}+{\phi}_{1}{\omega}_{t-1}+{\omega}_{t}\\ &={\phi}_{1}^{2}\left({\phi}_{1}\dot{x}_{t-3}+{\omega}_{t-2}\right)+{\phi}_{1}{\omega}_{t-1}+{\omega}_{t}\\ &={\phi}_{1}^{3}\dot{x}_{t-3}+{\phi}_{1}^{2}{\omega}_{t-2}+{\phi}_{1}{\omega}_{t-1}+{\omega}_{t}\\ &={\phi}_{1}^{k}\dot{x}_{t-k}+{\sum}_{j=0}^{k-1}{{\phi}_{1}^{j}{\omega}_{t-j}} \end{align} \]

\[ \begin{align} \lim_{k\rightarrow\infty}E\left(\dot{x}_{t}-{\sum}_{j=0}^{k-1}{{\phi}_{1}^{j}{\omega}_{t-j}}\right)^{2}&=\lim_{k\rightarrow\infty}E\left({\phi}_{1}^{k}\dot{x}_{t-k}\right)^{2}\\ &=\lim_{k\rightarrow\infty}{\phi}_{1}^{2k}E\left(\dot{x}_{t-k}^{2}\right) \end{align} \]

\[\lim_{k\rightarrow\infty}{\phi}_{1}^{2k}E\left(\dot{x}_{t-k}^{2}\right)\stackrel{\left|{\phi}_{1}\right|<1}{=}0\]

\[ \begin{align} \dot{x}_{t}&={\sum}_{j=0}^{\infty}{{\phi}_{1}^{j}{\omega}_{t-j}} \end{align} \]

\[ \begin{align} E\left(\dot{x}_{t}\right)&={\sum}_{j=0}^{\infty}{{\phi}_{1}^{j}E\left({\omega}_{t-j}\right)}\\ &=0 \end{align} \]

\[ \begin{align} \gamma\left(h\right)&=C\left({x}_{t},{x}_{t+h}\right)\\ &=E\left[\left({\sum}_{j=0}^{\infty}{{\phi}_{1}^{j}{\omega}_{t-j}}\right)\left({\sum}_{k=0}^{\infty}{{\phi}_{1}^{k}{\omega}_{t+h-k}}\right)\right]\\ &={\sum}_{j=0}^{\infty}{\sum}_{k=0}^{\infty}{{\phi}_{1}^{j}{\phi}_{1}^{k}E\left[{\omega}_{t-j}{\omega}_{t+h-k}\right]}\\ &={\sum}_{j=0}^{\infty}{{\phi}_{1}^{j}{\phi}_{1}^{h+j}E\left[{\omega}_{t-j}{\omega}_{t+h-(h+j)}\right]}+{\sum}_{j=0}^{\infty}{\sum}_{k{\neq}j+h}{{\phi}_{1}^{j}{\phi}_{1}^{k}E\left[{\omega}_{t-j}{\omega}_{t+h-k}\right]}\\ &={\sigma}_{\omega}^{2}{\sum}_{j=0}^{\infty}{{\phi}_{1}^{j}{\phi}_{1}^{h+j}}\\ &={\phi}_{1}^{h}{\sigma}_{\omega}^{2}{\sum}_{j=0}^{\infty}{{\phi}_{1}^{j}{\phi}_{1}^{j}}\\ &={\phi}_{1}^{h}{\sigma}_{\omega}^{2}{\sum}_{j=0}^{\infty}{{\phi}_{1}^{2j}}\\ &={\phi}_{1}^{h}{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}} \end{align} \]

\[ \begin{align} \gamma\left(0\right)&={\phi}_{1}^{0}{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}\\ &={\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}} \end{align} \]

\[ \begin{align} \rho\left(h-1\right)&=\frac{{\phi}_{1}^{h-1}{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}{{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}\\ &={\phi}_{1}^{h-1}\frac{{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}{{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}\\ &={\phi}_{1}^{h-1} \end{align} \]

\[ \begin{align} \rho\left(h\right)&=\frac{{\phi}_{1}^{h}{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}{{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}\\ &={\phi}_{1}^{h}\frac{{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}{{\sigma}_{\omega}^{2}\frac{1}{1-{\phi}_{1}^{2}}}\\ &={\phi}_{1}^{h} \end{align} \]

\[\rho\left(h\right)={\phi}_{1}\rho\left(h-1\right)\text{ for }h=1,2,\ldots\]
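
A minimal numerical check (\({\phi}_{1}=0.8\) is illustrative): ARMAacf() from the stats package returns the theoretical ACF, which matches \({\phi}_{1}^{h}\).

phi <- 0.8
h <- 0:6
rbind(ARMAacf(ar=phi, lag.max=6), phi^h)         # two identical rows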

7.1.1.1 Simulation of a first-order autoregressive process

n <- rpois(n=1,lambda=108);n
## [1] 125

\[\dot{x}_{t}={\phi}_{1}\dot{x}_{t-1}+{\omega}_{t}\text{, }t=1,\ldots,125\]

phi1 <- rnorm(n=1,mean=0.9,sd=0.05);phi1
## [1] 0.8355509

\[{\phi}_{1}=0.8355509\]

7.1.1.1.1 \({\phi}_{1}\) = -0.8355509
AR1 <- arima.sim(list(order=c(1,0,0),ar=-phi1),n=n)
par(mfrow=c(1,2))
plot(AR1)
acf(AR1)

7.1.1.1.2 \({\phi}_{1}\) = 0.8355509
AR1 <- arima.sim(list(order=c(1,0,0),ar=+phi1),n=n)
par(mfrow=c(1,2))
plot(AR1)
acf(AR1)

7.1.1.2 Causality and explosive autoregressive models

\[\left|{\phi}_{1}\right|^{j+1}>\left|{\phi}_{1}\right|^{j}>\cdots>\left|{\phi}_{1}\right|>1{\implies}\dot{x}_{t}={\sum}_{j=0}^{k-1}{{\phi}_{1}^{j}{\omega}_{t-j}}\stackrel{k{\rightarrow}\infty}{\rightarrow}{\infty}\]

\[ \begin{align} \dot{x}_{t+1}&={\phi}_{1}\dot{x}_{t}+{\omega}_{t+1}\\ \dot{x}_{t+1}-{\omega}_{t+1}&={\phi}_{1}\dot{x}_{t}\\ {\phi}_{1}^{-1}\dot{x}_{t+1}-{\phi}_{1}^{-1}{\omega}_{t+1}&=\dot{x}_{t} \end{align} \]

\[ \begin{align} \dot{x}_{t}&={\phi}_{1}^{-1}\dot{x}_{t+1}-{\phi}_{1}^{-1}{\omega}_{t+1}\\ &={\phi}_{1}^{-1}\left({\phi}_{1}^{-1}\dot{x}_{t+2}-{\phi}_{1}^{-1}{\omega}_{t+2}\right)-{\phi}_{1}^{-1}{\omega}_{t+1}\\ &={\phi}_{1}^{-2}\dot{x}_{t+2}-{\phi}_{1}^{-2}{\omega}_{t+2}-{\phi}_{1}^{-1}{\omega}_{t+1}\\ &={\phi}_{1}^{-2}\left({\phi}_{1}^{-1}\dot{x}_{t+3}-{\phi}_{1}^{-1}{\omega}_{t+3}\right)-{\phi}_{1}^{-2}{\omega}_{t+2}-{\phi}_{1}^{-1}{\omega}_{t+1}\\ &={\phi}_{1}^{-3}\dot{x}_{t+3}-{\phi}_{1}^{-3}{\omega}_{t+3}-{\phi}_{1}^{-2}{\omega}_{t+2}-{\phi}_{1}^{-1}{\omega}_{t+1}\\ &={\phi}_{1}^{-k}\dot{x}_{t+k}-{\sum}_{j=1}^{k}{{\phi}_{1}^{-j}{\omega}_{t+j}} \end{align} \]

\[ \begin{align} \lim_{k\rightarrow\infty}E\left(\dot{x}_{t}+{\sum}_{j=1}^{k}{{\phi}_{1}^{-j}{\omega}_{t+j}}\right)^{2}&=\lim_{k\rightarrow\infty}E\left({\phi}_{1}^{-k}\dot{x}_{t+k}\right)^{2}\\ &=\lim_{k\rightarrow\infty}{\phi}_{1}^{-2k}E\left(\dot{x}_{t+k}^{2}\right) \end{align} \]

\[\lim_{k\rightarrow\infty}{\phi}_{1}^{-2k}E\left(\dot{x}_{t+k}^{2}\right)\stackrel{\left|{\phi}_{1}^{-1}\right|<1}{=}0\]

\[ \begin{align} \dot{x}_{t}&=-{\sum}_{j=1}^{\infty}{{\phi}_{1}^{-j}{\omega}_{t+j}} \end{align} \]

\[\phi{\left(B\right)}{x}_{t}={\omega}_{t}\]

\[\phi{\left(B\right)}=1-{\phi}_{1}B\text{ with }\left|{\phi}_{1}\right|<1\]

\[ \begin{align} {x}_{t}&={\sum}_{j=0}^{\infty}{\psi}_{j}{\omega}_{t-j}\\ &=\psi{\left(B\right)}{\omega}_{t} \end{align} \]

\[\psi{\left(B\right)}={\sum}_{j=0}^{\infty}{\psi}_{j}{B}^{j}\text{ with }{\psi}_{j}={\phi}^{j}\]

\[\phi{\left(B\right)}\psi{\left(B\right)}{\omega}_{t}={\omega}_{t}\]

\[ \begin{align} \phi{\left(B\right)}\psi{\left(B\right)}&=1\\ \left(1-{\phi}_{1}B\right)\left(1+{\psi}_{1}B+{\psi}_{2}B^2+\cdots+{\psi}_{j}B^j+\cdots\right)&=1\\ \left(1-{\phi}_{1}B\right)+\left(1-{\phi}_{1}B\right){\psi}_{1}B+\left(1-{\phi}_{1}B\right){\psi}_{2}B^2+\cdots+\left(1-{\phi}_{1}B\right){\psi}_{j}B^j+\cdots&=1\\ 1-{\phi}_{1}B+{\psi}_{1}B-{\psi}_{1}{\phi}_{1}B^2+{\psi}_{2}B^2-{\psi}_{2}{\phi}_{1}B^3+\cdots+{\psi}_{j}B^j-{\psi}_{j}{\phi}_{1}B^{j+1}+\cdots&=1\\ 1+\left({\psi}_{1}-{\phi}_{1}\right)B+\left({\psi}_{2}-{\psi}_{1}{\phi}_{1}\right)B^2+\left({\psi}_{3}-{\psi}_{2}{\phi}_{1}\right)B^3+\cdots+\left({\psi}_{j}-{\psi}_{j-1}{\phi}_{1}\right)B^j+\cdots&=1 \end{align} \]

\[ \begin{align} \left({\psi}_{1}-{\phi}_{1}\right)=0&{\implies}{\psi}_{1}={\phi}_{1}\\ \left({\psi}_{2}-{\psi}_{1}{\phi}_{1}\right)=0&{\implies}{\psi}_{2}={\psi}_{1}{\phi}_{1}\\ \left({\psi}_{3}-{\psi}_{2}{\phi}_{1}\right)=0&{\implies}{\psi}_{3}={\psi}_{2}{\phi}_{1}\\ \left({\psi}_{j}-{\psi}_{j-1}{\phi}_{1}\right)=0&{\implies}{\psi}_{j}={\psi}_{j-1}{\phi}_{1} \end{align} \]

\[{\psi}_{0}=1{\implies}{\psi}_{j}={\phi}_{1}^{j}\]

\[ \begin{align} \phi{\left(B\right)}{x}_{t}&={\omega}_{t}\\ \phi^{-1}{\left(B\right)}\phi{\left(B\right)}{x}_{t}&=\phi^{-1}{\left(B\right)}{\omega}_{t}\\ {x}_{t}&=\phi^{-1}{\left(B\right)}{\omega}_{t} \end{align} \]

\[ \begin{align} \phi^{-1}{\left(B\right)}&=1+{\phi}B+{\phi}^{2}B^{2}+\cdots+{\phi}^{j}B^{j}+\cdots\\ &=\psi{\left(B\right)} \end{align} \]
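
A minimal numerical check (\(\phi=0.8\) is illustrative): ARMAtoMA() from the stats package computes the \(\psi\) weights, which match \({\phi}^{j}\) for a causal AR(1).

phi <- 0.8
rbind(ARMAtoMA(ar=phi, lag.max=8), phi^(1:8))    # two identical rows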

7.1.1.2.1 \(\phi{\left(z\right)}=1-{\phi}z\)

\[ \begin{align} {\phi}^{-1}{\left(z\right)}&=\frac{1}{1-{\phi}z}\\ &=1+{\phi}{z}+\left({\phi}{z}\right)^{2}+\cdots+\left({\phi}{z}\right)^{j}+\cdots\\ &=1+{\phi}{z}+{\phi}^{2}{z}^{2}+\cdots+{\phi}^{j}{z}^{j}+\cdots\text{, }\left|z\right|{\leq}1 \end{align} \]

geometric <- function(base, max) {
    result0 <- base^(0:floor(max))      # terms base^j of the geometric series
    result1 <- sum(result0)             # partial sum up to floor(max)
    result2 <- 1/(1-base)               # limit of the series when |base| < 1
    return(list(result0,result1,result2))
}
base=runif(n=1,min=1-10**-1,max=1-10**-3);base
## [1] 0.9244088
maximun=round(runif(n=1,min=10**3,max=10**5),0);maximun
## [1] 51965
serie <- geometric(base=base,max=maximun)
plot(cumsum(serie[[1]]), type="h", log="x"); serie[[2]]; serie[[3]]

## [1] 13.22905
## [1] 13.22905

7.2 \({ARI}(p,d)\)

\[{\nabla}^{d}{x}_{t}={\phi}_{1}\dot{x}_{t-1}+{\cdots}+{\phi}_{p}\dot{x}_{t-p}+{\omega}_{t}\]

\[ \begin{align} {\nabla}^{d}{x}_{t}&={\left(1-B\right)}^{d}{x}_{t}\\ &=\dot{x}_{t} \end{align} \]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]

7.3 \({MA}(q)\)

\[ \begin{align} {x}_{t}&={\mu}_{x}+{\omega}_{t}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q}\\ {x}_{t}-{\mu}_{x}&={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q}\\ \dot{x}_{t}&={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q} \end{align} \]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]

\[ \begin{align} \dot{x}_{t}&={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q}\\ &={\omega}_{t}+{\theta}_{1}{B}^{1}{\omega}_{t}+{\cdots}+{\theta}_{q}{B}^{q}{\omega}_{t}\\ &=\left(1+{\theta}_{1}{B}^{1}+{\cdots}+{\theta}_{q}{B}^{q}\right){\omega}_{t}\\ &={\theta}{\left(B\right)}{\omega}_{t} \end{align} \]

7.3.1 \({MA}(1)\)

\[\dot{x}_{t}={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}\]

\[ \begin{align} E\left(\dot{x}_{t}\right)&={\sum}_{j=0}^{1}{{\theta}_{j}E\left({\omega}_{t-j}\right)}\\ &=0 \end{align} \]

\[ \begin{align} \gamma\left(h\right)&=C\left({x}_{t},{x}_{t+h}\right)\\ &=E\left[\left({\sum}_{j=0}^{1}{{\theta}_{j}{\omega}_{t-j}}\right)\left({\sum}_{k=0}^{1}{{\theta}_{k}{\omega}_{t+h-k}}\right)\right]\\ &={\sum}_{j=0}^{1}{\sum}_{k=0}^{1}{{\theta}_{j}{\theta}_{k}E\left[{\omega}_{t-j}{\omega}_{t+h-k}\right]}\\ &={\sum}_{j=0}^{1}{{\theta}_{j}{\theta}_{h+j}E\left[{\omega}_{t-j}{\omega}_{t+h-(h+j)}\right]}+{\sum}_{j=0}^{1}{\sum}_{k{\neq}j+h}{{\theta}_{j}{\theta}_{k}E\left[{\omega}_{t-j}{\omega}_{t+h-k}\right]}\\ &={\sigma}_{\omega}^{2}{\sum}_{j=0}^{1}{{\theta}_{j}{\theta}_{h+j}} \end{align} \]

\[ \begin{align} \gamma\left(h\right)&={\sigma}_{\omega}^{2}{\sum}_{j=0}^{1}{{\theta}_{j}{\theta}_{h+j}}\\ &={\sigma}_{\omega}^{2}\left({{\theta}_{0}{\theta}_{h+0}+{\theta}_{1}{\theta}_{h+1}}\right)\\ &={\sigma}_{\omega}^{2}\left({{\theta}_{h}+{\theta}_{1}{\theta}_{h+1}}\right) \end{align} \]

\[ \begin{align} \gamma\left(0\right)&={\sigma}_{\omega}^{2}{\sum}_{j=0}^{1}{{\theta}_{j}{\theta}_{0+j}}\\ &={\sigma}_{\omega}^{2}{\sum}_{j=0}^{1}{{\theta}_{j}^{2}}\\ &={\sigma}_{\omega}^{2}\left({{\theta}_{0}^{2}+{\theta}_{1}^{2}}\right)\\ &={\sigma}_{\omega}^{2}\left({1+{\theta}_{1}^{2}}\right) \end{align} \]

\[ \begin{align} \rho\left(h\right)&=\frac{{\sigma}_{\omega}^{2}\left({{\theta}_{h}+{\theta}_{1}{\theta}_{h+1}}\right)}{{\sigma}_{\omega}^{2}\left({1+{\theta}_{1}^{2}}\right)}\\ &=\frac{{{\theta}_{h}+{\theta}_{1}{\theta}_{h+1}}}{{1+{\theta}_{1}^{2}}} \end{align} \]

\[ \begin{align} \rho\left(h\right)= \begin{cases} 1\text{, }h=0\\ \frac{{{\theta}_{1}}}{{1+{\theta}_{1}^{2}}}\text{, }h=1\\ 0\text{, }h>1 \end{cases} \end{align} \]

\[ \begin{align} {\theta}_{1}{\in}\mathbb{R}&{\iff}0{\leq}\left({1+{\theta}_{1}}\right)^{2}\text{ & }0{\leq}\left({1-{\theta}_{1}}\right)^{2}\\ &{\iff}0{\leq}+{1}+{2}{\theta}_{1}+{{\theta}_{1}^{2}}\text{ & }0{\leq}+{1}-{2}{\theta}_{1}+{{\theta}_{1}^{2}}\\ &{\iff}-{1}-{{\theta}_{1}^{2}}{\leq}{2}{\theta}_{1}\text{ & }{2}{\theta}_{1}{\leq}+{1}+{{\theta}_{1}^{2}}\\ &{\iff}-{1}-{{\theta}_{1}^{2}}{\leq}{2}{\theta}_{1}{\leq}+{1+{\theta}_{1}^{2}}\\ &{\iff}-\left({1+{\theta}_{1}^{2}}\right){\leq}{2}{\theta}_{1}{\leq}+\left({1+{\theta}_{1}^{2}}\right)\\ &{\iff}-\frac{1}{2}\left({1+{\theta}_{1}^{2}}\right){\leq}{\theta}_{1}{\leq}+\frac{1}{2}\left({1+{\theta}_{1}^{2}}\right)\\ &{\iff}-\frac{1}{2}{\leq}\frac{{\theta}_{1}}{1+{\theta}_{1}^{2}}{\leq}+\frac{1}{2}\\ &{\iff}\left|\frac{{\theta}_{1}}{1+{\theta}_{1}^{2}}\right|{\leq}\frac{1}{2}\\ &{\iff}\left|\rho\left(1\right)\right|{\leq}\frac{1}{2} \end{align} \]
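
A minimal numerical check of the bound over a grid of illustrative \({\theta}_{1}\) values, plus one theoretical ACF from ARMAacf():

th <- seq(-5, 5, by=0.1)
max(abs(th/(1+th^2)))                            # <= 0.5, attained at theta1 = +/- 1
ARMAacf(ma=0.9, lag.max=2)                       # lag-1 value equals 0.9/(1 + 0.9^2)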

7.3.1.1 Simulation of a first-order moving-average process

n <- rpois(n=1,lambda=108);n
## [1] 81

\[\dot{x}_{t}={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}\text{, }t=1,\ldots,81\]

theta1 <- rnorm(n=1,mean=0.9,sd=0.05);theta1
## [1] 0.8719363

\[{\theta}_{1}=0.8719363\]

7.3.1.1.1 \({\theta}_{1}\) = -0.8719363
MA1 <- arima.sim(list(order=c(0,0,1),ma=-theta1),n=n)
par(mfrow=c(1,2))
plot(MA1)
acf(MA1)

7.3.1.1.2 \({\theta}_{1}\) = 0.8719363
MA1 <- arima.sim(list(order=c(0,0,1),ma=+theta1),n=n)
par(mfrow=c(1,2))
plot(MA1)
acf(MA1)

7.3.1.2 Non-uniqueness of a moving-average process

\[\frac{1}{{\theta}_{1}}=1.1468727\]

7.3.1.2.1 \({\theta}_{1}\) = -0.8719363 & \(\frac{1}{{\theta}_{1}}\) = -1.1468727
set.seed(1234567890);MA1.1 <- arima.sim(list(order=c(0,0,1),ma=-theta1),n=n)
set.seed(1234567890);MA1.2 <- arima.sim(list(order=c(0,0,1),ma=-1/theta1),n=n)
par(mfrow=c(2,2))
plot(MA1.1);acf(MA1.1)
plot(MA1.2);acf(MA1.2)

7.3.1.2.2 \({\theta}_{1}\) = 0.8719363 & \(\frac{1}{{\theta}_{1}}\) = 1.1468727
set.seed(1234567890);MA1.1 <- arima.sim(list(order=c(0,0,1),ma=+theta1),n=n)
set.seed(1234567890);MA1.2 <- arima.sim(list(order=c(0,0,1),ma=+1/theta1),n=n)
par(mfrow=c(2,2))
plot(MA1.1);acf(MA1.1)
plot(MA1.2);acf(MA1.2)

7.3.1.3 Invertibility of moving-average processes

\[ \begin{align} \dot{x}_{t}&={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}\\ \dot{x}_{t}-{\theta}_{1}{\omega}_{t-1}&={\omega}_{t} \end{align} \]

\[ \begin{align} -{\theta}_{1}{\omega}_{t-1}+\dot{x}_{t}&={\omega}_{t}\\ -{\theta}_{1}\left({-\theta}_{1}{\omega}_{t-2}+\dot{x}_{t-1}\right)+\dot{x}_{t}&={\omega}_{t}\\ \left(-{\theta}_{1}\right)^{2}{\omega}_{t-2}-{\theta}_{1}\dot{x}_{t-1}+\dot{x}_{t}&={\omega}_{t}\\ \left(-{\theta}_{1}\right)^{2}\left(-{\theta}_{1}{\omega}_{t-3}+\dot{x}_{t-2}\right)-{\theta}_{1}\dot{x}_{t-1}+\dot{x}_{t}&={\omega}_{t}\\ \left(-{\theta}_{1}\right)^{3}{\omega}_{t-3}+\left(-{\theta}_{1}\right)^{2}\dot{x}_{t-2}-{\theta}_{1}\dot{x}_{t-1}+\dot{x}_{t}&={\omega}_{t}\\ \left(-{\theta}_{1}\right)^{k}{\omega}_{t-k}+{\sum}_{j=0}^{k-1}{\left(-{\theta}_{1}\right)^{j}\dot{x}_{t-j}}&={\omega}_{t} \end{align} \]

\[ \begin{align} \lim_{k\rightarrow\infty}E\left({\omega}_{t}-{\sum}_{j=0}^{k-1}{\left(-{\theta}_{1}\right)^{j}\dot{x}_{t-j}}\right)^{2}&=\lim_{k\rightarrow\infty}E\left[\left(-{\theta}_{1}\right)^{k}{\omega}_{t-k}\right]^{2}\\ &=\lim_{k\rightarrow\infty}\left(-{\theta}_{1}\right)^{2k}E\left({\omega}_{t-k}^{2}\right) \end{align} \]

\[\lim_{k\rightarrow\infty}\left(-{\theta}_{1}\right)^{2k}E\left({\omega}_{t-k}^{2}\right)\stackrel{\left|{\theta}_{1}\right|<1}{=}0\]

\[ \begin{align} {\sum}_{j=0}^{\infty}{\left(-{\theta}_{1}\right)^{j}\dot{x}_{t-j}}&={\omega}_{t} \end{align} \]

\[ \begin{align} \dot{x}_{t}&={\theta\left(B\right)}{\omega}_{t}\\ \theta^{-1}{\left(B\right)}\dot{x}_{t}&=\theta^{-1}{\left(B\right)}\theta{\left(B\right)}{\omega}_{t}\\ \pi{\left(B\right)}\dot{x}_{t}&={\omega}_{t} \end{align} \]

7.4 \({IMA}(d,q)\)

\[{\nabla}^{d}{x}_{t}={\omega}_{t}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q}\]

\[ \begin{align} {\nabla}^{d}{x}_{t}&={\left(1-B\right)}^{d}{x}_{t}\\ &=\dot{x}_{t} \end{align} \]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]

7.5 \({ARIMA}(p,d,q)\)

\[{\nabla}^{d}{x}_{t}={\phi}_{1}\dot{x}_{t-1}+{\cdots}+{\phi}_{p}\dot{x}_{t-p}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q}+{\omega}_{t}\]

\[ \begin{align} {\nabla}^{d}{x}_{t}&={\left(1-B\right)}^{d}{x}_{t}\\ &=\dot{x}_{t} \end{align} \]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]

7.6 \({SARIMA}(p,d,q){\times}(P,D,Q)_{S}\)

\[{\nabla}_{S}^{D}{\nabla}^{d}{x}_{t}={\phi}_{1}\dot{x}_{t-1}+{\cdots}+{\phi}_{p}\dot{x}_{t-p}+{\phi}_{1S}\dot{x}_{t-S}+{\cdots}+{\phi}_{PS}\dot{x}_{t-PS}+{\theta}_{1}{\omega}_{t-1}+{\cdots}+{\theta}_{q}{\omega}_{t-q}+{\theta}_{1S}{\omega}_{t-S}+{\cdots}+{\theta}_{QS}{\omega}_{t-QS}+{\omega}_{t}\]

\[ \begin{align} {\nabla}_{S}^{D}{\nabla}^{d}{x}_{t}&={\left(1-B^{S}\right)}^{D}{\left(1-B\right)}^{d}{x}_{t}\\ &=\dot{x}_{t} \end{align} \]

\[{\omega}_{t}{\sim}{WN}\left(0,{\sigma}_{\omega}^{2}\right)\]
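
A minimal sketch of the combined differencing \({\nabla}_{S}^{D}{\nabla}^{d}\) written with diff(); the series and the choices \(S=12\), \(d=D=1\) are illustrative.

x <- ts(cumsum(rnorm(120)) + rep(sin(2*pi*1:12/12), 10), frequency=12)   # trend plus a period-12 pattern
xdot <- diff(diff(x, lag=12, differences=1), differences=1)              # (1 - B^12)(1 - B) x_t
plot(xdot)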

8 Load the library

library(astsa)

9 Generate white noise

\[ y_t=\omega_t{\sim}wn(0,\sigma_\omega^2) \]

WN <- arima.sim(model = list(order=c(0,0,0)), 200)

10 Plot of the series

plot(WN)

11 Autocovariance and autocorrelation functions

11.1 Autocovariance function

\[ \begin{align} \gamma_\omega(s,t)&=\begin{cases} E[(\omega_t-\mu_t)^2]\text{, if }s{=}t\\ E[(\omega_s-\mu_s)(\omega_t-\mu_t)]\text{, if }s{\neq}t \end{cases}\\ &=\begin{cases} E[\omega_t^2]\text{, if }s{=}t\\ E[\omega_s\omega_t]\text{, if }s{\neq}t \end{cases}\\ &=\begin{cases} \sigma_\omega^2\text{, if }s{=}t\\ 0\text{, if }s{\neq}t \end{cases} \end{align} \]

11.2 Autocorrelation function

\[ \begin{align} \rho_\omega(s,t)&=\begin{cases} \frac{E[(\omega_t-\mu_t)^2]}{E[(\omega_t-\mu_t)^2]}\text{, if }s{=}t\\ \frac{E[(\omega_s-\mu_s)(\omega_t-\mu_t)]}{\sqrt{E[(\omega_s-\mu_s)^2]}\sqrt{E[(\omega_t-\mu_t)^2]}}\text{, if }s{\neq}t \end{cases}\\ &=\begin{cases} \frac{E[\omega_t^2]}{E[\omega_t^2]}\text{, if }s{=}t\\ \frac{E[\omega_s\omega_t]}{\sqrt{E[\omega_s^2]}\sqrt{E[\omega_t^2]}}\text{, if }s{\neq}t \end{cases}\\ &=\begin{cases} \frac{E[\omega_t^2]}{E[\omega_t^2]}\text{, if }s{=}t\\ \frac{E[\omega_s\omega_t]}{E[\omega_s^2]}\text{, if }s{\neq}t \end{cases}\\ &=\begin{cases} \frac{\sigma_\omega^2}{\sigma_\omega^2}\text{, if }s{=}t\\ \frac{0}{\sigma_\omega^2}\text{, if }s{\neq}t \end{cases}\\ &=\begin{cases} 1\text{, if }s{=}t\\ 0\text{, if }s{\neq}t \end{cases} \end{align} \]

acf2(WN)

##      [,1]  [,2] [,3]  [,4]  [,5] [,6] [,7]  [,8]  [,9] [,10] [,11] [,12] [,13]
## ACF  0.08 -0.01 0.15 -0.14 -0.05 0.17 0.02  0.01  0.04  0.00  0.00  0.02 -0.21
## PACF 0.08 -0.01 0.16 -0.17 -0.02 0.16 0.04 -0.01 -0.02  0.04  0.02 -0.01 -0.24
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24] [,25]
## ACF   0.06 -0.04 -0.07  0.07  0.04 -0.02  0.03 -0.02  0.00 -0.05 -0.13  0.00
## PACF  0.12 -0.06  0.01 -0.02  0.07  0.05 -0.02 -0.03  0.04 -0.04 -0.16  0.02

12 Overdifferencing

D.WN <- diff(WN,2)   # note: diff(WN, 2) is the lag-2 difference WN_t - WN_{t-2}; the second difference described below would be diff(WN, differences = 2)

\[ \begin{align} {\nabla}y_t&=y_t-y_{t-1}\\ &=\omega_t-\omega_{t-1}\text{; }\omega_t{\sim}wn(0,\sigma_\omega^2) \end{align} \]

\[ \begin{align} {\nabla}^2y_t&=(y_t-y_{t-1})-(y_{t-1}-y_{t-2})\\ &=(\omega_t-\omega_{t-1})-(\omega_{t-1}-\omega_{t-2})\\ &=\omega_t-2\omega_{t-1}+\omega_{t-2}\text{; }\omega_t{\sim}wn(0,\sigma_\omega^2) \end{align} \]
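
A minimal check of the theoretical ACFs involved: \({\nabla}^2\omega_t\) is an MA(2) with coefficients \((-2,1)\), while the lag-2 difference \(\omega_t-\omega_{t-2}\) computed by diff(WN,2) above is an MA(2) with coefficients \((0,-1)\); ARMAacf() gives both.

ARMAacf(ma=c(-2,1), lag.max=3)                   # rho(1) = -2/3, rho(2) = 1/6
ARMAacf(ma=c(0,-1), lag.max=3)                   # rho(1) = 0,    rho(2) = -1/2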

13 Plot of the series

plot(D.WN)

14 Autocorrelation and partial autocorrelation functions

acf2(D.WN)

##       [,1]  [,2] [,3]  [,4]  [,5]  [,6] [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  -0.04 -0.43 0.14 -0.21 -0.15  0.23 0.04 -0.08  0.03  0.00  0.08 -0.02
## PACF -0.04 -0.44 0.12 -0.49 -0.02 -0.22 0.04 -0.21 -0.01 -0.13  0.22 -0.23
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24]
## ACF  -0.19  0.09  0.04 -0.12  0.09  0.07 -0.04  0.00  0.02  0.05 -0.05 -0.14
## PACF  0.00 -0.08 -0.02 -0.13 -0.09 -0.03  0.02 -0.09  0.02  0.11 -0.03 -0.07
##      [,25]
## ACF   0.04
## PACF -0.05

15 Generate an MA(1)

MA <- arima.sim(model=list(order=c(0,0,1), ma=-0.9),n=200)

16 Autocorrelation and partial autocorrelation functions

acf2(MA)

##       [,1]  [,2]  [,3]  [,4]  [,5]  [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  -0.54  0.06  0.01 -0.02  0.00 -0.07  0.03  0.10 -0.07 -0.01  0.02  0.03
## PACF -0.54 -0.32 -0.20 -0.15 -0.12 -0.22 -0.25 -0.07 -0.05 -0.10 -0.13 -0.07
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24]
## ACF   0.01 -0.12  0.13 -0.01 -0.05  0.06 -0.06  0.06 -0.02 -0.04  0.01 -0.02
## PACF  0.03 -0.12 -0.05  0.02  0.00  0.10  0.05  0.08  0.14  0.13  0.04 -0.05
##      [,25]
## ACF   0.09
## PACF  0.08

17 Fit a model

sarima(MA, p=0, d=0, q=1)
## initial  value 0.330085 
## iter   2 value 0.106420
## iter   3 value 0.059333
## iter   4 value 0.035346
## iter   5 value 0.030750
## iter   6 value 0.024967
## iter   7 value 0.024812
## iter   8 value 0.024665
## iter   9 value 0.024478
## iter  10 value 0.024468
## iter  11 value 0.024468
## iter  11 value 0.024468
## final  value 0.024468 
## converged
## initial  value 0.018703 
## iter   2 value 0.018240
## iter   3 value 0.017717
## iter   4 value 0.017420
## iter   5 value 0.017391
## iter   6 value 0.017391
## iter   7 value 0.017391
## iter   8 value 0.017391
## iter   8 value 0.017391
## iter   8 value 0.017391
## final  value 0.017391 
## converged

## $fit
## 
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, 
##     Q), period = S), xreg = xmean, include.mean = FALSE, transform.pars = trans, 
##     fixed = fixed, optim.control = list(trace = trc, REPORT = 1, reltol = tol))
## 
## Coefficients:
##           ma1    xmean
##       -0.8816  -0.0115
## s.e.   0.0261   0.0088
## 
## sigma^2 estimated as 1.028:  log likelihood = -287.27,  aic = 580.53
## 
## $degrees_of_freedom
## [1] 198
## 
## $ttable
##       Estimate     SE  t.value p.value
## ma1    -0.8816 0.0261 -33.8213  0.0000
## xmean  -0.0115 0.0088  -1.2970  0.1961
## 
## $AIC
## [1] 2.902658
## 
## $AICc
## [1] 2.902963
## 
## $BIC
## [1] 2.952133

18 Generate an AR(1)

AR <- arima.sim(model=list(order=c(1,0,0), ar=0.8),n=200)

19 Autocorrelation and partial autocorrelation functions

acf2(AR)

##      [,1]  [,2] [,3] [,4]  [,5] [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12] [,13]
## ACF  0.75  0.49 0.32 0.22  0.15 0.12  0.08  0.02 -0.05 -0.11 -0.09 -0.06 -0.08
## PACF 0.75 -0.18 0.05 0.02 -0.03 0.05 -0.04 -0.07 -0.07 -0.05  0.10 -0.03 -0.08
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24] [,25]
## ACF  -0.11 -0.07 -0.03  0.00  0.04  0.02 -0.03 -0.05 -0.05 -0.03  0.01  0.01
## PACF -0.04  0.12 -0.01  0.05  0.02 -0.12 -0.05  0.07 -0.06  0.05  0.02 -0.04

20 Fit a model

sarima(AR, p=1, d=0, q=0)
## initial  value 0.496255 
## iter   2 value 0.068752
## iter   3 value 0.068631
## iter   4 value 0.068570
## iter   5 value 0.068561
## iter   6 value 0.068519
## iter   7 value 0.068516
## iter   8 value 0.068514
## iter   9 value 0.068514
## iter  10 value 0.068513
## iter  11 value 0.068512
## iter  12 value 0.068512
## iter  13 value 0.068512
## iter  13 value 0.068512
## iter  13 value 0.068512
## final  value 0.068512 
## converged
## initial  value 0.076386 
## iter   2 value 0.076289
## iter   3 value 0.076173
## iter   4 value 0.076137
## iter   5 value 0.076062
## iter   6 value 0.076036
## iter   7 value 0.076032
## iter   8 value 0.076030
## iter   8 value 0.076030
## iter   8 value 0.076030
## final  value 0.076030 
## converged

## $fit
## 
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, 
##     Q), period = S), xreg = xmean, include.mean = FALSE, transform.pars = trans, 
##     fixed = fixed, optim.control = list(trace = trc, REPORT = 1, reltol = tol))
## 
## Coefficients:
##          ar1   xmean
##       0.7603  0.0080
## s.e.  0.0460  0.3129
## 
## sigma^2 estimated as 1.159:  log likelihood = -298.99,  aic = 603.99
## 
## $degrees_of_freedom
## [1] 198
## 
## $ttable
##       Estimate     SE t.value p.value
## ar1     0.7603 0.0460 16.5298  0.0000
## xmean   0.0080 0.3129  0.0256  0.9796
## 
## $AIC
## [1] 3.019937
## 
## $AICc
## [1] 3.020241
## 
## $BIC
## [1] 3.069411

21 Generate an AR(2)

AR.2 <- arima.sim(model=list(order=c(2,0,0),ar=c(1.5,-0.75)),200)

22 Autocorrelation and partial autocorrelation functions

acf2(AR.2)

##      [,1]  [,2]  [,3]  [,4]  [,5]  [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  0.85  0.54  0.17 -0.16 -0.39 -0.51 -0.51 -0.42 -0.27 -0.09  0.11  0.29
## PACF 0.85 -0.69 -0.11 -0.10 -0.05 -0.10 -0.06  0.00 -0.01  0.08  0.12  0.02
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24]
## ACF   0.39  0.38  0.27  0.10 -0.06 -0.19 -0.26 -0.28 -0.26 -0.19 -0.10 -0.02
## PACF -0.03 -0.13 -0.07  0.08  0.06 -0.09 -0.03 -0.04  0.00 -0.02 -0.04 -0.05
##      [,25]
## ACF   0.08
## PACF  0.06

23 Fit a model

sarima(AR.2,p=2,d=0,q=0)
## initial  value 1.078684 
## iter   2 value 0.950130
## iter   3 value 0.519637
## iter   4 value 0.280909
## iter   5 value 0.080629
## iter   6 value 0.000445
## iter   7 value -0.024725
## iter   8 value -0.036056
## iter   9 value -0.036066
## iter  10 value -0.036077
## iter  11 value -0.036080
## iter  12 value -0.036082
## iter  13 value -0.036083
## iter  14 value -0.036083
## iter  14 value -0.036083
## iter  14 value -0.036083
## final  value -0.036083 
## converged
## initial  value -0.014852 
## iter   2 value -0.015173
## iter   3 value -0.015316
## iter   4 value -0.015320
## iter   5 value -0.015322
## iter   6 value -0.015323
## iter   7 value -0.015323
## iter   8 value -0.015323
## iter   8 value -0.015323
## final  value -0.015323 
## converged

## $fit
## 
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, 
##     Q), period = S), xreg = xmean, include.mean = FALSE, transform.pars = trans, 
##     fixed = fixed, optim.control = list(trace = trc, REPORT = 1, reltol = tol))
## 
## Coefficients:
##          ar1      ar2    xmean
##       1.5267  -0.7706  -0.2600
## s.e.  0.0458   0.0456   0.2835
## 
## sigma^2 estimated as 0.9546:  log likelihood = -280.72,  aic = 569.45
## 
## $degrees_of_freedom
## [1] 197
## 
## $ttable
##       Estimate     SE  t.value p.value
## ar1     1.5267 0.0458  33.3222  0.0000
## ar2    -0.7706 0.0456 -16.8919  0.0000
## xmean  -0.2600 0.2835  -0.9172  0.3602
## 
## $AIC
## [1] 2.847232
## 
## $AICc
## [1] 2.847844
## 
## $BIC
## [1] 2.913198

24 Generate an ARIMA(2,0,1)

ARIMA <- arima.sim(model=list(order=c(2,0,1),ar=c(1,-0.9),ma=0.8),n=400)

25 Autocorrelation and partial autocorrelation functions

acf2(ARIMA)

##      [,1]  [,2]  [,3]  [,4] [,5]  [,6] [,7]  [,8]  [,9] [,10] [,11] [,12] [,13]
## ACF  0.55 -0.31 -0.76 -0.45 0.23  0.58 0.35 -0.18 -0.43 -0.22  0.18  0.34  0.16
## PACF 0.55 -0.89  0.43 -0.16 0.08 -0.09 0.01  0.06  0.10 -0.03 -0.02  0.08  0.05
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24] [,25]
## ACF  -0.13 -0.22 -0.08  0.11  0.15  0.04 -0.09 -0.11 -0.01  0.08  0.07 -0.03
## PACF  0.07 -0.06  0.03  0.05 -0.06  0.05 -0.07  0.00  0.04 -0.03 -0.08 -0.09
##      [,26] [,27] [,28] [,29] [,30]
## ACF  -0.11 -0.08  0.04  0.14  0.13
## PACF  0.03  0.02  0.00  0.07  0.01

26 Fit a model

sarima(ARIMA,p=2,d=0,q=1)
## initial  value 1.264093 
## iter   2 value 0.509472
## iter   3 value 0.316706
## iter   4 value 0.106644
## iter   5 value 0.032171
## iter   6 value 0.014005
## iter   7 value 0.012180
## iter   8 value 0.005370
## iter   9 value 0.004309
## iter  10 value 0.004283
## iter  11 value 0.004283
## iter  12 value 0.004283
## iter  13 value 0.004283
## iter  14 value 0.004283
## iter  14 value 0.004283
## iter  14 value 0.004283
## final  value 0.004283 
## converged
## initial  value 0.011001 
## iter   2 value 0.010993
## iter   3 value 0.010982
## iter   4 value 0.010982
## iter   5 value 0.010981
## iter   6 value 0.010981
## iter   7 value 0.010980
## iter   8 value 0.010980
## iter   9 value 0.010980
## iter   9 value 0.010980
## iter   9 value 0.010980
## final  value 0.010980 
## converged

## $fit
## 
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, 
##     Q), period = S), xreg = xmean, include.mean = FALSE, transform.pars = trans, 
##     fixed = fixed, optim.control = list(trace = trc, REPORT = 1, reltol = tol))
## 
## Coefficients:
##          ar1      ar2     ma1   xmean
##       0.9597  -0.8481  0.8456  0.0376
## s.e.  0.0270   0.0269  0.0291  0.1044
## 
## sigma^2 estimated as 1.007:  log likelihood = -571.97,  aic = 1153.94
## 
## $degrees_of_freedom
## [1] 396
## 
## $ttable
##       Estimate     SE  t.value p.value
## ar1     0.9597 0.0270  35.4906  0.0000
## ar2    -0.8481 0.0269 -31.5509  0.0000
## ma1     0.8456 0.0291  29.0323  0.0000
## xmean   0.0376 0.1044   0.3603  0.7188
## 
## $AIC
## [1] 2.884838
## 
## $AICc
## [1] 2.885091
## 
## $BIC
## [1] 2.934731

27 Generate a nonstationary process

ARIMA.110 <- arima.sim(model=list(order=c(1,1,0),ar=0.9),n=500)

28 Autocorrelation and partial autocorrelation functions

acf2(ARIMA.110)

##      [,1]  [,2]  [,3]  [,4]  [,5]  [,6]  [,7]  [,8]  [,9] [,10] [,11] [,12]
## ACF  0.99  0.98  0.97  0.95  0.94  0.93  0.91  0.90  0.88  0.87  0.86  0.84
## PACF 0.99 -0.04 -0.03 -0.03 -0.03 -0.03 -0.02 -0.01 -0.02  0.00  0.00  0.00
##      [,13] [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24]
## ACF   0.83  0.82  0.80  0.79  0.78  0.76  0.75  0.74  0.73  0.71  0.70  0.69
## PACF  0.01  0.01  0.01  0.00  0.00 -0.01 -0.01  0.00  0.00 -0.01 -0.01 -0.01
##      [,25] [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33]
## ACF   0.68  0.67  0.65  0.64  0.63  0.62  0.61   0.6  0.58
## PACF  0.00  0.00  0.00 -0.01 -0.01  0.00  0.00   0.0  0.00

29 Nonseasonal differencing

sarima(ARIMA.110,p=0,d=1,q=0)
## initial  value 0.746912 
## iter   1 value 0.746912
## final  value 0.746912 
## converged
## initial  value 0.746912 
## iter   1 value 0.746912
## final  value 0.746912 
## converged

## $fit
## 
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, 
##     Q), period = S), xreg = constant, transform.pars = trans, fixed = fixed, 
##     optim.control = list(trace = trc, REPORT = 1, reltol = tol))
## 
## Coefficients:
##       constant
##        -0.5253
## s.e.    0.0944
## 
## sigma^2 estimated as 4.454:  log likelihood = -1082.93,  aic = 2169.85
## 
## $degrees_of_freedom
## [1] 499
## 
## $ttable
##          Estimate     SE t.value p.value
## constant  -0.5253 0.0944  -5.566       0
## 
## $AIC
## [1] 4.3397
## 
## $AICc
## [1] 4.339716
## 
## $BIC
## [1] 4.356559

30 New differenced series in search of stationarity

D.ARIMA.110 <- diff(ARIMA.110,1)

31 Autocorrelation and partial autocorrelation functions

acf2(D.ARIMA.110)

##      [,1]  [,2] [,3]  [,4] [,5]  [,6] [,7] [,8]  [,9] [,10] [,11] [,12] [,13]
## ACF  0.87  0.75 0.65  0.56 0.48  0.40 0.35 0.31  0.27  0.24  0.21  0.18  0.14
## PACF 0.87 -0.05 0.02 -0.03 0.02 -0.05 0.04 0.04 -0.02  0.02 -0.03 -0.02 -0.03
##      [,14] [,15] [,16] [,17] [,18] [,19] [,20] [,21] [,22] [,23] [,24] [,25]
## ACF   0.11  0.07  0.05  0.04  0.04  0.05  0.04  0.02  0.01 -0.01 -0.02 -0.02
## PACF  0.00 -0.09  0.08  0.01  0.03  0.03 -0.06 -0.04 -0.02 -0.01  0.02  0.01
##      [,26] [,27] [,28] [,29] [,30] [,31] [,32] [,33]
## ACF  -0.03 -0.04 -0.06 -0.07 -0.08 -0.09  -0.1 -0.10
## PACF -0.03  0.01 -0.07  0.00 -0.01 -0.02   0.0  0.06

32 Fit a model

sarima(ARIMA.110,p=1,d=1,q=0)
## initial  value 0.743468 
## iter   2 value 0.005444
## iter   3 value 0.005328
## iter   4 value 0.005324
## iter   5 value 0.005324
## iter   6 value 0.005316
## iter   7 value 0.005314
## iter   8 value 0.005313
## iter   9 value 0.005312
## iter  10 value 0.005309
## iter  11 value 0.005305
## iter  12 value 0.005303
## iter  13 value 0.005303
## iter  14 value 0.005303
## iter  15 value 0.005303
## iter  16 value 0.005303
## iter  17 value 0.005303
## iter  18 value 0.005303
## iter  19 value 0.005303
## iter  20 value 0.005303
## iter  21 value 0.005303
## iter  22 value 0.005303
## iter  23 value 0.005303
## iter  24 value 0.005303
## iter  25 value 0.005303
## iter  26 value 0.005303
## iter  27 value 0.005303
## iter  28 value 0.005303
## iter  29 value 0.005303
## iter  30 value 0.005303
## iter  31 value 0.005303
## iter  32 value 0.005303
## iter  33 value 0.005303
## iter  34 value 0.005303
## iter  35 value 0.005303
## iter  36 value 0.005303
## iter  37 value 0.005303
## iter  37 value 0.005303
## iter  37 value 0.005303
## final  value 0.005303 
## converged
## initial  value 0.010434 
## iter   2 value 0.010394
## iter   3 value 0.010331
## iter   4 value 0.010327
## iter   5 value 0.010312
## iter   6 value 0.010288
## iter   7 value 0.010279
## iter   8 value 0.010261
## iter   9 value 0.010260
## iter  10 value 0.010218
## iter  11 value 0.010218
## iter  11 value 0.010218
## final  value 0.010218 
## converged

## $fit
## 
## Call:
## stats::arima(x = xdata, order = c(p, d, q), seasonal = list(order = c(P, D, 
##     Q), period = S), xreg = constant, transform.pars = trans, fixed = fixed, 
##     optim.control = list(trace = trc, REPORT = 1, reltol = tol))
## 
## Coefficients:
##          ar1  constant
##       0.8807   -0.6047
## s.e.  0.0213    0.3731
## 
## sigma^2 estimated as 1.018:  log likelihood = -714.58,  aic = 1435.16
## 
## $degrees_of_freedom
## [1] 498
## 
## $ttable
##          Estimate     SE t.value p.value
## ar1        0.8807 0.0213 41.3772  0.0000
## constant  -0.6047 0.3731 -1.6209  0.1057
## 
## $AIC
## [1] 2.870314
## 
## $AICc
## [1] 2.870362
## 
## $BIC
## [1] 2.895602

33 Fit a model with some coefficients held fixed

sarima(ts_moreno, p=3, d=1, q=3,fixed=c(0,0,NA,0,0,NA,NA)) # ts_moreno is an external series not defined in these notes; entries of fixed= set to 0 constrain those coefficients, NA entries are estimated