GARCH Simulation

The GARCH (Generalized Autoregressive Conditional Heteroskedasticity) model captures the volatility clustering of returns through the conditional variance. It recovers the medium-term average volatility via an autoregression in which today's variance depends on past shocks (squared innovations) and past conditional variances.
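For reference, the conditional variance equation of the GARCH(1,1) specification used throughout this report is

sigma[t]^2 = omega + alpha1 * eps[t-1]^2 + beta1 * sigma[t-1]^2

where eps[t-1] is the previous return shock and sigma[t-1]^2 the previous conditional variance; the implied long-run (unconditional) variance is omega / (1 - alpha1 - beta1).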

The Risk Department presents a simulation of one of the assets in our portfolio using this methodology. In this case we will use the security DO0127, a sovereign bond maturing in January 2027.

# Required packages
library(DBI); library(bigrquery)     # BigQuery connection and download
library(xts); library(quantmod)      # time-series objects and Cl()
library(rugarch)                     # GARCH estimation, forecasting, simulation, rolling backtest
library(dplyr); library(lubridate)   # lead(), ymd()
# do_arch_test(), find_best_arch_model() and rolling.var.backtest() are auxiliary helpers
# assumed to be sourced separately before running this report.

# Connect to BigQuery
con <- dbConnect(
  bigrquery::bigquery(),
  project = "picasso-364722",
  dataset = "vangogh",
  billing = Sys.getenv("picasso-364722")
)
symbol <- "DO0127"

precios <- paste0("SELECT fl_cierre AS fecha, mt_precio AS precio FROM `picasso-364722.vangogh.precios` WHERE (fl_cierre >= '2014-01-01') AND co_titulo = '", symbol, "'")

tb_precios <- bq_project_query("picasso-364722", precios)

sample <- bq_table_download(tb_precios)

sample <- xts(x = sample$precio, order.by = sample$fecha)
sample <- sample["2014::2100"]        # keep all observations from 2014 onward
colnames(sample)[1] <- "Close.precio" # column name needed so quantmod::Cl() works below

Let's look at the security's historical behavior to date.

plot(sample, type="l", main = paste("Evolución del precio de", symbol))

Now we split the historical data into two samples for an out-of-sample test, which we will use to validate the model we train.

H1.oos <- sample["2022::"]                 # out-of-sample data (2022 onward), used to validate the model

ticker <- sample["2014::2021"]             # training sample
ticker$daily.vol <- diff(log(Cl(ticker)))  # daily log returns, the series the GARCH model is fit on
ticker <- na.omit(ticker)
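As an illustrative aside (this plot is not part of the original workflow), plotting the log-return series itself makes the volatility clustering that motivates a GARCH model visible:

plot(ticker$daily.vol, main = paste("Daily log returns of", symbol))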

First we check the return series for ARCH effects; we will then use a helper function that estimates several candidate models with different probability distributions.

tab_out <- do_arch_test(x = ticker$daily.vol, max_lag = 5)
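The resulting table is not printed in the original; assuming the helper returns one ARCH-LM statistic and p-value per lag, significant p-values indicate ARCH effects and justify fitting a GARCH model:

print(tab_out)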

# Define the lag orders, i.e. how many past observations enter each term.
max_lag_AR <- 1    # m - AR order
max_lag_MA <- 1    # n - MA order
max_lag_ARCH <- 1  # p - ARCH order
max_lag_GARCH <- 1 # q - GARCH order
dist_to_use <- c('norm', 'std', 'sstd', 'jsu')          # distributions to try; see ?ugarchspec for more
models_to_estimate <- c('sGARCH', 'eGARCH', 'gjrGARCH') # GARCH variants to try

out <- find_best_arch_model(x = ticker$daily.vol, 
                            type_models = models_to_estimate,
                            dist_to_use = dist_to_use,
                            max_lag_AR = max_lag_AR,
                            max_lag_MA = max_lag_MA,
                            max_lag_ARCH = max_lag_ARCH,
                            max_lag_GARCH = max_lag_GARCH)
## Estimating ARMA(0,0)-sGARCH(1,1) dist = norm Done
## Estimating ARMA(0,1)-sGARCH(1,1) dist = norm Done
## Estimating ARMA(1,0)-sGARCH(1,1) dist = norm Done
## Estimating ARMA(1,1)-sGARCH(1,1) dist = norm Done
## Estimating ARMA(0,0)-sGARCH(1,1) dist = std  Done
## Estimating ARMA(0,1)-sGARCH(1,1) dist = std  Done
## Estimating ARMA(1,0)-sGARCH(1,1) dist = std  Done
## Estimating ARMA(1,1)-sGARCH(1,1) dist = std  Done
## Estimating ARMA(0,0)-sGARCH(1,1) dist = sstd Done
## Estimating ARMA(0,1)-sGARCH(1,1) dist = sstd Done
## Estimating ARMA(1,0)-sGARCH(1,1) dist = sstd Done
## Estimating ARMA(1,1)-sGARCH(1,1) dist = sstd Done
## Estimating ARMA(0,0)-sGARCH(1,1) dist = jsu  Done
## Estimating ARMA(0,1)-sGARCH(1,1) dist = jsu  Done
## Estimating ARMA(1,0)-sGARCH(1,1) dist = jsu  Done
## Estimating ARMA(1,1)-sGARCH(1,1) dist = jsu  Done
## Estimating ARMA(0,0)-eGARCH(1,1) dist = norm Done
## Estimating ARMA(0,1)-eGARCH(1,1) dist = norm Done
## Estimating ARMA(1,0)-eGARCH(1,1) dist = norm Done
## Estimating ARMA(1,1)-eGARCH(1,1) dist = norm Done
## Estimating ARMA(0,0)-eGARCH(1,1) dist = std  Done
## Estimating ARMA(0,1)-eGARCH(1,1) dist = std  Done
## Estimating ARMA(1,0)-eGARCH(1,1) dist = std  Done
## Estimating ARMA(1,1)-eGARCH(1,1) dist = std  Done
## Estimating ARMA(0,0)-eGARCH(1,1) dist = sstd Done
## Estimating ARMA(0,1)-eGARCH(1,1) dist = sstd Done
## Estimating ARMA(1,0)-eGARCH(1,1) dist = sstd Done
## Estimating ARMA(1,1)-eGARCH(1,1) dist = sstd Done
## Estimating ARMA(0,0)-eGARCH(1,1) dist = jsu  Done
## Estimating ARMA(0,1)-eGARCH(1,1) dist = jsu  Done
## Estimating ARMA(1,0)-eGARCH(1,1) dist = jsu  Done
## Estimating ARMA(1,1)-eGARCH(1,1) dist = jsu  Done
## Estimating ARMA(0,0)-gjrGARCH(1,1) dist = norm   Done
## Estimating ARMA(0,1)-gjrGARCH(1,1) dist = norm   Done
## Estimating ARMA(1,0)-gjrGARCH(1,1) dist = norm   Done
## Estimating ARMA(1,1)-gjrGARCH(1,1) dist = norm   Done
## Estimating ARMA(0,0)-gjrGARCH(1,1) dist = std    Done
## Estimating ARMA(0,1)-gjrGARCH(1,1) dist = std    Done
## Estimating ARMA(1,0)-gjrGARCH(1,1) dist = std    Done
## Estimating ARMA(1,1)-gjrGARCH(1,1) dist = std    Done
## Estimating ARMA(0,0)-gjrGARCH(1,1) dist = sstd   Done
## Estimating ARMA(0,1)-gjrGARCH(1,1) dist = sstd   Done
## Estimating ARMA(1,0)-gjrGARCH(1,1) dist = sstd   Done
## Estimating ARMA(1,1)-gjrGARCH(1,1) dist = sstd   Done
## Estimating ARMA(0,0)-gjrGARCH(1,1) dist = jsu    Done
## Estimating ARMA(0,1)-gjrGARCH(1,1) dist = jsu    Done
## Estimating ARMA(1,0)-gjrGARCH(1,1) dist = jsu    Done
## Estimating ARMA(1,1)-gjrGARCH(1,1) dist = jsu    Done
# Table with the 48 estimated model combinations
tab_out <- out$tab_out
print(tab_out)
## # A tibble: 48 × 9
##    lag_ar lag_ma lag_arch lag_garch   AIC   BIC type_model type_dist model_name 
##     <int>  <int>    <int>     <int> <dbl> <dbl> <chr>      <chr>     <chr>      
##  1      0      0        1         1 -8.33 -8.31 sGARCH     norm      ARMA(0,0)+…
##  2      0      1        1         1 -8.35 -8.33 sGARCH     norm      ARMA(0,1)+…
##  3      1      0        1         1 -8.36 -8.34 sGARCH     norm      ARMA(1,0)+…
##  4      1      1        1         1 -8.36 -8.33 sGARCH     norm      ARMA(1,1)+…
##  5      0      0        1         1 -8.52 -8.50 sGARCH     std       ARMA(0,0)+…
##  6      0      1        1         1 -8.56 -8.54 sGARCH     std       ARMA(0,1)+…
##  7      1      0        1         1 -8.57 -8.54 sGARCH     std       ARMA(1,0)+…
##  8      1      1        1         1 -8.56 -8.54 sGARCH     std       ARMA(1,1)+…
##  9      0      0        1         1 -8.52 -8.50 sGARCH     sstd      ARMA(0,0)+…
## 10      0      1        1         1 -8.56 -8.53 sGARCH     sstd      ARMA(0,1)+…
## # … with 38 more rows

Let's see which are the best models.

models_names <- unique(tab_out$model_name)
best_models <- c(tab_out$model_name[which.min(tab_out$AIC)],
                 tab_out$model_name[which.min(tab_out$BIC)])
print(best_models)
## [1] "ARMA(1,0)+sGARCH(1,1) jsu" "ARMA(1,0)+sGARCH(1,1) jsu"

Based on the Bayesian information criterion (BIC), we will keep the model that minimizes it. We will also look at the unconditional mean and variance implied by the model to understand the asset's volatility behavior.

The half-life is the number of days it takes for the conditional volatility to revert halfway back to its long-run (unconditional) level.

# Specify the model that minimizes the BIC
best_spec = ugarchspec(variance.model = list(model =  out$best_bic$type_model, 
                                             garchOrder = c(out$best_bic$lag_arch,
                                                            out$best_bic$lag_garch)),
                       mean.model = list(armaOrder = c(out$best_bic$lag_ar, 
                                                       out$best_bic$lag_ma)),
                       distribution = out$best_bic$type_dist)

# Fit the specified model to the return series
my_best_garch <- ugarchfit(spec = best_spec, 
                           data = ticker$daily.vol)
my_best_garch
## 
## *---------------------------------*
## *          GARCH Model Fit        *
## *---------------------------------*
## 
## Conditional Variance Dynamics    
## -----------------------------------
## GARCH Model  : sGARCH(1,1)
## Mean Model   : ARFIMA(1,0,0)
## Distribution : jsu 
## 
## Optimal Parameters
## ------------------------------------
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.000170    0.000114  1.49150  0.13583
## ar1     0.208820    0.026950  7.74848  0.00000
## omega   0.000001    0.000001  0.86736  0.38575
## alpha1  0.279012    0.074762  3.73201  0.00019
## beta1   0.714059    0.052854 13.50999  0.00000
## skew    0.032335    0.057622  0.56116  0.57469
## shape   1.093035    0.074311 14.70887  0.00000
## 
## Robust Standard Errors:
##         Estimate  Std. Error  t value Pr(>|t|)
## mu      0.000170    0.000125  1.36031 0.173733
## ar1     0.208820    0.028324  7.37244 0.000000
## omega   0.000001    0.000010  0.12899 0.897366
## alpha1  0.279012    0.328918  0.84827 0.396286
## beta1   0.714059    0.280354  2.54699 0.010866
## skew    0.032335    0.059672  0.54188 0.587904
## shape   1.093035    0.241477  4.52645 0.000006
## 
## LogLikelihood : 5324.356 
## 
## Information Criteria
## ------------------------------------
##                     
## Akaike       -8.5695
## Bayes        -8.5406
## Shibata      -8.5695
## Hannan-Quinn -8.5586
## 
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.2287  0.6325
## Lag[2*(p+q)+(p+q)-1][2]    1.4399  0.4712
## Lag[4*(p+q)+(p+q)-1][5]    4.1484  0.2028
## d.o.f=1
## H0 : No serial correlation
## 
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
##                         statistic p-value
## Lag[1]                     0.2965  0.5861
## Lag[2*(p+q)+(p+q)-1][5]    0.7109  0.9208
## Lag[4*(p+q)+(p+q)-1][9]    1.7804  0.9295
## d.o.f=2
## 
## Weighted ARCH LM Tests
## ------------------------------------
##             Statistic Shape Scale P-Value
## ARCH Lag[3]  0.007493 0.500 2.000  0.9310
## ARCH Lag[5]  0.706916 1.440 1.667  0.8214
## ARCH Lag[7]  1.342796 2.315 1.543  0.8523
## 
## Nyblom stability test
## ------------------------------------
## Joint Statistic:  41.2988
## Individual Statistics:             
## mu     0.1352
## ar1    1.1204
## omega  5.6211
## alpha1 0.2287
## beta1  0.4099
## skew   0.0993
## shape  0.6091
## 
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic:          1.69 1.9 2.35
## Individual Statistic:     0.35 0.47 0.75
## 
## Sign Bias Test
## ------------------------------------
##                    t-value    prob sig
## Sign Bias          2.26929 0.02342  **
## Negative Sign Bias 1.30562 0.19193    
## Positive Sign Bias 0.07127 0.94320    
## Joint Effect       6.44519 0.09185   *
## 
## 
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
##   group statistic p-value(g-1)
## 1    20     21.74       0.2974
## 2    30     37.06       0.1447
## 3    40     45.48       0.2203
## 4    50     60.09       0.1332
## 
## 
## Elapsed time : 0.514915
plot(my_best_garch, which="all")
## 
## please wait...calculating quantiles...

# Information criteria of the fitted model
infocriteria(my_best_garch)
##                       
## Akaike       -8.569470
## Bayes        -8.540570
## Shibata      -8.569533
## Hannan-Quinn -8.558602
# Unconditional mean: the long-run mean of the series if past conditions persist
uncmean(my_best_garch) 
## [1] 0.0001699124
# Unconditional variance: omega / (1 - alpha1 - beta1), the long-run variance if past conditions persist
uncvariance(my_best_garch) 
## [1] 0.0001812085
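As a quick sanity check (not part of the original output), the same value follows from the fitted coefficients:

cf <- coef(my_best_garch)
as.numeric(cf["omega"] / (1 - cf["alpha1"] - cf["beta1"]))  # long-run variance, matches uncvariance()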
# Persistence: alpha1 + beta1; values near 1 mean volatility shocks persist, values near 0 mean they fade quickly
persistence(my_best_garch) 
## [1] 0.9930712
# Half-life: number of days for the conditional variance to revert halfway to its unconditional level
halflife(my_best_garch)
## [1] 99.6922
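The half-life reported above follows directly from the persistence, half-life = log(0.5) / log(alpha1 + beta1); a quick check (not part of the original output):

log(0.5) / log(persistence(my_best_garch))  # ~ 99.7 days, matching halflife()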

Let's forecast the volatility over the next 10 days. The yellow cone in the plot shows the range within which the volatility is forecast to lie.

# 10-day-ahead forecast
garch.forecast = ugarchforecast(my_best_garch, n.ahead = 10)
garch.forecast
## 
## *------------------------------------*
## *       GARCH Model Forecast         *
## *------------------------------------*
## Model: sGARCH
## Horizon: 10
## Roll Steps: 0
## Out of Sample: 0
## 
## 0-roll forecast [T0=2021-12-31]:
##         Series    Sigma
## T+1  0.0003254 0.002218
## T+2  0.0002024 0.002478
## T+3  0.0001767 0.002712
## T+4  0.0001713 0.002926
## T+5  0.0001702 0.003123
## T+6  0.0001700 0.003308
## T+7  0.0001699 0.003482
## T+8  0.0001699 0.003646
## T+9  0.0001699 0.003802
## T+10 0.0001699 0.003951
# Plot the forecast
par(mfrow=c(2,1))
plot(garch.forecast, which=1)
plot(garch.forecast, which=3)

Monte Carlo Simulation

Using our fitted model, we now run one thousand simulations of the next year to see how the price could behave. We will mark several percentiles of the simulated distribution: 1%, 5%, 25%, 50%, 75%, 95% and 99%.

days.ahead <- 365
n.sim <- 1000
garch.sim <- matrix(nrow = days.ahead, ncol = n.sim) # matrix to hold the simulated return paths
set.seed(123)
# Run the simulations
for (i in 1:n.sim) {
  p.sim <- ugarchsim(my_best_garch, n.sim = days.ahead, startMethod = "sample")
  garch.sim[, i] <- p.sim@simulation$seriesSim
}
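As an aside, rugarch can generate all the paths in a single call through the m.sim argument of ugarchsim; a sketch equivalent to the loop above (object names are ours):

p.sim.all <- ugarchsim(my_best_garch, n.sim = days.ahead, m.sim = n.sim, startMethod = "sample")
garch.sim.alt <- p.sim.all@simulation$seriesSim  # days.ahead x n.sim matrix of simulated returns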

# Convert to a data frame
garch.sim <- as.data.frame(garch.sim)

# Add the percentiles of interest, computed across simulations for each day
garch.sim$Q25 <- NA
garch.sim$Q05 <- NA
garch.sim$Q01 <- NA
garch.sim$Q75 <- NA
garch.sim$Q95 <- NA
garch.sim$Q99 <- NA
# Quantiles are taken over the simulation columns only (1:n.sim), row by row
garch.sim$Q01 <- apply(garch.sim[, 1:n.sim], MARGIN = 1, FUN = function(x) quantile(na.omit(x), 0.01))
garch.sim$Q05 <- apply(garch.sim[, 1:n.sim], MARGIN = 1, FUN = function(x) quantile(na.omit(x), 0.05))
garch.sim$Q25 <- apply(garch.sim[, 1:n.sim], MARGIN = 1, FUN = function(x) quantile(na.omit(x), 0.25))
garch.sim$Q75 <- apply(garch.sim[, 1:n.sim], MARGIN = 1, FUN = function(x) quantile(na.omit(x), 0.75))
garch.sim$Q95 <- apply(garch.sim[, 1:n.sim], MARGIN = 1, FUN = function(x) quantile(na.omit(x), 0.95))
garch.sim$Q99 <- apply(garch.sim[, 1:n.sim], MARGIN = 1, FUN = function(x) quantile(na.omit(x), 0.99))

Let's look at the cutoff percentiles and the simulation, so we can compare them against the out-of-sample data and determine which market scenario we are in.
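The table below is expressed in price levels rather than returns. The conversion step is not shown in the original; a minimal sketch of what we assume was done (compounding each simulated log-return path onto the last observed price; last.price and price.paths are hypothetical names):

last.price <- as.numeric(tail(ticker$Close.precio, 1))                   # last price of the training sample
price.paths <- last.price * exp(apply(garch.sim[, 1:n.sim], 2, cumsum))  # one simulated price path per column

The row means and quantiles of price.paths would then populate a table like the one below.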

##                 Q01      Q05      Q25     mean      Q75      Q95      Q99
## 2022-12-17 76.02342 93.93484 109.6422 118.0220 125.0198 145.2241 168.5241
## 2022-12-18 75.85824 94.04852 109.6295 118.0721 125.2839 144.9143 168.4107
## 2022-12-19 72.83628 93.67351 109.6927 118.0888 125.3507 145.3012 168.3534
## 2022-12-20 73.41627 93.66552 109.8177 118.1211 125.3936 145.6580 169.5862
## 2022-12-21 73.52118 93.72368 110.0155 118.1046 125.4314 145.9109 167.7056
## 2022-12-22 73.52120 93.59566 109.8809 118.0835 125.2725 145.9865 170.8441
## 2022-12-23 73.48748 93.82591 109.7291 118.0656 125.3333 145.8448 171.4717
## 2022-12-24 73.49923 93.91211 109.6997 118.1118 125.5164 146.1437 171.1016
## 2022-12-25 73.70192 94.02526 109.5573 118.1375 125.5561 146.3336 170.1509
## 2022-12-26 74.40904 94.18640 109.6077 118.1766 125.6792 146.4455 168.9629
## 2022-12-27 74.75239 94.74431 109.7033 118.2057 125.7264 146.1049 168.8811
## 2022-12-28 74.51525 94.55942 109.6169 118.2162 125.5646 146.0678 169.7556
## 2022-12-29 74.61203 94.62742 109.3543 118.2087 125.7779 146.3220 169.4392
## 2022-12-30 73.14998 94.60004 109.4036 118.2274 126.0182 146.3867 168.2628
## 2022-12-31 73.19489 94.53186 109.4499 118.2732 125.8167 146.3994 169.1458

We now backtest the model, recalibrating it periodically to improve it. The backtest lets us check whether there were more VaR exceedances than the model predicted, gives us a hypothesis test on the model, and tells us whether the exceedances are dependent, i.e. whether they occur in clusters one after another.

garch.roll = ugarchroll(best_spec, ticker$daily.vol, n.ahead=1,
                        forecast.length = 1000, solver = "hybrid",
                        refit.every=30, refit.window="moving", VaR.alpha=c(0.01, 0.05, 0.95, 0.99)) # one-day-ahead forecasts, refitting the model every 30 days on a moving window
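If any of the 30-day refits fails to converge, rugarch provides convergence() and resume() to detect and re-estimate the failed windows; an aside, not part of the original run:

if (convergence(garch.roll) != 0) garch.roll <- resume(garch.roll, solver = "hybrid")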

# VaR backtest of the model
report(garch.roll, type="VaR")
## VaR Backtest Report
## ===========================================
## Model:               sGARCH-jsu
## Backtest Length: 1000
## Data:                
## 
## ==========================================
## alpha:               1%
## Expected Exceed: 10
## Actual VaR Exceed:   15
## Actual %:            1.5%
## 
## Unconditional Coverage (Kupiec)
## Null-Hypothesis: Correct Exceedances
## LR.uc Statistic: 2.189
## LR.uc Critical:      3.841
## LR.uc p-value:       0.139
## Reject Null:     NO
## 
## Conditional Coverage (Christoffersen)
## Null-Hypothesis: Correct Exceedances and
##                  Independence of Failures
## LR.cc Statistic: 2.647
## LR.cc Critical:      5.991
## LR.cc p-value:       0.266
## Reject Null:     NO
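The same backtest report can be produced for the other VaR levels included in the roll by passing VaR.alpha to report(); for example, for the 5% level:

report(garch.roll, type = "VaR", VaR.alpha = 0.05, conf.level = 0.95)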

Let's look at the forecast performance metrics of the rolling GARCH.

report(garch.roll, type="fpm")
## 
## GARCH Roll Mean Forecast Performance Measures
## ---------------------------------------------
## Model        : sGARCH
## No.Refits    : 34
## No.Forecasts: 1000
## 
##         Stats
## MSE 3.654e-05
## MAE 3.160e-03
## DAC 5.780e-01

Let's look at the plot of the exceedances.

# rolling.var.backtest() is an auxiliary helper (sourced separately) that tabulates and plots the VaR breaches
a <- rolling.var.backtest(rolling.garch.object=garch.roll, symbol=symbol, return.series=ticker$daily.vol, n=1000)


# Static (long-term) VaR thresholds from the historical return distribution
asset1.ret <- ticker$daily.vol
var05.breach <- which(garch.roll@forecast$VaR$realized < as.numeric(quantile(asset1.ret, 0.05)))
var01.breach <- which(garch.roll@forecast$VaR$realized < as.numeric(quantile(asset1.ret, 0.01)))
var95.breach <- which(garch.roll@forecast$VaR$realized > as.numeric(quantile(asset1.ret, 0.95)))
var99.breach <- which(garch.roll@forecast$VaR$realized > as.numeric(quantile(asset1.ret, 0.99)))
rows <- index(garch.roll@forecast$VaR[var95.breach,'realized'])

forecast.VaR <- as.data.frame(garch.roll@forecast$VaR)
forecast.VaR$var05.breach <- ifelse(forecast.VaR$realized < garch.roll@forecast$VaR$`alpha(5%)`,1,0)
forecast.VaR$var01.breach <- ifelse(forecast.VaR$realized < garch.roll@forecast$VaR$`alpha(1%)`,1,0)
forecast.VaR$var95.breach <- ifelse(forecast.VaR$realized > garch.roll@forecast$VaR$`alpha(95%)`,1,0)
forecast.VaR$var99.breach <- ifelse(forecast.VaR$realized > garch.roll@forecast$VaR$`alpha(99%)`,1,0)
forecast.VaR$nextday.ret <- lead(forecast.VaR$realized)

breach.days <- forecast.VaR[forecast.VaR$var95.breach == 1,]
breach.return <- forecast.VaR[forecast.VaR$var95.breach == 1,'nextday.ret']
breach.return <- cumprod(1 + na.omit(breach.return))

forecast.VaR <- as.xts(forecast.VaR)
forecast.density <- as.data.frame(garch.roll@forecast$density)
forecast.density <- as.xts(forecast.density)
forecast.density$price <- as.numeric(tail(asset1.ret,1000))

# see consecutive breaches
forecast.VaR <- a[[1]]
forecast.VaR1 <- as.data.frame(a[[1]][a[[1]]$var95.breach==1,])
forecast.VaR1$date <- ymd(rownames(forecast.VaR1))
forecast.VaR1$days.between <- NA
for(i in 2:nrow(forecast.VaR1)){
  forecast.VaR1$days.between[i] <- forecast.VaR1$date[i] - forecast.VaR1$date[i-1]
}
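Equivalently (a small aside), the gaps in days between consecutive breaches can be computed without the loop:

forecast.VaR1$days.between <- c(NA, as.numeric(diff(forecast.VaR1$date)))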

# Plot 1. VaR Only
par(mfrow=c(1,1), mar=c(3,3,3,3))
rolling.plot <- plot(forecast.VaR$realized, ylim=c(min(forecast.VaR$`alpha(1%)` -0.01), max(forecast.VaR$`alpha(99%)` +0.01)), main=paste(symbol,"t+1 Rolling sGARCH VaR Estimation w/ 30-Day Refit"), type="h", col="black", lwd=0.50)
rolling.plot <- lines(forecast.VaR$`alpha(5%)`, on=1, col="orange", lty=1, lwd=0.75)
rolling.plot <- lines(forecast.VaR$`alpha(1%)`, on=1, col="red", lty=1, lwd=1)
rolling.plot <- lines(forecast.VaR$`alpha(95%)`, on=1, col="orange", lty=1, lwd=0.75)
rolling.plot <- lines(forecast.VaR$`alpha(99%)`, on=1, col="red", lty=1, lwd=1)
rolling.plot <- points(forecast.VaR[forecast.VaR$var99.breach==1,5], col="red", cex=1.25, pch=16)
rolling.plot <- points(forecast.VaR[forecast.VaR$var01.breach==1,1], col="red", cex=1.25, pch=16)
rolling.plot <- addLegend("bottomleft", legend.names=c(paste("05pctile: Current", round(last(forecast.VaR$`alpha(5%)`)*100,2),"%"), 
                                                       paste("01pctile: Current", round(last(forecast.VaR$`alpha(1%)`)*100,2),"%")), lty=c(1,1), col=c("orange","red"), bty="n", y.intersp = 0.75)
rolling.plot <- addLegend("topleft", legend.names=c(paste("99pctile: Current", round(last(forecast.VaR$`alpha(99%)`)*100,2),"%"), 
                                                    paste("95pctile: Current", round(last(forecast.VaR$`alpha(95%)`)*100,2),"%")), lty=c(1,1), col=c("red","orange"), bty="n", y.intersp = 0.75)

plot(rolling.plot)