library(tibble)
library(httr)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(xts)
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
##
## ######################### Warning from 'xts' package ##########################
## # #
## # The dplyr lag() function breaks how base R's lag() function is supposed to #
## # work, which breaks lag(my_xts). Calls to lag(my_xts) that you type or #
## # source() into this session won't work correctly. #
## # #
## # Use stats::lag() to make sure you're not using dplyr::lag(), or you can add #
## # conflictRules('dplyr', exclude = 'lag') to your .Rprofile to stop #
## # dplyr from breaking base R's lag() function. #
## # #
## # Code in packages is not affected. It's protected by R's namespace mechanism #
## # Set `options(xts.warn_dplyr_breaks_lag = FALSE)` to suppress this warning. #
## # #
## ###############################################################################
##
## Attaching package: 'xts'
## The following objects are masked from 'package:dplyr':
##
## first, last
library(xml2)
library(rvest)
library(quantmod)
## Loading required package: TTR
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(tidyquant)
## Loading required package: lubridate
##
## Attaching package: 'lubridate'
## The following objects are masked from 'package:base':
##
## date, intersect, setdiff, union
## Loading required package: PerformanceAnalytics
##
## Attaching package: 'PerformanceAnalytics'
## The following object is masked from 'package:graphics':
##
## legend
library(kableExtra)
##
## Attaching package: 'kableExtra'
## The following object is masked from 'package:dplyr':
##
## group_rows
library(ggplot2)
library(tseries)
library(moments)
##
## Attaching package: 'moments'
## The following objects are masked from 'package:PerformanceAnalytics':
##
## kurtosis, skewness
library(AdequacyModel)
library(rugarch)
## Loading required package: parallel
##
## Attaching package: 'rugarch'
## The following object is masked from 'package:stats':
##
## sigma
library(lmtest)
library(car)
## Loading required package: carData
##
## Attaching package: 'car'
## The following object is masked from 'package:dplyr':
##
## recode
library(forecast)
library(gridExtra)
##
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
##
## combine
library(GAS)
##
## Attaching package: 'GAS'
## The following objects are masked from 'package:rugarch':
##
## convergence, pit, residuals
## The following object is masked from 'package:PerformanceAnalytics':
##
## ES
## The following object is masked from 'package:stats':
##
## residuals
library(knitr)
library(fGarch)
## NOTE: Packages 'fBasics', 'timeDate', and 'timeSeries' are no longer
## attached to the search() path when 'fGarch' is attached.
##
## If needed attach them yourself in your R script by e.g.,
## require("timeSeries")
##
## Attaching package: 'fGarch'
## The following object is masked from 'package:TTR':
##
## volatility
library(purrr)
##
## Attaching package: 'purrr'
## The following object is masked from 'package:car':
##
## some
## The following object is masked from 'package:rugarch':
##
## reduce
library(quarks)
## ********************************************************************************
## Welcome to the package 'quarks'!
## ********************************************************************************
##
## Please report any possible errors and bugs to sebastian.letmathe@uni-paderborn.de.
## Thank you.
##
## ********************************************************************************
# Set up the data
VN30_Index <- read.csv("D:/Jupyter/Untitled Folder/new_t.csv")
VN30_Index <- VN30_Index %>%
  mutate(symbol = "^VN30")
VN30_Index$date <- as.Date(VN30_Index$date)
VN30_Index[1250,]
## date Open_price Highest_price Lowest_price Closed_price
## 1250 2022-03-25 1499.34 1503.18 1494.28 1498.36
## Trading_Volume symbol
## 1250 109442400 ^VN30
VN30_Index %>%
  ggplot(aes(date, Closed_price, color = symbol)) +
  geom_line(show.legend = FALSE) +
  labs(x = "Year", y = "INDEX", title = "Figure 1: VN30 Index Trend")

VN30_Index %>%
  group_by(symbol) %>%
  mutate(Log_Price_Close = log(Closed_price)) %>%
  ungroup() %>%
  select(symbol, date, Log_Price_Close) -> VN30_Index_log
VN30_Index_Modified <- VN30_Index[, c(1, 5)]
Date <- as.Date(VN30_Index_Modified$date)   # already Date class after the conversion above
VN30_Index_Modified <- VN30_Index_Modified[order(Date), ]
VN30_Index_Modified1 <- xts(x = VN30_Index_Modified$Closed_price, order.by = Date)
names(VN30_Index_Modified1) <- "VN30"
VN30.logret <- diff(log(VN30_Index_Modified1), lag = 1)
VN30.logret[1, ] <- 0
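The object `VN30.logret` holds daily log returns. Writing $P_t$ for the closing level of the index on day $t$, the transformation above is

$$ r_t = \ln P_t - \ln P_{t-1} = \ln\!\left(\frac{P_t}{P_{t-1}}\right), \qquad r_1 := 0. $$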
# Histogram of the first 1000 log returns (density scale) with a kernel density overlay
hist(VN30.logret[1:1000], breaks = 50, col = "deeppink1", freq = FALSE)
lines(density(as.numeric(VN30.logret[1:1000])), col = "blue")

plot(VN30.logret[1:1000], col = "deeppink1", lwd = 1)

ind.summary <- c("Average" = mean(VN30.logret[1:1000]),
                 "Standard Deviation" = sd(VN30.logret),
                 "Skewness" = skewness(VN30.logret),
                 "Kurtosis" = kurtosis(VN30.logret))
ind.summary %>%
  kbl(caption = "Table 2: Data indicators summary", escape = TRUE) %>%
  kable_classic(full_width = FALSE, html_font = "cambria")
Table 2: Data indicators summary

                              x
Average               0.0005184
Standard Deviation    0.0125081
Skewness.VN30        -0.9816194
Kurtosis.VN30         7.5767655
acf(VN30.logret[1:1000])

pacf(VN30.logret[1:1000])

adf.test(VN30.logret[1:1000])
## Warning in adf.test(VN30.logret[1:1000]): p-value smaller than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: VN30.logret[1:1000]
## Dickey-Fuller = -9.9339, Lag order = 9, p-value = 0.01
## alternative hypothesis: stationary
kpss.test(VN30.logret[1:1000], null="Trend",lshort=FALSE)
##
## KPSS Test for Trend Stationarity
##
## data: VN30.logret[1:1000]
## KPSS Trend = 0.14976, Truncation lag parameter = 21, p-value = 0.04686
model.arima = auto.arima(VN30.logret[1:1000], max.order = 10, stationary = TRUE, trace = TRUE, ic = 'aicc')
##
## Fitting models using approximations to speed things up...
##
## ARIMA(2,0,2) with non-zero mean : Inf
## ARIMA(0,0,0) with non-zero mean : -5896.814
## ARIMA(1,0,0) with non-zero mean : -5897.031
## ARIMA(0,0,1) with non-zero mean : -5897.462
## ARIMA(0,0,0) with zero mean : -5897.147
## ARIMA(1,0,1) with non-zero mean : -5900.829
## ARIMA(2,0,1) with non-zero mean : -5903.603
## ARIMA(2,0,0) with non-zero mean : -5905.589
## ARIMA(3,0,0) with non-zero mean : -5902.641
## ARIMA(3,0,1) with non-zero mean : Inf
## ARIMA(2,0,0) with zero mean : -5906.366
## ARIMA(1,0,0) with zero mean : -5897.549
## ARIMA(3,0,0) with zero mean : -5903.421
## ARIMA(2,0,1) with zero mean : -5904.373
## ARIMA(1,0,1) with zero mean : -5901.716
## ARIMA(3,0,1) with zero mean : Inf
##
## Now re-fitting the best model(s) without approximations...
##
## ARIMA(2,0,0) with zero mean : -5907.99
##
## Best model: ARIMA(2,0,0) with zero mean
model.arima
## Series: VN30.logret[1:1000]
## ARIMA(2,0,0) with zero mean
##
## Coefficients:
## ar1 ar2
## 0.0521 0.1068
## s.e. 0.0314 0.0314
##
## sigma^2 = 0.0001585: log likelihood = 2957.01
## AIC=-5908.01 AICc=-5907.99 BIC=-5893.29
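Written out with the estimates above, the selected zero-mean AR(2) is

$$ r_t = 0.0521\, r_{t-1} + 0.1068\, r_{t-2} + \varepsilon_t, \qquad \operatorname{Var}(\varepsilon_t) = \hat\sigma^2 = 1.585\times10^{-4}. $$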
model.arima$residuals %>% ggtsdisplay(plot.type = 'hist' , lag.max = 14)

ar.res = model.arima$residuals
Box.test(model.arima$residuals , lag = 7 , fitdf = 2 , type = 'Ljung-Box')
##
## Box-Ljung test
##
## data: model.arima$residuals
## X-squared = 7.085, df = 5, p-value = 0.2144
adf.test(model.arima$residuals)
## Warning in adf.test(model.arima$residuals): p-value smaller than printed
## p-value
##
## Augmented Dickey-Fuller Test
##
## data: model.arima$residuals
## Dickey-Fuller = -10.031, Lag order = 9, p-value = 0.01
## alternative hypothesis: stationary
kpss.test(model.arima$residuals, null="Trend",lshort=FALSE)
##
## KPSS Test for Trend Stationarity
##
## data: model.arima$residuals
## KPSS Trend = 0.14531, Truncation lag parameter = 21, p-value = 0.05128
jarque.bera.test(model.arima$residuals)
##
## Jarque Bera Test
##
## data: model.arima$residuals
## X-squared = 1082.8, df = 2, p-value < 2.2e-16
pacf(model.arima$residuals)

tsdisplay(ar.res^2 , main = 'Squared Residuals')

model.spec = ugarchspec(variance.model = list(model = 'sGARCH', garchOrder = c(1, 1)),mean.model = list(armaOrder = c(0, 0)))
model.fit = ugarchfit(spec = model.spec , data = ar.res , solver = 'solnp')
model.fit@fit$matcoef
## Estimate Std. Error t value Pr(>|t|)
## mu 4.772880e-04 2.997320e-04 1.592383 1.112988e-01
## omega 2.452173e-06 1.790145e-06 1.369818 1.707436e-01
## alpha1 9.366453e-02 1.723089e-02 5.435850 5.453571e-08
## beta1 8.965904e-01 1.681876e-02 53.308944 0.000000e+00
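A quick sanity check on the fitted sGARCH(1,1): the persistence $\hat\alpha_1 + \hat\beta_1$ is roughly 0.99 from the table above, so volatility shocks decay slowly. A minimal sketch, reading the values off `coef(model.fit)` (the names `persistence` and `uncond.var` are just illustrative):
# Persistence and implied long-run variance of the fitted sGARCH(1,1)
garch.coef  <- coef(model.fit)                              # named vector: mu, omega, alpha1, beta1
persistence <- garch.coef["alpha1"] + garch.coef["beta1"]   # about 0.99 from the table above
uncond.var  <- garch.coef["omega"] / (1 - persistence)      # omega / (1 - alpha1 - beta1)
c(persistence = unname(persistence), uncond.sd = sqrt(unname(uncond.var)))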
quantile(VN30.logret[1:1000] , 0.05)
## 5%
## -0.01943961
qplot(VN30.logret[1:1000] , geom = 'histogram') + geom_histogram(fill = 'lightblue' , bins = 30) +geom_histogram(aes(VN30.logret[1:1000][VN30.logret[1:1000] < quantile(VN30.logret[1:1000] , 0.05)]) , fill = 'red' , bins = 30) +labs(x = 'Daily Log Returns')
## Warning: `qplot()` was deprecated in ggplot2 3.4.0.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
## Don't know how to automatically pick scale for object of type <xts/zoo>.
## Defaulting to continuous.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

qqnorm(VN30.logret[1:1000])
qqline(VN30.logret[1:1000])

jarque.bera.test(ar.res^2)
##
## Jarque Bera Test
##
## data: ar.res^2
## X-squared = 63400, df = 2, p-value < 2.2e-16
jarque.bera.test(ar.res)
##
## Jarque Bera Test
##
## data: ar.res
## X-squared = 1082.8, df = 2, p-value < 2.2e-16
jarque.bera.test(VN30.logret[1:1000])
##
## Jarque Bera Test
##
## data: VN30.logret[1:1000]
## X-squared = 1225.1, df = 2, p-value < 2.2e-16
# Create density plots: Student-t with df = 7 and df = 30 against the standard normal
curve(dt(x, df=7), from=-4, to=4, col='blue')
curve(dt(x, df=30), from=-4, to=4, col='green', add=TRUE)
curve(dnorm(x, mean=0, sd=1), from=-4, to=4, col='red', add=TRUE)
# Add legend (labels match the degrees of freedom plotted above)
legend(-4, .3, legend=c("df=7", "df=30", "normal"),
       col=c("blue", "green", "red"), lty=1, cex=1.2)

p2_1 = qplot(VN30.logret[1:1000], geom = 'density') + geom_density(fill = 'blue', alpha = 0.4) +
  geom_density(aes(rnorm(200000, 0, sd(VN30.logret[1:1000]))), fill = 'red', alpha = 0.25) +
  labs(x = '')
p2_2 = qplot(VN30.logret[1:1000], geom = 'density') + geom_density(fill = 'blue', alpha = 0.4) +
  geom_density(aes(rnorm(200000, 0, sd(VN30.logret[1:1000]))), fill = 'red', alpha = 0.25) +
  coord_cartesian(xlim = c(-0.07, -0.02), ylim = c(0, 10)) +
  geom_vline(xintercept = c(qnorm(p = c(0.01, 0.025, 0.05, 0.1), mean = mean(VN30.logret[1:1000]), sd = sd(VN30.logret[1:1000]))),
             color = c('darkgreen', 'green', 'blue', 'red'), linewidth = 1) + labs(x = 'Daily Log Returns')
grid.arrange(p2_1 , p2_2 , ncol = 1)
## Don't know how to automatically pick scale for object of type <xts/zoo>.
## Defaulting to continuous.
## Don't know how to automatically pick scale for object of type <xts/zoo>.
## Defaulting to continuous.

fitdist(distribution = 'std' , x = VN30.logret[1:1000])$pars
## mu sigma shape
## 0.001430956 0.016724441 2.441938207
shape <- fitdist(distribution = 'std' , x = VN30.logret[1:1000])$pars['shape']
cat("For a = 0.1 the quantile value of normal distribution is: ",
qnorm(p=0.1),"\n" ,
"For a = 0.1 the quantile value of t-distribution is: " ,
qdist(distribution = 'std' , shape = shape , p = 0.1) , "\n" , "\n" ,
"For a = 0.025 the quantile value of normal distribution is: ",
qnorm(p=0.025),"\n" ,
"For a = 0.025 the quantile value of t-distribution is: " ,
qdist(distribution = 'std' , shape = shape , p = 0.025) , "\n" , "\n" ,
"For a = 0.05 the quantile value of normal distribution is: " ,
qnorm(p = 0.05) , "\n" ,
"For a = 0.05 the quantile value of t-distribution is: " ,
qdist(distribution = 'std' , shape = shape , p = 0.05) , "\n" , "\n" ,
'For a = 0.01 the quantile value of normal distribution is: ' ,
qnorm(p = 0.01) , "\n" ,
"For a = 0.01 the quantile value of t-distribution is: " ,
qdist(distribution = 'std' , shape = shape , p = 0.01) , sep = "")
## For a = 0.1 the quantile value of normal distribution is: -1.281552
## For a = 0.1 the quantile value of t-distribution is: -0.7419843
##
## For a = 0.025 the quantile value of normal distribution is: -1.959964
## For a = 0.025 the quantile value of t-distribution is: -1.546898
##
## For a = 0.05 the quantile value of normal distribution is: -1.644854
## For a = 0.05 the quantile value of t-distribution is: -1.101686
##
## For a = 0.01 the quantile value of normal distribution is: -2.326348
## For a = 0.01 the quantile value of t-distribution is: -2.333126
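These quantiles are what the rolling forecasts below are scaled by: with $\hat\sigma_{t+1}$ the one-step-ahead GARCH volatility from `model.roll@forecast$density[,'Sigma']` and $q_\alpha$ the $\alpha$-quantile of the fitted standardized Student-t, the parametric VaR used in the backtests is

$$ \mathrm{VaR}_{t+1}(\alpha) = \hat\sigma_{t+1}\, q_\alpha . $$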
# CL 99%
qbinom(0.95,250,0.01,lower.tail=TRUE,log.p=FALSE)
## [1] 5
qbinom(0.9999,250,0.01,lower.tail=TRUE,log.p=FALSE)
## [1] 10
# CL 97.5%
qbinom(0.95,250,0.025,lower.tail=TRUE,log.p=FALSE)
## [1] 11
qbinom(0.9999,250,0.025,lower.tail=TRUE,log.p=FALSE)
## [1] 17
# CL 95%
qbinom(0.95,250,0.05,lower.tail=TRUE,log.p=FALSE)
## [1] 18
qbinom(0.9999,250,0.05,lower.tail=TRUE,log.p=FALSE)
## [1] 27
# CL 90%
qbinom(0.95,250,0.1,lower.tail=TRUE,log.p=FALSE)
## [1] 33
qbinom(0.9999,250,0.1,lower.tail=TRUE,log.p=FALSE)
## [1] 44
# Plot the binomial pmf for a given size and prob, highlighting the outcomes from lb to ub
binom_sum <- function(size, prob, lb, ub, col = 4, lwd = 1, ...) {
  x <- 0:size
  if (missing(lb)) {
    lb <- min(x)
  }
  if (missing(ub)) {
    ub <- max(x)
  }
  plot(dbinom(x, size = size, prob = prob), type = "h", lwd = lwd, ...)
  if (lb == min(x) & ub == max(x)) {
    color <- col
  } else {
    color <- rep(1, length(x))
    color[(lb + 1):ub] <- col
  }
  lines(dbinom(x, size = size, prob = prob), type = "h",
        col = color, lwd = lwd, ...)
}
binom_sum(size = 250, prob = 0.01, ub =5, lwd = 1,
ylab = "P(X = x)", xlab = "Number of successes")

binom_sum(size = 250, prob = 0.01, ub =10, lwd = 1,
ylab = "P(X = x)", xlab = "Number of successes")

model.roll = ugarchroll(spec = model.spec, data = VN30.logret, n.start = 1000, refit.window = 'moving')
VaR90_td = model.roll@forecast$density[,'Sigma']*qdist(distribution='std', shape=shape, p=0.1)
qplot(y = VaR90_td , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR90_td)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with GARCH approach: ' , (sum(VN30.logret[1001:1250] < VaR90_td)) , sep = '')
## Number of exceptions with GARCH approach: 40
VaRTest(alpha=0.1,actual=VN30.logret[1001:1250],VaR=VaR90_td,conf.level = 0.95)
## $expected.exceed
## [1] 25
##
## $actual.exceed
## [1] 40
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 8.623284
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.003318929
##
## $uc.Decision
## [1] "Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 9.982079
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.006798593
##
## $cc.Decision
## [1] "Reject H0"
VaRDurTest_Chist <- function(alpha, actual, var)
{
  # Christoffersen-style independence test: count the transitions of the
  # exceedance indicator (1 = actual return below the VaR forecast)
  n00 <- 0
  n01 <- 0
  n10 <- 0
  n11 <- 0
  if (actual[1] < var[1])
  {
    n01 <- n01 + 1
  }
  if (actual[1] > var[1])
  {
    n00 <- n00 + 1
  }
  for (i in 2:length(var))
  {
    if (actual[i] < var[i] & actual[i-1] < var[i-1])
    {
      n11 <- n11 + 1
    }
    if (actual[i] < var[i] & actual[i-1] > var[i-1])
    {
      n01 <- n01 + 1
    }
    if (actual[i] > var[i] & actual[i-1] > var[i-1])
    {
      n00 <- n00 + 1
    }
    if (actual[i] > var[i] & actual[i-1] < var[i-1])
    {
      n10 <- n10 + 1
    }
  }
  pi0 <- n01/(n00+n01)
  pi1 <- n11/(n10+n11)
  pi <- (n01+n11)/(n10+n11+n00+n01)
  tuso <- (1-pi)^(n00+n10)*pi^(n01+n11)               # numerator: likelihood under independence
  mauso <- (1-pi0)^n00*pi0^n01*(1-pi1)^n10*pi1^n11    # denominator: likelihood under the Markov alternative
  z <- -2*log(tuso/mauso)
  if (z < qchisq(p=1-alpha, df=1, ncp = 0, lower.tail = TRUE, log.p = FALSE))
  {
    cat("Accept H0 with LRstat is: ",z)
  }
  if (z > qchisq(p=1-alpha, df=1, ncp = 0, lower.tail = TRUE, log.p = FALSE))
  {
    cat("Reject H0 with LRstat is: ",z)
  }
}
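In terms of the counters above ($n_{ij}$ = number of days on which state $i$ at $t-1$ is followed by state $j$ at $t$, with state 1 an exceedance), the statistic is the Markov independence likelihood ratio

$$ LR_{ind} = -2\,\ln\frac{(1-\pi)^{n_{00}+n_{10}}\,\pi^{\,n_{01}+n_{11}}}{(1-\pi_0)^{n_{00}}\,\pi_0^{\,n_{01}}\,(1-\pi_1)^{n_{10}}\,\pi_1^{\,n_{11}}},\qquad \pi_0=\frac{n_{01}}{n_{00}+n_{01}},\quad \pi_1=\frac{n_{11}}{n_{10}+n_{11}},\quad \pi=\frac{n_{01}+n_{11}}{n_{00}+n_{01}+n_{10}+n_{11}}, $$

compared against a $\chi^2_1$ critical value at level $\alpha$.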
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR90_td)
## Accept H0 with LRstat is: 1.387995
VaRDurTest_Haas <- function(alpha, actual, var, conf)
{
  # Duration-based test: compare the observed durations between exceedances
  # with the geometric durations implied by the coverage level alpha
  vec <- c(0)
  stat <- c()
  for (i in 1:length(var))
  {
    if (actual[i] < var[i])
    {
      vec <- cbind(vec, i)   # record the indices of the exceedances
    }
  }
  for (j in 1:length(vec))
  {
    stat[j] <- -2*log((alpha*(1-alpha)^(vec[j+1]-vec[j]-1))/
                        ((1/(vec[j+1]-vec[j]))*(1-(1/(vec[j+1]-vec[j])))^(vec[j+1]-vec[j]-1)))
  }
  z <- sum(na.omit(stat))
  if (z < qchisq(p=conf, df=length(vec)-1, ncp = 0, lower.tail = TRUE, log.p = FALSE))
  {
    cat("Accept H0 with LRstat is: ",z)
  }
  if (z > qchisq(p=conf, df=length(vec)-1, ncp = 0, lower.tail = TRUE, log.p = FALSE))
  {
    cat("Reject H0 with LRstat is: ",z)
  }
}
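The function above works with the durations $d_j$ between consecutive recorded exceedance indices in `vec`. Each duration contributes

$$ LR_j = -2\,\ln\frac{\alpha\,(1-\alpha)^{d_j-1}}{\tfrac{1}{d_j}\left(1-\tfrac{1}{d_j}\right)^{d_j-1}}, $$

the likelihood ratio of a geometric duration with parameter $\alpha$ (correct coverage) against one evaluated at $1/d_j$; the statistic is the sum of these terms, compared with a $\chi^2$ critical value whose degrees of freedom equal the number of exceedances.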
VaRDurTest_Haas(alpha=0.1,actual=as.numeric(VN30.logret[1001:1250]),var=VaR90_td,conf=0.95)
## Reject H0 with LRstat is: 65.839
my_function <- function(x)
{
  qt(p = x, df = shape)
}
ES90_td = (1/(1-0.1))*model.roll@forecast$density[,'Sigma']*integrate(my_function,lower = 0.0000001,upper = 0.1)$value*(1/(shape-1))*(shape+(qt(p=0.1,df=shape))^2)
qplot(y = VaR90_td , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES90_td,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR90_td)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

# Acerbi/Szekely - Test 1 (2014)
as1 = function(x, var, es){
  # Inputs:
  #   x   | time series
  #   var | VaR prediction
  #   es  | ES prediction
  # Output:
  #   z   | test statistic
  z = 0
  n = length(x)
  nt = 0
  for (i in 1:n)
  {
    if (x[i] < as.numeric(var[i]))
    {
      nt = nt + 1
      z = z + x[i]/as.numeric(-es[i])
    }
  }
  z = z/nt + 1
  return(z)
}
z1_90 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR90_td,es=ES90_td)
z1_90
## [1] -0.04560795
# Acerbi/Szekely - Test 2 (2014)
as2 = function(x, var, es, alpha){
  # Inputs:
  #   x     | time series
  #   var   | VaR prediction
  #   es    | ES prediction
  #   alpha | confidence level
  # Output:
  #   z     | test statistic
  n = length(x)
  z = 0
  for (i in 1:n){
    if (x[i] < var[i])
    {
      z = z + (x[i])/(n*alpha*-es[i])
    }
  }
  z = z + 1
  return(z)
}
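With the sign convention used here (returns, VaR and ES all stored as signed returns, so the ES forecasts are negative numbers), the two functions implement the Acerbi-Szekely statistics

$$ Z_1 = \frac{1}{N_T}\sum_{t:\,x_t<\mathrm{VaR}_t}\frac{x_t}{-\mathrm{ES}_t} + 1, \qquad Z_2 = \sum_{t:\,x_t<\mathrm{VaR}_t}\frac{x_t}{T\,\alpha\,(-\mathrm{ES}_t)} + 1, $$

where $N_T$ is the number of exceedances and $T$ the length of the test window. Values near zero are consistent with adequate ES forecasts, while markedly negative values indicate realized tail losses larger than predicted.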
z2_90 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR90_td,es=ES90_td,alpha=0.05)
z2_90
## [1] -2.345945
forc <- c(as.numeric(VN30.logret[1:1000]),model.roll@forecast$density[,'Sigma'])*rdist(distribution='std', shape=shape, n=250)
DoGARCH_VaR <- function(y, probability, portfolio_value = 1, WE)
{
  # Number of observations
  n <- length(y)
  # Initialize empty VaR vector
  VaR <- rep(0, n)
  # Do a loop for the forecast
  for (i in 1:(n-WE)){
    # Subset the dataset to the estimation window
    window <- y[i:(i+WE-1)]
    op <- ceiling(length(window)*probability)
    # Allocate the VaR forecast in the vector
    VaR[i+WE] <- sort(as.numeric(window))[op]
  }
  # Return the VaR vector
  return(VaR)
}
VaR_90_his <- DoGARCH_VaR(y=forc, probability=0.1, WE=1000)
VaR_90_his <- VaR_90_his[1001:1250]
qplot(y = VaR_90_his , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_90_his)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with His approach: ' , (sum(VN30.logret[1001:1250] < VaR_90_his)) , sep = '')
## Number of exceptions with His approach: 45
VaRTest(alpha=0.1,actual=VN30.logret[1001:1250],VaR=VaR_90_his,conf.level = 0.95)
## $expected.exceed
## [1] 25
##
## $actual.exceed
## [1] 45
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 14.73373
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.0001238116
##
## $uc.Decision
## [1] "Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 15.3463
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.0004651499
##
## $cc.Decision
## [1] "Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_90_his)
## Accept H0 with LRstat is: 0.6349269
VaRDurTest_Haas(alpha=0.1,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_90_his,conf=0.95)
## Reject H0 with LRstat is: 75.58079
DoGARCH_ES <- function(y, probability, portfolio_value = 1, WE)
{
  # Number of observations
  n <- length(y)
  # Initialize empty ES vector
  ES <- rep(0, n)
  # Do a loop for the forecast
  for (i in 1:(n-WE)){
    # Subset the dataset to the estimation window
    window <- y[i:(i+WE-1)]
    op <- ceiling(length(window)*probability)
    # Allocate the ES forecast: average of the window returns at or below the VaR quantile
    ES[i+WE] <- mean(sort(window)[1:op])
  }
  # Return the ES vector
  return(ES)
}
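Both helpers reduce to order statistics of the estimation window: the VaR is the `op`-th smallest simulated return and the ES is the mean of the returns at or below it. A quick sketch of that equivalence on the first 1000-observation window of `forc` (the names `w` and `op` are just illustrative):
# The first forecast (stored at position 1001) is an order statistic of the first window
w  <- as.numeric(forc[1:1000])
op <- ceiling(length(w) * 0.1)          # cut-off index for the 10% left tail
c(VaR_window = sort(w)[op],             # what DoGARCH_VaR(..., probability = 0.1) places at 1001
  ES_window  = mean(sort(w)[1:op]))     # what DoGARCH_ES(..., probability = 0.1) places at 1001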
ES_90_his <- DoGARCH_ES(y=forc, probability=0.1, WE=1000)
ES_90_his <- ES_90_his[1001:1250]
qplot(y = VaR_90_his , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES_90_his,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_90_his)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

z1_his_90 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR_90_his,es=ES_90_his)
z1_his_90
## [1] 0.09976771
z2_his_90 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR_90_his,es=ES_90_his,alpha=0.05)
z2_his_90
## [1] -2.240836
model.roll = ugarchroll(spec = model.spec, data = VN30.logret, n.start = 1000, refit.window = 'moving')
VaR95_td = model.roll@forecast$density[,'Sigma']*qdist(distribution='std', shape=shape, p=0.05)
qplot(y = VaR95_td , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR95_td)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with GARCH approach: ' , (sum(VN30.logret[1001:1250] < VaR95_td)) , sep = '')
## Number of exceptions with GARCH approach: 21
VaRTest(alpha=0.05,actual=VN30.logret[1001:1250],VaR=VaR95_td,conf.level = 0.95)
## $expected.exceed
## [1] 12
##
## $actual.exceed
## [1] 21
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 5.097245
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.02396387
##
## $uc.Decision
## [1] "Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 5.969293
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.05055738
##
## $cc.Decision
## [1] "Fail to Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR95_td)
## Accept H0 with LRstat is: 0.8837915
VaRDurTest_Haas(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR95_td,conf=0.95)
## Accept H0 with LRstat is: 30.74718
ES95_td = (1/(1-0.05))*model.roll@forecast$density[,'Sigma']*integrate(my_function,lower = 0.0000001,upper = 0.05)$value*(1/(shape-1))*(shape+(qt(p=0.05,df=shape))^2)
qplot(y = VaR95_td , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES95_td,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR95_td)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

VN30.logret1 <- diff(log(VN30_Index_Modified$Closed_price),lag=1)
z1_95 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR95_td,es=ES95_td)
z1_95
## [1] -0.2815386
z2_95 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR95_td,es=ES95_td,alpha=0.05)
z2_95
## [1] -1.152985
VaR_95_his <- DoGARCH_VaR(y=forc, probability=0.05, WE=1000)
VaR_95_his <- VaR_95_his[1001:1250]
qplot(y = VaR_95_his , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_95_his)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with His approach: ' , (sum(VN30.logret[1001:1250] < VaR_95_his)) , sep = '')
## Number of exceptions with His approach: 22
VaRTest(alpha=0.05,actual=VN30.logret[1001:1250],VaR=VaR_95_his,conf.level = 0.95)
## $expected.exceed
## [1] 12
##
## $actual.exceed
## [1] 22
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 6.258978
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.01235655
##
## $uc.Decision
## [1] "Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 8.377008
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.01516896
##
## $cc.Decision
## [1] "Reject H0"
VaRDurTest(alpha=0.05,actual=VN30.logret[1001:1250],VaR=VaR_95_his,conf.level = 0.95)
## $b
## [1] 0.941314
##
## $uLL
## [1] -72.95248
##
## $rLL
## [1] -73.01571
##
## $LRp
## [1] 0.7221302
##
## $H0
## [1] "Duration Between Exceedances have no memory (Weibull b=1 = Exponential)"
##
## $Decision
## [1] "Fail to Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_95_his)
## Accept H0 with LRstat is: 2.137794
VaRDurTest_Haas(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_95_his,conf=0.95)
## Reject H0 with LRstat is: 42.69416
ES_95_his <- DoGARCH_ES(y=forc, probability=0.05, WE=1000)
ES_95_his <- ES_95_his[1001:1250]
qplot(y = VaR_95_his , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES_95_his,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_95_his)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

z1_his_95 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR_95_his,es=ES_95_his)
z1_his_95
## [1] 0.1212294
z2_his_95 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR_95_his,es=ES_95_his,alpha=0.05)
z2_his_95
## [1] -0.5466363
model.roll = ugarchroll(spec = model.spec, data = VN30.logret, n.start = 1000, refit.window = 'moving')
VaR975_td = model.roll@forecast$density[,'Sigma']*qdist(distribution='std', shape=shape, p=0.025)
qplot(y = VaR975_td , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR975_td)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with GARCH approach: ' , (sum(VN30.logret[1001:1250] < VaR975_td)) , sep = '')
## Number of exceptions with GARCH approach: 13
VaRTest(alpha=0.025,actual=VN30.logret[1001:1250],VaR=VaR975_td,conf.level = 0.95)
## $expected.exceed
## [1] 6
##
## $actual.exceed
## [1] 13
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 5.730238
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.01667522
##
## $uc.Decision
## [1] "Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 5.88014
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.05286202
##
## $cc.Decision
## [1] "Fail to Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR975_td)
## Accept H0 with LRstat is: 0.1527787
VaRDurTest_Haas(alpha=0.025,actual=as.numeric(VN30.logret[1001:1250]),var=VaR975_td,conf=0.95)
## Accept H0 with LRstat is: 19.83335
ES975_td = (1/(1-0.025))*model.roll@forecast$density[,'Sigma']*integrate(my_function,lower = 0.0000001,upper = 0.025)$value*(1/(shape-1))*(shape+(qt(p=0.025,df=shape))^2)
qplot(y = VaR975_td , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES975_td,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR975_td)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

z1_975 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR975_td,es=ES975_td)
z1_975
## [1] -0.3891991
z2_975 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR975_td,es=ES975_td,alpha=0.05)
z2_975
## [1] -0.4447671
VaR_975_his <- DoGARCH_VaR(y=forc, probability=0.025, WE=1000)
VaR_975_his <- VaR_975_his[1001:1250]
qplot(y = VaR_975_his , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_975_his)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with His approach: ' , (sum(VN30.logret[1001:1250] < VaR_975_his)) , sep = '')
## Number of exceptions with His approach: 10
VaRTest(alpha=0.025,actual=VN30.logret[1001:1250],VaR=VaR_975_his,conf.level = 0.95)
## $expected.exceed
## [1] 6
##
## $actual.exceed
## [1] 10
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 1.958063
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.1617206
##
## $uc.Decision
## [1] "Fail to Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 2.663613
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.2639999
##
## $cc.Decision
## [1] "Fail to Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_975_his)
## Accept H0 with LRstat is: 0.7107558
VaRDurTest_Haas(alpha=0.025,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_975_his,conf=0.95)
## Accept H0 with LRstat is: 15.37791
ES_975_his <- DoGARCH_ES(y=forc, probability=0.025, WE=1000)
ES_975_his <- ES_975_his[1001:1250]
qplot(y = VaR_975_his , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES_975_his,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_975_his)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

z1_his_975 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR_975_his,es=ES_975_his)
z1_his_975
## [1] 0.1437143
z2_his_975 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR_975_his,es=ES_975_his,alpha=0.05)
z2_his_975
## [1] 0.3149714
model.roll = ugarchroll(spec = model.spec, data = VN30.logret, n.start = 1000, refit.window = 'moving')
VaR99_td = model.roll@forecast$density[,'Sigma']*qdist(distribution='std', shape=shape, p=0.01)
qplot(y = VaR99_td , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR99_td)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with GARCH approach: ' , (sum(VN30.logret[1001:1250] < VaR99_td)) , sep = '')
## Number of exceptions with GARCH approach: 8
VaRTest(alpha=0.01,actual=VN30.logret[1001:1250],VaR=VaR99_td,conf.level = 0.95)
## $expected.exceed
## [1] 2
##
## $actual.exceed
## [1] 8
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 7.733551
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.005420405
##
## $uc.Decision
## [1] "Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 8.264769
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.01604458
##
## $cc.Decision
## [1] "Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR99_td)
## Accept H0 with LRstat is: 0.529022
VaRDurTest_Haas(alpha=0.01,actual=as.numeric(VN30.logret[1001:1250]),var=VaR99_td,conf=0.95)
## Accept H0 with LRstat is: 12.07632
# Recompute the Haas duration statistic step by step for the 99% GARCH VaR
alpha = 0.01
actual = as.numeric(VN30.logret[1001:1250])
var = VaR99_td
vec <- c(0)
stat <- c()
for (i in 1:length(var))
{
  if (actual[i] < var[i])
  {
    vec <- cbind(vec, i)
  }
}
for (j in 1:length(vec))
{
  stat[j] <- -2*log((alpha*(1-alpha)^(vec[j+1]-vec[j]-1))/
                      ((1/(vec[j+1]-vec[j]))*(1-(1/(vec[j+1]-vec[j])))^(vec[j+1]-vec[j]-1)))
}
z <- sum(na.omit(stat))
ES99_td = (1/(1-0.01))*model.roll@forecast$density[,'Sigma']*integrate(my_function,lower = 0.0000001,upper = 0.01)$value*(1/(shape-1))*(shape+(qt(p=0.01,df=shape))^2)
qplot(y = VaR99_td , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES99_td,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR99_td)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

z1_99 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR99_td,es=ES99_td)
z1_99
## [1] -0.3546379
z2_99<- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR99_td,es=ES99_td,alpha=0.05)
z2_99
## [1] 0.1330318
VaR_99_his <- DoGARCH_VaR(y=forc, probability=0.01, WE=1000)
VaR_99_his <- VaR_99_his[1001:1250]
qplot(y = VaR_99_his , x = 1:250 , geom = 'line') +
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_99_his)) , size = 2) + scale_color_manual(values = c('gray' , 'red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

cat('Number of exceptions with His approach: ' , (sum(VN30.logret[1001:1250] < VaR_99_his)) , sep = '')
## Number of exceptions with His approach: 2
VaRTest(alpha=0.01,actual=VN30.logret[1001:1250],VaR=VaR_99_his,conf.level = 0.95)
## $expected.exceed
## [1] 2
##
## $actual.exceed
## [1] 2
##
## $uc.H0
## [1] "Correct Exceedances"
##
## $uc.LRstat
## [1] 0.1084352
##
## $uc.critical
## [1] 3.841459
##
## $uc.LRp
## [1] 0.7419327
##
## $uc.Decision
## [1] "Fail to Reject H0"
##
## $cc.H0
## [1] "Correct Exceedances & Independent"
##
## $cc.LRstat
## [1] 0.1408242
##
## $cc.critical
## [1] 5.991465
##
## $cc.LRp
## [1] 0.9320096
##
## $cc.Decision
## [1] "Fail to Reject H0"
VaRDurTest_Chist(alpha=0.05,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_99_his)
## Accept H0 with LRstat is: 0.03225841
VaRDurTest_Haas(alpha=0.01,actual=as.numeric(VN30.logret[1001:1250]),var=VaR_99_his,conf=0.95)
## Accept H0 with LRstat is: 3.206818
ES_99_his <- DoGARCH_ES(y=forc, probability=0.01, WE=1000)
ES_99_his <- ES_99_his[1001:1250]
qplot(y = VaR_99_his , x = 1:250 , geom = 'line') +
geom_line(aes(x=1:250,y=ES_99_his,color='green'))+
geom_point(aes(x = 1:250 , y = VN30.logret[1001:1250] , color = as.factor(VN30.logret[1001:1250] < VaR_99_his)) , size = 2) + scale_color_manual(values = c('gray' , 'green','red')) +
labs(y = 'Daily Log Returns' , x = 'Test set Observation') + theme_light() +
theme(legend.position = 'none')

z1_his_99 <- as1(x=as.numeric(VN30.logret[1001:1250]),var=VaR_99_his,es=ES_99_his)
z1_his_99
## [1] 0.137491
z2_his_99 <- as2(x=as.numeric(VN30.logret[1001:1250]),var=VaR_99_his,es=ES_99_his,alpha=0.01)
z2_his_99
## [1] 0.3099928