# =========================================================
# EGARCH(1,1)-X + Mean–VaR Optimization with DEoptim
# Extended: Max-Risk Simulation
# =========================================================
library(readxl)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(stringr)
library(lubridate)
##
## Attaching package: 'lubridate'
## The following objects are masked from 'package:base':
##
## date, intersect, setdiff, union
library(PerformanceAnalytics)
## Loading required package: xts
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
##
## ######################### Warning from 'xts' package ##########################
## # #
## # The dplyr lag() function breaks how base R's lag() function is supposed to #
## # work, which breaks lag(my_xts). Calls to lag(my_xts) that you type or #
## # source() into this session won't work correctly. #
## # #
## # Use stats::lag() to make sure you're not using dplyr::lag(), or you can add #
## # conflictRules('dplyr', exclude = 'lag') to your .Rprofile to stop #
## # dplyr from breaking base R's lag() function. #
## # #
## # Code in packages is not affected. It's protected by R's namespace mechanism #
## # Set `options(xts.warn_dplyr_breaks_lag = FALSE)` to suppress this warning. #
## # #
## ###############################################################################
##
## Attaching package: 'xts'
## The following objects are masked from 'package:dplyr':
##
## first, last
##
## Attaching package: 'PerformanceAnalytics'
## The following object is masked from 'package:graphics':
##
## legend
library(forecast)
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(tseries)
library(car)
## Loading required package: carData
##
## Attaching package: 'car'
## The following object is masked from 'package:dplyr':
##
## recode
library(lmtest)
library(PortfolioAnalytics)
## Loading required package: foreach
## Registered S3 method overwritten by 'PortfolioAnalytics':
## method from
## print.constraint ROI
library(DEoptim)
## Loading required package: parallel
##
## DEoptim package
## Differential Evolution algorithm in R
## Authors: D. Ardia, K. Mullen, B. Peterson and J. Ulrich
library(reshape2)
library(ggplot2)
library(quadprog)
library(quantmod)
## Loading required package: TTR
library(rugarch) # for GARCH-X
library(gridExtra)
##
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
##
## combine
library(tidyr)
##
## Attaching package: 'tidyr'
## The following object is masked from 'package:reshape2':
##
## smiths
library(tibble)
# 1. Data
# Import BI Rate data
birate <- read_excel("Database/bi_rate.xlsx")
colnames(birate) <- c("date","bi_rate")
head(birate); tail(birate)
## # A tibble: 6 × 2
## date bi_rate
## <chr> <chr>
## 1 18 Desember 2024 6.00
## 2 20 November 2024 6.00
## 3 16 Oktober 2024 6.00
## 4 18 September 2024 6.00
## 5 21 Agustus 2024 6.25
## 6 17 Juli 2024 6.25
## # A tibble: 6 × 2
## date bi_rate
## <chr> <chr>
## 1 22 September 2016 5.00
## 2 19 Agustus 2016 5.25
## 3 21 Juli 2016 5.25
## 4 16 Juni 2016 5.25
## 5 19 Mei 2016 5.50
## 6 21 April 2016 5.50
Sys.setlocale("LC_TIME", "id_ID.UTF-8")
## [1] "id_ID.UTF-8"
birate <- birate %>%
mutate(date = dmy(date))
birate$bi_rate <- as.numeric(birate$bi_rate)
birate <- birate[order(birate$date), ]
head(birate)
## # A tibble: 6 × 2
## date bi_rate
## <date> <dbl>
## 1 2016-04-21 5.5
## 2 2016-05-19 5.5
## 3 2016-06-16 5.25
## 4 2016-07-21 5.25
## 5 2016-08-19 5.25
## 6 2016-09-22 5
# Import 10-year bond data
bond10 <- read.csv("Database/bond10 new.csv")
bond10 <- bond10[, c("Date", "Price")]
colnames(bond10) <- c("date", "bond10")
bond10$date <- ymd(as.Date(bond10$date))
str(bond10)
## 'data.frame': 234 obs. of 2 variables:
## $ date : Date, format: "2025-04-30" "2025-03-27" ...
## $ bond10: num 99.1 97.4 98.7 98.4 97.3 ...
# Filter data between 2014-01-01 and 2024-12-31
bond10 <- bond10 %>%
filter(date >= as.Date("2014-01-01") & date <= as.Date("2024-12-31"))
bond10 <- bond10[order(bond10$date), ]
head(bond10)
## date bond10
## 132 2014-01-30 96.883
## 131 2014-02-28 99.809
## 130 2014-03-28 102.600
## 129 2014-04-30 102.700
## 128 2014-05-30 102.150
## 127 2014-06-30 100.800
# Import Bitcoin data
btc_usd <- read.csv("Database/btc_usd.csv")
btc_usd <- btc_usd[, c("Tanggal", "Terakhir")]
colnames(btc_usd) <- c("date", "btc")
btc_usd <- btc_usd %>%
mutate(date = dmy(date))
btc_usd <- btc_usd[order(btc_usd$date), ]
btc_usd <- btc_usd %>%
mutate(btc = as.numeric(gsub(",", "", btc)))
head(btc_usd)
## date btc
## 4015 2014-01-01 7403
## 4014 2014-01-02 7750
## 4013 2014-01-03 8121
## 4012 2014-01-04 8018
## 4011 2014-01-05 9040
## 4010 2014-01-06 9345
# Import CPI data
cpi <- read_excel("Database/inflation.xlsx")
colnames(cpi) <- c("date", "cpi")
bulan_id <- c("Januari","Februari","Maret","April","Mei","Juni",
"Juli","Agustus","September","Oktober","November","Desember")
bulan_en <- c("January","February","March","April","May","June",
"July","August","September","October","November","December")
cpi <- cpi %>%
mutate(date = str_replace_all(date, setNames(bulan_en, bulan_id)),
date = parse_date_time(date, orders = "my"),
date = as.Date(date))
cpi <- cpi[order(cpi$date), ]
cpi$cpi <- as.numeric(cpi$cpi)
head(cpi)
## # A tibble: 6 × 2
## date cpi
## <date> <dbl>
## 1 2014-01-01 8.22
## 2 2014-02-01 7.75
## 3 2014-03-01 7.32
## 4 2014-04-01 7.25
## 5 2014-05-01 7.32
## 6 2014-06-01 6.7
# Import JKSE data
jkse <- read.csv("Database/jkse.csv")
jkse <- jkse[, c("Tanggal", "Terakhir")]
colnames(jkse) <- c("date", "jkse")
jkse <- jkse %>%
mutate(date = dmy(date))
jkse <- jkse[order(jkse$date), ]
jkse <- jkse %>%
mutate(
jkse = str_replace_all(jkse, "\\.", ""), # remove dots (thousands separator)
jkse = str_replace_all(jkse, ",", "."), # convert commas to decimal points
jkse = as.numeric(jkse) # convert to numeric
)
head(jkse)
## date jkse
## 2681 2014-01-01 4274.18
## 2680 2014-01-02 4327.27
## 2679 2014-01-03 4257.66
## 2678 2014-01-06 4202.81
## 2677 2014-01-07 4175.81
## 2676 2014-01-08 4200.59
# Import IDR exchange rate data
kurs_idr <- read.csv("Database/idr_kurs.csv")
kurs_idr <- kurs_idr[, c("Tanggal", "Terakhir")]
colnames(kurs_idr) <- c("date", "kurs_idr")
kurs_idr <- kurs_idr %>%
mutate(date = dmy(date))
kurs_idr <- kurs_idr[order(kurs_idr$date), ]
kurs_idr <- kurs_idr %>%
mutate(
kurs_idr = str_replace_all(kurs_idr, "\\.", ""), # remove dots (thousands separator)
kurs_idr = str_replace_all(kurs_idr, ",", "."), # convert commas to decimal points
kurs_idr = as.numeric(kurs_idr) # convert to numeric
)
head(kurs_idr)
## date kurs_idr
## 2815 2014-01-01 12170.0
## 2814 2014-01-02 12160.0
## 2813 2014-01-03 12170.0
## 2812 2014-01-06 12180.0
## 2811 2014-01-07 12237.5
## 2810 2014-01-08 12235.0
# Import gold data
gold <- read.csv("Database/gold.csv")
gold <- gold[, c("Tanggal", "Terakhir")]
colnames(gold) <- c("date", "gold")
gold <- gold %>%
mutate(date = dmy(date))
gold <- gold[order(gold$date), ]
gold <- gold %>%
mutate(
gold = str_replace_all(gold, "\\.", ""), # remove dots (thousands separator)
gold = str_replace_all(gold, ",", "."), # convert commas to decimal points
gold = as.numeric(gold) # convert to numeric
)
head(gold)
## date gold
## 2816 2014-01-02 1225.2
## 2815 2014-01-03 1238.6
## 2814 2014-01-06 1238.0
## 2813 2014-01-07 1229.6
## 2812 2014-01-08 1225.5
## 2811 2014-01-09 1229.4
# Monthly Data Aggregation and Preprocessing
# Convert data to monthly format (for those that aren't already monthly)
birate_monthly <- birate %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(bi_rate = mean(bi_rate, na.rm = TRUE)) %>%
rename(date = year_month)
bond10_monthly <- bond10 %>%
mutate(date = floor_date(date, "month"))
btc_monthly <- btc_usd %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(btc = mean(btc, na.rm = TRUE)) %>%
rename(date = year_month)
jkse_monthly <- jkse %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(jkse = mean(jkse, na.rm = TRUE)) %>%
rename(date = year_month)
kurs_idr_monthly <- kurs_idr %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(kurs_idr = mean(kurs_idr, na.rm = TRUE)) %>%
rename(date = year_month)
gold_monthly <- gold %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(gold = mean(gold, na.rm = TRUE)) %>%
rename(date = year_month)
cpi_monthly <- cpi %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(cpi = mean(cpi, na.rm = TRUE)) %>%
rename(date = year_month)
# Merge all datasets
merged_data <- birate_monthly %>%
full_join(bond10_monthly, by = "date") %>%
full_join(btc_monthly, by = "date") %>%
full_join(cpi_monthly, by = "date") %>%
full_join(jkse_monthly, by = "date") %>%
full_join(kurs_idr_monthly, by = "date") %>%
full_join(gold_monthly, by = "date")
# Sort by date
merged_data <- merged_data %>% arrange(date)
# Check the result
head(merged_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2014-01-01 NA 96.9 8194. 8.22 4350. 12158. 1244.
## 2 2014-02-01 NA 99.8 6605. 7.75 4515. 11918. 1301.
## 3 2014-03-01 NA 103. 5929. 7.32 4720. 11416. 1337.
## 4 2014-04-01 NA 103. 4620. 7.25 4871. 11431. 1299.
## 5 2014-05-01 NA 102. 4829. 7.32 4925. 11536. 1288.
## 6 2014-06-01 NA 101. 6179. 6.7 4898. 11892. 1283.
tail(merged_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2024-07-01 6.25 97.9 62.9 2.13 7258. 16238. 2398.
## 2 2024-08-01 6.25 100. 60.0 2.12 7417. 15735. 2474.
## 3 2024-09-01 6 101. 60.5 1.84 7740. 15318. 2579.
## 4 2024-10-01 6 98.6 65.7 1.71 7621. 15558. 2695.
## 5 2024-11-01 6 98.1 86.5 1.55 7269. 15813. 2656.
## 6 2024-12-01 6 97.3 98.3 1.57 7216. 16036. 2650.
# Check for missing values
colSums(is.na(merged_data))
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## 0 27 0 0 0 0 0 0
# Data with non NA values
non_na_data <- merged_data %>%
filter(complete.cases(.))
# Check the non NA data
head(non_na_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2016-04-01 5.5 105. 4352. 3.6 4853. 13172. 1244.
## 2 2016-05-01 5.5 104. 4617. 3.33 4770. 13417. 1261.
## 3 2016-06-01 5.25 106. 6445. 3.45 4871. 13338. 1279.
## 4 2016-07-01 5.25 110. 6616. 3.21 5166. 13114. 1339.
## 5 2016-08-01 5.25 109. 5858. 2.79 5401. 13160. 1344.
## 6 2016-09-01 5 110. 6083. 3.07 5337. 13110. 1330.
tail(non_na_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2024-07-01 6.25 97.9 62.9 2.13 7258. 16238. 2398.
## 2 2024-08-01 6.25 100. 60.0 2.12 7417. 15735. 2474.
## 3 2024-09-01 6 101. 60.5 1.84 7740. 15318. 2579.
## 4 2024-10-01 6 98.6 65.7 1.71 7621. 15558. 2695.
## 5 2024-11-01 6 98.1 86.5 1.55 7269. 15813. 2656.
## 6 2024-12-01 6 97.3 98.3 1.57 7216. 16036. 2650.
nrow(non_na_data)
## [1] 105
# Convert BTC to IDR
btc_idr <- non_na_data %>%
mutate(btc_idr = btc * kurs_idr) %>%
dplyr::select(date, btc_idr)
# Convert Gold to IDR
gold_idr <- non_na_data %>%
mutate(gold_idr = gold * kurs_idr) %>%
dplyr::select(date, gold_idr)
# Add BTC_IDR and Gold_IDR to the dataset
non_na_data <- non_na_data %>%
left_join(btc_idr, by = "date")
non_na_data <- non_na_data %>%
left_join(gold_idr, by = "date")
# Select final set of variables
non_na_data <- non_na_data %>%
dplyr::select(date, bi_rate, bond10, btc_idr, cpi, jkse, kurs_idr, gold_idr)
# Display the final dataset
head(non_na_data); tail(non_na_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc_idr cpi jkse kurs_idr gold_idr
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2016-04-01 5.5 105. 57318032. 3.6 4853. 13172. 16383543.
## 2 2016-05-01 5.5 104. 61939403. 3.33 4770. 13417. 16912360.
## 3 2016-06-01 5.25 106. 85965931. 3.45 4871. 13338. 17061819.
## 4 2016-07-01 5.25 110. 86769238. 3.21 5166. 13114. 17561010.
## 5 2016-08-01 5.25 109. 77097435. 2.79 5401. 13160. 17693830.
## 6 2016-09-01 5 110. 79744103. 3.07 5337. 13110. 17430969.
## # A tibble: 6 × 8
## date bi_rate bond10 btc_idr cpi jkse kurs_idr gold_idr
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2024-07-01 6.25 97.9 1021903. 2.13 7258. 16238. 38941212.
## 2 2024-08-01 6.25 100. 944448. 2.12 7417. 15735. 38926254.
## 3 2024-09-01 6 101. 926531. 1.84 7740. 15318. 39502303.
## 4 2024-10-01 6 98.6 1021507. 1.71 7621. 15558. 41930732.
## 5 2024-11-01 6 98.1 1368402. 1.55 7269. 15813. 42000372.
## 6 2024-12-01 6 97.3 1576530. 1.57 7216. 16036. 42502040.
# Calculate returns and changes for each variable
non_na_data$bi_rate <- as.numeric(non_na_data$bi_rate)
apt_data <- non_na_data %>%
arrange(date) %>%
mutate(
jkse_return = c(NA, diff(log(jkse))),
bond_return = c(NA, diff(log(bond10))),
btc_return = c(NA, diff(log(btc_idr))),
gold_return = c(NA, diff(log(gold_idr))), # Add gold returns calculation
birate_change = c(NA, diff(bi_rate)),
cpi_change = c(NA, diff(cpi) / cpi[-length(cpi)]),
kurs_change = c(NA, diff(kurs_idr) / kurs_idr[-length(kurs_idr)])
) %>%
na.omit()
# Create risk-free rate variable from BI Rate
apt_data$rf_rate <- apt_data$bi_rate / 12 / 100 # Convert annual BI Rate to monthly
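# Worked check (illustrative): a 6.00% annual BI Rate maps to 6 / 12 / 100 = 0.005 per month.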
# Calculate excess returns
apt_data$excess_jkse <- apt_data$jkse_return - apt_data$rf_rate
apt_data$excess_bond <- apt_data$bond_return - apt_data$rf_rate
apt_data$excess_btc <- apt_data$btc_return - apt_data$rf_rate
apt_data$excess_gold <- apt_data$gold_return - apt_data$rf_rate # Add excess returns for gold
# Check the data
head(apt_data)
## # A tibble: 6 × 20
## date bi_rate bond10 btc_idr cpi jkse kurs_idr gold_idr jkse_return
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2016-05-01 5.5 104. 61939403. 3.33 4770. 13417. 16912360. -0.0173
## 2 2016-06-01 5.25 106. 85965931. 3.45 4871. 13338. 17061819. 0.0210
## 3 2016-07-01 5.25 110. 86769238. 3.21 5166. 13114. 17561010. 0.0589
## 4 2016-08-01 5.25 109. 77097435. 2.79 5401. 13160. 17693830. 0.0445
## 5 2016-09-01 5 110. 79744103. 3.07 5337. 13110. 17430969. -0.0120
## 6 2016-10-01 4.75 109. 83963283. 3.31 5406. 13018. 16481679. 0.0128
## # ℹ 11 more variables: bond_return <dbl>, btc_return <dbl>, gold_return <dbl>,
## # birate_change <dbl>, cpi_change <dbl>, kurs_change <dbl>, rf_rate <dbl>,
## # excess_jkse <dbl>, excess_bond <dbl>, excess_btc <dbl>, excess_gold <dbl>
summary(apt_data)
## date bi_rate bond10 btc_idr
## Min. :2016-05-01 Min. :3.500 Min. : 83.80 Min. : 16779
## 1st Qu.:2018-06-23 1st Qu.:4.188 1st Qu.: 97.23 1st Qu.: 118135
## Median :2020-08-16 Median :4.750 Median :100.53 Median : 342069
## Mean :2020-08-15 Mean :4.886 Mean : 99.82 Mean : 8297702
## 3rd Qu.:2022-10-08 3rd Qu.:5.750 3rd Qu.:102.90 3rd Qu.: 724748
## Max. :2024-12-01 Max. :6.250 Max. :110.16 Max. :110728567
## cpi jkse kurs_idr gold_idr
## Min. :1.320 Min. :4599 Min. :13018 Min. :15468115
## 1st Qu.:2.167 1st Qu.:5853 1st Qu.:13954 1st Qu.:18062115
## Median :3.060 Median :6237 Median :14345 Median :25328121
## Mean :2.997 Mean :6248 Mean :14459 Mean :24420591
## 3rd Qu.:3.490 3rd Qu.:6843 3rd Qu.:15057 3rd Qu.:28087431
## Max. :5.950 Max. :7740 Max. :16335 Max. :42502040
## jkse_return bond_return btc_return
## Min. :-0.201493 Min. :-0.0828848 Min. :-6.89749
## 1st Qu.:-0.012833 1st Qu.:-0.0143272 1st Qu.:-0.06265
## Median : 0.008020 Median :-0.0021093 Median : 0.02405
## Mean : 0.003815 Mean :-0.0007608 Mean :-0.03455
## 3rd Qu.: 0.020662 3rd Qu.: 0.0113273 3rd Qu.: 0.14451
## Max. : 0.086305 Max. : 0.1371552 Max. : 0.65514
## gold_return birate_change cpi_change
## Min. :-0.062174 Min. :-0.250000 Min. :-0.302752
## 1st Qu.:-0.009984 1st Qu.: 0.000000 1st Qu.:-0.077589
## Median : 0.007048 Median : 0.000000 Median :-0.013377
## Mean : 0.009166 Mean : 0.004808 Mean :-0.002132
## 3rd Qu.: 0.026680 3rd Qu.: 0.000000 3rd Qu.: 0.068724
## Max. : 0.100150 Max. : 0.625000 Max. : 0.314394
## kurs_change rf_rate excess_jkse
## Min. :-0.054480 Min. :0.002917 Min. :-0.2052429
## 1st Qu.:-0.006061 1st Qu.:0.003490 1st Qu.:-0.0176251
## Median : 0.001096 Median :0.003958 Median : 0.0038237
## Mean : 0.002065 Mean :0.004072 Mean :-0.0002566
## 3rd Qu.: 0.011865 3rd Qu.:0.004792 3rd Qu.: 0.0164970
## Max. : 0.102563 Max. :0.005208 Max. : 0.0831804
## excess_bond excess_btc excess_gold
## Min. :-0.086635 Min. :-6.90145 Min. :-0.066132
## 1st Qu.:-0.017855 1st Qu.:-0.06723 1st Qu.:-0.013795
## Median :-0.005789 Median : 0.01946 Median : 0.001944
## Mean :-0.004832 Mean :-0.03862 Mean : 0.005095
## 3rd Qu.: 0.008181 3rd Qu.: 0.14029 3rd Qu.: 0.022617
## Max. : 0.132155 Max. : 0.65159 Max. : 0.094941
min(apt_data$date)
## [1] "2016-05-01"
max(apt_data$date)
## [1] "2024-12-01"
# Figure 1. Monthly Closing Price of Assets
# Prepare data for plotting
price_data <- non_na_data %>%
dplyr::select(date, jkse, bond10, btc_idr, gold_idr) %>%
mutate(
log_jkse = log(jkse),
log_bond10 = log(bond10),
log_btc_idr = log(btc_idr),
log_gold = log(gold_idr)
) %>%
dplyr::select(date, log_jkse, log_bond10, log_btc_idr, log_gold)
# Reshape for ggplot
price_long <- melt(price_data, id.vars = "date", variable.name = "Asset", value.name = "LogPrice")
asset_labels <- c(
log_jkse = "JKSE",
log_bond10 = "Bond 10Y",
log_btc_idr = "Bitcoin (IDR)",
log_gold = "Gold"
)
price_long$Asset <- factor(price_long$Asset, levels = names(asset_labels), labels = asset_labels)
# Plot
ggplot(price_long, aes(x = date, y = LogPrice, color = Asset)) +
geom_line(linewidth = 1) +
labs(title = "Monthly Log Closing Price of Assets",
x = "Date", y = "Log Price",
color = "Asset") +
theme_minimal() +
theme(legend.position = "bottom")

# 2. Compute factor surprises (exchange rate, CPI, BI Rate)
compute_surprise_ses <- function(series, series_name = NULL) {
m <- forecast::ses(series, h = 1)
res_raw <- as.numeric(residuals(m))
fit_val <- as.numeric(fitted(m))
if (!is.null(series_name) && tolower(series_name) %in% c("cpi", "kurs_idr")) {
res_adj <- res_raw / as.numeric(series)
} else {
res_adj <- res_raw
}
list(
res = res_adj,
fit = fit_val
)
}
apt_data <- apt_data %>%
mutate(
surprise_kurs = compute_surprise_ses(kurs_idr)$res,
surprise_cpi = compute_surprise_ses(cpi)$res,
surprise_birate = compute_surprise_ses(bi_rate)$res,
birate_forecast = compute_surprise_ses(bi_rate)$fit,
cpi_forecast = compute_surprise_ses(cpi)$fit,
kurs_forecast= compute_surprise_ses(kurs_idr)$fit
) %>%
drop_na()
apt_data <- na.omit(apt_data)
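# Optional sanity check (illustrative sketch, not part of the original pipeline): for the
# additive-error SES model, the residual equals the actual value minus the one-step-ahead fit,
# so each surprise above is simply actual - forecast.
chk <- compute_surprise_ses(apt_data$bi_rate)
all.equal(chk$res, as.numeric(apt_data$bi_rate) - chk$fit) # expected: TRUE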
# BI Rate plot
p1 <- ggplot(apt_data, aes(x = 1:nrow(apt_data))) +
geom_line(aes(y = bi_rate, color = "Actual"), linewidth = 1) +
geom_line(aes(y = birate_forecast, color = "Forecast"), linewidth = 1) +
labs(title = "BI Rate: Actual vs Forecast", x = "Time", y = "Rate") +
scale_color_manual(values = c("blue", "red")) +
theme_minimal()
# CPI plot
p2 <- ggplot(apt_data, aes(x = 1:nrow(apt_data))) +
geom_line(aes(y = cpi, color = "Actual"), linewidth = 1) +
geom_line(aes(y = cpi_forecast, color = "Forecast"), linewidth = 1) +
labs(title = "CPI: Actual vs Forecast", x = "Time", y = "Rate") +
scale_color_manual(values = c("blue", "red")) +
theme_minimal()
# Exchange Rate plot
p3 <- ggplot(apt_data, aes(x = 1:nrow(apt_data))) +
geom_line(aes(y = kurs_idr, color = "Actual"), linewidth = 1) +
geom_line(aes(y = kurs_forecast, color = "Forecast"), linewidth = 1) +
labs(title = "Exchange Rate: Actual vs Forecast", x = "Time", y = "IDR/USD") +
scale_color_manual(values = c("blue", "red")) +
theme_minimal()
# Show plots
library(gridExtra)
grid.arrange(p1, p2, p3, ncol = 1)

# Descriptive statistics for the macroeconomic surprise variables
library(psych)
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
## The following object is masked from 'package:car':
##
## logit
macro_vars <- apt_data %>%
dplyr::select(surprise_birate, surprise_cpi, surprise_kurs)
describe(macro_vars)
## vars n mean sd median trimmed mad min max
## surprise_birate 1 104 0.00 0.16 0.00 -0.01 0.00 -0.25 0.63
## surprise_cpi 2 104 -0.02 0.33 -0.03 -0.03 0.31 -0.99 1.26
## surprise_kurs 3 104 26.51 272.41 15.68 32.73 190.96 -855.85 1412.00
## range skew kurtosis se
## surprise_birate 0.88 1.10 2.86 0.02
## surprise_cpi 2.25 0.59 1.81 0.03
## surprise_kurs 2267.85 0.61 6.23 26.71
summary(macro_vars)
## surprise_birate surprise_cpi surprise_kurs
## Min. :-0.250025 Min. :-0.98998 Min. :-855.85
## 1st Qu.: 0.000000 1st Qu.:-0.24248 1st Qu.: -92.02
## Median : 0.000000 Median :-0.02996 Median : 15.68
## Mean : 0.004815 Mean :-0.01692 Mean : 26.51
## 3rd Qu.: 0.000000 3rd Qu.: 0.17250 3rd Qu.: 159.20
## Max. : 0.625038 Max. : 1.25997 Max. :1412.00
# Stationarity tests
# Variables to test for stationarity
variables_to_test <- c(
"excess_jkse", "excess_bond", "excess_btc", "excess_gold",
"surprise_birate", "surprise_cpi", "surprise_kurs"
)
# Run the Phillips–Perron (PP) test for each variable
stationarity_results <- lapply(variables_to_test, function(var_name) {
if (var_name %in% colnames(apt_data)) {
x <- apt_data[[var_name]]
x <- as.numeric(x)
x <- na.omit(x)
# Run pp.test inside tryCatch to avoid hard errors
test_result <- tryCatch(
pp.test(x, alternative = "stationary"),
error = function(e) NULL
)
if (!is.null(test_result)) {
data.frame(
Variable = var_name,
PP_Statistic = test_result$statistic,
P_Value = test_result$p.value,
Stationary = ifelse(test_result$p.value < 0.05, "Yes", "No")
)
} else {
data.frame(
Variable = var_name,
PP_Statistic = NA,
P_Value = NA,
Stationary = "Error or non-numeric data"
)
}
} else {
data.frame(
Variable = var_name,
PP_Statistic = NA,
P_Value = NA,
Stationary = "Variable not found"
)
}
})
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
# Combine the results
stationarity_summary <- do.call(rbind, stationarity_results)
print(stationarity_summary)
## Variable PP_Statistic P_Value Stationary
## Dickey-Fuller Z(alpha) excess_jkse -65.85094 0.01 Yes
## Dickey-Fuller Z(alpha)1 excess_bond -102.95927 0.01 Yes
## Dickey-Fuller Z(alpha)2 excess_btc -106.53823 0.01 Yes
## Dickey-Fuller Z(alpha)3 excess_gold -72.91623 0.01 Yes
## Dickey-Fuller Z(alpha)4 surprise_birate -48.99106 0.01 Yes
## Dickey-Fuller Z(alpha)5 surprise_cpi -111.55318 0.01 Yes
## Dickey-Fuller Z(alpha)6 surprise_kurs -61.99687 0.01 Yes
# Interpretation:
# A p-value < 0.05 means we reject the null hypothesis (the series is non-stationary)
# and conclude that the series is stationary.
# 3. EGARCH(1,1)-X fitting function with a per-asset distribution rule
fit_egarchx <- function(returns, exog_matrix = NULL, asset_name = NULL) {
dist_model <- if (!is.null(asset_name) && tolower(asset_name) == "gold_return") "norm" else "sstd"
# EGARCH model specification
spec <- ugarchspec(
variance.model = list(
model = "eGARCH",
garchOrder = c(1, 1),
external.regressors = exog_matrix
),
mean.model = list(
armaOrder = c(1, 0),
include.mean = TRUE,
external.regressors = exog_matrix
),
distribution.model = dist_model
)
# Estimate the model
tryCatch(
ugarchfit(
spec,
data = returns,
solver = "hybrid",
fit.control = list(scale = TRUE)
),
error = function(e) {
message("ugarchfit error on ", asset_name, ": ", conditionMessage(e))
return(NULL)
}
)
}
# 4. Fit each asset
assets <- c("gold_return", "btc_return", "jkse_return", "bond_return")
# Here: mxreg1 = surprise_birate, mxreg2 = surprise_kurs, mxreg3 = surprise_cpi
exog_mat <- as.matrix(
apt_data %>% dplyr::select(surprise_birate, surprise_kurs, surprise_cpi)
)
# Fit the model for each asset
fits <- lapply(assets, function(a) {
fit_egarchx(apt_data[[a]], exog_mat, asset_name = a)
})
names(fits) <- assets
print(fits)
## $gold_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : norm
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu 0.006399 0.000000 27716.6 0
## ar1 0.331473 0.000141 2358.0 0
## mxreg1 0.039969 0.000016 2560.1 0
## mxreg2 0.000044 0.000000 4255.6 0
## mxreg3 -0.006859 0.000001 -7865.3 0
## omega -1.453529 0.000167 -8723.4 0
## alpha1 0.178093 0.000141 1266.7 0
## beta1 0.795904 0.000013 59212.5 0
## gamma1 -0.634272 0.000095 -6650.2 0
## vxreg1 -0.375513 0.000064 -5868.3 0
## vxreg2 0.000234 0.000000 22535.0 0
## vxreg3 0.394621 0.000237 1667.2 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.006399 0.000015 427.80 0
## ar1 0.331473 0.000491 675.53 0
## mxreg1 0.039969 0.000051 788.33 0
## mxreg2 0.000044 0.000000 436.36 0
## mxreg3 -0.006859 0.000009 -791.87 0
## omega -1.453529 0.002182 -666.28 0
## alpha1 0.178093 0.000158 1124.64 0
## beta1 0.795904 0.000270 2951.85 0
## gamma1 -0.634272 0.001158 -547.66 0
## vxreg1 -0.375513 0.000351 -1068.90 0
## vxreg2 0.000234 0.000000 816.43 0
## vxreg3 0.394621 0.000589 669.86 0
##
## LogLikelihood : 232.0345
##
## Information Criteria
## ------------------------------------
##
## Akaike -4.2314
## Bayes -3.9263
## Shibata -4.2546
## Hannan-Quinn -4.1078
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.0491 0.8246
## Lag[2*(p+q)+(p+q)-1][2] 1.1100 0.6725
## Lag[4*(p+q)+(p+q)-1][5] 2.2759 0.6311
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.006987 0.9334
## Lag[2*(p+q)+(p+q)-1][5] 1.213559 0.8097
## Lag[4*(p+q)+(p+q)-1][9] 2.706393 0.8063
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.6988 0.500 2.000 0.4032
## ARCH Lag[5] 1.8983 1.440 1.667 0.4942
## ARCH Lag[7] 2.9880 2.315 1.543 0.5158
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: 4.8219
## Individual Statistics:
## mu 0.02004
## ar1 0.02119
## mxreg1 0.02111
## mxreg2 0.02031
## mxreg3 0.01990
## omega 0.01946
## alpha1 0.02151
## beta1 0.26484
## gamma1 0.02178
## vxreg1 0.01983
## vxreg2 0.02036
## vxreg3 0.01849
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 2.69 2.96 3.51
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 0.24097 0.8101
## Negative Sign Bias 0.21700 0.8287
## Positive Sign Bias 0.20758 0.8360
## Joint Effect 0.09025 0.9930
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 15.62 0.6828
## 2 30 35.04 0.2033
## 3 40 41.38 0.3670
## 4 50 53.69 0.2993
##
##
## Elapsed time : 0.4381549
##
##
## $btc_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : sstd
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu -0.055685 0.000021 -2687.307 0
## ar1 0.387728 0.000207 1872.133 0
## mxreg1 -0.049824 0.002489 -20.022 0
## mxreg2 -0.000053 0.000000 -140.217 0
## mxreg3 -0.048510 0.002947 -16.459 0
## omega -0.124739 0.000053 -2367.664 0
## alpha1 -1.376329 0.001017 -1353.303 0
## beta1 0.903576 0.000430 2102.230 0
## gamma1 -1.046768 0.000162 -6453.323 0
## vxreg1 -0.299846 0.001880 -159.516 0
## vxreg2 -0.000408 0.000000 -5328.537 0
## vxreg3 -0.388068 0.000193 -2012.231 0
## skew 0.664115 0.000236 2816.547 0
## shape 2.159145 0.002131 1013.358 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu -0.055685 0.000155 -358.87677 0.00000
## ar1 0.387728 0.000021 18233.86133 0.00000
## mxreg1 -0.049824 0.065295 -0.76306 0.44543
## mxreg2 -0.000053 0.000001 -41.01746 0.00000
## mxreg3 -0.048510 0.069130 -0.70171 0.48286
## omega -0.124739 0.000177 -706.52252 0.00000
## alpha1 -1.376329 0.030077 -45.75985 0.00000
## beta1 0.903576 0.003126 289.07837 0.00000
## gamma1 -1.046768 0.002406 -435.11283 0.00000
## vxreg1 -0.299846 0.007840 -38.24497 0.00000
## vxreg2 -0.000408 0.000003 -157.07293 0.00000
## vxreg3 -0.388068 0.002742 -141.55075 0.00000
## skew 0.664115 0.002082 318.99749 0.00000
## shape 2.159145 0.064727 33.35748 0.00000
##
## LogLikelihood : 25.28327
##
## Information Criteria
## ------------------------------------
##
## Akaike -0.21699
## Bayes 0.13899
## Shibata -0.24781
## Hannan-Quinn -0.07277
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 1.355 0.2445
## Lag[2*(p+q)+(p+q)-1][2] 2.749 0.0573
## Lag[4*(p+q)+(p+q)-1][5] 3.801 0.2589
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.0009317 0.9756
## Lag[2*(p+q)+(p+q)-1][5] 0.3377119 0.9796
## Lag[4*(p+q)+(p+q)-1][9] 0.4248927 0.9991
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.02562 0.500 2.000 0.8728
## ARCH Lag[5] 0.05894 1.440 1.667 0.9938
## ARCH Lag[7] 0.08832 2.315 1.543 0.9995
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: 3.0314
## Individual Statistics:
## mu 0.01597
## ar1 0.01634
## mxreg1 0.01554
## mxreg2 0.01624
## mxreg3 0.01709
## omega 0.01595
## alpha1 0.01601
## beta1 0.01603
## gamma1 0.01602
## vxreg1 0.01616
## vxreg2 0.01602
## vxreg3 0.01588
## skew 0.01591
## shape 0.01595
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 3.08 3.34 3.9
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 0.5775 0.5649
## Negative Sign Bias 0.1306 0.8963
## Positive Sign Bias 0.1892 0.8503
## Joint Effect 0.4683 0.9258
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 14.46 0.7562
## 2 30 33.31 0.2655
## 3 40 34.46 0.6769
## 4 50 56.58 0.2131
##
##
## Elapsed time : 1.635065
##
##
## $jkse_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : sstd
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu 0.000410 0.000000 5804.068 0
## ar1 0.245304 0.000082 2988.312 0
## mxreg1 0.007651 0.000045 170.836 0
## mxreg2 -0.000071 0.000000 -1474.595 0
## mxreg3 -0.015171 0.000008 -1927.433 0
## omega -0.263280 0.000108 -2446.027 0
## alpha1 -0.277770 0.000068 -4088.559 0
## beta1 0.962923 0.000381 2527.223 0
## gamma1 -0.286556 0.000101 -2833.834 0
## vxreg1 -0.450730 0.000142 -3171.449 0
## vxreg2 0.000380 0.000003 135.308 0
## vxreg3 0.222168 0.001988 111.732 0
## skew 0.553738 0.006930 79.903 0
## shape 50.730228 0.453655 111.826 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.000410 0.000002 258.7109 0
## ar1 0.245304 0.004700 52.1924 0
## mxreg1 0.007651 0.000311 24.5957 0
## mxreg2 -0.000071 0.000008 -8.7675 0
## mxreg3 -0.015171 0.000026 -577.1976 0
## omega -0.263280 0.014879 -17.6942 0
## alpha1 -0.277770 0.028138 -9.8717 0
## beta1 0.962923 0.056291 17.1061 0
## gamma1 -0.286556 0.007482 -38.3007 0
## vxreg1 -0.450730 0.051458 -8.7592 0
## vxreg2 0.000380 0.000028 13.5475 0
## vxreg3 0.222168 0.013402 16.5774 0
## skew 0.553738 0.070485 7.8561 0
## shape 50.730228 7.266975 6.9809 0
##
## LogLikelihood : 249.2664
##
## Information Criteria
## ------------------------------------
##
## Akaike -4.5244
## Bayes -4.1684
## Shibata -4.5552
## Hannan-Quinn -4.3801
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.3711 0.5424
## Lag[2*(p+q)+(p+q)-1][2] 0.5874 0.9398
## Lag[4*(p+q)+(p+q)-1][5] 1.3199 0.8881
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 2.069 0.1503
## Lag[2*(p+q)+(p+q)-1][5] 2.703 0.4641
## Lag[4*(p+q)+(p+q)-1][9] 3.636 0.6510
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.5574 0.500 2.000 0.4553
## ARCH Lag[5] 1.1995 1.440 1.667 0.6748
## ARCH Lag[7] 1.7317 2.315 1.543 0.7739
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: NaN
## Individual Statistics:
## mu 0.03691
## ar1 0.03696
## mxreg1 0.03711
## mxreg2 0.03743
## mxreg3 0.03742
## omega 0.03690
## alpha1 0.03707
## beta1 NaN
## gamma1 0.03635
## vxreg1 0.03697
## vxreg2 0.03765
## vxreg3 0.03608
## skew 0.03200
## shape 0.03635
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 3.08 3.34 3.9
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 1.8030 0.07442 *
## Negative Sign Bias 0.0121 0.99037
## Positive Sign Bias 1.7395 0.08505 *
## Joint Effect 4.4766 0.21439
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 19.46 0.4276
## 2 30 35.62 0.1850
## 3 40 37.54 0.5366
## 4 50 57.54 0.1885
##
##
## Elapsed time : 1.331631
##
##
## $bond_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : sstd
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu 0.001902 0.000001 3573.024 0
## ar1 -0.020583 0.000005 -4467.015 0
## mxreg1 -0.000399 0.000014 -28.983 0
## mxreg2 -0.000059 0.000000 -3861.484 0
## mxreg3 -0.005680 0.000003 -1984.392 0
## omega -1.854290 0.000271 -6837.150 0
## alpha1 -0.755627 0.000011 -66264.271 0
## beta1 0.748407 0.000101 7396.339 0
## gamma1 -1.049795 0.000354 -2962.330 0
## vxreg1 0.812749 0.000391 2080.245 0
## vxreg2 -0.002736 0.000001 -4116.540 0
## vxreg3 -0.221540 0.002107 -105.133 0
## skew 1.025567 0.002573 398.595 0
## shape 3.039306 0.000554 5481.277 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.001902 0.000003 709.595 0
## ar1 -0.020583 0.000001 -14677.695 0
## mxreg1 -0.000399 0.000004 -88.772 0
## mxreg2 -0.000059 0.000000 -596.198 0
## mxreg3 -0.005680 0.000009 -625.531 0
## omega -1.854290 0.001305 -1421.307 0
## alpha1 -0.755627 0.000168 -4497.847 0
## beta1 0.748407 0.002048 365.452 0
## gamma1 -1.049795 0.001522 -689.578 0
## vxreg1 0.812749 0.001321 615.309 0
## vxreg2 -0.002736 0.000003 -938.133 0
## vxreg3 -0.221540 0.001971 -112.419 0
## skew 1.025567 0.006601 155.372 0
## shape 3.039306 0.001132 2686.048 0
##
## LogLikelihood : 263.3789
##
## Information Criteria
## ------------------------------------
##
## Akaike -4.7957
## Bayes -4.4398
## Shibata -4.8266
## Hannan-Quinn -4.6515
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 2.602 0.10670
## Lag[2*(p+q)+(p+q)-1][2] 2.927 0.04058
## Lag[4*(p+q)+(p+q)-1][5] 3.299 0.35987
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.5281 0.4674
## Lag[2*(p+q)+(p+q)-1][5] 2.1658 0.5798
## Lag[4*(p+q)+(p+q)-1][9] 3.1202 0.7390
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.06794 0.500 2.000 0.7944
## ARCH Lag[5] 0.81490 1.440 1.667 0.7886
## ARCH Lag[7] 1.19566 2.315 1.543 0.8798
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: 4.4131
## Individual Statistics:
## mu 0.09466
## ar1 0.09307
## mxreg1 0.06152
## mxreg2 0.09954
## mxreg3 0.09548
## omega 0.23234
## alpha1 0.09315
## beta1 0.14217
## gamma1 0.07518
## vxreg1 0.08963
## vxreg2 0.09587
## vxreg3 0.05216
## skew 0.13174
## shape 0.09455
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 3.08 3.34 3.9
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 0.1381 0.8904
## Negative Sign Bias 1.0098 0.3151
## Positive Sign Bias 0.2797 0.7803
## Joint Effect 1.4832 0.6862
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 20.62 0.3584
## 2 30 36.19 0.1680
## 3 40 36.77 0.5721
## 4 50 50.81 0.4022
##
##
## Elapsed time : 0.565541
extract_egarchx_stats <- function(model, asset_name) {
if (is.null(model)) {
return(data.frame(
Asset = asset_name, Convergence = "Failed",
Mean_Intercept = NA, Mean_Intercept_SE = NA, Mean_Intercept_p = NA,
Beta_BIRate = NA, Beta_BIRate_SE = NA, Beta_BIRate_p = NA,
Beta_Exchange = NA, Beta_Exchange_SE = NA, Beta_Exchange_p = NA,
Beta_Inflation = NA, Beta_Inflation_SE = NA, Beta_Inflation_p = NA,
AR1 = NA, AR1_SE = NA, AR1_p = NA,
GARCH_Alpha1 = NA, GARCH_Alpha1_SE = NA, GARCH_Alpha1_p = NA,
GARCH_Beta1 = NA, GARCH_Beta1_SE = NA, GARCH_Beta1_p = NA,
Persistence = NA, Log_Likelihood = NA,
stringsAsFactors = FALSE
))
}
# convergence slot
conv_code <- tryCatch(model@fit$convergence, error = function(e) NA)
conv <- ifelse(!is.na(conv_code) && conv_code == 0, "Success", "Failed")
coefs <- tryCatch(coef(model), error = function(e) NULL)
ll <- tryCatch(model@fit$LLH, error = function(e) NA)
matc <- tryCatch(model@fit$matcoef, error = function(e) NULL)
if (conv == "Failed" || is.null(coefs) || is.null(matc)) {
return(data.frame(
Asset = asset_name, Convergence = conv,
Mean_Intercept = NA, Mean_Intercept_SE = NA, Mean_Intercept_p = NA,
Beta_BIRate = NA, Beta_BIRate_SE = NA, Beta_BIRate_p = NA,
Beta_Exchange = NA, Beta_Exchange_SE = NA, Beta_Exchange_p = NA,
Beta_Inflation = NA, Beta_Inflation_SE = NA, Beta_Inflation_p = NA,
AR1 = NA, AR1_SE = NA, AR1_p = NA,
GARCH_Alpha1 = NA, GARCH_Alpha1_SE = NA, GARCH_Alpha1_p = NA,
GARCH_Beta1 = NA, GARCH_Beta1_SE = NA, GARCH_Beta1_p = NA,
Persistence = NA, Log_Likelihood = ll,
stringsAsFactors = FALSE
))
}
# helper to safely pull a coefficient value
safe_extract <- function(name, col) {
if (!is.null(matc) && name %in% rownames(matc)) {
return(matc[name, col])
} else {
return(NA_real_)
}
}
data.frame(
Asset = asset_name,
Convergence = conv,
# Mean equation
Mean_Intercept = safe_extract("mu", 1),
Mean_Intercept_SE = safe_extract("mu", 2),
Mean_Intercept_p = safe_extract("mu", 4),
Beta_BIRate = safe_extract("mxreg1", 1),
Beta_BIRate_SE = safe_extract("mxreg1", 2),
Beta_BIRate_p = safe_extract("mxreg1", 4),
Beta_Exchange = safe_extract("mxreg2", 1),
Beta_Exchange_SE = safe_extract("mxreg2", 2),
Beta_Exchange_p = safe_extract("mxreg2", 4),
Beta_Inflation = safe_extract("mxreg3", 1),
Beta_Inflation_SE = safe_extract("mxreg3", 2),
Beta_Inflation_p = safe_extract("mxreg3", 4),
# AR term
AR1 = safe_extract("ar1", 1),
AR1_SE = safe_extract("ar1", 2),
AR1_p = safe_extract("ar1", 4),
# Variance model parameters
GARCH_Alpha1 = safe_extract("alpha1", 1),
GARCH_Alpha1_SE = safe_extract("alpha1", 2),
GARCH_Alpha1_p = safe_extract("alpha1", 4),
GARCH_Beta1 = safe_extract("beta1", 1),
GARCH_Beta1_SE = safe_extract("beta1", 2),
GARCH_Beta1_p = safe_extract("beta1", 4),
# Derived stats
Persistence = sum(c(safe_extract("alpha1", 1), safe_extract("beta1", 1)), na.rm = TRUE),
Log_Likelihood = ll,
stringsAsFactors = FALSE
)
}
egarchx_stats <- do.call(rbind, lapply(names(fits), function(asset) {
extract_egarchx_stats(fits[[asset]], asset)
}))
print(egarchx_stats)
## Asset Convergence Mean_Intercept Mean_Intercept_SE Mean_Intercept_p
## 1 gold_return Success 0.0063988406 2.308665e-07 0
## 2 btc_return Success -0.0556845360 2.072131e-05 0
## 3 jkse_return Success 0.0004103234 7.069582e-08 0
## 4 bond_return Success 0.0019020401 5.323334e-07 0
## Beta_BIRate Beta_BIRate_SE Beta_BIRate_p Beta_Exchange Beta_Exchange_SE
## 1 0.0399691524 1.561209e-05 0 4.380247e-05 1.029287e-08
## 2 -0.0498240619 2.488526e-03 0 -5.338921e-05 3.807621e-07
## 3 0.0076509014 4.478519e-05 0 -7.099364e-05 4.814451e-08
## 4 -0.0003987212 1.375705e-05 0 -5.948425e-05 1.540451e-08
## Beta_Exchange_p Beta_Inflation Beta_Inflation_SE Beta_Inflation_p AR1
## 1 0 -0.006859032 8.720677e-07 0 0.33147318
## 2 0 -0.048509767 2.947300e-03 0 0.38772783
## 3 0 -0.015170548 7.870858e-06 0 0.24530363
## 4 0 -0.005680366 2.862523e-06 0 -0.02058317
## AR1_SE AR1_p GARCH_Alpha1 GARCH_Alpha1_SE GARCH_Alpha1_p GARCH_Beta1
## 1 1.405742e-04 0 0.1780932 1.405920e-04 0 0.7959045
## 2 2.071048e-04 0 -1.3763285 1.017014e-03 0 0.9035757
## 3 8.208769e-05 0 -0.2777696 6.793828e-05 0 0.9629232
## 4 4.607812e-06 0 -0.7556268 1.140323e-05 0 0.7484068
## GARCH_Beta1_SE GARCH_Beta1_p Persistence Log_Likelihood
## 1 1.344149e-05 0 0.973997722 232.03455
## 2 4.298176e-04 0 -0.472752820 25.28327
## 3 3.810202e-04 0 0.685153527 249.26635
## 4 1.011861e-04 0 -0.007219951 263.37887
# 5. Forecast mu and sigma (1-step ahead)
last_exog <- matrix(tail(exog_mat, 1), nrow = 1) # 1 x n_mxreg
get_forecast <- function(fit, exog) {
if (is.null(fit)) return(list(mu = NA_real_, sigma = NA_real_))
f <- tryCatch(
ugarchforecast(fit, n.ahead = 1,
external.forecasts = list(mregfor = exog, vregfor = exog)),
error = function(e) {
message("ugarchforecast error: ", conditionMessage(e))
return(NULL)
}
)
if (is.null(f)) return(list(mu = NA_real_, sigma = NA_real_))
# forecast slots: f@forecast$seriesFor, f@forecast$sigmaFor
list(
mu = as.numeric(f@forecast$seriesFor[1, ]),
sigma = as.numeric(f@forecast$sigmaFor[1, ])
)
}
forecasts <- lapply(fits, get_forecast, exog = last_exog)
# Extract mu and sigma; fall back to the historical mean/sd from apt_data if the forecast fails
mu_vec <- sapply(seq_along(assets), function(i) {
fmu <- forecasts[[i]]$mu
if (is.na(fmu)) mean(apt_data[[assets[i]]], na.rm = TRUE) else fmu
})
sigma_vec <- sapply(seq_along(assets), function(i) {
fs <- forecasts[[i]]$sigma
if (is.na(fs)) sd(apt_data[[assets[i]]], na.rm = TRUE) else fs
})
names(mu_vec) <- assets
names(sigma_vec) <- assets
# 6. Set up the Mean–VaR optimization parameters
alpha <- 0.05
lambda <- 10
n_sim <- 20000
# 7. Objective function (with target_return)
objective_mean_var <- function(w_raw, max_risk = NA, target_return = NA) {
w <- pmax(0, w_raw)
if (sum(w) == 0) w <- rep(1 / length(w), length(w))
w <- w / sum(w)
# Simulate returns (assuming independent normal draws)
sims <- matrix(
rnorm(n_sim * length(assets),
mean = rep(mu_vec, each = n_sim),
sd = rep(sigma_vec, each = n_sim)),
ncol = length(assets)
)
port_rets <- sims %*% w
mean_p <- mean(port_rets)
sd_p <- sd(port_rets)
var_p <- quantile(port_rets, probs = alpha, type = 7)
penalty_risk <- 0
if (!is.na(max_risk) && sd_p > max_risk) {
penalty_risk <- 1e4 * (sd_p - max_risk)^2
}
penalty_return <- 0
if (!is.na(target_return)) {
penalty_return <- 1e4 * (mean_p - target_return)^2
}
obj <- -mean_p + lambda * abs(var_p) + penalty_risk + penalty_return
return(obj)
}
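# Illustrative sketch (not part of the original runs): evaluate the objective once at equal weights
# with a hypothetical risk cap and target return, to see how the penalty terms enter the score.
set.seed(123) # hypothetical seed, used only for this illustration
objective_mean_var(rep(1 / length(assets), length(assets)), max_risk = 0.02, target_return = 0.0006)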
# 8. DEoptim optimization routine (with target_return)
optimize_portfolio <- function(max_risk = NA, target_return = NA) {
lower <- rep(0, length(assets))
upper <- rep(1, length(assets))
res <- DEoptim(
fn = function(w) objective_mean_var(w, max_risk, target_return),
lower = lower, upper = upper,
control = DEoptim.control(NP = 60, itermax = 150, trace = FALSE)
)
best_w <- pmax(0, res$optim$bestmem)
best_w <- best_w / sum(best_w)
# Recompute the metrics for the best solution
sims <- matrix(
rnorm(n_sim * length(assets),
mean = rep(mu_vec, each = n_sim),
sd = rep(sigma_vec, each = n_sim)),
ncol = length(assets)
)
port_rets <- sims %*% best_w
mean_p <- mean(port_rets)
sd_p <- sd(port_rets)
var_p <- quantile(port_rets, probs = alpha, type = 7)
tibble(
max_risk = ifelse(is.na(max_risk), "No Limit", sprintf("%.2f", max_risk)),
target_return = ifelse(is.na(target_return), "None", round(target_return, 6)),
Mean = mean_p,
SD = sd_p,
VaR = var_p,
t(best_w)
)
}
# 9. Compute target_return from the equal-weighted portfolio
equal_w <- rep(1 / length(assets), length(assets))
target_return_eq <- sum(equal_w * mu_vec)
cat("Target Return (Equal-Weighted):", round(target_return_eq, 6), "\n")
## Target Return (Equal-Weighted): 0.000602
# 10. Max-risk simulation + target return
max_risk_values <- c(NA, 0.02, 0.04, 0.06, 0.08)
results <- lapply(max_risk_values, function(r) optimize_portfolio(r, target_return_eq))
# Take the best solution from the no-risk-limit scenario (the first one)
w_opt <- as.numeric(results[[1]]$`t(best_w)`)
# fall back if the weights are unavailable
if (is.null(w_opt) || any(is.na(w_opt))) {
w_opt <- equal_w
}
# Store the equal-weight vector
w_eq <- equal_w
# 11. Evaluate the performance of both portfolios
return_matrix <- do.call(cbind, lapply(assets, function(a) apt_data[[a]]))
colnames(return_matrix) <- assets
# helper functions
calculate_portfolio_return <- function(w, ret_matrix) {
sum(w * colMeans(ret_matrix, na.rm = TRUE))
}
calculate_portfolio_var <- function(w, ret_matrix) {
covm <- cov(ret_matrix, use = "pairwise.complete.obs")
sqrt(as.numeric(t(w) %*% covm %*% w))
}
eval_portfolio <- function(w, mu, cov_matrix, alpha = 0.05) {
mu_p <- sum(w * mu)
sigma_p <- sqrt(as.numeric(t(w) %*% cov_matrix %*% w))
VaR_p <- mu_p + qnorm(alpha) * sigma_p
ES_p <- mu_p + (dnorm(qnorm(alpha)) / alpha) * sigma_p
return(c(mu_p, sigma_p, VaR_p, ES_p))
}
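# Reference sketch (assumption: normally distributed returns; not used in the original tables):
# the closed-form lower-tail VaR and ES of a normal portfolio are mu + qnorm(alpha)*sigma and
# mu - sigma*dnorm(qnorm(alpha))/alpha, which can be compared against eval_portfolio() above.
normal_var_es <- function(w, mu, cov_matrix, alpha = 0.05) {
  mu_p <- sum(w * mu)
  sigma_p <- sqrt(as.numeric(t(w) %*% cov_matrix %*% w))
  c(VaR = mu_p + qnorm(alpha) * sigma_p,
    ES = mu_p - sigma_p * dnorm(qnorm(alpha)) / alpha)
}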
mu_hist <- colMeans(return_matrix, na.rm = TRUE)
cov_matrix <- cov(return_matrix, use = "pairwise.complete.obs")
perf_opt <- eval_portfolio(w_opt, mu_hist, cov_matrix)
perf_eq <- eval_portfolio(w_eq, mu_hist, cov_matrix)
comparison <- data.frame(
Portfolio = c("Mean–VaR (EGARCH(1,1)-X+DEOptim)", "Equal-Weighted"),
Mean_Return = c(perf_opt[1], perf_eq[1]),
Volatility = c(perf_opt[2], perf_eq[2]),
VaR_5pct = c(perf_opt[3], perf_eq[3]),
ES_5pct = c(perf_opt[4], perf_eq[4])
)
print(comparison)
## Portfolio Mean_Return Volatility VaR_5pct
## 1 Mean–VaR (EGARCH(1,1)-X+DEOptim) 0.004882497 0.01766997 -0.02418202
## 2 Equal-Weighted -0.005582913 0.17948072 -0.30080243
## ES_5pct
## 1 0.04133057
## 2 0.36463427
weights_df <- data.frame(
Asset = assets,
EGARCHX_MeanVaR = w_opt,
Equal_Weighted = w_eq,
stringsAsFactors = FALSE
)
melted_weights <- reshape2::melt(weights_df, id.vars = "Asset")
ggplot(melted_weights, aes(x = Asset, y = value, fill = variable)) +
geom_bar(stat = "identity", position = "dodge") +
geom_text(aes(label = round(value, 3)),
vjust = -0.5, size = 3.5, position = position_dodge(0.9)) +
labs(
title = "Perbandingan Bobot Portofolio: Mean–VaR (EGARCH(1,1)-X+DEOptim) vs Equal-Weighted",
x = "Aset",
y = "Bobot",
fill = "Metode"
) +
theme_minimal()

# 12. Backtesting
# Utilities for the conditional mean/volatility
extract_conditional_mean <- function(egarchx_model) {
if (is.null(egarchx_model)) return(NULL)
if (egarchx_model@fit$convergence != 0) return(NULL)
fitted(egarchx_model)
}
extract_conditional_volatility <- function(egarchx_model) {
if (is.null(egarchx_model)) return(NULL)
if (egarchx_model@fit$convergence != 0) return(NULL)
sigma(egarchx_model)
}
# calculate_egarchx_var returns time series of VaR estimates (negative numbers)
calculate_egarchx_var <- function(egarchx_model, confidence_level = 0.95) {
if (is.null(egarchx_model)) return(NULL)
if (egarchx_model@fit$convergence != 0) return(NULL)
cond_mean <- fitted(egarchx_model)
cond_vol <- sigma(egarchx_model)
df <- tryCatch(coef(egarchx_model)["shape"], error = function(e) NA)
if (is.na(df) || df <= 2) df <- 5
t_critical <- qt(1 - confidence_level, df = df)
# VaR per time: negative sign so exceedances measured as actual_loss > VaR
vaR_ts <- -(cond_mean + t_critical * cond_vol)
return(vaR_ts)
}
perform_egarchx_var_backtesting <- function(returns_vec, egarchx_model, confidence_level = 0.95) {
if (is.null(egarchx_model) || egarchx_model@fit$convergence != 0) {
return(list(exceedances = NA, dates = NA, var_estimates = NA, n = NA))
}
var_estimates <- calculate_egarchx_var(egarchx_model, confidence_level)
# align lengths - tail to match returns length (if needed)
n <- min(length(var_estimates), length(returns_vec))
var_estimates <- tail(var_estimates, n)
returns_vec <- tail(returns_vec, n)
exceedances <- ifelse(-returns_vec > var_estimates, 1, 0)
list(var_estimates = var_estimates, exceedances = exceedances, dates = NULL, n = n)
}
kupiec_test <- function(exceedances, total_obs, confidence_level) {
alpha <- 1 - confidence_level
x <- sum(exceedances, na.rm = TRUE)
p_hat <- x / total_obs
if (x == 0) {
lr_stat <- -2 * log((1 - alpha)^total_obs)
} else if (x == total_obs) {
lr_stat <- -2 * log(alpha^total_obs)
} else {
lr_stat <- -2 * (log((1 - alpha)^(total_obs - x) * alpha^x) -
log((1 - p_hat)^(total_obs - x) * p_hat^x))
}
p_value <- 1 - pchisq(lr_stat, df = 1)
list(
exceptions = x,
total_obs = total_obs,
expected_rate = alpha,
actual_rate = p_hat,
lr_stat = lr_stat,
p_value = p_value,
result = ifelse(p_value > 0.05, "Accept H0", "Reject H0")
)
}
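# Usage sketch with toy numbers (not the study data): 6 exceedances out of 104 observations
# at the 95% confidence level.
kupiec_test(exceedances = c(rep(1, 6), rep(0, 98)), total_obs = 104, confidence_level = 0.95)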
# Map returns to the names used in fits (requires the excess_* columns in apt_data)
returns_list <- list(
gold_return = apt_data$excess_gold,
btc_return = apt_data$excess_btc,
jkse_return = apt_data$excess_jkse,
bond_return = apt_data$excess_bond
)
egarchx_backtest_results <- list()
egarchx_kupiec_results <- list()
for (asset_name in names(returns_list)) {
model <- fits[[asset_name]]
backtest <- perform_egarchx_var_backtesting(returns_list[[asset_name]], model, confidence_level = 0.95)
egarchx_backtest_results[[asset_name]] <- backtest
if (!is.na(backtest$n)) {
kupiec <- kupiec_test(backtest$exceedances, backtest$n, confidence_level = 0.95)
egarchx_kupiec_results[[asset_name]] <- kupiec
}
}
egarchx_kupiec_summary <- do.call(rbind, lapply(names(egarchx_kupiec_results), function(asset) {
result <- egarchx_kupiec_results[[asset]]
if (is.null(result)) return(NULL)
data.frame(
Asset = asset,
Observations = result$total_obs,
Exceptions = result$exceptions,
Expected_Rate = result$expected_rate,
Actual_Rate = result$actual_rate,
LR_Statistic = result$lr_stat,
P_Value = result$p_value,
Result = result$result,
stringsAsFactors = FALSE
)
}))
print(egarchx_kupiec_summary)
## Asset Observations Exceptions Expected_Rate Actual_Rate LR_Statistic
## 1 gold_return 104 1 0.05 0.009615385 5.2787477
## 2 btc_return 104 1 0.05 0.009615385 5.2787477
## 3 jkse_return 104 7 0.05 0.067307692 0.5945158
## 4 bond_return 104 6 0.05 0.057692308 0.1237054
## P_Value Result
## 1 0.02158727 Reject H0
## 2 0.02158727 Reject H0
## 3 0.44067817 Accept H0
## 4 0.72504991 Accept H0
# 13. Compare Mean–VaR and Equal-weighted Portfolio Performance
dates <- as.Date(apt_data$date)
# Compute portfolio returns under the optimal and equal weights
apt_data$portfolio_return_egarchx <- as.numeric(return_matrix %*% w_opt)
apt_data$portfolio_return_equal <- as.numeric(return_matrix %*% w_eq)
# Compute cumulative returns (starting at 1)
egarchx_cumulative <- cumprod(1 + apt_data$portfolio_return_egarchx)
equal_cumulative <- cumprod(1 + apt_data$portfolio_return_equal)
jkse_cumulative <- cumprod(1 + apt_data$jkse_return)
bond_cumulative <- cumprod(1 + apt_data$bond_return)
btc_cumulative <- cumprod(1 + apt_data$btc_return)
gold_cumulative <- cumprod(1 + apt_data$gold_return)
plot(
dates, egarchx_cumulative, type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Cumulative Return (Start = 1)",
main = "Cumulative Performance Comparison",
ylim = c(
min(0.5, min(egarchx_cumulative, equal_cumulative, na.rm = TRUE) * 0.9),
max(egarchx_cumulative, equal_cumulative, btc_cumulative, na.rm = TRUE) * 1.1
)
)
lines(dates, equal_cumulative, col = "blue", lwd = 2, lty = 2)
lines(dates, jkse_cumulative, col = "darkgreen", lwd = 1)
lines(dates, bond_cumulative, col = "purple", lwd = 1)
lines(dates, btc_cumulative, col = "black", lwd = 1)
lines(dates, gold_cumulative, col = "goldenrod1", lwd = 1)
legend(
"topleft",
legend = c("Mean–VaR+EGARCH(1,1)-X", "Equal-Weight", "JKSE", "Bond", "BTC", "Gold"),
col = c("red", "blue", "darkgreen", "purple", "black", "goldenrod1"),
lwd = c(2, 2, 1, 1, 1, 1),
lty = c(1, 2, 1, 1, 1, 1),
cex = 0.8
)

# Rolling Metrics (24-month window)
window <- 24
assets_returns <- cbind(
apt_data$portfolio_return_egarchx,
apt_data$portfolio_return_equal,
apt_data$jkse_return,
apt_data$bond_return,
apt_data$btc_return,
apt_data$gold_return
)
colnames(assets_returns) <- c("MeanVar_EGARCHX", "Equal", "JKSE", "Bond", "BTC", "Gold")
rolling_returns <- rolling_volatility <- rolling_sharpe <- rolling_var <-
matrix(NA, nrow = nrow(assets_returns) - window, ncol = ncol(assets_returns))
for (i in 1:(nrow(assets_returns) - window)) {
window_data <- assets_returns[i:(i + window - 1), , drop = FALSE]
rolling_returns[i, ] <- colMeans(window_data, na.rm = TRUE)
rolling_volatility[i, ] <- apply(window_data, 2, sd, na.rm = TRUE)
rolling_sharpe[i, ] <- rolling_returns[i, ] / rolling_volatility[i, ]
rolling_var[i, ] <- apply(window_data, 2, function(x) -quantile(x, 0.05, na.rm = TRUE))
}
# Plot Rolling Sharpe Ratio
par(mfrow = c(2, 1))
plot(
dates[(window + 1):length(dates)], rolling_sharpe[, 1],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Sharpe Ratio",
main = "Rolling 24-Month Sharpe Ratio",
ylim = range(rolling_sharpe[, 1:2], na.rm = TRUE) * c(0.9, 1.1)
)
lines(dates[(window + 1):length(dates)], rolling_sharpe[, 2], col = "blue", lwd = 2, lty = 2)
abline(h = 0, lty = 2, col = "gray")
legend(
"bottomright",
legend = c("Mean–VaR+EGARCHX", "Equal-Weight"),
col = c("red", "blue"),
lwd = 2, lty = c(1, 2), cex = 0.8
)
# Plot Rolling VaR
plot(
dates[(window + 1):length(dates)], rolling_var[, 1],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Value-at-Risk (95%)",
main = "Rolling 24-Month VaR (95%)",
ylim = range(rolling_var[, 1:2], na.rm = TRUE) * c(0.9, 1.1)
)
lines(dates[(window + 1):length(dates)], rolling_var[, 2], col = "blue", lwd = 2, lty = 2)
legend(
"topright",
legend = c("Mean–VaR+EGARCHX", "Equal-Weight"),
col = c("red", "blue"),
lwd = 2, lty = c(1, 2), cex = 0.8
)

par(mfrow = c(1, 1))
# Summary Statistics
return_stats <- data.frame(
Mean_Return = colMeans(assets_returns, na.rm = TRUE) * 100,
Volatility = apply(assets_returns, 2, sd, na.rm = TRUE) * 100,
Sharpe = colMeans(assets_returns, na.rm = TRUE) /
apply(assets_returns, 2, sd, na.rm = TRUE),
VaR_95 = apply(assets_returns, 2, function(x) -quantile(x, 0.05, na.rm = TRUE)) * 100,
Max_Drawdown = apply(assets_returns, 2, function(x) {
cumu <- cumprod(1 + x)
peak <- cumu[1]
maxDD <- 0
for (i in 2:length(cumu)) {
if (cumu[i] > peak) peak <- cumu[i]
DD <- (peak - cumu[i]) / peak
if (DD > maxDD) maxDD <- DD
}
return(maxDD * 100)
})
)
return_stats$Portfolio <- rownames(return_stats)
return_stats <- return_stats[order(-return_stats$Sharpe), ]
print(return_stats)
## Mean_Return Volatility Sharpe VaR_95 Max_Drawdown
## Gold 0.91661033 3.194470 0.28693662 3.901653 14.468387
## MeanVar_EGARCHX 0.48824973 1.766997 0.27631610 2.384892 8.507039
## JKSE 0.38148754 3.514663 0.10854170 4.619607 32.465213
## Bond -0.07608112 2.835834 -0.02682848 4.497432 24.838468
## Equal -0.55829126 17.948072 -0.03110592 7.463672 251.233083
## BTC -3.45518180 71.955272 -0.04801847 25.803555 145.022780
## Portfolio
## Gold Gold
## MeanVar_EGARCHX MeanVar_EGARCHX
## JKSE JKSE
## Bond Bond
## Equal Equal
## BTC BTC
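# Equivalent vectorized form of the max-drawdown loop above (illustrative; it should give the
# same percentage drawdown for a vector of simple returns x).
max_drawdown_pct <- function(x) {
  cumu <- cumprod(1 + x)
  max((cummax(cumu) - cumu) / cummax(cumu)) * 100
}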
# Visualize EGARCH-X VaR backtesting for every asset (including gold)
par(mfrow=c(2,2))
confidence_level <- 0.95
for (asset_name in names(egarchx_backtest_results)) {
backtest <- egarchx_backtest_results[[asset_name]]
asset_returns <- returns_list[[asset_name]]
if (is.na(backtest$n)) next
plot_data <- data.frame(
Index = 1:length(asset_returns),
Return = asset_returns,
VaR = backtest$var_estimates,
Exceed = backtest$exceedances
)
plot_data$NegReturn <- -plot_data$Return
plot(plot_data$Index, plot_data$NegReturn, type = "l",
col = "black", xlab = "Observation", ylab = "Return/VaR",
main = paste0(asset_name, " EGARCHX-VaR (", confidence_level*100, "%)"),
ylim = c(min(c(plot_data$NegReturn, plot_data$VaR), na.rm=TRUE),
max(c(plot_data$NegReturn, plot_data$VaR), na.rm=TRUE) * 1.2))
lines(plot_data$Index, plot_data$VaR, col = "blue", lty = 2)
exceed_idx <- which(plot_data$Exceed == 1)
points(exceed_idx, plot_data$NegReturn[exceed_idx],
col = "red", pch = 19, cex = 0.8)
if (asset_name %in% names(egarchx_kupiec_results)) {
result <- egarchx_kupiec_results[[asset_name]]
legend_text <- paste0(
"Exceptions: ", result$exceptions, "/", result$total_obs, " (",
round(result$actual_rate * 100, 2), "%)\n",
"Expected: ", round(result$expected_rate * 100, 2), "%\n",
"p-value: ", round(result$p_value, 4), "\n",
"Result: ", result$result
)
} else {
legend_text <- "No Kupiec test results"
}
legend("topright", legend = legend_text, bty = "n", cex = 0.7)
}

# Basel traffic light test
basel_traffic_light <- function(exceptions, observations, confidence_level) {
if (confidence_level == 0.99) {
if (exceptions <= 4) return("Green Zone")
else if (exceptions <= 9) return("Yellow Zone")
else return("Red Zone")
} else if (confidence_level == 0.95) {
expected_exceptions <- observations * 0.05
if (exceptions <= expected_exceptions * 1.2) return("Green Zone")
else if (exceptions <= expected_exceptions * 1.6) return("Yellow Zone")
else return("Red Zone")
}
}
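# Usage sketch with toy numbers: under the ad hoc 1.2x / 1.6x multipliers above, 7 exceedances in
# 104 observations at the 95% level lands in the Yellow Zone (the expected count is 5.2).
basel_traffic_light(exceptions = 7, observations = 104, confidence_level = 0.95)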
basel_results_egarchx <- lapply(egarchx_kupiec_results, function(result) {
if (is.null(result)) return(NULL)
zone <- basel_traffic_light(result$exceptions, result$total_obs, confidence_level)
data.frame(
Exceptions = result$exceptions,
Observations = result$total_obs,
Expected_Rate = result$expected_rate,
Actual_Rate = result$actual_rate,
Zone = zone
)
})
basel_egarchx_summary <- do.call(rbind, lapply(names(basel_results_egarchx), function(asset) {
result <- basel_results_egarchx[[asset]]
if (is.null(result)) return(NULL)
result$Asset <- asset
return(result)
}))
if (!is.null(basel_egarchx_summary) && nrow(basel_egarchx_summary) > 0) {
basel_egarchx_summary <- basel_egarchx_summary[, c("Asset", "Exceptions", "Observations",
"Expected_Rate", "Actual_Rate", "Zone")]
print(basel_egarchx_summary)
ggplot(basel_egarchx_summary, aes(x = Asset, y = Actual_Rate * 100, fill = Zone)) +
geom_bar(stat = "identity") +
geom_hline(yintercept = 5, linetype = "dashed", color = "black") +
scale_fill_manual(values = c("Green Zone" = "darkgreen",
"Yellow Zone" = "gold",
"Red Zone" = "darkred")) +
theme_minimal() +
labs(title = "EGARCHX-VaR Exceedance Rates by Asset with Basel Zones",
subtitle = paste0(confidence_level * 100, "% VaR"),
y = "Exceedance Rate (%)",
x = "Asset") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
## Asset Exceptions Observations Expected_Rate Actual_Rate Zone
## 1 gold_return 1 104 0.05 0.009615385 Green Zone
## 2 btc_return 1 104 0.05 0.009615385 Green Zone
## 3 jkse_return 7 104 0.05 0.067307692 Yellow Zone
## 4 bond_return 6 104 0.05 0.057692308 Green Zone

# Optional: Export Summary
if (!requireNamespace("writexl", quietly = TRUE)) install.packages("writexl")
writexl::write_xlsx(return_stats, "Portfolio_Performance_Comparison EGARCHX.xlsx")
result_df <- bind_rows(results)
result_df <- result_df[, -5]  # drop the VaR column; per-scenario weights are re-attached below
assets_weight <- bind_rows(
  data.frame(results[[1]]$`t(best_w)`), data.frame(results[[2]]$`t(best_w)`),
  data.frame(results[[3]]$`t(best_w)`), data.frame(results[[4]]$`t(best_w)`),
  data.frame(results[[5]]$`t(best_w)`)
)
colnames(assets_weight) <- assets
result_df <- cbind(result_df, assets_weight)
writexl::write_xlsx(data.frame(result_df),"Comparison Data Frame EGARCHX.xlsx")
# 14. Display all simulation results
cat("\n=== Mean–VaR Simulation Results with Risk Limits ===\n")
##
## === Mean–VaR Simulation Results with Risk Limits ===
print(result_df)
## max_risk target_return Mean SD t(best_w).1 t(best_w).2
## 1 No Limit 0.000602 0.0004431166 0.01605084 0.476353587 0.005286068
## 2 0.02 0.000602 0.0006385414 0.01603827 0.483998422 0.005827608
## 3 0.04 0.000602 0.0003340957 0.01567232 0.467903139 0.002392165
## 4 0.06 0.000602 0.0003003612 0.01601335 0.473673824 0.004028096
## 5 0.08 0.000602 0.0008059399 0.01608408 0.486802382 0.003529337
## t(best_w).3 t(best_w).4 gold_return btc_return jkse_return bond_return
## 1 0.238916868 0.279443477 0.4763536 0.005286068 0.2389169 0.2794435
## 2 0.264319187 0.245854783 0.4839984 0.005827608 0.2643192 0.2458548
## 3 0.254689608 0.275015087 0.4679031 0.002392165 0.2546896 0.2750151
## 4 0.263832358 0.258465721 0.4736738 0.004028096 0.2638324 0.2584657
## 5 0.267681877 0.241986404 0.4868024 0.003529337 0.2676819 0.2419864
# 15. Visualize the weight changes
library(reshape2)
melted <- melt(result_df[, c("max_risk", assets)], id.vars = "max_risk")
ggplot(melted, aes(x = max_risk, y = value, fill = variable)) +
geom_bar(stat = "identity", position = "dodge") +
labs(
title = "Perubahan Bobot Portofolio terhadap Batas Risiko",
x = "Batas Risiko (SD Maksimum)",
y = "Bobot Aset",
fill = "Aset"
) +
theme_minimal() +
theme(
plot.title = element_text(size = 14, face = "bold"),
axis.text.x = element_text(size = 12)
) +
geom_text(
aes(label = round(value, 4)), # show the weight values
position = position_dodge(width = 0.9), # align with the bars
vjust = -0.3, # slightly above each bar
size = 3.5 # text size
)

plot_pie <- function(risk_label, data) {
data_subset <- data %>% filter(max_risk == risk_label)
ggplot(data_subset, aes(x = "", y = value, fill = variable)) +
geom_col(width = 1, color = "white") +
coord_polar(theta = "y") +
labs(
title = paste("Distribusi Bobot Portofolio\nBatas Risiko:", risk_label),
fill = "Aset"
) +
geom_text(
aes(label = paste0(round(value * 100, 2), "%")),
position = position_stack(vjust = 0.5),
color = "white",
size = 4
) +
theme_void() +
theme(
plot.title = element_text(hjust = 0.5, size = 13, face = "bold"),
legend.title = element_text(size = 10),
legend.text = element_text(size = 9)
)
}
# Build a pie chart for each risk-limit scenario
plots <- lapply(unique(melted$max_risk), function(risk) plot_pie(risk, melted))
# Display all charts in a grid (3 columns)
do.call(grid.arrange, c(plots, ncol = 3))
