# =========================================================
# EGARCH(1,1)-X + Mean–VaR Optimization with DEoptim
# Extended: Max Risk Simulation
# =========================================================
library(readxl)
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(stringr)
library(lubridate)
##
## Attaching package: 'lubridate'
## The following objects are masked from 'package:base':
##
## date, intersect, setdiff, union
library(PerformanceAnalytics)
## Loading required package: xts
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
##
## ######################### Warning from 'xts' package ##########################
## # #
## # The dplyr lag() function breaks how base R's lag() function is supposed to #
## # work, which breaks lag(my_xts). Calls to lag(my_xts) that you type or #
## # source() into this session won't work correctly. #
## # #
## # Use stats::lag() to make sure you're not using dplyr::lag(), or you can add #
## # conflictRules('dplyr', exclude = 'lag') to your .Rprofile to stop #
## # dplyr from breaking base R's lag() function. #
## # #
## # Code in packages is not affected. It's protected by R's namespace mechanism #
## # Set `options(xts.warn_dplyr_breaks_lag = FALSE)` to suppress this warning. #
## # #
## ###############################################################################
##
## Attaching package: 'xts'
## The following objects are masked from 'package:dplyr':
##
## first, last
##
## Attaching package: 'PerformanceAnalytics'
## The following object is masked from 'package:graphics':
##
## legend
library(forecast)
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(tseries)
library(car)
## Loading required package: carData
##
## Attaching package: 'car'
## The following object is masked from 'package:dplyr':
##
## recode
library(lmtest)
library(PortfolioAnalytics)
## Loading required package: foreach
## Registered S3 method overwritten by 'PortfolioAnalytics':
## method from
## print.constraint ROI
library(DEoptim)
## Loading required package: parallel
##
## DEoptim package
## Differential Evolution algorithm in R
## Authors: D. Ardia, K. Mullen, B. Peterson and J. Ulrich
library(reshape2)
library(ggplot2)
library(quadprog)
library(quantmod)
## Loading required package: TTR
library(rugarch) # for GARCH-X
library(gridExtra)
##
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
##
## combine
library(tidyr)
##
## Attaching package: 'tidyr'
## The following object is masked from 'package:reshape2':
##
## smiths
library(tibble)
# 1. Data
# Import BI Rate data
birate <- read_excel("Database/bi_rate.xlsx")
colnames(birate) <- c("date","bi_rate")
head(birate); tail(birate)
## # A tibble: 6 × 2
## date bi_rate
## <chr> <chr>
## 1 18 Desember 2024 6.00
## 2 20 November 2024 6.00
## 3 16 Oktober 2024 6.00
## 4 18 September 2024 6.00
## 5 21 Agustus 2024 6.25
## 6 17 Juli 2024 6.25
## # A tibble: 6 × 2
## date bi_rate
## <chr> <chr>
## 1 22 September 2016 5.00
## 2 19 Agustus 2016 5.25
## 3 21 Juli 2016 5.25
## 4 16 Juni 2016 5.25
## 5 19 Mei 2016 5.50
## 6 21 April 2016 5.50
Sys.setlocale("LC_TIME", "id_ID.UTF-8")
## [1] "id_ID.UTF-8"
birate <- birate %>%
mutate(date = dmy(date))
birate$bi_rate <- as.numeric(birate$bi_rate)
birate <- birate[order(birate$date), ]
head(birate)
## # A tibble: 6 × 2
## date bi_rate
## <date> <dbl>
## 1 2016-04-21 5.5
## 2 2016-05-19 5.5
## 3 2016-06-16 5.25
## 4 2016-07-21 5.25
## 5 2016-08-19 5.25
## 6 2016-09-22 5
# Import 10-year bond data
bond10 <- read.csv("Database/bond10 new.csv")
bond10 <- bond10[, c("Date", "Price")]
colnames(bond10) <- c("date", "bond10")
bond10$date <- ymd(bond10$date)
str(bond10)
## 'data.frame': 234 obs. of 2 variables:
## $ date : Date, format: "2025-04-30" "2025-03-27" ...
## $ bond10: num 99.1 97.4 98.7 98.4 97.3 ...
# Filter data between 2014-01-01 and 2024-12-31
bond10 <- bond10 %>%
filter(date >= as.Date("2014-01-01") & date <= as.Date("2024-12-31"))
bond10 <- bond10[order(bond10$date), ]
head(bond10)
## date bond10
## 132 2014-01-30 96.883
## 131 2014-02-28 99.809
## 130 2014-03-28 102.600
## 129 2014-04-30 102.700
## 128 2014-05-30 102.150
## 127 2014-06-30 100.800
# Import Bitcoin data
btc_usd <- read.csv("Database/btc_usd.csv")
btc_usd <- btc_usd[, c("Tanggal", "Terakhir")]
colnames(btc_usd) <- c("date", "btc")
btc_usd <- btc_usd %>%
mutate(date = dmy(date))
btc_usd <- btc_usd[order(btc_usd$date), ]
btc_usd <- btc_usd %>%
mutate(btc = as.numeric(gsub(",", "", btc)))
head(btc_usd)
## date btc
## 4015 2014-01-01 7403
## 4014 2014-01-02 7750
## 4013 2014-01-03 8121
## 4012 2014-01-04 8018
## 4011 2014-01-05 9040
## 4010 2014-01-06 9345
# Import CPI data
cpi <- read_excel("Database/inflation.xlsx")
colnames(cpi) <- c("date", "cpi")
bulan_id <- c("Januari","Februari","Maret","April","Mei","Juni",
"Juli","Agustus","September","Oktober","November","Desember")
bulan_en <- c("January","February","March","April","May","June",
"July","August","September","October","November","December")
cpi <- cpi %>%
mutate(date = str_replace_all(date, setNames(bulan_en, bulan_id)),
date = parse_date_time(date, orders = "my"),
date = as.Date(date))
cpi <- cpi[order(cpi$date), ]
cpi$cpi <- as.numeric(cpi$cpi)
head(cpi)
## # A tibble: 6 × 2
## date cpi
## <date> <dbl>
## 1 2014-01-01 8.22
## 2 2014-02-01 7.75
## 3 2014-03-01 7.32
## 4 2014-04-01 7.25
## 5 2014-05-01 7.32
## 6 2014-06-01 6.7
# Import JKSE data
jkse <- read.csv("Database/jkse.csv")
jkse <- jkse[, c("Tanggal", "Terakhir")]
colnames(jkse) <- c("date", "jkse")
jkse <- jkse %>%
mutate(date = dmy(date))
jkse <- jkse[order(jkse$date), ]
jkse <- jkse %>%
  mutate(
    jkse = str_replace_all(jkse, "\\.", ""), # remove dots (thousands separator)
    jkse = str_replace_all(jkse, ",", "."),  # convert commas to decimal points
    jkse = as.numeric(jkse)                  # convert to numeric
  )
head(jkse)
## date jkse
## 2681 2014-01-01 4274.18
## 2680 2014-01-02 4327.27
## 2679 2014-01-03 4257.66
## 2678 2014-01-06 4202.81
## 2677 2014-01-07 4175.81
## 2676 2014-01-08 4200.59
# Import IDR exchange rate data
kurs_idr <- read.csv("Database/idr_kurs.csv")
kurs_idr <- kurs_idr[, c("Tanggal", "Terakhir")]
colnames(kurs_idr) <- c("date", "kurs_idr")
kurs_idr <- kurs_idr %>%
mutate(date = dmy(date))
kurs_idr <- kurs_idr[order(kurs_idr$date), ]
kurs_idr <- kurs_idr %>%
  mutate(
    kurs_idr = str_replace_all(kurs_idr, "\\.", ""), # remove dots (thousands separator)
    kurs_idr = str_replace_all(kurs_idr, ",", "."),  # convert commas to decimal points
    kurs_idr = as.numeric(kurs_idr)                  # convert to numeric
  )
head(kurs_idr)
## date kurs_idr
## 2815 2014-01-01 12170.0
## 2814 2014-01-02 12160.0
## 2813 2014-01-03 12170.0
## 2812 2014-01-06 12180.0
## 2811 2014-01-07 12237.5
## 2810 2014-01-08 12235.0
# Gold data
gold <- read.csv("Database/gold.csv")
gold <- gold[, c("Tanggal", "Terakhir")]
colnames(gold) <- c("date", "gold")
gold <- gold %>%
mutate(date = dmy(date))
gold <- gold[order(gold$date), ]
gold <- gold %>%
  mutate(
    gold = str_replace_all(gold, "\\.", ""), # remove dots (thousands separator)
    gold = str_replace_all(gold, ",", "."),  # convert commas to decimal points
    gold = as.numeric(gold)                  # convert to numeric
  )
head(gold)
## date gold
## 2816 2014-01-02 1225.2
## 2815 2014-01-03 1238.6
## 2814 2014-01-06 1238.0
## 2813 2014-01-07 1229.6
## 2812 2014-01-08 1225.5
## 2811 2014-01-09 1229.4
# Monthly Data Aggregation and Preprocessing
# Convert data to monthly format (for those that aren't already monthly)
birate_monthly <- birate %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(bi_rate = mean(bi_rate, na.rm = TRUE)) %>%
rename(date = year_month)
bond10_monthly <- bond10 %>%
mutate(date = floor_date(date, "month"))
btc_monthly <- btc_usd %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(btc = mean(btc, na.rm = TRUE)) %>%
rename(date = year_month)
jkse_monthly <- jkse %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(jkse = mean(jkse, na.rm = TRUE)) %>%
rename(date = year_month)
kurs_idr_monthly <- kurs_idr %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(kurs_idr = mean(kurs_idr, na.rm = TRUE)) %>%
rename(date = year_month)
gold_monthly <- gold %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(gold = mean(gold, na.rm = TRUE)) %>%
rename(date = year_month)
cpi_monthly <- cpi %>%
mutate(year_month = floor_date(date, "month")) %>%
group_by(year_month) %>%
summarise(cpi = mean(cpi, na.rm = TRUE)) %>%
rename(date = year_month)
# Merge all datasets
merged_data <- birate_monthly %>%
full_join(bond10_monthly, by = "date") %>%
full_join(btc_monthly, by = "date") %>%
full_join(cpi_monthly, by = "date") %>%
full_join(jkse_monthly, by = "date") %>%
full_join(kurs_idr_monthly, by = "date") %>%
full_join(gold_monthly, by = "date")
# Sort by date
merged_data <- merged_data %>% arrange(date)
# Check the result
head(merged_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2014-01-01 NA 96.9 8194. 8.22 4350. 12158. 1244.
## 2 2014-02-01 NA 99.8 6605. 7.75 4515. 11918. 1301.
## 3 2014-03-01 NA 103. 5929. 7.32 4720. 11416. 1337.
## 4 2014-04-01 NA 103. 4620. 7.25 4871. 11431. 1299.
## 5 2014-05-01 NA 102. 4829. 7.32 4925. 11536. 1288.
## 6 2014-06-01 NA 101. 6179. 6.7 4898. 11892. 1283.
tail(merged_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2024-07-01 6.25 97.9 62.9 2.13 7258. 16238. 2398.
## 2 2024-08-01 6.25 100. 60.0 2.12 7417. 15735. 2474.
## 3 2024-09-01 6 101. 60.5 1.84 7740. 15318. 2579.
## 4 2024-10-01 6 98.6 65.7 1.71 7621. 15558. 2695.
## 5 2024-11-01 6 98.1 86.5 1.55 7269. 15813. 2656.
## 6 2024-12-01 6 97.3 98.3 1.57 7216. 16036. 2650.
# Check for missing values
colSums(is.na(merged_data))
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## 0 27 0 0 0 0 0 0
# Data with non NA values
non_na_data <- merged_data %>%
filter(complete.cases(.))
# Check the non NA data
head(non_na_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2016-04-01 5.5 105. 4352. 3.6 4853. 13172. 1244.
## 2 2016-05-01 5.5 104. 4617. 3.33 4770. 13417. 1261.
## 3 2016-06-01 5.25 106. 6445. 3.45 4871. 13338. 1279.
## 4 2016-07-01 5.25 110. 6616. 3.21 5166. 13114. 1339.
## 5 2016-08-01 5.25 109. 5858. 2.79 5401. 13160. 1344.
## 6 2016-09-01 5 110. 6083. 3.07 5337. 13110. 1330.
tail(non_na_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc cpi jkse kurs_idr gold
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2024-07-01 6.25 97.9 62.9 2.13 7258. 16238. 2398.
## 2 2024-08-01 6.25 100. 60.0 2.12 7417. 15735. 2474.
## 3 2024-09-01 6 101. 60.5 1.84 7740. 15318. 2579.
## 4 2024-10-01 6 98.6 65.7 1.71 7621. 15558. 2695.
## 5 2024-11-01 6 98.1 86.5 1.55 7269. 15813. 2656.
## 6 2024-12-01 6 97.3 98.3 1.57 7216. 16036. 2650.
nrow(non_na_data)
## [1] 105
# Convert BTC to IDR
btc_idr <- non_na_data %>%
mutate(btc_idr = btc * kurs_idr) %>%
dplyr::select(date, btc_idr)
# Convert Gold to IDR
gold_idr <- non_na_data %>%
mutate(gold_idr = gold * kurs_idr) %>%
dplyr::select(date, gold_idr)
# Add BTC_IDR and Gold_IDR to the dataset
non_na_data <- non_na_data %>%
left_join(btc_idr, by = "date")
non_na_data <- non_na_data %>%
left_join(gold_idr, by = "date")
# Select final set of variables
non_na_data <- non_na_data %>%
dplyr::select(date, bi_rate, bond10, btc_idr, cpi, jkse, kurs_idr, gold_idr)
# Display the final dataset
head(non_na_data); tail(non_na_data)
## # A tibble: 6 × 8
## date bi_rate bond10 btc_idr cpi jkse kurs_idr gold_idr
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2016-04-01 5.5 105. 57318032. 3.6 4853. 13172. 16383543.
## 2 2016-05-01 5.5 104. 61939403. 3.33 4770. 13417. 16912360.
## 3 2016-06-01 5.25 106. 85965931. 3.45 4871. 13338. 17061819.
## 4 2016-07-01 5.25 110. 86769238. 3.21 5166. 13114. 17561010.
## 5 2016-08-01 5.25 109. 77097435. 2.79 5401. 13160. 17693830.
## 6 2016-09-01 5 110. 79744103. 3.07 5337. 13110. 17430969.
## # A tibble: 6 × 8
## date bi_rate bond10 btc_idr cpi jkse kurs_idr gold_idr
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2024-07-01 6.25 97.9 1021903. 2.13 7258. 16238. 38941212.
## 2 2024-08-01 6.25 100. 944448. 2.12 7417. 15735. 38926254.
## 3 2024-09-01 6 101. 926531. 1.84 7740. 15318. 39502303.
## 4 2024-10-01 6 98.6 1021507. 1.71 7621. 15558. 41930732.
## 5 2024-11-01 6 98.1 1368402. 1.55 7269. 15813. 42000372.
## 6 2024-12-01 6 97.3 1576530. 1.57 7216. 16036. 42502040.
# Calculate returns and changes for each variable
non_na_data$bi_rate <- as.numeric(non_na_data$bi_rate)
apt_data <- non_na_data %>%
  arrange(date) %>%
  mutate(
    jkse_return = c(NA, diff(log(jkse))),
    bond_return = c(NA, diff(log(bond10))),
    btc_return = c(NA, diff(log(btc_idr))),
    gold_return = c(NA, diff(log(gold_idr))), # Add gold returns calculation
    birate_change = c(NA, diff(bi_rate)),
    cpi_change = c(NA, diff(cpi) / cpi[-length(cpi)]),
    kurs_change = c(NA, diff(kurs_idr) / kurs_idr[-length(kurs_idr)])
  ) %>%
  na.omit()
# Create risk-free rate variable from BI Rate
apt_data$rf_rate <- apt_data$bi_rate / 12 / 100 # Convert annual BI Rate to monthly
# Calculate excess returns
apt_data$excess_jkse <- apt_data$jkse_return - apt_data$rf_rate
apt_data$excess_bond <- apt_data$bond_return - apt_data$rf_rate
apt_data$excess_btc <- apt_data$btc_return - apt_data$rf_rate
apt_data$excess_gold <- apt_data$gold_return - apt_data$rf_rate # Add excess returns for gold
# Check the data
head(apt_data)
## # A tibble: 6 × 20
## date bi_rate bond10 btc_idr cpi jkse kurs_idr gold_idr jkse_return
## <date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 2016-05-01 5.5 104. 61939403. 3.33 4770. 13417. 16912360. -0.0173
## 2 2016-06-01 5.25 106. 85965931. 3.45 4871. 13338. 17061819. 0.0210
## 3 2016-07-01 5.25 110. 86769238. 3.21 5166. 13114. 17561010. 0.0589
## 4 2016-08-01 5.25 109. 77097435. 2.79 5401. 13160. 17693830. 0.0445
## 5 2016-09-01 5 110. 79744103. 3.07 5337. 13110. 17430969. -0.0120
## 6 2016-10-01 4.75 109. 83963283. 3.31 5406. 13018. 16481679. 0.0128
## # ℹ 11 more variables: bond_return <dbl>, btc_return <dbl>, gold_return <dbl>,
## # birate_change <dbl>, cpi_change <dbl>, kurs_change <dbl>, rf_rate <dbl>,
## # excess_jkse <dbl>, excess_bond <dbl>, excess_btc <dbl>, excess_gold <dbl>
summary(apt_data)
## date bi_rate bond10 btc_idr
## Min. :2016-05-01 Min. :3.500 Min. : 83.80 Min. : 16779
## 1st Qu.:2018-06-23 1st Qu.:4.188 1st Qu.: 97.23 1st Qu.: 118135
## Median :2020-08-16 Median :4.750 Median :100.53 Median : 342069
## Mean :2020-08-15 Mean :4.886 Mean : 99.82 Mean : 8297702
## 3rd Qu.:2022-10-08 3rd Qu.:5.750 3rd Qu.:102.90 3rd Qu.: 724748
## Max. :2024-12-01 Max. :6.250 Max. :110.16 Max. :110728567
## cpi jkse kurs_idr gold_idr
## Min. :1.320 Min. :4599 Min. :13018 Min. :15468115
## 1st Qu.:2.167 1st Qu.:5853 1st Qu.:13954 1st Qu.:18062115
## Median :3.060 Median :6237 Median :14345 Median :25328121
## Mean :2.997 Mean :6248 Mean :14459 Mean :24420591
## 3rd Qu.:3.490 3rd Qu.:6843 3rd Qu.:15057 3rd Qu.:28087431
## Max. :5.950 Max. :7740 Max. :16335 Max. :42502040
## jkse_return bond_return btc_return
## Min. :-0.201493 Min. :-0.0828848 Min. :-6.89749
## 1st Qu.:-0.012833 1st Qu.:-0.0143272 1st Qu.:-0.06265
## Median : 0.008020 Median :-0.0021093 Median : 0.02405
## Mean : 0.003815 Mean :-0.0007608 Mean :-0.03455
## 3rd Qu.: 0.020662 3rd Qu.: 0.0113273 3rd Qu.: 0.14451
## Max. : 0.086305 Max. : 0.1371552 Max. : 0.65514
## gold_return birate_change cpi_change
## Min. :-0.062174 Min. :-0.250000 Min. :-0.302752
## 1st Qu.:-0.009984 1st Qu.: 0.000000 1st Qu.:-0.077589
## Median : 0.007048 Median : 0.000000 Median :-0.013377
## Mean : 0.009166 Mean : 0.004808 Mean :-0.002132
## 3rd Qu.: 0.026680 3rd Qu.: 0.000000 3rd Qu.: 0.068724
## Max. : 0.100150 Max. : 0.625000 Max. : 0.314394
## kurs_change rf_rate excess_jkse
## Min. :-0.054480 Min. :0.002917 Min. :-0.2052429
## 1st Qu.:-0.006061 1st Qu.:0.003490 1st Qu.:-0.0176251
## Median : 0.001096 Median :0.003958 Median : 0.0038237
## Mean : 0.002065 Mean :0.004072 Mean :-0.0002566
## 3rd Qu.: 0.011865 3rd Qu.:0.004792 3rd Qu.: 0.0164970
## Max. : 0.102563 Max. :0.005208 Max. : 0.0831804
## excess_bond excess_btc excess_gold
## Min. :-0.086635 Min. :-6.90145 Min. :-0.066132
## 1st Qu.:-0.017855 1st Qu.:-0.06723 1st Qu.:-0.013795
## Median :-0.005789 Median : 0.01946 Median : 0.001944
## Mean :-0.004832 Mean :-0.03862 Mean : 0.005095
## 3rd Qu.: 0.008181 3rd Qu.: 0.14029 3rd Qu.: 0.022617
## Max. : 0.132155 Max. : 0.65159 Max. : 0.094941
min(apt_data$date)
## [1] "2016-05-01"
max(apt_data$date)
## [1] "2024-12-01"
#Figure 1. Monthly Closing Price of Assets
# Prepare data for plotting
price_data <- non_na_data %>%
dplyr::select(date, jkse, bond10, btc_idr, gold_idr) %>%
mutate(
log_jkse = log(jkse),
log_bond10 = log(bond10),
log_btc_idr = log(btc_idr),
log_gold = log(gold_idr)
) %>%
dplyr::select(date, log_jkse, log_bond10, log_btc_idr, log_gold)
# Reshape for ggplot
price_long <- melt(price_data, id.vars = "date", variable.name = "Asset", value.name = "LogPrice")
asset_labels <- c(
log_jkse = "JKSE",
log_bond10 = "Bond 10Y",
log_btc_idr = "Bitcoin (IDR)",
log_gold = "Gold"
)
price_long$Asset <- factor(price_long$Asset, levels = names(asset_labels), labels = asset_labels)
# Plot
ggplot(price_long, aes(x = date, y = LogPrice, color = Asset)) +
geom_line(linewidth = 1) +
labs(title = "Monthly Log Closing Price of Assets",
x = "Date", y = "Log Price",
color = "Asset") +
theme_minimal() +
theme(legend.position = "bottom")

# 2. Compute factor surprises (exchange rate, CPI, BI Rate)
compute_surprise_ses <- function(series, series_name = NULL) {
  m <- forecast::ses(series, h = 1)
  res_raw <- as.numeric(residuals(m))
  fit_val <- as.numeric(fitted(m))
  if (!is.null(series_name) && tolower(series_name) %in% c("cpi", "kurs_idr")) {
    res_adj <- res_raw / as.numeric(series)
  } else {
    res_adj <- res_raw
  }
  list(
    res = res_adj,
    fit = fit_val
  )
}
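# Quick illustration (not part of the main pipeline): the SES "surprise" is simply the
# one-step-ahead forecast error, actual minus fitted. The toy series below is
# hypothetical and only meant to show the shape of the output.
toy_series <- c(100, 102, 101, 105, 107)
toy_surprise <- compute_surprise_ses(toy_series)
head(data.frame(fit = toy_surprise$fit, surprise = toy_surprise$res))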
apt_data <- apt_data %>%
  mutate(
    surprise_kurs   = compute_surprise_ses(kurs_idr)$res,
    surprise_cpi    = compute_surprise_ses(cpi)$res,
    surprise_birate = compute_surprise_ses(bi_rate)$res,
    birate_forecast = compute_surprise_ses(bi_rate)$fit,
    cpi_forecast    = compute_surprise_ses(cpi)$fit,
    kurs_forecast   = compute_surprise_ses(kurs_idr)$fit
  ) %>%
  drop_na()
apt_data <- na.omit(apt_data)
# BI Rate plot
p1 <- ggplot(apt_data, aes(x = 1:nrow(apt_data))) +
  geom_line(aes(y = bi_rate, color = "Actual"), linewidth = 1) +
  geom_line(aes(y = birate_forecast, color = "Forecast"), linewidth = 1) +
  labs(title = "BI Rate: Actual vs Forecast", x = "Time", y = "Rate") +
  scale_color_manual(values = c("blue", "red")) +
  theme_minimal()
# CPI plot
p2 <- ggplot(apt_data, aes(x = 1:nrow(apt_data))) +
  geom_line(aes(y = cpi, color = "Actual"), linewidth = 1) +
  geom_line(aes(y = cpi_forecast, color = "Forecast"), linewidth = 1) +
  labs(title = "CPI: Actual vs Forecast", x = "Time", y = "Rate") +
  scale_color_manual(values = c("blue", "red")) +
  theme_minimal()
# Exchange rate plot
p3 <- ggplot(apt_data, aes(x = 1:nrow(apt_data))) +
  geom_line(aes(y = kurs_idr, color = "Actual"), linewidth = 1) +
  geom_line(aes(y = kurs_forecast, color = "Forecast"), linewidth = 1) +
  labs(title = "Exchange Rate: Actual vs Forecast", x = "Time", y = "IDR/USD") +
  scale_color_manual(values = c("blue", "red")) +
  theme_minimal()
# Show plots
library(gridExtra)
grid.arrange(p1, p2, p3, ncol = 1)

# Descriptive statistics for the macroeconomic surprise variables
library(psych)
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
## The following object is masked from 'package:car':
##
## logit
macro_vars <- apt_data %>%
  dplyr::select(surprise_birate, surprise_cpi, surprise_kurs)
describe(macro_vars)
## vars n mean sd median trimmed mad min max
## surprise_birate 1 104 0.00 0.16 0.00 -0.01 0.00 -0.25 0.63
## surprise_cpi 2 104 -0.02 0.33 -0.03 -0.03 0.31 -0.99 1.26
## surprise_kurs 3 104 26.51 272.41 15.68 32.73 190.96 -855.85 1412.00
## range skew kurtosis se
## surprise_birate 0.88 1.10 2.86 0.02
## surprise_cpi 2.25 0.59 1.81 0.03
## surprise_kurs 2267.85 0.61 6.23 26.71
summary(macro_vars)
## surprise_birate surprise_cpi surprise_kurs
## Min. :-0.250025 Min. :-0.98998 Min. :-855.85
## 1st Qu.: 0.000000 1st Qu.:-0.24248 1st Qu.: -92.02
## Median : 0.000000 Median :-0.02996 Median : 15.68
## Mean : 0.004815 Mean :-0.01692 Mean : 26.51
## 3rd Qu.: 0.000000 3rd Qu.: 0.17250 3rd Qu.: 159.20
## Max. : 0.625038 Max. : 1.25997 Max. :1412.00
# Stationarity tests
# Variables to be tested for stationarity
variables_to_test <- c(
  "excess_jkse", "excess_bond", "excess_btc", "excess_gold",
  "surprise_birate", "surprise_cpi", "surprise_kurs"
)
# Run the Phillips-Perron (PP) test for each variable
stationarity_results <- lapply(variables_to_test, function(var_name) {
  if (var_name %in% colnames(apt_data)) {
    x <- apt_data[[var_name]]
    x <- as.numeric(x)
    x <- na.omit(x)
    # Run pp.test inside tryCatch to avoid stopping on errors
    test_result <- tryCatch(
      pp.test(x, alternative = "stationary"),
      error = function(e) NULL
    )
    if (!is.null(test_result)) {
      data.frame(
        Variable = var_name,
        PP_Statistic = test_result$statistic,
        P_Value = test_result$p.value,
        Stationary = ifelse(test_result$p.value < 0.05, "Yes", "No")
      )
    } else {
      data.frame(
        Variable = var_name,
        PP_Statistic = NA,
        P_Value = NA,
        Stationary = "Error or non-numeric data"
      )
    }
  } else {
    data.frame(
      Variable = var_name,
      PP_Statistic = NA,
      P_Value = NA,
      Stationary = "Variable not found"
    )
  }
})
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
## Warning in pp.test(x, alternative = "stationary"): p-value smaller than printed
## p-value
# Combine the results
stationarity_summary <- do.call(rbind, stationarity_results)
print(stationarity_summary)
## Variable PP_Statistic P_Value Stationary
## Dickey-Fuller Z(alpha) excess_jkse -65.85094 0.01 Yes
## Dickey-Fuller Z(alpha)1 excess_bond -102.95927 0.01 Yes
## Dickey-Fuller Z(alpha)2 excess_btc -106.53823 0.01 Yes
## Dickey-Fuller Z(alpha)3 excess_gold -72.91623 0.01 Yes
## Dickey-Fuller Z(alpha)4 surprise_birate -48.99106 0.01 Yes
## Dickey-Fuller Z(alpha)5 surprise_cpi -111.55318 0.01 Yes
## Dickey-Fuller Z(alpha)6 surprise_kurs -61.99687 0.01 Yes
# Interpretation:
# A p-value < 0.05 means we reject the null hypothesis of a unit root (non-stationarity)
# and conclude that the series is stationary.
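# Optional cross-check (sketch; assumes the same columns in apt_data): the ADF test from
# the same 'tseries' package shares the unit-root null, so agreement with the PP results
# strengthens the stationarity conclusion.
adf_check <- sapply(variables_to_test, function(v) {
  tryCatch(adf.test(na.omit(as.numeric(apt_data[[v]])), alternative = "stationary")$p.value,
           error = function(e) NA_real_)
})
print(round(adf_check, 3))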
# 3. EGARCH(1,1)-X fitting function with distribution rule
fit_egarchx <- function(returns, exog_matrix = NULL, asset_name = NULL) {
  dist_model <- ifelse(tolower(asset_name) == "gold_return", "norm", "sstd")
  # EGARCH model specification
  spec <- ugarchspec(
    variance.model = list(
      model = "eGARCH",
      garchOrder = c(1, 1),
      external.regressors = exog_matrix
    ),
    mean.model = list(
      armaOrder = c(1, 0),
      include.mean = TRUE,
      external.regressors = exog_matrix
    ),
    distribution.model = dist_model
  )
  # Estimate the model
  tryCatch(
    ugarchfit(
      spec,
      data = returns,
      solver = "hybrid",
      fit.control = list(scale = TRUE)
    ),
    error = function(e) {
      message("ugarchfit error on ", asset_name, ": ", conditionMessage(e))
      return(NULL)
    }
  )
}
# 4. Fit each asset
assets <- c("gold_return", "btc_return", "jkse_return", "bond_return")
# Here: mxreg1 = surprise_birate, mxreg2 = surprise_kurs, mxreg3 = surprise_cpi
exog_mat <- as.matrix(
  apt_data %>% dplyr::select(surprise_birate, surprise_kurs, surprise_cpi)
)
# Fit the model for each asset
fits <- lapply(assets, function(a) {
  fit_egarchx(apt_data[[a]], exog_mat, asset_name = a)
})
names(fits) <- assets
print(fits)
## $gold_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : norm
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu 0.006399 0.000000 27716.6 0
## ar1 0.331473 0.000141 2358.0 0
## mxreg1 0.039969 0.000016 2560.1 0
## mxreg2 0.000044 0.000000 4255.6 0
## mxreg3 -0.006859 0.000001 -7865.3 0
## omega -1.453529 0.000167 -8723.4 0
## alpha1 0.178093 0.000141 1266.7 0
## beta1 0.795904 0.000013 59212.5 0
## gamma1 -0.634272 0.000095 -6650.2 0
## vxreg1 -0.375513 0.000064 -5868.3 0
## vxreg2 0.000234 0.000000 22535.0 0
## vxreg3 0.394621 0.000237 1667.2 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.006399 0.000015 427.80 0
## ar1 0.331473 0.000491 675.53 0
## mxreg1 0.039969 0.000051 788.33 0
## mxreg2 0.000044 0.000000 436.36 0
## mxreg3 -0.006859 0.000009 -791.87 0
## omega -1.453529 0.002182 -666.28 0
## alpha1 0.178093 0.000158 1124.64 0
## beta1 0.795904 0.000270 2951.85 0
## gamma1 -0.634272 0.001158 -547.66 0
## vxreg1 -0.375513 0.000351 -1068.90 0
## vxreg2 0.000234 0.000000 816.43 0
## vxreg3 0.394621 0.000589 669.86 0
##
## LogLikelihood : 232.0345
##
## Information Criteria
## ------------------------------------
##
## Akaike -4.2314
## Bayes -3.9263
## Shibata -4.2546
## Hannan-Quinn -4.1078
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.0491 0.8246
## Lag[2*(p+q)+(p+q)-1][2] 1.1100 0.6725
## Lag[4*(p+q)+(p+q)-1][5] 2.2759 0.6311
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.006987 0.9334
## Lag[2*(p+q)+(p+q)-1][5] 1.213559 0.8097
## Lag[4*(p+q)+(p+q)-1][9] 2.706393 0.8063
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.6988 0.500 2.000 0.4032
## ARCH Lag[5] 1.8983 1.440 1.667 0.4942
## ARCH Lag[7] 2.9880 2.315 1.543 0.5158
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: 4.8219
## Individual Statistics:
## mu 0.02004
## ar1 0.02119
## mxreg1 0.02111
## mxreg2 0.02031
## mxreg3 0.01990
## omega 0.01946
## alpha1 0.02151
## beta1 0.26484
## gamma1 0.02178
## vxreg1 0.01983
## vxreg2 0.02036
## vxreg3 0.01849
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 2.69 2.96 3.51
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 0.24097 0.8101
## Negative Sign Bias 0.21700 0.8287
## Positive Sign Bias 0.20758 0.8360
## Joint Effect 0.09025 0.9930
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 15.62 0.6828
## 2 30 35.04 0.2033
## 3 40 41.38 0.3670
## 4 50 53.69 0.2993
##
##
## Elapsed time : 0.723757
##
##
## $btc_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : sstd
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu -0.055685 0.000021 -2687.307 0
## ar1 0.387728 0.000207 1872.133 0
## mxreg1 -0.049824 0.002489 -20.022 0
## mxreg2 -0.000053 0.000000 -140.217 0
## mxreg3 -0.048510 0.002947 -16.459 0
## omega -0.124739 0.000053 -2367.664 0
## alpha1 -1.376329 0.001017 -1353.303 0
## beta1 0.903576 0.000430 2102.230 0
## gamma1 -1.046768 0.000162 -6453.323 0
## vxreg1 -0.299846 0.001880 -159.516 0
## vxreg2 -0.000408 0.000000 -5328.537 0
## vxreg3 -0.388068 0.000193 -2012.231 0
## skew 0.664115 0.000236 2816.547 0
## shape 2.159145 0.002131 1013.358 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu -0.055685 0.000155 -358.87677 0.00000
## ar1 0.387728 0.000021 18233.86133 0.00000
## mxreg1 -0.049824 0.065295 -0.76306 0.44543
## mxreg2 -0.000053 0.000001 -41.01746 0.00000
## mxreg3 -0.048510 0.069130 -0.70171 0.48286
## omega -0.124739 0.000177 -706.52252 0.00000
## alpha1 -1.376329 0.030077 -45.75985 0.00000
## beta1 0.903576 0.003126 289.07837 0.00000
## gamma1 -1.046768 0.002406 -435.11283 0.00000
## vxreg1 -0.299846 0.007840 -38.24497 0.00000
## vxreg2 -0.000408 0.000003 -157.07293 0.00000
## vxreg3 -0.388068 0.002742 -141.55075 0.00000
## skew 0.664115 0.002082 318.99749 0.00000
## shape 2.159145 0.064727 33.35748 0.00000
##
## LogLikelihood : 25.28327
##
## Information Criteria
## ------------------------------------
##
## Akaike -0.21699
## Bayes 0.13899
## Shibata -0.24781
## Hannan-Quinn -0.07277
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 1.355 0.2445
## Lag[2*(p+q)+(p+q)-1][2] 2.749 0.0573
## Lag[4*(p+q)+(p+q)-1][5] 3.801 0.2589
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.0009317 0.9756
## Lag[2*(p+q)+(p+q)-1][5] 0.3377119 0.9796
## Lag[4*(p+q)+(p+q)-1][9] 0.4248927 0.9991
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.02562 0.500 2.000 0.8728
## ARCH Lag[5] 0.05894 1.440 1.667 0.9938
## ARCH Lag[7] 0.08832 2.315 1.543 0.9995
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: 3.0314
## Individual Statistics:
## mu 0.01597
## ar1 0.01634
## mxreg1 0.01554
## mxreg2 0.01624
## mxreg3 0.01709
## omega 0.01595
## alpha1 0.01601
## beta1 0.01603
## gamma1 0.01602
## vxreg1 0.01616
## vxreg2 0.01602
## vxreg3 0.01588
## skew 0.01591
## shape 0.01595
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 3.08 3.34 3.9
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 0.5775 0.5649
## Negative Sign Bias 0.1306 0.8963
## Positive Sign Bias 0.1892 0.8503
## Joint Effect 0.4683 0.9258
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 14.46 0.7562
## 2 30 33.31 0.2655
## 3 40 34.46 0.6769
## 4 50 56.58 0.2131
##
##
## Elapsed time : 2.673613
##
##
## $jkse_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : sstd
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu 0.000410 0.000000 5804.068 0
## ar1 0.245304 0.000082 2988.312 0
## mxreg1 0.007651 0.000045 170.836 0
## mxreg2 -0.000071 0.000000 -1474.595 0
## mxreg3 -0.015171 0.000008 -1927.433 0
## omega -0.263280 0.000108 -2446.027 0
## alpha1 -0.277770 0.000068 -4088.559 0
## beta1 0.962923 0.000381 2527.223 0
## gamma1 -0.286556 0.000101 -2833.834 0
## vxreg1 -0.450730 0.000142 -3171.449 0
## vxreg2 0.000380 0.000003 135.308 0
## vxreg3 0.222168 0.001988 111.732 0
## skew 0.553738 0.006930 79.903 0
## shape 50.730228 0.453655 111.826 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.000410 0.000002 258.7109 0
## ar1 0.245304 0.004700 52.1924 0
## mxreg1 0.007651 0.000311 24.5957 0
## mxreg2 -0.000071 0.000008 -8.7675 0
## mxreg3 -0.015171 0.000026 -577.1976 0
## omega -0.263280 0.014879 -17.6942 0
## alpha1 -0.277770 0.028138 -9.8717 0
## beta1 0.962923 0.056291 17.1061 0
## gamma1 -0.286556 0.007482 -38.3007 0
## vxreg1 -0.450730 0.051458 -8.7592 0
## vxreg2 0.000380 0.000028 13.5475 0
## vxreg3 0.222168 0.013402 16.5774 0
## skew 0.553738 0.070485 7.8561 0
## shape 50.730228 7.266975 6.9809 0
##
## LogLikelihood : 249.2664
##
## Information Criteria
## ------------------------------------
##
## Akaike -4.5244
## Bayes -4.1684
## Shibata -4.5552
## Hannan-Quinn -4.3801
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.3711 0.5424
## Lag[2*(p+q)+(p+q)-1][2] 0.5874 0.9398
## Lag[4*(p+q)+(p+q)-1][5] 1.3199 0.8881
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 2.069 0.1503
## Lag[2*(p+q)+(p+q)-1][5] 2.703 0.4641
## Lag[4*(p+q)+(p+q)-1][9] 3.636 0.6510
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.5574 0.500 2.000 0.4553
## ARCH Lag[5] 1.1995 1.440 1.667 0.6748
## ARCH Lag[7] 1.7317 2.315 1.543 0.7739
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: NaN
## Individual Statistics:
## mu 0.03691
## ar1 0.03696
## mxreg1 0.03711
## mxreg2 0.03743
## mxreg3 0.03742
## omega 0.03690
## alpha1 0.03707
## beta1 NaN
## gamma1 0.03635
## vxreg1 0.03697
## vxreg2 0.03765
## vxreg3 0.03608
## skew 0.03200
## shape 0.03635
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 3.08 3.34 3.9
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 1.8030 0.07442 *
## Negative Sign Bias 0.0121 0.99037
## Positive Sign Bias 1.7395 0.08505 *
## Joint Effect 4.4766 0.21439
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 19.46 0.4276
## 2 30 35.62 0.1850
## 3 40 37.54 0.5366
## 4 50 57.54 0.1885
##
##
## Elapsed time : 1.839892
##
##
## $bond_return
##
## *---------------------------------*
## * GARCH Model Fit *
## *---------------------------------*
##
## Conditional Variance Dynamics
## -----------------------------------
## GARCH Model : eGARCH(1,1)
## Mean Model : ARFIMA(1,0,0)
## Distribution : sstd
##
## Optimal Parameters
## ------------------------------------
## Estimate Std. Error t value Pr(>|t|)
## mu 0.001902 0.000001 3573.024 0
## ar1 -0.020583 0.000005 -4467.015 0
## mxreg1 -0.000399 0.000014 -28.983 0
## mxreg2 -0.000059 0.000000 -3861.484 0
## mxreg3 -0.005680 0.000003 -1984.392 0
## omega -1.854290 0.000271 -6837.150 0
## alpha1 -0.755627 0.000011 -66264.271 0
## beta1 0.748407 0.000101 7396.339 0
## gamma1 -1.049795 0.000354 -2962.330 0
## vxreg1 0.812749 0.000391 2080.245 0
## vxreg2 -0.002736 0.000001 -4116.540 0
## vxreg3 -0.221540 0.002107 -105.133 0
## skew 1.025567 0.002573 398.595 0
## shape 3.039306 0.000554 5481.277 0
##
## Robust Standard Errors:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.001902 0.000003 709.595 0
## ar1 -0.020583 0.000001 -14677.695 0
## mxreg1 -0.000399 0.000004 -88.772 0
## mxreg2 -0.000059 0.000000 -596.198 0
## mxreg3 -0.005680 0.000009 -625.531 0
## omega -1.854290 0.001305 -1421.307 0
## alpha1 -0.755627 0.000168 -4497.847 0
## beta1 0.748407 0.002048 365.452 0
## gamma1 -1.049795 0.001522 -689.578 0
## vxreg1 0.812749 0.001321 615.309 0
## vxreg2 -0.002736 0.000003 -938.133 0
## vxreg3 -0.221540 0.001971 -112.419 0
## skew 1.025567 0.006601 155.372 0
## shape 3.039306 0.001132 2686.048 0
##
## LogLikelihood : 263.3789
##
## Information Criteria
## ------------------------------------
##
## Akaike -4.7957
## Bayes -4.4398
## Shibata -4.8266
## Hannan-Quinn -4.6515
##
## Weighted Ljung-Box Test on Standardized Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 2.602 0.10670
## Lag[2*(p+q)+(p+q)-1][2] 2.927 0.04058
## Lag[4*(p+q)+(p+q)-1][5] 3.299 0.35987
## d.o.f=1
## H0 : No serial correlation
##
## Weighted Ljung-Box Test on Standardized Squared Residuals
## ------------------------------------
## statistic p-value
## Lag[1] 0.5281 0.4674
## Lag[2*(p+q)+(p+q)-1][5] 2.1658 0.5798
## Lag[4*(p+q)+(p+q)-1][9] 3.1202 0.7390
## d.o.f=2
##
## Weighted ARCH LM Tests
## ------------------------------------
## Statistic Shape Scale P-Value
## ARCH Lag[3] 0.06794 0.500 2.000 0.7944
## ARCH Lag[5] 0.81490 1.440 1.667 0.7886
## ARCH Lag[7] 1.19566 2.315 1.543 0.8798
##
## Nyblom stability test
## ------------------------------------
## Joint Statistic: 4.4131
## Individual Statistics:
## mu 0.09466
## ar1 0.09307
## mxreg1 0.06152
## mxreg2 0.09954
## mxreg3 0.09548
## omega 0.23234
## alpha1 0.09315
## beta1 0.14217
## gamma1 0.07518
## vxreg1 0.08963
## vxreg2 0.09587
## vxreg3 0.05216
## skew 0.13174
## shape 0.09455
##
## Asymptotic Critical Values (10% 5% 1%)
## Joint Statistic: 3.08 3.34 3.9
## Individual Statistic: 0.35 0.47 0.75
##
## Sign Bias Test
## ------------------------------------
## t-value prob sig
## Sign Bias 0.1381 0.8904
## Negative Sign Bias 1.0098 0.3151
## Positive Sign Bias 0.2797 0.7803
## Joint Effect 1.4832 0.6862
##
##
## Adjusted Pearson Goodness-of-Fit Test:
## ------------------------------------
## group statistic p-value(g-1)
## 1 20 20.62 0.3584
## 2 30 36.19 0.1680
## 3 40 36.77 0.5721
## 4 50 50.81 0.4022
##
##
## Elapsed time : 0.7816072
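# A compact way to compare fit quality across assets (sketch; assumes all fits converged
# and none are NULL): rugarch's infocriteria() returns the Akaike/Bayes/Shibata/Hannan-Quinn
# criteria per fit, which is easier to scan than the full summaries above.
ic_table <- sapply(fits, function(f) if (!is.null(f)) infocriteria(f)[, 1] else rep(NA_real_, 4))
print(round(ic_table, 4))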
extract_egarchx_stats <- function(model, asset_name) {
  if (is.null(model)) {
    return(data.frame(
      Asset = asset_name, Convergence = "Failed",
      Mean_Intercept = NA, Mean_Intercept_SE = NA, Mean_Intercept_p = NA,
      Beta_BIRate = NA, Beta_BIRate_SE = NA, Beta_BIRate_p = NA,
      Beta_Exchange = NA, Beta_Exchange_SE = NA, Beta_Exchange_p = NA,
      Beta_Inflation = NA, Beta_Inflation_SE = NA, Beta_Inflation_p = NA,
      AR1 = NA, AR1_SE = NA, AR1_p = NA,
      GARCH_Alpha1 = NA, GARCH_Alpha1_SE = NA, GARCH_Alpha1_p = NA,
      GARCH_Beta1 = NA, GARCH_Beta1_SE = NA, GARCH_Beta1_p = NA,
      Persistence = NA, Log_Likelihood = NA,
      stringsAsFactors = FALSE
    ))
  }
  # convergence slot
  conv_code <- tryCatch(model@fit$convergence, error = function(e) NA)
  conv <- ifelse(!is.na(conv_code) && conv_code == 0, "Success", "Failed")
  coefs <- tryCatch(coef(model), error = function(e) NULL)
  ll <- tryCatch(model@fit$LLH, error = function(e) NA)
  matc <- tryCatch(model@fit$matcoef, error = function(e) NULL)
  if (conv == "Failed" || is.null(coefs) || is.null(matc)) {
    return(data.frame(
      Asset = asset_name, Convergence = conv,
      Mean_Intercept = NA, Mean_Intercept_SE = NA, Mean_Intercept_p = NA,
      Beta_BIRate = NA, Beta_BIRate_SE = NA, Beta_BIRate_p = NA,
      Beta_Exchange = NA, Beta_Exchange_SE = NA, Beta_Exchange_p = NA,
      Beta_Inflation = NA, Beta_Inflation_SE = NA, Beta_Inflation_p = NA,
      AR1 = NA, AR1_SE = NA, AR1_p = NA,
      GARCH_Alpha1 = NA, GARCH_Alpha1_SE = NA, GARCH_Alpha1_p = NA,
      GARCH_Beta1 = NA, GARCH_Beta1_SE = NA, GARCH_Beta1_p = NA,
      Persistence = NA, Log_Likelihood = ll,
      stringsAsFactors = FALSE
    ))
  }
  # helper for safe coefficient extraction
  safe_extract <- function(name, col) {
    if (!is.null(matc) && name %in% rownames(matc)) {
      return(matc[name, col])
    } else {
      return(NA_real_)
    }
  }
  data.frame(
    Asset = asset_name,
    Convergence = conv,
    # Mean equation
    Mean_Intercept = safe_extract("mu", 1),
    Mean_Intercept_SE = safe_extract("mu", 2),
    Mean_Intercept_p = safe_extract("mu", 4),
    Beta_BIRate = safe_extract("mxreg1", 1),
    Beta_BIRate_SE = safe_extract("mxreg1", 2),
    Beta_BIRate_p = safe_extract("mxreg1", 4),
    Beta_Exchange = safe_extract("mxreg2", 1),
    Beta_Exchange_SE = safe_extract("mxreg2", 2),
    Beta_Exchange_p = safe_extract("mxreg2", 4),
    Beta_Inflation = safe_extract("mxreg3", 1),
    Beta_Inflation_SE = safe_extract("mxreg3", 2),
    Beta_Inflation_p = safe_extract("mxreg3", 4),
    # AR term
    AR1 = safe_extract("ar1", 1),
    AR1_SE = safe_extract("ar1", 2),
    AR1_p = safe_extract("ar1", 4),
    # Variance model parameters
    GARCH_Alpha1 = safe_extract("alpha1", 1),
    GARCH_Alpha1_SE = safe_extract("alpha1", 2),
    GARCH_Alpha1_p = safe_extract("alpha1", 4),
    GARCH_Beta1 = safe_extract("beta1", 1),
    GARCH_Beta1_SE = safe_extract("beta1", 2),
    GARCH_Beta1_p = safe_extract("beta1", 4),
    # Derived stats
    Persistence = sum(c(safe_extract("alpha1", 1), safe_extract("beta1", 1)), na.rm = TRUE),
    Log_Likelihood = ll,
    stringsAsFactors = FALSE
  )
}
egarchx_stats <- do.call(rbind, lapply(names(fits), function(asset) {
extract_egarchx_stats(fits[[asset]], asset)
}))
print(egarchx_stats)
## Asset Convergence Mean_Intercept Mean_Intercept_SE Mean_Intercept_p
## 1 gold_return Success 0.0063988406 2.308665e-07 0
## 2 btc_return Success -0.0556845360 2.072131e-05 0
## 3 jkse_return Success 0.0004103234 7.069582e-08 0
## 4 bond_return Success 0.0019020401 5.323334e-07 0
## Beta_BIRate Beta_BIRate_SE Beta_BIRate_p Beta_Exchange Beta_Exchange_SE
## 1 0.0399691524 1.561209e-05 0 4.380247e-05 1.029287e-08
## 2 -0.0498240619 2.488526e-03 0 -5.338921e-05 3.807621e-07
## 3 0.0076509014 4.478519e-05 0 -7.099364e-05 4.814451e-08
## 4 -0.0003987212 1.375705e-05 0 -5.948425e-05 1.540451e-08
## Beta_Exchange_p Beta_Inflation Beta_Inflation_SE Beta_Inflation_p AR1
## 1 0 -0.006859032 8.720677e-07 0 0.33147318
## 2 0 -0.048509767 2.947300e-03 0 0.38772783
## 3 0 -0.015170548 7.870858e-06 0 0.24530363
## 4 0 -0.005680366 2.862523e-06 0 -0.02058317
## AR1_SE AR1_p GARCH_Alpha1 GARCH_Alpha1_SE GARCH_Alpha1_p GARCH_Beta1
## 1 1.405742e-04 0 0.1780932 1.405920e-04 0 0.7959045
## 2 2.071048e-04 0 -1.3763285 1.017014e-03 0 0.9035757
## 3 8.208769e-05 0 -0.2777696 6.793828e-05 0 0.9629232
## 4 4.607812e-06 0 -0.7556268 1.140323e-05 0 0.7484068
## GARCH_Beta1_SE GARCH_Beta1_p Persistence Log_Likelihood
## 1 1.344149e-05 0 0.973997722 232.03455
## 2 4.298176e-04 0 -0.472752820 25.28327
## 3 3.810202e-04 0 0.685153527 249.26635
## 4 1.011861e-04 0 -0.007219951 263.37887
# 5. Forecast mu and sigma (1-step ahead)
last_exog <- matrix(tail(exog_mat, 1), nrow = 1) # 1 x n_mxreg
get_forecast <- function(fit, exog) {
  if (is.null(fit)) return(list(mu = NA_real_, sigma = NA_real_))
  f <- tryCatch(
    ugarchforecast(fit, n.ahead = 1,
                   external.forecasts = list(mregfor = exog, vregfor = exog)),
    error = function(e) {
      message("ugarchforecast error: ", conditionMessage(e))
      return(NULL)
    }
  )
  if (is.null(f)) return(list(mu = NA_real_, sigma = NA_real_))
  # forecast slots: f@forecast$seriesFor, f@forecast$sigmaFor
  list(
    mu = as.numeric(f@forecast$seriesFor[1, ]),
    sigma = as.numeric(f@forecast$sigmaFor[1, ])
  )
}
forecasts <- lapply(fits, get_forecast, exog = last_exog)
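# Collect the one-step-ahead forecasts into a small table for inspection (sketch;
# rows with NA indicate a failed fit or forecast).
forecast_table <- data.frame(
  Asset = names(forecasts),
  mu_forecast = sapply(forecasts, `[[`, "mu"),
  sigma_forecast = sapply(forecasts, `[[`, "sigma")
)
print(forecast_table)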
# Use the pre-computed weights as w_opt
all_weights <- read_excel("Weights.xlsx", sheet = "Sentimen")
w_koleris <- all_weights$Koleris
w_melankolis <- all_weights$Melankolis
w_sanguinis <- all_weights$Sanguinis
w_plegmatis <- all_weights$Plegmatis
# Equal-weight baseline
w_eq <- rep(1/4, length(w_koleris))
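# Sanity check (assumption: each weight column in Weights.xlsx lists the four assets in
# the same order as 'assets' and should sum to one):
sapply(list(Koleris = w_koleris, Melankolis = w_melankolis,
            Sanguinis = w_sanguinis, Plegmatis = w_plegmatis, Equal = w_eq), sum)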
# 6. Evaluate portfolio performance for each profile
return_matrix <- do.call(cbind, lapply(assets, function(a) apt_data[[a]]))
colnames(return_matrix) <- assets
# helper functions (already available in the script; reused here)
calculate_portfolio_return <- function(w, ret_matrix) {
  sum(w * colMeans(ret_matrix, na.rm = TRUE))
}
calculate_portfolio_var <- function(w, ret_matrix) {
  covm <- cov(ret_matrix, use = "pairwise.complete.obs")
  sqrt(as.numeric(t(w) %*% covm %*% w))
}
eval_portfolio <- function(w, mu, cov_matrix, alpha = 0.05) {
  mu_p <- sum(w * mu)
  sigma_p <- sqrt(as.numeric(t(w) %*% cov_matrix %*% w))
  VaR_p <- mu_p + qnorm(alpha) * sigma_p
  ES_p <- mu_p + (dnorm(qnorm(alpha)) / alpha) * sigma_p
  return(c(mu_p, sigma_p, VaR_p, ES_p))
}
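# Note on sign conventions (sketch; does not alter the results reported below): VaR_p
# above is the alpha-quantile of portfolio returns (usually negative), while ES_p is
# reported as a positive magnitude. A return-based Gaussian expected shortfall, which
# lies below VaR_p in the left tail, would instead be:
es_gaussian <- function(mu_p, sigma_p, alpha = 0.05) {
  mu_p - (dnorm(qnorm(alpha)) / alpha) * sigma_p
}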
mu_hist <- colMeans(return_matrix, na.rm = TRUE)
cov_matrix <- cov(return_matrix, use = "pairwise.complete.obs")
# Evaluate each profile
perf_koleris <- eval_portfolio(w_koleris, mu_hist, cov_matrix)
perf_melankolis <- eval_portfolio(w_melankolis, mu_hist, cov_matrix)
perf_sanguinis <- eval_portfolio(w_sanguinis, mu_hist, cov_matrix)
perf_plegmatis <- eval_portfolio(w_plegmatis, mu_hist, cov_matrix)
perf_eq <- eval_portfolio(w_eq, mu_hist, cov_matrix)
comparison <- data.frame(
Portfolio = c("Koleris", "Melankolis", "Sanguinis", "Plegmatis", "Equal-Weighted"),
Mean_Return = c(perf_koleris[1], perf_melankolis[1], perf_sanguinis[1], perf_plegmatis[1], perf_eq[1]),
Volatility = c(perf_koleris[2], perf_melankolis[2], perf_sanguinis[2], perf_plegmatis[2], perf_eq[2]),
VaR_5pct = c(perf_koleris[3], perf_melankolis[3], perf_sanguinis[3], perf_plegmatis[3], perf_eq[3]),
ES_5pct = c(perf_koleris[4], perf_melankolis[4], perf_sanguinis[4], perf_plegmatis[4], perf_eq[4])
)
print(comparison)
## Portfolio Mean_Return Volatility VaR_5pct ES_5pct
## 1 Koleris -0.012491134 0.30559950 -0.5151576 0.6178729
## 2 Melankolis -0.002037066 0.14005279 -0.2324034 0.2868516
## 3 Sanguinis -0.002964917 0.13144320 -0.2191697 0.2681647
## 4 Plegmatis 0.002709828 0.08350983 -0.1346516 0.1749666
## 5 Equal-Weighted -0.005582913 0.17948072 -0.3008024 0.3646343
# Per-asset weight table (for visualization)
weights_df <- data.frame(
Asset = assets, # "gold_return","btc_return","jkse_return","bond_return"
Koleris = w_koleris,
Melankolis = w_melankolis,
Sanguinis = w_sanguinis,
Plegmatis = w_plegmatis,
Equal = w_eq,
stringsAsFactors = FALSE
)
# Rename assets for readability (optional)
weights_df$Asset_label <- c("Gold","Bitcoin","JKSE","Bond10Y")
# Melt for plotting
library(reshape2)
melted_weights <- melt(weights_df[, c("Asset_label","Koleris","Melankolis","Sanguinis","Plegmatis","Equal")],
id.vars = "Asset_label", variable.name = "Profile", value.name = "Weight")
# Bar chart of weights
ggplot(melted_weights, aes(x = Asset_label, y = Weight, fill = Profile)) +
  geom_bar(stat = "identity", position = "dodge") +
  geom_text(aes(label = scales::percent(Weight, accuracy = 0.01)),
            position = position_dodge(width = 0.9), vjust = -0.3, size = 3) +
  labs(title = "Weight Comparison by Temperament Profile",
       x = "Asset", y = "Weight", fill = "Profile") +
  theme_minimal()

# 7. Portfolio backtesting & cumulative return comparison
# Compute portfolio returns for each profile
apt_data$portfolio_koleris <- as.numeric(return_matrix %*% w_koleris)
apt_data$portfolio_melankolis <- as.numeric(return_matrix %*% w_melankolis)
apt_data$portfolio_sanguinis <- as.numeric(return_matrix %*% w_sanguinis)
apt_data$portfolio_plegmatis <- as.numeric(return_matrix %*% w_plegmatis)
apt_data$portfolio_equal <- as.numeric(return_matrix %*% w_eq)
# Cumulative returns
egarchx_cum_kol <- cumprod(1 + apt_data$portfolio_koleris)
egarchx_cum_mel <- cumprod(1 + apt_data$portfolio_melankolis)
egarchx_cum_san <- cumprod(1 + apt_data$portfolio_sanguinis)
egarchx_cum_ple <- cumprod(1 + apt_data$portfolio_plegmatis)
eq_cum <- cumprod(1 + apt_data$portfolio_equal)
jkse_cumulative <- cumprod(1 + apt_data$jkse_return)
bond_cumulative <- cumprod(1 + apt_data$bond_return)
btc_cumulative <- cumprod(1 + apt_data$btc_return)
gold_cumulative <- cumprod(1 + apt_data$gold_return)
dates <- as.Date(apt_data$date)
plot(dates, egarchx_cum_kol, type = "l", col = "red", lwd = 2,
     xlab = "Date", ylab = "Cumulative Return (Start = 1)",
     main = "Cumulative Performance Comparison (Temperament Profiles)",
     ylim = c(
       min(0.5, min(egarchx_cum_kol, egarchx_cum_mel, egarchx_cum_san, egarchx_cum_ple, eq_cum, na.rm = TRUE) * 0.9),
       max(egarchx_cum_kol, egarchx_cum_mel, egarchx_cum_san, egarchx_cum_ple, eq_cum, btc_cumulative, gold_cumulative, na.rm = TRUE) * 1.1
     ))
lines(dates, egarchx_cum_mel, col = "blue", lwd = 2, lty = 2)
lines(dates, egarchx_cum_san, col = "darkgreen", lwd = 2, lty = 3)
lines(dates, egarchx_cum_ple, col = "purple", lwd = 2, lty = 4)
lines(dates, eq_cum, col = "black", lwd = 2, lty = 5)
# add individual asset lines (optional)
lines(dates, jkse_cumulative, col = "darkgray", lwd = 1)
lines(dates, bond_cumulative, col = "brown", lwd = 1)
lines(dates, btc_cumulative, col = "orange", lwd = 1)
lines(dates, gold_cumulative, col = "goldenrod", lwd = 1)
legend("topleft",
       legend = c("Koleris","Melankolis","Sanguinis","Plegmatis","Equal-Weighted","JKSE","Bond","BTC","Gold"),
       col = c("red","blue","darkgreen","purple","black","darkgray","brown","orange","goldenrod"),
       lwd = c(2,2,2,2,2,1,1,1,1),
       lty = c(1,2,3,4,5,1,1,1,1),
       cex = 0.7)

# Rolling metrics (24-month) for each profile, the equal-weight portfolio, and individual assets
window <- 24
assets_returns <- cbind(
apt_data$portfolio_koleris,
apt_data$portfolio_melankolis,
apt_data$portfolio_sanguinis,
apt_data$portfolio_plegmatis,
apt_data$portfolio_equal,
apt_data$jkse_return,
apt_data$bond_return,
apt_data$btc_return,
apt_data$gold_return
)
colnames(assets_returns) <- c("Koleris","Melankolis","Sanguinis","Plegmatis","Equal","JKSE","Bond","BTC","Gold")
nrows_rr <- nrow(assets_returns) - window
rolling_returns <- rolling_volatility <- rolling_sharpe <- rolling_var <-
  matrix(NA, nrow = nrows_rr, ncol = ncol(assets_returns))
for (i in 1:nrows_rr) {
  window_data <- assets_returns[i:(i + window - 1), , drop = FALSE]
  rolling_returns[i, ] <- colMeans(window_data, na.rm = TRUE)
  rolling_volatility[i, ] <- apply(window_data, 2, sd, na.rm = TRUE)
  rolling_sharpe[i, ] <- rolling_returns[i, ] / rolling_volatility[i, ]
  rolling_var[i, ] <- apply(window_data, 2, function(x) -quantile(x, 0.05, na.rm = TRUE))
}
colnames(rolling_sharpe) <- colnames(assets_returns)
colnames(rolling_var) <- colnames(assets_returns)
colnames(rolling_volatility) <- colnames(assets_returns)
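# An equivalent rolling computation via zoo::rollapply (sketch; zoo is already loaded as
# an xts dependency). Right-aligned 24-month windows give the same rolling means with
# less index bookkeeping:
rolling_returns_zoo <- zoo::rollapply(assets_returns, width = window, FUN = colMeans,
                                      na.rm = TRUE, by.column = FALSE, align = "right")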
# Plot rolling Sharpe for two examples (Koleris vs Equal)
par(mfrow = c(2, 1))
plot(dates[(window + 1):length(dates)], rolling_sharpe[, "Koleris"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Sharpe Ratio",
main = "Rolling 24-Month Sharpe Ratio: Koleris vs Equal",
ylim = range(rolling_sharpe[, c("Koleris","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_sharpe[, "Equal"], col = "black", lwd = 2, lty = 2)
abline(h = 0, lty = 2, col = "gray")
legend("bottomright", legend = c("Koleris","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)
plot(dates[(window + 1):length(dates)], rolling_var[, "Koleris"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Value-at-Risk (95%)",
main = "Rolling 24-Month VaR (95%): Koleris vs Equal",
ylim = range(rolling_var[, c("Koleris","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_var[, "Equal"], col = "black", lwd = 2, lty = 2)
legend("topright", legend = c("Koleris","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)

par(mfrow = c(2, 1))
plot(dates[(window + 1):length(dates)], rolling_sharpe[, "Melankolis"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Sharpe Ratio",
main = "Rolling 24-Month Sharpe Ratio: Melankolis vs Equal",
ylim = range(rolling_sharpe[, c("Melankolis","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_sharpe[, "Equal"], col = "black", lwd = 2, lty = 2)
abline(h = 0, lty = 2, col = "gray")
legend("bottomright", legend = c("Melankolis","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)
plot(dates[(window + 1):length(dates)], rolling_var[, "Melankolis"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Value-at-Risk (95%)",
main = "Rolling 24-Month VaR (95%): Melankolis vs Equal",
ylim = range(rolling_var[, c("Melankolis","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_var[, "Equal"], col = "black", lwd = 2, lty = 2)
legend("topright", legend = c("Melankolis","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)

par(mfrow = c(2, 1))
plot(dates[(window + 1):length(dates)], rolling_sharpe[, "Sanguinis"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Sharpe Ratio",
main = "Rolling 24-Month Sharpe Ratio: Sanguinis vs Equal",
ylim = range(rolling_sharpe[, c("Sanguinis","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_sharpe[, "Equal"], col = "black", lwd = 2, lty = 2)
abline(h = 0, lty = 2, col = "gray")
legend("bottomright", legend = c("Sanguinis","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)
plot(dates[(window + 1):length(dates)], rolling_var[, "Sanguinis"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Value-at-Risk (95%)",
main = "Rolling 24-Month VaR (95%): Sanguinis vs Equal",
ylim = range(rolling_var[, c("Sanguinis","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_var[, "Equal"], col = "black", lwd = 2, lty = 2)
legend("topright", legend = c("Sanguinis","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)

par(mfrow = c(2, 1))
plot(dates[(window + 1):length(dates)], rolling_sharpe[, "Plegmatis"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Sharpe Ratio",
main = "Rolling 24-Month Sharpe Ratio: Plegmatis vs Equal",
ylim = range(rolling_sharpe[, c("Plegmatis","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_sharpe[, "Equal"], col = "black", lwd = 2, lty = 2)
abline(h = 0, lty = 2, col = "gray")
legend("bottomright", legend = c("Plegmatis","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)
plot(dates[(window + 1):length(dates)], rolling_var[, "Plegmatis"],
type = "l", col = "red", lwd = 2,
xlab = "Date", ylab = "Value-at-Risk (95%)",
main = "Rolling 24-Month VaR (95%): Plegmatis vs Equal",
ylim = range(rolling_var[, c("Plegmatis","Equal")], na.rm = TRUE) * c(0.9, 1.1))
lines(dates[(window + 1):length(dates)], rolling_var[, "Equal"], col = "black", lwd = 2, lty = 2)
legend("topright", legend = c("Plegmatis","Equal"), col = c("red","black"), lwd = 2, lty = c(1,2), cex = 0.8)

par(mfrow = c(1,1))
# Summary statistics (per profile + equal-weight + selected assets) - reuse the existing return_stats block
return_stats <- data.frame(
  Mean_Return = colMeans(assets_returns, na.rm = TRUE) * 100,
  Volatility = apply(assets_returns, 2, sd, na.rm = TRUE) * 100,
  Sharpe = colMeans(assets_returns, na.rm = TRUE) / apply(assets_returns, 2, sd, na.rm = TRUE),
  VaR_95 = apply(assets_returns, 2, function(x) -quantile(x, 0.05, na.rm = TRUE)) * 100,
  Max_Drawdown = apply(assets_returns, 2, function(x) {
    cumu <- cumprod(1 + x)
    peak <- cumu[1]
    maxDD <- 0
    for (i in 2:length(cumu)) {
      if (cumu[i] > peak) peak <- cumu[i]
      DD <- (peak - cumu[i]) / peak
      if (DD > maxDD) maxDD <- DD
    }
    return(maxDD * 100)
  })
)
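# The drawdown loop above can also be expressed in vectorized form (sketch; same result):
max_drawdown_vec <- function(x) {
  cumu <- cumprod(1 + x)
  max(1 - cumu / cummax(cumu), na.rm = TRUE) * 100
}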
return_stats$Portfolio <- rownames(return_stats)
return_stats <- return_stats[order(-return_stats$Sharpe), ]
print(return_stats)
## Mean_Return Volatility Sharpe VaR_95 Max_Drawdown Portfolio
## Gold 0.91661033 3.194470 0.28693662 3.901653 14.46839 Gold
## JKSE 0.38148754 3.514663 0.10854170 4.619607 32.46521 JKSE
## Plegmatis 0.27098278 8.350983 0.03244921 3.768094 81.99133 Plegmatis
## Melankolis -0.20370665 14.005279 -0.01454499 5.861162 176.95635 Melankolis
## Sanguinis -0.29649167 13.144320 -0.02255664 5.209084 147.99288 Sanguinis
## Bond -0.07608112 2.835834 -0.02682848 4.497432 24.83847 Bond
## Equal -0.55829126 17.948072 -0.03110592 7.463672 251.23308 Equal
## Koleris -1.24911339 30.559950 -0.04087420 12.379192 475.11456 Koleris
## BTC -3.45518180 71.955272 -0.04801847 25.803555 145.02278 BTC
# Optional: save the summary results & weights
writexl::write_xlsx(return_stats, "Portfolio_Performance_By_Temperament.xlsx")
writexl::write_xlsx(weights_df, "Portfolio_Weights_By_Temperament.xlsx")
# 8. Pie chart visualization per profile
plot_pie_profile <- function(profile_name, weights_df) {
  # profile_name: "Koleris" / "Melankolis" / "Sanguinis" / "Plegmatis" / "Equal"
  df <- data.frame(
    Asset = weights_df$Asset_label,
    Weight = as.numeric(weights_df[[profile_name]])
  )
  ggplot(df, aes(x = "", y = Weight, fill = Asset)) +
    geom_col(width = 1, color = "white") +
    coord_polar(theta = "y") +
    geom_text(aes(label = paste0(round(Weight * 100, 2), "%")),
              position = position_stack(vjust = 0.5), color = "white", size = 4) +
    theme_void() +
    labs(title = paste("Weight Distribution -", profile_name))
}
# Example: display a pie chart for each profile in a grid
plots_profiles <- lapply(c("Koleris","Melankolis","Sanguinis","Plegmatis","Equal"),
plot_pie_profile, weights_df = weights_df)
do.call(grid.arrange, c(plots_profiles, ncol = 3))
