Import Libraries

library(readxl)
library(openxlsx)
library(ggplot2)
library(TTR)
library(forecast)
library(tseries)
library(TSA)
library(dynlm)
library(lmtest)
library(imputeTS)
library(stats)
library(MASS)
library(kableExtra)
library(padr)
library(astsa)
library(tfarima)
library(FinTS)
library(rugarch)

Import Data

data <- read.csv("C:/Users/Syammira Dhifa/Downloads/BBRI.JK.csv")
bbri <- data[,-c(2,3,4,6,7)]          # keep only the Date and Close columns
bbri$Date <- as.POSIXct(bbri$Date)
bbri$Close <- as.numeric(bbri$Close)  # non-numeric entries become NA (see warning below)
## Warning: NAs introduced by coercion
bbri.ts <- ts(bbri$Close)

Data Exploration

head(bbri)
##         Date Close
## 1 2014-09-01  2210
## 2 2014-09-02  2220
## 3 2014-09-03  2225
## 4 2014-09-04  2210
## 5 2014-09-05  2190
## 6 2014-09-08  2190
summary(bbri) 
##       Date                         Close     
##  Min.   :2014-09-01 00:00:00   Min.   :1660  
##  1st Qu.:2016-12-14 18:00:00   1st Qu.:2440  
##  Median :2019-02-23 12:00:00   Median :3520  
##  Mean   :2019-03-06 17:11:34   Mean   :3473  
##  3rd Qu.:2021-06-08 06:00:00   3rd Qu.:4290  
##  Max.   :2023-09-29 00:00:00   Max.   :5700  
##                                NA's   :1

Daily Plot

ggplot(bbri, aes(x=Date, y=Close)) +
  geom_line() +
  labs(title = "Daily Closing Price of BBRI Stock",
       subtitle = "1 September 2014 - 30 September 2023") +
  theme(plot.title =  element_text(face = "bold", hjust=.5),
        plot.subtitle = element_text(hjust=.5),
        legend.position = "bottom",
        panel.background = element_rect(fill=NA)) 

p = 1997/2260                              # proportion of observations used for training (~88%)
p 
## [1] 0.8836283
f = as.integer(p*2489)                     # last index of the training window used for the series split
jumlah.train <- as.integer(p*nrow(bbri))   # row index used to mark the split in the plot below

ggplot(bbri, aes(x=Date, y=Close)) +
  geom_line() + 
  labs(title = "Daily Closing Price of BBRI Stock",
       subtitle = "1 September 2014 - 30 September 2023") +
  geom_vline(aes(xintercept = Date[jumlah.train], 
                 col="Train_test_split"), lty=2, lwd=.7) +
  theme(plot.title =  element_text(face = "bold", hjust=.5),
        plot.subtitle = element_text(hjust=.5),
        legend.position = "bottom",
        panel.background = element_rect(fill=NA)) +
  scale_color_manual(name = "", values = c(Train_test_split="blue")) 

train <- 1:f
training.ts <- ts(bbri$Close[train])
testing.ts <- ts(bbri$Close[-train], start = f+1)
training.ts <- na_mean(training.ts)   # impute the single missing Close value with the series mean (imputeTS)

Training Data Plot

ggplot(bbri[1:1997,], aes(x=Date, y=Close)) +
  geom_line() + 
  labs(title = "Time Series Plot of the BBRI Closing Price - Training Data",
       subtitle = "(September 2014 - 2022)") +
  theme(plot.title =  element_text(face = "bold", hjust=.5),
        plot.subtitle = element_text(hjust=.5),
        legend.position = "bottom",
        panel.background = element_rect(fill=NA)) 

Testing Data Plot

ggplot(bbri[1998:2260,], aes(x=Date, y=Close)) +
  geom_line() + 
  labs(title = "Time Series Plot of the BBRI Closing Price - Testing Data",
       subtitle = "(2022 - September 2023)") +
  theme(plot.title =  element_text(face = "bold", hjust=.5),
        plot.subtitle = element_text(hjust=.5),
        legend.position = "bottom",
        panel.background = element_rect(fill=NA)) 

Checking Stationarity

par(mfrow=c(1,2))
acf(training.ts, main = "ACF Plot for MA(q)")
pacf(training.ts, main = "PACF Plot for AR(p)") 

Formal Test

H0: the data are not stationary
H1: the data are stationary

adf.test(training.ts) 
## 
##  Augmented Dickey-Fuller Test
## 
## data:  training.ts
## Dickey-Fuller = -3.06, Lag order = 13, p-value = 0.1296
## alternative hypothesis: stationary

The p-value (0.1296) is greater than alpha = 0.05, so we fail to reject H0: at the 5% significance level the training series is not stationary.
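
As a quick cross-check (not part of the original analysis), forecast::ndiffs() estimates how many differences are needed for stationarity; a result of 1 would support the single round of differencing applied in the next step.

# Estimated number of differences required (forecast package)
ndiffs(training.ts, test = "adf")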

Handling Non-Stationarity (Differencing)

train.dif1 <- diff(training.ts, differences = 1)
plot.ts(train.dif1, lty=1, main="First Difference of the BBRI Closing Price") 

adf.test(train.dif1) 
## Warning in adf.test(train.dif1): p-value smaller than printed p-value
## 
##  Augmented Dickey-Fuller Test
## 
## data:  train.dif1
## Dickey-Fuller = -14.202, Lag order = 12, p-value = 0.01
## alternative hypothesis: stationary

The p-value (reported as 0.01, i.e. smaller than the printed value) is less than alpha = 0.05, so we reject H0. At the 5% significance level, there is enough evidence that the differenced series is stationary.
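
A complementary check, added here as a sketch and not part of the original script, is the KPSS test from tseries, whose null hypothesis is stationarity; failing to reject H0 on the differenced series would agree with the ADF result above.

# KPSS test: H0 is (level) stationarity, the opposite of the ADF null
kpss.test(train.dif1, null = "Level")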

par(mfrow=c(1,2))
acf(train.dif1, main = "ACF Plot for MA(q)")
pacf(train.dif1, main = "PACF Plot for AR(p)") 

Candidate ARIMA models from the ACF/PACF: ARIMA(0,1,2) and ARIMA(2,1,0).

eacf(train.dif1) 
## AR/MA
##   0 1 2 3 4 5 6 7 8 9 10 11 12 13
## 0 x x o o o o o o o o o  o  x  o 
## 1 x x o o o o o o o o o  o  x  o 
## 2 x o o o o o o o o o o  o  o  o 
## 3 x x x o o o o o o o o  o  o  o 
## 4 x x x o o o o o o o o  o  o  o 
## 5 x x x o x o o o o o o  o  o  o 
## 6 x x x x x x o o o o o  o  o  o 
## 7 x o x x x x x o o o o  o  o  o

Additional candidate ARIMA models from the EACF: ARIMA(0,1,3), ARIMA(1,1,2), ARIMA(2,1,2), ARIMA(2,1,3), and ARIMA(3,1,3).
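
As a sanity check on these hand-picked candidates (an addition, not part of the original workflow), forecast::auto.arima() can search orders automatically on the undifferenced training series; its suggestion does not have to be adopted, but a large disagreement with the ACF/PACF/EACF candidates would be worth investigating.

# Automatic order search with d fixed at 1, as a cross-check on the candidate list
auto.arima(training.ts, d = 1, seasonal = FALSE)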

Selecting the Best Model

# The candidates are fit to the already-differenced series, so order = c(p, 0, q) here
# corresponds to ARIMA(p, 1, q) on the original closing prices.
model1 <- Arima(train.dif1, order=c(0,0,2), method="ML")   
model2 <- Arima(train.dif1, order=c(2,0,0), method="ML")  
model3 <- Arima(train.dif1, order=c(0,0,3), method="ML")
model4 <- Arima(train.dif1, order=c(1,0,2), method="ML")
model5 <- Arima(train.dif1, order=c(2,0,2), method="ML") 
model6 <- Arima(train.dif1, order=c(2,0,3), method="ML")
model7 <- Arima(train.dif1, order=c(3,0,3), method="ML") 
summary(model1)
## Series: train.dif1 
## ARIMA(0,0,2) with non-zero mean 
## 
## Coefficients:
##           ma1      ma2    mean
##       -0.0841  -0.0484  1.4849
## s.e.   0.0214   0.0218  1.3172
## 
## sigma^2 = 5074:  log likelihood = -12493.95
## AIC=24995.9   AICc=24995.92   BIC=25018.68
## 
## Training set error measures:
##                       ME     RMSE      MAE MPE MAPE      MASE         ACF1
## Training set -0.00210321 71.18582 46.68208 NaN  Inf 0.6802009 -0.001676904
lmtest::coeftest((model1)) 
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value  Pr(>|z|)    
## ma1       -0.084124   0.021435 -3.9246 8.687e-05 ***
## ma2       -0.048446   0.021829 -2.2193   0.02647 *  
## intercept  1.484937   1.317211  1.1273   0.25960    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model2)
## Series: train.dif1 
## ARIMA(2,0,0) with non-zero mean 
## 
## Coefficients:
##           ar1      ar2    mean
##       -0.0872  -0.0592  1.4850
## s.e.   0.0213   0.0213  1.3242
## 
## sigma^2 = 5072:  log likelihood = -12493.42
## AIC=24994.85   AICc=24994.86   BIC=25017.63
## 
## Training set error measures:
##                        ME     RMSE      MAE MPE MAPE      MASE        ACF1
## Training set -0.002028233 71.16878 46.67393 NaN  Inf 0.6800821 0.001627744
lmtest::coeftest((model2))  
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value Pr(>|z|)    
## ar1       -0.087239   0.021292 -4.0973 4.18e-05 ***
## ar2       -0.059202   0.021287 -2.7812 0.005416 ** 
## intercept  1.484976   1.324218  1.1214 0.262118    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model3)
## Series: train.dif1 
## ARIMA(0,0,3) with non-zero mean 
## 
## Coefficients:
##           ma1      ma2     ma3    mean
##       -0.0847  -0.0506  0.0336  1.4851
## s.e.   0.0213   0.0217  0.0212  1.3632
## 
## sigma^2 = 5071:  log likelihood = -12492.7
## AIC=24995.4   AICc=24995.43   BIC=25023.88
## 
## Training set error measures:
##                        ME     RMSE      MAE MPE MAPE      MASE          ACF1
## Training set -0.001404759 71.14536 46.65928 NaN  Inf 0.6798687 -0.0005737837
lmtest::coeftest((model3)) 
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value  Pr(>|z|)    
## ma1       -0.084655   0.021302 -3.9740 7.067e-05 ***
## ma2       -0.050637   0.021666 -2.3372   0.01943 *  
## ma3        0.033588   0.021236  1.5817   0.11373    
## intercept  1.485054   1.363236  1.0894   0.27600    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model4)
## Series: train.dif1 
## ARIMA(1,0,2) with non-zero mean 
## 
## Coefficients:
##           ar1     ma1      ma2    mean
##       -0.3991  0.3144  -0.0875  1.4694
## s.e.   0.2206  0.2199   0.0251  1.3310
## 
## sigma^2 = 5071:  log likelihood = -12492.81
## AIC=24995.63   AICc=24995.66   BIC=25024.11
## 
## Training set error measures:
##                      ME     RMSE     MAE MPE MAPE      MASE          ACF1
## Training set 0.01600458 71.14906 46.6549 NaN  Inf 0.6798049 -0.0007567946
lmtest::coeftest((model4))  
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value  Pr(>|z|)    
## ar1       -0.399078   0.220588 -1.8092 0.0704261 .  
## ma1        0.314408   0.219882  1.4299 0.1527484    
## ma2       -0.087489   0.025087 -3.4874 0.0004877 ***
## intercept  1.469439   1.330954  1.1040 0.2695717    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model5)
## Series: train.dif1 
## ARIMA(2,0,2) with non-zero mean 
## 
## Coefficients:
##           ar1      ar2     ma1     ma2    mean
##       -0.5112  -0.2024  0.4266  0.1111  1.4827
## s.e.   0.2538   0.1999  0.2576  0.2005  1.3616
## 
## sigma^2 = 5072:  log likelihood = -12492.35
## AIC=24996.69   AICc=24996.73   BIC=25030.86
## 
## Training set error measures:
##                       ME     RMSE      MAE MPE MAPE      MASE          ACF1
## Training set 0.001618085 71.13388 46.67284 NaN  Inf 0.6800663 -0.0004864494
lmtest::coeftest((model5))  
## 
## z test of coefficients:
## 
##           Estimate Std. Error z value Pr(>|z|)  
## ar1       -0.51119    0.25384 -2.0139  0.04402 *
## ar2       -0.20235    0.19988 -1.0124  0.31136  
## ma1        0.42657    0.25763  1.6557  0.09778 .
## ma2        0.11107    0.20048  0.5540  0.57958  
## intercept  1.48275    1.36158  1.0890  0.27616  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model6)
## Series: train.dif1 
## ARIMA(2,0,3) with non-zero mean 
## 
## Coefficients:
##           ar1     ar2     ma1      ma2     ma3    mean
##       -0.1160  0.0366  0.0309  -0.0969  0.0312  1.4667
## s.e.   2.2567  1.3773  2.2543   1.1965  0.1912  1.3570
## 
## sigma^2 = 5075:  log likelihood = -12492.6
## AIC=24999.19   AICc=24999.24   BIC=25039.06
## 
## Training set error measures:
##                      ME     RMSE      MAE MPE MAPE      MASE          ACF1
## Training set 0.01916294 71.14199 46.66347 NaN  Inf 0.6799298 -0.0002160495
lmtest::coeftest((model6)) 
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value Pr(>|z|)
## ar1       -0.116028   2.256701 -0.0514   0.9590
## ar2        0.036593   1.377271  0.0266   0.9788
## ma1        0.030948   2.254337  0.0137   0.9890
## ma2       -0.096900   1.196512 -0.0810   0.9355
## ma3        0.031194   0.191188  0.1632   0.8704
## intercept  1.466653   1.356984  1.0808   0.2798
summary(model7)
## Series: train.dif1 
## ARIMA(3,0,3) with non-zero mean 
## 
## Coefficients:
## Warning in sqrt(diag(x$var.coef)): NaNs produced
##           ar1     ar2     ar3     ma1      ma2     ma3    mean
##       -0.1403  0.1024  0.3718  0.0602  -0.1619  -0.337  1.4731
## s.e.      NaN     NaN  0.1503     NaN      NaN     NaN  1.2784
## 
## sigma^2 = 5073:  log likelihood = -12491.69
## AIC=24999.38   AICc=24999.45   BIC=25044.95
## 
## Training set error measures:
##                      ME     RMSE      MAE MPE MAPE      MASE         ACF1
## Training set 0.01438191 71.11261 46.70091 NaN  Inf 0.6804753 -0.004752762
lmtest::coeftest((model7)) 
## Warning in sqrt(diag(se)): NaNs produced
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value Pr(>|z|)  
## ar1       -0.140280        NaN     NaN      NaN  
## ar2        0.102398        NaN     NaN      NaN  
## ar3        0.371790   0.150341  2.4730   0.0134 *
## ma1        0.060227        NaN     NaN      NaN  
## ma2       -0.161903        NaN     NaN      NaN  
## ma3       -0.337047        NaN     NaN      NaN  
## intercept  1.473148   1.278427  1.1523   0.2492  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Model <- c("ARIMA (0,1,2)","ARIMA (2,1,0)","ARIMA (0,1,3)","ARIMA (1,1,2)","ARIMA (2,1,2)","ARIMA (2,1,3)","ARIMA (3,1,3)")
AIC <- c(model1$aic,model2$aic,model3$aic,model4$aic,model5$aic,model6$aic,model7$aic)
BIC <- c(model1$bic,model2$bic,model3$bic,model4$bic,model5$bic,model6$bic,model7$bic)
Akurasi <- data.frame(Model,AIC,BIC)
Akurasi 
##           Model      AIC      BIC
## 1 ARIMA (0,1,2) 24995.90 25018.68
## 2 ARIMA (2,1,0) 24994.85 25017.63
## 3 ARIMA (0,1,3) 24995.40 25023.88
## 4 ARIMA (1,1,2) 24995.63 25024.11
## 5 ARIMA (2,1,2) 24996.69 25030.86
## 6 ARIMA (2,1,3) 24999.19 25039.06
## 7 ARIMA (3,1,3) 24999.38 25044.95
kable(Akurasi)  
Model AIC BIC
ARIMA (0,1,2) 24995.90 25018.68
ARIMA (2,1,0) 24994.85 25017.63
ARIMA (0,1,3) 24995.40 25023.88
ARIMA (1,1,2) 24995.63 25024.11
ARIMA (2,1,2) 24996.69 25030.86
ARIMA (2,1,3) 24999.19 25039.06
ARIMA (3,1,3) 24999.38 25044.95
paste("Model yang terbaik berdasarkan akurasi adalah model",Akurasi$Model[which.min(Akurasi[,"AIC"])])
## [1] "Model yang terbaik berdasarkan akurasi adalah model ARIMA (2,1,0)"

Overfitting

model2b <- Arima(train.dif1, order=c(3,0,0),method="ML")   # overfit with an extra AR term; ar3 is not significant (see below)
lmtest::coeftest(model2b) 
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value  Pr(>|z|)    
## ar1       -0.085569   0.021321 -4.0134 5.986e-05 ***
## ar2       -0.056754   0.021359 -2.6572   0.00788 ** 
## ar3        0.028089   0.021312  1.3180   0.18750    
## intercept  1.485133   1.361908  1.0905   0.27550    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
model2c <- Arima(train.dif1, order=c(2,0,1),method="ML")   # overfit with an extra MA term; ma1 is not significant (see below)
lmtest::coeftest(model2c) 
## 
## z test of coefficients:
## 
##            Estimate Std. Error z value  Pr(>|z|)    
## ar1       -0.439472   0.224944 -1.9537 0.0507373 .  
## ar2       -0.090602   0.025267 -3.5857 0.0003362 ***
## ma1        0.353610   0.225279  1.5697 0.1164958    
## intercept  1.473819   1.342464  1.0978 0.2722714    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Model <- c("ARIMA (2,1,0)","ARIMA (3,1,0)","ARIMA (2,1,1)")
AIC <- c(model2$aic, model2b$aic, model2c$aic)
BIC <- c(model2$bic, model2b$bic, model2c$bic)
Akurasi2 <- data.frame(Model,AIC,BIC)
Akurasi2
##           Model      AIC      BIC
## 1 ARIMA (2,1,0) 24994.85 25017.63
## 2 ARIMA (3,1,0) 24995.11 25023.59
## 3 ARIMA (2,1,1) 24995.00 25023.47
kableExtra::kable(Akurasi2) 
Model AIC BIC
ARIMA (2,1,0) 24994.85 25017.63
ARIMA (3,1,0) 24995.11 25023.59
ARIMA (2,1,1) 24995.00 25023.47
paste("Model yang terbaik berdasarkan akurasi adalah model",Akurasi2$Model[which.min(Akurasi2[,"AIC"])])
## [1] "Model yang terbaik berdasarkan akurasi adalah model ARIMA (2,1,0)"

Residual Analysis of the Tentative ARIMA(2,1,0) Model

sisaan <- model2$residuals
tsdiag(model2) 

par(mfrow = c(2,2))
qqnorm(sisaan)
qqline(sisaan, col = "blue", lwd = 2)
plot(sisaan, type="o", 
     ylab = "Sisaan", xlab = "Order", main = "Sisaan Model vs Order")
abline(h = 0, col='red') 
acf(sisaan)
pacf(sisaan)

Formal Tests

1) Normality of residuals
H0: the residuals follow a normal distribution
H1: the residuals do not follow a normal distribution

shapiro.test(sisaan) 
## 
##  Shapiro-Wilk normality test
## 
## data:  sisaan
## W = 0.88711, p-value < 2.2e-16

The p-value is less than 0.05 (< 2.2e-16), so we reject H0. At the 5% significance level, there is enough evidence that the residuals are not normally distributed.
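
An alternative normality check, added here as a sketch (not in the original script), is the Jarque-Bera test from tseries, which is based on skewness and excess kurtosis; for heavy-tailed stock-price residuals it typically agrees with Shapiro-Wilk.

# Jarque-Bera test on the ARIMA(2,1,0) residuals; H0: the residuals are normally distributed
jarque.bera.test(sisaan)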

2) Independence of residuals (no autocorrelation)
H0: there is no autocorrelation in the residuals
H1: there is autocorrelation in the residuals

Box.test(sisaan, type = "Ljung")  
## 
##  Box-Ljung test
## 
## data:  sisaan
## X-squared = 0.0058317, df = 1, p-value = 0.9391

The p-value (0.9391) is greater than 0.05, so we fail to reject H0. At the 5% significance level, there is no evidence of autocorrelation in the residuals.
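
The Box.test() call above uses the default of a single lag; a common refinement (an addition here, not in the original script) is to test a block of lags and subtract the number of estimated ARMA coefficients through the fitdf argument.

# Ljung-Box test over the first 10 lags, adjusting the degrees of freedom for the 2 estimated AR coefficients
Box.test(sisaan, lag = 10, type = "Ljung-Box", fitdf = 2)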

3) The residual mean equals zero
H0: the residual mean is zero
H1: the residual mean is not zero

t.test(sisaan, mu = 0, conf.level = 0.95) 
## 
##  One Sample t-test
## 
## data:  sisaan
## t = -0.0013358, df = 2197, p-value = 0.9989
## alternative hypothesis: true mean is not equal to 0
## 95 percent confidence interval:
##  -2.979598  2.975541
## sample estimates:
##    mean of x 
## -0.002028233

The p-value (0.9989) is greater than 0.05, so we fail to reject H0. At the 5% significance level, there is no evidence that the residual mean differs from zero.

MAPE ARIMA (Mean Absolute Percentage Error)

ramalan <- forecast(Arima(training.ts, order=c(2,1,0),method="ML",include.drift = TRUE),h = length(testing.ts))
data.ramalan <- ramalan$mean
error <- data.frame(testing.ts)-data.frame(data.ramalan[1:length(testing.ts)]) 
r.error <- (error/data.frame(testing.ts))*100 # Relative Error
MAPE.ARIMA <- sapply(abs(r.error), mean, na.rm = T)
MAPE.ARIMA
## testing.ts 
##   2.256412
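
The same quantity (together with RMSE, MAE, and other measures) can also be obtained with forecast::accuracy(); this is shown only as a cross-check and is not part of the original script. Its test-set MAPE should match the manual calculation above.

# Training- and test-set accuracy measures for the ARIMA(2,1,0)-with-drift forecasts
accuracy(ramalan, testing.ts)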

Identifying ARCH Effects

H0: no heteroskedasticity (no ARCH effect)
H1: heteroskedasticity is present (ARCH effect)

for (i in 1:15) {
  arch.test <- ArchTest(model2$residuals, lags=i, demean=TRUE)   # avoid overwriting the ArchTest() function name
  cat("LM test p-value at lag", i,"is" , arch.test$p.value, "\n") } 
## LM test p-value at lag 1 is 1.082944e-94 
## LM test p-value at lag 2 is 1.103766e-107 
## LM test p-value at lag 3 is 4.655191e-112 
## LM test p-value at lag 4 is 1.46401e-111 
## LM test p-value at lag 5 is 4.73566e-111 
## LM test p-value at lag 6 is 5.555617e-110 
## LM test p-value at lag 7 is 1.929291e-110 
## LM test p-value at lag 8 is 7.190597e-110 
## LM test p-value at lag 9 is 1.837069e-109 
## LM test p-value at lag 10 is 1.655277e-108 
## LM test p-value at lag 11 is 1.408027e-107 
## LM test p-value at lag 12 is 4.935172e-107 
## LM test p-value at lag 13 is 3.583235e-106 
## LM test p-value at lag 14 is 2.017045e-105 
## LM test p-value at lag 15 is 1.294978e-104

All p-values are far below 0.05, so H0 is rejected: the residuals of the ARIMA(2,1,0) model are heteroskedastic (ARCH effects are present), which motivates modelling the variance with a GARCH process.
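
As a complementary visual check (not in the original script), the ACF and PACF of the squared residuals make the volatility clustering detected by the LM test visible; significant spikes indicate conditional heteroskedasticity.

# Autocorrelation of squared ARIMA(2,1,0) residuals as a visual sign of ARCH effects
par(mfrow=c(1,2))
acf(sisaan^2, main = "ACF of Squared Residuals")
pacf(sisaan^2, main = "PACF of Squared Residuals")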

GARCH Modeling

library(fGarch) 
## NOTE: Packages 'fBasics', 'timeDate', and 'timeSeries' are no longer
## attached to the search() path when 'fGarch' is attached.
## 
## If needed attach them yourself in your R script by e.g.,
##         require("timeSeries")
## 
## Attaching package: 'fGarch'
## The following object is masked from 'package:TTR':
## 
##     volatility
garch10<-garchFit(~arma(2,0)+garch(1,0),data = train.dif1, trace = F)
summary(garch10) 
## 
## Title:
##  GARCH Modelling 
## 
## Call:
##  garchFit(formula = ~arma(2, 0) + garch(1, 0), data = train.dif1, 
##     trace = F) 
## 
## Mean and Variance Equation:
##  data ~ arma(2, 0) + garch(1, 0)
## <environment: 0x000000002648c200>
##  [data = train.dif1]
## 
## Conditional Distribution:
##  norm 
## 
## Coefficient(s):
##          mu          ar1          ar2        omega       alpha1  
##    0.155693    -0.031957    -0.086796  3283.608857     0.380627  
## 
## Std. Errors:
##  based on Hessian 
## 
## Error Analysis:
##          Estimate  Std. Error  t value Pr(>|t|)    
## mu        0.15569     1.30104    0.120    0.905    
## ar1      -0.03196     0.02746   -1.164    0.245    
## ar2      -0.08680     0.02100   -4.134 3.57e-05 ***
## omega  3283.60886   142.91456   22.976  < 2e-16 ***
## alpha1    0.38063     0.04910    7.753 9.10e-15 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Log Likelihood:
##  -12336.02    normalized:  -5.612382 
## 
## Description:
##  Thu Nov 09 15:04:23 2023 by user: Syammira Dhifa 
## 
## 
## Standardised Residuals Tests:
##                                 Statistic p-Value   
##  Jarque-Bera Test   R    Chi^2  16414.97  0         
##  Shapiro-Wilk Test  R    W      0.9221026 0         
##  Ljung-Box Test     R    Q(10)  9.452615  0.4897535 
##  Ljung-Box Test     R    Q(15)  13.22961  0.5845682 
##  Ljung-Box Test     R    Q(20)  24.59774  0.2172557 
##  Ljung-Box Test     R^2  Q(10)  20.76022  0.02282902
##  Ljung-Box Test     R^2  Q(15)  28.91933  0.01647369
##  Ljung-Box Test     R^2  Q(20)  32.12583  0.0419759 
##  LM Arch Test       R    TR^2   19.79012  0.07116184
## 
## Information Criterion Statistics:
##      AIC      BIC      SIC     HQIC 
## 11.22931 11.24227 11.22930 11.23405
garch11<-garchFit(~arma(2,0)+garch(1,1),data = train.dif1, trace = F)
summary(garch11) 
## 
## Title:
##  GARCH Modelling 
## 
## Call:
##  garchFit(formula = ~arma(2, 0) + garch(1, 1), data = train.dif1, 
##     trace = F) 
## 
## Mean and Variance Equation:
##  data ~ arma(2, 0) + garch(1, 1)
## <environment: 0x0000000029a3b090>
##  [data = train.dif1]
## 
## Conditional Distribution:
##  norm 
## 
## Coefficient(s):
##        mu        ar1        ar2      omega     alpha1      beta1  
##  2.113490  -0.026855  -0.077227  50.987464   0.078420   0.919012  
## 
## Std. Errors:
##  based on Hessian 
## 
## Error Analysis:
##         Estimate  Std. Error  t value Pr(>|t|)    
## mu      2.113490    1.149466    1.839 0.065964 .  
## ar1    -0.026855    0.023982   -1.120 0.262801    
## ar2    -0.077227    0.023320   -3.312 0.000927 ***
## omega  50.987464   11.919120    4.278 1.89e-05 ***
## alpha1  0.078420    0.009324    8.411  < 2e-16 ***
## beta1   0.919012    0.008673  105.959  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Log Likelihood:
##  -12213.67    normalized:  -5.55672 
## 
## Description:
##  Thu Nov 09 15:04:23 2023 by user: Syammira Dhifa 
## 
## 
## Standardised Residuals Tests:
##                                 Statistic p-Value  
##  Jarque-Bera Test   R    Chi^2  23202.1   0        
##  Shapiro-Wilk Test  R    W      0.9231186 0        
##  Ljung-Box Test     R    Q(10)  5.424622  0.8610704
##  Ljung-Box Test     R    Q(15)  10.74062  0.770755 
##  Ljung-Box Test     R    Q(20)  17.40157  0.6267649
##  Ljung-Box Test     R^2  Q(10)  8.339834  0.5956799
##  Ljung-Box Test     R^2  Q(15)  9.386337  0.8564666
##  Ljung-Box Test     R^2  Q(20)  9.989542  0.9683612
##  LM Arch Test       R    TR^2   8.948693  0.7073064
## 
## Information Criterion Statistics:
##      AIC      BIC      SIC     HQIC 
## 11.11890 11.13445 11.11888 11.12458
garch13<-garchFit(~arma(2,0)+garch(1,3),data = train.dif1, trace = F)
summary(garch13) 
## 
## Title:
##  GARCH Modelling 
## 
## Call:
##  garchFit(formula = ~arma(2, 0) + garch(1, 3), data = train.dif1, 
##     trace = F) 
## 
## Mean and Variance Equation:
##  data ~ arma(2, 0) + garch(1, 3)
## <environment: 0x0000000026d180e8>
##  [data = train.dif1]
## 
## Conditional Distribution:
##  norm 
## 
## Coefficient(s):
##         mu         ar1         ar2       omega      alpha1       beta1  
##   2.385527   -0.043688   -0.076233  127.790853    0.170240    0.046908  
##      beta2       beta3  
##   0.013259    0.757488  
## 
## Std. Errors:
##  based on Hessian 
## 
## Error Analysis:
##         Estimate  Std. Error  t value Pr(>|t|)    
## mu       2.38553     1.13464    2.102 0.035513 *  
## ar1     -0.04369     0.02505   -1.744 0.081110 .  
## ar2     -0.07623     0.02088   -3.652 0.000261 ***
## omega  127.79085    28.13025    4.543 5.55e-06 ***
## alpha1   0.17024     0.02014    8.454  < 2e-16 ***
## beta1    0.04691     0.02020    2.322 0.020220 *  
## beta2    0.01326     0.02749    0.482 0.629639    
## beta3    0.75749     0.03690   20.528  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Log Likelihood:
##  -12187.38    normalized:  -5.544758 
## 
## Description:
##  Thu Nov 09 15:04:24 2023 by user: Syammira Dhifa 
## 
## 
## Standardised Residuals Tests:
##                                 Statistic p-Value  
##  Jarque-Bera Test   R    Chi^2  6752.752  0        
##  Shapiro-Wilk Test  R    W      0.9435996 0        
##  Ljung-Box Test     R    Q(10)  6.381246  0.7822805
##  Ljung-Box Test     R    Q(15)  11.31125  0.7302348
##  Ljung-Box Test     R    Q(20)  17.88969  0.594675 
##  Ljung-Box Test     R^2  Q(10)  4.571294  0.9179183
##  Ljung-Box Test     R^2  Q(15)  7.496919  0.9423673
##  Ljung-Box Test     R^2  Q(20)  9.532518  0.9758787
##  LM Arch Test       R    TR^2   6.016058  0.9152704
## 
## Information Criterion Statistics:
##      AIC      BIC      SIC     HQIC 
## 11.09679 11.11752 11.09677 11.10437
garch23<-garchFit(~arma(2,0)+garch(2,3),data = train.dif1, trace = F)
summary(garch23) 
## 
## Title:
##  GARCH Modelling 
## 
## Call:
##  garchFit(formula = ~arma(2, 0) + garch(2, 3), data = train.dif1, 
##     trace = F) 
## 
## Mean and Variance Equation:
##  data ~ arma(2, 0) + garch(2, 3)
## <environment: 0x0000000029ce5440>
##  [data = train.dif1]
## 
## Conditional Distribution:
##  norm 
## 
## Coefficient(s):
##         mu         ar1         ar2       omega      alpha1      alpha2  
##   2.470554   -0.043385   -0.078812  132.241376    0.163189    0.015121  
##      beta1       beta2       beta3  
##   0.023308    0.019885    0.765976  
## 
## Std. Errors:
##  based on Hessian 
## 
## Error Analysis:
##         Estimate  Std. Error  t value Pr(>|t|)    
## mu       2.47055     1.13464    2.177 0.029452 *  
## ar1     -0.04338     0.02500   -1.736 0.082625 .  
## ar2     -0.07881     0.02158   -3.652 0.000261 ***
## omega  132.24138    29.54454    4.476 7.61e-06 ***
## alpha1   0.16319     0.02083    7.834 4.66e-15 ***
## alpha2   0.01512     0.01192    1.269 0.204607    
## beta1    0.02331     0.02710    0.860 0.389770    
## beta2    0.01989     0.03415    0.582 0.560429    
## beta3    0.76598     0.03798   20.166  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Log Likelihood:
##  -12186.42    normalized:  -5.544323 
## 
## Description:
##  Thu Nov 09 15:04:25 2023 by user: Syammira Dhifa 
## 
## 
## Standardised Residuals Tests:
##                                 Statistic p-Value  
##  Jarque-Bera Test   R    Chi^2  6976.727  0        
##  Shapiro-Wilk Test  R    W      0.9431964 0        
##  Ljung-Box Test     R    Q(10)  6.459024  0.7753375
##  Ljung-Box Test     R    Q(15)  11.51532  0.7152826
##  Ljung-Box Test     R    Q(20)  17.84168  0.5978371
##  Ljung-Box Test     R^2  Q(10)  4.1738    0.9391661
##  Ljung-Box Test     R^2  Q(15)  6.69348   0.9656041
##  Ljung-Box Test     R^2  Q(20)  8.515654  0.9878687
##  LM Arch Test       R    TR^2   5.488169  0.9396591
## 
## Information Criterion Statistics:
##      AIC      BIC      SIC     HQIC 
## 11.09684 11.12016 11.09680 11.10536
garch33<-garchFit(~arma(2,0)+garch(3,3),data = train.dif1, trace = F)
summary(garch33) 
## 
## Title:
##  GARCH Modelling 
## 
## Call:
##  garchFit(formula = ~arma(2, 0) + garch(3, 3), data = train.dif1, 
##     trace = F) 
## 
## Mean and Variance Equation:
##  data ~ arma(2, 0) + garch(3, 3)
## <environment: 0x0000000029cb7628>
##  [data = train.dif1]
## 
## Conditional Distribution:
##  norm 
## 
## Coefficient(s):
##          mu          ar1          ar2        omega       alpha1       alpha2  
##  2.4273e+00  -4.1670e-02  -7.7486e-02   1.3395e+02   1.5631e-01   1.4034e-02  
##      alpha3        beta1        beta2        beta3  
##  1.8527e-02   2.7121e-02   1.0000e-08   7.7223e-01  
## 
## Std. Errors:
##  based on Hessian 
## 
## Error Analysis:
##          Estimate  Std. Error  t value Pr(>|t|)    
## mu      2.427e+00   1.131e+00    2.146 0.031896 *  
## ar1    -4.167e-02   2.570e-02   -1.622 0.104871    
## ar2    -7.749e-02   2.181e-02   -3.553 0.000382 ***
## omega   1.339e+02   3.013e+01    4.445 8.79e-06 ***
## alpha1  1.563e-01   3.323e-02    4.703 2.56e-06 ***
## alpha2  1.403e-02   1.388e-02    1.011 0.312112    
## alpha3  1.853e-02   3.362e-02    0.551 0.581569    
## beta1   2.712e-02   4.554e-02    0.596 0.551447    
## beta2   1.000e-08   1.134e-01    0.000 1.000000    
## beta3   7.722e-01   8.022e-02    9.626  < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Log Likelihood:
##  -12185.07    normalized:  -5.543707 
## 
## Description:
##  Thu Nov 09 15:04:26 2023 by user: Syammira Dhifa 
## 
## 
## Standardised Residuals Tests:
##                                 Statistic p-Value  
##  Jarque-Bera Test   R    Chi^2  7371.281  0        
##  Shapiro-Wilk Test  R    W      0.9423061 0        
##  Ljung-Box Test     R    Q(10)  6.115535  0.8054648
##  Ljung-Box Test     R    Q(15)  11.17305  0.7402334
##  Ljung-Box Test     R    Q(20)  17.5359   0.6179514
##  Ljung-Box Test     R^2  Q(10)  3.838896  0.9543221
##  Ljung-Box Test     R^2  Q(15)  6.076849  0.9784278
##  Ljung-Box Test     R^2  Q(20)  7.878979  0.9926386
##  LM Arch Test       R    TR^2   5.013785  0.957517 
## 
## Information Criterion Statistics:
##      AIC      BIC      SIC     HQIC 
## 11.09651 11.12243 11.09647 11.10598

The smallest AIC is obtained for ARIMA(2,1,0)-GARCH(3,3) (AIC = 11.09651); note that BIC instead slightly favors ARIMA(2,1,0)-GARCH(1,3).
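
To make this comparison explicit, the information criteria reported by garchFit can be collected into a table, mirroring the ARIMA comparison above. This is a sketch added here, assuming the fitted fGarch objects expose the criteria printed under "Information Criterion Statistics" in the @fit$ics slot; Model.GARCH and Akurasi.GARCH are names introduced for illustration.

# Collect AIC/BIC of the fitted ARMA(2,0)-GARCH models (assumed to be stored in @fit$ics)
Model.GARCH <- c("GARCH(1,0)", "GARCH(1,1)", "GARCH(1,3)", "GARCH(2,3)", "GARCH(3,3)")
ics <- rbind(garch10@fit$ics, garch11@fit$ics, garch13@fit$ics, garch23@fit$ics, garch33@fit$ics)
Akurasi.GARCH <- data.frame(Model = Model.GARCH, AIC = ics[, "AIC"], BIC = ics[, "BIC"])
Akurasi.GARCH[order(Akurasi.GARCH$AIC), ]   # smallest AIC first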

GARCH Forecasting

garchSpec <- ugarchspec(
  variance.model=list(model="sGARCH",garchOrder=c(3,3)),
  mean.model=list(armaOrder=c(2,0)), distribution.model="std")   # Student-t innovations
garchFitt <- ugarchfit(spec=garchSpec, data=train.dif1)   # refit the ARMA(2,0)-GARCH(3,3) model with rugarch for forecasting

forc<- ugarchforecast(fitORspec = garchFitt, data = bbri$Close, n.ahead = 30, n.roll = 0)
plot(forc, which= 1) 

Comparison of Actual and Forecast Values

pt_1 <- bbri$Close[1997]                                      # last value of the training window (anchor for undoing the differencing)
hasil.forc.Diff <- forc@forecast$seriesFor[,1]                # 30-step-ahead forecasts of the differenced series
hasil <- diffinv(hasil.forc.Diff, differences = 1) + pt_1     # invert the differencing back to the price level
length(testing.ts) <- length(hasil[-1])                       # keep only the first 30 test observations to match the forecast horizon
perbandingan <- data.frame("Aktual"= testing.ts, "Ramalan" = hasil[-1])
head(perbandingan,10)
##    Aktual  Ramalan
## 1    5450 4341.750
## 2    5450 4340.364
## 3    5425 4342.833
## 4    5375 4345.333
## 5    5400 4347.538
## 6    5425 4349.756
## 7    5450 4351.997
## 8    5450 4354.234
## 9    5525 4356.471
## 10   5575 4358.707
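
A quick visual comparison of the first 30 test observations against the back-transformed GARCH forecasts (a sketch added here, not part of the original script):

# Overlay actual prices and ARIMA-GARCH forecasts over the 30-step horizon
plot(perbandingan$Aktual, type = "l", col = "black",
     ylim = range(c(perbandingan$Aktual, perbandingan$Ramalan), na.rm = TRUE),
     xlab = "Forecast step", ylab = "Close", main = "Actual vs ARIMA-GARCH Forecast")
lines(perbandingan$Ramalan, col = "blue", lty = 2)
legend("topleft", legend = c("Actual", "Forecast"), col = c("black", "blue"), lty = c(1, 2))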

MAPE GARCH

Date <- bbri$Date[1998:2260]
length(Date) <- nrow(perbandingan)   # keep only the dates matching the 30 forecast steps
dataframe <- data.frame(Date, perbandingan) 
n <- nrow(dataframe) 
MAPE.GARCH <- 1/n*sum(abs((dataframe$Aktual-dataframe$Ramalan)/dataframe$Aktual)*100)
MAPE.GARCH 
## [1] 21.73535

Comparison of ARIMA and ARIMA-GARCH MAPE

MAPE <- data.frame("MAPE ARIMA"= MAPE.ARIMA, "MAPE GARCH" = MAPE.GARCH)
MAPE 
##            MAPE.ARIMA MAPE.GARCH
## testing.ts   2.256412   21.73535