lapply(c("dplyr","readxl","forecast","TTR","graphics","smooth","Mcomp","knitr","tseries",
         "readr","TSA","ggplot2","MLmetrics","cowplot","gridExtra","gtable","grid","MASS",
         "lmtest","GGally", "lubridate", "caret", "tseries", "tidyverse"), library, character.only = T)[[1]]
df = read_xlsx('C:/Users/User/Downloads/data indri.xlsx')
data2 = df$Jumlah_PNP
# Compute the mean
rata_rata <- mean(data2)

# Compute the median
median_data <- median(data2)

# Find the minimum value
nilai_minimum <- min(data2)

# Find the maximum value
nilai_maksimum <- max(data2)

# Print the results
cat("Mean:", rata_rata, "\n")
## Mean: 546531.3
cat("Median:", median_data, "\n")
## Median: 526712.3
cat("Minimum:", nilai_minimum, "\n")
## Minimum: 40511.1
cat("Maximum:", nilai_maksimum, "\n")
## Maximum: 975271
data.ts <- ts(data2, frequency = 365, start = c(2021, 1))  # daily data starting 1 January 2021
length(data.ts)
## [1] 973
# Box-Cox
#lambda <- BoxCox.lambda(data.ts)
#data.ts <- BoxCox(data.ts, lambda)

# Plot the full series
plot(data.ts, xlab = "Period", ylab = "Number of Passengers", col = "black",
     main = "Time Series Plot of the Data")
points(data.ts)

model <- lm(Jumlah_PNP ~ Tanggal, data = df)

# Breusch-Pagan test for heteroscedasticity
bp_test <- bptest(model)
bp_test
## 
##  studentized Breusch-Pagan test
## 
## data:  model
## BP = 3.3165, df = 1, p-value = 0.06859
# data.ts is assumed to be the available time series


# Split data into training and testing
n <- length(data.ts)
data.train <- data.ts[1:778]
data.test <- data.ts[779:n]


training.ts<-ts(data.train,frequency=365)
length(training.ts)
## [1] 778
testing.ts<-ts(data.test,frequency=365)
length(testing.ts)
## [1] 195
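
Note that subsetting with data.ts[1:778] drops the time-series attributes, which is why the training and testing sets are re-wrapped with ts() above. Below is a minimal sketch of an alternative split that keeps the original time index via window(); the names train.window and test.window are introduced here only for illustration.

# Sketch: split the series while preserving the time index of data.ts
train.window <- window(data.ts, end = time(data.ts)[778])    # observations 1-778
test.window  <- window(data.ts, start = time(data.ts)[779])  # observations 779-973
c(length(train.window), length(test.window))
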
# Plot the training data
plot(training.ts, xlab = "Period", ylab = "Number of Passengers", col = "red",
     main = "Training Data")
points(training.ts)

# Plot the testing data
plot(testing.ts, xlab = "Period", ylab = "Number of Passengers", col = "red",
     main = "Testing Data")
points(testing.ts)

## Training and testing data on the full time axis
ts.plot(data.ts, xlab = "Period", ylab = "Number of Passengers",
        main = "Time Series Plot of Transjakarta Passenger Counts")
lines(window(data.ts, end = time(data.ts)[778]), col = "blue")
lines(window(data.ts, start = time(data.ts)[779]), col = "red")
legend("bottomleft", c("Training Data", "Testing Data"),
       lty = 1, col = c("blue", "red"), cex = 0.8)
abline(v = time(data.ts)[779], col = "black", lty = 1, lwd = 1)

acf(data.train, lag.max = 48, main = "Plot ACF")

pacf(data.train, lag.max = 48, main = "Plot PACF")

adf.test(data.train)
## 
##  Augmented Dickey-Fuller Test
## 
## data:  data.train
## Dickey-Fuller = -2.8346, Lag order = 9, p-value = 0.225
## alternative hypothesis: stationary
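
Since the ADF test fails to reject the unit-root null hypothesis (p = 0.225), the training series is treated as non-stationary and differenced below. As an optional cross-check that is not part of the original analysis, the KPSS test from the tseries package reverses the hypotheses (stationarity is the null); a minimal sketch:

# Sketch: KPSS test as a complementary stationarity check (null = level stationarity)
tseries::kpss.test(data.train, null = "Level")
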
# Non-seasonal (first) differencing
data.dif <- diff(data.train, differences = 1)

plot.ts(data.dif, lty = 1, xlab = "Period", ylab = "Number of Passengers",
        main = "Series After First Differencing")

adf.test(data.dif)
## Warning in adf.test(data.dif): p-value smaller than printed p-value
## 
##  Augmented Dickey-Fuller Test
## 
## data:  data.dif
## Dickey-Fuller = -9.6878, Lag order = 9, p-value = 0.01
## alternative hypothesis: stationary
acf(data.dif, lag.max = 48, main = "ACF After First Differencing")

pacf(data.dif, lag.max = 50, main = "PACF After First Differencing")

# Seasonal differencing (lag 7)

data.dif2 <- diff(data.dif, lag = 7)
plot.ts(data.dif2, lty = 1, xlab = "Period", ylab = "Number of Passengers",
        main = "Series After First and Seasonal (Lag-7) Differencing")

acf(data.dif2, lag.max = 48, main = "ACF After Seasonal Differencing")

pacf(data.dif2, lag.max = 50, main = "PACF After Seasonal Differencing")

| Plot         | Non-seasonal | Seasonal    |
|--------------|--------------|-------------|
| PACF         | p (AR) = 3   | P (SAR) = 3 |
| Differencing | d = 1        | D = 1       |
| ACF          | q (MA) = 1   | Q (SMA) = 3 |

A candidate with annual seasonality, ARIMA(2,0,0)(0,1,0)[365], was also noted; the models estimated below use the weekly seasonal period [7] instead.
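
The manual identification above can also be cross-checked automatically. The following is a minimal sketch, not part of the original analysis, that restricts the automatic search to the weekly seasonal period used below; train.weekly and auto.fit are names introduced here.

# Sketch: automatic order search as a cross-check of the manual identification
train.weekly <- ts(data.train, frequency = 7)   # weekly-seasonal copy of the training data
auto.fit <- forecast::auto.arima(train.weekly, d = 1, D = 1,
                                 stepwise = TRUE, approximation = TRUE)
summary(auto.fit)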

## Parameter estimation for the candidate SARIMA(p,1,q)(P,1,Q)[7] models
model=Arima(data.train, order=c(3,1,1),seasonal=list(order=c(3,1,3),period=7))
model1=Arima(data.train, order=c(3,1,0),seasonal=list(order=c(3,1,3),period=7))
model2=Arima(data.train, order=c(0,1,1),seasonal=list(order=c(3,1,3),period=7))
summary(model)
## Series: data.train 
## ARIMA(3,1,1)(3,1,3)[7] 
## 
## Coefficients:
##          ar1      ar2     ar3      ma1     sar1     sar2     sar3    sma1
##       0.0823  -0.0108  0.0468  -0.6853  -1.7142  -0.9511  -0.0063  0.7956
## s.e.  0.1741   0.1101  0.0830   0.1678   0.0857   0.1253   0.0450  0.0891
##          sma2    sma3
##       -0.5984  -0.880
## s.e.   0.0384   0.088
## 
## sigma^2 = 1.566e+09:  log likelihood = -9246.94
## AIC=18515.88   AICc=18516.23   BIC=18567
## 
## Training set error measures:
##                     ME     RMSE      MAE       MPE     MAPE      MASE
## Training set -717.4737 39117.24 24527.09 -1.306128 6.928985 0.3973808
##                     ACF1
## Training set 0.006318749
coeftest(model)
## 
## z test of coefficients:
## 
##        Estimate Std. Error  z value  Pr(>|z|)    
## ar1   0.0823395  0.1741413   0.4728    0.6363    
## ar2  -0.0108136  0.1101304  -0.0982    0.9218    
## ar3   0.0468240  0.0830067   0.5641    0.5727    
## ma1  -0.6852821  0.1677892  -4.0842 4.423e-05 ***
## sar1 -1.7142492  0.0856577 -20.0128 < 2.2e-16 ***
## sar2 -0.9511145  0.1252835  -7.5917 3.157e-14 ***
## sar3 -0.0063492  0.0449617  -0.1412    0.8877    
## sma1  0.7955639  0.0890640   8.9325 < 2.2e-16 ***
## sma2 -0.5983823  0.0383610 -15.5987 < 2.2e-16 ***
## sma3 -0.8799662  0.0879876 -10.0010 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model1)
## Series: data.train 
## ARIMA(3,1,0)(3,1,3)[7] 
## 
## Coefficients:
##           ar1      ar2      ar3     sar1     sar2     sar3    sma1     sma2
##       -0.5813  -0.3707  -0.1546  -1.6971  -0.9266  -0.0029  0.7790  -0.5969
## s.e.   0.0362   0.0399   0.0360   0.1106   0.1542   0.0436  0.1087   0.0414
##          sma3
##       -0.8565
## s.e.   0.1047
## 
## sigma^2 = 1.588e+09:  log likelihood = -9251.95
## AIC=18523.91   AICc=18524.2   BIC=18570.37
## 
## Training set error measures:
##                     ME     RMSE      MAE       MPE     MAPE      MASE
## Training set -718.6889 39417.27 24770.89 -1.309296 6.956565 0.4013307
##                      ACF1
## Training set -0.003752709
coeftest(model1)
## 
## z test of coefficients:
## 
##        Estimate Std. Error  z value  Pr(>|z|)    
## ar1  -0.5812566  0.0362074 -16.0535 < 2.2e-16 ***
## ar2  -0.3707417  0.0398896  -9.2942 < 2.2e-16 ***
## ar3  -0.1545860  0.0359746  -4.2971 1.731e-05 ***
## sar1 -1.6971283  0.1105695 -15.3490 < 2.2e-16 ***
## sar2 -0.9266272  0.1541524  -6.0111 1.843e-09 ***
## sar3 -0.0028622  0.0436493  -0.0656    0.9477    
## sma1  0.7789655  0.1087325   7.1641 7.833e-13 ***
## sma2 -0.5969277  0.0413676 -14.4298 < 2.2e-16 ***
## sma3 -0.8564555  0.1046732  -8.1822 2.787e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model2)
## Series: data.train 
## ARIMA(0,1,1)(3,1,3)[7] 
## 
## Coefficients:
##           ma1     sar1     sar2     sar3    sma1     sma2     sma3
##       -0.6273  -1.7152  -0.9560  -0.0090  0.7969  -0.5931  -0.8779
## s.e.   0.0297   0.0990   0.1384   0.0423  0.1021   0.0391   0.0983
## 
## sigma^2 = 1.566e+09:  log likelihood = -9248.13
## AIC=18512.26   AICc=18512.45   BIC=18549.43
## 
## Training set error measures:
##                     ME     RMSE      MAE      MPE     MAPE      MASE       ACF1
## Training set -719.5041 39186.51 24586.57 -1.30618 6.949063 0.3983444 0.02890199
coeftest(model2)
## 
## z test of coefficients:
## 
##       Estimate Std. Error  z value  Pr(>|z|)    
## ma1  -0.627281   0.029738 -21.0935 < 2.2e-16 ***
## sar1 -1.715199   0.098972 -17.3302 < 2.2e-16 ***
## sar2 -0.956021   0.138405  -6.9074 4.935e-12 ***
## sar3 -0.008979   0.042341  -0.2121    0.8321    
## sma1  0.796853   0.102057   7.8079 5.816e-15 ***
## sma2 -0.593104   0.039115 -15.1631 < 2.2e-16 ***
## sma3 -0.877902   0.098292  -8.9316 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Overfitting check around the selected model
# ARIMA(0,1,1)(3,1,3)[7]

model2c <- Arima(data.train, order=c(0,1,2),seasonal=list(order=c(3,1,3),period=7))


model2b1 <- Arima(data.train, order=c(0,1,1),seasonal=list(order=c(4,1,3),period=7))
model2c1 <- Arima(data.train, order=c(0,1,1),seasonal=list(order=c(3,1,4),period=7))
summary(model2c)
## Series: data.train 
## ARIMA(0,1,2)(3,1,3)[7] 
## 
## Coefficients:
##           ma1      ma2    sar1     sar2     sar3    sma1     sma2     sma3
##       -0.6023  -0.0404  -1.714  -0.9527  -0.0067  0.7957  -0.5967  -0.8805
## s.e.   0.0374   0.0390   0.087   0.1250   0.0423  0.0881   0.0380   0.0855
## 
## sigma^2 = 1.565e+09:  log likelihood = -9247.59
## AIC=18513.19   AICc=18513.42   BIC=18555
## 
## Training set error measures:
##                     ME     RMSE      MAE       MPE     MAPE      MASE
## Training set -716.4644 39151.67 24481.08 -1.302816 6.915972 0.3966352
##                     ACF1
## Training set 0.005777293
coeftest(model2c)
## 
## z test of coefficients:
## 
##        Estimate Std. Error  z value  Pr(>|z|)    
## ma1  -0.6023453  0.0374050 -16.1033 < 2.2e-16 ***
## ma2  -0.0404224  0.0389842  -1.0369    0.2998    
## sar1 -1.7140206  0.0869529 -19.7121 < 2.2e-16 ***
## sar2 -0.9527439  0.1249995  -7.6220 2.498e-14 ***
## sar3 -0.0066731  0.0423249  -0.1577    0.8747    
## sma1  0.7956911  0.0880812   9.0336 < 2.2e-16 ***
## sma2 -0.5966909  0.0380019 -15.7016 < 2.2e-16 ***
## sma3 -0.8805475  0.0854521 -10.3046 < 2.2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
summary(model2b1)
## Series: data.train 
## ARIMA(0,1,1)(4,1,3)[7] 
## 
## Coefficients:
##           ma1     sar1     sar2    sar3    sar4     sma1    sma2     sma3
##       -0.6231  -0.5118  -0.7089  0.0480  0.1182  -0.4189  0.3223  -0.7343
## s.e.   0.0305   0.2020   0.1618  0.0481  0.0399   0.2023  0.1748   0.1419
## 
## sigma^2 = 1.575e+09:  log likelihood = -9248.72
## AIC=18515.44   AICc=18515.68   BIC=18557.26
## 
## Training set error measures:
##                     ME     RMSE      MAE       MPE     MAPE      MASE
## Training set -710.5854 39282.02 24357.74 -1.301483 6.937928 0.3946369
##                    ACF1
## Training set 0.02782729
coeftest(model2b1)
## 
## z test of coefficients:
## 
##       Estimate Std. Error  z value  Pr(>|z|)    
## ma1  -0.623126   0.030523 -20.4148 < 2.2e-16 ***
## sar1 -0.511847   0.202016  -2.5337  0.011287 *  
## sar2 -0.708930   0.161822  -4.3809 1.182e-05 ***
## sar3  0.048024   0.048133   0.9977  0.318413    
## sar4  0.118232   0.039927   2.9612  0.003065 ** 
## sma1 -0.418894   0.202330  -2.0703  0.038420 *  
## sma2  0.322285   0.174798   1.8438  0.065218 .  
## sma3 -0.734287   0.141926  -5.1737 2.295e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Based on the overfitting check, we retain ARIMA(0,1,1)(3,1,3)[7] (model2): the extra parameters in the overfitted models are not all significant, and model2 has the lowest AIC among the candidates.
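
To make this comparison explicit, the information criteria of the fitted candidates can be tabulated; a minimal sketch reusing the model objects defined above (the candidates list is introduced here only for convenience):

# Sketch: side-by-side information criteria of the candidate SARIMA models
candidates <- list("ARIMA(3,1,1)(3,1,3)[7]" = model,
                   "ARIMA(3,1,0)(3,1,3)[7]" = model1,
                   "ARIMA(0,1,1)(3,1,3)[7]" = model2,
                   "ARIMA(0,1,2)(3,1,3)[7]" = model2c,
                   "ARIMA(0,1,1)(4,1,3)[7]" = model2b1,
                   "ARIMA(0,1,1)(3,1,4)[7]" = model2c1)
data.frame(AIC  = sapply(candidates, function(m) m$aic),
           AICc = sapply(candidates, function(m) m$aicc),
           BIC  = sapply(candidates, function(m) m$bic))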

# Residual diagnostics
sisaan <- model2$residuals

# Normality of residuals
shapiro.test(sisaan)
## 
##  Shapiro-Wilk normality test
## 
## data:  sisaan
## W = 0.83991, p-value < 2.2e-16
# Independence of residuals (no autocorrelation)
Box.test(sisaan, type = "Ljung")
## 
##  Box-Ljung test
## 
## data:  sisaan
## X-squared = 0.65239, df = 1, p-value = 0.4193

The residuals are not normally distributed (Shapiro-Wilk p-value < 2.2e-16), which we tolerate here, but they show no significant autocorrelation (Ljung-Box p-value = 0.4193).
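
The same diagnostics can be produced in a single call with forecast::checkresiduals(), which plots the residuals, their ACF and a histogram, and reports a Ljung-Box test; a minimal sketch:

# Sketch: combined residual diagnostics for the selected model
forecast::checkresiduals(model2)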

# Forecasting
#ARIMA(0,1,1)(3,1,3)[7] 
#ramalan <- forecast(Arima(data.ts, order=c(3,1,3),method="ML",include.drift = TRUE),h = length(data.test))

# Forecast
forecast_length <- length(data.test)  # Length of the test data
ramalan <- forecast::forecast(model2, h = forecast_length)
data.ramalan <- ramalan$mean
data.ramalan.ts <- ts(data.ramalan, start = time(data.ts)[779], frequency = 365)  # forecasts begin right after the training window
plot(ramalan,col="black",col.sub ="black",col.axis="black",
     col.lab="black",col.main="black",lwd=2)
box(col="black",lwd=2)

# Plot the forecast
plot(ramalan)

# Forecast start date, given in "dd/mm/yyyy" format
start_date <- as.Date("01/09/2023", format="%d/%m/%Y")

# Build the date vector for the forecast, assuming the forecast starts on the following day
forecast_dates <- seq(from=start_date, length.out=195, by="day")

# Attach the date vector to the forecast values
hasilforecast <- matrix(data=data.ramalan[1:195], nrow=195, ncol=1)
colnames(hasilforecast) <- c("Hasil Forecast")
hasilforecast <- cbind(ForecastDate = forecast_dates, hasilforecast)

# Convert the matrix to a data frame
hasilforecast_df <- as.data.frame(hasilforecast)

# cbind() coerced the dates to numeric, so convert them back to Date
hasilforecast_df$ForecastDate <- as.Date(hasilforecast_df$ForecastDate, origin = "1970-01-01")

# Display the forecast with the correct dates
print(hasilforecast_df)
##     ForecastDate Hasil Forecast
## 1     2023-09-01       673589.8
## 2     2023-09-02       610586.8
## 3     2023-09-03       802230.5
## 4     2023-09-04       805759.3
## 5     2023-09-05       808909.1
## 6     2023-09-06       806781.0
## 7     2023-09-07       811663.1
## 8     2023-09-08       675257.6
## 9     2023-09-09       601929.1
## 10    2023-09-10       824182.1
## 11    2023-09-11       809150.2
## 12    2023-09-12       791408.8
## 13    2023-09-13       805356.9
## 14    2023-09-14       820520.4
## 15    2023-09-15       683227.6
## 16    2023-09-16       620955.4
## 17    2023-09-17       816918.3
## 18    2023-09-18       811788.2
## 19    2023-09-19       816397.3
## 20    2023-09-20       815919.6
## 21    2023-09-21       819020.6
## 22    2023-09-22       681552.3
## 23    2023-09-23       609910.2
## 24    2023-09-24       822626.9
## 25    2023-09-25       817957.0
## 26    2023-09-26       804272.4
## 27    2023-09-27       813097.0
## 28    2023-09-28       827245.1
## 29    2023-09-29       690836.0
## 30    2023-09-30       624787.7
## 31    2023-10-01       833627.3
## 32    2023-10-02       818868.5
## 33    2023-10-03       815381.2
## 34    2023-10-04       821897.6
## 35    2023-10-05       828537.4
## 36    2023-10-06       690487.4
## 37    2023-10-07       623703.1
## 38    2023-10-08       823411.6
## 39    2023-10-09       825428.5
## 40    2023-10-10       821739.3
## 41    2023-10-11       823451.1
## 42    2023-10-12       832516.2
## 43    2023-10-13       696269.6
## 44    2023-10-14       625484.0
## 45    2023-10-15       844410.3
## 46    2023-10-16       827294.6
## 47    2023-10-17       814367.2
## 48    2023-10-18       826443.0
## 49    2023-10-19       838427.1
## 50    2023-10-20       700646.5
## 51    2023-10-21       637377.3
## 52    2023-10-22       832105.7
## 53    2023-10-23       831858.8
## 54    2023-10-24       834878.3
## 55    2023-10-25       833791.8
## 56    2023-10-26       838517.9
## 57    2023-10-27       701659.1
## 58    2023-10-28       629329.7
## 59    2023-10-29       847271.7
## 60    2023-10-30       836231.9
## 61    2023-10-31       820733.1
## 62    2023-11-01       832357.5
## 63    2023-11-02       846720.1
## 64    2023-11-03       709730.6
## 65    2023-11-04       645791.3
## 66    2023-11-05       846878.5
## 67    2023-11-06       838395.5
## 68    2023-11-07       839496.7
## 69    2023-11-08       841809.7
## 70    2023-11-09       846556.4
## 71    2023-11-10       708923.6
## 72    2023-11-11       639187.9
## 73    2023-11-12       847209.0
## 74    2023-11-13       844507.4
## 75    2023-11-14       834696.9
## 76    2023-11-15       840947.1
## 77    2023-11-16       853039.5
## 78    2023-11-17       716626.8
## 79    2023-11-18       648893.4
## 80    2023-11-19       860926.5
## 81    2023-11-20       845961.2
## 82    2023-11-21       839162.7
## 83    2023-11-22       847447.6
## 84    2023-11-23       856047.2
## 85    2023-11-24       718158.0
## 86    2023-11-25       652456.4
## 87    2023-11-26       851130.5
## 88    2023-11-27       851649.7
## 89    2023-11-28       849967.8
## 90    2023-11-29       851082.4
## 91    2023-11-30       858736.5
## 92    2023-12-01       722219.1
## 93    2023-12-02       651170.4
## 94    2023-12-03       868860.1
## 95    2023-12-04       854492.7
## 96    2023-12-05       841253.2
## 97    2023-12-06       852685.8
## 98    2023-12-07       865234.8
## 99    2023-12-08       727765.0
## 100   2023-12-09       663927.3
## 101   2023-12-10       861736.9
## 102   2023-12-11       858209.7
## 103   2023-12-12       859875.1
## 104   2023-12-13       860447.0
## 105   2023-12-14       865535.5
## 106   2023-12-15       728401.1
## 107   2023-12-16       657288.7
## 108   2023-12-17       871137.3
## 109   2023-12-18       863109.9
## 110   2023-12-19       850213.8
## 111   2023-12-20       859614.1
## 112   2023-12-21       872827.7
## 113   2023-12-22       736016.3
## 114   2023-12-23       670535.5
## 115   2023-12-24       875709.1
## 116   2023-12-25       865170.6
## 117   2023-12-26       863104.8
## 118   2023-12-27       867653.0
## 119   2023-12-28       874019.0
## 120   2023-12-29       736341.4
## 121   2023-12-30       668091.3
## 122   2023-12-31       872989.1
## 123   2024-01-01       870962.6
## 124   2024-01-02       864108.0
## 125   2024-01-03       868635.9
## 126   2024-01-04       879046.2
## 127   2024-01-05       742542.4
## 128   2024-01-06       673723.6
## 129   2024-01-07       887243.9
## 130   2024-01-08       873058.7
## 131   2024-01-09       864194.7
## 132   2024-01-10       873316.9
## 133   2024-01-11       883263.8
## 134   2024-01-12       745571.9
## 135   2024-01-13       680325.5
## 136   2024-01-14       879398.1
## 137   2024-01-15       877952.4
## 138   2024-01-16       877015.9
## 139   2024-01-17       878320.9
## 140   2024-01-18       885257.6
## 141   2024-01-19       748489.1
## 142   2024-01-20       677683.9
## 143   2024-01-21       893296.4
## 144   2024-01-22       881547.5
## 145   2024-01-23       868977.7
## 146   2024-01-24       879298.7
## 147   2024-01-25       891805.2
## 148   2024-01-26       754578.2
## 149   2024-01-27       689897.3
## 150   2024-01-28       890875.4
## 151   2024-01-29       884728.5
## 152   2024-01-30       884551.2
## 153   2024-01-31       886840.2
## 154   2024-02-01       892675.4
## 155   2024-02-02       755362.7
## 156   2024-02-03       685459.7
## 157   2024-02-04       895855.9
## 158   2024-02-05       889836.1
## 159   2024-02-06       879453.7
## 160   2024-02-07       886969.9
## 161   2024-02-08       898949.9
## 162   2024-02-09       762214.3
## 163   2024-02-10       695463.1
## 164   2024-02-11       903547.8
## 165   2024-02-12       892046.8
## 166   2024-02-13       887425.2
## 167   2024-02-14       893573.5
## 168   2024-02-15       901341.8
## 169   2024-02-16       763702.5
## 170   2024-02-17       696482.6
## 171   2024-02-18       899659.6
## 172   2024-02-19       897388.1
## 173   2024-02-20       892530.7
## 174   2024-02-21       896100.0
## 175   2024-02-22       905277.5
## 176   2024-02-23       768637.3
## 177   2024-02-24       699255.0
## 178   2024-02-25       912974.9
## 179   2024-02-26       900112.0
## 180   2024-02-27       890243.3
## 181   2024-02-28       899496.8
## 182   2024-02-29       910228.6
## 183   2024-03-01       772733.4
## 184   2024-03-02       707480.0
## 185   2024-03-03       907829.3
## 186   2024-03-04       904358.4
## 187   2024-03-05       903258.7
## 188   2024-03-06       905240.5
## 189   2024-03-07       911997.0
## 190   2024-03-08       775021.2
## 191   2024-03-09       704757.5
## 192   2024-03-10       918004.9
## 193   2024-03-11       908467.6
## 194   2024-03-12       897120.2
## 195   2024-03-13       906163.4
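
As an aside, the matrix/cbind construction above is what coerces the dates to numbers and forces the conversion back to Date. Building the data frame directly avoids that round trip; a minimal sketch reusing forecast_dates and data.ramalan (hasilforecast_df2 is a name introduced here):

# Sketch: build the forecast table directly, keeping ForecastDate as a Date column
hasilforecast_df2 <- data.frame(ForecastDate = forecast_dates,
                                Forecast     = as.numeric(data.ramalan[1:195]))
head(hasilforecast_df2)
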
# Model validation

## Compute forecast errors
error <- data.frame(data.test) - data.frame(data.ramalan[1:195])

## SSE (Sum of Squared Errors)
SSE <- sum(error^2, na.rm = T)

## MSE (Mean Squared Error)
MSE <- sapply(error^2, mean, na.rm = T)


## RMSE (Root Mean Squared Error)
RMSE1 <- sqrt(MSE)
# Mean of the actual (test) values
mean_actual <- mean(data.test, na.rm = TRUE)

# RMSE as a percentage of the mean actual value
RMSE <- (RMSE1 / mean_actual) * 100

## MAD (Mean Absolute Deviation)
MAD <- sapply(abs(error), mean, na.rm = T)

## MAPE (Mean Absolute Percentage Error)
r.error <- (error / data.frame(data.test)) * 100 # relative error
MAPE <- sapply(abs(r.error), mean, na.rm = T)
akurasifarima <- data.frame(
  "Accuracy Measure" = c("SSE", "MSE", "MAPE", "RMSE", "MAD"), 
  "Forecasting" = c(SSE, MSE, MAPE, RMSE, MAD))
akurasifarima
##   Accuracy.Measure  Forecasting
## 1              SSE 1.950499e+12
## 2              MSE 1.000256e+10
## 3             MAPE 1.011910e+01
## 4             RMSE 1.296567e+01
## 5              MAD 6.431735e+04

The out-of-sample MAPE is about 10%, and the RMSE is about 13% of the mean of the test data.
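
These figures can be cross-checked against the accuracy measures computed by the forecast package; a minimal sketch using the ramalan forecast object and the test set defined above:

# Sketch: training- and test-set accuracy measures from the forecast package
forecast::accuracy(ramalan, data.test)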

residu <- residuals(model2)

# Jarque-Bera test for normality of the residuals
jarque.bera.test(residu)
## 
##  Jarque Bera Test
## 
## data:  residu
## X-squared = 2838.3, df = 2, p-value < 2.2e-16
# Ljung-Box test on the residuals
Box.test(residu, type = "Ljung-Box")
## 
##  Box-Ljung test
## 
## data:  residu
## X-squared = 0.65239, df = 1, p-value = 0.4193
# Shapiro-Wilk test on the residuals
shapiro.test(residu)
## 
##  Shapiro-Wilk normality test
## 
## data:  residu
## W = 0.83991, p-value < 2.2e-16
# Basic time plot of the residuals
plot(residu, main = "Residuals of the ARIMA Model", xlab = "Time", ylab = "Residuals")

# Density plot of the residuals,
# useful for checking how close they are to a normal distribution
hist(residu, breaks = 30, probability = TRUE, main = "Density Plot of the ARIMA Residuals", xlab = "Residuals")
lines(density(residu), col = "blue")

# Correlogram (ACF plot) of the residuals,
# used to check for remaining autocorrelation
acf(residu, main = "Correlogram of the ARIMA Residuals")