library(tidyverse)
## Warning: package 'tidyverse' was built under R version 4.3.3
## Warning: package 'tibble' was built under R version 4.3.2
## Warning: package 'tidyr' was built under R version 4.3.3
## Warning: package 'readr' was built under R version 4.3.3
## Warning: package 'purrr' was built under R version 4.3.3
## Warning: package 'dplyr' was built under R version 4.3.3
## Warning: package 'stringr' was built under R version 4.3.3
## Warning: package 'forcats' was built under R version 4.3.3
## Warning: package 'lubridate' was built under R version 4.3.3
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 4.0.0 ✔ tibble 3.2.1
## ✔ lubridate 1.9.4 ✔ tidyr 1.3.1
## ✔ purrr 1.0.4
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(tseries)
## Warning: package 'tseries' was built under R version 4.3.3
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(forecast)
## Warning: package 'forecast' was built under R version 4.3.3
library(TSA)
## Warning: package 'TSA' was built under R version 4.3.3
## Registered S3 methods overwritten by 'TSA':
## method from
## fitted.Arima forecast
## plot.Arima forecast
##
## Attaching package: 'TSA'
##
## The following object is masked from 'package:readr':
##
## spec
##
## The following objects are masked from 'package:stats':
##
## acf, arima
##
## The following object is masked from 'package:utils':
##
## tar
library(MASS)
##
## Attaching package: 'MASS'
##
## The following object is masked from 'package:dplyr':
##
## select
library(ggplot2)
library(dplyr)
library(urca)
## Warning: package 'urca' was built under R version 4.3.3
library(lmtest)
## Warning: package 'lmtest' was built under R version 4.3.3
## Loading required package: zoo
##
## Attaching package: 'zoo'
##
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
data <- c(
14268.7,11548.1,13352.2,12535.2,8438.6,10760.3,10464.3,10742.4,11570.1,
10786,12664.4,14438.4,13329.9,13265,16787.5,16204.3,14234.8,17218.5,
15263.1,16678.9,16234.1,16296.1,19351.2,21359.8,18227.8,16682.9,22001.3,
19792.5,18638.7,21029.5,21361.9,22200.3,19879.8,19167.1,19035.1,19918.6,
18538.4,15978.6,20715.5,15461.7,21409.9,17251.1,19647.2,18975.8,17461.1,
18777.1,19679.5,19217,18587.3,18507.7,18017.8,16993.6,19505.2,18539.3,
21855.6,20843.4,18974,22097.8,19768.3,21509.4,17935.9,18849,18920,20585,
20312.3,19333,20575.4
)
# Inspect the data
head(data)
## [1] 14268.7 11548.1 13352.2 12535.2 8438.6 10760.3
# --- Time Series Conversion and Train/Test Split ---
# Convert the plain numeric vector into a time series object
# start = c(2020, 1) means the data begin in January 2020
# frequency = 12 means the data are monthly
ts_data <- ts(data, start = c(2020, 1), frequency = 12)
# Training set:
# take observations from the start (Jan 2020) through the end of 2024 (Dec 2024)
ts_train <- window(ts_data, start = c(2020, 1), end = c(2024, 12))
# Testing set:
# take the remaining observations from January 2025 onward
ts_test <- window(ts_data, start = c(2025, 1))
# --- Data Checks ---
# Print the number of training observations to the console
# \n adds a line break
cat("\nTraining set size:", length(ts_train), "\n")
##
## Training set size: 60
# Print the number of testing observations
cat("Testing set size :", length(ts_test), "\n")
## Testing set size : 7
# Warning logic: if the test set covers fewer than 12 months (one seasonal cycle)
if(length(ts_test) < 12) {
# print a warning so the accuracy evaluation later is treated with care
cat("⚠ WARNING: The test set is very small. Interpret MAPE with caution.\n")
}
## ⚠ WARNING: The test set is very small. Interpret MAPE with caution.
# --- Data Pattern Identification ---
autoplot(ts_train) +
geom_point(color = "blue", size = 1.5) + # Titik biru untuk penanda bulan
geom_smooth(method = "loess", se = FALSE, color = "red", linetype = "dashed") + # Garis tren halus (merah)
ggtitle("Plot Time Series: Data Impor Indonesia (2020-2024)") +
xlab("Waktu") +
ylab("Nilai Impor (Juta USD)") + # Asumsi satuan Juta USD (sesuaikan jika beda)
theme_minimal()
## `geom_smooth()` using formula = 'y ~ x'
# --- Decomposition ---
# Split the series into trend, seasonal, and random components
# type = "additive" is used as the default assumption
# (use "multiplicative" if the seasonal swings grow over time)
ds_decomp <- decompose(ts_train, type = "additive")
# Display the decomposition plot
plot(ds_decomp)
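# Optional sketch (not part of the original workflow): the estimated monthly
# seasonal effects can also be inspected numerically as a cross-check of the
# additive assumption; ds_decomp$figure holds the twelve seasonal effects.
round(ds_decomp$figure, 1)
# If these effects grew in proportion to the level of the series instead of
# staying roughly constant, type = "multiplicative" would be the better choice.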
# --- Visual Exploration: Seasonal Box Plot ---
# Build box plots by cycle (month),
# grouping the observations by calendar month (Jan - Dec)
boxplot(ts_train ~ cycle(ts_train),
xlab = "Bulan (1 = Jan, 12 = Des)",
ylab = "Nilai Impor",
main = "Box Plot Musiman Data Impor",
col = "lightblue",
border = "darkblue")
# Add a horizontal line at the overall mean (optional, for reference)
abline(h = mean(ts_train), col = "red", lty = 2)
# --- Visual Exploration: Yearly Box Plot ---
# floor(time(ts_train)) extracts just the year (2020, 2021, ...)
boxplot(ts_train ~ floor(time(ts_train)),
xlab = "Tahun",
ylab = "Nilai Impor (Juta USD)",
main = "Perbandingan Distribusi Impor per Tahun (2020-2024)",
col = "lightgreen", # Saya beri warna hijau biar beda dengan yang bulanan
border = "darkgreen")
# A. Box-Cox Visualization (Manual 95% CI Check)
# Place this FIRST as visual evidence in the report
library(MASS)
par(mfrow=c(1,1)) # Reset the plotting layout
# Draw the Box-Cox log-likelihood profile
bc <- boxcox(ts_train ~ 1, lambda = seq(-2, 2, 0.1))
# Add reference lines
abline(v = 1, col = "red", lty = 2, lwd = 2) # Lambda = 1 (original data)
abline(v = 0, col = "green", lty = 2, lwd = 2) # Lambda = 0 (log transform)
title(main = "Box-Cox Plot 95% Confidence Interval")
# --- MANUAL INTERPRETATION (IMPORTANT) ---
# Read the plot above:
# if the red line (lambda = 1) lies INSIDE the dashed 95% interval --> the original data are fine;
# if the red line lies OUTSIDE the interval --> a transformation is needed.
# Check variance stationarity (Box-Cox method)
# Estimate the optimal lambda (power)
lambda <- BoxCox.lambda(ts_train)
cat("Nilai Lambda Optimal:", lambda, "\n")
## Nilai Lambda Optimal: 1.17993
# Decision rule:
# the estimated lambda is about 1.18 (close to 1), so a tolerance band around 1 is used.
# Lambda values between roughly 0.8 and 1.2 are commonly treated as close enough to skip transformation.
if(lambda > 0.80 && lambda < 1.20) {
cat("-> KEPUTUSAN: Lambda dekat dengan 1. Varians Stabil. \n")
cat("-> Gunakan DATA ASLI.\n")
ts_used <- ts_train
} else {
cat("-> KEPUTUSAN: Varians TIDAK Stabil. \n")
cat("-> Lakukan Transformasi LOGARITMA.\n")
ts_used <- log(ts_train)
}
## -> DECISION: Lambda is close to 1. Variance is stable.
## -> Use the ORIGINAL data.
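# Hypothetical branch (sketch only, not executed here): had lambda fallen
# outside the 0.8-1.2 band, the Box-Cox transformation itself could be applied
# with the forecast package and inverted after forecasting; ts_bc and ts_back
# are illustrative names only.
# ts_bc   <- BoxCox(ts_train, lambda = lambda)   # transform the series
# ts_back <- InvBoxCox(ts_bc, lambda = lambda)   # undo the transformation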
# ADF (Augmented Dickey-Fuller) test on the selected series
# Null hypothesis (H0): the series is NOT stationary (has a unit root)
# Alternative (H1): the series is stationary
adf_result <- adf.test(ts_used)
print(adf_result)
##
## Augmented Dickey-Fuller Test
##
## data: ts_used
## Dickey-Fuller = -1.5028, Lag order = 3, p-value = 0.7758
## alternative hypothesis: stationary
if(adf_result$p.value < 0.05) {
cat("\nFINAL CONCLUSION: The series is already stationary (mean & variance). Proceed to ARIMA.\n")
} else {
cat("\nFINAL CONCLUSION: The series is NOT yet stationary in the mean.\n")
cat("-> p-value > 0.05. Differencing (d = 1) is required.\n")
}
##
## FINAL CONCLUSION: The series is NOT yet stationary in the mean.
## -> p-value > 0.05. Differencing (d = 1) is required.
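# Optional cross-check (sketch, not part of the original workflow): the
# forecast package can suggest how many regular and seasonal differences are
# needed; the results can be compared with the manual conclusion above.
ndiffs(ts_train)   # suggested number of regular differences (d)
nsdiffs(ts_train)  # suggested number of seasonal differences (D)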
# --- Initial Model Identification (Before Differencing) ---
# ggtsdisplay shows the series, ACF, and PACF in one figure
# lag.max = 48 inspects patterns up to 4 years back (4 x 12 months)
ggtsdisplay(ts_train, lag.max = 48, main = "Identification Plots: Original Data (Before Differencing)")
### DIFFERENCING
# --- Treatment: First Differencing (d = 1) & Model Identification ---
# 1. Apply first differencing (this month's value minus last month's)
ts_diff <- diff(ts_train, differences = 1)
# 2. Show the ACF & PACF of the differenced series
# These are the KEY plots for choosing p and q
ggtsdisplay(ts_diff, lag.max = 36, main = "Identification Plots After Differencing")
# 3. Re-check with the ADF test (formal confirmation)
adf_diff <- adf.test(ts_diff)
## Warning in adf.test(ts_diff): p-value smaller than printed p-value
print(adf_diff)
##
## Augmented Dickey-Fuller Test
##
## data: ts_diff
## Dickey-Fuller = -5.1498, Lag order = 3, p-value = 0.01
## alternative hypothesis: stationary
# Is the differenced series stationary?
if(adf_diff$p.value < 0.05) {
cat("-> RESULT: p-value < 0.05. The series is now stationary. Ready to specify the model.\n")
} else {
cat("-> RESULT: Still not stationary. Re-examine the plots.\n")
}
## -> RESULT: p-value < 0.05. The series is now stationary. Ready to specify the model.
# --- Seasonal Treatment: Seasonal Differencing (D = 1) ---
# Lags 12 & 24 were still high in the previous plots, so seasonal differencing is needed.
# Take a lag-12 difference of the already first-differenced series,
# giving d = 1 AND D = 1 in total.
ts_diff_seasonal <- diff(ts_diff, lag = 12)
# Final ACF/PACF plots (ready for model specification)
ggtsdisplay(ts_diff_seasonal, lag.max = 36,
main = "Plot Identifikasi Akhir (d=1, D=1)")
# ADF test again (formal confirmation)
print(adf.test(ts_diff_seasonal))
##
## Augmented Dickey-Fuller Test
##
## data: ts_diff_seasonal
## Dickey-Fuller = -3.7856, Lag order = 3, p-value = 0.02812
## alternative hypothesis: stationary
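# Optional cross-check (sketch): the urca package loaded above provides a more
# detailed ADF test whose summary() reports the test statistic alongside its
# critical values; type = "trend" and lags = 3 roughly mirror the specification
# used by adf.test() above.
summary(ur.df(ts_diff_seasonal, type = "trend", lags = 3))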
# --- Parameter Estimation (Manual Box-Jenkins) ---
# Candidate models are based on the ACF/PACF plots above.
# Note: d = 1 and D = 1 are fixed by the differencing steps.
# Candidate 1: MA(1) and SMA(1) -> the ACF cuts off at lags 1 & 12
model_manual_1 <- Arima(ts_train, order = c(0,1,1), seasonal = c(0,1,1))
# Candidate 2: AR(1) and SAR(1) -> the PACF cuts off at lags 1 & 12
model_manual_2 <- Arima(ts_train, order = c(1,1,0), seasonal = c(1,1,0))
# Candidate 3: mixed model (often the best fit)
model_manual_3 <- Arima(ts_train, order = c(1,1,1), seasonal = c(1,1,1))
# --- Best Model Selection ---
# Choose the candidate with the SMALLEST AIC
cat("--- COMPARISON OF MANUAL CANDIDATE MODELS ---\n")
## --- COMPARISON OF MANUAL CANDIDATE MODELS ---
cat("AIC Model 1:", model_manual_1$aic, "\n")
## AIC Model 1: 853.5919
cat("AIC Model 2:", model_manual_2$aic, "\n")
## AIC Model 2: 850.1456
cat("AIC Model 3:", model_manual_3$aic, "\n")
## AIC Model 3: 853.2796
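# Optional sketch: only AIC is compared above, but each fitted Arima object
# also stores AICc and BIC, so the three candidates can be tabulated in one
# data frame (smaller is better for all three criteria).
data.frame(
model = c("(0,1,1)(0,1,1)[12]", "(1,1,0)(1,1,0)[12]", "(1,1,1)(1,1,1)[12]"),
AIC = c(model_manual_1$aic, model_manual_2$aic, model_manual_3$aic),
AICc = c(model_manual_1$aicc, model_manual_2$aicc, model_manual_3$aicc),
BIC = c(model_manual_1$bic, model_manual_2$bic, model_manual_3$bic)
)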
# --- Diagnostic Checking ---
# Refit the best model (Model 2) and store it as 'best_model'
best_model <- Arima(ts_train, order = c(1,1,0), seasonal = c(1,1,0))
# A. Visual residual analysis
# checkresiduals() automatically produces three key plots:
# 1. Residuals over time (should fluctuate randomly around 0)
# 2. Residual ACF (no bars should exceed the blue bounds)
# 3. Histogram (should be roughly bell-shaped / normal)
checkresiduals(best_model)
##
## Ljung-Box test
##
## data: Residuals from ARIMA(1,1,0)(1,1,0)[12]
## Q* = 10.428, df = 10, p-value = 0.4038
##
## Model df: 2. Total lags used: 12
# B. Ljung-Box test (formal test)
# Null hypothesis (H0): residuals are white noise (model is adequate)
# Alternative (H1): residuals still contain autocorrelation (model is inadequate)
# We want a p-value > 0.05 so that H0 is not rejected.
lb_test <- Box.test(residuals(best_model), type = "Ljung-Box", lag = 24)
print(lb_test)
##
## Box-Ljung test
##
## data: residuals(best_model)
## X-squared = 27.952, df = 24, p-value = 0.2621
# Automatic interpretation for the report
if(lb_test$p.value > 0.05) {
cat("\n-> CONCLUSION: p-value > 0.05. Fail to reject H0.\n")
cat("-> Residuals are white noise. SARIMA(1,1,0)(1,1,0)[12] is ADEQUATE for use.\n")
} else {
cat("\n-> WARNING: p-value < 0.05. Residuals are not yet white noise. The model may need refinement.\n")
}
##
## -> CONCLUSION: p-value > 0.05. Fail to reject H0.
## -> Residuals are white noise. SARIMA(1,1,0)(1,1,0)[12] is ADEQUATE for use.
# C. Residual normality test (Kolmogorov-Smirnov)
# H0: residuals are normally distributed
ks_test <- ks.test(residuals(best_model), "pnorm", mean=mean(residuals(best_model)), sd=sd(residuals(best_model)))
print(ks_test)
##
## Exact one-sample Kolmogorov-Smirnov test
##
## data: residuals(best_model)
## D = 0.14576, p-value = 0.1413
## alternative hypothesis: two-sided
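# Optional extra diagnostic (sketch, not part of the original workflow):
# coefficient significance can be checked with coeftest() from the lmtest
# package loaded above, which turns the AR/SAR estimates and their standard
# errors into z-tests.
coeftest(best_model)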
# --- Forecasting & Accuracy Evaluation ---
# A. Forecast the next 12 months (2025)
# h = 12 (one full year, the ideal test-set length)
ramalan <- forecast(best_model, h = 12)
# Print the forecast values
print(ramalan)
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Jan 2025 19611.12 17128.63 22093.62 15814.47 23407.77
## Feb 2025 19474.25 16781.00 22167.50 15355.28 23593.22
## Mar 2025 20047.48 16764.41 23330.54 15026.46 25068.49
## Apr 2025 18080.84 14513.73 21647.94 12625.42 23536.25
## May 2025 21406.45 17465.89 25347.01 15379.88 27433.02
## Jun 2025 19642.32 15421.46 23863.19 13187.07 26097.58
## Jul 2025 22668.33 18152.14 27184.52 15761.41 29575.24
## Aug 2025 21772.05 16996.63 26547.46 14468.68 29075.41
## Sep 2025 19985.54 14954.44 25016.63 12291.14 27679.93
## Oct 2025 22625.89 17356.94 27894.83 14567.72 30684.05
## Nov 2025 21170.13 15670.59 26669.66 12759.31 29580.94
## Dec 2025 22315.02 16595.86 28034.18 13568.33 31061.72
# B. Forecast visualization
# Plot the training data, the forecasts (blue), and the actual test data
autoplot(ramalan) +
autolayer(ts_test, series = "Data Aktual (Testing)", PI = FALSE) +
ggtitle("Hasil Peramalan Impor Indonesia: Forecast vs Aktual") +
xlab("Tahun") + ylab("Nilai Impor") +
theme_minimal() +
theme(legend.position = "bottom")
# C. Accuracy evaluation (MAPE)
# Compare the forecasts with the actual test data
akurasi <- accuracy(ramalan, ts_test)
print(akurasi)
## ME RMSE MAE MPE MAPE MASE
## Training set 48.41063 1677.579 1178.458 0.1514384 6.420906 0.3892369
## Test set -631.45562 1531.224 1346.931 -3.3728585 6.848569 0.4448824
## ACF1 Theil's U
## Training set -0.06617958 NA
## Test set -0.26689982 1.477907
# Extract the MAPE for the test set
mape_test <- akurasi[2, "MAPE"] # Row 2 is the "Test set" row
cat("\n--- MAPE (Error Rate) ---\n")
##
## --- MAPE (Error Rate) ---
cat("MAPE pada Data Testing:", round(mape_test, 2), "%\n")
## MAPE pada Data Testing: 6.85 %
# MAPE interpretation
if(mape_test < 10) {
cat("-> Category: HIGHLY ACCURATE forecasting (<10%).\n")
} else if(mape_test < 20) {
cat("-> Category: GOOD forecasting (10-20%).\n")
} else if(mape_test < 50) {
cat("-> Category: REASONABLE forecasting (20-50%).\n")
} else {
cat("-> Category: INACCURATE forecasting (>50%).\n")
}
## -> Category: HIGHLY ACCURATE forecasting (<10%).
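# Optional sketch: the test-set MAPE can be verified by hand from its
# definition, mean(|actual - forecast| / actual) * 100, using the forecasts
# that overlap the test period; this should match the accuracy() value above.
# fc_overlap and mape_manual are illustrative names only.
fc_overlap <- window(ramalan$mean, end = end(ts_test))
mape_manual <- mean(abs((ts_test - fc_overlap) / ts_test)) * 100
round(mape_manual, 2)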
# --- Holt-Winters Exponential Smoothing ---
# Two Holt-Winters variants are tried:
# 1. Additive: the width of the seasonal swings stays constant.
# 2. Multiplicative: the seasonal swings grow/shrink over time.
# A. Fit the models & forecast
# h = length of the test set
h_period <- length(ts_test)
# Model 1: Holt-Winters Additive
fit_hw_add <- hw(ts_train, seasonal = "additive", h = h_period)
# Model 2: Holt-Winters Multiplicative
fit_hw_mul <- hw(ts_train, seasonal = "multiplicative", h = h_period)
# B. Check accuracy (MAPE) on the test set
# Compare which model has the smaller error
acc_add <- accuracy(fit_hw_add, ts_test)
acc_mul <- accuracy(fit_hw_mul, ts_test)
cat("--- PERBANDINGAN AKURASI (MAPE) ---\n")
## --- PERBANDINGAN AKURASI (MAPE) ---
cat("1. HW Additive MAPE :", acc_add[2, "MAPE"], "%\n")
## 1. HW Additive MAPE : 7.889685 %
cat("2. HW Multiplicative MAPE:", acc_mul[2, "MAPE"], "%\n")
## 2. HW Multiplicative MAPE: 6.078624 %
# C. Visual comparison
autoplot(ts_train) +
autolayer(fit_hw_add, series = "HW Additive", PI = FALSE) +
autolayer(fit_hw_mul, series = "HW Multiplicative", PI = FALSE) +
autolayer(ts_test, series = "Data Aktual (Test)", color = "black") +
ggtitle("Perbandingan Forecast: Holt-Winters vs Data Aktual") +
xlab("Tahun") + ylab("Nilai Impor") +
theme_minimal() +
theme(legend.position = "bottom")
### Time Series Regression
# --- Time Series Regression ---
# TSLM (Time Series Linear Model) is used here.
# The formula "trend + season" asks the model to fit
# a straight-line trend AND a monthly seasonal pattern
# that best match the data.
# A. Fit the regression model
model_regresi <- tslm(ts_train ~ trend + season)
# Show the regression summary
summary(model_regresi)
##
## Call:
## tslm(formula = ts_train ~ trend + season)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4437.7 -1480.3 -175.8 1307.2 4312.1
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 12872.52 1171.21 10.991 1.39e-14 ***
## trend 148.72 18.33 8.112 1.74e-10 ***
## season2 -1542.68 1524.35 -1.012 0.317
## season3 1287.01 1524.68 0.844 0.403
## season4 -839.11 1525.23 -0.550 0.585
## season5 -739.84 1526.00 -0.485 0.630
## season6 -374.26 1526.99 -0.245 0.807
## season7 235.70 1528.20 0.154 0.878
## season8 256.73 1529.63 0.168 0.867
## season9 -956.33 1531.28 -0.625 0.535
## season10 -504.04 1533.14 -0.329 0.744
## season11 22.12 1535.22 0.014 0.989
## season12 1062.34 1537.52 0.691 0.493
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 2410 on 47 degrees of freedom
## Multiple R-squared: 0.6211, Adjusted R-squared: 0.5244
## F-statistic: 6.42 on 12 and 47 DF, p-value: 1.416e-06
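# Optional extra check (sketch): since this is a regression on time series
# data, the residuals should be checked for autocorrelation. The lmtest
# package loaded above provides the Durbin-Watson test, and tslm objects
# inherit from lm, so the fitted model can be passed directly.
dwtest(model_regresi)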
# B. Forecast
# h = length of the test set
fcast_regresi <- forecast(model_regresi, h = length(ts_test))
# C. Visualize the regression results
autoplot(fcast_regresi) +
autolayer(ts_test, series = "Actual Data") +
autolayer(fitted(model_regresi), series = "Fitted Values") +
ggtitle("Forecast Using Time Series Regression (Trend + Seasonal)") +
xlab("Year") + ylab("Import Value") +
theme_minimal() +
theme(legend.position = "bottom")
# D. Check accuracy (MAPE)
acc_regresi <- accuracy(fcast_regresi, ts_test)
mape_regresi <- acc_regresi[2, "MAPE"]
cat("\n--- AKURASI REGRESI TIME SERIES ---\n")
##
## --- TIME SERIES REGRESSION ACCURACY ---
cat("MAPE Regresi:", round(mape_regresi, 2), "%\n")
## MAPE Regresi: 13.61 %
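# Optional closing sketch: the test-set MAPE of all four approaches can be
# collected into one table for the report, using the accuracy objects computed
# above (smaller is better).
data.frame(
model = c("SARIMA(1,1,0)(1,1,0)[12]", "Holt-Winters additive", "Holt-Winters multiplicative", "Time series regression"),
MAPE_test = c(akurasi[2, "MAPE"], acc_add[2, "MAPE"], acc_mul[2, "MAPE"], acc_regresi[2, "MAPE"])
)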