library("forecast")
## Warning: package 'forecast' was built under R version 4.3.3
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library("graphics")
library("TTR")
## Warning: package 'TTR' was built under R version 4.3.3
library("TSA")
## Warning: package 'TSA' was built under R version 4.3.3
## Registered S3 methods overwritten by 'TSA':
## method from
## fitted.Arima forecast
## plot.Arima forecast
##
## Attaching package: 'TSA'
## The following objects are masked from 'package:stats':
##
## acf, arima
## The following object is masked from 'package:utils':
##
## tar
library("readxl")
## Warning: package 'readxl' was built under R version 4.3.2
data.mpdw <- read_excel("C:/Users/user/Downloads/Database MPDW (1).xlsx", sheet=1)
data.mpdw <-data.mpdw[301:400,1:2]
data.mpdw$SS <- as.numeric(data.mpdw$SS)
data.mpdw$SS[data.mpdw$SS == 0] <- 0.01
data.mpdw <- data.frame(
Tanggal = data.mpdw$Tanggal,
SS = data.mpdw$SS
)
View the data with the View() function, inspect its structure with str(), and check its dimensions with dim().
View(data.mpdw)
str(data.mpdw)
## 'data.frame': 100 obs. of 2 variables:
## $ Tanggal: POSIXct, format: "2023-10-28" "2023-10-29" ...
## $ SS : num 8.3 9.5 8.8 7.6 5 4 4.3 2.7 3 1 ...
dim(data.mpdw)
## [1] 100 2
Convert the data so that it is treated as a time series using the ts() function.
datampdw.ts <- ts(data.mpdw$SS)
Display a summary of the data.
summary(datampdw.ts)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.010 1.000 2.800 3.062 4.700 9.500
Create a time series plot of the data.
ts.plot(datampdw.ts, xlab="Time Period ", ylab="Lama Penyinaran",
main = "Time Series Plot")
points(datampdw.ts)
The data are split into training and test sets with an 80%:20% ratio.
training_ma <- data.mpdw[1:80,]
testing_ma <- data.mpdw[81:100,]
train_ma.ts <- ts(training_ma$SS)
test_ma.ts <- ts(testing_ma$SS)
Data exploration is carried out on the full data, the training data, and the test data using time series plots.
plot(datampdw.ts, col="black",main="Plot semua data")
points(datampdw.ts)
ts.plot(train_ma.ts, col="red",main="Plot data latih")
points(train_ma.ts)
ts.plot(test_ma.ts, col="blue",main="Plot data uji")
points(test_ma.ts)
Data exploration can also be done with the ggplot2 package, after first loading it with library().
library(ggplot2)
## Warning: package 'ggplot2' was built under R version 4.3.2
ggplot() +
geom_line(data = training_ma, aes(x = Tanggal, y = SS, col = "Data Latih")) +
geom_line(data = testing_ma, aes(x = Tanggal, y = SS, col = "Data Uji")) +
labs(x = "Periode Waktu", y = "Penyinaran Matahari", color = "Legend") +
scale_colour_manual(name="Keterangan:", breaks = c("Data Latih", "Data Uji"),
values = c("blue", "red")) +
theme_bw() + theme(legend.position = "bottom",
plot.caption = element_text(hjust=0.5, size=12))
The basic idea of the Single Moving Average (SMA) is that the data in a given period are influenced by the data in previous periods. This smoothing method is suitable for stationary (constant) data patterns. Its basic principle is that the smoothed value at period t is the average of the m observations from period t back to period (t−m+1). The smoothed value at period t is then used as the forecast for period t+1.
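As a compact restatement of the paragraph above (using $m$ for the window length and $\tilde{y}_t$ for the smoothed value, notation introduced here only for illustration), the SMA smoothing and its one-step-ahead forecast are:

$$\tilde{y}_t=\frac{1}{m}\sum_{i=0}^{m-1} y_{t-i},\qquad \hat{y}_{t+1}=\tilde{y}_t$$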
Smoothing with the SMA method is done with the SMA() function. Here, smoothing is performed with the parameter m = 4.
data.sma<-SMA(train_ma.ts, n=4)
data.sma
## Time Series:
## Start = 1
## End = 80
## Frequency = 1
## [1] NA NA NA 8.5500 7.7250 6.3500 5.2250 4.0000 3.5000 2.7500
## [11] 3.2500 3.8750 4.7250 6.0250 5.9500 5.1750 4.2000 4.1000 3.5000 3.7250
## [21] 3.9250 2.7750 2.5750 3.1500 3.2750 3.8000 3.4250 2.1025 1.6525 0.8300
## [31] 1.3050 1.3050 1.1050 2.2775 1.7525 1.7525 2.3775 1.4775 1.2050 1.7775
## [41] 1.5275 1.9275 2.3000 2.0250 1.6000 1.8250 3.4250 4.3000 5.9500 6.4250
## [51] 5.7000 5.9250 5.0750 5.2500 6.1000 6.7250 6.8250 6.1750 4.4500 2.9750
## [61] 1.8275 0.9775 1.7275 1.8775 1.8775 2.1275 1.0050 0.6050 0.7275 0.5025
## [71] 0.5500 0.9500 0.8275 1.3775 2.0775 1.8025 1.8025 1.3275 1.4775 1.7525
The smoothed value at period t is then used as the forecast for period t+1, so the one-step-ahead forecasts are as follows.
data.ramal<-c(NA,data.sma)
data.ramal
## [1] NA NA NA NA 8.5500 7.7250 6.3500 5.2250 4.0000 3.5000
## [11] 2.7500 3.2500 3.8750 4.7250 6.0250 5.9500 5.1750 4.2000 4.1000 3.5000
## [21] 3.7250 3.9250 2.7750 2.5750 3.1500 3.2750 3.8000 3.4250 2.1025 1.6525
## [31] 0.8300 1.3050 1.3050 1.1050 2.2775 1.7525 1.7525 2.3775 1.4775 1.2050
## [41] 1.7775 1.5275 1.9275 2.3000 2.0250 1.6000 1.8250 3.4250 4.3000 5.9500
## [51] 6.4250 5.7000 5.9250 5.0750 5.2500 6.1000 6.7250 6.8250 6.1750 4.4500
## [61] 2.9750 1.8275 0.9775 1.7275 1.8775 1.8775 2.1275 1.0050 0.6050 0.7275
## [71] 0.5025 0.5500 0.9500 0.8275 1.3775 2.0775 1.8025 1.8025 1.3275 1.4775
## [81] 1.7525
Next, forecasts are made for as many periods as the test data, i.e. 20 periods. With the SMA method, the forecasts for all 20 periods ahead are equal to the one-step-ahead forecast. The actual training data, the smoothed values, and the 20-period-ahead forecasts are now combined.
data.gab<-cbind(aktual=c(train_ma.ts,rep(NA,20)),pemulusan=c(data.sma,rep(NA,20)),ramalan=c(data.ramal,rep(data.ramal[length(data.ramal)],19)))
data.gab
## aktual pemulusan ramalan
## [1,] 8.30 NA NA
## [2,] 9.50 NA NA
## [3,] 8.80 NA NA
## [4,] 7.60 8.5500 NA
## [5,] 5.00 7.7250 8.5500
## [6,] 4.00 6.3500 7.7250
## [7,] 4.30 5.2250 6.3500
## [8,] 2.70 4.0000 5.2250
## [9,] 3.00 3.5000 4.0000
## [10,] 1.00 2.7500 3.5000
## [11,] 6.30 3.2500 2.7500
## [12,] 5.20 3.8750 3.2500
## [13,] 6.40 4.7250 3.8750
## [14,] 6.20 6.0250 4.7250
## [15,] 6.00 5.9500 6.0250
## [16,] 2.10 5.1750 5.9500
## [17,] 2.50 4.2000 5.1750
## [18,] 5.80 4.1000 4.2000
## [19,] 3.60 3.5000 4.1000
## [20,] 3.00 3.7250 3.5000
## [21,] 3.30 3.9250 3.7250
## [22,] 1.20 2.7750 3.9250
## [23,] 2.80 2.5750 2.7750
## [24,] 5.30 3.1500 2.5750
## [25,] 3.80 3.2750 3.1500
## [26,] 3.30 3.8000 3.2750
## [27,] 1.30 3.4250 3.8000
## [28,] 0.01 2.1025 3.4250
## [29,] 2.00 1.6525 2.1025
## [30,] 0.01 0.8300 1.6525
## [31,] 3.20 1.3050 0.8300
## [32,] 0.01 1.3050 1.3050
## [33,] 1.20 1.1050 1.3050
## [34,] 4.70 2.2775 1.1050
## [35,] 1.10 1.7525 2.2775
## [36,] 0.01 1.7525 1.7525
## [37,] 3.70 2.3775 1.7525
## [38,] 1.10 1.4775 2.3775
## [39,] 0.01 1.2050 1.4775
## [40,] 2.30 1.7775 1.2050
## [41,] 2.70 1.5275 1.7775
## [42,] 2.70 1.9275 1.5275
## [43,] 1.50 2.3000 1.9275
## [44,] 1.20 2.0250 2.3000
## [45,] 1.00 1.6000 2.0250
## [46,] 3.60 1.8250 1.6000
## [47,] 7.90 3.4250 1.8250
## [48,] 4.70 4.3000 3.4250
## [49,] 7.60 5.9500 4.3000
## [50,] 5.50 6.4250 5.9500
## [51,] 5.00 5.7000 6.4250
## [52,] 5.60 5.9250 5.7000
## [53,] 4.20 5.0750 5.9250
## [54,] 6.20 5.2500 5.0750
## [55,] 8.40 6.1000 5.2500
## [56,] 8.10 6.7250 6.1000
## [57,] 4.60 6.8250 6.7250
## [58,] 3.60 6.1750 6.8250
## [59,] 1.50 4.4500 6.1750
## [60,] 2.20 2.9750 4.4500
## [61,] 0.01 1.8275 2.9750
## [62,] 0.20 0.9775 1.8275
## [63,] 4.50 1.7275 0.9775
## [64,] 2.80 1.8775 1.7275
## [65,] 0.01 1.8775 1.8775
## [66,] 1.20 2.1275 1.8775
## [67,] 0.01 1.0050 2.1275
## [68,] 1.20 0.6050 1.0050
## [69,] 0.50 0.7275 0.6050
## [70,] 0.30 0.5025 0.7275
## [71,] 0.20 0.5500 0.5025
## [72,] 2.80 0.9500 0.5500
## [73,] 0.01 0.8275 0.9500
## [74,] 2.50 1.3775 0.8275
## [75,] 3.00 2.0775 1.3775
## [76,] 1.70 1.8025 2.0775
## [77,] 0.01 1.8025 1.8025
## [78,] 0.60 1.3275 1.8025
## [79,] 3.60 1.4775 1.3275
## [80,] 2.80 1.7525 1.4775
## [81,] NA NA 1.7525
## [82,] NA NA 1.7525
## [83,] NA NA 1.7525
## [84,] NA NA 1.7525
## [85,] NA NA 1.7525
## [86,] NA NA 1.7525
## [87,] NA NA 1.7525
## [88,] NA NA 1.7525
## [89,] NA NA 1.7525
## [90,] NA NA 1.7525
## [91,] NA NA 1.7525
## [92,] NA NA 1.7525
## [93,] NA NA 1.7525
## [94,] NA NA 1.7525
## [95,] NA NA 1.7525
## [96,] NA NA 1.7525
## [97,] NA NA 1.7525
## [98,] NA NA 1.7525
## [99,] NA NA 1.7525
## [100,] NA NA 1.7525
The time series plot of the forecasting results is as follows.
ts.plot(datampdw.ts, xlab="Periode ", ylab="SS", main= "SMA N=4 Data SS")
points(datampdw.ts)
lines(data.gab[,2],col="green",lwd=2)
lines(data.gab[,3],col="red",lwd=2)
legend("topleft",c("data aktual","data pemulusan","data peramalan"), lty=8, col=c("black","green","red"), cex=0.5)
Next, accuracy is computed using the Sum of Squared Errors (SSE), Mean Squared Error (MSE), Mean Absolute Percentage Error (MAPE), and Root Mean Squared Error (RMSE), on both the training data and the test data.
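For reference, writing $e_t = y_t - \hat{y}_t$ for the forecast error and $n$ for the number of evaluated periods (notation used here only for illustration), the four accuracy measures are defined as:

$$\mathrm{SSE}=\sum_{t} e_t^2,\qquad \mathrm{MSE}=\frac{1}{n}\sum_{t} e_t^2,\qquad \mathrm{RMSE}=\sqrt{\mathrm{MSE}},\qquad \mathrm{MAPE}=\frac{100}{n}\sum_{t}\left|\frac{e_t}{y_t}\right|$$

These definitions mirror the computations in the code below.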
error_train.sma = train_ma.ts-data.ramal[1:length(train_ma.ts)]
SSE_train.sma = sum(error_train.sma[5:length(train_ma.ts)]^2)
MSE_train.sma = mean(error_train.sma[5:length(train_ma.ts)]^2)
MAPE_train.sma = mean(abs((error_train.sma[5:length(train_ma.ts)]/train_ma.ts[5:length(train_ma.ts)])*100))
RMSE_train.sma <- sqrt(MSE_train.sma)
akurasi_train.sma <- matrix(c(SSE_train.sma, MSE_train.sma, MAPE_train.sma, RMSE_train.sma))
row.names(akurasi_train.sma)<- c("SSE", "MSE", "MAPE", "RMSE")
colnames(akurasi_train.sma) <- c("Akurasi m = 4")
akurasi_train.sma
## Akurasi m = 4
## SSE 340.980806
## MSE 4.486590
## MAPE 2599.998615
## RMSE 2.118157
The MAPE on the training data is well above 30%, so the forecast is categorized as poor; note that the extremely large MAPE values here are inflated by the near-zero actual values (zeros were replaced by 0.01). Next, accuracy is computed on the test data.
error_test.sma = test_ma.ts-data.gab[81:100,3]
SSE_test.sma = sum(error_test.sma^2)
MSE_test.sma = mean(error_test.sma^2)
MAPE_test.sma = mean(abs((error_test.sma/test_ma.ts*100)))
RMSE_test.sma <- sqrt(MSE_test.sma)
akurasi_test.sma <- matrix(c(SSE_test.sma, MSE_test.sma, MAPE_test.sma, RMSE_test.sma))
row.names(akurasi_test.sma)<- c("SSE", "MSE", "MAPE", "RMSE")
colnames(akurasi_test.sma) <- c("Akurasi m = 4")
akurasi_test.sma
## Akurasi m = 4
## SSE 137.606375
## MSE 6.880319
## MAPE 4425.461737
## RMSE 2.623036
The accuracy computed on the test data also yields a MAPE above 30%, so this forecast is likewise categorized as not very good.
The Double Moving Average (DMA) smoothing method is essentially similar to SMA, but it is better suited to data with a trend pattern. In this method the moving-average smoothing is applied twice.
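Spelled out (with $S'_t$ the first smoothing, $S''_t$ the second smoothing applied to $S'$, and $m = 4$ as in the code below; these symbols are used here only for illustration), the DMA components and the $h$-step-ahead forecast are:

$$S'_t=\frac{1}{m}\sum_{i=0}^{m-1}y_{t-i},\qquad S''_t=\frac{1}{m}\sum_{i=0}^{m-1}S'_{t-i},\qquad A_t=2S'_t-S''_t,\qquad B_t=\frac{2}{m-1}\bigl(S'_t-S''_t\bigr),\qquad \hat{y}_{t+h}=A_t+B_t\,h$$

These correspond to the At, Bt, and f objects computed below.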
dma <- SMA(data.sma, n = 4)
At <- 2*data.sma - dma
Bt <- 2/(4-1)*(data.sma - dma)
data.dma<- At+Bt
data.ramal2<- c(NA, data.dma)
t = 1:20
f = c()
for (i in t) {
f[i] = At[length(At)] + Bt[length(Bt)]*(i)
}
data.gab2 <- cbind(aktual = c(train_ma.ts,rep(NA,20)), pemulusan1 = c(data.sma,rep(NA,20)),pemulusan2 = c(data.dma, rep(NA,20)),At = c(At, rep(NA,20)), Bt = c(Bt,rep(NA,20)),ramalan = c(data.ramal2, f[-1]))
data.gab2
## aktual pemulusan1 pemulusan2 At Bt ramalan
## [1,] 8.30 NA NA NA NA NA
## [2,] 9.50 NA NA NA NA NA
## [3,] 8.80 NA NA NA NA NA
## [4,] 7.60 8.5500 NA NA NA NA
## [5,] 5.00 7.7250 NA NA NA NA
## [6,] 4.00 6.3500 NA NA NA NA
## [7,] 4.30 5.2250 2.32916667 3.487500 -1.158333333 NA
## [8,] 2.70 4.0000 0.95833333 2.175000 -1.216666667 2.32916667
## [9,] 3.00 3.5000 1.38541667 2.231250 -0.845833333 0.95833333
## [10,] 1.00 2.7500 0.88541667 1.631250 -0.745833333 1.38541667
## [11,] 6.30 3.2500 3.04166667 3.125000 -0.083333333 0.88541667
## [12,] 5.20 3.8750 4.76041667 4.406250 0.354166667 3.04166667
## [13,] 6.40 4.7250 6.51666667 5.800000 0.716666667 4.76041667
## [14,] 6.20 6.0250 8.61875000 7.581250 1.037500000 6.51666667
## [15,] 6.00 5.9500 7.29375000 6.756250 0.537500000 8.61875000
## [16,] 2.10 5.1750 4.68541667 4.881250 -0.195833333 7.29375000
## [17,] 2.50 4.2000 2.30416667 3.062500 -0.758333333 4.68541667
## [18,] 5.80 4.1000 2.83958333 3.343750 -0.504166667 2.30416667
## [19,] 3.60 3.5000 2.26041667 2.756250 -0.495833333 2.83958333
## [20,] 3.00 3.7250 3.46458333 3.568750 -0.104166667 2.26041667
## [21,] 3.30 3.9250 4.11250000 4.037500 0.075000000 3.46458333
## [22,] 1.20 2.7750 1.59791667 2.068750 -0.470833333 4.11250000
## [23,] 2.80 2.5750 1.45000000 1.900000 -0.450000000 1.59791667
## [24,] 5.30 3.1500 3.22291667 3.193750 0.029166667 1.45000000
## [25,] 3.80 3.2750 3.82708333 3.606250 0.220833333 3.22291667
## [26,] 3.30 3.8000 4.80000000 4.400000 0.400000000 3.82708333
## [27,] 1.30 3.4250 3.44583333 3.437500 0.008333333 4.80000000
## [28,] 0.01 2.1025 0.35562500 1.054375 -0.698750000 3.44583333
## [29,] 2.00 1.6525 -0.16833333 0.560000 -0.728333333 0.35562500
## [30,] 0.01 0.8300 -1.12416667 -0.342500 -0.781666667 -0.16833333
## [31,] 3.20 1.3050 1.02583333 1.137500 -0.111666667 -1.12416667
## [32,] 0.01 1.3050 1.35812500 1.336875 0.021250000 1.02583333
## [33,] 1.20 1.1050 1.05291667 1.073750 -0.020833333 1.35812500
## [34,] 4.70 2.2775 3.57645833 3.056875 0.519583333 1.05291667
## [35,] 1.10 1.7525 1.99000000 1.895000 0.095000000 3.57645833
## [36,] 0.01 1.7525 1.80354167 1.783125 0.020416667 1.99000000
## [37,] 3.70 2.3775 2.94000000 2.715000 0.225000000 1.80354167
## [38,] 1.10 1.4775 0.87333333 1.115000 -0.241666667 2.94000000
## [39,] 0.01 1.2050 0.37479167 0.706875 -0.332083333 0.87333333
## [40,] 2.30 1.7775 1.89104167 1.845625 0.045416667 0.37479167
## [41,] 2.70 1.5275 1.57854167 1.558125 0.020416667 1.89104167
## [42,] 2.70 1.9275 2.45770833 2.245625 0.212083333 1.57854167
## [43,] 1.50 2.3000 2.99479167 2.716875 0.277916667 2.45770833
## [44,] 1.20 2.0250 2.15833333 2.105000 0.053333333 2.99479167
## [45,] 1.00 1.6000 0.99479167 1.236875 -0.242083333 2.15833333
## [46,] 3.60 1.8250 1.63750000 1.712500 -0.075000000 0.99479167
## [47,] 7.90 3.4250 5.43541667 4.631250 0.804166667 1.63750000
## [48,] 4.70 4.3000 6.82083333 5.812500 1.008333333 5.43541667
## [49,] 7.60 5.9500 9.40833333 8.025000 1.383333333 6.82083333
## [50,] 5.50 6.4250 8.75833333 7.825000 0.933333333 9.40833333
## [51,] 5.00 5.7000 5.87708333 5.806250 0.070833333 8.75833333
## [52,] 5.60 5.9250 5.80000000 5.850000 -0.050000000 5.87708333
## [53,] 4.20 5.0750 3.89791667 4.368750 -0.470833333 5.80000000
## [54,] 6.20 5.2500 4.85416667 5.012500 -0.158333333 3.89791667
## [55,] 8.40 6.1000 6.95416667 6.612500 0.341666667 4.85416667
## [56,] 8.10 6.7250 8.28750000 7.662500 0.625000000 6.95416667
## [57,] 4.60 6.8250 7.82500000 7.425000 0.400000000 8.28750000
## [58,] 3.60 6.1750 5.70625000 5.893750 -0.187500000 7.82500000
## [59,] 1.50 4.4500 1.79375000 2.856250 -1.062500000 5.70625000
## [60,] 2.20 2.9750 -0.57708333 0.843750 -1.420833333 1.79375000
## [61,] 0.01 1.8275 -1.55479167 -0.201875 -1.352916667 -0.57708333
## [62,] 0.20 0.9775 -1.65583333 -0.602500 -1.053333333 -1.55479167
## [63,] 4.50 1.7275 1.47854167 1.578125 -0.099583333 -1.65583333
## [64,] 2.80 1.8775 2.33583333 2.152500 0.183333333 1.47854167
## [65,] 0.01 1.8775 2.31500000 2.140000 0.175000000 2.33583333
## [66,] 1.20 2.1275 2.50250000 2.352500 0.150000000 2.31500000
## [67,] 0.01 1.0050 -0.18979167 0.288125 -0.477916667 2.50250000
## [68,] 1.20 0.6050 -0.72625000 -0.193750 -0.532500000 -0.18979167
## [69,] 0.50 0.7275 0.07958333 0.338750 -0.259166667 -0.72625000
## [70,] 0.30 0.5025 0.15666667 0.295000 -0.138333333 0.07958333
## [71,] 0.20 0.5500 0.47291667 0.503750 -0.030833333 0.15666667
## [72,] 2.80 0.9500 1.39583333 1.217500 0.178333333 0.47291667
## [73,] 0.01 0.8275 1.02750000 0.947500 0.080000000 1.39583333
## [74,] 2.50 1.3775 2.12958333 1.828750 0.300833333 1.02750000
## [75,] 3.00 2.0775 3.35979167 2.846875 0.512916667 2.12958333
## [76,] 1.70 1.8025 2.27125000 2.083750 0.187500000 3.35979167
## [77,] 0.01 1.8025 1.86500000 1.840000 0.025000000 2.27125000
## [78,] 0.60 1.3275 0.61916667 0.902500 -0.283333333 1.86500000
## [79,] 3.60 1.4775 1.26916667 1.352500 -0.083333333 0.61916667
## [80,] 2.80 1.7525 2.02333333 1.915000 0.108333333 1.26916667
## [81,] NA NA NA NA NA 2.02333333
## [82,] NA NA NA NA NA 2.13166667
## [83,] NA NA NA NA NA 2.24000000
## [84,] NA NA NA NA NA 2.34833333
## [85,] NA NA NA NA NA 2.45666667
## [86,] NA NA NA NA NA 2.56500000
## [87,] NA NA NA NA NA 2.67333333
## [88,] NA NA NA NA NA 2.78166667
## [89,] NA NA NA NA NA 2.89000000
## [90,] NA NA NA NA NA 2.99833333
## [91,] NA NA NA NA NA 3.10666667
## [92,] NA NA NA NA NA 3.21500000
## [93,] NA NA NA NA NA 3.32333333
## [94,] NA NA NA NA NA 3.43166667
## [95,] NA NA NA NA NA 3.54000000
## [96,] NA NA NA NA NA 3.64833333
## [97,] NA NA NA NA NA 3.75666667
## [98,] NA NA NA NA NA 3.86500000
## [99,] NA NA NA NA NA 3.97333333
## [100,] NA NA NA NA NA 4.08166667
The DMA smoothing results are visualized as follows.
ts.plot(datampdw.ts, xlab="Time Period ", ylab="Sales", main= "DMA N=4 Data Sales")
points(datampdw.ts)
lines(data.gab2[,3],col="green",lwd=2)
lines(data.gab2[,6],col="red",lwd=2)
legend("topleft",c("data aktual","data pemulusan","data peramalan"), lty=8, col=c("black","green","red"), cex=0.8)
Next, accuracy is again computed using the Sum of Squared Errors (SSE), Mean Squared Error (MSE), Mean Absolute Percentage Error (MAPE), and Root Mean Squared Error (RMSE), on both the training data and the test data.
error_train.dma = train_ma.ts-data.ramal2[1:length(train_ma.ts)]
SSE_train.dma = sum(error_train.dma[8:length(train_ma.ts)]^2)
MSE_train.dma = mean(error_train.dma[8:length(train_ma.ts)]^2)
MAPE_train.dma = mean(abs((error_train.dma[8:length(train_ma.ts)]/train_ma.ts[8:length(train_ma.ts)])*100))
RMSE_train.dma <- sqrt(MSE_train.dma)
akurasi_train.dma <- matrix(c(SSE_train.dma, MSE_train.dma, MAPE_train.dma, RMSE_train.dma))
row.names(akurasi_train.dma)<- c("SSE", "MSE", "MAPE", "RMSE")
colnames(akurasi_train.dma) <- c("Akurasi m = 4")
akurasi_train.dma
## Akurasi m = 4
## SSE 445.345507
## MSE 6.100623
## MAPE 2343.908837
## RMSE 2.469944
The MAPE on the training data is above 30%, so the forecast is categorized as poor. Next, accuracy is computed on the test data.
error_test.dma = test_ma.ts-data.gab2[81:100,6]
SSE_test.dma = sum(error_test.dma^2)
MSE_test.dma = mean(error_test.dma^2)
MAPE_test.dma = mean(abs((error_test.dma/test_ma.ts*100)))
RMSE_test.dma <- sqrt(MSE_test.dma)
akurasi_test.dma <- matrix(c(SSE_test.dma, MSE_test.dma, MAPE_test.dma, RMSE_test.dma))
row.names(akurasi_test.dma)<- c("SSE", "MSE", "MAPE", "RMSE")
colnames(akurasi_test.dma) <- c("Akurasi m = 4")
akurasi_test.dma
## Akurasi m = 4
## SSE 161.672806
## MSE 8.083640
## MAPE 8510.108775
## RMSE 2.843174
The accuracy computed on the test data also yields a MAPE above 30%, so this forecast is categorized as not very good.
On the training data, DMA achieves a lower MAPE than SMA, while on the test data SMA outperforms DMA on every accuracy measure.
Exponential smoothing is a smoothing method that applies exponentially decreasing weights: more recent observations receive larger weights than older ones. One or more smoothing parameters are specified explicitly, and the chosen values determine the weights assigned to the observations. There are two variants: single and double exponential smoothing.
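To see the exponentially decreasing weights explicitly, the single exponential smoothing recursion used in the next subsection can be expanded (a sketch, with $\lambda$ the smoothing parameter and $\tilde{y}_0$ an initial value):

$$\tilde{y}_T=\lambda y_T+\lambda(1-\lambda)y_{T-1}+\lambda(1-\lambda)^2 y_{T-2}+\cdots+(1-\lambda)^T\tilde{y}_0$$

so the observation $y_{T-j}$ receives a weight $\lambda(1-\lambda)^j$ that shrinks geometrically with its age $j$.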
training<-data.mpdw[1:80,]
testing<-data.mpdw[81:100,]
train.ts <- ts(training$SS)
test.ts <- ts(testing$SS)
plot(datampdw.ts, col="black",main="Plot semua data")
points(datampdw.ts)
plot(train.ts, col="red",main="Plot data latih")
points(train.ts)
plot(test.ts, col="blue",main="Plot data uji")
points(test.ts)
Data exploration can also be done using the ggplot2 package.
library(ggplot2)
ggplot() +
geom_line(data = training, aes(x = Tanggal, y = SS, col = "Data Latih")) +
geom_line(data = testing, aes(x = Tanggal, y = SS, col = "Data Uji")) +
labs(x = "Periode Waktu", y = "Penyinaran Matahari", color = "Legend") +
scale_colour_manual(name="Keterangan:", breaks = c("Data Latih", "Data Uji"),
values = c("blue", "red")) +
theme_bw() + theme(legend.position = "bottom",
plot.caption = element_text(hjust=0.5, size=12))
Single Exponential Smoothing (SES) is a smoothing method suited to data with a stationary or constant pattern.
The smoothed value at period T is obtained from the equation:
$$\tilde{y}_T=\lambda\,y_T+(1-\lambda)\,\tilde{y}_{T-1}$$
The parameter $\lambda$ takes a value between 0 and 1. The smoothed value at period T acts as the forecast for period $T+\tau$:
$$\hat{y}_{T+\tau}(T)=\tilde{y}_T$$
SES smoothing can be performed with two functions from different packages: (1) the ses() function from the forecast package and (2) the HoltWinters() function from the stats package.
ses.1 <- ses(train.ts, h = 10, alpha = 0.2)
plot(ses.1)
ses.1
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 1.907256 -0.7807805 4.595293 -2.203741 6.018253
## 82 1.907256 -0.8340141 4.648526 -2.285155 6.099667
## 83 1.907256 -0.8862335 4.700746 -2.365018 6.179530
## 84 1.907256 -0.9374944 4.752007 -2.443414 6.257927
## 85 1.907256 -0.9878479 4.802360 -2.520423 6.334936
## 86 1.907256 -1.0373405 4.851853 -2.596116 6.410628
## 87 1.907256 -1.0860148 4.900527 -2.670557 6.485069
## 88 1.907256 -1.1339102 4.948422 -2.743806 6.558319
## 89 1.907256 -1.1810628 4.995575 -2.815920 6.630432
## 90 1.907256 -1.2275063 5.042019 -2.886949 6.701462
ses.2<- ses(train.ts, h = 10, alpha = 0.7)
plot(ses.2)
ses.2
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 2.770142 0.2590903 5.281194 -1.070180 6.610464
## 82 2.770142 -0.2949872 5.835271 -1.917569 7.457853
## 83 2.770142 -0.7632209 6.303505 -2.633670 8.173954
## 84 2.770142 -1.1762855 6.716569 -3.265398 8.805682
## 85 2.770142 -1.5500347 7.090319 -3.836998 9.377282
## 86 2.770142 -1.8939295 7.434213 -4.362940 9.903224
## 87 2.770142 -2.2141533 7.754437 -4.852680 10.392964
## 88 2.770142 -2.5150104 8.055294 -5.312801 10.853085
## 89 2.770142 -2.7996400 8.339924 -5.748105 11.288389
## 90 2.770142 -3.0704152 8.610699 -6.162219 11.702503
To plot the smoothing results on the training data obtained with the ses() function, the autoplot() and autolayer() functions from the ggplot2 package are used.
autoplot(ses.1) +
autolayer(fitted(ses.1), series="Fitted") +
ylab("Membaca") + xlab("Periode")
The ses() function has several commonly used arguments: y, gamma, beta, alpha, and h.
Here y is the time series data, gamma is the smoothing parameter for the seasonal component, beta is the smoothing parameter for the trend, alpha is the smoothing parameter for the level (the stationary component), and h is the number of periods to forecast.
The example above initializes the parameter λ with alpha values of 0.2 and 0.7 and forecasts h = 10 periods ahead. Next, the HoltWinters() function is used with the same parameter values, this time forecasting 20 periods ahead (the length of the test data).
ses1<- HoltWinters(train.ts, gamma = FALSE, beta = FALSE, alpha = 0.2)
plot(ses1)
ramalan1<- forecast(ses1, h=20)
ramalan1
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 1.907256 -0.7557109 4.570223 -2.165401 5.979913
## 82 1.907256 -0.8084481 4.622960 -2.246055 6.060567
## 83 1.907256 -0.8601804 4.674693 -2.325173 6.139685
## 84 1.907256 -0.9109633 4.725476 -2.402839 6.217351
## 85 1.907256 -0.9608472 4.775359 -2.479129 6.293642
## 86 1.907256 -1.0098781 4.824390 -2.554116 6.368628
## 87 1.907256 -1.0580985 4.872611 -2.627862 6.442375
## 88 1.907256 -1.1055472 4.920059 -2.700429 6.514941
## 89 1.907256 -1.1522601 4.966772 -2.771870 6.586382
## 90 1.907256 -1.1982704 5.012783 -2.842237 6.656749
## 91 1.907256 -1.2436090 5.058121 -2.911576 6.726089
## 92 1.907256 -1.2883043 5.102817 -2.979932 6.794444
## 93 1.907256 -1.3323831 5.146895 -3.047345 6.861857
## 94 1.907256 -1.3758701 5.190382 -3.113852 6.928365
## 95 1.907256 -1.4187886 5.233301 -3.179490 6.994003
## 96 1.907256 -1.4611604 5.275673 -3.244292 7.058805
## 97 1.907256 -1.5030056 5.317518 -3.308289 7.122802
## 98 1.907256 -1.5443437 5.358856 -3.371510 7.186023
## 99 1.907256 -1.5851924 5.399705 -3.433983 7.248495
## 100 1.907256 -1.6255689 5.440081 -3.495734 7.310246
ses2<- HoltWinters(train.ts, gamma = FALSE, beta = FALSE, alpha = 0.7)
plot(ses2)
ramalan2<- forecast(ses2, h=20)
ramalan2
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 2.770142 0.2621366 5.278147 -1.065521 6.605805
## 82 2.770142 -0.2912687 5.831553 -1.911882 7.452166
## 83 2.770142 -0.7589343 6.299218 -2.627115 8.167399
## 84 2.770142 -1.1714978 6.711782 -3.258076 8.798360
## 85 2.770142 -1.5447936 7.085078 -3.828982 9.369266
## 86 2.770142 -1.8882712 7.428555 -4.354286 9.894570
## 87 2.770142 -2.2081065 7.748390 -4.843432 10.383716
## 88 2.770142 -2.5085986 8.048883 -5.302995 10.843279
## 89 2.770142 -2.7928830 8.333167 -5.737771 11.278054
## 90 2.770142 -3.0633296 8.603614 -6.151383 11.691667
## 91 2.770142 -3.3217818 8.862066 -6.546652 12.086935
## 92 2.770142 -3.5697066 9.109991 -6.925820 12.466104
## 93 2.770142 -3.8082944 9.348578 -7.290708 12.830992
## 94 2.770142 -4.0385267 9.578811 -7.642818 13.183102
## 95 2.770142 -4.2612244 9.801508 -7.983405 13.523689
## 96 2.770142 -4.4770822 10.017366 -8.313531 13.853815
## 97 2.770142 -4.6866940 10.226978 -8.634105 14.174388
## 98 2.770142 -4.8905726 10.430857 -8.945910 14.486194
## 99 2.770142 -5.0891641 10.629448 -9.249629 14.789913
## 100 2.770142 -5.2828597 10.823144 -9.545861 15.086145
The HoltWinters() function has the same arguments as ses(). For both functions, the parameter α can be optimized so that the error is minimized, by setting alpha = NULL.
ses.opt <- ses(train.ts, h = 20, alpha = NULL)
plot(ses.opt)
ses.opt
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 2.570595 0.08260733 5.058582 -1.234454 6.375643
## 82 2.570595 -0.26051350 5.401703 -1.759212 6.900401
## 83 2.570595 -0.56632516 5.707515 -2.226910 7.368100
## 84 2.570595 -0.84486408 5.986054 -2.652899 7.794088
## 85 2.570595 -1.10234026 6.243530 -3.046675 8.187864
## 86 2.570595 -1.34291316 6.484103 -3.414599 8.555789
## 87 2.570595 -1.56953045 6.710720 -3.761180 8.902370
## 88 2.570595 -1.78437130 6.925561 -4.089751 9.230941
## 89 2.570595 -1.98910062 7.130290 -4.402858 9.544047
## 90 2.570595 -2.18502449 7.326214 -4.702497 9.843687
## 91 2.570595 -2.37318993 7.514379 -4.990271 10.131461
## 92 2.570595 -2.55445154 7.695641 -5.267487 10.408677
## 93 2.570595 -2.72951771 7.870707 -5.535228 10.676417
## 94 2.570595 -2.89898335 8.040173 -5.794403 10.935593
## 95 2.570595 -3.06335388 8.204543 -6.045786 11.186976
## 96 2.570595 -3.22306297 8.364252 -6.290040 11.431230
## 97 2.570595 -3.37848604 8.519676 -6.527739 11.668929
## 98 2.570595 -3.52995070 8.671140 -6.759384 11.900574
## 99 2.570595 -3.67774482 8.818934 -6.985416 12.126606
## 100 2.570595 -3.82212298 8.963313 -7.206223 12.347413
sesopt<- HoltWinters(train.ts, gamma = FALSE, beta = FALSE,alpha = NULL)
sesopt
## Holt-Winters exponential smoothing without trend and without seasonal component.
##
## Call:
## HoltWinters(x = train.ts, alpha = NULL, beta = FALSE, gamma = FALSE)
##
## Smoothing parameters:
## alpha: 0.5419207
## beta : FALSE
## gamma: FALSE
##
## Coefficients:
## [,1]
## a 2.56893
plot(sesopt)
ramalanopt<- forecast(sesopt, h=20)
ramalanopt
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 2.56893 0.0868195 5.051041 -1.227131 6.364992
## 82 2.56893 -0.2542218 5.392083 -1.748708 6.886569
## 83 2.56893 -0.5582891 5.696150 -2.213739 7.351600
## 84 2.56893 -0.8353046 5.973165 -2.637398 7.775259
## 85 2.56893 -1.0914151 6.229276 -3.029085 8.166946
## 86 2.56893 -1.3307418 6.468603 -3.395104 8.532964
## 87 2.56893 -1.5562068 6.694068 -3.739923 8.877783
## 88 2.56893 -1.7699716 6.907832 -4.066848 9.204708
## 89 2.56893 -1.9736883 7.111549 -4.378405 9.516266
## 90 2.56893 -2.1686532 7.306514 -4.676578 9.814439
## 91 2.56893 -2.3559058 7.493767 -4.962957 10.100818
## 92 2.56893 -2.5362949 7.674156 -5.238838 10.376699
## 93 2.56893 -2.7105241 7.848385 -5.505298 10.643159
## 94 2.56893 -2.8791842 8.017045 -5.763242 10.901103
## 95 2.56893 -3.0427776 8.180639 -6.013436 11.151297
## 96 2.56893 -3.2017351 8.339596 -6.256541 11.394402
## 97 2.56893 -3.3564299 8.494291 -6.493126 11.630987
## 98 2.56893 -3.5071874 8.645048 -6.723690 11.861551
## 99 2.56893 -3.6542940 8.792155 -6.948670 12.086531
## 100 2.56893 -3.7980026 8.935863 -7.168453 12.306314
After forecasting, the accuracy of the forecasts is evaluated, on both the training data and the test data.
Accuracy can be computed either directly or manually. Directly, an accuracy value can be taken from an object stored in the SES result, namely the sum of squared errors (SSE). Other accuracy measures can then be derived from this SSE.
SSE1<-ses1$SSE
MSE1<-ses1$SSE/length(train.ts)
RMSE1<-sqrt(MSE1)
akurasi1 <- matrix(c(SSE1,MSE1,RMSE1))
row.names(akurasi1)<- c("SSE", "MSE", "RMSE")
colnames(akurasi1) <- c("Akurasi lamda=0.2")
akurasi1
## Akurasi lamda=0.2
## SSE 349.718648
## MSE 4.371483
## RMSE 2.090809
SSE2<-ses2$SSE
MSE2<-ses2$SSE/length(train.ts)
RMSE2<-sqrt(MSE2)
akurasi2 <- matrix(c(SSE2,MSE2,RMSE2))
row.names(akurasi2)<- c("SSE", "MSE", "RMSE")
colnames(akurasi2) <- c("Akurasi lamda=0.7")
akurasi2
## Akurasi lamda=0.7
## SSE 299.520310
## MSE 3.744004
## RMSE 1.934943
fitted1<-ramalan1$fitted
sisaan1<-ramalan1$residuals
head(sisaan1)
## Time Series:
## Start = 1
## End = 6
## Frequency = 1
## [1] NA 1.20000 0.26000 -0.99200 -3.39360 -3.71488
resid1<-training$SS-ramalan1$fitted
head(resid1)
## Time Series:
## Start = 1
## End = 6
## Frequency = 1
## [1] NA 1.20000 0.26000 -0.99200 -3.39360 -3.71488
SSE.1=sum(sisaan1[2:length(train.ts)]^2)
SSE.1
## [1] 349.7186
MSE.1 = SSE.1/length(train.ts)
MSE.1
## [1] 4.371483
MAPE.1 = sum(abs(sisaan1[2:length(train.ts)]/train.ts[2:length(train.ts)])*
100)/length(train.ts)
MAPE.1
## [1] 3071.665
akurasi.1 <- matrix(c(SSE.1,MSE.1,MAPE.1))
row.names(akurasi.1)<- c("SSE", "MSE", "MAPE")
colnames(akurasi.1) <- c("Akurasi lamda=0.2")
akurasi.1
## Akurasi lamda=0.2
## SSE 349.718648
## MSE 4.371483
## MAPE 3071.664515
fitted2<-ramalan2$fitted
sisaan2<-ramalan2$residuals
head(sisaan2)
## Time Series:
## Start = 1
## End = 6
## Frequency = 1
## [1] NA 1.20000 -0.34000 -1.30200 -2.99060 -1.89718
resid2<-training$SS-ramalan2$fitted
head(resid2)
## Time Series:
## Start = 1
## End = 6
## Frequency = 1
## [1] NA 1.20000 -0.34000 -1.30200 -2.99060 -1.89718
SSE.2=sum(sisaan2[2:length(train.ts)]^2)
SSE.2
## [1] 299.5203
MSE.2 = SSE.2/length(train.ts)
MSE.2
## [1] 3.744004
MAPE.2 = sum(abs(sisaan2[2:length(train.ts)]/train.ts[2:length(train.ts)])*
100)/length(train.ts)
MAPE.2
## [1] 2499.32
akurasi.2 <- matrix(c(SSE.2,MSE.2,MAPE.2))
row.names(akurasi.2)<- c("SSE", "MSE", "MAPE")
colnames(akurasi.2) <- c("Akurasi lamda=0.7")
akurasi.2
## Akurasi lamda=0.7
## SSE 299.520310
## MSE 3.744004
## MAPE 2499.319818
Based on the SSE, MSE, RMSE, and MAPE values for the two parameters, λ = 0.7 gives better accuracy than λ = 0.2, as shown by its smaller values on each measure. Based on the MAPE, however, the forecasts are still categorized as poor.
Test-data accuracy can be computed in almost the same way as training-data accuracy.
selisih1<-ramalan1$mean-testing$SS
SSEtesting1<-sum(selisih1^2)
MSEtesting1<-SSEtesting1/length(testing)
selisih2<-ramalan2$mean-testing$SS
SSEtesting2<-sum(selisih2^2)
MSEtesting2<-SSEtesting2/length(testing)
selisihopt<-ramalanopt$mean-testing$SS
SSEtestingopt<-sum(selisihopt^2)
MSEtestingopt<-SSEtestingopt/length(testing)
akurasitesting1 <- matrix(c(SSEtesting1,SSEtesting2,SSEtestingopt))
row.names(akurasitesting1)<- c("SSE1", "SSE2", "SSEopt")
akurasitesting1
## [,1]
## SSE1 133.8141
## SSE2 130.2314
## SSEopt 128.4041
akurasitesting2 <- matrix(c(MSEtesting1,MSEtesting2,MSEtestingopt))
row.names(akurasitesting2)<- c("MSE1", "MSE2", "MSEopt")
akurasitesting2
## [,1]
## MSE1 66.90705
## MSE2 65.11568
## MSEopt 64.20203
Besides the approach above, accuracy can also be computed with the accuracy() function from the forecast package, called as accuracy(forecast object, actual values). An example is given below.
accuracy(ramalanopt,testing$SS)
## ME RMSE MAE MPE MAPE MASE
## Training set -0.1338668 1.929154 1.570676 -2532.036 2566.171 0.9294636
## Test set -0.1264304 2.533812 2.141286 -6458.751 6493.083 1.2671281
## ACF1
## Training set 0.05821546
## Test set NA
The Double Exponential Smoothing (DES) method is used for data with a trend pattern. DES is essentially SES applied twice: first for the level and then for the trend. Smoothing with this method produces forecasts that are not constant over future periods.
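In equation form (with $\ell_t$ the level, $b_t$ the trend, and $\alpha$, $\beta$ the two smoothing parameters; this is the standard Holt/DES formulation that HoltWinters() with gamma = FALSE is understood to implement):

$$\ell_t=\alpha y_t+(1-\alpha)\bigl(\ell_{t-1}+b_{t-1}\bigr),\qquad b_t=\beta\bigl(\ell_t-\ell_{t-1}\bigr)+(1-\beta)\,b_{t-1},\qquad \hat{y}_{t+h}=\ell_t+h\,b_t$$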
DES smoothing here uses the HoltWinters() function. Whereas the beta argument was previously set to FALSE, it is now initialized together with alpha.
des.1<- HoltWinters(train.ts, gamma = FALSE, beta = 0.2, alpha = 0.2)
plot(des.1)
ramalandes1<- forecast(des.1, h=20)
ramalandes1
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 1.975002 -1.437333 5.387338 -3.243715 7.193719
## 82 2.180514 -1.328720 5.689749 -3.186397 7.547426
## 83 2.386027 -1.250953 6.023007 -3.176254 7.948307
## 84 2.591539 -1.205824 6.388903 -3.216027 8.399106
## 85 2.797052 -1.194068 6.788172 -3.306839 8.900943
## 86 3.002564 -1.215502 7.220630 -3.448412 9.453540
## 87 3.208077 -1.269240 7.685394 -3.639389 10.055542
## 88 3.413589 -1.353921 8.181099 -3.877689 10.704867
## 89 3.619101 -1.467911 8.706114 -4.160813 11.399016
## 90 3.824614 -1.609470 9.258698 -4.486100 12.135328
## 91 4.030126 -1.776863 9.837115 -4.850897 12.911150
## 92 4.235639 -1.968434 10.439711 -5.252672 13.723949
## 93 4.441151 -2.182648 11.064951 -5.689076 14.571378
## 94 4.646664 -2.418107 11.711434 -6.157971 15.451298
## 95 4.852176 -2.673552 12.377904 -6.657431 16.361783
## 96 5.057688 -2.947857 13.063234 -7.185737 17.301114
## 97 5.263201 -3.240022 13.766423 -7.741356 18.267757
## 98 5.468713 -3.549154 14.486581 -8.322925 19.260351
## 99 5.674226 -3.874462 15.222914 -8.929232 20.277684
## 100 5.879738 -4.215240 15.974716 -9.559198 21.318675
des.2<- HoltWinters(train.ts, gamma = FALSE, beta = 0.3, alpha = 0.6)
plot(des.2)
ramalandes2<- forecast(des.2, h=20)
ramalandes2
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 3.070590 0.2391547 5.902026 -1.259717 7.400897
## 82 3.411404 -0.1794990 7.002308 -2.080408 8.903217
## 83 3.752218 -0.7514555 8.255892 -3.135557 10.639994
## 84 4.093033 -1.4479067 9.633972 -4.381103 12.567168
## 85 4.433847 -2.2497809 11.117474 -5.787880 14.655573
## 86 4.774661 -3.1442526 12.693574 -7.336272 16.885594
## 87 5.115475 -4.1222980 14.353248 -9.012480 19.243429
## 88 5.456289 -5.1772502 16.089828 -10.806306 21.718884
## 89 5.797103 -6.3039637 17.898170 -12.709881 24.304088
## 90 6.137917 -7.4983189 19.774153 -14.716906 26.992741
## 91 6.478731 -8.7569181 21.714381 -16.822184 29.779646
## 92 6.819545 -10.0768908 23.715982 -19.021324 32.660415
## 93 7.160360 -11.4557643 25.776483 -21.310545 35.631264
## 94 7.501174 -12.8913751 27.893722 -23.686538 38.688886
## 95 7.841988 -14.3818058 30.065781 -26.146371 41.830347
## 96 8.182802 -15.9253384 32.290942 -28.687417 45.053021
## 97 8.523616 -17.5204205 34.567652 -31.307301 48.354532
## 98 8.864430 -19.1656383 36.894498 -34.003860 51.732720
## 99 9.205244 -20.8596958 39.270184 -36.775114 55.185602
## 100 9.546058 -22.6013986 41.693515 -39.619234 58.711351
Next, the fitted values on the training period and the forecasts over the test period can be compared against the actual data as follows.
plot(datampdw.ts)
lines(des.1$fitted[,1], lty=2, col="blue")
lines(ramalandes1$mean, col="red")
To obtain the optimum DES parameters, the alpha and beta arguments can be set to NULL, as follows.
des.opt<- HoltWinters(train.ts, gamma = FALSE)
des.opt
## Holt-Winters exponential smoothing with trend and without seasonal component.
##
## Call:
## HoltWinters(x = train.ts, gamma = FALSE)
##
## Smoothing parameters:
## alpha: 0.7002195
## beta : 0.08729664
## gamma: FALSE
##
## Coefficients:
## [,1]
## a 2.77980727
## b 0.08099978
plot(des.opt)
ramalandesopt<- forecast(des.opt, h=20)
ramalandesopt
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## 81 2.860807 0.1547533 5.566861 -1.277745 6.999359
## 82 2.941807 -0.4592713 6.342885 -2.259693 8.143307
## 83 3.022807 -1.0417818 7.087395 -3.193445 9.239058
## 84 3.103806 -1.6119215 7.819534 -4.108277 10.315890
## 85 3.184806 -2.1793048 8.548917 -5.018894 11.388506
## 86 3.265806 -2.7493742 9.280986 -5.933618 12.465230
## 87 3.346806 -3.3254450 10.019056 -6.857521 13.551133
## 88 3.427806 -3.9096341 10.765245 -7.793840 14.649451
## 89 3.508805 -4.5033347 11.520945 -8.744705 15.762316
## 90 3.589805 -5.1074793 12.287089 -9.711543 16.891154
## 91 3.670805 -5.7226952 13.064305 -10.695314 18.036923
## 92 3.751805 -6.3494020 13.853011 -11.696658 19.200267
## 93 3.832804 -6.9878737 14.653482 -12.715995 20.381603
## 94 3.913804 -7.6382806 15.465889 -13.753585 21.581193
## 95 3.994804 -8.3007181 16.290326 -14.809574 22.799182
## 96 4.075804 -8.9752267 17.126834 -15.884025 24.035632
## 97 4.156804 -9.6618066 17.975414 -16.976937 25.290544
## 98 4.237803 -10.3604280 18.836035 -18.088265 26.563871
## 99 4.318803 -11.0710387 19.708645 -19.217929 27.855535
## 100 4.399803 -11.7935701 20.593176 -20.365824 29.165430
Next, accuracy is computed on both the training data and the test data using SSE, MSE, and MAPE.
### Training Data Accuracy
ssedes.train1<-des.1$SSE
msedes.train1<-ssedes.train1/length(train.ts)
sisaandes1<-ramalandes1$residuals
head(sisaandes1)
## Time Series:
## Start = 1
## End = 6
## Frequency = 1
## [1] NA NA -1.900000 -3.844000 -6.645440 -7.020774
mapedes.train1 <- sum(abs(sisaandes1[3:length(train.ts)]/train.ts[3:length(train.ts)])
*100)/length(train.ts)
akurasides.1 <- matrix(c(ssedes.train1,msedes.train1,mapedes.train1))
row.names(akurasides.1)<- c("SSE", "MSE", "MAPE")
colnames(akurasides.1) <- c("Akurasi lamda=0.2 dan gamma=0.2")
akurasides.1
## Akurasi lamda=0.2 dan gamma=0.2
## SSE 553.835921
## MSE 6.922949
## MAPE 2128.747340
ssedes.train2<-des.2$SSE
msedes.train2<-ssedes.train2/length(train.ts)
sisaandes2<-ramalandes2$residuals
head(sisaandes2)
## Time Series:
## Start = 1
## End = 6
## Frequency = 1
## [1] NA NA -1.900000 -2.818000 -4.077960 -2.247911
mapedes.train2 <- sum(abs(sisaandes2[3:length(train.ts)]/train.ts[3:length(train.ts)])
*100)/length(train.ts)
akurasides.2 <- matrix(c(ssedes.train2,msedes.train2,mapedes.train2))
row.names(akurasides.2)<- c("SSE", "MSE", "MAPE")
colnames(akurasides.2) <- c("Akurasi lamda=0.6 dan gamma=0.3")
akurasides.2
## Akurasi lamda=0.6 dan gamma=0.3
## SSE 376.157137
## MSE 4.701964
## MAPE 2256.028157
The training-data accuracy results show that scenario 2 (lamda = 0.6 and gamma = 0.3) performs better in terms of SSE and MSE, although scenario 1 has a slightly lower MAPE; based on the MAPE, both scenarios are categorized as poor forecasts.
### Test Data Accuracy
selisihdes1<-ramalandes1$mean-testing$SS
selisihdes1
## Time Series:
## Start = 81
## End = 100
## Frequency = 1
## [1] -2.4249980 -2.5194855 1.7860269 0.7915393 2.7870517 -4.4974358
## [7] -0.9919234 0.6135890 2.6191014 1.6246139 -4.6698737 0.8356387
## [13] 4.1411511 4.6366636 4.8421760 5.0476884 5.2532008 4.2687133
## [19] 0.6742257 4.8797381
SSEtestingdes1<-sum(selisihdes1^2)
MSEtestingdes1<-SSEtestingdes1/length(testing$SS)
MAPEtestingdes1<-sum(abs(selisihdes1/testing$SS)*100)/length(testing$SS)
selisihdes2<-ramalandes2$mean-testing$SS
selisihdes2
## Time Series:
## Start = 81
## End = 100
## Frequency = 1
## [1] -1.3294097 -1.2885956 3.1522185 2.2930326 4.4238467 -2.7253392
## [7] 0.9154749 2.6562890 4.7971031 3.9379172 -2.2212687 3.4195454
## [13] 6.8603595 7.4911736 7.8319877 8.1728018 8.5136159 7.6644300
## [19] 4.2052441 8.5460582
SSEtestingdes2<-sum(selisihdes2^2)
MSEtestingdes2<-SSEtestingdes2/length(testing$SS)
MAPEtestingdes2<-sum(abs(selisihdes2/testing$SS)*100)/length(testing$SS)
selisihdesopt<-ramalandesopt$mean-testing$SS
selisihdesopt
## Time Series:
## Start = 81
## End = 100
## Frequency = 1
## [1] -1.5391930 -1.7581932 2.4228066 1.3038064 3.1748062 -4.2341941
## [7] -0.8531943 0.6278055 2.5088053 1.3898051 -5.0291952 0.3518046
## [13] 3.5328044 3.9038042 3.9848040 4.0658037 4.1468035 3.0378033
## [19] -0.6811969 3.3998029
SSEtestingdesopt<-sum(selisihdesopt^2)
MSEtestingdesopt<-SSEtestingdesopt/length(testing$SS)
MAPEtestingdesopt<-sum(abs(selisihdesopt/testing$SS)*100)/length(testing$SS)
akurasitestingdes <-
matrix(c(SSEtestingdes1,MSEtestingdes1,MAPEtestingdes1,SSEtestingdes2,MSEtestingdes2,
MAPEtestingdes2,SSEtestingdesopt,MSEtestingdesopt,MAPEtestingdesopt),
nrow=3,ncol=3)
row.names(akurasitestingdes)<- c("SSE", "MSE", "MAPE")
colnames(akurasitestingdes) <- c("des ske1","des ske2","des opt")
akurasitestingdes
## des ske1 des ske2 des opt
## SSE 235.06298 561.92213 174.379101
## MSE 11.75315 28.09611 8.718955
## MAPE 11443.76230 18492.33319 9778.729784
MSEfull <-
matrix(c(MSEtesting1,MSEtesting2,MSEtestingopt,MSEtestingdes1,MSEtestingdes2,
MSEtestingdesopt),nrow=3,ncol=2)
row.names(MSEfull)<- c("ske 1", "ske 2", "ske opt")
colnames(MSEfull) <- c("ses","des")
MSEfull
## ses des
## ske 1 66.90705 11.753149
## ske 2 65.11568 28.096107
## ske opt 64.20203 8.718955
The two methods can be compared using the same accuracy measure. The example above compares them using MSE. Based on the smaller MSE values, the DES method performs better than the SES method.
training<-data.mpdw[1:80,2]
testing<-data.mpdw[81:100,2]
training.ts<-ts(training, frequency = 10)
testing.ts<-ts(testing, frequency = 10)
Computation with the additive model is used when the plot of the original data shows seasonal fluctuations that are relatively stable (constant).
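With $p$ the seasonal period (here frequency = 10), $s_t$ the seasonal component, and $\ell_t$, $b_t$ the level and trend as before, the additive Holt-Winters updates take the form (a sketch of the standard formulation; the forecast equation is written for horizons $h \le p$):

$$\ell_t=\alpha\bigl(y_t-s_{t-p}\bigr)+(1-\alpha)\bigl(\ell_{t-1}+b_{t-1}\bigr),\qquad b_t=\beta\bigl(\ell_t-\ell_{t-1}\bigr)+(1-\beta)\,b_{t-1},\qquad s_t=\gamma\bigl(y_t-\ell_t\bigr)+(1-\gamma)\,s_{t-p},\qquad \hat{y}_{t+h}=\ell_t+h\,b_t+s_{t-p+h}$$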
winter1 <- HoltWinters(training.ts,alpha=0.2,beta=0.1,gamma=0.1,seasonal = "additive")
winter1$fitted
## Time Series:
## Start = c(2, 1)
## End = c(8, 10)
## Frequency = 10
## xhat level trend season
## 2.0 6.91531818 5.0210000 -0.077181818 1.97150000
## 2.1 5.78776636 4.8207545 -0.089488182 1.05650000
## 2.2 6.70396958 4.6137131 -0.101243509 2.19150000
## 2.3 6.15085276 4.4516757 -0.107322901 1.80650000
## 2.4 5.72434226 4.3541822 -0.106339956 1.47650000
## 2.5 2.96864700 4.3029738 -0.100826801 -1.23350000
## 2.6 3.29171786 4.0284176 -0.118199741 -0.61850000
## 2.7 1.73434019 3.7518743 -0.134034098 -1.88350000
## 2.8 2.98475125 4.4309722 -0.052720902 -1.39350000
## 2.9 1.08738507 4.5013010 -0.040415927 -3.37350000
## 3.0 6.76351898 4.8434081 -0.002163629 1.92227455
## 3.1 5.08658532 4.1485406 -0.071434008 1.00947869
## 3.2 5.31780628 3.2997896 -0.149165715 2.16718243
## 3.3 4.25797253 2.6470626 -0.199521840 1.81043178
## 3.4 3.97581757 2.6559462 -0.178681291 1.49855262
## 3.5 0.95691204 2.4421014 -0.182197642 -1.30299176
## 3.6 1.91134808 2.7285214 -0.135335883 -0.68183743
## 3.7 0.76510583 2.4709159 -0.147562845 -1.55824722
## 3.8 0.66538682 2.1723319 -0.162664961 -1.34428010
## 3.9 -1.07987395 2.2765896 -0.135972698 -3.22049081
## 4.0 3.88960946 2.3585916 -0.114175219 1.64519303
## 4.1 2.67707900 2.1064945 -0.127967408 0.69855187
## 4.2 3.22956027 1.4451113 -0.181308988 1.96575793
## 4.3 2.52978407 0.8578903 -0.221900193 1.89379398
## 4.4 2.37602462 1.0700333 -0.178495875 1.48448721
## 4.5 -0.68322861 0.6363325 -0.204016367 -1.11554472
## 4.6 -0.34993523 0.5709618 -0.190151795 -0.73074528
## 4.7 -0.53701168 1.1907971 -0.109153090 -1.61865568
## 4.8 0.09512243 1.4090463 -0.076412857 -1.23751105
## 4.9 -1.89580720 1.3156090 -0.078115305 -3.13330089
## 5.0 3.67248024 2.0766551 0.005800839 1.59002427
## 5.1 2.35949670 1.8879599 -0.013648766 0.48518555
## 5.2 3.73896622 1.9424118 -0.006838700 1.80339311
## 5.3 3.50357310 1.4877799 -0.051618024 2.06741125
## 5.4 2.26016298 0.9754472 -0.097689486 1.38240524
## 5.5 -0.55725404 0.6257251 -0.122892746 -1.06008643
## 5.6 0.88778508 1.3342832 -0.039747665 -0.40675046
## 5.7 1.30978041 2.6969785 0.100496633 -1.48769475
## 5.8 2.39949926 3.4755191 0.168301025 -1.24432084
## 5.9 2.15859497 4.6839202 0.272311040 -2.79763631
## 6.0 7.47587729 5.6245123 0.339139140 1.51222585
## 6.1 6.27052338 5.4684760 0.289621595 0.51242581
## 6.2 7.52447983 5.6239929 0.276211127 1.62427581
## 6.3 7.32815499 5.2353081 0.209721530 1.88312540
## 6.4 6.68814922 5.2193986 0.187158431 1.28159221
## 6.5 5.24281651 5.7489272 0.221395446 -0.72750611
## 6.6 6.97452517 6.5417593 0.278539116 0.15422674
## 6.7 5.35996483 6.3453934 0.231048613 -1.21647718
## 6.8 5.59201758 6.2244490 0.195849316 -0.82828078
## 6.9 3.18557990 5.6018948 0.114008964 -2.53032391
## 7.0 6.92724087 5.5187878 0.094297366 1.31415567
## 7.1 4.64437351 4.2296370 -0.044047451 0.45878394
## 7.2 4.52209737 3.2967149 -0.132934921 1.35831742
## 7.3 4.81885661 3.1593605 -0.133376869 1.79287300
## 7.4 3.86699855 2.6222123 -0.173754001 1.41854027
## 7.5 0.92723317 1.6770586 -0.250893972 -0.49893143
## 7.6 1.19954406 1.4807180 -0.245438635 -0.03573528
## 7.7 -0.62913336 0.9973705 -0.269229516 -1.35727437
## 7.8 -0.29432136 1.0939677 -0.232646849 -1.15564219
## 7.9 -1.80574563 1.0201851 -0.216760422 -2.60917030
## 8.0 1.81070469 1.2245738 -0.174645509 0.76077640
## 8.1 0.62416181 0.7277874 -0.206859603 0.10323406
## 8.2 2.14930218 0.9560954 -0.163342839 1.35654963
## 8.3 1.79012771 0.3648921 -0.206128883 1.63136448
## 8.4 1.21878664 0.3007377 -0.191931437 1.10998038
## 8.5 -0.16836833 0.4650489 -0.156307170 -0.47711009
## 8.6 0.43257682 0.6824154 -0.118939803 -0.13089880
## 8.7 -0.85937478 0.4789603 -0.127391340 -1.21094370
## 8.8 -0.54685645 0.6434439 -0.098203844 -1.09209648
## 8.9 -1.08136605 1.3746113 -0.015266715 -2.44071065
xhat1 <- winter1$fitted[,1]
winter1.opt<- HoltWinters(training.ts, alpha= NULL,beta = NULL, gamma = NULL, seasonal = "additive")
winter1.opt
## Holt-Winters exponential smoothing with trend and additive seasonal component.
##
## Call:
## HoltWinters(x = training.ts, alpha = NULL, beta = NULL, gamma = NULL, seasonal = "additive")
##
## Smoothing parameters:
## alpha: 0.3329591
## beta : 0
## gamma: 0.4642618
##
## Coefficients:
## [,1]
## a 2.318339417
## b -0.077181818
## s1 -0.910883941
## s2 0.022436448
## s3 -0.015970667
## s4 1.317697229
## s5 1.013450546
## s6 0.747307642
## s7 0.027904447
## s8 -0.008403235
## s9 0.593746337
## s10 -0.275982120
winter1.opt$fitted
## Time Series:
## Start = c(2, 1)
## End = c(8, 10)
## Frequency = 10
## xhat level trend season
## 2.0 6.91531818 5.02100000 -0.07718182 1.971500000
## 2.1 5.71826059 4.73894241 -0.07718182 1.056500000
## 2.2 6.60351920 4.48920102 -0.07718182 2.191500000
## 2.3 6.07357382 4.34425563 -0.07718182 1.806500000
## 2.4 5.70848674 4.30916856 -0.07718182 1.476500000
## 2.5 3.01836691 4.32904873 -0.07718182 -1.233500000
## 2.6 3.25040649 3.94608831 -0.07718182 -0.618500000
## 2.7 1.65837002 3.61905183 -0.07718182 -1.883500000
## 2.8 3.45018151 4.92086333 -0.07718182 -1.393500000
## 2.9 1.44288312 4.89356494 -0.07718182 -3.373500000
## 3.0 7.03860478 5.33483933 -0.07718182 1.780947266
## 3.1 4.83167750 4.01285510 -0.07718182 0.896004220
## 3.2 4.77776530 2.72647327 -0.07718182 2.128473844
## 3.3 3.75924659 1.99077654 -0.07718182 1.845651866
## 3.4 3.91619704 2.42660256 -0.07718182 1.566776299
## 3.5 0.71564871 2.31073188 -0.07718182 -1.517901356
## 3.6 2.16596438 3.09403330 -0.07718182 -0.850887100
## 3.7 2.05042566 2.72852077 -0.07718182 -0.600913301
## 3.8 0.54767492 1.97196070 -0.07718182 -1.347103968
## 3.9 -0.59012762 2.37834371 -0.07718182 -2.891289517
## 4.0 3.04696809 2.50097984 -0.07718182 0.623170069
## 4.1 2.16891000 2.47475138 -0.07718182 -0.228659565
## 4.2 3.11755533 1.67874087 -0.07718182 1.515996275
## 4.3 3.20870466 0.96309159 -0.07718182 2.322794886
## 4.4 2.83606049 1.38245010 -0.07718182 1.530792210
## 4.5 -0.06752589 0.72723118 -0.07718182 -0.717575252
## 4.6 -0.52037987 0.67586231 -0.07718182 -1.119060355
## 4.7 0.69391684 2.00389429 -0.07718182 -1.232795632
## 4.8 1.08739415 2.06192155 -0.07718182 -0.897345578
## 4.9 -1.15661127 1.62601156 -0.07718182 -2.705441020
## 5.0 3.29311928 2.69973986 -0.07718182 0.670561240
## 5.1 1.45065745 2.42507359 -0.07718182 -0.897234319
## 5.2 3.60885453 2.76387172 -0.07718182 0.922164626
## 5.3 4.69196746 1.98452763 -0.07718182 2.784621648
## 5.4 1.66064790 0.74466354 -0.07718182 0.993166177
## 5.5 -0.32323572 0.44751300 -0.07718182 -0.693566907
## 5.6 1.78734006 1.67660815 -0.07718182 0.187913734
## 5.7 2.45047101 3.63469197 -0.07718182 -1.107039141
## 5.8 2.99833469 4.30651126 -0.07718182 -1.230994750
## 5.9 4.04932185 5.76149570 -0.07718182 -1.634992028
## 6.0 6.57703162 6.16733034 -0.07718182 0.486883097
## 6.1 4.97754381 5.56506152 -0.07718182 -0.510335891
## 6.2 5.88704146 5.69513215 -0.07718182 0.269091131
## 6.3 6.68227623 5.05623455 -0.07718182 1.703223493
## 6.4 5.52986833 4.81847448 -0.07718182 0.788575664
## 6.5 6.14113436 5.69692907 -0.07718182 0.521387102
## 6.6 8.27567973 6.27196936 -0.07718182 2.080892189
## 6.7 4.48335343 4.97093659 -0.07718182 -0.410401350
## 6.8 4.71650885 4.59963423 -0.07718182 0.194056436
## 6.9 2.18856110 3.45148658 -0.07718182 -1.185743662
## 7.0 3.29943701 3.37811345 -0.07718182 -0.001494616
## 7.1 1.81092923 2.20568370 -0.07718182 -0.317572649
## 7.2 1.26159194 1.59212836 -0.07718182 -0.253354607
## 7.3 4.06989351 2.59320392 -0.07718182 1.553871408
## 7.4 3.69342041 2.09319953 -0.07718182 1.677402700
## 7.5 1.84041941 0.78958943 -0.07718182 1.128011796
## 7.6 1.36459406 0.49917415 -0.07718182 0.942601722
## 7.7 -0.79017355 -0.02903206 -0.07718182 -0.683959673
## 7.8 -0.32278658 0.55643248 -0.07718182 -0.802037247
## 7.9 -0.50617813 0.75320493 -0.07718182 -1.182201245
## 8.0 -0.15290719 0.94444744 -0.07718182 -1.020172812
## 8.1 0.09113963 0.98476928 -0.07718182 -0.816447830
## 8.2 2.48186616 1.80952712 -0.07718182 0.749520860
## 8.3 1.99274192 0.90931501 -0.07718182 1.160608724
## 8.4 1.46056265 1.00102938 -0.07718182 0.536715087
## 8.5 2.28892106 1.43641721 -0.07718182 0.929685674
## 8.6 1.60907579 1.16314877 -0.07718182 0.523108834
## 8.7 0.40871883 0.55354015 -0.07718182 -0.067639497
## 8.8 -0.08437005 0.54004713 -0.07718182 -0.547235362
## 8.9 0.67988527 1.68960978 -0.07718182 -0.932542691
xhat1.opt <- winter1.opt$fitted[,1]
forecast1 <- predict(winter1, n.ahead = 20)
forecast1.opt <- predict(winter1.opt, n.ahead = 20)
plot(training.ts,main="Winter 0.2;0.1;0.1",type="l",col="black",
xlim=c(1,12), ylim=c(1,11),pch=12)
lines(xhat1,type="l",col="red")
lines(xhat1.opt,type="l",col="blue")
lines(forecast1,type="l",col="red")
lines(forecast1.opt,type="l",col="blue")
legend("topleft",c("Actual Data",expression(paste(winter1)),
expression(paste(winter1.opt))),cex=0.5,
col=c("black","red","blue"),lty=1)
SSE1<-winter1$SSE
MSE1<-winter1$SSE/length(training.ts)
RMSE1<-sqrt(MSE1)
akurasi1 <- matrix(c(SSE1,MSE1,RMSE1))
row.names(akurasi1)<- c("SSE", "MSE", "RMSE")
colnames(akurasi1) <- c("Akurasi")
akurasi1
## Akurasi
## SSE 457.696642
## MSE 5.721208
## RMSE 2.391905
SSE1.opt<-winter1.opt$SSE
MSE1.opt<-winter1.opt$SSE/length(training.ts)
RMSE1.opt<-sqrt(MSE1.opt)
akurasi1.opt <- matrix(c(SSE1.opt,MSE1.opt,RMSE1.opt))
row.names(akurasi1.opt)<- c("SSE1.opt", "MSE1.opt", "RMSE1.opt")
colnames(akurasi1.opt) <- c("Akurasi")
akurasi1.opt
## Akurasi
## SSE1.opt 340.590842
## MSE1.opt 4.257386
## RMSE1.opt 2.063343
akurasi1.train = data.frame(Model_Winter = c("Winter 1","Winter1 optimal"),
Nilai_SSE=c(SSE1,SSE1.opt),
Nilai_MSE=c(MSE1,MSE1.opt),Nilai_RMSE=c(RMSE1,RMSE1.opt))
akurasi1.train
## Model_Winter Nilai_SSE Nilai_MSE Nilai_RMSE
## 1 Winter 1 457.6966 5.721208 2.391905
## 2 Winter1 optimal 340.5908 4.257386 2.063343
forecast1<-data.frame(forecast1)
testing.ts<-data.frame(testing.ts)
selisih1<-forecast1-testing.ts
SSEtesting1<-sum(selisih1^2)
MSEtesting1<-SSEtesting1/length(testing.ts)
forecast1.opt<-data.frame(forecast1.opt)
selisih1.opt<-forecast1.opt-testing.ts
SSEtesting1.opt<-sum(selisih1.opt^2)
MSEtesting1.opt<-SSEtesting1.opt/length(testing.ts)
winter2 <- HoltWinters(training.ts,alpha=0.2,beta=0.1,gamma=0.3,seasonal = "multiplicative")
winter2$fitted
## Time Series:
## Start = c(2, 1)
## End = c(8, 10)
## Frequency = 10
## xhat level trend season
## 2.0 7.115179844 5.0210000 -0.0771818182 1.4392074
## 2.1 5.879501356 4.8305364 -0.0885099955 1.2398711
## 2.2 6.812570586 4.6324180 -0.0994708344 1.5029009
## 2.3 6.104005635 4.4780440 -0.1049611575 1.3958130
## 2.4 5.622603688 4.3868374 -0.1035856958 1.3126951
## 2.5 3.217557263 4.3407512 -0.0978357497 0.7583364
## 2.6 3.310852226 3.9481763 -0.1273096678 0.8665187
## 2.7 2.033753508 3.6337149 -0.1460248382 0.5831234
## 2.8 3.216621460 4.7794395 -0.0168498881 0.6753934
## 2.9 1.101445312 4.8761171 -0.0054971399 0.2261407
## 3.0 9.388288369 6.5497117 0.1624120305 1.3987061
## 3.1 7.127917424 5.8415637 0.0753560236 1.2046669
## 3.2 7.270232788 4.9327609 -0.0230598508 1.4807893
## 3.3 5.915997710 4.3059376 -0.0834362031 1.4010647
## 3.4 5.390707220 4.1345686 -0.0922294829 1.3335614
## 3.5 2.545990095 3.8037738 -0.1160860112 0.6904028
## 3.6 3.098911215 3.9061139 -0.0942434037 0.8129634
## 3.7 2.494983141 3.3693140 -0.1384990538 0.7722458
## 3.8 1.655390715 2.5872418 -0.2028563671 0.6942631
## 3.9 0.677389650 2.4836588 -0.1929290267 0.2957091
## 4.0 1.839181209 1.8393472 -0.2380672821 1.1485694
## 4.1 1.487865780 1.8382389 -0.2143713914 0.9162483
## 4.2 1.298936131 1.3012768 -0.2466304592 1.2316320
## 4.3 1.079062039 1.0385805 -0.2482370452 1.3653078
## 4.4 1.388046674 1.3207642 -0.1951949698 1.2331953
## 4.5 0.647576947 1.0788537 -0.1998665197 0.7367308
## 4.6 0.334694261 0.7059044 -0.2171747939 0.6848250
## 4.7 0.732778221 1.4715518 -0.1188925769 0.5417316
## 4.8 1.006145237 1.4882326 -0.1053352435 0.7275632
## 4.9 0.203693029 1.1090668 -0.1327183000 0.2086274
## 5.0 4.050608393 2.9859667 0.0682435213 1.3262376
## 5.1 1.865646974 2.8505350 0.0478760035 0.6436792
## 5.2 3.906085570 3.1576560 0.0738005010 1.2087693
## 5.3 5.801432567 2.8333515 0.0339900015 2.0232792
## 5.4 2.807046987 2.4124925 -0.0114948971 1.1691169
## 5.5 1.065639892 2.0918674 -0.0424079210 0.5199614
## 5.6 3.798955181 3.0242856 0.0550746969 1.2336832
## 5.7 2.323137982 3.7442060 0.1215592653 0.6009516
## 5.8 2.487016061 4.6567980 0.2006625366 0.5119992
## 5.9 2.736048540 6.8547229 0.4003887719 0.3771201
## 6.0 11.237549788 8.7209319 0.5469707948 1.2125235
## 6.1 6.139799216 8.2390484 0.4440853695 0.7070949
## 6.2 9.003717661 8.5304530 0.4288172844 1.0049611
## 6.3 13.050926063 8.0032695 0.3332172083 1.5655187
## 6.4 7.412504642 7.4612591 0.2456944476 0.9617944
## 6.5 5.897389530 7.9122979 0.2662288856 0.7210821
## 6.6 13.643752372 8.7894449 0.3273206912 1.4965562
## 6.7 5.870513988 7.9081571 0.2064598492 0.7234493
## 6.8 5.272884507 7.4869257 0.1436907241 0.6910168
## 6.9 2.978837022 6.5386360 0.0344926769 0.4531840
## 7.0 6.421565255 6.2294109 0.0001209009 1.0308263
## 7.1 3.363564760 4.9855656 -0.1242757158 0.6919079
## 7.2 3.212154853 3.9468431 -0.2157203975 0.8609084
## 7.3 5.171436855 4.0303055 -0.1858021123 1.3451508
## 7.4 3.243859468 3.4919129 -0.2210611607 0.9917476
## 7.5 1.822147101 2.6186981 -0.2862765323 0.7812255
## 7.6 2.286465695 2.1731469 -0.3022039983 1.2220927
## 7.7 0.754077387 1.4983908 -0.3394592022 0.6506660
## 7.8 0.536093190 1.2959982 -0.3257525500 0.5525335
## 7.9 0.266653669 0.9571810 -0.3270590116 0.4231779
## 8.0 0.231385748 0.6458819 -0.3254830156 0.7221802
## 8.1 -0.007315839 0.3117070 -0.3263522100 0.4995375
## 8.2 0.839492580 1.1093207 -0.2139556173 0.9375981
## 8.3 0.575447281 0.7184252 -0.2316496080 1.1821614
## 8.4 0.426459232 0.8123746 -0.1990897097 0.6953689
## 8.5 0.875261259 1.3534792 -0.1250702756 0.7125162
## 8.6 1.164430845 1.4599093 -0.1019202411 0.8574671
## 8.7 0.703824307 1.0887237 -0.1288467763 0.7332444
## 8.8 0.434721149 0.9315577 -0.1316786919 0.5434836
## 8.9 0.849138248 1.9646902 -0.0151975773 0.4355688
xhat2 <- winter2$fitted[,1]
winter2.opt<- HoltWinters(training.ts, alpha= NULL, beta = NULL, gamma = NULL, seasonal = "multiplicative")
winter2.opt$fitted
## Time Series:
## Start = c(2, 1)
## End = c(8, 10)
## Frequency = 10
## xhat level trend season
## 2.0 7.1151798 5.021000 -0.07718182 1.4392074
## 2.1 5.9733513 4.894902 -0.07718182 1.2398711
## 2.2 7.0436012 4.763852 -0.07718182 1.5029009
## 2.3 6.3823611 4.649686 -0.07718182 1.3958130
## 2.4 5.8861769 4.561221 -0.07718182 1.3126951
## 2.5 3.3475594 4.491528 -0.07718182 0.7583364
## 2.6 3.6351208 4.272268 -0.07718182 0.8665187
## 2.7 2.3352756 4.081953 -0.07718182 0.5831234
## 2.8 2.9992387 4.517910 -0.07718182 0.6753934
## 2.9 1.0041475 4.517548 -0.07718182 0.2261407
## 3.0 7.2334330 5.202580 -0.07718182 1.4112919
## 3.1 5.8298762 4.884695 -0.07718182 1.2126594
## 3.2 6.5115653 4.477784 -0.07718182 1.4796987
## 3.3 5.7047940 4.183976 -0.07718182 1.3891112
## 3.4 5.2736270 4.081628 -0.07718182 1.3169430
## 3.5 2.7174000 3.907808 -0.07718182 0.7093879
## 3.6 3.1356226 3.901553 -0.07718182 0.8199053
## 3.7 2.5291675 3.631021 -0.07718182 0.7116720
## 3.8 2.2123238 3.248133 -0.07718182 0.6976847
## 3.9 0.8909388 3.144668 -0.07718182 0.2904459
## 4.0 3.4822389 2.805543 -0.07718182 1.2763115
## 4.1 2.7356324 2.709263 -0.07718182 1.0393418
## 4.2 3.0991248 2.405598 -0.07718182 1.3310010
## 4.3 2.9206648 2.205191 -0.07718182 1.3724872
## 4.4 2.7115603 2.239972 -0.07718182 1.2537323
## 4.5 1.4501804 2.051779 -0.07718182 0.7344184
## 4.6 1.2704084 1.805241 -0.07718182 0.7351648
## 4.7 1.1262772 2.013473 -0.07718182 0.5816671
## 4.8 1.2733537 1.932390 -0.07718182 0.6863669
## 4.9 0.3850328 1.696246 -0.07718182 0.2378120
## 5.0 2.8164404 2.314495 -0.07718182 1.2588492
## 5.1 1.8280683 2.229325 -0.07718182 0.8494175
## 5.2 2.5674347 2.240795 -0.07718182 1.1866420
## 5.3 3.0244470 2.085927 -0.07718182 1.5056403
## 5.4 2.0499297 1.904095 -0.07718182 1.1220727
## 5.5 1.0025073 1.746103 -0.07718182 0.6006917
## 5.6 1.8422277 2.042369 -0.07718182 0.9374312
## 5.7 1.4172347 2.523272 -0.07718182 0.5793877
## 5.8 1.6049597 2.935416 -0.07718182 0.5615215
## 5.9 1.3942195 3.780280 -0.07718182 0.3765008
## 6.0 5.7100692 4.644894 -0.07718182 1.2500940
## 6.1 4.0623635 4.518657 -0.07718182 0.9146429
## 6.2 4.9643224 4.586662 -0.07718182 1.1008635
## 6.3 5.8809163 4.449519 -0.07718182 1.3450280
## 6.4 4.4074832 4.392826 -0.07718182 1.0212806
## 6.5 3.7243668 4.653264 -0.07718182 0.8138767
## 6.6 6.6499914 5.040394 -0.07718182 1.3398565
## 6.7 3.6455088 4.831076 -0.07718182 0.7668469
## 6.8 3.8650462 4.748769 -0.07718182 0.8273518
## 6.9 2.2810167 4.424713 -0.07718182 0.5246695
## 7.0 5.2095337 4.334195 -0.07718182 1.2237532
## 7.1 3.7016953 3.890071 -0.07718182 0.9708373
## 7.2 3.6709882 3.501389 -0.07718182 1.0720696
## 7.3 4.6332330 3.490990 -0.07718182 1.3572037
## 7.4 3.7515980 3.297154 -0.07718182 1.1651026
## 7.5 2.7490896 2.942627 -0.07718182 0.9593934
## 7.6 3.3606286 2.725999 -0.07718182 1.2687278
## 7.7 1.7933847 2.420739 -0.07718182 0.7652405
## 7.8 1.6226247 2.276589 -0.07718182 0.7377554
## 7.9 1.0382792 2.067991 -0.07718182 0.5215362
## 8.0 1.7908436 1.868555 -0.07718182 0.9997041
## 8.1 1.2664525 1.653943 -0.07718182 0.8031986
## 8.2 1.8506860 1.741654 -0.07718182 1.1118756
## 8.3 1.8256244 1.521500 -0.07718182 1.2640040
## 8.4 1.3453309 1.490395 -0.07718182 0.9519659
## 8.5 1.2842342 1.563326 -0.07718182 0.8641385
## 8.6 1.5037676 1.527696 -0.07718182 1.0367135
## 8.7 0.9011399 1.326077 -0.07718182 0.7215499
## 8.8 0.7345048 1.212851 -0.07718182 0.6467594
## 8.9 0.6561525 1.518304 -0.07718182 0.4553068
xhat2.opt <- winter2.opt$fitted[,1]
forecast2 <- predict(winter2, n.ahead = 20)
forecast2.opt <- predict(winter2.opt, n.ahead = 20)
plot(training.ts,main="Winter 0.2;0.1;0.1",type="l",col="black",
xlim=c(1,11), ylim=c(1,18),pch=12)
lines(xhat2,type="l",col="red")
lines(xhat2.opt,type="l",col="blue")
lines(forecast2,type="l",col="red")
lines(forecast2.opt,type="l",col="blue")
legend("topleft",c("Actual Data",expression(paste(winter2)),
expression(paste(winter2.opt))),cex=0.5,
col=c("black","red","blue"),lty=1)
#### Training Data Accuracy
SSE2<-winter2$SSE
MSE2<-winter2$SSE/length(training.ts)
RMSE2<-sqrt(MSE2)
akurasi1 <- matrix(c(SSE2,MSE2,RMSE2))
row.names(akurasi1)<- c("SSE2", "MSE2", "RMSE2")
colnames(akurasi1) <- c("Akurasi lamda=0.2")
akurasi1
## Akurasi lamda=0.2
## SSE2 583.043816
## MSE2 7.288048
## RMSE2 2.699638
SSE2.opt<-winter2.opt$SSE
MSE2.opt<-winter2.opt$SSE/length(training.ts)
RMSE2.opt<-sqrt(MSE2.opt)
akurasi1.opt <- matrix(c(SSE2.opt,MSE2.opt,RMSE2.opt))
row.names(akurasi1.opt)<- c("SSE2.opt", "MSE2.opt", "RMSE2.opt")
colnames(akurasi1.opt) <- c("Akurasi")
akurasi1.opt
## Akurasi
## SSE2.opt 376.632640
## MSE2.opt 4.707908
## RMSE2.opt 2.169771
akurasi2.train = data.frame(Model_Winter = c("Winter 1","winter2 optimal"),
Nilai_SSE=c(SSE2,SSE2.opt),
Nilai_MSE=c(MSE2,MSE2.opt),Nilai_RMSE=c(RMSE2,RMSE2.opt))
akurasi2.train
## Model_Winter Nilai_SSE Nilai_MSE Nilai_RMSE
## 1 Winter 1 583.0438 7.288048 2.699638
## 2 winter2 optimal 376.6326 4.707908 2.169771
forecast2<-data.frame(forecast2)
testing.ts<-data.frame(testing.ts)
selisih2<-forecast2-testing.ts
SSEtesting2<-sum(selisih2^2)
MSEtesting2<-SSEtesting2/length(testing.ts)
forecast2.opt<-data.frame(forecast2.opt)
selisih2.opt<-forecast2.opt-testing.ts
SSEtesting2.opt<-sum(selisih2.opt^2)
MSEtesting2.opt<-SSEtesting2.opt/length(testing.ts)
akurasitesting1 <- matrix(c(SSEtesting2,SSEtesting2.opt))
row.names(akurasitesting1)<- c("SSE2", "SSEopt")
akurasitesting1
## [,1]
## SSE2 203.6052
## SSEopt 169.6105
akurasitesting2 <- matrix(c(MSEtesting2,MSEtesting2.opt))
row.names(akurasitesting2)<- c("MSE2", "MSEopt")
akurasitesting2
## [,1]
## MSE2 203.6052
## MSEopt 169.6105