South Ural State University, Chelyabinsk, Russian Federation
# Import required packages
library(fpp2)
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
## -- Attaching packages ---------------------------------------------- fpp2 2.4 --
## v ggplot2 3.3.2 v fma 2.4
## v forecast 8.13 v expsmooth 2.3
##
library(forecast)
library(ggplot2)
library(readxl)
library(moments)
require(tseries)
## Loading required package: tseries
require(markovchain)
## Loading required package: markovchain
## Package: markovchain
## Version: 0.8.5-3
## Date: 2020-12-03
## BugReport: https://github.com/spedygiorgio/markovchain/issues
require(data.table)
## Loading required package: data.table
library(Hmisc)
## Loading required package: lattice
## Loading required package: survival
## Loading required package: Formula
##
## Attaching package: 'Hmisc'
## The following objects are masked from 'package:base':
##
## format.pval, units
## Global variables ##
Full_original_data <- read.csv("F:/Phd/University Conference/Chelyabinsk covid 19 data.csv") # path to your time series data (CSV file)
View(Full_original_data)
original_data<-Full_original_data$Recovery
y_lab <- "Covid 19 Recovery cases in Chelyabinsk" # input name of data
Actual_date_interval <- c("2020/03/12","2021/02/22")
Forecast_date_interval <- c("2021/02/23","2021/03/1")
validation_data_days <-7
frequency<-"day"
Population <-1130319 # population of Chelyabinsk (population size for the SIR model)
country.name <- "Chelyabinsk"
# Data preparation and descriptive statistics
View(original_data) # View data in table in R
summary(original_data) # summary of the time series
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 1704 9508 12051 22100 43335
describe((original_data)) # describe your time series
## (original_data)
## n missing distinct Info Mean Gmd .05 .10
## 348 0 314 1 12051 12471 0.0 19.3
## .25 .50 .75 .90 .95
## 1703.5 9507.5 22099.8 27290.3 35340.2
##
## lowest : 0 2 4 6 10, highest: 42176 42494 42792 43072 43335
# Descriptive statistics with the pastecs package
library(pastecs)
##
## Attaching package: 'pastecs'
## The following objects are masked from 'package:data.table':
##
## first, last
stat.desc(original_data)
## nbr.val nbr.null nbr.na min max range
## 3.480000e+02 2.100000e+01 0.000000e+00 0.000000e+00 4.333500e+04 4.333500e+04
## sum median mean SE.mean CI.mean.0.95 var
## 4.193589e+06 9.507500e+03 1.205054e+04 6.078134e+02 1.195462e+03 1.285641e+08
## std.dev coef.var
## 1.133861e+04 9.409213e-01
data.frame(skewness=skewness(original_data)) # calculate coefficient of skewness
## skewness
## 1 0.8599817
data.frame(kurtosis=kurtosis(original_data)) # calculate coefficient of kurtosis
## kurtosis
## 1 2.921001
sd(original_data)
## [1] 11338.61
# Processing the input data
rows <- NROW(original_data) # calculate number of rows in time series (number of days)
training_data<-original_data[1:(rows-validation_data_days)] # Training data
testing_data<-original_data[(rows-validation_data_days+1):rows] #testing data
AD<-fulldate<-seq(as.Date(Actual_date_interval[1]),as.Date(Actual_date_interval[2]), frequency) #input range for actual date
FD<-seq(as.Date(Forecast_date_interval[1]),as.Date(Forecast_date_interval[2]), frequency) #input range forecasting date
N_forecasting_days<-nrow(data.frame(FD)) # number of days to forecast
validation_dates<-tail(AD,validation_data_days) # select validation_dates
validation_data_by_name<-weekdays(validation_dates) # weekday names of the validation dates
forecasting_data_by_name<-weekdays(FD) # weekday names of the forecast dates
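# Editorial sketch (not part of the original run): the train/test split above
# can equivalently be written with head()/tail(); the "_alt" objects below are
# illustrative only and should match training_data / testing_data.
training_data_alt <- head(original_data, rows - validation_data_days)
testing_data_alt  <- tail(original_data, validation_data_days)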
#NNAR Model
data_series<-ts(training_data)
model_NNAR<-nnetar(data_series, size = 5)
saveRDS(model_NNAR, file = "model_NNAR.RDS")
my_model <- readRDS("model_NNAR.RDS")
accuracy(model_NNAR) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 0.1119018 91.70778 59.69764 -Inf Inf 0.4968471 0.5615471
#Print Model Parameters
model_NNAR
## Series: data_series
## Model: NNAR(1,5)
## Call: nnetar(y = data_series, size = 5)
##
## Average of 20 networks, each of which is
## a 1-5-1 network with 16 weights
## options were - linear output units
##
## sigma^2 estimated as 8410
# Testing Data Evaluation
forecasting_NNAR <- predict(model_NNAR, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_NNAR$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using NNAR Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using NNAR Model for ==> Covid 19 Recovery cases in Chelyabinsk"
MAPE_Mean_All<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_Mean_All_NNAR<-round(mean(MAPE_Per_Day),3)
MAPE_NNAR<-paste(round(MAPE_Per_Day,3),"%")
MAPE_NNAR_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in NNAR Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in NNAR Model for ==> Covid 19 Recovery cases in Chelyabinsk"
paste(MAPE_Mean_All,"%")
## [1] "2.038 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in NNAR Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in NNAR Model for ==> Covid 19 Recovery cases in Chelyabinsk"
data.frame(date_NNAR=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_NNAR=validation_forecast,MAPE_NNAR_Model)
## date_NNAR validation_data_by_name actual_data forecasting_NNAR
## 1 2021-02-16 Tuesday 41496 41107.06
## 2 2021-02-17 Wednesday 41842 41320.38
## 3 2021-02-18 Thursday 42176 41497.04
## 4 2021-02-19 Friday 42494 41642.12
## 5 2021-02-20 Saturday 42792 41760.43
## 6 2021-02-21 Sunday 43072 41856.35
## 7 2021-02-22 Monday 43335 41933.76
## MAPE_NNAR_Model
## 1 0.937 %
## 2 1.247 %
## 3 1.61 %
## 4 2.005 %
## 5 2.411 %
## 6 2.822 %
## 7 3.233 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_NNAR=tail(forecasting_NNAR$mean,N_forecasting_days))
## FD forecating_date forecasting_by_NNAR
## 1 2021-02-23 Tuesday 41995.99
## 2 2021-02-24 Wednesday 42045.86
## 3 2021-02-25 Thursday 42085.73
## 4 2021-02-26 Friday 42117.53
## 5 2021-02-27 Saturday 42142.86
## 6 2021-02-28 Sunday 42163.01
## 7 2021-03-01 Monday 42179.02
plot(forecasting_NNAR)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph1<-autoplot(forecasting_NNAR,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph1

saveRDS(model_NNAR, file = "model_NNAR.RDS")
## Error of forecasting
Error_NNAR<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_NNAR<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_NNAR<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_NNAR<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_NNAR<-sqrt(sum((Error_NNAR^2))/validation_data_days) # Root mean square forecast error
MSE_NNAR<-(sum((Error_NNAR^2))/validation_data_days) # Mean square forecast error
MAD_NNAR<-abs((sum(testing_data-validation_forecast))/validation_data_days) # mean deviation of the forecasts (absolute value of the mean error)
AEOF_NNAR<-c(Error_NNAR)
REOF_ANNAR<-c(paste(round(REOF_A_NNAR,3),"%"))
REOF_FNNAR<-c(paste(round(REOF_F_NNAR,3),"%"))
data.frame(correlation_NNAR,MSE_NNAR,RMSE_NNAR,MAPE_Mean_All,MAD_NNAR) # error analysis for the NNAR model: correlation, MSE, RMSE, MAPE, MAD
## correlation_NNAR MSE_NNAR RMSE_NNAR
## 1 0.9924613 873636.7 934.6854
## MAPE_Mean_All MAD_NNAR
## 1 2.038 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk 869.9805
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_NNAR,REOF_ANNAR,REOF_FNNAR) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_NNAR REOF_ANNAR REOF_FNNAR
## 1 2021-02-16 Tuesday 388.9448 0.937 % 0.946 %
## 2 2021-02-17 Wednesday 521.6206 1.247 % 1.262 %
## 3 2021-02-18 Thursday 678.9605 1.61 % 1.636 %
## 4 2021-02-19 Friday 851.8835 2.005 % 2.046 %
## 5 2021-02-20 Saturday 1031.5730 2.411 % 2.47 %
## 6 2021-02-21 Sunday 1215.6462 2.822 % 2.904 %
## 7 2021-02-22 Monday 1401.2351 3.233 % 3.342 %
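# Editorial sketch (helper not in the original script): the validation error
# metrics above are recomputed for every model below; a small function using
# the same formulas could collect them in one call. "actual" and "forecast"
# stand for the validation actuals and forecasts.
forecast_error_metrics <- function(actual, forecast, h = length(actual)) {
  err <- actual - forecast
  data.frame(
    correlation = cor(actual, forecast, method = "pearson"),
    MSE  = sum(err^2) / h,                 # mean square forecast error
    RMSE = sqrt(sum(err^2) / h),           # root mean square forecast error
    MAPE = mean(abs(err / actual) * 100),  # mean absolute percentage error
    MAD  = abs(sum(err) / h)               # |mean error|, as defined above
  )
}
# Example: forecast_error_metrics(testing_data, validation_forecast)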
## BATS Model
# Data Modeling
data_series<-ts(training_data) # convert the training data to a time series object
autoplot(data_series ,xlab=paste ("Time in ", frequency, sep=" "), ylab = y_lab, main=paste ("Actual Data :", y_lab, sep=" "))

model_bats<-bats(data_series)
accuracy(model_bats) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 3.946979 76.84142 43.27424 -Inf Inf 0.3601596 0.002562207
# Print Model Parameters
model_bats
## BATS(1, {0,0}, 1, -)
##
## Call: bats(y = data_series)
##
## Parameters
## Alpha: 1.188337
## Beta: 0.3738019
## Damping Parameter: 1
##
## Seed States:
## [,1]
## [1,] 0.3806254
## [2,] 2.9186830
##
## Sigma: 76.84142
## AIC: 4957.741
# Plotting the BATS model
plot(model_bats,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4)

# Testing Data Evaluation
forecasting_bats <- predict(model_bats, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_bats$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using bats Model for ==> Covid 19 Recovery cases in Chelyabinsk"
MAPE_Mean_All.bats_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.bats<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_bats<-paste(round(MAPE_Per_Day,3),"%")
MAPE_bats_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in bats Model for ==> Covid 19 Recovery cases in Chelyabinsk"
paste(MAPE_Mean_All.bats,"%")
## [1] "1.101 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in bats Model for ==> Covid 19 Recovery cases in Chelyabinsk"
data.frame(date_bats=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_bats=validation_forecast,MAPE_bats_Model)
## date_bats validation_data_by_name actual_data forecasting_bats
## 1 2021-02-16 Tuesday 41496 41378.59
## 2 2021-02-17 Wednesday 41842 41884.62
## 3 2021-02-18 Thursday 42176 42390.64
## 4 2021-02-19 Friday 42494 42896.67
## 5 2021-02-20 Saturday 42792 43402.69
## 6 2021-02-21 Sunday 43072 43908.72
## 7 2021-02-22 Monday 43335 44414.75
## MAPE_bats_Model
## 1 0.283 %
## 2 0.102 %
## 3 0.509 %
## 4 0.948 %
## 5 1.427 %
## 6 1.943 %
## 7 2.492 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_bats=tail(forecasting_bats$mean,N_forecasting_days))
## FD forecating_date forecasting_by_bats
## 1 2021-02-23 Tuesday 44920.77
## 2 2021-02-24 Wednesday 45426.80
## 3 2021-02-25 Thursday 45932.82
## 4 2021-02-26 Friday 46438.85
## 5 2021-02-27 Saturday 46944.88
## 6 2021-02-28 Sunday 47450.90
## 7 2021-03-01 Monday 47956.93
plot(forecasting_bats)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph1<-autoplot(forecasting_bats,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph1

## Error of forecasting
Error_bats<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_bats<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_bats<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_bats<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_bats<-sqrt(sum((Error_bats^2))/validation_data_days) # Root mean square forecast error
MSE_bats<-(sum((Error_bats^2))/validation_data_days) # Mean square forecast error
MAD_bats<-abs((sum(testing_data-validation_forecast))/validation_data_days) # mean deviation of the forecasts (absolute value of the mean error)
AEOF_bats<-c(Error_bats)
REOF_Abats<-c(paste(round(REOF_A_bats,3),"%"))
REOF_Fbats<-c(paste(round(REOF_F_bats,3),"%"))
data.frame(correlation_bats,MSE_bats,RMSE_bats,MAPE_Mean_All.bats_Model,MAD_bats) # error analysis for the BATS model: correlation, MSE, RMSE, MAPE, MAD
## correlation_bats MSE_bats RMSE_bats MAPE_Mean_All.bats_Model MAD_bats
## 1 0.9988132 351815.9 593.1408 1.101 438.5248
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_bats,REOF_Abats,REOF_Fbats) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_bats REOF_Abats REOF_Fbats
## 1 2021-02-16 Tuesday 117.41053 0.283 % 0.284 %
## 2 2021-02-17 Wednesday 42.61554 0.102 % 0.102 %
## 3 2021-02-18 Thursday 214.64161 0.509 % 0.506 %
## 4 2021-02-19 Friday 402.66768 0.948 % 0.939 %
## 5 2021-02-20 Saturday 610.69374 1.427 % 1.407 %
## 6 2021-02-21 Sunday 836.71981 1.943 % 1.906 %
## 7 2021-02-22 Monday 1079.74588 2.492 % 2.431 %
## TBATS Model
# Data Modeling
data_series<-ts(training_data)
model_TBATS<-forecast:::fitSpecificTBATS(data_series,use.box.cox=FALSE, use.beta=TRUE, seasonal.periods=c(6),use.damping=FALSE,k.vector=c(2))
accuracy(model_TBATS) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 3.923347 76.86155 44.49699 NaN Inf 0.3703363 0.002298664
# Print Model Parameters
model_TBATS
## TBATS(1, {0,0}, 1, {<6,2>})
##
## Call: NULL
##
## Parameters
## Alpha: 1.186546
## Beta: 0.375599
## Damping Parameter: 1
## Gamma-1 Values: 0.0001263085
## Gamma-2 Values: 0.0008068392
##
## Seed States:
## [,1]
## [1,] 0.5993243
## [2,] 2.9817775
## [3,] 7.3466487
## [4,] -1.0977175
## [5,] -2.2708810
## [6,] -2.4755462
##
## Sigma: 76.86155
## AIC: 4969.92
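# Editorial sketch (not part of the original run): forecast:::fitSpecificTBATS()
# is an internal, unexported helper. The exported entry point is tbats(), which
# selects the Fourier order k automatically and may therefore not reproduce the
# exact model printed above.
model_TBATS_auto <- tbats(data_series, use.box.cox = FALSE, use.trend = TRUE,
                          use.damped.trend = FALSE, seasonal.periods = 6)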
plot(model_TBATS,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)

# Testing Data Evaluation
forecasting_tbats <- predict(model_TBATS, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_tbats$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using TBATS Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using TBATS Model for ==> Covid 19 Recovery cases in Chelyabinsk"
MAPE_Mean_All.TBATS_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.TBATS<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_TBATS<-paste(round(MAPE_Per_Day,3),"%")
MAPE_TBATS_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in TBATS Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in TBATS Model for ==> Covid 19 Recovery cases in Chelyabinsk"
paste(MAPE_Mean_All.TBATS,"%")
## [1] "1.095 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in TBATS Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in TBATS Model for ==> Covid 19 Recovery cases in Chelyabinsk"
data.frame(date_TBATS=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_TBATS=validation_forecast,MAPE_TBATS_Model)
## date_TBATS validation_data_by_name actual_data forecasting_TBATS
## 1 2021-02-16 Tuesday 41496 41388.46
## 2 2021-02-17 Wednesday 41842 41890.38
## 3 2021-02-18 Thursday 42176 42389.49
## 4 2021-02-19 Friday 42494 42893.27
## 5 2021-02-20 Saturday 42792 43392.63
## 6 2021-02-21 Sunday 43072 43903.36
## 7 2021-02-22 Monday 43335 44421.34
## MAPE_TBATS_Model
## 1 0.259 %
## 2 0.116 %
## 3 0.506 %
## 4 0.94 %
## 5 1.404 %
## 6 1.93 %
## 7 2.507 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_TBATS=tail(forecasting_tbats$mean,N_forecasting_days))
## FD forecating_date forecasting_by_TBATS
## 1 2021-02-23 Tuesday 44923.27
## 2 2021-02-24 Wednesday 45422.37
## 3 2021-02-25 Thursday 45926.15
## 4 2021-02-26 Friday 46425.51
## 5 2021-02-27 Saturday 46936.25
## 6 2021-02-28 Sunday 47454.23
## 7 2021-03-01 Monday 47956.16
plot(forecasting_tbats)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph2<-autoplot(forecasting_tbats,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph2

## Error of forecasting TBATS Model
Error_tbats<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_tbats1<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_tbats<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_tbats<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_tbats<-sqrt(sum((Error_tbats^2))/validation_data_days) # Root mean square forecast error
MSE_tbats<-(sum((Error_tbats^2))/validation_data_days) # Mean square forecast error
MAD_tbats<-abs((sum(testing_data-validation_forecast))/validation_data_days) # mean deviation of the forecasts (absolute value of the mean error)
AEOF_tbats<-c(Error_tbats)
REOF_A_tbats<-c(paste(round(REOF_A_tbats1,3),"%"))
REOF_F_tbats<-c(paste(round(REOF_F_tbats,3),"%"))
data.frame(correlation_tbats,MSE_tbats,RMSE_tbats,MAPE_Mean_All.TBATS_Model,MAD_tbats) # error analysis for the TBATS model: correlation, MSE, RMSE, MAPE, MAD
## correlation_tbats MSE_tbats RMSE_tbats MAPE_Mean_All.TBATS_Model MAD_tbats
## 1 0.998534 350136.3 591.7232 1.095 438.8466
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_tbats,REOF_A_tbats,REOF_F_tbats) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_tbats REOF_A_tbats REOF_F_tbats
## 1 2021-02-16 Tuesday 107.54361 0.259 % 0.26 %
## 2 2021-02-17 Wednesday 48.38492 0.116 % 0.116 %
## 3 2021-02-18 Thursday 213.48628 0.506 % 0.504 %
## 4 2021-02-19 Friday 399.26569 0.94 % 0.931 %
## 5 2021-02-20 Saturday 600.62693 1.404 % 1.384 %
## 6 2021-02-21 Sunday 831.36248 1.93 % 1.894 %
## 7 2021-02-22 Monday 1086.34336 2.507 % 2.446 %
## Holt's linear trend
# Data Modeling
data_series<-ts(training_data)
model_holt<-holt(data_series,h=N_forecasting_days+validation_data_days,lambda = "auto")
accuracy(model_holt) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 1.276298 80.63935 45.45389 Inf Inf 0.3783003 0.2764601
# Print Model Parameters
summary(model_holt$model)
## Holt's method
##
## Call:
## holt(y = data_series, h = N_forecasting_days + validation_data_days,
##
## Call:
## lambda = "auto")
##
## Box-Cox transformation: lambda= 0.4499
##
## Smoothing parameters:
## alpha = 0.9999
## beta = 0.2704
##
## Initial states:
## l = -2.6137
## b = -1.002
##
## sigma: 0.5736
##
## AIC AICc BIC
## 1615.530 1615.709 1634.690
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 1.276298 80.63935 45.45389 Inf Inf 0.3783003 0.2764601
# Testing Data Evaluation
forecasting_holt <- predict(model_holt, h=N_forecasting_days+validation_data_days,lambda = "auto")
validation_forecast<-head(forecasting_holt$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using holt Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using holt Model for ==> Covid 19 Recovery cases in Chelyabinsk"
MAPE_Mean_All.Holt_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.Holt<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_holt<-paste(round(MAPE_Per_Day,3),"%")
MAPE_holt_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in holt Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in holt Model for ==> Covid 19 Recovery cases in Chelyabinsk"
paste(MAPE_Mean_All.Holt,"%")
## [1] "1.18 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in holt Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in holt Model for ==> Covid 19 Recovery cases in Chelyabinsk"
data.frame(date_holt=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_holt=validation_forecast,MAPE_holt_Model)
## date_holt validation_data_by_name actual_data forecasting_holt
## 1 2021-02-16 Tuesday 41496 41363.61
## 2 2021-02-17 Wednesday 41842 41878.73
## 3 2021-02-18 Thursday 42176 42397.36
## 4 2021-02-19 Friday 42494 42919.51
## 5 2021-02-20 Saturday 42792 43445.17
## 6 2021-02-21 Sunday 43072 43974.36
## 7 2021-02-22 Monday 43335 44507.07
## MAPE_holt_Model
## 1 0.319 %
## 2 0.088 %
## 3 0.525 %
## 4 1.001 %
## 5 1.526 %
## 6 2.095 %
## 7 2.705 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_holt=tail(forecasting_holt$mean,N_forecasting_days))
## FD forecating_date forecasting_by_holt
## 1 2021-02-23 Tuesday 45043.32
## 2 2021-02-24 Wednesday 45583.09
## 3 2021-02-25 Thursday 46126.41
## 4 2021-02-26 Friday 46673.27
## 5 2021-02-27 Saturday 47223.68
## 6 2021-02-28 Sunday 47777.64
## 7 2021-03-01 Monday 48335.16
plot(forecasting_holt)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph3<-autoplot(forecasting_holt,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph3

## Error of forecasting by using Holt's linear model
Error_Holt<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_Holt1<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_Holt<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_Holt<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_Holt<-sqrt(sum((Error_Holt^2))/validation_data_days) # Root mean square forecast error
MSE_Holt<-(sum((Error_Holt^2))/validation_data_days) # Mean square forecast error
MAD_Holt<-abs((sum(testing_data-validation_forecast))/validation_data_days) # mean deviation of the forecasts (absolute value of the mean error)
AEOF_Holt<-c(Error_Holt)
REOF_A_Holt<-c(paste(round(REOF_A_Holt1,3),"%"))
REOF_F_Holt<-c(paste(round(REOF_F_Holt,3),"%"))
REOF_A_Holt11<-mean(abs(((testing_data-validation_forecast)/testing_data)*100))
data.frame(correlation_Holt,MSE_Holt,RMSE_Holt,MAPE_Mean_All.Holt_Model,MAD_Holt) # error analysis for Holt's linear model: correlation, MSE, RMSE, MAPE, MAD
## correlation_Holt MSE_Holt RMSE_Holt MAPE_Mean_All.Holt_Model MAD_Holt
## 1 0.9985134 409083.4 639.5963 1.18 468.4035
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_Holt,REOF_A_Holt,REOF_F_Holt) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_Holt REOF_A_Holt REOF_F_Holt
## 1 2021-02-16 Tuesday 132.39056 0.319 % 0.32 %
## 2 2021-02-17 Wednesday 36.73239 0.088 % 0.088 %
## 3 2021-02-18 Thursday 221.36463 0.525 % 0.522 %
## 4 2021-02-19 Friday 425.51051 1.001 % 0.991 %
## 5 2021-02-20 Saturday 653.17434 1.526 % 1.503 %
## 6 2021-02-21 Sunday 902.36040 2.095 % 2.052 %
## 7 2021-02-22 Monday 1172.07297 2.705 % 2.633 %
# Auto ARIMA Model
##################
require(tseries) # tseries is needed to test stationarity of the time series
paste ("tests For Check Stationarity in series ==> ",y_lab, sep=" ")
## [1] "tests For Check Stationarity in series ==> Covid 19 Recovery cases in Chelyabinsk"
kpss.test(data_series) # apply the KPSS test
## Warning in kpss.test(data_series): p-value smaller than printed p-value
##
## KPSS Test for Level Stationarity
##
## data: data_series
## KPSS Level = 5.3877, Truncation lag parameter = 5, p-value = 0.01
pp.test(data_series) # apply the PP test
## Warning in pp.test(data_series): p-value greater than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: data_series
## Dickey-Fuller Z(alpha) = 4.9461, Truncation lag parameter = 5, p-value
## = 0.99
## alternative hypothesis: stationary
adf.test(data_series) # apply the ADF test
## Warning in adf.test(data_series): p-value greater than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: data_series
## Dickey-Fuller = -0.032987, Lag order = 6, p-value = 0.99
## alternative hypothesis: stationary
ndiffs(data_series) # estimate the number of differences required for stationarity
## [1] 2
#Taking the first difference
diff1_x1<-diff(data_series)
autoplot(diff1_x1, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", ylab=y_lab,main = "1st differenced series")
## Warning: Ignoring unknown parameters: col.main, col.lab, col.sub

## Testing the stationarity of the first-differenced series
paste ("tests For Check Stationarity in series after taking first differences in ==> ",y_lab, sep=" ")
## [1] "tests For Check Stationarity in series after taking first differences in ==> Covid 19 Recovery cases in Chelyabinsk"
kpss.test(diff1_x1) # apply the KPSS test after taking first differences
## Warning in kpss.test(diff1_x1): p-value smaller than printed p-value
##
## KPSS Test for Level Stationarity
##
## data: diff1_x1
## KPSS Level = 2.2374, Truncation lag parameter = 5, p-value = 0.01
pp.test(diff1_x1) # apply the PP test after taking first differences
## Warning in pp.test(diff1_x1): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: diff1_x1
## Dickey-Fuller Z(alpha) = -60.11, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
adf.test(diff1_x1) # apply the ADF test after taking first differences
##
## Augmented Dickey-Fuller Test
##
## data: diff1_x1
## Dickey-Fuller = -2.4311, Lag order = 6, p-value = 0.3948
## alternative hypothesis: stationary
#Taking the second difference
diff2_x1=diff(diff1_x1)
autoplot(diff2_x1, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", ylab=y_lab ,main = "2nd differenced series")
## Warning: Ignoring unknown parameters: col.main, col.lab, col.sub

## Testing the stationarity of the second-differenced series
paste ("tests For Check Stationarity in series after taking Second differences in",y_lab, sep=" ")
## [1] "tests For Check Stationarity in series after taking Second differences in Covid 19 Recovery cases in Chelyabinsk"
kpss.test(diff2_x1) # apply the KPSS test after taking second differences
## Warning in kpss.test(diff2_x1): p-value greater than printed p-value
##
## KPSS Test for Level Stationarity
##
## data: diff2_x1
## KPSS Level = 0.065319, Truncation lag parameter = 5, p-value = 0.1
pp.test(diff2_x1) # apply the PP test after taking second differences
## Warning in pp.test(diff2_x1): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: diff2_x1
## Dickey-Fuller Z(alpha) = -340.37, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
adf.test(diff2_x1) # apply the ADF test after taking second differences
## Warning in adf.test(diff2_x1): p-value smaller than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: diff2_x1
## Dickey-Fuller = -8.1551, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
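# Editorial sketch (helper not in the original script): the three stationarity
# tests are applied to the level, first- and second-differenced series above;
# a small wrapper could collect the p-values in one table.
stationarity_pvalues <- function(x) {
  data.frame(kpss = kpss.test(x)$p.value,
             pp   = pp.test(x)$p.value,
             adf  = adf.test(x)$p.value)
}
# Example:
# rbind(level = stationarity_pvalues(data_series),
#       diff1 = stationarity_pvalues(diff1_x1),
#       diff2 = stationarity_pvalues(diff2_x1))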
####Fitting an ARIMA Model
#1. Using auto arima function
model1 <- auto.arima(data_series,stepwise=FALSE, approximation=FALSE, trace=T, test = c("kpss", "adf", "pp")) # applying auto.arima
##
## ARIMA(0,2,0) : 3978.415
## ARIMA(0,2,1) : 3925.845
## ARIMA(0,2,2) : 3914.22
## ARIMA(0,2,3) : 3915.933
## ARIMA(0,2,4) : 3914.227
## ARIMA(0,2,5) : 3915.392
## ARIMA(1,2,0) : 3954.769
## ARIMA(1,2,1) : 3915.615
## ARIMA(1,2,2) : 3916.138
## ARIMA(1,2,3) : 3917.257
## ARIMA(1,2,4) : 3915.997
## ARIMA(2,2,0) : 3937.909
## ARIMA(2,2,1) : 3914.767
## ARIMA(2,2,2) : 3914.334
## ARIMA(2,2,3) : 3911.46
## ARIMA(3,2,0) : 3915.657
## ARIMA(3,2,1) : 3911.489
## ARIMA(3,2,2) : 3912.564
## ARIMA(4,2,0) : 3909.99
## ARIMA(4,2,1) : 3912.061
## ARIMA(5,2,0) : 3912.061
##
##
##
## Best model: ARIMA(4,2,0)
model1 # show the result of autoarima
## Series: data_series
## ARIMA(4,2,0)
##
## Coefficients:
## ar1 ar2 ar3 ar4
## -0.4371 -0.3717 -0.3249 -0.1529
## s.e. 0.0538 0.0562 0.0565 0.0547
##
## sigma^2 estimated as 5863: log likelihood=-1949.9
## AIC=3909.81 AICc=3909.99 BIC=3928.94
# Copy of the arima.string() helper (adapted from the forecast package source), used below to extract the (p,d,q) order of the best model
arima.string <- function(object, padding = FALSE) {
  order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)]
  m <- order[7]
  result <- paste("ARIMA(", order[1], ",", order[2], ",", order[3], ")", sep = "")
  if (m > 1 && sum(order[4:6]) > 0) {
    result <- paste(result, "(", order[4], ",", order[5], ",", order[6], ")[", m, "]", sep = "")
  }
  if (padding && m > 1 && sum(order[4:6]) == 0) {
    result <- paste(result, " ", sep = "")
    if (m <= 9) {
      result <- paste(result, " ", sep = "")
    } else if (m <= 99) {
      result <- paste(result, " ", sep = "")
    } else {
      result <- paste(result, " ", sep = "")
    }
  }
  if (!is.null(object$xreg)) {
    if (NCOL(object$xreg) == 1 && is.element("drift", names(object$coef))) {
      result <- paste(result, "with drift ")
    } else {
      result <- paste("Regression with", result, "errors")
    }
  } else {
    if (is.element("constant", names(object$coef)) || is.element("intercept", names(object$coef))) {
      result <- paste(result, "with non-zero mean")
    } else if (order[2] == 0 && order[5] == 0) {
      result <- paste(result, "with zero mean ")
    } else {
      result <- paste(result, " ")
    }
  }
  if (!padding) {
    result <- gsub("[ ]*$", "", result)
  }
  return(result)
}
bestmodel <- arima.string(model1, padding = TRUE)
bestmodel <- substring(bestmodel,7,11)
bestmodel <- gsub(" ", "", bestmodel)
bestmodel <- gsub(")", "", bestmodel)
bestmodel <- strsplit(bestmodel, ",")[[1]]
bestmodel <- c(strtoi(bestmodel[1]),strtoi(bestmodel[2]),strtoi(bestmodel[3]))
bestmodel
## [1] 4 2 0
strtoi(bestmodel[3])
## [1] 0
#2. Using ACF and PACF Function
#par(mfrow=c(1,2)) # code for placing two plots in one graph
acf(diff2_x1, lag.max=20, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab, main=paste("ACF - 2nd differenced series ",y_lab, sep=" ")) # plot the ACF (autocorrelation function) of the second-differenced series

pacf(diff2_x1, lag.max=20, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab, main=paste("PACF - 2nd differenced series ",y_lab, sep=" ")) # plot the PACF (partial autocorrelation function) of the second-differenced series

library(forecast) # forecast package (already loaded above)
x1_model1 <- arima(data_series, order=c(bestmodel)) # fit the best model selected by auto.arima
x1_model1 # Show result of best model of auto arima
##
## Call:
## arima(x = data_series, order = c(bestmodel))
##
## Coefficients:
## ar1 ar2 ar3 ar4
## -0.4371 -0.3717 -0.3249 -0.1529
## s.e. 0.0538 0.0562 0.0565 0.0547
##
## sigma^2 estimated as 5794: log likelihood = -1949.9, aic = 3909.81
paste ("accuracy of autoarima Model For ==> ",y_lab, sep=" ")
## [1] "accuracy of autoarima Model For ==> Covid 19 Recovery cases in Chelyabinsk"
accuracy(x1_model1) # accuracy of the best model from auto.arima
## ME RMSE MAE MPE MAPE MASE
## Training set 3.269987 75.89213 43.71218 0.449983 2.388426 0.3638045
## ACF1
## Training set -0.002642082
x1_model1$x # stats::arima() does not store the input series, so this returns NULL
## NULL
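# Editorial note (sketch, not in the original run): stats::arima() does not keep
# the input series, hence the NULL above. forecast::Arima() stores it in the $x
# component of the fitted object; an equivalent fit would be:
x1_model1_alt <- forecast::Arima(data_series, order = bestmodel)
# x1_model1_alt$x now contains the training series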
checkresiduals(x1_model1,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab) # checkresiduals from best model from using auto arima

##
## Ljung-Box test
##
## data: Residuals from ARIMA(4,2,0)
## Q* = 5.3283, df = 6, p-value = 0.5024
##
## Model df: 4. Total lags used: 10
paste("Box-Ljung test , Ljung-Box test For Modelling for ==> ",y_lab, sep=" ")
## [1] "Box-Ljung test , Ljung-Box test For Modelling for ==> Covid 19 Recovery cases in Chelyabinsk"
Box.test(x1_model1$residuals^2, lag=20, type="Ljung-Box") # Ljung-Box test on the squared residuals (check for remaining ARCH effects)
##
## Box-Ljung test
##
## data: x1_model1$residuals^2
## X-squared = 103.46, df = 20, p-value = 3.009e-13
library(tseries)
jarque.bera.test(x1_model1$residuals) # Jarque-Bera test for normality of the residuals
##
## Jarque Bera Test
##
## data: x1_model1$residuals
## X-squared = 1415.8, df = 2, p-value < 2.2e-16
#Actual Vs Fitted
plot(data_series, col='red',lwd=2, main="Actual vs Fitted Plot", xlab='Time in (days)', ylab=y_lab) # plot actual and Fitted model
lines(fitted(x1_model1), col='black')

#Test data
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) ) # make testing data in time series and start from rows-6
forecasting_auto_arima <- forecast(x1_model1, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_auto_arima$mean,validation_data_days)
MAPE_Per_Day<-round(abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using bats Model for ==> Covid 19 Recovery cases in Chelyabinsk"
MAPE_Mean_All.ARIMA_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.ARIMA<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_auto_arima<-paste(round(MAPE_Per_Day,3),"%")
MAPE_auto.arima_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in bats Model for ==> Covid 19 Recovery cases in Chelyabinsk"
paste(MAPE_Mean_All.ARIMA,"%")
## [1] "0.875 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in bats Model for ==> Covid 19 Recovery cases in Chelyabinsk"
data.frame(date_auto.arima=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_auto.arima=validation_forecast,MAPE_auto.arima_Model)
## date_auto.arima validation_data_by_name actual_data forecasting_auto.arima
## 1 2021-02-16 Tuesday 41496 41342.03
## 2 2021-02-17 Wednesday 41842 41799.96
## 3 2021-02-18 Thursday 42176 42274.91
## 4 2021-02-19 Friday 42494 42765.80
## 5 2021-02-20 Saturday 42792 43266.06
## 6 2021-02-21 Sunday 43072 43755.67
## 7 2021-02-22 Monday 43335 44238.68
## MAPE_auto.arima_Model
## 1 0.371 %
## 2 0.1 %
## 3 0.235 %
## 4 0.64 %
## 5 1.108 %
## 6 1.587 %
## 7 2.085 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_auto.arima=tail(forecasting_auto_arima$mean,N_forecasting_days))
## FD forecating_date forecasting_by_auto.arima
## 1 2021-02-23 Tuesday 44723.05
## 2 2021-02-24 Wednesday 45211.31
## 3 2021-02-25 Thursday 45701.13
## 4 2021-02-26 Friday 46189.39
## 5 2021-02-27 Saturday 46676.28
## 6 2021-02-28 Sunday 47163.25
## 7 2021-03-01 Monday 47650.96
plot(forecasting_auto_arima)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph4<-autoplot(forecasting_auto_arima,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph4

MAPE_Mean_All.ARIMA
## [1] "0.875 % MAPE 7 day Covid 19 Recovery cases in Chelyabinsk"
## Error of forecasting
Error_auto.arima<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_auto.arima<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_auto.arima<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_auto.arima<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_auto.arima<-sqrt(sum((Error_auto.arima^2))/validation_data_days) # Root mean square forecast error
MSE_auto.arima<-(sum((Error_auto.arima^2))/validation_data_days) # Mean square forecast error
MAD_auto.arima<-abs((sum(testing_data-validation_forecast))/validation_data_days) # mean deviation of the forecasts (absolute value of the mean error)
AEOF_auto.arima<-c(Error_auto.arima)
REOF_auto.arima1<-c(paste(round(REOF_A_auto.arima,3),"%"))
REOF_auto.arima2<-c(paste(round(REOF_F_auto.arima,3),"%"))
data.frame(correlation_auto.arima,MSE_auto.arima,RMSE_auto.arima,MAPE_Mean_All.ARIMA_Model,MAD_auto.arima) # error analysis for the auto.arima model: correlation, MSE, RMSE, MAPE, MAD
## correlation_auto.arima MSE_auto.arima RMSE_auto.arima
## 1 0.9983019 231130.2 480.76
## MAPE_Mean_All.ARIMA_Model MAD_auto.arima
## 1 0.875 319.4436
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_auto.arima,REOF_A_auto.arima=REOF_auto.arima1,REOF_F_auto.arima=REOF_auto.arima2) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_auto.arima REOF_A_auto.arima
## 1 2021-02-16 Tuesday 153.97368 0.371 %
## 2 2021-02-17 Wednesday 42.04276 0.1 %
## 3 2021-02-18 Thursday 98.90728 0.235 %
## 4 2021-02-19 Friday 271.80384 0.64 %
## 5 2021-02-20 Saturday 474.05858 1.108 %
## 6 2021-02-21 Sunday 683.67356 1.587 %
## 7 2021-02-22 Monday 903.67848 2.085 %
## REOF_F_auto.arima
## 1 0.372 %
## 2 0.101 %
## 3 0.234 %
## 4 0.636 %
## 5 1.096 %
## 6 1.562 %
## 7 2.043 %
# Table of MAPE values by model for the region
best_recommended_model <- min(MAPE_Mean_All_NNAR,MAPE_Mean_All.bats_Model,MAPE_Mean_All.TBATS_Model,MAPE_Mean_All.Holt_Model,MAPE_Mean_All.ARIMA_Model)
paste("System Choose Least Error ==> ( MAPE %) of Forecasting by using bats model and BATS Model, Holt's Linear Models , and autoarima for ==> ", y_lab , sep=" ")
## [1] "System Choose Least Error ==> ( MAPE %) of Forecasting by using bats model and BATS Model, Holt's Linear Models , and autoarima for ==> Covid 19 Recovery cases in Chelyabinsk"
best_recommended_model
## [1] 0.875
x1<-if(best_recommended_model >= MAPE_Mean_All.bats_Model) {paste("BATS Model")}
x2<-if(best_recommended_model >= MAPE_Mean_All.TBATS_Model) {paste("TBATS Model")}
x3<-if(best_recommended_model >= MAPE_Mean_All.Holt_Model) {paste("Holt Model")}
x4<-if(best_recommended_model >= MAPE_Mean_All.ARIMA_Model) {paste("ARIMA Model")}
x5<-if(best_recommended_model >= MAPE_Mean_All_NNAR) {paste("NNAR Model")}
result<-c(x1,x2,x3,x4,x5)
table.error<-data.frame(country.name,NNAR.model=MAPE_Mean_All_NNAR, BATS.Model=MAPE_Mean_All.bats_Model,TBATS.Model=MAPE_Mean_All.TBATS_Model,Holt.Model=MAPE_Mean_All.Holt_Model,ARIMA.Model=MAPE_Mean_All.ARIMA_Model,Best.Model=result)
library(ascii)
print(ascii(table(table.error)), type = "rest")
##
## +---+--------------+------------+------------+-------------+------------+-------------+-------------+------+
## | | country.name | NNAR.model | BATS.Model | TBATS.Model | Holt.Model | ARIMA.Model | Best.Model | Freq |
## +===+==============+============+============+=============+============+=============+=============+======+
## | 1 | Chelyabinsk | 2.038 | 1.101 | 1.095 | 1.18 | 0.875 | ARIMA Model | 1.00 |
## +---+--------------+------------+------------+-------------+------------+-------------+-------------+------+
message("System finished Forecasting by using autoarima and Holt's ,TBATS, and BATS, ==>",y_lab, sep=" ")
## System finished Forecasting by using autoarima and Holt's ,TBATS, and BATS, ==>Covid 19 Recovery cases in Chelyabinsk
message(" Thank you for using our System For Modelling ==> ",y_lab, sep=" ")
## Thank you for using our System For Modelling ==> Covid 19 Recovery cases in Chelyabinsk
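# Editorial sketch (not part of the original script): the model selection above
# can be written more compactly with which.min() over a named vector of the
# mean MAPE values; this reproduces the Best.Model column of the table.
mape_by_model <- c("NNAR Model"  = MAPE_Mean_All_NNAR,
                   "BATS Model"  = MAPE_Mean_All.bats_Model,
                   "TBATS Model" = MAPE_Mean_All.TBATS_Model,
                   "Holt Model"  = MAPE_Mean_All.Holt_Model,
                   "ARIMA Model" = MAPE_Mean_All.ARIMA_Model)
names(which.min(mape_by_model)) # "ARIMA Model" for this series (MAPE = 0.875 %)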