South Ural State University, Chelyabinsk, Russian Federation
# Import libraries
library(fpp2)
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
## -- Attaching packages ---------------------------------------------- fpp2 2.4 --
## v ggplot2 3.3.2 v fma 2.4
## v forecast 8.13 v expsmooth 2.3
##
library(forecast)
library(ggplot2)
library("readxl")
library(moments)
require(tseries)
## Loading required package: tseries
require(markovchain)
## Loading required package: markovchain
## Package: markovchain
## Version: 0.8.5-3
## Date: 2020-12-03
## BugReport: https://github.com/spedygiorgio/markovchain/issues
require(data.table)
## Loading required package: data.table
library(Hmisc)
## Loading required package: lattice
## Loading required package: survival
## Loading required package: Formula
##
## Attaching package: 'Hmisc'
## The following objects are masked from 'package:base':
##
## format.pval, units
## Global variables ##
Full_original_data <- read.csv("F:/Phd/University Conference/Chelyabinsk covid 19 data.csv") # path to your time-series data (CSV)
View(Full_original_data)
original_data<-Full_original_data$Deaths
y_lab <- "Covid 19 deaths cases in Chelyabinsk" # input name of data
Actual_date_interval <- c("2020/03/12","2021/02/22")
Forecast_date_interval <- c("2021/02/23","2021/03/1")
validation_data_days <-7
frequency<-"day"
Population <-1130319 # population of Chelyabinsk (population size for the SIR model)
country.name <- "Chelyabinsk"
# Data preparation & descriptive statistics
View(original_data) # View data in table in R
summary(original_data) # summary statistics of the time series
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0 27.5 104.5 212.4 265.5 963.0
describe((original_data)) # describe your time series
## (original_data)
## n missing distinct Info Mean Gmd .05 .10
## 348 0 172 0.997 212.4 255.3 0.0 0.0
## .25 .50 .75 .90 .95
## 27.5 104.5 265.5 662.7 822.5
##
## lowest : 0 1 2 3 4, highest: 937 942 950 957 963
# additional descriptive statistics (including the standard deviation)
library(pastecs)
##
## Attaching package: 'pastecs'
## The following objects are masked from 'package:data.table':
##
## first, last
stat.desc(original_data)
## nbr.val nbr.null nbr.na min max range
## 348.00000 48.00000 0.00000 0.00000 963.00000 963.00000
## sum median mean SE.mean CI.mean.0.95 var
## 73923.00000 104.50000 212.42241 13.84026 27.22136 66660.38301
## std.dev coef.var
## 258.18672 1.21544
data.frame(skewness=skewness(original_data)) # calculate coefficient of skewness
## skewness
## 1 1.486538
data.frame(kurtosis=kurtosis(original_data)) # calculate coefficient of kurtosis
## kurtosis
## 1 4.038715
sd(original_data)
## [1] 258.1867
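# Interpretation: skewness 1.49 and kurtosis 4.04 indicate a right-skewed,
# heavier-tailed-than-normal distribution, and the standard deviation (258.2)
# exceeds the mean (212.4), i.e. the series is highly dispersed.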
# Processing the input data
rows <- NROW(original_data) # calculate number of rows in time series (number of days)
training_data<-original_data[1:(rows-validation_data_days)] # Training data
testing_data<-original_data[(rows-validation_data_days+1):rows] #testing data
AD<-fulldate<-seq(as.Date(Actual_date_interval[1]),as.Date(Actual_date_interval[2]), frequency) # date sequence for the actual data
FD<-seq(as.Date(Forecast_date_interval[1]),as.Date(Forecast_date_interval[2]), frequency) # date sequence for the forecast horizon
N_forecasting_days<-nrow(data.frame(FD)) # number of days to forecast
validation_dates<-tail(AD,validation_data_days) # select validation_dates
validation_data_by_name<-weekdays(validation_dates) # weekday names of the validation dates
forecasting_data_by_name<-weekdays(FD) # weekday names of the forecast dates
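# A quick sanity check (not part of the original script) to confirm that the
# train/test split and the date vector line up before any model is fitted:
stopifnot(length(training_data) + length(testing_data) == rows) # split covers the full series
stopifnot(length(AD) == rows)                                   # one calendar date per observation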
# NNAR Model
data_series<-ts(training_data)
model_NNAR<-nnetar(data_series, size = 5)
saveRDS(model_NNAR, file = "model_NNAR.RDS")
my_model <- readRDS("model_NNAR.RDS")
accuracy(model_NNAR) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 2.461123e-05 2.160404 1.341017 -Inf Inf 0.4977575 0.2099648
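# Note: MPE/MAPE are -Inf/Inf on the training set because the series contains
# 48 zero values (see stat.desc above), so percentage errors divide by zero;
# RMSE, MAE and the scale-free MASE are the informative measures here.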
#Print Model Parameters
model_NNAR
## Series: data_series
## Model: NNAR(1,5)
## Call: nnetar(y = data_series, size = 5)
##
## Average of 20 networks, each of which is
## a 1-5-1 network with 16 weights
## options were - linear output units
##
## sigma^2 estimated as 4.667
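# NNAR(1,5): one lagged input and 5 hidden nodes; the forecast is the average
# of 20 fitted 1-5-1 networks (16 weights each), as printed above.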
# Testing Data Evaluation
forecasting_NNAR <- predict(model_NNAR, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_NNAR$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using NNAR Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using NNAR Model for ==> Covid 19 deaths cases in Chelyabinsk"
MAPE_Mean_All<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_Mean_All_NNAR<-round(mean(MAPE_Per_Day),3)
MAPE_NNAR<-paste(round(MAPE_Per_Day,3),"%")
MAPE_NNAR_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in NNAR Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in NNAR Model for ==> Covid 19 deaths cases in Chelyabinsk"
paste(MAPE_Mean_All,"%")
## [1] "0.547 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in NNAR Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in NNAR Model for ==> Covid 19 deaths cases in Chelyabinsk"
data.frame(date_NNAR=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_NNAR=validation_forecast,MAPE_NNAR_Model)
## date_NNAR validation_data_by_name actual_data forecasting_NNAR
## 1 2021-02-16 Tuesday 924 922.5074
## 2 2021-02-17 Wednesday 930 928.5100
## 3 2021-02-18 Thursday 937 934.0104
## 4 2021-02-19 Friday 942 939.0193
## 5 2021-02-20 Saturday 950 943.5538
## 6 2021-02-21 Sunday 957 947.6364
## 7 2021-02-22 Monday 963 951.2937
## MAPE_NNAR_Model
## 1 0.162 %
## 2 0.16 %
## 3 0.319 %
## 4 0.316 %
## 5 0.679 %
## 6 0.978 %
## 7 1.216 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_NNAR=tail(forecasting_NNAR$mean,N_forecasting_days))
## FD forecating_date forecasting_by_NNAR
## 1 2021-02-23 Tuesday 954.5548
## 2 2021-02-24 Wednesday 957.4505
## 3 2021-02-25 Thursday 960.0119
## 4 2021-02-26 Friday 962.2699
## 5 2021-02-27 Saturday 964.2544
## 6 2021-02-28 Sunday 965.9939
## 7 2021-03-01 Monday 967.5148
plot(forecasting_NNAR)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph1<-autoplot(forecasting_NNAR,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph1

saveRDS(model_NNAR, file = "model_NNAR.RDS")
## Error of forecasting
Error_NNAR<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_NNAR<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_NNAR<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_NNAR<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_NNAR<-sqrt(sum((Error_NNAR^2))/validation_data_days) # Root mean square forecast error
MSE_NNAR<-(sum((Error_NNAR^2))/validation_data_days) # Mean square forecast error (MSE)
MAD_NNAR<-abs((sum(testing_data-validation_forecast))/validation_data_days) # absolute value of the mean forecast error
AEOF_NNAR<-c(Error_NNAR)
REOF_ANNAR<-c(paste(round(REOF_A_NNAR,3),"%"))
REOF_FNNAR<-c(paste(round(REOF_F_NNAR,3),"%"))
data.frame(correlation_NNAR,MSE_NNAR,RMSE_NNAR,MAPE_Mean_All,MAD_NNAR) # error analysis for the NNAR model: correlation, MSE, RMSE, MAPE, MAD
## correlation_NNAR MSE_NNAR RMSE_NNAR
## 1 0.9938404 41.21964 6.420252
## MAPE_Mean_All MAD_NNAR
## 1 0.547 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk 5.209861
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_NNAR,REOF_ANNAR,REOF_FNNAR) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_NNAR REOF_ANNAR REOF_FNNAR
## 1 2021-02-16 Tuesday 1.492579 0.162 % 0.162 %
## 2 2021-02-17 Wednesday 1.490048 0.16 % 0.16 %
## 3 2021-02-18 Thursday 2.989616 0.319 % 0.32 %
## 4 2021-02-19 Friday 2.980732 0.316 % 0.317 %
## 5 2021-02-20 Saturday 6.446217 0.679 % 0.683 %
## 6 2021-02-21 Sunday 9.363563 0.978 % 0.988 %
## 7 2021-02-22 Monday 11.706273 1.216 % 1.231 %
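# The error metrics above are recomputed verbatim for every model that follows.
# A small helper such as the hypothetical forecast_errors() below (a sketch,
# not part of the original script) could factor out that repetition; it reuses
# exactly the formulas already applied in this section:
forecast_errors <- function(actual, forecast, label = "model") {
  err <- actual - forecast
  data.frame(model       = label,
             correlation = cor(actual, forecast, method = "pearson"), # Pearson correlation
             MSE         = mean(err^2),                               # mean square error
             RMSE        = sqrt(mean(err^2)),                         # root mean square error
             MAPE        = mean(abs(err / actual)) * 100,             # mean absolute percentage error
             MAD         = abs(mean(err)))                            # absolute mean error, as computed above
}
# Example: forecast_errors(testing_data, validation_forecast, "NNAR")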
# BATS Model
# Data Modeling
data_series<-ts(training_data) # convert the training data to a time-series object
autoplot(data_series ,xlab=paste ("Time in ", frequency, sep=" "), ylab = y_lab, main=paste ("Actual Data :", y_lab, sep=" "))

model_bats<-bats(data_series)
accuracy(model_bats) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 0.1128668 2.163422 1.204211 Inf Inf 0.446978 -0.004508876
# Print Model Parameters
model_bats
## BATS(1, {0,0}, 1, -)
##
## Call: bats(y = data_series)
##
## Parameters
## Alpha: 1.030885
## Beta: 0.2222292
## Damping Parameter: 1
##
## Seed States:
## [,1]
## [1,] 0.009660588
## [2,] -0.033358967
##
## Sigma: 2.163422
## AIC: 2522.965
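# BATS(1, {0,0}, 1, -): Box-Cox parameter 1 (i.e. effectively no transformation),
# ARMA(0,0) errors, damping parameter 1, and no seasonal periods.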
# Plotting the BATS model
plot(model_bats,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4)

# Testing Data Evaluation
forecasting_bats <- predict(model_bats, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_bats$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using bats Model for ==> Covid 19 deaths cases in Chelyabinsk"
MAPE_Mean_All.bats_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.bats<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_bats<-paste(round(MAPE_Per_Day,3),"%")
MAPE_bats_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in bats Model for ==> Covid 19 deaths cases in Chelyabinsk"
paste(MAPE_Mean_All.bats,"%")
## [1] "0.708 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in bats Model for ==> Covid 19 deaths cases in Chelyabinsk"
data.frame(date_bats=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_bats=validation_forecast,MAPE_bats_Model)
## date_bats validation_data_by_name actual_data forecasting_bats
## 1 2021-02-16 Tuesday 924 924.4592
## 2 2021-02-17 Wednesday 930 932.9789
## 3 2021-02-18 Thursday 937 941.4986
## 4 2021-02-19 Friday 942 950.0183
## 5 2021-02-20 Saturday 950 958.5380
## 6 2021-02-21 Sunday 957 967.0577
## 7 2021-02-22 Monday 963 975.5775
## MAPE_bats_Model
## 1 0.05 %
## 2 0.32 %
## 3 0.48 %
## 4 0.851 %
## 5 0.899 %
## 6 1.051 %
## 7 1.306 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_bats=tail(forecasting_bats$mean,N_forecasting_days))
## FD forecating_date forecasting_by_bats
## 1 2021-02-23 Tuesday 984.0972
## 2 2021-02-24 Wednesday 992.6169
## 3 2021-02-25 Thursday 1001.1366
## 4 2021-02-26 Friday 1009.6563
## 5 2021-02-27 Saturday 1018.1760
## 6 2021-02-28 Sunday 1026.6957
## 7 2021-03-01 Monday 1035.2154
plot(forecasting_bats)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph1<-autoplot(forecasting_bats,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph1

## Error of forecasting
Error_bats<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_bats<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_bats<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_bats<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_bats<-sqrt(sum((Error_bats^2))/validation_data_days) # Root mean square forecast error
MSE_bats<-(sum((Error_bats^2))/validation_data_days) # Mean square forecast error (MSE)
MAD_bats<-abs((sum(testing_data-validation_forecast))/validation_data_days) # absolute value of the mean forecast error
AEOF_bats<-c(Error_bats)
REOF_Abats<-c(paste(round(REOF_A_bats,3),"%"))
REOF_Fbats<-c(paste(round(REOF_F_bats,3),"%"))
data.frame(correlation_bats,MSE_bats,RMSE_bats,MAPE_Mean_All.bats_Model,MAD_bats) # error analysis for the BATS model: correlation, MSE, RMSE, MAPE, MAD
## correlation_bats MSE_bats RMSE_bats MAPE_Mean_All.bats_Model MAD_bats
## 1 0.9990562 60.83788 7.799864 0.708 6.732625
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_bats,REOF_Abats,REOF_Fbats) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_bats REOF_Abats REOF_Fbats
## 1 2021-02-16 Tuesday 0.4592291 0.05 % 0.05 %
## 2 2021-02-17 Wednesday 2.9789326 0.32 % 0.319 %
## 3 2021-02-18 Thursday 4.4986362 0.48 % 0.478 %
## 4 2021-02-19 Friday 8.0183398 0.851 % 0.844 %
## 5 2021-02-20 Saturday 8.5380433 0.899 % 0.891 %
## 6 2021-02-21 Sunday 10.0577469 1.051 % 1.04 %
## 7 2021-02-22 Monday 12.5774505 1.306 % 1.289 %
# TBATS Model
# Data Modeling
data_series<-ts(training_data)
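# fitSpecificTBATS() is an unexported helper of the forecast package (hence the
# ::: call); it is used here to force a particular TBATS specification (trend
# without damping, no Box-Cox, one seasonal period of 6 with k = 2 Fourier
# terms) rather than letting tbats() search over specifications.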
model_TBATS<-forecast:::fitSpecificTBATS(data_series,use.box.cox=FALSE, use.beta=TRUE, seasonal.periods=c(6),use.damping=FALSE,k.vector=c(2))
accuracy(model_TBATS) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 0.1100632 2.147359 1.237981 NaN Inf 0.4595125 -0.004931208
# Print Model Parameters
model_TBATS
## TBATS(1, {0,0}, 1, {<6,2>})
##
## Call: NULL
##
## Parameters
## Alpha: 1.022415
## Beta: 0.2265549
## Damping Parameter: 1
## Gamma-1 Values: -0.001642296
## Gamma-2 Values: 0.001166739
##
## Seed States:
## [,1]
## [1,] 0.031251729
## [2,] -0.036238078
## [3,] 0.215435948
## [4,] 0.034530050
## [5,] -0.232526945
## [6,] -0.007323124
##
## Sigma: 2.147359
## AIC: 2529.883
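# TBATS(1, {0,0}, 1, {<6,2>}): Box-Cox parameter 1 (no effective transformation),
# ARMA(0,0) errors, damping parameter 1, and one seasonal component of period 6
# modelled with 2 Fourier terms, matching the fitSpecificTBATS() call above.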
plot(model_TBATS,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)

# Testing Data Evaluation
forecasting_tbats <- predict(model_TBATS, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_tbats$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using TBATS Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using TBATS Model for ==> Covid 19 deaths cases in Chelyabinsk"
MAPE_Mean_All.TBATS_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.TBATS<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_TBATS<-paste(round(MAPE_Per_Day,3),"%")
MAPE_TBATS_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in TBATS Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in TBATS Model for ==> Covid 19 deaths cases in Chelyabinsk"
paste(MAPE_Mean_All.TBATS,"%")
## [1] "0.682 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in TBATS Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in TBATS Model for ==> Covid 19 deaths cases in Chelyabinsk"
data.frame(date_TBATS=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_TBATS=validation_forecast,MAPE_TBATS_Model)
## date_TBATS validation_data_by_name actual_data forecasting_TBATS
## 1 2021-02-16 Tuesday 924 924.6090
## 2 2021-02-17 Wednesday 930 933.0580
## 3 2021-02-18 Thursday 937 941.1544
## 4 2021-02-19 Friday 942 949.3860
## 5 2021-02-20 Saturday 950 958.0357
## 6 2021-02-21 Sunday 957 966.7553
## 7 2021-02-22 Monday 963 975.4094
## MAPE_TBATS_Model
## 1 0.066 %
## 2 0.329 %
## 3 0.443 %
## 4 0.784 %
## 5 0.846 %
## 6 1.019 %
## 7 1.289 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_TBATS=tail(forecasting_tbats$mean,N_forecasting_days))
## FD forecating_date forecasting_by_TBATS
## 1 2021-02-23 Tuesday 983.8583
## 2 2021-02-24 Wednesday 991.9547
## 3 2021-02-25 Thursday 1000.1863
## 4 2021-02-26 Friday 1008.8360
## 5 2021-02-27 Saturday 1017.5556
## 6 2021-02-28 Sunday 1026.2097
## 7 2021-03-01 Monday 1034.6587
plot(forecasting_tbats)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph2<-autoplot(forecasting_tbats,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph2

## Error of forecasting TBATS Model
Error_tbats<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_tbats1<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_tbats<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_tbats<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_tbats<-sqrt(sum((Error_tbats^2))/validation_data_days) # Root mean square forecast error
MSE_tbats<-(sum((Error_tbats^2))/validation_data_days) # Mean square forecast error (MSE)
MAD_tbats<-abs((sum(testing_data-validation_forecast))/validation_data_days) # absolute value of the mean forecast error
AEOF_tbats<-c(Error_tbats)
REOF_A_tbats<-c(paste(round(REOF_A_tbats1,3),"%"))
REOF_F_tbats<-c(paste(round(REOF_F_tbats,3),"%"))
data.frame(correlation_tbats,MSE_tbats,RMSE_tbats,MAPE_Mean_All.TBATS_Model,MAD_tbats) # error analysis for the TBATS model: correlation, MSE, RMSE, MAPE, MAD
## correlation_tbats MSE_tbats RMSE_tbats MAPE_Mean_All.TBATS_Model MAD_tbats
## 1 0.9992995 56.46622 7.514401 0.682 6.486809
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_tbats,REOF_A_tbats,REOF_F_tbats) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_tbats REOF_A_tbats REOF_F_tbats
## 1 2021-02-16 Tuesday 0.6090139 0.066 % 0.066 %
## 2 2021-02-17 Wednesday 3.0579715 0.329 % 0.328 %
## 3 2021-02-18 Thursday 4.1543825 0.443 % 0.441 %
## 4 2021-02-19 Friday 7.3859627 0.784 % 0.778 %
## 5 2021-02-20 Saturday 8.0356712 0.846 % 0.839 %
## 6 2021-02-21 Sunday 9.7553042 1.019 % 1.009 %
## 7 2021-02-22 Monday 12.4093556 1.289 % 1.272 %
# Holt's Linear Trend Model
# Data Modeling
data_series<-ts(training_data)
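# lambda = "auto" lets holt() pick a Box-Cox transformation automatically; the
# selected value (lambda = 0.7583) is reported in the model summary below.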
model_holt<-holt(data_series,h=N_forecasting_days+validation_data_days,lambda = "auto")
accuracy(model_holt) # accuracy on training data
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 0.06410443 2.172962 1.22817 NaN Inf 0.455871 0.05254633
# Print Model Parameters
summary(model_holt$model)
## Holt's method
##
## Call:
## holt(y = data_series, h = N_forecasting_days + validation_data_days,
##
## Call:
## lambda = "auto")
##
## Box-Cox transformation: lambda= 0.7583
##
## Smoothing parameters:
## alpha = 0.9999
## beta = 0.1952
##
## Initial states:
## l = -1.3226
## b = 3e-04
##
## sigma: 0.6449
##
## AIC AICc BIC
## 1695.493 1695.672 1714.652
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 0.06410443 2.172962 1.22817 NaN Inf 0.455871 0.05254633
# Testing Data Evaluation
forecasting_holt <- predict(model_holt, h=N_forecasting_days+validation_data_days,lambda = "auto")
validation_forecast<-head(forecasting_holt$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using holt Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using holt Model for ==> Covid 19 deaths cases in Chelyabinsk"
MAPE_Mean_All.Holt_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.Holt<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_holt<-paste(round(MAPE_Per_Day,3),"%")
MAPE_holt_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in holt Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in holt Model for ==> Covid 19 deaths cases in Chelyabinsk"
paste(MAPE_Mean_All.Holt,"%")
## [1] "0.812 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in holt Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in holt Model for ==> Covid 19 deaths cases in Chelyabinsk"
data.frame(date_holt=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_holt=validation_forecast,MAPE_holt_Model)
## date_holt validation_data_by_name actual_data forecasting_holt
## 1 2021-02-16 Tuesday 924 924.7117
## 2 2021-02-17 Wednesday 930 933.4431
## 3 2021-02-18 Thursday 937 942.1943
## 4 2021-02-19 Friday 942 950.9651
## 5 2021-02-20 Saturday 950 959.7556
## 6 2021-02-21 Sunday 957 968.5655
## 7 2021-02-22 Monday 963 977.3949
## MAPE_holt_Model
## 1 0.077 %
## 2 0.37 %
## 3 0.554 %
## 4 0.952 %
## 5 1.027 %
## 6 1.209 %
## 7 1.495 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_holt=tail(forecasting_holt$mean,N_forecasting_days))
## FD forecating_date forecasting_by_holt
## 1 2021-02-23 Tuesday 986.2435
## 2 2021-02-24 Wednesday 995.1114
## 3 2021-02-25 Thursday 1003.9985
## 4 2021-02-26 Friday 1012.9046
## 5 2021-02-27 Saturday 1021.8297
## 6 2021-02-28 Sunday 1030.7736
## 7 2021-03-01 Monday 1039.7364
plot(forecasting_holt)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph3<-autoplot(forecasting_holt,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph3

## Error of forecasting by using Holt's linear model
Error_Holt<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_Holt1<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_Holt<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_Holt<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_Holt<-sqrt(sum((Error_Holt^2))/validation_data_days) # Root mean square forecast error
MSE_Holt<-(sum((Error_Holt^2))/validation_data_days) # Mean square forecast error (MSE)
MAD_Holt<-abs((sum(testing_data-validation_forecast))/validation_data_days) # absolute value of the mean forecast error
AEOF_Holt<-c(Error_Holt)
REOF_A_Holt<-c(paste(round(REOF_A_Holt1,3),"%"))
REOF_F_Holt<-c(paste(round(REOF_F_Holt,3),"%"))
REOF_A_Holt11<-mean(abs(((testing_data-validation_forecast)/testing_data)*100))
data.frame(correlation_Holt,MSE_Holt,RMSE_Holt,MAPE_Mean_All.Holt_Model,MAD_Holt) # error analysis for Holt's linear model: correlation, MSE, RMSE, MAPE, MAD
## correlation_Holt MSE_Holt RMSE_Holt MAPE_Mean_All.Holt_Model MAD_Holt
## 1 0.9990907 79.40841 8.91114 0.812 7.718589
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_Holt,REOF_A_Holt,REOF_F_Holt) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_Holt REOF_A_Holt REOF_F_Holt
## 1 2021-02-16 Tuesday 0.7117273 0.077 % 0.077 %
## 2 2021-02-17 Wednesday 3.4431027 0.37 % 0.369 %
## 3 2021-02-18 Thursday 5.1942626 0.554 % 0.551 %
## 4 2021-02-19 Friday 8.9651119 0.952 % 0.943 %
## 5 2021-02-20 Saturday 9.7555565 1.027 % 1.016 %
## 6 2021-02-21 Sunday 11.5655037 1.209 % 1.194 %
## 7 2021-02-22 Monday 14.3948614 1.495 % 1.473 %
# Auto ARIMA Model
##################
require(tseries) # tseries is needed to test stationarity of the time series
paste ("tests For Check Stationarity in series ==> ",y_lab, sep=" ")
## [1] "tests For Check Stationarity in series ==> Covid 19 deaths cases in Chelyabinsk"
kpss.test(data_series) # apply the KPSS test
## Warning in kpss.test(data_series): p-value smaller than printed p-value
##
## KPSS Test for Level Stationarity
##
## data: data_series
## KPSS Level = 4.3099, Truncation lag parameter = 5, p-value = 0.01
pp.test(data_series) # apply the PP test
## Warning in pp.test(data_series): p-value greater than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: data_series
## Dickey-Fuller Z(alpha) = 3.5318, Truncation lag parameter = 5, p-value
## = 0.99
## alternative hypothesis: stationary
adf.test(data_series) # apply the ADF test
## Warning in adf.test(data_series): p-value greater than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: data_series
## Dickey-Fuller = 1.2681, Lag order = 6, p-value = 0.99
## alternative hypothesis: stationary
ndiffs(data_series) # estimate the number of differences required for stationarity
## [1] 2
#Taking the first difference
diff1_x1<-diff(data_series)
autoplot(diff1_x1, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", ylab=y_lab,main = "1st differenced series")
## Warning: Ignoring unknown parameters: col.main, col.lab, col.sub

# Testing the stationarity of the first differenced series
paste ("tests For Check Stationarity in series after taking first differences in ==> ",y_lab, sep=" ")
## [1] "tests For Check Stationarity in series after taking first differences in ==> Covid 19 deaths cases in Chelyabinsk"
kpss.test(diff1_x1) # apply the KPSS test after taking first differences
## Warning in kpss.test(diff1_x1): p-value smaller than printed p-value
##
## KPSS Test for Level Stationarity
##
## data: diff1_x1
## KPSS Level = 3.5973, Truncation lag parameter = 5, p-value = 0.01
pp.test(diff1_x1) # apply the PP test after taking first differences
## Warning in pp.test(diff1_x1): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: diff1_x1
## Dickey-Fuller Z(alpha) = -174.26, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
adf.test(diff1_x1) # apply the ADF test after taking first differences
##
## Augmented Dickey-Fuller Test
##
## data: diff1_x1
## Dickey-Fuller = -2.6114, Lag order = 6, p-value = 0.3188
## alternative hypothesis: stationary
#Taking the second difference
diff2_x1=diff(diff1_x1)
autoplot(diff2_x1, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", ylab=y_lab ,main = "2nd differenced series")
## Warning: Ignoring unknown parameters: col.main, col.lab, col.sub

# Testing the stationarity of the second differenced series
paste ("tests For Check Stationarity in series after taking Second differences in",y_lab, sep=" ")
## [1] "tests For Check Stationarity in series after taking Second differences in Covid 19 deaths cases in Chelyabinsk"
kpss.test(diff2_x1) # apply the KPSS test after taking second differences
## Warning in kpss.test(diff2_x1): p-value greater than printed p-value
##
## KPSS Test for Level Stationarity
##
## data: diff2_x1
## KPSS Level = 0.018282, Truncation lag parameter = 5, p-value = 0.1
pp.test(diff2_x1) # apply the PP test after taking second differences
## Warning in pp.test(diff2_x1): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: diff2_x1
## Dickey-Fuller Z(alpha) = -399.84, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
adf.test(diff2_x1) # apply the ADF test after taking second differences
## Warning in adf.test(diff2_x1): p-value smaller than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: diff2_x1
## Dickey-Fuller = -10.465, Lag order = 6, p-value = 0.01
## alternative hypothesis: stationary
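# Summary: after two differences the KPSS test no longer rejects stationarity
# while the PP and ADF tests reject the unit root, consistent with ndiffs() = 2
# above and with the d = 2 chosen by auto.arima() below.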
#### Fitting an ARIMA Model
# 1. Using the auto.arima function
model1 <- auto.arima(data_series,stepwise=FALSE, approximation=FALSE, trace=T, test = c("kpss", "adf", "pp")) # applying auto.arima
##
## ARIMA(0,2,0) : 1640.439
## ARIMA(0,2,1) : 1492.437
## ARIMA(0,2,2) : 1494.21
## ARIMA(0,2,3) : 1495.379
## ARIMA(0,2,4) : 1495.539
## ARIMA(0,2,5) : 1497.54
## ARIMA(1,2,0) : 1566.28
## ARIMA(1,2,1) : 1494.244
## ARIMA(1,2,2) : 1494.224
## ARIMA(1,2,3) : 1495.999
## ARIMA(1,2,4) : 1497.567
## ARIMA(2,2,0) : 1519.783
## ARIMA(2,2,1) : 1495.476
## ARIMA(2,2,2) : 1495.983
## ARIMA(2,2,3) : 1498.046
## ARIMA(3,2,0) : 1511.457
## ARIMA(3,2,1) : 1495.65
## ARIMA(3,2,2) : 1497.723
## ARIMA(4,2,0) : 1508.289
## ARIMA(4,2,1) : 1497.723
## ARIMA(5,2,0) : 1506.572
##
##
##
## Best model: ARIMA(0,2,1)
model1 # show the model selected by auto.arima
## Series: data_series
## ARIMA(0,2,1)
##
## Coefficients:
## ma1
## -0.7673
## s.e. 0.0368
##
## sigma^2 estimated as 4.726: log likelihood=-744.2
## AIC=1492.4 AICc=1492.44 BIC=1500.05
# Copy of forecast's internal arima.string() helper, used to extract the selected (p,d,q) order from the auto.arima result
arima.string <- function(object, padding = FALSE) {
  order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)]
  m <- order[7]
  result <- paste("ARIMA(", order[1], ",", order[2], ",", order[3], ")", sep = "")
  if (m > 1 && sum(order[4:6]) > 0) {
    result <- paste(result, "(", order[4], ",", order[5], ",", order[6], ")[", m, "]", sep = "")
  }
  if (padding && m > 1 && sum(order[4:6]) == 0) {
    result <- paste(result, " ", sep = "")
    if (m <= 9) {
      result <- paste(result, " ", sep = "")
    } else if (m <= 99) {
      result <- paste(result, " ", sep = "")
    } else {
      result <- paste(result, " ", sep = "")
    }
  }
  if (!is.null(object$xreg)) {
    if (NCOL(object$xreg) == 1 && is.element("drift", names(object$coef))) {
      result <- paste(result, "with drift ")
    } else {
      result <- paste("Regression with", result, "errors")
    }
  } else {
    if (is.element("constant", names(object$coef)) || is.element("intercept", names(object$coef))) {
      result <- paste(result, "with non-zero mean")
    } else if (order[2] == 0 && order[5] == 0) {
      result <- paste(result, "with zero mean ")
    } else {
      result <- paste(result, " ")
    }
  }
  if (!padding) {
    result <- gsub("[ ]*$", "", result)
  }
  return(result)
}
bestmodel <- arima.string(model1, padding = TRUE)
bestmodel <- substring(bestmodel,7,11)
bestmodel <- gsub(" ", "", bestmodel)
bestmodel <- gsub(")", "", bestmodel)
bestmodel <- strsplit(bestmodel, ",")[[1]]
bestmodel <- c(strtoi(bestmodel[1]),strtoi(bestmodel[2]),strtoi(bestmodel[3]))
bestmodel
## [1] 0 2 1
strtoi(bestmodel[3])
## [1] 1
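# A simpler alternative (assuming the installed forecast version exports
# arimaorder()) would read the (p,d,q) order directly off the fitted object,
# avoiding the string parsing above:
# bestmodel <- unname(arimaorder(model1)[c("p", "d", "q")]) # expected to give c(0, 2, 1)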
# 2. Using the ACF and PACF functions
#par(mfrow=c(1,2)) # code for putting two plots in one graph
acf(diff2_x1, lag.max=20, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, main=paste("ACF-2nd differenced series ",y_lab, sep=" ")) # plot the ACF (autocorrelation function) after taking second differences

pacf(diff2_x1, lag.max=20, xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, main=paste("PACF-2nd differenced series ",y_lab, sep=" ")) # plot the PACF (partial autocorrelation function) after taking second differences

library(forecast) # load the forecast package (already attached above)
x1_model1= arima(data_series, order=c(bestmodel)) # fit the best model selected by auto.arima
x1_model1 # Show result of best model of auto arima
##
## Call:
## arima(x = data_series, order = c(bestmodel))
##
## Coefficients:
## ma1
## -0.7673
## s.e. 0.0368
##
## sigma^2 estimated as 4.712: log likelihood = -744.2, aic = 1492.4
paste ("accuracy of autoarima Model For ==> ",y_lab, sep=" ")
## [1] "accuracy of autoarima Model For ==> Covid 19 deaths cases in Chelyabinsk"
accuracy(x1_model1) # accuracy of the best model from auto.arima
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 0.1068026 2.16429 1.206553 0.4439731 2.502436 0.4478471 0.01484319
x1_model1$x # the stats::arima object does not store the original series, so this is NULL
## NULL
checkresiduals(x1_model1,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab) # check the residuals of the best auto.arima model

##
## Ljung-Box test
##
## data: Residuals from ARIMA(0,2,1)
## Q* = 13.33, df = 9, p-value = 0.1482
##
## Model df: 1. Total lags used: 10
paste("Box-Ljung test , Ljung-Box test For Modelling for ==> ",y_lab, sep=" ")
## [1] "Box-Ljung test , Ljung-Box test For Modelling for ==> Covid 19 deaths cases in Chelyabinsk"
Box.test(x1_model1$residuals^2, lag=20, type="Ljung-Box") # Ljung-Box test on the squared residuals
##
## Box-Ljung test
##
## data: x1_model1$residuals^2
## X-squared = 19.906, df = 20, p-value = 0.4638
library(tseries)
jarque.bera.test(x1_model1$residuals) # Jarque-Bera test for normality of the residuals
##
## Jarque Bera Test
##
## data: x1_model1$residuals
## X-squared = 3311, df = 2, p-value < 2.2e-16
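# Residual diagnostics: the Ljung-Box tests (p = 0.1482 on the residuals,
# p = 0.4638 on the squared residuals) show no significant remaining
# autocorrelation, while the Jarque-Bera test (p < 2.2e-16) rejects normality,
# so normal-theory prediction intervals should be interpreted with caution.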
#Actual Vs Fitted
plot(data_series, col='red',lwd=2, main="Actual vs Fitted Plot", xlab='Time in (days)', ylab=y_lab) # plot actual and Fitted model
lines(fitted(x1_model1), col='black')

#Test data
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) ) # make testing data in time series and start from rows-6
forecasting_auto_arima <- forecast(x1_model1, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_auto_arima$mean,validation_data_days)
MAPE_Per_Day<-round(abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE % For 7 day by using bats Model for ==> Covid 19 deaths cases in Chelyabinsk"
MAPE_Mean_All.ARIMA_Model<-round(mean(MAPE_Per_Day),3)
MAPE_Mean_All.ARIMA<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_auto_arima<-paste(round(MAPE_Per_Day,3),"%")
MAPE_auto.arima_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] " MAPE that's Error of Forecasting for 7 days in bats Model for ==> Covid 19 deaths cases in Chelyabinsk"
paste(MAPE_Mean_All.ARIMA,"%")
## [1] "0.695 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk %"
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in bats Model for ==> ",y_lab, sep=" ")
## [1] "MAPE that's Error of Forecasting day by day for 7 days in bats Model for ==> Covid 19 deaths cases in Chelyabinsk"
data.frame(date_auto.arima=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_auto.arima=validation_forecast,MAPE_auto.arima_Model)
## date_auto.arima validation_data_by_name actual_data forecasting_auto.arima
## 1 2021-02-16 Tuesday 924 924.4738
## 2 2021-02-17 Wednesday 930 932.9477
## 3 2021-02-18 Thursday 937 941.4215
## 4 2021-02-19 Friday 942 949.8954
## 5 2021-02-20 Saturday 950 958.3692
## 6 2021-02-21 Sunday 957 966.8431
## 7 2021-02-22 Monday 963 975.3169
## MAPE_auto.arima_Model
## 1 0.051 %
## 2 0.317 %
## 3 0.472 %
## 4 0.838 %
## 5 0.881 %
## 6 1.029 %
## 7 1.279 %
data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_auto.arima=tail(forecasting_auto_arima$mean,N_forecasting_days))
## FD forecating_date forecasting_by_auto.arima
## 1 2021-02-23 Tuesday 983.7907
## 2 2021-02-24 Wednesday 992.2646
## 3 2021-02-25 Thursday 1000.7384
## 4 2021-02-26 Friday 1009.2123
## 5 2021-02-27 Saturday 1017.6861
## 6 2021-02-28 Sunday 1026.1599
## 7 2021-03-01 Monday 1034.6338
plot(forecasting_auto_arima)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)

graph4<-autoplot(forecasting_auto_arima,xlab = paste ("Time in ", frequency ,y_lab , sep=" "), col.main="black", col.lab="black", col.sub="black", cex.main=1, cex.lab=1, cex.sub=1,font.main=4, font.lab=4, ylab=y_lab)
graph4

MAPE_Mean_All.ARIMA
## [1] "0.695 % MAPE 7 day Covid 19 deaths cases in Chelyabinsk"
## Error of forecasting
Error_auto.arima<-abs(testing_data-validation_forecast) # Absolute error of forecast (AEOF)
REOF_A_auto.arima<-abs(((testing_data-validation_forecast)/testing_data)*100) #Relative error of forecast (divided by actual)(REOF_A)
REOF_F_auto.arima<-abs(((testing_data-validation_forecast)/validation_forecast)*100) #Relative error of forecast (divided by forecast)(REOF_F)
correlation_auto.arima<-cor(testing_data,validation_forecast, method = c("pearson")) # correlation coefficient between predicted and actual values
RMSE_auto.arima<-sqrt(sum((Error_auto.arima^2))/validation_data_days) # Root mean square forecast error
MSE_auto.arima<-(sum((Error_auto.arima^2))/validation_data_days) # Mean square forecast error (MSE)
MAD_auto.arima<-abs((sum(testing_data-validation_forecast))/validation_data_days) # absolute value of the mean forecast error
AEOF_auto.arima<-c(Error_auto.arima)
REOF_auto.arima1<-c(paste(round(REOF_A_auto.arima,3),"%"))
REOF_auto.arima2<-c(paste(round(REOF_F_auto.arima,3),"%"))
data.frame(correlation_auto.arima,MSE_auto.arima,RMSE_auto.arima,MAPE_Mean_All.ARIMA_Model,MAD_auto.arima) # error analysis for the auto.arima model: correlation, MSE, RMSE, MAPE, MAD
## correlation_auto.arima MSE_auto.arima RMSE_auto.arima
## 1 0.9990562 58.49077 7.647926
## MAPE_Mean_All.ARIMA_Model MAD_auto.arima
## 1 0.695 6.609654
data.frame(validation_dates,Validation_day_name=validation_data_by_name,AEOF_auto.arima,REOF_A_auto.arima=REOF_auto.arima1,REOF_F_auto.arima=REOF_auto.arima2) # Analysis of error shows result AEOF,REOF_A,REOF_F
## validation_dates Validation_day_name AEOF_auto.arima REOF_A_auto.arima
## 1 2021-02-16 Tuesday 0.4738421 0.051 %
## 2 2021-02-17 Wednesday 2.9476843 0.317 %
## 3 2021-02-18 Thursday 4.4215264 0.472 %
## 4 2021-02-19 Friday 7.8953685 0.838 %
## 5 2021-02-20 Saturday 8.3692106 0.881 %
## 6 2021-02-21 Sunday 9.8430528 1.029 %
## 7 2021-02-22 Monday 12.3168949 1.279 %
## REOF_F_auto.arima
## 1 0.051 %
## 2 0.316 %
## 3 0.47 %
## 4 0.831 %
## 5 0.873 %
## 6 1.018 %
## 7 1.263 %
# Table of MAPE by model for the region
best_recommended_model <- min(MAPE_Mean_All_NNAR,MAPE_Mean_All.bats_Model,MAPE_Mean_All.TBATS_Model,MAPE_Mean_All.Holt_Model,MAPE_Mean_All.ARIMA_Model)
paste("System Choose Least Error ==> ( MAPE %) of Forecasting by using bats model and BATS Model, Holt's Linear Models , and autoarima for ==> ", y_lab , sep=" ")
## [1] "System Choose Least Error ==> ( MAPE %) of Forecasting by using bats model and BATS Model, Holt's Linear Models , and autoarima for ==> Covid 19 deaths cases in Chelyabinsk"
best_recommended_model
## [1] 0.547
x1<-if(best_recommended_model >= MAPE_Mean_All.bats_Model) {paste("BATS Model")}
x2<-if(best_recommended_model >= MAPE_Mean_All.TBATS_Model) {paste("TBATS Model")}
x3<-if(best_recommended_model >= MAPE_Mean_All.Holt_Model) {paste("Holt Model")}
x4<-if(best_recommended_model >= MAPE_Mean_All.ARIMA_Model) {paste("ARIMA Model")}
x5<-if(best_recommended_model >= MAPE_Mean_All_NNAR) {paste("NNAR Model")}
result<-c(x1,x2,x3,x4,x5)
table.error<-data.frame(country.name,NNAR.model=MAPE_Mean_All_NNAR, BATS.Model=MAPE_Mean_All.bats_Model,TBATS.Model=MAPE_Mean_All.TBATS_Model,Holt.Model=MAPE_Mean_All.Holt_Model,ARIMA.Model=MAPE_Mean_All.ARIMA_Model,Best.Model=result)
library(ascii)
print(ascii(table(table.error)), type = "rest")
##
## +---+--------------+------------+------------+-------------+------------+-------------+------------+------+
## | | country.name | NNAR.model | BATS.Model | TBATS.Model | Holt.Model | ARIMA.Model | Best.Model | Freq |
## +===+==============+============+============+=============+============+=============+============+======+
## | 1 | Chelyabinsk | 0.547 | 0.708 | 0.682 | 0.812 | 0.695 | NNAR Model | 1.00 |
## +---+--------------+------------+------------+-------------+------------+-------------+------------+------+
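# The NNAR model achieves the lowest validation MAPE (0.547 %) for Chelyabinsk,
# ahead of TBATS (0.682 %), auto.arima (0.695 %), BATS (0.708 %) and
# Holt's linear trend (0.812 %).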
message("System finished Forecasting by using autoarima and Holt's ,TBATS, and BATS Model ==>",y_lab, sep=" ")
## System finished Forecasting by using autoarima and Holt's ,TBATS, and BATS Model ==>Covid 19 deaths cases in Chelyabinsk
message(" Thank you for using our System For Modelling ==> ",y_lab, sep=" ")
## Thank you for using our System For Modelling ==> Covid 19 deaths cases in Chelyabinsk