# Libraries
library(forecast)
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr 1.1.4 ✔ readr 2.1.5
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ ggplot2 3.5.1 ✔ tibble 3.2.1
## ✔ lubridate 1.9.4 ✔ tidyr 1.3.1
## ✔ purrr 1.0.4
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(ggplot2)
library(maps)
##
## Attaching package: 'maps'
##
## The following object is masked from 'package:purrr':
##
## map
library(readxl)
# Data set
poblacion <- read_csv("/Users/lishdz/Desktop/8vo/periodo 1/modulo 1 - r/population.csv", show_col_types = FALSE)
summary(poblacion) # year: min = 1900, max = 2019
##     state                year        population
##  Length:6020        Min.   :1900   Min.   :   43000
##  Class :character   1st Qu.:1930   1st Qu.:  901483
##  Mode  :character   Median :1960   Median : 2359000
##                     Mean   :1960   Mean   : 3726003
##                     3rd Qu.:1990   3rd Qu.: 4541883
##                     Max.   :2019   Max.   :39512223
str(poblacion)
## spc_tbl_ [6,020 × 3] (S3: spec_tbl_df/tbl_df/tbl/data.frame)
## $ state : chr [1:6020] "AK" "AK" "AK" "AK" ...
## $ year : num [1:6020] 1950 1951 1952 1953 1954 ...
## $ population: num [1:6020] 135000 158000 189000 205000 215000 222000 224000 231000 224000 224000 ...
## - attr(*, "spec")=
## .. cols(
## .. state = col_character(),
## .. year = col_double(),
## .. population = col_double()
## .. )
## - attr(*, "problems")=<externalptr>
head(poblacion)
## # A tibble: 6 × 3
## state year population
## <chr> <dbl> <dbl>
## 1 AK 1950 135000
## 2 AK 1951 158000
## 3 AK 1952 189000
## 4 AK 1953 205000
## 5 AK 1954 215000
## 6 AK 1955 222000
poblacion_texas <- poblacion %>% filter(state == "TX")
ggplot(poblacion_texas, aes(x = year, y = population)) +
  geom_line() +
  labs(title = "Population of Texas", x = "Year", y = "Population")
ts_texas <- ts(poblacion_texas$population, start = 1900, frequency = 1)
arima_texas <- auto.arima(ts_texas)
summary(arima_texas)
## Series: ts_texas
## ARIMA(0,2,2)
##
## Coefficients:
## ma1 ma2
## -0.5950 -0.1798
## s.e. 0.0913 0.0951
##
## sigma^2 = 1.031e+10: log likelihood = -1527.14
## AIC=3060.28 AICc=3060.5 BIC=3068.6
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE
## Training set 12147.62 99818.31 59257.39 0.1046163 0.5686743 0.2672197
## ACF1
## Training set -0.02136734
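For reference, and assuming R's usual sign convention for MA coefficients, the selected ARIMA(0,2,2) says the twice-differenced population series follows a two-term moving average; with the estimates above it reads roughly (1 - B)^2 y_t = (1 - 0.5950 B - 0.1798 B^2) e_t, where B is the backshift operator and e_t is white noise.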
pronostico_texas <- forecast(arima_texas, level = 95, h = 10) # h = number of periods to forecast
pronostico_texas
## Point Forecast Lo 95 Hi 95
## 2020 29398472 29199487 29597457
## 2021 29806827 29463665 30149990
## 2022 30215183 29742956 30687410
## 2023 30623538 30024100 31222977
## 2024 31031894 30303359 31760429
## 2025 31440249 30579246 32301253
## 2026 31848605 30851090 32846119
## 2027 32256960 31118581 33395339
## 2028 32665316 31381587 33949044
## 2029 33073671 31640070 34507272
plot(pronostico_texas, main = "Population in Texas")
map(database="state")
map(database = "state", regions = "Texas", col="red", fill = TRUE, add = TRUE)
map(database = "state", regions = "New York", col="green", fill=TRUE, add = TRUE)
# List to collect the per-state forecast results
forecast_results <- list()
# Vector of unique state codes
states <- unique(poblacion$state)
for (state in states) {
  # Filter by state (!!state injects the loop variable so it is not shadowed by the column of the same name)
  poblacion_state <- poblacion %>%
    filter(state == !!state) %>%
    arrange(year)
  # Debugging
  #print(paste("Processing:", state))
  #print(poblacion_state)
  # Time series
  ts_state <- ts(poblacion_state$population, start = min(poblacion_state$year), frequency = 1)
  if (length(ts_state) > 2) {
    # ARIMA model
    arima_state <- auto.arima(ts_state)
    # Forecast for the next year
    forecast_state <- forecast(arima_state, h = 1)
    next_year <- max(poblacion_state$year) + 1
    forecast_value <- as.numeric(forecast_state$mean)
    #print(forecast_value)
    forecast_results[[state]] <- data.frame(state = state, year = next_year, population = forecast_value)
  }
}
# Combine the forecasts into one data frame
forecast_results_df <- bind_rows(forecast_results)
# Append the forecasts to the original data
poblacion <- bind_rows(poblacion, forecast_results_df)
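As an optional sanity check (not part of the original script), the appended rows can be inspected; with every state's history ending in 2019, each one-step-ahead forecast lands in 2020:
# Illustrative check: the appended rows are the 2020 one-step-ahead forecasts
poblacion %>% filter(year == 2020) %>% arrange(desc(population)) %>% head()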
datos_decadas <- poblacion %>% filter(year %in% c(1950, 1960, 1970, 1980, 1990, 2000, 2010, 2020))
state_conversion <- data.frame(
  Abbreviation = state.abb,
  State = state.name
)
datos_decadas <- datos_decadas %>%
  left_join(state_conversion, by = c("state" = "Abbreviation")) %>%
  select(-state)
library(sf)
library(maps)
library(mapdata)
library(scales)
# Note: map_data("state") covers only the conterminous US, so AK and HI are not drawn
us_states <- map_data("state") %>%
  rename(State = region) %>%
  mutate(State = str_to_title(State))
map_data_merged <- left_join(us_states, datos_decadas, by = c("State"))
ggplot(map_data_merged, aes(long, lat, group = group, fill = population)) +
  geom_polygon(color = "white") +
  scale_fill_viridis_c(option = "turbo", na.value = "gray80",
                       labels = label_comma()) +
  theme_minimal() +
  labs(title = "Population distribution - 2020 forecast",
       fill = "Population") +
  coord_fixed(1.3) +
  facet_wrap(~ year)
# Import the data set
ventas <- read_xlsx("/Users/lishdz/Desktop/8vo/periodo 1/modulo 1 - r/Ventas_Históricas_Lechitas.xlsx")
ts_ventas <- ts(ventas$Ventas, start= c(2017, 1), frequency = 12)
autoplot(ts_ventas) + labs(title = "Hershey's Flavored Milk Sales", x = "Time", y = "Thousands of dollars")
arima_ventas <- auto.arima(ts_ventas)
summary(arima_ventas)
## Series: ts_ventas
## ARIMA(1,0,0)(1,1,0)[12] with drift
##
## Coefficients:
## ar1 sar1 drift
## 0.6383 -0.5517 288.8979
## s.e. 0.1551 0.2047 14.5026
##
## sigma^2 = 202701: log likelihood = -181.5
## AIC=371 AICc=373.11 BIC=375.72
##
## Training set error measures:
## ME RMSE MAE MPE MAPE MASE ACF1
## Training set 25.22158 343.864 227.17 0.08059932 0.7069542 0.06491044 0.2081026
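The selected specification, ARIMA(1,0,0)(1,1,0)[12] with drift, combines one non-seasonal AR term, a seasonal (lag-12) difference with one seasonal AR term, and a deterministic drift. A residual diagnostic is not part of the original script, but a minimal sketch using forecast's checkresiduals() would be:
# Optional diagnostic sketch: Ljung-Box test and residual plots for the fitted
# seasonal ARIMA; ideally the residuals behave like white noise
checkresiduals(arima_ventas)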
pronostico_ventas <- forecast(arima_ventas, level=95, h=12)
pronostico_ventas
## Point Forecast Lo 95 Hi 95
## Jan 2020 35498.90 34616.48 36381.32
## Feb 2020 34202.17 33155.28 35249.05
## Mar 2020 36703.01 35596.10 37809.92
## Apr 2020 36271.90 35141.44 37402.36
## May 2020 37121.98 35982.07 38261.90
## Jun 2020 37102.65 35958.90 38246.40
## Jul 2020 37151.04 36005.73 38296.34
## Aug 2020 38564.64 37418.70 39710.58
## Sep 2020 38755.22 37609.03 39901.42
## Oct 2020 39779.02 38632.72 40925.32
## Nov 2020 38741.63 37595.28 39887.97
## Dec 2020 38645.86 37499.50 39792.22
autoplot(pronostico_ventas) + labs(title = "Hershey's Flavored Milk Sales Forecast 2020", x = "Time", y = "Thousands of dollars")
# Linear regression model
ventas$mes <- 1:36 # time index: 36 monthly observations (2017-2019)
regresion_ventas <- lm(Ventas ~ mes, data = ventas)
summary(regresion_ventas)
##
## Call:
## lm(formula = Ventas ~ mes, data = ventas)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2075.79 -326.41 33.74 458.40 1537.04
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 24894.67 275.03 90.52 <2e-16 ***
## mes 298.37 12.96 23.02 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 808 on 34 degrees of freedom
## Multiple R-squared: 0.9397, Adjusted R-squared: 0.9379
## F-statistic: 529.8 on 1 and 34 DF, p-value: < 2.2e-16
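From the coefficients above, the fitted trend line is approximately Ventas = 24894.67 + 298.37 * mes; for example, month 37 (January 2020) gives 24894.67 + 298.37 * 37 ≈ 35934, in line with the first prediction shown below.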
siguiente_a <- data.frame(mes = 37:48) # months 37-48 correspond to Jan-Dec 2020
prediccion_regresion <- predict(regresion_ventas, siguiente_a)
prediccion_regresion
## 1 2 3 4 5 6 7 8
## 35934.49 36232.86 36531.23 36829.61 37127.98 37426.35 37724.73 38023.10
## 9 10 11 12
## 38321.47 38619.85 38918.22 39216.59
plot(ventas$mes, ventas$Ventas, main = "Hershey's Flavored Milk Sales Forecast 2020", xlab = "Time", ylab = "Thousands of dollars")
abline(regresion_ventas, col="blue")
points(siguiente_a$mes, prediccion_regresion, col="red")
predicciones_reales <- predict(regresion_ventas, ventas) # in-sample fitted values
MAPE <- mean(abs((ventas$Ventas - predicciones_reales) / ventas$Ventas)) * 100 # in-sample MAPE (%)
MAPE
## [1] 2.011297
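The comparison metric is the mean absolute percentage error, MAPE = 100/n * sum(|actual - fitted| / actual), which is what the line above computes for the regression. As an optional cross-check (not in the original script), the SARIMA's in-sample MAPE can be read directly from its accuracy measures:
# In-sample MAPE of the seasonal ARIMA, for comparison with the regression's 2.01%
accuracy(arima_ventas)[, "MAPE"]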
The model that best fits the series is the SARIMA, with a MAPE of 0.71%, compared with 2.01% for the linear regression.
The sales projection for the coming year (in thousands of dollars) is as follows:
| Month and Year | Expected Scenario | Pessimistic Scenario (Lo 95) | Optimistic Scenario (Hi 95) |
|---|---|---|---|
| Jan 2020 | 35498.90 | 34616.48 | 36381.32 |
| Feb 2020 | 34202.17 | 33155.28 | 35249.05 |
| Mar 2020 | 36703.01 | 35596.10 | 37809.92 |
| Apr 2020 | 36271.90 | 35141.44 | 37402.36 |
| May 2020 | 37121.98 | 35982.07 | 38261.90 |
| Jun 2020 | 37102.65 | 35958.90 | 38246.40 |
| Jul 2020 | 37151.04 | 36005.73 | 38296.34 |
| Aug 2020 | 38564.64 | 37418.70 | 39710.58 |
| Sep 2020 | 38755.22 | 37609.03 | 39901.42 |
| Oct 2020 | 39779.02 | 38632.72 | 40925.32 |
| Nov 2020 | 38741.63 | 37595.28 | 39887.97 |
| Dec 2020 | 38645.86 | 37499.50 | 39792.22 |
ventas_por_anio <- read.csv("/Users/lishdz/Desktop/8vo/periodo 1/modulo 1 - r/ventas_por_anio.csv")
ggplot(ventas_por_anio, aes(x = mes, y = ventas, col = as.factor(anio), group = anio)) +
  geom_line() +
  labs(title = "Hershey's Flavored Milk Sales by Year", x = "Month", y = "Thousands of dollars", col = "Year")
Our recommendation is to run advertising campaigns to encourage consumption of the product during the first half of the year.