# Exercise 17.14 (Open exercise: coffee)

# Load packages
library(bayesrules)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.1     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(rstanarm)
## Loading required package: Rcpp
## This is rstanarm version 2.32.1
## - See https://mc-stan.org/rstanarm/articles/priors for changes to default priors!
## - Default priors may change, so it's safest to specify priors, even if equivalent to the defaults.
## - For execution on a local, multicore CPU with excess RAM we recommend calling
##   options(mc.cores = parallel::detectCores())
library(bayesplot)
## This is bayesplot version 1.11.1
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
##    * Does _not_ affect other ggplot2 plots
##    * See ?bayesplot_theme_set for details on theme setting
library(tidybayes)
library(broom.mixed)
library(corrplot)
## corrplot 0.95 loaded
library(patchwork)

# Load the data
data(coffee_ratings_small)

# Rename the data
coffee <- coffee_ratings_small

# Dimensions of the dataset
dim(coffee)
## [1] 636  11
# Summary of the data
summary(coffee)
##                         farm_name   total_cup_points     aroma      
##  various                     : 47   Min.   :69.17    Min.   :5.080  
##  rio verde                   : 23   1st Qu.:81.42    1st Qu.:7.420  
##  several                     : 20   Median :82.75    Median :7.580  
##  finca medina                : 15   Mean   :82.17    Mean   :7.564  
##  doi tung development project: 13   3rd Qu.:83.67    3rd Qu.:7.750  
##  (Other)                     :159   Max.   :88.83    Max.   :8.580  
##  NA's                        :359                                   
##      flavor        aftertaste      acidity           body          balance    
##  Min.   :6.080   Min.   :6.17   Min.   :5.250   Min.   :5.250   Min.   :6.17  
##  1st Qu.:7.420   1st Qu.:7.25   1st Qu.:7.420   1st Qu.:7.420   1st Qu.:7.42  
##  Median :7.580   Median :7.50   Median :7.580   Median :7.580   Median :7.58  
##  Mean   :7.522   Mean   :7.41   Mean   :7.546   Mean   :7.549   Mean   :7.57  
##  3rd Qu.:7.750   3rd Qu.:7.67   3rd Qu.:7.750   3rd Qu.:7.750   3rd Qu.:7.75  
##  Max.   :8.500   Max.   :8.42   Max.   :8.500   Max.   :8.420   Max.   :8.75  
##                                                                               
##    uniformity     sweetness         moisture     
##  Min.   : 6.0   Min.   : 6.000   Min.   :0.0000  
##  1st Qu.:10.0   1st Qu.:10.000   1st Qu.:0.0000  
##  Median :10.0   Median :10.000   Median :0.1100  
##  Mean   : 9.8   Mean   : 9.846   Mean   :0.0781  
##  3rd Qu.:10.0   3rd Qu.:10.000   3rd Qu.:0.1100  
##  Max.   :10.0   Max.   :10.000   Max.   :0.2800  
## 
# Variable names
coffee %>% 
  names()
##  [1] "farm_name"        "total_cup_points" "aroma"            "flavor"          
##  [5] "aftertaste"       "acidity"          "body"             "balance"         
##  [9] "uniformity"       "sweetness"        "moisture"
# Look at the first 6 rows of the data
head(coffee)
## # A tibble: 6 × 11
##   farm_name total_cup_points aroma flavor aftertaste acidity  body balance
##   <fct>                <dbl> <dbl>  <dbl>      <dbl>   <dbl> <dbl>   <dbl>
## 1 <NA>                  88.8  8.58   8.42       8.42    8.5   8.25    8.33
## 2 <NA>                  88.8  8.42   8.5        8.33    8.5   8.25    8.25
## 3 <NA>                  87.3  8.17   8.33       8.25    8.33  8.42    8.33
## 4 several               87.2  8.08   8.25       8       8.17  8       8.33
## 5 <NA>                  87.1  8.42   8.17       7.92    8.17  8.33    8   
## 6 <NA>                  86.9  7.83   8.25       8.08    8.17  8.17    8.17
## # ℹ 3 more variables: uniformity <dbl>, sweetness <dbl>, moisture <dbl>
# Remove rows with NAs
coffee <- coffee %>% na.omit()

# Dimensions of the dataset after removing rows with NAs
dim(coffee)
## [1] 277  11
# Internal structure of the dataset
str(coffee)
## tibble [277 × 11] (S3: tbl_df/tbl/data.frame)
##  $ farm_name       : Factor w/ 27 levels "agropecuaria quiagral",..: 26 22 14 17 26 26 17 27 14 27 ...
##  $ total_cup_points: num [1:277] 87.2 86.7 86.2 85.9 85.3 ...
##  $ aroma           : num [1:277] 8.08 8.17 8.25 7.92 7.92 8 8 8.17 7.5 7.83 ...
##  $ flavor          : num [1:277] 8.25 8.08 8.42 8.08 8.08 7.58 7.92 7.92 7.92 8 ...
##  $ aftertaste      : num [1:277] 8 8.08 8.08 7.92 8 7.58 7.75 7.83 8.25 7.83 ...
##  $ acidity         : num [1:277] 8.17 8 7.75 8.08 7.92 7.67 8 8 7.83 7.75 ...
##  $ body            : num [1:277] 8 8.08 7.67 8.08 8 8.08 7.92 7.58 7.92 7.67 ...
##  $ balance         : num [1:277] 8.33 8 7.83 7.83 8.08 8.5 7.83 7.83 7.75 7.83 ...
##  $ uniformity      : num [1:277] 10 10 10 10 10 10 10 10 10 10 ...
##  $ sweetness       : num [1:277] 10 10 10 10 9.33 10 10 10 10 10 ...
##  $ moisture        : num [1:277] 0.11 0.1 0 0.1 0 0.1 0.1 0.11 0 0.09 ...
##  - attr(*, "na.action")= 'omit' Named int [1:359] 1 2 3 5 6 8 9 10 12 14 ...
##   ..- attr(*, "names")= chr [1:359] "1" "2" "3" "5" ...
# Summary of the data
summary(coffee)
##                         farm_name   total_cup_points     aroma      
##  various                     : 47   Min.   :69.17    Min.   :6.170  
##  rio verde                   : 23   1st Qu.:80.92    1st Qu.:7.420  
##  several                     : 20   Median :82.33    Median :7.500  
##  finca medina                : 15   Mean   :81.68    Mean   :7.508  
##  doi tung development project: 13   3rd Qu.:83.25    3rd Qu.:7.670  
##  fazenda capoeirnha          : 13   Max.   :87.17    Max.   :8.250  
##  (Other)                     :146                                   
##      flavor       aftertaste      acidity           body          balance     
##  Min.   :6.08   Min.   :6.17   Min.   :6.500   Min.   :6.330   Min.   :6.170  
##  1st Qu.:7.33   1st Qu.:7.17   1st Qu.:7.330   1st Qu.:7.330   1st Qu.:7.330  
##  Median :7.50   Median :7.33   Median :7.500   Median :7.500   Median :7.500  
##  Mean   :7.46   Mean   :7.31   Mean   :7.505   Mean   :7.481   Mean   :7.475  
##  3rd Qu.:7.67   3rd Qu.:7.50   3rd Qu.:7.670   3rd Qu.:7.670   3rd Qu.:7.670  
##  Max.   :8.42   Max.   :8.25   Max.   :8.420   Max.   :8.170   Max.   :8.580  
##                                                                               
##    uniformity      sweetness         moisture      
##  Min.   : 6.00   Min.   : 6.670   Min.   :0.00000  
##  1st Qu.:10.00   1st Qu.:10.000   1st Qu.:0.10000  
##  Median :10.00   Median :10.000   Median :0.11000  
##  Mean   : 9.81   Mean   : 9.842   Mean   :0.08783  
##  3rd Qu.:10.00   3rd Qu.:10.000   3rd Qu.:0.11000  
##  Max.   :10.00   Max.   :10.000   Max.   :0.28000  
## 
# Summary of coffee$total_cup_points
summary(coffee$total_cup_points)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##   69.17   80.92   82.33   81.68   83.25   87.17
# Select only the numeric variables to examine correlations
coffee_num <- select(coffee, -farm_name)

# Visualize the correlations
M <- cor(coffee_num)
corrplot(M, method = "number")
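
# Numeric companion to the corrplot (a small addition; it uses the same
# correlation matrix M computed above): correlations of each quality score with
# total_cup_points, sorted. The -1 drops the response's correlation with itself.
sort(M["total_cup_points", -1], decreasing = TRUE)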

# Bar chart of farm_name with counts per category
ggplot(coffee) +
  geom_bar(aes(x = farm_name), fill = "blue") +
  xlab("farm_name") + ylab("Frequency") +
  theme(axis.text.x = element_text(angle = 70, hjust = 1,
                                   size = 12, color = "blue")) +
  labs(title = "Number of observations per farm")

# Boxplot of total_cup_points by farm_name category
ggplot(coffee, aes(x = farm_name, y = total_cup_points, fill = farm_name)) +
  geom_boxplot() +
  theme(legend.position = 'none',
        axis.text.x = element_text(angle = 70, hjust = 1,
                                   size = 12, color = "blue")) +
  labs(title = "Distribution of total cup points by farm")

# MODELS FOR THE DEPENDENT VARIABLE "total_cup_points"



# Compare the complete pooled models (each with a single predictor)
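# Aside: the nine single-predictor models below are fit one at a time. A more
# compact alternative (a sketch only; not how the results shown below were
# produced) is to map stan_glm() over the predictor names. refresh = 0 is an
# addition here, used only to silence the sampler progress log.
predictors <- c("aroma", "flavor", "aftertaste", "acidity", "body",
                "balance", "uniformity", "sweetness", "moisture")
pooled_models <- map(
  set_names(predictors),
  ~ stan_glm(
      reformulate(.x, response = "total_cup_points"),
      data = coffee, family = gaussian,
      prior_intercept = normal(0, 2.5, autoscale = TRUE),
      prior = normal(0, 2.5, autoscale = TRUE),
      prior_aux = exponential(1, autoscale = TRUE),
      chains = 4, iter = 5000*2, seed = 841113, refresh = 0))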


# Complete pooled model for total_cup_points ~ aroma
complete_pooled_model_1 <- stan_glm(
  total_cup_points ~  aroma, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ flavor
complete_pooled_model_2 <- stan_glm(
  total_cup_points ~  flavor, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ aftertaste
complete_pooled_model_3 <- stan_glm(
  total_cup_points ~  aftertaste, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ acidity
complete_pooled_model_4 <- stan_glm(
  total_cup_points ~  acidity, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ body
complete_pooled_model_5 <- stan_glm(
  total_cup_points ~  body, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ balance
complete_pooled_model_6 <- stan_glm(
  total_cup_points ~  balance, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ uniformity
complete_pooled_model_7 <- stan_glm(
  total_cup_points ~  uniformity, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ sweetness
complete_pooled_model_8 <- stan_glm(
  total_cup_points ~  sweetness, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Complete pooled model for total_cup_points ~ moisture
complete_pooled_model_9 <- stan_glm(
  total_cup_points ~ moisture, 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan MCMC sampling: 4 chains, 5,000 warmup + 5,000 sampling iterations each, roughly 0.4 seconds per chain)
# Compare the complete pooled models using expected log-predictive densities (ELPD)

elpd_1 <- loo(complete_pooled_model_1)
elpd_2 <- loo(complete_pooled_model_2)
elpd_3 <- loo(complete_pooled_model_3)
elpd_4 <- loo(complete_pooled_model_4)
elpd_5 <- loo(complete_pooled_model_5)
elpd_6 <- loo(complete_pooled_model_6)
elpd_7 <- loo(complete_pooled_model_7)
elpd_8 <- loo(complete_pooled_model_8)
elpd_9 <- loo(complete_pooled_model_9)

loo_compare(elpd_1 ,elpd_2,elpd_3,
            elpd_4,elpd_5,elpd_6,
            elpd_7,elpd_8,elpd_9)
##                         elpd_diff se_diff
## complete_pooled_model_3    0.0       0.0 
## complete_pooled_model_2   -2.6      10.7 
## complete_pooled_model_6   -7.9      15.6 
## complete_pooled_model_4  -40.2      13.9 
## complete_pooled_model_5  -42.1      12.9 
## complete_pooled_model_1  -43.9      16.2 
## complete_pooled_model_7  -75.5      25.7 
## complete_pooled_model_8 -126.8      17.4 
## complete_pooled_model_9 -146.9      15.4
# Models from best to worst:                |  Model predictor: 
#                          elpd_diff se_diff
# complete_pooled_model_3    0.0       0.0  |  aftertaste
# complete_pooled_model_2   -2.6      10.7  |  flavor
# complete_pooled_model_6   -7.9      15.6  |  balance
# complete_pooled_model_4  -40.2      13.9  |  acidity
# complete_pooled_model_5  -42.1      12.9  |  body
# complete_pooled_model_1  -43.9      16.2  |  aroma
# complete_pooled_model_7  -75.5      25.7  |  uniformity
# complete_pooled_model_8 -126.8      17.4  |  sweetness
# complete_pooled_model_9 -146.9      15.4  |  moisture
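
# Optional sketch (not part of the comparison above): collect the raw elpd_loo
# estimates and their standard errors in a single table, using each loo object's
# "estimates" matrix.
elpd_list <- list(aroma = elpd_1, flavor = elpd_2, aftertaste = elpd_3,
                  acidity = elpd_4, body = elpd_5, balance = elpd_6,
                  uniformity = elpd_7, sweetness = elpd_8, moisture = elpd_9)
tibble(predictor = names(elpd_list),
       elpd_loo  = map_dbl(elpd_list, ~ .x$estimates["elpd_loo", "Estimate"]),
       se        = map_dbl(elpd_list, ~ .x$estimates["elpd_loo", "SE"])) %>%
  arrange(desc(elpd_loo))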


# Based on expected log-predictive density, the best complete pooled model is
# complete_pooled_model_3, whose predictor is "aftertaste". Models 2 (flavor) and
# 6 (balance) trail by less than one standard error (elpd_diff of -2.6 and -7.9
# against se_diff of 10.7 and 15.6), so "aftertaste" wins, though not decisively.

# Complete pooled model complete_pooled_model_3 (predictor "aftertaste")

# Summary of coffee$aftertaste
summary(coffee$aftertaste)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##    6.17    7.17    7.33    7.31    7.50    8.25
# Prior specifications of the model
prior_summary(complete_pooled_model_3)
## Priors for model 'complete_pooled_model_3' 
## ------
## Intercept (after predictors centered)
##   Specified prior:
##     ~ normal(location = 0, scale = 2.5)
##   Adjusted prior:
##     ~ normal(location = 0, scale = 6.8)
## 
## Coefficients
##   Specified prior:
##     ~ normal(location = 0, scale = 2.5)
##   Adjusted prior:
##     ~ normal(location = 0, scale = 18)
## 
## Auxiliary (sigma)
##   Specified prior:
##     ~ exponential(rate = 1)
##   Adjusted prior:
##     ~ exponential(rate = 0.37)
## ------
## See help('prior_summary.stanreg') for more details
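# Where the adjusted prior scales come from (a quick check; with autoscale = TRUE
# and a gaussian model, rstanarm rescales the intercept prior by sd(y), each
# coefficient prior by sd(y)/sd(x), and the exponential rate by 1/sd(y)):
sd_y <- sd(coffee$total_cup_points)
sd_x <- sd(coffee$aftertaste)
c(intercept_scale = 2.5 * sd_y,        # ~ 6.8
  coef_scale      = 2.5 * sd_y / sd_x, # ~ 18
  aux_rate        = 1 / sd_y)          # ~ 0.37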
# Posterior summary for complete_pooled_model_3
model_summary_3 <- tidy(complete_pooled_model_3, conf.int = TRUE, conf.level = 0.80)
model_summary_3
## # A tibble: 2 × 5
##   term        estimate std.error conf.low conf.high
##   <chr>          <dbl>     <dbl>    <dbl>     <dbl>
## 1 (Intercept)    39.0      1.87     36.6      41.4 
## 2 aftertaste      5.83     0.256     5.50      6.16
# Posterior median regression line for complete_pooled_model_3
# (median slope ~ 5.8: each additional point of aftertaste is associated with
# roughly 5.8 more total cup points; 80% credible interval 5.50 to 6.16)
B0 <- model_summary_3$estimate[1]
B1 <- model_summary_3$estimate[2]
ggplot(coffee, aes(y = total_cup_points, x = aftertaste)) +
  geom_point() +
  geom_abline(intercept = B0, slope = B1, lwd = 1, color = "blue") +
  labs(title = "Posterior median of complete_pooled_model_3")
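
# A small posterior predictive sketch (aftertaste = 7.5 is an arbitrary
# illustrative value, not part of the exercise): an approximate 80% posterior
# predictive interval for the total_cup_points of such a coffee.
set.seed(841113)
pred_75 <- posterior_predict(complete_pooled_model_3,
                             newdata = data.frame(aftertaste = 7.5))
quantile(pred_75, probs = c(0.1, 0.5, 0.9))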

# Prior hierarchical model with varying intercepts and the predictor "aftertaste"
# total_cup_points ~ aftertaste + (1 | farm_name)

# Boxplot of aftertaste by farm_name category
ggplot(coffee, aes(x = farm_name, y = aftertaste, fill = farm_name)) +
  geom_boxplot() +
  theme(legend.position = 'none',
        axis.text.x = element_text(angle = 70, hjust = 1,
                                   size = 12, color = "blue")) +
  labs(title = "Distribution of aftertaste by farm")

coffee_model_prior_3 <- stan_glmer(
  total_cup_points ~ aftertaste + (1 | farm_name),
  data = coffee, family = gaussian,
  prior_intercept = normal(82, 5, autoscale = TRUE),
  prior = normal(6, 1, autoscale = TRUE),
  prior_aux = exponential(1, autoscale = TRUE),
  prior_covariance = decov(reg = 1, conc = 1, shape = 1, scale = 1),
  chains = 4, iter = 5000*2, seed = 841113,
  prior_PD = TRUE)
## (Stan sampling progress omitted: 4 chains, 10000 iterations each, roughly 1 second per chain.)
#Prior specifications of the model
prior_summary(coffee_model_prior_3 )
## Priors for model 'coffee_model_prior_3' 
## ------
## Intercept (after predictors centered)
##   Specified prior:
##     ~ normal(location = 82, scale = 5)
##   Adjusted prior:
##     ~ normal(location = 82, scale = 14)
## 
## Coefficients
##   Specified prior:
##     ~ normal(location = 6, scale = 1)
##   Adjusted prior:
##     ~ normal(location = 6, scale = 7.2)
## 
## Auxiliary (sigma)
##   Specified prior:
##     ~ exponential(rate = 1)
##   Adjusted prior:
##     ~ exponential(rate = 0.37)
## 
## Covariance
##  ~ decov(reg. = 1, conc. = 1, shape = 1, scale = 1)
## ------
## See help('prior_summary.stanreg') for more details
#4 prior plausible sets of the 27 farm-specific ("farm_name") relationships between
#"total_cup_points" and "aftertaste", β0j+β1X
set.seed(841113)
coffee %>% 
  add_epred_draws(coffee_model_prior_3 , ndraws = 4) %>%
  ggplot(aes(x = aftertaste , y =  total_cup_points)) +
  geom_line(aes(y = .epred, group = paste( farm_name, .draw))) + 
  facet_wrap(~ .draw)+
  labs(title="4 conjuntos de simulaciones a priori para  β0j+β1X") 

#Posterior simulation for coffee_model_prior_3
coffee_model_posterior_3 <- update(coffee_model_prior_3 , prior_PD = FALSE)
## (Stan sampling progress omitted: 4 chains, 10000 iterations each, roughly 5-7 seconds per chain.)
#Markov chain simulation diagnostics
mcmc_trace(coffee_model_posterior_3)

mcmc_dens_overlay(coffee_model_posterior_3)

mcmc_acf(coffee_model_posterior_3)

data.frame(neff_ratio(coffee_model_posterior_3))
##                                                           neff_ratio.coffee_model_posterior_3.
## (Intercept)                                                                            1.53410
## aftertaste                                                                             1.55565
## b[(Intercept) farm_name:agropecuaria_quiagral]                                         1.50830
## b[(Intercept) farm_name:apollo_estate]                                                 1.65640
## b[(Intercept) farm_name:bethel]                                                        1.50190
## b[(Intercept) farm_name:capoeirinha]                                                   1.44635
## b[(Intercept) farm_name:cerro_bueno]                                                   0.43395
## b[(Intercept) farm_name:conquista_/_morito]                                            1.12700
## b[(Intercept) farm_name:doi_tung_development_project]                                  1.25935
## b[(Intercept) farm_name:el_morito]                                                     1.53555
## b[(Intercept) farm_name:el_papaturro]                                                  0.91770
## b[(Intercept) farm_name:el_sacramento]                                                 1.01975
## b[(Intercept) farm_name:fazenda_capoeirnha]                                            1.29340
## b[(Intercept) farm_name:finca_medina]                                                  0.99615
## b[(Intercept) farm_name:gamboa]                                                        1.62515
## b[(Intercept) farm_name:kona_pacific_farmers_cooperative]                              1.54030
## b[(Intercept) farm_name:la_esmeralda]                                                  1.59205
## b[(Intercept) farm_name:la_esperanza]                                                  1.62295
## b[(Intercept) farm_name:la_esperanza_y_anexos]                                         0.99880
## b[(Intercept) farm_name:la_union_monte_verde]                                          1.65675
## b[(Intercept) farm_name:las_cuchillas]                                                 1.43525
## b[(Intercept) farm_name:las_delicias]                                                  1.69350
## b[(Intercept) farm_name:las_merceditas]                                                1.68715
## b[(Intercept) farm_name:los_hicaques]                                                  1.28645
## b[(Intercept) farm_name:piamonte]                                                      1.35435
## b[(Intercept) farm_name:rio_verde]                                                     0.98865
## b[(Intercept) farm_name:sethuraman_estates]                                            0.53045
## b[(Intercept) farm_name:several]                                                       0.78325
## b[(Intercept) farm_name:various]                                                       0.65790
## sigma                                                                                  0.94670
## Sigma[farm_name:(Intercept),(Intercept)]                                               0.36325
data.frame(rhat(coffee_model_posterior_3))
##                                                           rhat.coffee_model_posterior_3.
## (Intercept)                                                                    0.9999138
## aftertaste                                                                     0.9998900
## b[(Intercept) farm_name:agropecuaria_quiagral]                                 0.9998802
## b[(Intercept) farm_name:apollo_estate]                                         0.9999322
## b[(Intercept) farm_name:bethel]                                                0.9999660
## b[(Intercept) farm_name:capoeirinha]                                           0.9999900
## b[(Intercept) farm_name:cerro_bueno]                                           1.0001841
## b[(Intercept) farm_name:conquista_/_morito]                                    0.9999974
## b[(Intercept) farm_name:doi_tung_development_project]                          0.9999730
## b[(Intercept) farm_name:el_morito]                                             0.9999830
## b[(Intercept) farm_name:el_papaturro]                                          1.0000647
## b[(Intercept) farm_name:el_sacramento]                                         0.9999229
## b[(Intercept) farm_name:fazenda_capoeirnha]                                    0.9999578
## b[(Intercept) farm_name:finca_medina]                                          1.0004109
## b[(Intercept) farm_name:gamboa]                                                0.9999580
## b[(Intercept) farm_name:kona_pacific_farmers_cooperative]                      0.9998622
## b[(Intercept) farm_name:la_esmeralda]                                          0.9999028
## b[(Intercept) farm_name:la_esperanza]                                          0.9999639
## b[(Intercept) farm_name:la_esperanza_y_anexos]                                 1.0000068
## b[(Intercept) farm_name:la_union_monte_verde]                                  0.9999269
## b[(Intercept) farm_name:las_cuchillas]                                         1.0000665
## b[(Intercept) farm_name:las_delicias]                                          0.9999086
## b[(Intercept) farm_name:las_merceditas]                                        0.9999593
## b[(Intercept) farm_name:los_hicaques]                                          0.9999991
## b[(Intercept) farm_name:piamonte]                                              0.9999327
## b[(Intercept) farm_name:rio_verde]                                             1.0002849
## b[(Intercept) farm_name:sethuraman_estates]                                    0.9999940
## b[(Intercept) farm_name:several]                                               0.9999409
## b[(Intercept) farm_name:various]                                               1.0001570
## sigma                                                                          0.9999741
## Sigma[farm_name:(Intercept),(Intercept)]                                       1.0002063
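#A compact check of the diagnostics above (a sketch): all R-hat values sit near 1
#and the effective-sample-size ratios stay comfortably above 0.1
range(neff_ratio(coffee_model_posterior_3))
range(rhat(coffee_model_posterior_3))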
#Posterior summary for coffee_model_posterior_3
tidy_summary_3 <- tidy(coffee_model_posterior_3, effects = "fixed",
                      conf.int = TRUE, conf.level = 0.80)

tidy_summary_3
## # A tibble: 2 × 5
##   term        estimate std.error conf.low conf.high
##   <chr>          <dbl>     <dbl>    <dbl>     <dbl>
## 1 (Intercept)    39.4      1.95     36.9      41.9 
## 2 aftertaste      5.79     0.265     5.46      6.13
ggplot(coffee, aes(x = aftertaste, y = total_cup_points)) + 
         geom_point() + 
         facet_wrap(~ farm_name)

#200 posterior plausible global model lines, β0+β1X, for the relationship between
#"total_cup_points" and "aftertaste"
B0 <- tidy_summary_3$estimate[1]
B1 <- tidy_summary_3$estimate[2]
coffee %>%
  add_epred_draws(coffee_model_posterior_3, ndraws = 200, re_formula = NA) %>%
  ggplot(aes(x = aftertaste, y = total_cup_points)) +
  geom_line(aes(y = .epred, group = .draw), alpha = 0.1) +
  geom_abline(intercept = B0, slope = B1, color = "blue",lwd = 1) +
  lims(y = c(78, 85))+
  labs(title="200 líneas de modelo global plausibles a posteriori β0+β1X") 
## Warning: Removed 5875 rows containing missing values or values outside the scale range
## (`geom_line()`).

#Density plots of 100 prior plausible sets of "total_cup_points"
set.seed(841113)
coffee %>%
  add_predicted_draws(coffee_model_prior_3, ndraws = 100) %>%
  ggplot(aes(x = total_cup_points)) +
  geom_density(aes(x = .prediction, group = .draw)) +
  xlim(20,120)+
  labs(title="Densidad de 100 conjuntos plausibles a priori") +
#Density plots of 100 posterior plausible sets of "total_cup_points"
coffee %>%
  add_predicted_draws(coffee_model_posterior_3, ndraws = 100) %>%
  ggplot(aes(x = total_cup_points)) +
  geom_density(aes(x = .prediction, group = .draw)) +
  xlim(65,95) +
  labs(title="Densidad de 100 conjuntos plausibles a posteriori") 
## Warning: Removed 167 rows containing non-finite outside the scale range
## (`stat_density()`).

#Posterior summaries of farm_name-specific intercepts
coffee_summaries_3 <- coffee_model_posterior_3 %>%
  spread_draws(`(Intercept)`, b[,farm_name]) %>% 
  mutate(farm_name_intercept = `(Intercept)` + b) %>% 
  select(-`(Intercept)`, -b) %>% 
  median_qi(.width = 0.80) %>% 
  select(farm_name, farm_name_intercept, .lower, .upper)

head(coffee_summaries_3)
## # A tibble: 6 × 4
##   farm_name                       farm_name_intercept .lower .upper
##   <chr>                                         <dbl>  <dbl>  <dbl>
## 1 farm_name:agropecuaria_quiagral                39.3   36.8   41.8
## 2 farm_name:apollo_estate                        39.5   36.9   42.0
## 3 farm_name:bethel                               39.5   37.0   42.0
## 4 farm_name:capoeirinha                          39.4   36.8   41.9
## 5 farm_name:cerro_bueno                          38.4   35.9   40.9
## 6 farm_name:conquista_/_morito                   39.7   37.2   42.0
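#A quick sketch with coffee_summaries_3: the farms with the highest and lowest
#posterior median intercepts
coffee_summaries_3 %>% 
  arrange(desc(farm_name_intercept)) %>% 
  slice(c(1, n()))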
#100 prior plausible models for farm_name: "piamonte", "cerro bueno" and "various"
set.seed(841113)
coffee %>%
  filter(farm_name %in% c("piamonte","cerro bueno","various")) %>% 
  add_epred_draws(coffee_model_prior_3 , ndraws = 100) %>%
  ggplot(aes(x = aftertaste, y = total_cup_points)) +
  geom_line(
    aes(y = .epred, group = paste(farm_name, .draw), color = farm_name),
    alpha = 0.1,lwd = 0.8) + 
  geom_point(aes(color = farm_name)) +
  labs(title="100 modelos específicos plausibles a priori β0j+β1X") +
#100 posterior plausible models for farm_name: "piamonte", "cerro bueno" and "various"
  coffee %>%
  filter(farm_name %in% c("piamonte","cerro bueno","various")) %>% 
  add_epred_draws(coffee_model_posterior_3, ndraws = 100) %>%
  ggplot(aes(x = aftertaste, y = total_cup_points)) +
  geom_line(
    aes(y = .epred, group = paste(farm_name, .draw), color = farm_name),
    alpha = 0.1,lwd = 0.8) +
  geom_point(aes(color = farm_name))+
labs(title="100 modelos específicos plausibles a posteriori β0j+β1X") 

# Plot farm_name-specific models with the global model
ggplot(coffee, aes(y = total_cup_points, x = aftertaste, group = farm_name)) + 
  geom_abline(data = coffee_summaries_3, color = "grey",lwd=1.5,
              aes(intercept = farm_name_intercept, slope = B1)) + 
  geom_abline(intercept = B0, slope = B1, color = "blue",lwd=1.5) + 
  lims(x = c(6, 8), y = c(72, 85)) +
  labs(title="Modelos de mediana a posteriori (Específicos vs Global)")

#Posterior analysis of within-group and between-group variability

tidy_sigma_3 <- tidy(coffee_model_posterior_3, effects = "ran_pars")
tidy_sigma_3
## # A tibble: 2 × 3
##   term                     group     estimate
##   <chr>                    <chr>        <dbl>
## 1 sd_(Intercept).farm_name farm_name    0.535
## 2 sd_Observation.Residual  Residual     1.54
sigma_0 <- tidy_sigma_3[1,3]
sigma_y <- tidy_sigma_3[2,3]

sigma_0^2 / (sigma_0^2 + sigma_y^2)
##   estimate
## 1 0.107375
sigma_y^2 / (sigma_0^2 + sigma_y^2)
##   estimate
## 1 0.892625
#total variability
sigma_0^2 + sigma_y^2
##   estimate
## 1 2.664099
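#The same decomposition with plain numeric values (a sketch that pulls the standard
#deviations out of tidy_sigma_3): roughly 11% of the variability in total_cup_points
#lies between farms and 89% within farms
sd_farm  <- tidy_sigma_3 %>% filter(term == "sd_(Intercept).farm_name") %>% pull(estimate)
sd_resid <- tidy_sigma_3 %>% filter(term == "sd_Observation.Residual") %>% pull(estimate)
c(between = sd_farm^2, within = sd_resid^2) / (sd_farm^2 + sd_resid^2)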
#Hierarchical model with varying intercepts and slopes

# Plot farm_name-specific models in the data
coffee %>% 
  filter(farm_name %in% c("piamonte","various",
                          "rio verde","las merceditas")) %>% 
  ggplot(., aes(x = aftertaste, y = total_cup_points)) + 
  geom_point() + 
  geom_smooth(method = "lm", se = FALSE) + 
  facet_grid(~ farm_name)+
  labs(title="Tendencias en 4 fincas para calificaciones en función de aftertaste")
## `geom_smooth()` using formula = 'y ~ x'

#Plot of the linear regression models for the 27 levels of "farm_name"
ggplot(coffee, aes(x = aftertaste, y = total_cup_points, group = farm_name)) + 
  geom_smooth(method = "lm", se = FALSE, linewidth = 1)+
  labs(title="Tendencias en las 27 fincas para calificaciones en función de aftertaste")
## `geom_smooth()` using formula = 'y ~ x'

#Prior hierarchical model with varying intercepts and slopes and the predictor "aftertaste"
#total_cup_points ~  aftertaste + (aftertaste | farm_name)
coffee_model_prior_3_1 <- stan_glmer(
  total_cup_points ~  aftertaste + (aftertaste | farm_name), 
  data = coffee, family = gaussian,
  prior_intercept = normal(82, 5, autoscale = TRUE),
  prior = normal(6, 1, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  prior_covariance = decov(reg = 1, conc = 1, shape = 1, scale = 1),
  chains = 4, iter = 5000*2, seed = 841113, 
  prior_PD = TRUE, adapt_delta = 0.99999)
## (Stan sampling progress omitted: 4 chains, 10000 iterations each, roughly 17-20 seconds per chain.)
#Prior specifications of the model
prior_summary(coffee_model_prior_3_1)
## Priors for model 'coffee_model_prior_3_1' 
## ------
## Intercept (after predictors centered)
##   Specified prior:
##     ~ normal(location = 82, scale = 5)
##   Adjusted prior:
##     ~ normal(location = 82, scale = 14)
## 
## Coefficients
##   Specified prior:
##     ~ normal(location = 6, scale = 1)
##   Adjusted prior:
##     ~ normal(location = 6, scale = 7.2)
## 
## Auxiliary (sigma)
##   Specified prior:
##     ~ exponential(rate = 1)
##   Adjusted prior:
##     ~ exponential(rate = 0.37)
## 
## Covariance
##  ~ decov(reg. = 1, conc. = 1, shape = 1, scale = 1)
## ------
## See help('prior_summary.stanreg') for more details
#4 prior plausible sets of the 27 farm-specific ("farm_name") relationships between
#"total_cup_points" and "aftertaste", β0j+β1jX
set.seed(841113)
coffee %>% 
  add_epred_draws(coffee_model_prior_3_1  , ndraws = 4) %>%
  ggplot(aes(x = aftertaste , y =  total_cup_points)) +
  geom_line(aes(y = .epred, group = paste(farm_name, .draw))) + 
  facet_wrap(~ .draw)+
  labs(title="4 conjuntos de simulaciones a priori para  β0j+β1jX") 

#Posterior simulation for coffee_model_prior_3_1
coffee_model_posterior_3_1 <- update(coffee_model_prior_3_1 , prior_PD = FALSE)
## (Stan sampling progress omitted: 4 chains, 10000 iterations each, roughly 7-16 minutes per chain.)
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
#Markov chain simulation diagnostics
mcmc_trace(coffee_model_posterior_3_1)

mcmc_dens_overlay(coffee_model_posterior_3_1)

mcmc_acf(coffee_model_posterior_3_1)

data.frame(neff_ratio(coffee_model_posterior_3_1))
##                                                           neff_ratio.coffee_model_posterior_3_1.
## (Intercept)                                                                              0.08355
## aftertaste                                                                               0.08450
## b[(Intercept) farm_name:agropecuaria_quiagral]                                           0.17385
## b[aftertaste farm_name:agropecuaria_quiagral]                                            0.17630
## b[(Intercept) farm_name:apollo_estate]                                                   1.24210
## b[aftertaste farm_name:apollo_estate]                                                    1.23150
## b[(Intercept) farm_name:bethel]                                                          1.51200
## b[aftertaste farm_name:bethel]                                                           1.49360
## b[(Intercept) farm_name:capoeirinha]                                                     1.65595
## b[aftertaste farm_name:capoeirinha]                                                      1.63475
## b[(Intercept) farm_name:cerro_bueno]                                                     0.01910
## b[aftertaste farm_name:cerro_bueno]                                                      0.01955
## b[(Intercept) farm_name:conquista_/_morito]                                              0.82440
## b[aftertaste farm_name:conquista_/_morito]                                               0.82825
## b[(Intercept) farm_name:doi_tung_development_project]                                    0.80305
## b[aftertaste farm_name:doi_tung_development_project]                                     0.81085
## b[(Intercept) farm_name:el_morito]                                                       0.94205
## b[aftertaste farm_name:el_morito]                                                        0.92745
## b[(Intercept) farm_name:el_papaturro]                                                    1.27275
## b[aftertaste farm_name:el_papaturro]                                                     1.24550
## b[(Intercept) farm_name:el_sacramento]                                                   1.30920
## b[aftertaste farm_name:el_sacramento]                                                    1.26695
## b[(Intercept) farm_name:fazenda_capoeirnha]                                              1.35395
## b[aftertaste farm_name:fazenda_capoeirnha]                                               1.34130
## b[(Intercept) farm_name:finca_medina]                                                    0.21620
## b[aftertaste farm_name:finca_medina]                                                     0.21610
## b[(Intercept) farm_name:gamboa]                                                          0.80255
## b[aftertaste farm_name:gamboa]                                                           0.80400
## b[(Intercept) farm_name:kona_pacific_farmers_cooperative]                                0.29245
## b[aftertaste farm_name:kona_pacific_farmers_cooperative]                                 0.30085
## b[(Intercept) farm_name:la_esmeralda]                                                    1.45890
## b[aftertaste farm_name:la_esmeralda]                                                     1.44190
## b[(Intercept) farm_name:la_esperanza]                                                    1.45685
## b[aftertaste farm_name:la_esperanza]                                                     1.43965
## b[(Intercept) farm_name:la_esperanza_y_anexos]                                           1.24055
## b[aftertaste farm_name:la_esperanza_y_anexos]                                            1.22040
## b[(Intercept) farm_name:la_union_monte_verde]                                            1.06465
## b[aftertaste farm_name:la_union_monte_verde]                                             1.04765
## b[(Intercept) farm_name:las_cuchillas]                                                   1.21755
## b[aftertaste farm_name:las_cuchillas]                                                    1.21505
## b[(Intercept) farm_name:las_delicias]                                                    1.47430
## b[aftertaste farm_name:las_delicias]                                                     1.45640
## b[(Intercept) farm_name:las_merceditas]                                                  1.00705
## b[aftertaste farm_name:las_merceditas]                                                   0.99025
## b[(Intercept) farm_name:los_hicaques]                                                    1.46760
## b[aftertaste farm_name:los_hicaques]                                                     1.46075
## b[(Intercept) farm_name:piamonte]                                                        1.59130
## b[aftertaste farm_name:piamonte]                                                         1.56395
## b[(Intercept) farm_name:rio_verde]                                                       0.08155
## b[aftertaste farm_name:rio_verde]                                                        0.08150
## b[(Intercept) farm_name:sethuraman_estates]                                              0.94650
## b[aftertaste farm_name:sethuraman_estates]                                               0.92155
## b[(Intercept) farm_name:several]                                                         0.30650
## b[aftertaste farm_name:several]                                                          0.30005
## b[(Intercept) farm_name:various]                                                         0.03515
## b[aftertaste farm_name:various]                                                          0.03540
## sigma                                                                                    0.06115
## Sigma[farm_name:(Intercept),(Intercept)]                                                 0.04860
## Sigma[farm_name:aftertaste,(Intercept)]                                                  0.04995
## Sigma[farm_name:aftertaste,aftertaste]                                                   0.05155
data.frame(rhat(coffee_model_posterior_3_1))
##                                                           rhat.coffee_model_posterior_3_1.
## (Intercept)                                                                      1.0031463
## aftertaste                                                                       1.0031081
## b[(Intercept) farm_name:agropecuaria_quiagral]                                   1.0016654
## b[aftertaste farm_name:agropecuaria_quiagral]                                    1.0016230
## b[(Intercept) farm_name:apollo_estate]                                           1.0001022
## b[aftertaste farm_name:apollo_estate]                                            1.0001042
## b[(Intercept) farm_name:bethel]                                                  0.9999537
## b[aftertaste farm_name:bethel]                                                   0.9999550
## b[(Intercept) farm_name:capoeirinha]                                             1.0000168
## b[aftertaste farm_name:capoeirinha]                                              1.0000150
## b[(Intercept) farm_name:cerro_bueno]                                             1.0150191
## b[aftertaste farm_name:cerro_bueno]                                              1.0147115
## b[(Intercept) farm_name:conquista_/_morito]                                      1.0002048
## b[aftertaste farm_name:conquista_/_morito]                                       1.0002596
## b[(Intercept) farm_name:doi_tung_development_project]                            1.0003190
## b[aftertaste farm_name:doi_tung_development_project]                             1.0003211
## b[(Intercept) farm_name:el_morito]                                               1.0004649
## b[aftertaste farm_name:el_morito]                                                1.0004815
## b[(Intercept) farm_name:el_papaturro]                                            0.9998922
## b[aftertaste farm_name:el_papaturro]                                             0.9998938
## b[(Intercept) farm_name:el_sacramento]                                           0.9999240
## b[aftertaste farm_name:el_sacramento]                                            0.9999285
## b[(Intercept) farm_name:fazenda_capoeirnha]                                      1.0000284
## b[aftertaste farm_name:fazenda_capoeirnha]                                       1.0000313
## b[(Intercept) farm_name:finca_medina]                                            1.0016078
## b[aftertaste farm_name:finca_medina]                                             1.0016217
## b[(Intercept) farm_name:gamboa]                                                  1.0006049
## b[aftertaste farm_name:gamboa]                                                   1.0006036
## b[(Intercept) farm_name:kona_pacific_farmers_cooperative]                        1.0015566
## b[aftertaste farm_name:kona_pacific_farmers_cooperative]                         1.0015180
## b[(Intercept) farm_name:la_esmeralda]                                            0.9999062
## b[aftertaste farm_name:la_esmeralda]                                             0.9999049
## b[(Intercept) farm_name:la_esperanza]                                            0.9998731
## b[aftertaste farm_name:la_esperanza]                                             0.9998694
## b[(Intercept) farm_name:la_esperanza_y_anexos]                                   0.9999362
## b[aftertaste farm_name:la_esperanza_y_anexos]                                    0.9999334
## b[(Intercept) farm_name:la_union_monte_verde]                                    1.0002687
## b[aftertaste farm_name:la_union_monte_verde]                                     1.0002632
## b[(Intercept) farm_name:las_cuchillas]                                           1.0001756
## b[aftertaste farm_name:las_cuchillas]                                            1.0001756
## b[(Intercept) farm_name:las_delicias]                                            0.9999374
## b[aftertaste farm_name:las_delicias]                                             0.9999386
## b[(Intercept) farm_name:las_merceditas]                                          1.0006268
## b[aftertaste farm_name:las_merceditas]                                           1.0006413
## b[(Intercept) farm_name:los_hicaques]                                            0.9998826
## b[aftertaste farm_name:los_hicaques]                                             0.9998821
## b[(Intercept) farm_name:piamonte]                                                1.0000421
## b[aftertaste farm_name:piamonte]                                                 1.0000463
## b[(Intercept) farm_name:rio_verde]                                               1.0037116
## b[aftertaste farm_name:rio_verde]                                                1.0037249
## b[(Intercept) farm_name:sethuraman_estates]                                      1.0002453
## b[aftertaste farm_name:sethuraman_estates]                                       1.0002573
## b[(Intercept) farm_name:several]                                                 1.0004302
## b[aftertaste farm_name:several]                                                  1.0004442
## b[(Intercept) farm_name:various]                                                 1.0066899
## b[aftertaste farm_name:various]                                                  1.0066565
## sigma                                                                            1.0045027
## Sigma[farm_name:(Intercept),(Intercept)]                                         1.0066844
## Sigma[farm_name:aftertaste,(Intercept)]                                          1.0065468
## Sigma[farm_name:aftertaste,aftertaste]                                           1.0063988
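#A compact check of the diagnostics above (a sketch): the smallest effective-sample-size
#ratios fall well below the usual 0.1 rule of thumb, consistent with the Tail ESS
#warning printed after sampling
range(neff_ratio(coffee_model_posterior_3_1))
range(rhat(coffee_model_posterior_3_1))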
#Summary of the global regression parameters
tidy_summary_3_1 <- tidy(coffee_model_posterior_3_1, effects = "fixed",
                         conf.int = TRUE, conf.level = 0.80)
tidy_summary_3_1 
## # A tibble: 2 × 5
##   term        estimate std.error conf.low conf.high
##   <chr>          <dbl>     <dbl>    <dbl>     <dbl>
## 1 (Intercept)    43.6      3.16     39.6      47.8 
## 2 aftertaste      5.22     0.426     4.66      5.77
#200 posterior plausible global model lines, β0+β1X, for the relationship between
#"total_cup_points" and "aftertaste"
B0 <- tidy_summary_3_1$estimate[1]
B1 <- tidy_summary_3_1$estimate[2]
coffee %>%
  add_epred_draws(coffee_model_posterior_3_1, ndraws = 200, re_formula = NA) %>%
  ggplot(aes(x = aftertaste, y = total_cup_points)) +
  geom_line(aes(y = .epred, group = .draw), alpha = 0.1) +
  geom_abline(intercept = B0, slope = B1, color = "blue",lwd = 1) +
  lims(y = c(78, 85))+
  labs(title="200 líneas de modelo global plausibles a posteriori β0+β1X") 
## Warning: Removed 5136 rows containing missing values or values outside the scale range
## (`geom_line()`).

#Density plots of 100 prior plausible sets of "total_cup_points"
set.seed(841113)
coffee %>%
  add_predicted_draws(coffee_model_prior_3_1 , ndraws = 100) %>%
  ggplot(aes(x = total_cup_points)) +
  geom_density(aes(x = .prediction, group = .draw)) +
  xlim(30,120)+
  labs(title="Densidad de 100 conjuntos plausibles a priori") +
#Density plots of 100 posterior plausible sets of "total_cup_points"
coffee %>%
  add_predicted_draws(coffee_model_posterior_3_1 , ndraws = 100) %>%
  ggplot(aes(x = total_cup_points)) +
  geom_density(aes(x = .prediction, group = .draw)) +
  xlim(65,95)+
  labs(title="Densidad de 100 conjuntos plausibles a posteriori")
## Warning: Removed 2772 rows containing non-finite outside the scale range
## (`stat_density()`).

# Get MCMC chains for the farm_name-specific intercepts & slopes
coffee_chains_3_1 <- coffee_model_posterior_3_1  %>%
  spread_draws(`(Intercept)`, b[term, farm_name], `aftertaste`) %>% 
  pivot_wider(names_from = term, names_glue = "b_{term}",
              values_from = b) %>% 
  mutate(farm_name_intercept = `(Intercept)` + `b_(Intercept)`,
         farm_name_aftertaste = aftertaste + b_aftertaste)

# Posterior medians of farm_name-specific models
coffee_summaries_3_1  <- coffee_chains_3_1 %>% 
  group_by(farm_name) %>% 
  summarize(farm_name_intercept = median(farm_name_intercept),
            farm_name_aftertaste = median(farm_name_aftertaste))

head(coffee_summaries_3_1)
## # A tibble: 6 × 3
##   farm_name                       farm_name_intercept farm_name_aftertaste
##   <chr>                                         <dbl>                <dbl>
## 1 farm_name:agropecuaria_quiagral                37.2                 6.07
## 2 farm_name:apollo_estate                        45.9                 4.93
## 3 farm_name:bethel                               44.3                 5.15
## 4 farm_name:capoeirinha                          44.2                 5.15
## 5 farm_name:cerro_bueno                          19.9                 8.35
## 6 farm_name:conquista_/_morito                   45.1                 5.02
#100 prior plausible models for farm_name: "piamonte", "cerro bueno" and "various"
set.seed(841113)
coffee %>%
  filter(farm_name %in% c("piamonte","cerro bueno","various")) %>% 
  add_epred_draws(coffee_model_prior_3_1, ndraws = 100) %>%
  ggplot(aes(x = aftertaste, y = total_cup_points)) +
  geom_line(
    aes(y = .epred, group = paste(farm_name, .draw), color = farm_name),
    alpha = 0.1,lwd = 0.8) +
  geom_point(aes(color = farm_name)) +
  labs(title="100 modelos específicos plausibles a priori β0j+β1jX") +
#100 posterior plausible models for farm_name: "piamonte", "cerro bueno" and "various"
coffee %>%
  filter(farm_name %in% c("piamonte","cerro bueno","various")) %>% 
  add_epred_draws(coffee_model_posterior_3_1, ndraws = 100) %>%
  ggplot(aes(x = aftertaste, y = total_cup_points)) +
  geom_line(
    aes(y = .epred, group = paste(farm_name, .draw), color = farm_name),
    alpha = 0.1,lwd = 0.8) +
  geom_point(aes(color = farm_name)) +
  labs(title="100 modelos específicos plausibles a posteriori β0j+β1jX")

# Posterior median models for the 27 levels of "farm_name", computed from
# the hierarchical model with random intercepts and slopes
ggplot(coffee, aes(y = total_cup_points, x = aftertaste, group = farm_name)) + 
  geom_abline(data = coffee_summaries_3_1 , color = "gray",lwd=1,
              aes(intercept = farm_name_intercept, slope = farm_name_aftertaste)) + 
  lims(x = c(4, 10), y = c(70, 90))+
  labs(title="Modelos de mediana a posteriori (Específicos)") 

#Posterior analysis of within-group and between-group variability
tidy(coffee_model_posterior_3_1 , effects = "ran_pars")
## # A tibble: 4 × 3
##   term                                 group     estimate
##   <chr>                                <chr>        <dbl>
## 1 sd_(Intercept).farm_name             farm_name    8.79 
## 2 sd_aftertaste.farm_name              farm_name    1.17 
## 3 cor_(Intercept).aftertaste.farm_name farm_name   -0.999
## 4 sd_Observation.Residual              Residual     1.44
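#The correlation of roughly -0.999 between farm-specific intercepts and slopes suggests
#that farms with larger intercepts tend to have flatter aftertaste slopes; a quick
#sketch of the same pattern in the posterior medians from coffee_summaries_3_1:
coffee_summaries_3_1 %>% 
  summarize(cor_intercept_slope = cor(farm_name_intercept, farm_name_aftertaste))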
#Comparison of the 3 single-predictor (aftertaste) models

#Visually compare models complete_pooled_model_3, coffee_model_posterior_3
#and coffee_model_posterior_3_1 with pp_check
pp_check(complete_pooled_model_3)+
  labs(x = "total_cup_points", title = "complete_pooled_model_3")+
pp_check(coffee_model_posterior_3)+
  labs(x = "total_cup_points", title = "coffee_model_posterior_3")+
pp_check(coffee_model_posterior_3_1)+
  labs(x = "total_cup_points", title = "coffee_model_posterior_3_1")

#New complete pooled models with more than one predictor


#Complete pooled model for total_cup_points ~ aftertaste + flavor
complete_pooled_model_10 <- stan_glm(
  total_cup_points ~  aftertaste + flavor , 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## (Stan sampling progress omitted: 4 chains, 10000 iterations each, roughly 1 second per chain.)
#Complete pooled model for total_cup_points ~ aftertaste + flavor + balance
complete_pooled_model_11 <- stan_glm(
  total_cup_points ~  aftertaste + flavor + balance , 
  data = coffee, family = gaussian, 
  prior_intercept = normal(0, 2.5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  chains = 4, iter = 5000*2, seed = 841113)
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 2.3e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.23 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 1: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 1: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 1: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 1: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 1: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 1: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 1: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 1: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 1: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 1: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 1: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.51 seconds (Warm-up)
## Chain 1:                0.598 seconds (Sampling)
## Chain 1:                1.108 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 2.5e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.25 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 2: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 2: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 2: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 2: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 2: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 2: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 2: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 2: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 2: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 2: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 2: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.448 seconds (Warm-up)
## Chain 2:                0.649 seconds (Sampling)
## Chain 2:                1.097 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 2.3e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.23 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 3: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 3: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 3: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 3: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 3: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 3: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 3: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 3: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 3: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 3: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 3: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.602 seconds (Warm-up)
## Chain 3:                0.84 seconds (Sampling)
## Chain 3:                1.442 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 6.1e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.61 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 4: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 4: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 4: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 4: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 4: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 4: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 4: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 4: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 4: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 4: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 4: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.677 seconds (Warm-up)
## Chain 4:                0.909 seconds (Sampling)
## Chain 4:                1.586 seconds (Total)
## Chain 4:
#Compare the complete pooled models using expected log-predictive densities (ELPD)
#We also compare against our best single-predictor model

elpd_10 <- loo(complete_pooled_model_10)
elpd_11 <- loo(complete_pooled_model_11)

loo_compare(elpd_3, elpd_10, elpd_11)
##                          elpd_diff se_diff
## complete_pooled_model_11   0.0       0.0  
## complete_pooled_model_10 -36.8       8.4  
## complete_pooled_model_3  -58.9      11.4
#Models from best to worst:                 |  Predictors in each model: 
# complete_pooled_model_11   0.0       0.0  |  aftertaste + flavor + balance 
# complete_pooled_model_10  -36.8      8.4  |  aftertaste + flavor
# complete_pooled_model_3   -58.9      11.4 |  aftertaste 


#Adding "flavor" and "balance" substantially improves the complete pooled model
#We choose the 3-predictor model: aftertaste + flavor + balance
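#As a rough rule of thumb, a model's advantage is treated as clear when
#|elpd_diff| is at least about 2 * se_diff; a quick sketch of that check:
elpd_comparison <- loo_compare(elpd_3, elpd_10, elpd_11)
abs(elpd_comparison[, "elpd_diff"]) > 2 * elpd_comparison[, "se_diff"]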

#Summary of the flavor data
summary(coffee$flavor)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##    6.08    7.33    7.50    7.46    7.67    8.42
#Summary of the balance data
summary(coffee$balance)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##   6.170   7.330   7.500   7.475   7.670   8.580
#Boxplots of flavor by farm_name category
ggplot(coffee, aes(x = farm_name, y = flavor, fill = farm_name)) + 
  geom_boxplot() + 
  theme(legend.position = 'none',
        axis.text.x = element_text(angle = 70, hjust = 1, size = 12, color = "blue")) +
  labs(title = "Flavor distributions by coffee farm") 

#Boxplots of balance by farm_name category
ggplot(coffee, aes(x = farm_name, y = balance, fill = farm_name)) + 
  geom_boxplot() + 
  theme(legend.position = 'none',
        axis.text.x = element_text(angle = 70, hjust = 1, size = 12, color = "blue")) +
  labs(title = "Balance distributions by coffee farm") 

#Posterior hierarchical model with varying intercepts and 3 predictors
#total_cup_points ~ aftertaste + flavor + balance + (1 | farm_name)
coffee_model_posterior_11 <- stan_glmer(
  total_cup_points ~ aftertaste + flavor + balance + (1 | farm_name), 
  data = coffee, family = gaussian,
  prior_intercept = normal(82, 5, autoscale = TRUE),
  prior = normal(0, 2.5, autoscale = TRUE), 
  prior_aux = exponential(1, autoscale = TRUE),
  prior_covariance = decov(reg = 1, conc = 1, shape = 1, scale = 1),
  chains = 4, iter = 5000*2, seed = 841113, 
  prior_PD = FALSE)
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 0.002841 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 28.41 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 1: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 1: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 1: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 1: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 1: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 1: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 1: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 1: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 1: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 1: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 1: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 14.621 seconds (Warm-up)
## Chain 1:                1.854 seconds (Sampling)
## Chain 1:                16.475 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 0.000163 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 1.63 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 2: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 2: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 2: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 2: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 2: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 2: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 2: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 2: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 2: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 2: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 2: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 6.859 seconds (Warm-up)
## Chain 2:                3.434 seconds (Sampling)
## Chain 2:                10.293 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 9.4e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.94 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 3: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 3: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 3: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 3: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 3: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 3: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 3: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 3: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 3: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 3: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 3: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 6.085 seconds (Warm-up)
## Chain 3:                1.809 seconds (Sampling)
## Chain 3:                7.894 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 4.3e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.43 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 10000 [  0%]  (Warmup)
## Chain 4: Iteration: 1000 / 10000 [ 10%]  (Warmup)
## Chain 4: Iteration: 2000 / 10000 [ 20%]  (Warmup)
## Chain 4: Iteration: 3000 / 10000 [ 30%]  (Warmup)
## Chain 4: Iteration: 4000 / 10000 [ 40%]  (Warmup)
## Chain 4: Iteration: 5000 / 10000 [ 50%]  (Warmup)
## Chain 4: Iteration: 5001 / 10000 [ 50%]  (Sampling)
## Chain 4: Iteration: 6000 / 10000 [ 60%]  (Sampling)
## Chain 4: Iteration: 7000 / 10000 [ 70%]  (Sampling)
## Chain 4: Iteration: 8000 / 10000 [ 80%]  (Sampling)
## Chain 4: Iteration: 9000 / 10000 [ 90%]  (Sampling)
## Chain 4: Iteration: 10000 / 10000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 3.038 seconds (Warm-up)
## Chain 4:                3.107 seconds (Sampling)
## Chain 4:                6.145 seconds (Total)
## Chain 4:
#Prior specifications for the model
prior_summary(coffee_model_posterior_11)
## Priors for model 'coffee_model_posterior_11' 
## ------
## Intercept (after predictors centered)
##   Specified prior:
##     ~ normal(location = 82, scale = 5)
##   Adjusted prior:
##     ~ normal(location = 82, scale = 14)
## 
## Coefficients
##   Specified prior:
##     ~ normal(location = [0,0,0], scale = [2.5,2.5,2.5])
##   Adjusted prior:
##     ~ normal(location = [0,0,0], scale = [18.00,18.68,19.17])
## 
## Auxiliary (sigma)
##   Specified prior:
##     ~ exponential(rate = 1)
##   Adjusted prior:
##     ~ exponential(rate = 0.37)
## 
## Covariance
##  ~ decov(reg. = 1, conc. = 1, shape = 1, scale = 1)
## ------
## See help('prior_summary.stanreg') for more details
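#Quick sanity check of the adjusted scales (a sketch, assuming rstanarm's
#gaussian autoscaling convention: intercept and coefficient scales are scaled
#by sd(y), coefficients additionally by 1/sd(x), and the exponential rate by
#1/sd(y); model.frame() is assumed to return the rows used in the fit)
coffee_used <- model.frame(coffee_model_posterior_11)
5 * sd(coffee_used$total_cup_points)                                  # intercept scale, ~14
2.5 * sd(coffee_used$total_cup_points) / sd(coffee_used$aftertaste)   # aftertaste scale, ~18
1 / sd(coffee_used$total_cup_points)                                  # sigma rate, ~0.37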
#Markov chain simulation diagnostics
mcmc_trace(coffee_model_posterior_11)

mcmc_dens_overlay(coffee_model_posterior_11)

mcmc_acf(coffee_model_posterior_11)

neff_ratio(coffee_model_posterior_11)
##                                               (Intercept) 
##                                                   1.10390 
##                                                aftertaste 
##                                                   0.85715 
##                                                    flavor 
##                                                   0.87390 
##                                                   balance 
##                                                   1.21350 
##            b[(Intercept) farm_name:agropecuaria_quiagral] 
##                                                   0.52970 
##                    b[(Intercept) farm_name:apollo_estate] 
##                                                   1.30440 
##                           b[(Intercept) farm_name:bethel] 
##                                                   0.98520 
##                      b[(Intercept) farm_name:capoeirinha] 
##                                                   1.01905 
##                      b[(Intercept) farm_name:cerro_bueno] 
##                                                   0.64825 
##               b[(Intercept) farm_name:conquista_/_morito] 
##                                                   0.72800 
##     b[(Intercept) farm_name:doi_tung_development_project] 
##                                                   1.19140 
##                        b[(Intercept) farm_name:el_morito] 
##                                                   1.17280 
##                     b[(Intercept) farm_name:el_papaturro] 
##                                                   1.17100 
##                    b[(Intercept) farm_name:el_sacramento] 
##                                                   1.30430 
##               b[(Intercept) farm_name:fazenda_capoeirnha] 
##                                                   1.06320 
##                     b[(Intercept) farm_name:finca_medina] 
##                                                   0.62545 
##                           b[(Intercept) farm_name:gamboa] 
##                                                   1.38455 
## b[(Intercept) farm_name:kona_pacific_farmers_cooperative] 
##                                                   1.41015 
##                     b[(Intercept) farm_name:la_esmeralda] 
##                                                   1.24485 
##                     b[(Intercept) farm_name:la_esperanza] 
##                                                   1.36265 
##            b[(Intercept) farm_name:la_esperanza_y_anexos] 
##                                                   1.17655 
##             b[(Intercept) farm_name:la_union_monte_verde] 
##                                                   1.25250 
##                    b[(Intercept) farm_name:las_cuchillas] 
##                                                   1.26780 
##                     b[(Intercept) farm_name:las_delicias] 
##                                                   1.35530 
##                   b[(Intercept) farm_name:las_merceditas] 
##                                                   1.26285 
##                     b[(Intercept) farm_name:los_hicaques] 
##                                                   1.15315 
##                         b[(Intercept) farm_name:piamonte] 
##                                                   1.32990 
##                        b[(Intercept) farm_name:rio_verde] 
##                                                   1.17925 
##               b[(Intercept) farm_name:sethuraman_estates] 
##                                                   0.43440 
##                          b[(Intercept) farm_name:several] 
##                                                   0.69540 
##                          b[(Intercept) farm_name:various] 
##                                                   0.87940 
##                                                     sigma 
##                                                   0.85375 
##                  Sigma[farm_name:(Intercept),(Intercept)] 
##                                                   0.32335
rhat(coffee_model_posterior_11)
##                                               (Intercept) 
##                                                 0.9998618 
##                                                aftertaste 
##                                                 0.9999928 
##                                                    flavor 
##                                                 0.9998865 
##                                                   balance 
##                                                 0.9999114 
##            b[(Intercept) farm_name:agropecuaria_quiagral] 
##                                                 1.0004211 
##                    b[(Intercept) farm_name:apollo_estate] 
##                                                 0.9999590 
##                           b[(Intercept) farm_name:bethel] 
##                                                 0.9999666 
##                      b[(Intercept) farm_name:capoeirinha] 
##                                                 1.0001787 
##                      b[(Intercept) farm_name:cerro_bueno] 
##                                                 1.0003551 
##               b[(Intercept) farm_name:conquista_/_morito] 
##                                                 1.0005565 
##     b[(Intercept) farm_name:doi_tung_development_project] 
##                                                 1.0000235 
##                        b[(Intercept) farm_name:el_morito] 
##                                                 0.9999648 
##                     b[(Intercept) farm_name:el_papaturro] 
##                                                 1.0001915 
##                    b[(Intercept) farm_name:el_sacramento] 
##                                                 0.9998481 
##               b[(Intercept) farm_name:fazenda_capoeirnha] 
##                                                 1.0000815 
##                     b[(Intercept) farm_name:finca_medina] 
##                                                 1.0003395 
##                           b[(Intercept) farm_name:gamboa] 
##                                                 0.9999436 
## b[(Intercept) farm_name:kona_pacific_farmers_cooperative] 
##                                                 0.9999090 
##                     b[(Intercept) farm_name:la_esmeralda] 
##                                                 0.9999893 
##                     b[(Intercept) farm_name:la_esperanza] 
##                                                 0.9998724 
##            b[(Intercept) farm_name:la_esperanza_y_anexos] 
##                                                 1.0000193 
##             b[(Intercept) farm_name:la_union_monte_verde] 
##                                                 1.0001404 
##                    b[(Intercept) farm_name:las_cuchillas] 
##                                                 1.0000350 
##                     b[(Intercept) farm_name:las_delicias] 
##                                                 1.0000023 
##                   b[(Intercept) farm_name:las_merceditas] 
##                                                 0.9999315 
##                     b[(Intercept) farm_name:los_hicaques] 
##                                                 1.0001378 
##                         b[(Intercept) farm_name:piamonte] 
##                                                 0.9999023 
##                        b[(Intercept) farm_name:rio_verde] 
##                                                 1.0000443 
##               b[(Intercept) farm_name:sethuraman_estates] 
##                                                 1.0004876 
##                          b[(Intercept) farm_name:several] 
##                                                 1.0000242 
##                          b[(Intercept) farm_name:various] 
##                                                 0.9999631 
##                                                     sigma 
##                                                 0.9999922 
##                  Sigma[farm_name:(Intercept),(Intercept)] 
##                                                 1.0008893
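#A compact way to flag potential issues in these diagnostics (a sketch using
#common thresholds): effective sample size ratios below ~0.1 or R-hat values
#above ~1.05 would deserve a closer look
any(neff_ratio(coffee_model_posterior_11) < 0.1)
any(rhat(coffee_model_posterior_11) > 1.05)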
#Visually compare, with pp_check, the models coffee_model_posterior_3_1,
#complete_pooled_model_11 and coffee_model_posterior_11
pp_check(coffee_model_posterior_3_1) +
  labs(x = "total_cup_points", title = "coffee_model_posterior_3_1") +
pp_check(complete_pooled_model_11) +
  labs(x = "total_cup_points", title = "complete_pooled_model_11") +
pp_check(coffee_model_posterior_11) +
  labs(x = "total_cup_points", title = "coffee_model_posterior_11")

# Calculate prediction summaries
set.seed(841113)
prediction_summary(model = coffee_model_posterior_3_1, data = coffee)
##         mae mae_scaled within_50 within_95
## 1 0.6987606  0.4639352 0.6606498 0.9602888
prediction_summary(model = complete_pooled_model_11, data = coffee)
##         mae mae_scaled within_50 within_95
## 1 0.5816711  0.4429149 0.7075812 0.9422383
prediction_summary(model = coffee_model_posterior_11, data = coffee)
##         mae mae_scaled within_50 within_95
## 1 0.5259862  0.4106818 0.7184116 0.9494585
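#These summaries reuse the data the models were fit on, so they are somewhat
#optimistic. A cross-validated alternative (a sketch, assuming bayesrules'
#prediction_summary_cv() accepts these arguments; it refits the model k times,
#so it is slow):
set.seed(841113)
cv_summary_11 <- prediction_summary_cv(model = coffee_model_posterior_11,
                                       data = coffee, k = 10)
cv_summary_11$cv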
#Compare the models: 
#total_cup_points ~ aftertaste + (aftertaste | farm_name)
#total_cup_points ~ aftertaste + flavor + balance + (1 | farm_name) 
#using expected log-predictive densities (ELPD)
#We also compare against our best complete pooled model: total_cup_points ~ aftertaste + flavor + balance


elpd_3_1 <- loo(coffee_model_posterior_3_1)
## Warning: Found 4 observation(s) with a pareto_k > 0.7. We recommend calling 'loo' again with argument 'k_threshold = 0.7' in order to calculate the ELPD without the assumption that these observations are negligible. This will refit the model 4 times to compute the ELPDs for the problematic observations directly.
elpd_11 <- loo(complete_pooled_model_11)
elpd_11_1 <- loo(coffee_model_posterior_11)
## Warning: Found 1 observation(s) with a pareto_k > 0.7. We recommend calling 'loo' again with argument 'k_threshold = 0.7' in order to calculate the ELPD without the assumption that these observations are negligible. This will refit the model 1 times to compute the ELPDs for the problematic observations directly.
loo_compare(elpd_3_1, elpd_11, elpd_11_1)
##                            elpd_diff se_diff
## coffee_model_posterior_11    0.0       0.0  
## complete_pooled_model_11    -3.8       2.7  
## coffee_model_posterior_3_1 -46.7      12.0
#We select coffee_model_posterior_11: it has the highest ELPD, and beyond its
#modest edge over complete_pooled_model_11, it also respects the grouping of
#observations by farm_name

#Posterior summary for coffee_model_posterior_11
tidy(coffee_model_posterior_11, effects = "fixed",
     conf.int = TRUE, conf.level = 0.80)
## # A tibble: 4 × 5
##   term        estimate std.error conf.low conf.high
##   <chr>          <dbl>     <dbl>    <dbl>     <dbl>
## 1 (Intercept)    27.9      1.85     25.5      30.2 
## 2 aftertaste      1.99     0.408     1.47      2.52
## 3 flavor          2.26     0.421     1.73      2.80
## 4 balance         3.00     0.317     2.59      3.41
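#All three 80% credible intervals sit well above zero: holding the other
#predictors constant, a one-point increase in aftertaste, flavor or balance is
#associated with an increase of roughly 2, 2.3 and 3 points in total_cup_points,
#respectively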
#Posterior analysis of within-group and between-group variability

tidy_sigma_11 <- tidy(coffee_model_posterior_11, effects = "ran_pars")
tidy_sigma_11
## # A tibble: 2 × 3
##   term                     group     estimate
##   <chr>                    <chr>        <dbl>
## 1 sd_(Intercept).farm_name farm_name    0.386
## 2 sd_Observation.Residual  Residual     1.25
sigma_0 <- tidy_sigma_11[1,3]
sigma_y <- tidy_sigma_11[2,3]

sigma_0^2 / (sigma_0^2 + sigma_y^2)
##     estimate
## 1 0.08687946
sigma_y^2 / (sigma_0^2 + sigma_y^2)
##    estimate
## 1 0.9131205
#Total variability
sigma_0^2 + sigma_y^2
##   estimate
## 1 1.713512
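#The same decomposition with plain numeric values instead of 1x1 tibbles
#(an equivalent sketch using dplyr's filter() and pull())
sigma_0_num <- tidy_sigma_11 %>% 
  filter(term == "sd_(Intercept).farm_name") %>% 
  pull(estimate)
sigma_y_num <- tidy_sigma_11 %>% 
  filter(term == "sd_Observation.Residual") %>% 
  pull(estimate)
sigma_0_num^2 / (sigma_0_num^2 + sigma_y_num^2)  # share of variability between farms (~9%)
sigma_y_num^2 / (sigma_0_num^2 + sigma_y_num^2)  # share of variability within farms (~91%)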
# Simulate posterior predictive models for 3 farms (two observed, one new)
set.seed(841113)
predict_next_puntuacion_2 <- posterior_predict(
  coffee_model_posterior_11, 
  newdata = data.frame(farm_name = c("cerro_bueno", "mi_finca", "finca_medina"),
                       aftertaste = c(6, 6, 6),
                       flavor = c(7.2, 7.2, 7.2),
                       balance = c(8.5, 8.5, 8.5)))

# Posterior predictive model plots
mcmc_areas(predict_next_puntuacion_2, prob = 0.8) +
  ggplot2::scale_y_discrete(labels = c("cerro_bueno", "mi_finca", "finca_medina"))
## Scale for y is already present.
## Adding another scale for y, which will replace the existing scale.
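#Numeric summary of the same posterior predictive draws (a complementary
#sketch): posterior mean and 80% prediction interval for each hypothetical batch
predict_next_puntuacion_2 %>% 
  as.data.frame() %>% 
  set_names(c("cerro_bueno", "mi_finca", "finca_medina")) %>% 
  pivot_longer(everything(), names_to = "farm_name", values_to = "prediction") %>% 
  group_by(farm_name) %>% 
  summarize(mean_prediction = mean(prediction),
            lower_80 = quantile(prediction, 0.10),
            upper_80 = quantile(prediction, 0.90))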