library(readxl)
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.3     ✓ purrr   0.3.4
## ✓ tibble  3.1.0     ✓ dplyr   1.0.5
## ✓ tidyr   1.1.3     ✓ stringr 1.4.0
## ✓ readr   1.4.0     ✓ forcats 0.5.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
# Read the .sav database exported by SurveyMonkey
data <- haven::read_sav("~/Dropbox/Laboratorio/Pedro Brandao/Covid/covid_mov_disorders.sav")
#data <- haven::read_sav("C:/Dropbox/Laboratorio/Pedro Brandao/Covid/covid_mov_disorders.sav")
#colnames(data)

# Clean the variable names
data <- tibble::as_tibble(data, .name_repair = janitor::make_clean_names)
colnames(data)
##  [1] "authors"                           "ref_number"                       
##  [3] "continent"                         "country"                          
##  [5] "year"                              "number_of_cases"                  
##  [7] "myoclonus_prop"                    "myoclonus"                        
##  [9] "dystonia_prop"                     "dystonia"                         
## [11] "catatonia_prop"                    "catatonia"                        
## [13] "tardive_prop"                      "tardive_syndromes"                
## [15] "diplopia_prop"                     "diplopia"                         
## [17] "chorea_prop"                       "chorea"                           
## [19] "oculomotor_dysf_prop"              "oculomotor_disturbances"          
## [21] "functional_prop"                   "functional_mov_disorder"          
## [23] "parkinsonism_prop"                 "parkinsonism"                     
## [25] "tremor_prop"                       "action_postural_tremor"           
## [27] "ataxia_prop"                       "ataxia"                           
## [29] "encephalopathy_prop"               "encephalopathy_confusion"         
## [31] "stroke_prop"                       "stroke"                           
## [33] "pet_reported_prop"                 "pet_reported"                     
## [35] "mri_ct_reported_prop"              "mri_ct_reported"                  
## [37] "pet_fdg_result"                    "pet_cerebellar_hypermetabolism"   
## [39] "pet_cortical_hypomet"              "pet_basal_ganglia_hypermetabolism"
## [41] "pet_fdg_normal"                    "mri_focal_edema_prop"             
## [43] "mri_focal_edema_or_enhancement"    "mri_unesp_hyperintensity_prop"    
## [45] "mri_unespecific_hyperintensities"  "mri_ct_stroke_prop"               
## [47] "mri_ct_stroke"                     "mri_ct_microbleeds_prop"          
## [49] "mri_ct_microbleeds"                "normal_imaging_prop"              
## [51] "mri_ct_normal"                     "mri_or_ct"                        
## [53] "spinal_tap_prop"                   "spinal_tap_reported"              
## [55] "csf_normal_cells"                  "csf_pleocytosis"                  
## [57] "csf_cell_count"                    "csf_normal_protein"               
## [59] "csf_protein"                       "enmg_reported"                    
## [61] "enmg"                              "eeg_backgroung_slowing"           
## [63] "eeg_normal"                        "eeg_reported"                     
## [65] "eeg"                               "presynaptic_dopa_imaging"         
## [67] "da_t_spect_or_f_dopa"              "levetiracetam"                    
## [69] "cyproheptadine"                    "clonazepam"                       
## [71] "valproic_acid"                     "steroids_iv_or_oral"              
## [73] "plasma_exchange"                   "ivig"                             
## [75] "pramipexol"                        "ketamine"                         
## [77] "primidone"                         "lorazepam"                        
## [79] "dexmedetomedine"                   "midazolam"                        
## [81] "convalescent_plasma"               "tocilizumab"                      
## [83] "ect"
# Export to .csv format
#readr::write_csv(data, file ="~/Dropbox/Laboratorio/Pedro Brandao/Covid/covid_mov_disorders.csv")
#mov <- readr::read_csv("~/Dropbox/Laboratorio/Pedro Brandao/Covid/covid_mov_disorders.csv")
#mov %>% glimpse

options(max.print=1000000)
#psych::describe(data)

dataset <- data %>% select(-c(pet_cerebellar_hypermetabolism, pet_cortical_hypomet, pet_basal_ganglia_hypermetabolism, pet_fdg_normal))

psych::describe(data, skew = F)
##                                   vars  n  mean    sd min max range   se
## authors*                             1 44 22.23 12.50   1  43    42 1.88
## ref_number*                          2 44 22.50 12.85   1  44    43 1.94
## continent*                           3 44  2.27  0.92   1   4     3 0.14
## country*                             4 44  9.45  4.77   1  15    14 0.72
## year*                                5 44  1.14  0.35   1   2     1 0.05
## number_of_cases                      6 44  2.11  2.05   1   8     7 0.31
## myoclonus_prop*                      7 44  1.00  0.00   1   1     0 0.00
## myoclonus                            8 44  1.34  2.18   0   8     8 0.33
## dystonia_prop*                       9 44  1.00  0.00   1   1     0 0.00
## dystonia                            10 44  0.02  0.15   0   1     1 0.02
## catatonia_prop*                     11 44  1.00  0.00   1   1     0 0.00
## catatonia                           12 44  0.05  0.21   0   1     1 0.03
## tardive_prop*                       13 44  1.00  0.00   1   1     0 0.00
## tardive_syndromes                   14 44  0.07  0.45   0   3     3 0.07
## diplopia_prop*                      15 44  1.00  0.00   1   1     0 0.00
## diplopia                            16 44  0.16  0.43   0   2     2 0.06
## chorea_prop*                        17 44  1.00  0.00   1   1     0 0.00
## chorea                              18 44  0.02  0.15   0   1     1 0.02
## oculomotor_dysf_prop*               19 44  1.00  0.00   1   1     0 0.00
## oculomotor_disturbances             20 44  0.43  0.85   0   4     4 0.13
## functional_prop*                    21 44  1.00  0.00   1   1     0 0.00
## functional_mov_disorder             22 44  0.07  0.33   0   2     2 0.05
## parkinsonism_prop*                  23 44  1.00  0.00   1   1     0 0.00
## parkinsonism                        24 44  0.11  0.32   0   1     1 0.05
## tremor_prop*                        25 44  1.00  0.00   1   1     0 0.00
## action_postural_tremor              26 44  0.23  0.80   0   5     5 0.12
## ataxia_prop*                        27 44  1.00  0.00   1   1     0 0.00
## ataxia                              28 44  0.82  1.33   0   7     7 0.20
## encephalopathy_prop*                29 44  1.95  0.21   1   2     1 0.03
## encephalopathy_confusion            30 44  0.84  1.31   0   5     5 0.20
## stroke_prop*                        31 44  1.00  0.00   1   1     0 0.00
## stroke                              32 44  0.11  0.32   0   1     1 0.05
## pet_reported_prop*                  33 44  1.00  0.00   1   1     0 0.00
## pet_reported                        34 44  0.11  0.39   0   2     2 0.06
## mri_ct_reported_prop*               35 44  1.00  0.00   1   1     0 0.00
## mri_ct_reported                     36 44  1.75  1.82   0   7     7 0.27
## pet_fdg_result*                     37 44  3.89  0.58   1   5     4 0.09
## pet_cerebellar_hypermetabolism      38  4  0.75  0.50   0   1     1 0.25
## pet_cortical_hypomet                39  4  0.50  0.58   0   1     1 0.29
## pet_basal_ganglia_hypermetabolism   40  4  0.50  0.58   0   1     1 0.29
## pet_fdg_normal                      41  4  0.25  0.50   0   1     1 0.25
## mri_focal_edema_prop*               42 44  1.00  0.00   1   1     0 0.00
## mri_focal_edema_or_enhancement      43 44  0.23  0.68   0   4     4 0.10
## mri_unesp_hyperintensity_prop*      44 44  1.91  0.29   1   2     1 0.04
## mri_unespecific_hyperintensities    45 44  0.11  0.32   0   1     1 0.05
## mri_ct_stroke_prop*                 46 44  1.00  0.00   1   1     0 0.00
## mri_ct_stroke                       47 44  0.09  0.29   0   1     1 0.04
## mri_ct_microbleeds_prop*            48 44  1.00  0.00   1   1     0 0.00
## mri_ct_microbleeds                  49 44  0.11  0.62   0   4     4 0.09
## normal_imaging_prop*                50 44  1.00  0.00   1   1     0 0.00
## mri_ct_normal                       51 44  1.16  1.33   0   6     6 0.20
## mri_or_ct*                          52 44  9.75  4.53   1  21    20 0.68
## spinal_tap_prop*                    53 44  1.00  0.00   1   1     0 0.00
## spinal_tap_reported                 54 44  1.02  1.49   0   8     8 0.22
## csf_normal_cells                    55 44  0.75  1.22   0   5     5 0.18
## csf_pleocytosis                     56 44  0.27  0.62   0   3     3 0.09
## csf_cell_count*                     57 44  5.41  2.51   1  11    10 0.38
## csf_normal_protein                  58 44  0.52  0.90   0   4     4 0.14
## csf_protein*                        59 44  6.59  2.43   1  11    10 0.37
## enmg_reported                       60 43  0.21  0.71   0   4     4 0.11
## enmg*                               61 44  3.86  1.03   1   7     6 0.15
## eeg_backgroung_slowing              62 44  0.36  0.81   0   3     3 0.12
## eeg_normal                          63 44  0.20  0.46   0   2     2 0.07
## eeg_reported                        64 44  0.61  0.97   0   3     3 0.15
## eeg*                                65 44  5.91  1.84   1  11    10 0.28
## presynaptic_dopa_imaging            66 44  0.16  0.64   0   4     4 0.10
## da_t_spect_or_f_dopa*               67 44  2.14  0.55   1   4     3 0.08
## levetiracetam                       68 44  0.32  0.74   0   3     3 0.11
## cyproheptadine                      69 43  0.02  0.15   0   1     1 0.02
## clonazepam                          70 44  0.39  0.99   0   6     6 0.15
## valproic_acid                       71 44  0.23  0.89   0   5     5 0.13
## steroids_iv_or_oral                 72 44  0.48  0.82   0   3     3 0.12
## plasma_exchange                     73 44  0.02  0.15   0   1     1 0.02
## ivig                                74 44  0.41  1.00   0   5     5 0.15
## pramipexol                          75 44  0.05  0.21   0   1     1 0.03
## ketamine                            76 44  0.02  0.15   0   1     1 0.02
## primidone                           77 44  0.02  0.15   0   1     1 0.02
## lorazepam                           78 44  0.11  0.49   0   3     3 0.07
## dexmedetomedine                     79 44  0.11  0.75   0   5     5 0.11
## midazolam                           80 44  0.02  0.15   0   1     1 0.02
## convalescent_plasma                 81 44  0.02  0.15   0   1     1 0.02
## tocilizumab                         82 44  0.02  0.15   0   1     1 0.02
## ect                                 83 44  0.02  0.15   0   1     1 0.02
library(DataExplorer)
#plot_missing(dataset)
#plot_bar(dataset)
plot_histogram(dataset, ggtheme = theme_classic())

# http://htmlpreview.github.io/?https://raw.githubusercontent.com/spgarbet/tangram-vignettes/master/example.html
library(tangram)
## Loading required package: R6
## Loading required package: magrittr
## 
## Attaching package: 'magrittr'
## The following object is masked from 'package:purrr':
## 
##     set_names
## The following object is masked from 'package:tidyr':
## 
##     extract
## Loading required package: knitr
## 
## Attaching package: 'tangram'
## The following object is masked from 'package:dplyr':
## 
##     add_row
## The following object is masked from 'package:readr':
## 
##     cols
## The following object is masked from 'package:tibble':
## 
##     add_row
vars = c("myoclonus", "dystonia", "catatonia", "tardive_syndromes", "diplopia", "chorea", "oculomotor_disturbances", "functional_mov_disorder", "parkinsonism", "action_postural_tremor", "ataxia", "encephalopathy_confusion")
  
tbl <- tangram(number_of_cases ~ myoclonus + dystonia + catatonia + tardive_syndromes + diplopia + chorea + oculomotor_disturbances + functional_mov_disorder + parkinsonism + action_postural_tremor + ataxia + encephalopathy_confusion, id = "ref_number", dataset)

tbl
                           N    number_of_cases (N=44)
myoclonus                  44   ρ=0.67
dystonia                   44   ρ=0.30
catatonia                  44   ρ=-0.15
tardive_syndromes          44   ρ=0.26
diplopia                   44   ρ=0.01
chorea                     44   ρ=-0.11
oculomotor_disturbances    44   ρ=0.19
functional_mov_disorder    44   ρ=0.03
parkinsonism               44   ρ=-0.25
action_postural_tremor     44   ρ=-0.01
ataxia                     44   ρ=0.11
encephalopathy_confusion   44   ρ=0.40
N is the number of non-missing values. ¹Kruskal-Wallis. ²Pearson. ³Wilcoxon.
tangram("myoclonus ~ dystonia + catatonia + tardive_syndromes + diplopia + chorea + oculomotor_disturbances + functional_mov_disorder + parkinsonism + action_postural_tremor + ataxia + encephalopathy_confusion", id = "ref_number", dataset,
        style="nejm", caption = "Table NEJM Style",
        relsize=-2,
        capture_units=TRUE)
Table NEJM Style
                           N    myoclonus (N=44)
dystonia                   44   ρ=0.26
catatonia                  44   ρ=-0.20
tardive_syndromes          44   ρ=0.23
diplopia                   44   ρ=-0.11
chorea                     44   ρ=-0.14
oculomotor_disturbances    44   ρ=0.29
functional_mov_disorder    44   ρ=-0.20
parkinsonism               44   ρ=-0.23
action_postural_tremor     44   ρ=0.06
ataxia                     44   ρ=-0.06
encephalopathy_confusion   44   ρ=0.31
N is the number of non-missing values. ¹Kruskal-Wallis. ²Pearson. ³Wilcoxon.
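# As a sanity check on the tables above (assuming the rho that tangram's
# default summary reports for numeric pairs is a Spearman correlation), the
# first entry can be reproduced directly; the table shows rho = 0.67:
cor(dataset$number_of_cases, dataset$myoclonus, method = "spearman")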
# https://arxiv.org/pdf/1705.11123.pdf
# https://cran.r-project.org/web/packages/brms/vignettes/brms_distreg.html
#https://towardsdatascience.com/an-illustrated-guide-to-the-zero-inflated-poisson-model-b22833343057

library(brms)
## Loading required package: Rcpp
## Loading 'brms' package (version 2.15.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
## 
## Attaching package: 'brms'
## The following object is masked from 'package:stats':
## 
##     ar
library(bayesplot)
## This is bayesplot version 1.8.0
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
##    * Does _not_ affect other ggplot2 plots
##    * See ?bayesplot_theme_set for details on theme setting
# Zero-inflated Poisson
b11.4 <-
  brm(data = dataset, family = zero_inflated_poisson("log"),
      myoclonus ~ 1 + eeg_normal + eeg_backgroung_slowing + mri_ct_normal + ataxia + oculomotor_disturbances + encephalopathy_confusion,
      prior = c(prior(normal(0, 10), class = Intercept),
                prior(beta(2, 2), class = zi)),  # the brms default is beta(1, 1)
      iter = 20000, warmup = 5000, chains = 4, cores = 4,
      seed = 123)
## Compiling Stan program...
## Start sampling
summary(b11.4)
##  Family: zero_inflated_poisson 
##   Links: mu = log; zi = identity 
## Formula: myoclonus ~ 1 + eeg_normal + eeg_backgroung_slowing + mri_ct_normal + ataxia + oculomotor_disturbances + encephalopathy_confusion 
##    Data: dataset (Number of observations: 44) 
## Samples: 4 chains, each with iter = 20000; warmup = 5000; thin = 1;
##          total post-warmup samples = 60000
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -0.30      0.34    -0.96     0.37 1.00    38194
## eeg_normal                  -0.20      0.36    -0.95     0.44 1.00    41610
## eeg_backgroung_slowing      -0.28      0.25    -0.79     0.19 1.00    28599
## mri_ct_normal                0.52      0.14     0.24     0.79 1.00    31498
## ataxia                      -0.02      0.09    -0.20     0.16 1.00    44457
## oculomotor_disturbances     -0.32      0.22    -0.75     0.10 1.00    27813
## encephalopathy_confusion     0.30      0.11     0.07     0.52 1.00    34919
##                          Tail_ESS
## Intercept                   41682
## eeg_normal                  34433
## eeg_backgroung_slowing      35337
## mri_ct_normal               37106
## ataxia                      43394
## oculomotor_disturbances     34216
## encephalopathy_confusion    39917
## 
## Family Specific Parameters: 
##    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## zi     0.29      0.11     0.09     0.51 1.00    40890    31349
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
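# For intuition, a minimal sketch of the zero-inflated Poisson likelihood this
# family fits (not brms internals): with probability zi the count is an
# "extra" zero, otherwise it is Poisson(lambda), with log(lambda) given by the
# linear predictor above.
dzipois <- function(y, lambda, zi) {
  ifelse(y == 0,
         zi + (1 - zi) * dpois(0, lambda),
         (1 - zi) * dpois(y, lambda))
}
# e.g. evaluated near the posterior means (Intercept ~ -0.30, zi ~ 0.29):
dzipois(0:3, lambda = exp(-0.30), zi = 0.29)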
# Posteriors
post2 <-
  posterior_samples(b11.4)

# Correlations
post2 %>%
  select(-zi, -lp__) %>%
  psych::lowerCor() %>%
  corrplot::corrplot()
##                            b_Int b_g_n b_g__ b_m__ b_atx b_cl_ b_nc_
## b_Intercept                 1.00                                    
## b_eeg_normal               -0.41  1.00                              
## b_eeg_backgroung_slowing   -0.15  0.34  1.00                        
## b_mri_ct_normal            -0.35 -0.04 -0.66  1.00                  
## b_ataxia                    0.12 -0.41 -0.17 -0.29  1.00            
## b_oculomotor_disturbances   0.03  0.10  0.81 -0.72 -0.10  1.00      
## b_encephalopathy_confusion -0.39  0.10 -0.50  0.40  0.18 -0.67  1.00

  #psych::cor.plot()

# Plot
post2 %>%
  select(-zi, -lp__) %>%
  mcmc_intervals(prob = .5, prob_outer = .95) +
  theme(axis.ticks.y = element_blank(),
        axis.text.y  = element_text(hjust = 0))

# Compute a Bayesian version of R-squared for regression models
bayes_R2(b11.4)
##     Estimate Est.Error      Q2.5     Q97.5
## R2 0.3817289 0.1333514 0.1257179 0.6218341
# Plot densities
plot(b11.4, N = 2, ask = FALSE)

# Plot conditionals
#plot(conditional_effects(b11.4), points = TRUE, ask = FALSE)
plot(conditional_effects(b11.4), points = FALSE, ask = FALSE)

library(bayestestR)
#https://easystats.github.io/bayestestR/articles/example1.html
# Posteriors
posteriors <- insight::get_parameters(b11.4)
head(posteriors) # Show the first 6 rows
##   b_Intercept b_eeg_normal b_eeg_backgroung_slowing b_mri_ct_normal    b_ataxia
## 1  -0.5669891   -0.0617629               -0.5678852       0.7598823 -0.03471616
## 2  -0.6650704   -0.2100586               -0.3349172       0.6513143 -0.04608057
## 3  -0.3718611   -0.3868625               -0.5018273       0.6837904 -0.04801854
## 4  -0.4463948   -0.4926240               -0.3026802       0.6163537 -0.05939852
## 5  -0.3883172   -0.1839353               -0.5217115       0.7285788 -0.03204576
## 6  -0.0668791   -0.4858783               -0.1465746       0.5510676 -0.07741510
##   b_oculomotor_disturbances b_encephalopathy_confusion
## 1                -0.6902995                  0.4709696
## 2                -0.4391715                  0.2824489
## 3                -0.4099570                  0.2043963
## 4                -0.3631250                  0.3606093
## 5                -0.5680346                  0.2597345
## 6                -0.2170041                  0.1622791
nrow(posteriors) # Size (number of rows)
## [1] 60000
ggplot(posteriors, aes(x = b_encephalopathy_confusion)) +
  geom_density(fill = "skyblue")

mean(posteriors$b_encephalopathy_confusion)
## [1] 0.2993514
median(posteriors$b_encephalopathy_confusion)
## [1] 0.2993325
# Maximum a posteriori
map_estimate(posteriors$b_encephalopathy_confusion)
## MAP Estimate: 0.29
ggplot(posteriors, aes(x = b_encephalopathy_confusion)) +
  geom_density(fill = "skyblue") +
  # The mean in blue
  geom_vline(xintercept = mean(posteriors$b_encephalopathy_confusion), color = "blue", size = 1) +
  # The median in red
  geom_vline(xintercept = median(posteriors$b_encephalopathy_confusion), color = "red", size = 1) +
  # The MAP in purple
  geom_vline(xintercept = map_estimate(posteriors$b_encephalopathy_confusion), color = "purple", size = 1)

# Uncertainty
range(posteriors$b_encephalopathy_confusion)
## [1] -0.1532022  0.8157571
hdi(posteriors$b_encephalopathy_confusion, ci = 0.89)
## 89% HDI: [0.12, 0.48]
# The proportion of HDI lying within this “null” region can be used as a decision criterion for “null-hypothesis” testing. Such a Test for Practical Equivalence, implemented via equivalence_test(), is based on the “HDI+ROPE decision rule” (Kruschke, 2018) to check whether parameter values should be accepted or rejected against an explicitly formulated “null hypothesis” (i.e., a ROPE). If the HDI is completely outside the ROPE, the “null hypothesis” for this parameter is “rejected”. If the ROPE completely covers the HDI, i.e., all the most credible values of a parameter are inside the ROPE, the null hypothesis is accepted. Otherwise, whether to accept or reject the null hypothesis is undecided.
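# A hand-rolled sketch of that decision rule (equivalence_test() below is the
# canonical implementation): compare a parameter's 95% HDI against the
# ROPE [-0.1, 0.1].
rope_lo <- -0.1
rope_hi <- 0.1
h <- hdi(posteriors$b_encephalopathy_confusion, ci = 0.95)
if (h$CI_high < rope_lo || h$CI_low > rope_hi) {
  "rejected"    # 95% HDI entirely outside the ROPE
} else if (h$CI_low >= rope_lo && h$CI_high <= rope_hi) {
  "accepted"    # 95% HDI entirely inside the ROPE
} else {
  "undecided"   # partial overlap
}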

library(see)
equivalence_test(b11.4)
## Warning: Could not estimate a good default ROPE range. Using 'c(-0.1, 0.1)'.
## Possible multicollinearity between b_oculomotor_disturbances and b_eeg_backgroung_slowing (r = 0.81), b_oculomotor_disturbances and b_mri_ct_normal (r = 0.72). This might lead to inappropriate results. See 'Details' in '?equivalence_test'.
## # Test for Practical Equivalence
## 
##   ROPE: [-0.10 0.10]
## 
## Parameter                |        H0 | inside ROPE |      95% HDI
## -----------------------------------------------------------------
## Intercept                | Undecided |     16.67 % | [-0.95 0.38]
## eeg_normal               | Undecided |     21.57 % | [-0.92 0.47]
## eeg_backgroung_slowing   | Undecided |     18.59 % | [-0.77 0.20]
## mri_ct_normal            |  Rejected |      0.00 % | [ 0.25 0.79]
## ataxia                   | Undecided |     75.94 % | [-0.20 0.16]
## oculomotor_disturbances  | Undecided |     13.15 % | [-0.75 0.10]
## encephalopathy_confusion | Undecided |      1.82 % | [ 0.07 0.52]
library(bayestestR)
# For b_encephalopathy_confusion, the effect is positive with a probability of 99.53%. We call this index the Probability of Direction (pd).
describe_posterior(
    post2,
    effects = "all",
    component = "all",
    test = c("p_direction", "p_significance"),
    centrality = "all"
)
## Summary of Posterior Distribution
## 
## Parameter                  | Median |   Mean |       MAP |           95% CI |     pd |   ps
## -------------------------------------------------------------------------------------------
## b_Intercept                |  -0.30 |  -0.30 |     -0.26 | [ -0.95,   0.38] | 81.25% | 0.72
## b_eeg_normal               |  -0.19 |  -0.20 |     -0.14 | [ -0.92,   0.47] | 70.36% | 0.60
## b_eeg_backgroung_slowing   |  -0.28 |  -0.28 |     -0.26 | [ -0.77,   0.20] | 87.39% | 0.77
## b_mri_ct_normal            |   0.52 |   0.52 |      0.52 | [  0.25,   0.79] | 99.98% | 1.00
## b_ataxia                   |  -0.01 |  -0.02 | -6.22e-03 | [ -0.20,   0.16] | 56.28% | 0.18
## b_oculomotor_disturbances  |  -0.33 |  -0.32 |     -0.32 | [ -0.75,   0.10] | 93.12% | 0.85
## b_encephalopathy_confusion |   0.30 |   0.30 |      0.29 | [  0.07,   0.52] | 99.53% | 0.96
## zi                         |   0.29 |   0.29 |      0.30 | [  0.08,   0.50] |   100% | 0.97
## lp__                       | -69.79 | -70.13 |    -69.18 | [-74.15, -66.75] |   100% | 1.00
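# pd is simply the share of posterior draws on the dominant side of zero, so
# the 99.53% above can be recomputed by hand:
mean(post2$b_encephalopathy_confusion > 0)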
describe_posterior(post2, test = c("p_direction", "rope", "bayesfactor"))
## Warning in bayesfactor_parameters.data.frame(x, prior = bf_prior, ...): Prior
## not specified! Please specify priors (with column order matching 'posterior') to
## get meaningful results.
## Loading required namespace: logspline
## Summary of Posterior Distribution
## 
## Parameter                  | Median |           95% CI |     pd |          ROPE | % in ROPE |   BF
## --------------------------------------------------------------------------------------------------
## b_Intercept                |  -0.30 | [ -0.95,   0.38] | 81.25% | [-0.10, 0.10] |    16.67% | 1.00
## b_eeg_normal               |  -0.19 | [ -0.92,   0.47] | 70.36% | [-0.10, 0.10] |    21.57% | 1.00
## b_eeg_backgroung_slowing   |  -0.28 | [ -0.77,   0.20] | 87.39% | [-0.10, 0.10] |    18.59% | 1.00
## b_mri_ct_normal            |   0.52 | [  0.25,   0.79] | 99.98% | [-0.10, 0.10] |        0% | 1.00
## b_ataxia                   |  -0.01 | [ -0.20,   0.16] | 56.28% | [-0.10, 0.10] |    75.94% | 1.00
## b_oculomotor_disturbances  |  -0.33 | [ -0.75,   0.10] | 93.12% | [-0.10, 0.10] |    13.15% | 1.00
## b_encephalopathy_confusion |   0.30 | [  0.07,   0.52] | 99.53% | [-0.10, 0.10] |     1.82% | 1.00
## zi                         |   0.29 | [  0.08,   0.50] |   100% | [-0.10, 0.10] |     1.64% | 1.00
## lp__                       | -69.79 | [-74.15, -66.75] |   100% | [-0.10, 0.10] |        0% | 1.00
# Interestingly, this index is usually highly correlated with the frequentist p-value. We can roughly infer the corresponding p-value with a simple transformation:

pd <- 99.53
onesided_p <- 1 - pd / 100
twosided_p <- onesided_p * 2
twosided_p
## [1] 0.0094
# Although we arrived at a similar conclusion, the Bayesian framework allowed us to develop a more profound and intuitive understanding of our effect, and of the uncertainty of its estimation.

# ROPE Percentage
# Testing whether this distribution is different from 0 doesn’t make sense, as 0 is a single value (and the probability that any distribution is different from a single value is infinite).

# However, one way to assess significance could be to define an area around 0, which will consider as practically equivalent to zero (i.e., absence of, or a negligible, effect). This is called the Region of Practical Equivalence (ROPE), and is one way of testing the significance of parameters.

rope(posteriors$b_oculomotor_disturbances, range = c(-0.1, 0.1), ci = 0.9)
## # Proportion of samples inside the ROPE [-0.10, 0.10]:
## 
## inside ROPE
## -----------
## 11.29 %
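# The same proportion by hand (a sketch of what rope() computes here): keep
# the draws inside the 90% HDI, then take the fraction within [-0.1, 0.1].
draws <- posteriors$b_oculomotor_disturbances
h90 <- hdi(draws, ci = 0.9)
mean(abs(draws[draws >= h90$CI_low & draws <= h90$CI_high]) <= 0.1)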
#rope_value <- rope_range(b11.4)
#rope_value
#rope(posteriors$b_encephalopathy_confusion, range = rope_range, ci = 0.89)
library(BayesFactor)
## Loading required package: coda
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
## ************
## Welcome to BayesFactor 0.9.12-4.2. If you have questions, please contact Richard Morey (richarddmorey@gmail.com).
## 
## Type BFManual() to open the manual.
## ************
result <- correlationBF(dataset$myoclonus, dataset$oculomotor_disturbances)
describe_posterior(result)
## Summary of Posterior Distribution
## 
## Parameter | Median |       95% CI |     pd |          ROPE | % in ROPE |    BF |         Prior
## ----------------------------------------------------------------------------------------------
## rho       |   0.36 | [0.12, 0.60] | 99.65% | [-0.05, 0.05] |        0% | 10.16 | Beta (3 +- 3)
bayesfactor(result)
## Bayes Factors for Model Comparison
## 
##     Model         BF
## [2] (rho != 0) 10.16
## 
## * Against Denominator: [1] (rho = 0)
## *   Bayes Factor Type: JZS (BayesFactor)
# We got a BF of 10.16. What does it mean? Bayes factors are continuous measures of relative evidence, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as the numerator), and a Bayes factor smaller than 1 giving evidence in favour of the other model (the denominator).
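# Under equal prior odds for the two models, a Bayes factor converts to a
# posterior model probability (a sketch, not a bayestestR function):
bf <- 10.16
bf / (1 + bf)  # P(rho != 0 | data) ~ 0.91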

# https://easystats.github.io/bayestestR/articles/example2.html 
# Visualise the Bayes factor
library(see)

plot(bayesfactor(result)) +
  scale_fill_pizza()

# Store results
result_pd <- p_direction(b11.4)
print(result_pd)
## Probability of Direction
## 
## Parameter                |     pd
## ---------------------------------
## (Intercept)              | 81.25%
## eeg_normal               | 70.36%
## eeg_backgroung_slowing   | 87.39%
## mri_ct_normal            | 99.98%
## ataxia                   | 56.28%
## oculomotor_disturbances  | 93.12%
## encephalopathy_confusion | 99.53%
# Plot results
p = plot(result_pd)

library(ggthemes)
p + theme_bw()

Since myoclonus and ataxia are the main movement disorders here, we can fit another regression in which ataxia is the dependent variable and the same variables as before are the predictors: myoclonus, encephalopathy, EEG findings, MRI findings, and CSF findings.

# Zero-inflated Poisson
b11.4 <-
  brm(data = dataset, family = zero_inflated_poisson("log"),
      ataxia ~ 1 + eeg_normal + eeg_backgroung_slowing + mri_ct_normal + myoclonus + oculomotor_disturbances + encephalopathy_confusion,
      prior = c(prior(normal(0, 10), class = Intercept),
                prior(beta(2, 2), class = zi)),  # the brms default is beta(1, 1)
      iter = 20000, warmup = 5000, chains = 4, cores = 4,
      seed = 123)
## Compiling Stan program...
## Start sampling
summary(b11.4)
##  Family: zero_inflated_poisson 
##   Links: mu = log; zi = identity 
## Formula: ataxia ~ 1 + eeg_normal + eeg_backgroung_slowing + mri_ct_normal + myoclonus + oculomotor_disturbances + encephalopathy_confusion 
##    Data: dataset (Number of observations: 44) 
## Samples: 4 chains, each with iter = 20000; warmup = 5000; thin = 1;
##          total post-warmup samples = 60000
## 
## Population-Level Effects: 
##                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept                   -1.21      0.36    -1.95    -0.54 1.00    42503
## eeg_normal                   0.08      0.54    -0.99     1.10 1.00    25545
## eeg_backgroung_slowing       0.37      0.66    -0.81     1.73 1.00    22011
## mri_ct_normal                0.55      0.38    -0.18     1.30 1.00    25118
## myoclonus                   -0.27      0.20    -0.66     0.11 1.00    26775
## oculomotor_disturbances      0.49      0.41    -0.31     1.31 1.00    30455
## encephalopathy_confusion     0.11      0.35    -0.58     0.76 1.00    21986
##                          Tail_ESS
## Intercept                   40256
## eeg_normal                  35066
## eeg_backgroung_slowing      29122
## mri_ct_normal               30280
## myoclonus                   33592
## oculomotor_disturbances     34021
## encephalopathy_confusion    34217
## 
## Family Specific Parameters: 
##    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## zi     0.20      0.09     0.05     0.39 1.00    36184    26159
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# Posteriors
post2 <-
  posterior_samples(b11.4)

# Correlations
post2 %>%
  select(-zi, -lp__) %>%
  psych::lowerCor() %>%
  corrplot::corrplot()
##                            b_Int b_g_n b_g__ b_m__ b_myc b_cl_ b_nc_
## b_Intercept                 1.00                                    
## b_eeg_normal                0.09  1.00                              
## b_eeg_backgroung_slowing    0.04  0.71  1.00                        
## b_mri_ct_normal            -0.34 -0.25 -0.31  1.00                  
## b_myoclonus                 0.26 -0.12 -0.24 -0.77  1.00            
## b_oculomotor_disturbances  -0.07  0.12  0.47 -0.73  0.31  1.00      
## b_encephalopathy_confusion -0.17 -0.76 -0.88  0.02  0.42 -0.10  1.00

  #psych::cor.plot()

# Plot
post2 %>%
  select(-zi, -lp__) %>%
  mcmc_intervals(prob = .5, prob_outer = .95) +
  theme(axis.ticks.y = element_blank(),
        axis.text.y  = element_text(hjust = 0))

# Compute a Bayesian version of R-squared for regression models
bayes_R2(b11.4)
##     Estimate  Est.Error     Q2.5     Q97.5
## R2 0.4858474 0.08400813 0.272014 0.6730633
# Plot densities
plot(b11.4, N = 2, ask = FALSE)

# Plot conditionals
#plot(conditional_effects(b11.4), points = TRUE, ask = FALSE)
plot(conditional_effects(b11.4), points = FALSE, ask = FALSE)

library(bayestestR)
#https://easystats.github.io/bayestestR/articles/example1.html
# Posteriors
posteriors <- insight::get_parameters(b11.4)
head(posteriors) # Show the first 6 rows
##   b_Intercept  b_eeg_normal b_eeg_backgroung_slowing b_mri_ct_normal
## 1  -0.9032087  0.3069660279               0.05031461     -0.06752816
## 2  -1.2422134 -0.4155413347              -0.12017383      1.28458144
## 3  -1.6352534 -0.6303895632               0.11232942      1.38009344
## 4  -1.3340756 -0.4587305862              -0.27784983      0.93202772
## 5  -1.2148672 -0.1439042332               0.42607820      0.82392736
## 6  -1.3570124 -0.0005213227               0.63386933      0.47929947
##   b_myoclonus b_oculomotor_disturbances b_encephalopathy_confusion
## 1  -0.1024413                 0.9005320                0.341230878
## 2  -0.5451952                -0.3097661                0.003682511
## 3  -0.4990940                -0.1812204                0.133504186
## 4  -0.4705083                 0.5279498                0.490109258
## 5  -0.3761630                 0.1663300               -0.067389899
## 6  -0.3929597                 0.9894662                0.020853779
nrow(posteriors) # Size (number of rows)
## [1] 60000
ggplot(posteriors, aes(x = b_myoclonus)) +
  geom_density(fill = "orange")

mean(posteriors$b_myoclonus)
## [1] -0.271551
median(posteriors$b_myoclonus)
## [1] -0.2672608
# Maximum a posteriori
map_estimate(posteriors$b_myoclonus)
## MAP Estimate: -0.25
ggplot(posteriors, aes(x = b_myoclonus)) +
  geom_density(fill = "orange") +
  # The mean in blue
  geom_vline(xintercept = mean(posteriors$b_myoclonus), color = "blue", size = 1) +
  # The median in red
  geom_vline(xintercept = median(posteriors$b_myoclonus), color = "red", size = 1) +
  # The MAP in purple
  geom_vline(xintercept = map_estimate(posteriors$b_myoclonus), color = "purple", size = 1)

# Uncertainty
range(posteriors$b_myoclonus)
## [1] -1.1044467  0.6991193
hdi(posteriors$b_myoclonus, ci = 0.89)
## 89% HDI: [-0.59, 0.03]
# The proportion of HDI lying within this “null” region can be used as a decision criterion for “null-hypothesis” testing. Such a Test for Practical Equivalence, implemented via equivalence_test(), is based on the “HDI+ROPE decision rule” (Kruschke, 2018) to check whether parameter values should be accepted or rejected against an explicitly formulated “null hypothesis” (i.e., a ROPE). If the HDI is completely outside the ROPE, the “null hypothesis” for this parameter is “rejected”. If the ROPE completely covers the HDI, i.e., all the most credible values of a parameter are inside the ROPE, the null hypothesis is accepted. Otherwise, whether to accept or reject the null hypothesis is undecided.

library(see)
equivalence_test(b11.4)
## Warning: Could not estimate a good default ROPE range. Using 'c(-0.1, 0.1)'.
## Possible multicollinearity between b_eeg_backgroung_slowing and b_eeg_normal (r = 0.71), b_encephalopathy_confusion and b_eeg_normal (r = 0.76), b_encephalopathy_confusion and b_eeg_backgroung_slowing (r = 0.88), b_myoclonus and b_mri_ct_normal (r = 0.77), b_oculomotor_disturbances and b_mri_ct_normal (r = 0.73). This might lead to inappropriate results. See 'Details' in '?equivalence_test'.
## # Test for Practical Equivalence
## 
##   ROPE: [-0.10 0.10]
## 
## Parameter                |        H0 | inside ROPE |       95% HDI
## ------------------------------------------------------------------
## Intercept                |  Rejected |      0.00 % | [-1.93 -0.52]
## eeg_normal               | Undecided |     14.99 % | [-0.95  1.13]
## eeg_backgroung_slowing   | Undecided |     12.43 % | [-0.85  1.68]
## mri_ct_normal            | Undecided |      7.71 % | [-0.18  1.30]
## myoclonus                | Undecided |     16.73 % | [-0.66  0.11]
## oculomotor_disturbances  | Undecided |     10.28 % | [-0.31  1.31]
## encephalopathy_confusion | Undecided |     19.35 % | [-0.58  0.76]

Bayesian binomial test

library(BayesianFirstAid)
## Loading required package: rjags
## Linked to JAGS 4.3.0
## Loaded modules: basemod,bugs
# Myoclonus
sum(dataset$number_of_cases)
## [1] 93
sum(dataset$myoclonus)
## [1] 59
# N total = 93
# Myoclonus = 59
# Non-myoclonus = N total - Myoclonus = 34
#fit <- bayes.binom.test(c(59, 34))

fit <- bayes.binom.test(c(sum(dataset$myoclonus), (sum(dataset$number_of_cases) - sum(dataset$myoclonus))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$myoclonus), (sum(dataset$number_of_cases) - sum(dataset$myoclonus)))
## number of successes = 59, number of trials = 93
## Estimated relative frequency of success:
##   0.63 
## 95% credible interval:
##   0.53 0.72 
## The relative frequency of success is more than 0.5 by a probability of 0.995 
## and less than 0.5 by a probability of 0.005
summary(fit)
##   Data
## number of successes = 59, number of trials = 93
## 
##   Model parameters and generated quantities
## theta: the relative frequency of success
## x_pred: predicted number of successes in a replication
## 
##   Measures
##          mean    sd  HDIlo  HDIup %<comp %>comp
## theta   0.631 0.049  0.532  0.724  0.005  0.995
## x_pred 58.742 6.509 45.000 70.000  0.000  1.000
## 
## 'HDIlo' and 'HDIup' are the limits of a 95% HDI credible interval.
## '%<comp' and '%>comp' are the probabilities of the respective parameter being
## smaller or larger than 0.5.
## 
##   Quantiles
##         q2.5%   q25% median   q75% q97.5%
## theta   0.532  0.598  0.632  0.665  0.725
## x_pred 46.000 54.000 59.000 63.000 71.000
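# Because this model is conjugate (Beta(1, 1) prior, binomial likelihood), the
# posterior is Beta(1 + 59, 1 + 34) in closed form, so the MCMC summary above
# can be checked analytically:
pbeta(0.5, 1 + 59, 1 + 34, lower.tail = FALSE)  # P(theta > 0.5), ~0.995
qbeta(c(0.025, 0.975), 1 + 59, 1 + 34)          # central 95% interval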
plot(fit)

diagnostics(fit)
## 
## Iterations = 2:5001
## Thinning interval = 1 
## Number of chains = 3 
## Sample size per chain = 5000 
## 
##   Diagnostic measures
##          mean    sd mcmc_se n_eff Rhat
## theta   0.631 0.049   0.000 15405    1
## x_pred 58.742 6.509   0.054 14740    1
## 
## mcmc_se: the estimated standard error of the MCMC approximation of the mean.
## n_eff: a crude measure of effective MCMC sample size.
## Rhat: the potential scale reduction factor (at convergence, Rhat=1).
## 
##   Model parameters and generated quantities
## theta: The relative frequency of success
## x_pred: Predicted number of successes in a replication

model.code(fit)
## ### Model code for the Bayesian First Aid alternative to the binomial test ###
## require(rjags)
## 
## # Setting up the data
## x <- 59 
## n <- 93 
## 
## # The model string written in the JAGS language
## model_string <- "model {
##   x ~ dbinom(theta, n)
##   theta ~ dbeta(1, 1)
##   x_pred ~ dbinom(theta, n)
## }"
## 
## # Running the model
## model <- jags.model(textConnection(model_string), data = list(x = x, n = n), 
##                     n.chains = 3, n.adapt=1000)
## samples <- coda.samples(model, c("theta", "x_pred"), n.iter=5000)
## 
## # Inspecting the posterior
## plot(samples)
## summary(samples)
# To change the flat prior on θ into the Jeffreys prior, we simply change dbeta(1, 1) into dbeta(0.5, 0.5) and rerun the code:

x <- 34
n <- 59

# dbeta(0.5, 0.5)
model_string <-"model {
  x ~ dbinom(theta, n)
  theta ~ dbeta(0.5, 0.5)
  x_pred ~ dbinom(theta, n)
}"

model <- jags.model(textConnection(model_string), data = list(x = x, n = n),
                    n.chains = 4, n.adapt=2000)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 1
##    Unobserved stochastic nodes: 2
##    Total graph size: 5
## 
## Initializing model
samples <- coda.samples(model, c("theta", "x_pred"), n.iter=5000)
summary(samples)
## 
## Iterations = 2001:7000
## Thinning interval = 1 
## Number of chains = 4 
## Sample size per chain = 5000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##           Mean      SD  Naive SE Time-series SE
## theta   0.5742 0.06366 0.0004502       0.000575
## x_pred 33.8830 5.32671 0.0376656       0.043467
## 
## 2. Quantiles for each variable:
## 
##           2.5%     25%     50%     75%   97.5%
## theta   0.4486  0.5307  0.5745  0.6177  0.6979
## x_pred 23.0000 30.0000 34.0000 38.0000 44.0000
# dbeta(2, 2)
model_string <-"model {
  x ~ dbinom(theta, n)
  theta ~ dbeta(2, 2)
  x_pred ~ dbinom(theta, n)
}"

model <- jags.model(textConnection(model_string), data = list(x = x, n = n),
                    n.chains = 4, n.adapt=2000)
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 1
##    Unobserved stochastic nodes: 2
##    Total graph size: 5
## 
## Initializing model
samples <- coda.samples(model, c("theta", "x_pred"), n.iter=5000)
summary(samples)
## 
## Iterations = 2001:7000
## Thinning interval = 1 
## Number of chains = 4 
## Sample size per chain = 5000 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##           Mean     SD  Naive SE Time-series SE
## theta   0.5726 0.0617 0.0004363      0.0005454
## x_pred 33.7707 5.2084 0.0368288      0.0422599
## 
## 2. Quantiles for each variable:
## 
##           2.5%     25%     50%     75%   97.5%
## theta   0.4492  0.5311  0.5735  0.6152  0.6909
## x_pred 23.0000 30.0000 34.0000 37.0000 44.0000
# Ataxia 
fit <- bayes.binom.test(c(sum(dataset$ataxia), (sum(dataset$number_of_cases) - sum(dataset$ataxia))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$ataxia), (sum(dataset$number_of_cases) - sum(dataset$ataxia)))
## number of successes = 36, number of trials = 93
## Estimated relative frequency of success:
##   0.39 
## 95% credible interval:
##   0.29 0.49 
## The relative frequency of success is more than 0.5 by a probability of 0.015 
## and less than 0.5 by a probability of 0.985
#summary(fit)
plot(fit)

# oculomotor_disturbances 
fit <- bayes.binom.test(c(sum(dataset$oculomotor_disturbances), (sum(dataset$number_of_cases) - sum(dataset$oculomotor_disturbances))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$oculomotor_disturbances), (sum(dataset$number_of_cases) -     sum(dataset$oculomotor_disturbances)))
## number of successes = 19, number of trials = 93
## Estimated relative frequency of success:
##   0.21 
## 95% credible interval:
##   0.13 0.29 
## The relative frequency of success is more than 0.5 by a probability of <0.001 
## and less than 0.5 by a probability of >0.999
#summary(fit)
plot(fit)

# action_postural_tremor
fit <- bayes.binom.test(c(sum(dataset$action_postural_tremor), (sum(dataset$number_of_cases) - sum(dataset$action_postural_tremor))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$action_postural_tremor), (sum(dataset$number_of_cases) -     sum(dataset$action_postural_tremor)))
## number of successes = 10, number of trials = 93
## Estimated relative frequency of success:
##   0.11 
## 95% credible interval:
##   0.055 0.18 
## The relative frequency of success is more than 0.5 by a probability of <0.001 
## and less than 0.5 by a probability of >0.999
#summary(fit)
plot(fit)

# parkinsonism
fit <- bayes.binom.test(c(sum(dataset$parkinsonism), (sum(dataset$number_of_cases) - sum(dataset$parkinsonism))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$parkinsonism), (sum(dataset$number_of_cases) -     sum(dataset$parkinsonism)))
## number of successes = 5, number of trials = 93
## Estimated relative frequency of success:
##   0.060 
## 95% credible interval:
##   0.019 0.11 
## The relative frequency of success is more than 0.5 by a probability of <0.001 
## and less than 0.5 by a probability of >0.999
#summary(fit)
plot(fit)

# catatonia
fit <- bayes.binom.test(c(sum(dataset$catatonia), (sum(dataset$number_of_cases) - sum(dataset$catatonia))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$catatonia), (sum(dataset$number_of_cases) - sum(dataset$catatonia)))
## number of successes = 2, number of trials = 93
## Estimated relative frequency of success:
##   0.028 
## 95% credible interval:
##   0.0032 0.066 
## The relative frequency of success is more than 0.5 by a probability of <0.001 
## and less than 0.5 by a probability of >0.999
#summary(fit)
plot(fit)

# chorea
fit <- bayes.binom.test(c(sum(dataset$chorea), (sum(dataset$number_of_cases) - sum(dataset$chorea))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$chorea), (sum(dataset$number_of_cases) - sum(dataset$chorea)))
## number of successes = 1, number of trials = 93
## Estimated relative frequency of success:
##   0.018 
## 95% credible interval:
##   0.00044 0.050 
## The relative frequency of success is more than 0.5 by a probability of <0.001 
## and less than 0.5 by a probability of >0.999
#summary(fit)
plot(fit)

# dystonia
fit <- bayes.binom.test(c(sum(dataset$dystonia), (sum(dataset$number_of_cases) - sum(dataset$dystonia))))
print(fit)
## 
##  Bayesian First Aid binomial test
## 
## data: c(sum(dataset$dystonia), (sum(dataset$number_of_cases) - sum(dataset$dystonia)))
## number of successes = 1, number of trials = 93
## Estimated relative frequency of success:
##   0.018 
## 95% credible interval:
##   0.00032 0.050 
## The relative frequency of success is more than 0.5 by a probability of <0.001 
## and less than 0.5 by a probability of >0.999
#summary(fit)
plot(fit)

# https://bookdown.org/ajkurz/Statistical_Rethinking_recoded/counting-and-classification.html
# https://bookdown.org/ajkurz/Statistical_Rethinking_recoded/small-worlds-and-large-worlds.html#from-counts-to-probability.

tibble(prob = seq(from = 0, to = 1, by = .01)) %>% 
    ggplot(aes(x = prob,
               y = dbinom(x = 59, size = 93, prob = prob))) +
    geom_line() +
    labs(x = "probability",
         y = "binomial likelihood") +
    theme(panel.grid = element_blank())

library(brms)
# Myoclonus (n=59)
fit.flat <-
    brm(data = list(w = 59), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(1, 1), class = Intercept), # flat (uniform) prior
        iter = 4000, warmup = 1000,
        control = list(adapt_delta = .9),
        seed = 4)
## Compiling Stan program...
## Start sampling
## 
## SAMPLING FOR MODEL 'a641937b9a3642a732150451298fd74d' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 2.6e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.26 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 4000 [  0%]  (Warmup)
## Chain 1: Iteration:  400 / 4000 [ 10%]  (Warmup)
## Chain 1: Iteration:  800 / 4000 [ 20%]  (Warmup)
## Chain 1: Iteration: 1001 / 4000 [ 25%]  (Sampling)
## Chain 1: Iteration: 1400 / 4000 [ 35%]  (Sampling)
## Chain 1: Iteration: 1800 / 4000 [ 45%]  (Sampling)
## Chain 1: Iteration: 2200 / 4000 [ 55%]  (Sampling)
## Chain 1: Iteration: 2600 / 4000 [ 65%]  (Sampling)
## Chain 1: Iteration: 3000 / 4000 [ 75%]  (Sampling)
## Chain 1: Iteration: 3400 / 4000 [ 85%]  (Sampling)
## Chain 1: Iteration: 3800 / 4000 [ 95%]  (Sampling)
## Chain 1: Iteration: 4000 / 4000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.020138 seconds (Warm-up)
## Chain 1:                0.04982 seconds (Sampling)
## Chain 1:                0.069958 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'a641937b9a3642a732150451298fd74d' NOW (CHAIN 2).
## Chain 2: Rejecting initial value:
## Chain 2:   Error evaluating the log probability at the initial value.
## Chain 2: Exception: binomial_lpmf: Probability parameter[1] is -1.16817, but must be in the interval [0, 1]  (in 'model8e99a448d84_a641937b9a3642a732150451298fd74d' at line 22)
## 
## Chain 2: Rejecting initial value:
## Chain 2:   Error evaluating the log probability at the initial value.
## Chain 2: Exception: binomial_lpmf: Probability parameter[1] is 1.21464, but must be in the interval [0, 1]  (in 'model8e99a448d84_a641937b9a3642a732150451298fd74d' at line 22)
## 
## Chain 2: 
## Chain 2: Gradient evaluation took 6e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.06 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 4000 [  0%]  (Warmup)
## Chain 2: Iteration:  400 / 4000 [ 10%]  (Warmup)
## Chain 2: Iteration:  800 / 4000 [ 20%]  (Warmup)
## Chain 2: Iteration: 1001 / 4000 [ 25%]  (Sampling)
## Chain 2: Iteration: 1400 / 4000 [ 35%]  (Sampling)
## Chain 2: Iteration: 1800 / 4000 [ 45%]  (Sampling)
## Chain 2: Iteration: 2200 / 4000 [ 55%]  (Sampling)
## Chain 2: Iteration: 2600 / 4000 [ 65%]  (Sampling)
## Chain 2: Iteration: 3000 / 4000 [ 75%]  (Sampling)
## Chain 2: Iteration: 3400 / 4000 [ 85%]  (Sampling)
## Chain 2: Iteration: 3800 / 4000 [ 95%]  (Sampling)
## Chain 2: Iteration: 4000 / 4000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.018856 seconds (Warm-up)
## Chain 2:                0.047937 seconds (Sampling)
## Chain 2:                0.066793 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'a641937b9a3642a732150451298fd74d' NOW (CHAIN 3).
## Chain 3: Rejecting initial value:
## Chain 3:   Error evaluating the log probability at the initial value.
## Chain 3: Exception: binomial_lpmf: Probability parameter[1] is 1.3474, but must be in the interval [0, 1]  (in 'model8e99a448d84_a641937b9a3642a732150451298fd74d' at line 22)
## 
## Chain 3: Rejecting initial value:
## Chain 3:   Error evaluating the log probability at the initial value.
## Chain 3: Exception: binomial_lpmf: Probability parameter[1] is -0.920633, but must be in the interval [0, 1]  (in 'model8e99a448d84_a641937b9a3642a732150451298fd74d' at line 22)
## 
## Chain 3: Rejecting initial value:
## Chain 3:   Error evaluating the log probability at the initial value.
## Chain 3: Exception: binomial_lpmf: Probability parameter[1] is 1.24986, but must be in the interval [0, 1]  (in 'model8e99a448d84_a641937b9a3642a732150451298fd74d' at line 22)
## 
## Chain 3: 
## Chain 3: Gradient evaluation took 6e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.06 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 4000 [  0%]  (Warmup)
## Chain 3: Iteration:  400 / 4000 [ 10%]  (Warmup)
## Chain 3: Iteration:  800 / 4000 [ 20%]  (Warmup)
## Chain 3: Iteration: 1001 / 4000 [ 25%]  (Sampling)
## Chain 3: Iteration: 1400 / 4000 [ 35%]  (Sampling)
## Chain 3: Iteration: 1800 / 4000 [ 45%]  (Sampling)
## Chain 3: Iteration: 2200 / 4000 [ 55%]  (Sampling)
## Chain 3: Iteration: 2600 / 4000 [ 65%]  (Sampling)
## Chain 3: Iteration: 3000 / 4000 [ 75%]  (Sampling)
## Chain 3: Iteration: 3400 / 4000 [ 85%]  (Sampling)
## Chain 3: Iteration: 3800 / 4000 [ 95%]  (Sampling)
## Chain 3: Iteration: 4000 / 4000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.017942 seconds (Warm-up)
## Chain 3:                0.049081 seconds (Sampling)
## Chain 3:                0.067023 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'a641937b9a3642a732150451298fd74d' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 9e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 4000 [  0%]  (Warmup)
## Chain 4: Iteration:  400 / 4000 [ 10%]  (Warmup)
## Chain 4: Iteration:  800 / 4000 [ 20%]  (Warmup)
## Chain 4: Iteration: 1001 / 4000 [ 25%]  (Sampling)
## Chain 4: Iteration: 1400 / 4000 [ 35%]  (Sampling)
## Chain 4: Iteration: 1800 / 4000 [ 45%]  (Sampling)
## Chain 4: Iteration: 2200 / 4000 [ 55%]  (Sampling)
## Chain 4: Iteration: 2600 / 4000 [ 65%]  (Sampling)
## Chain 4: Iteration: 3000 / 4000 [ 75%]  (Sampling)
## Chain 4: Iteration: 3400 / 4000 [ 85%]  (Sampling)
## Chain 4: Iteration: 3800 / 4000 [ 95%]  (Sampling)
## Chain 4: Iteration: 4000 / 4000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.019742 seconds (Warm-up)
## Chain 4:                0.060727 seconds (Sampling)
## Chain 4:                0.080469 seconds (Total)
## Chain 4:
print(summary(fit.flat), digits = 4) # prop=0.6314
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 59) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup samples = 12000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.6314    0.0489   0.5335   0.7244 1.0012     4501     4774
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
plot(fit.flat)

posterior_samples(fit.flat) %>% 
  mutate(n = "n = 59") %>%
  ggplot(aes(x = b_Intercept)) +
  geom_density(fill = "skyblue") +
  labs(x = "proportion myoclonus") +
  xlim(0, 1) +
  theme(panel.grid = element_blank()) +
  facet_wrap(~n)

# https://stats.stackexchange.com/questions/70661/how-does-the-beta-prior-affect-the-posterior-under-a-binomial-likelihood
fit.beta <-
    brm(data = list(w = 59), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept), # weakly informative beta(2, 2) prior
        iter = 4000, warmup = 1000,
        control = list(adapt_delta = .9),
        seed = 4)
## Compiling Stan program...
## Start sampling
## 
## [Stan sampling log omitted: 4 chains x 4000 iterations (1000 warmup) of
##  model '192e1e70f7a686e058a5db7b2f6bed57'. Chains 2 and 3 first rejected a
##  few initial values ("binomial_lpmf: Probability parameter[1] ... must be
##  in the interval [0, 1]"), which is expected when a probability is sampled
##  directly on the identity link; all four chains then sampled normally and
##  finished in under 0.1 seconds each.]
print(summary(fit.beta), digits = 4) # prop = 0.6282
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 59) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup samples = 12000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.6282    0.0488   0.5296   0.7193 1.0001     4641     4894
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
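# Sensitivity check: refit the same count (w = 59 of 93) under the Jeffreys
# beta(0.5, 0.5) prior to gauge how much the estimate depends on the
# beta(2, 2) prior used above.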
fit.Jeffrey <-
    brm(data = list(w = 59), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(0.5, 0.5), class = Intercept), # Jeffreys prior
        iter = 4000, warmup = 1000,
        control = list(adapt_delta = .9),
        seed = 4)
## Compiling Stan program...
## Start sampling
## [Stan sampling log omitted: 4 chains x 4000 iterations (1000 warmup) of
##  model '566279bad7a4a079288994bb4fd4e297'. As above, chains 2 and 3
##  rejected a few out-of-[0, 1] initial values before sampling normally;
##  all chains finished in under 0.1 seconds.]
print(summary(fit.Jeffrey), digits = 4) # prop = 0.6331
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 59) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup samples = 12000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.6331    0.0508   0.5326   0.7306 1.0004     3595     4127
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# https://jrnold.github.io/bayesian_notes/priors.html
# https://cran.r-project.org/web/packages/rstanarm/vignettes/priors.html
# https://www.nist.gov/system/files/documents/2017/05/09/bayesclassday2-1.pdf
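# Because the beta prior is conjugate to the binomial likelihood, the
# posterior has the closed form Beta(a + w, b + n - w) for a Beta(a, b)
# prior with w successes in n trials. A quick analytic cross-check of the
# two MCMC estimates above (a sketch added for verification, not part of
# the original pipeline):
w <- 59; n <- 93
(2 + w) / (2 + 2 + n)        # beta(2, 2) posterior mean: 0.6289 (MCMC: 0.6282)
(0.5 + w) / (0.5 + 0.5 + n)  # Jeffreys posterior mean: 0.6330 (MCMC: 0.6331)
qbeta(c(.025, .975), 0.5 + w, 0.5 + n - w)  # ~(0.53, 0.73), cf. the l-95%/u-95% CI above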
# ataxia (n=36)
fit.beta <-
    brm(data = list(w = 36), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept), # weakly informative beta(2, 2) prior (not Jeffreys)
        iter = 4000, warmup = 1000,
        control = list(adapt_delta = .9),
        seed = 4)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Start sampling
## [Stan sampling log omitted: 4 chains x 4000 iterations (1000 warmup),
##  reusing model '192e1e70f7a686e058a5db7b2f6bed57'. Same out-of-[0, 1]
##  initial-value rejections on chains 2 and 3; all chains finished in under
##  0.1 seconds.]
print(summary(fit.beta), digits = 4)
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 36) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup samples = 12000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.3915    0.0496   0.2971   0.4908 1.0007     4108     4007
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# oculomotor_disturbances (n=19)
fit.beta <-
    brm(data = list(w = 19), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept), # weakly informative beta(2, 2) prior (not Jeffreys)
        iter = 4000, warmup = 1000,
        control = list(adapt_delta = .9),
        seed = 4)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Start sampling
## [Stan sampling log omitted: 4 chains x 4000 iterations (1000 warmup),
##  reusing model '192e1e70f7a686e058a5db7b2f6bed57'. Same out-of-[0, 1]
##  initial-value rejections on chains 2 and 3; all chains finished in under
##  0.1 seconds.]
print(summary(fit.beta), digits = 4)
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 19) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup samples = 12000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.2157    0.0415   0.1413   0.3028 1.0009     3890     3689
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# action_postural_tremor (n=10)
fit.beta <-
    brm(data = list(w = 10), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept), # weakly informative beta(2, 2) prior (not Jeffreys)
        iter = 4000, warmup = 1000,
        control = list(adapt_delta = .9),
        seed = 4)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Start sampling
## [Stan sampling log omitted: 4 chains x 4000 iterations (1000 warmup),
##  reusing model '192e1e70f7a686e058a5db7b2f6bed57'. Same out-of-[0, 1]
##  initial-value rejections on chains 2 and 3; all chains finished in under
##  0.1 seconds.]
## Warning: There were 2 divergent transitions after warmup. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.
## Warning: Examine the pairs() plot to diagnose sampling problems
print(summary(fit.beta), digits = 4)
## Warning: There were 2 divergent transitions after warmup. Increasing adapt_delta
## above 0.9 may help. See http://mc-stan.org/misc/warnings.html#divergent-
## transitions-after-warmup
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 10) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 4000; warmup = 1000; thin = 1;
##          total post-warmup samples = 12000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.1236    0.0338   0.0657   0.1963 1.0013     3576     3566
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
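# The rejected initial values and divergent transitions above come from
# sampling a probability directly on the identity link: the sampler can
# propose intercept values outside [0, 1]. One alternative (a sketch, with
# a weakly informative logit-scale prior chosen here as an assumption, not
# the analysis actually run in this document) is brms's default logit link,
# which keeps the sampler on an unconstrained scale:
fit.logit <-
    brm(data = list(w = 10),
        family = binomial(),  # default link = "logit"
        w | trials(93) ~ 1,
        prior(normal(0, 1.5), class = Intercept),
        iter = 4000, warmup = 1000,
        seed = 4)
# plogis() maps the logit-scale intercept draws back to a proportion, e.g.
# quantile(plogis(posterior_samples(fit.logit)$b_Intercept), c(.025, .5, .975))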
# parkinsonism (n=5)
fit.beta <-
    brm(data = list(w = 5), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept),
        iter = 10000, warmup = 2000,
        control = list(adapt_delta = .9),
        seed = 41)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Start sampling
## [Stan sampling log omitted: 4 chains x 10000 iterations (2000 warmup),
##  reusing model '192e1e70f7a686e058a5db7b2f6bed57'. Out-of-[0, 1]
##  initial-value rejections on chains 2 and 4; all chains finished in
##  about 0.2 seconds.]
## Warning: There were 59 divergent transitions after warmup. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.

## Warning: Examine the pairs() plot to diagnose sampling problems
print(summary(fit.beta), digits = 4)
## Warning: There were 59 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See http://mc-stan.org/misc/
## warnings.html#divergent-transitions-after-warmup
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 5) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 10000; warmup = 2000; thin = 1;
##          total post-warmup samples = 32000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.0720    0.0263   0.0295   0.1323 1.0004     7466     8053
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# catatonia (n=2)
fit.beta <-
    brm(data = list(w = 2), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept),
        iter = 10000, warmup = 2000,
        control = list(adapt_delta = .9),
        seed = 41)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Start sampling
## [Stan sampling log omitted: 4 chains x 10000 iterations (2000 warmup),
##  reusing model '192e1e70f7a686e058a5db7b2f6bed57'. Out-of-[0, 1]
##  initial-value rejections on chains 2 and 4; all chains finished in
##  about 0.2 to 0.3 seconds.]
## Warning: There were 542 divergent transitions after warmup. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.

## Warning: Examine the pairs() plot to diagnose sampling problems
print(summary(fit.beta), digits = 4)
## Warning: There were 542 divergent transitions after warmup. Increasing
## adapt_delta above 0.9 may help. See http://mc-stan.org/misc/
## warnings.html#divergent-transitions-after-warmup
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 2) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 10000; warmup = 2000; thin = 1;
##          total post-warmup samples = 32000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.0412    0.0200   0.0115   0.0884 1.0005     7817     8162
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
# chorea (n=1)
fit.beta <-
    brm(data = list(w = 1), 
        family = binomial(link = "identity"),
        w | trials(93) ~ 1,
        prior(beta(2, 2), class = Intercept),
        iter = 10000, warmup = 2000,
        control = list(adapt_delta = .9),
        seed = 41)
## Compiling Stan program...
## recompiling to avoid crashing R session
## Start sampling
## [Stan sampling log omitted: 4 chains x 10000 iterations (2000 warmup),
##  reusing model '192e1e70f7a686e058a5db7b2f6bed57'. Out-of-[0, 1]
##  initial-value rejections on chains 2 and 4; all chains finished in
##  about 0.3 seconds.]
## Warning: There were 1071 divergent transitions after warmup. See
## http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.

## Warning: Examine the pairs() plot to diagnose sampling problems
print(summary(fit.beta), digits = 4)
## Warning: There were 1071 divergent transitions after warmup.
## Increasing adapt_delta above 0.9 may help. See http://mc-stan.org/misc/
## warnings.html#divergent-transitions-after-warmup
##  Family: binomial 
##   Links: mu = identity 
## Formula: w | trials(93) ~ 1 
##    Data: list(w = 1) (Number of observations: 1) 
## Samples: 4 chains, each with iter = 10000; warmup = 2000; thin = 1;
##          total post-warmup samples = 32000
## 
## Population-Level Effects: 
##           Estimate Est.Error l-95% CI u-95% CI   Rhat Bulk_ESS Tail_ESS
## Intercept   0.0309    0.0176   0.0064   0.0729 1.0016     5107     5427
## 
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
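# Despite the divergence warnings on the smaller counts, every estimate above
# matches the closed-form conjugate posterior: a Beta(2, 2) prior with w
# events in n = 93 cases yields a Beta(2 + w, 95 - w) posterior. A quick
# cross-check (a sketch added for verification, not part of the original
# pipeline):
n <- 93
w <- c(ataxia = 36, oculomotor = 19, tremor = 10,
       parkinsonism = 5, catatonia = 2, chorea = 1)
round((2 + w) / (4 + n), 4)
# gives 0.3918, 0.2165, 0.1237, 0.0722, 0.0412, 0.0309, against MCMC
# estimates of 0.3915, 0.2157, 0.1236, 0.0720, 0.0412, 0.0309 respectively.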