library(factoextra); library(dplyr); library(tidyverse); library(BayesFactor)
## Warning: package 'factoextra' was built under R version 4.3.2
## Loading required package: ggplot2
## Welcome! Want to learn more? See two factoextra-related books at https://goo.gl/ve3WBa
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
## Warning: package 'stringr' was built under R version 4.3.2
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ forcats 1.0.0 ✔ stringr 1.5.1
## ✔ lubridate 1.9.3 ✔ tibble 3.2.1
## ✔ purrr 1.0.2 ✔ tidyr 1.3.0
## ✔ readr 2.1.4
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
## Loading required package: coda
##
## Loading required package: Matrix
##
##
## Attaching package: 'Matrix'
##
##
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
##
##
## ************
## Welcome to BayesFactor 0.9.12-4.5. If you have questions, please contact Richard Morey (richarddmorey@gmail.com).
##
## Type BFManual() to open the manual.
## ************
library(rstanarm); library(BAS); library(broom); library(GGally); library(table1)
## Loading required package: Rcpp
## This is rstanarm version 2.26.1
## - See https://mc-stan.org/rstanarm/articles/priors for changes to default priors!
## - Default priors may change, so it's safest to specify priors, even if equivalent to the defaults.
## - For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores())
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
##
## Attaching package: 'table1'
##
## The following objects are masked from 'package:base':
##
## units, units<-
dongmau = read.csv("E:\\OneDrive - UMP\\R - lenh 2016\\dongmau.csv", header=T)
attach(dongmau)
names(dongmau)
## [1] "ID" "MSBN" "HOTENBN" "gioi" "nam" "tuoi"
## [7] "chandoan" "tcks" "tckpt" "tck1st" "tck2nd" "tckDER"
library(table1)
table1(~ tcks + tckpt + tck1st + tck2nd + tckDER | chandoan, data = dongmau,
       transpose = FALSE)
| | binhthuong (N=32) | shocknhiemtrung (N=33) | xogan (N=28) | Overall (N=93) |
|---|---|---|---|---|
| tcks | | | | |
| Mean (SD) | 31.1 (2.99) | 32.1 (2.84) | 31.6 (3.00) | 31.6 (2.94) |
| Median [Min, Max] | 31.2 [26.0, 36.7] | 32.3 [26.4, 36.7] | 31.4 [27.4, 38.3] | 31.7 [26.0, 38.3] |
| tckpt | | | | |
| Mean (SD) | 1.02 (0.0973) | 1.06 (0.0963) | 1.04 (0.101) | 1.04 (0.0985) |
| Median [Min, Max] | 1.03 [0.850, 1.20] | 1.06 [0.870, 1.22] | 1.03 [0.890, 1.25] | 1.05 [0.850, 1.25] |
| tck1st | | | | |
| Mean (SD) | 283 (66.0) | 394 (160) | 281 (133) | 322 (136) |
| Median [Min, Max] | 270 [188, 545] | 362 [127, 850] | 253 [112, 703] | 295 [112, 850] |
| tck2nd | | | | |
| Mean (SD) | 776 (144) | 1070 (427) | 822 (314) | 895 (342) |
| Median [Min, Max] | 751 [487, 1030] | 1050 [294, 2310] | 762 [359, 1630] | 854 [294, 2310] |
| tckDER | | | | |
| Mean (SD) | 349 (74.0) | 334 (146) | 299 (137) | 329 (123) |
| Median [Min, Max] | 344 [210, 592] | 322 [89.5, 809] | 291 [118, 695] | 322 [89.5, 809] |
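The same group summaries can be cross-checked with dplyr (loaded above); a minimal sketch for tcks:

dongmau %>%
  group_by(chandoan) %>%
  summarise(n = n(), mean = mean(tcks), sd = sd(tcks),
            median = median(tcks), min = min(tcks), max = max(tcks))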
m1 = aov(tcks ~ chandoan, data = dongmau)
summary(m1)
## Df Sum Sq Mean Sq F value Pr(>F)
## chandoan 2 17.3 8.648 1.001 0.372
## Residuals 90 778.0 8.644
TukeyHSD(m1)
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = tcks ~ chandoan, data = dongmau)
##
## $chandoan
## diff lwr upr p adj
## shocknhiemtrung-binhthuong 1.0315341 -0.7067556 2.769824 0.3379399
## xogan-binhthuong 0.5013393 -1.3117495 2.314428 0.7877749
## xogan-shocknhiemtrung -0.5301948 -2.3304181 1.270028 0.7630055
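BayesFactor was loaded at the top but not used yet; a Bayes-factor analogue of the one-way ANOVA above can be sketched as follows (anovaBF() needs a factor grouping variable; dm is just an illustrative working copy):

dm = dongmau
dm$chandoan = factor(dm$chandoan)     # anovaBF() requires a factor
anovaBF(tcks ~ chandoan, data = dm)   # BF10: chandoan-effect model vs. intercept-only model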
m1 = stan_glm(tckpt ~ chandoan, data = dongmau)
summary(m1)
prior_summary(m1)
post.r2 = bayes_R2(m1)
summary(post.r2)
# Multivariable model for a quantitative outcome
# Note: the prior arguments mirror the documented defaults in ?stan_glm, and
# prior_PD = TRUE samples from the prior (predictive) distribution only, so the
# data are not conditioned on; the estimates below therefore describe the priors.
m.da = stan_glm(tck1st ~ chandoan, data = dongmau,
                prior = default_prior_coef(family),
                prior_intercept = default_prior_intercept(family),
                prior_PD = TRUE,
                adapt_delta = NULL)
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.001907 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 19.07 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.113 seconds (Warm-up)
## Chain 1: 0.03 seconds (Sampling)
## Chain 1: 0.143 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.119 seconds (Warm-up)
## Chain 2: 0.03 seconds (Sampling)
## Chain 2: 0.149 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 1e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.101 seconds (Warm-up)
## Chain 3: 0.028 seconds (Sampling)
## Chain 3: 0.129 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 1.5e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.15 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.131 seconds (Warm-up)
## Chain 4: 0.028 seconds (Sampling)
## Chain 4: 0.159 seconds (Total)
## Chain 4:
summary(m.da)
##
## Model Info:
## function: stan_glm
## family: gaussian [identity]
## formula: tck1st ~ chandoan
## algorithm: sampling
## sample: 4000 (posterior sample size)
## priors: see help('prior_summary')
## observations: 93
## predictors: 3
##
## Estimates:
## mean sd 10% 50% 90%
## (Intercept) 327.1 495.1 -305.2 326.0 954.4
## chandoanshocknhiemtrung 3.5 711.1 -914.7 3.4 928.6
## chandoanxogan -10.9 752.5 -972.5 -4.9 940.9
## sigma 136.9 132.1 15.3 99.0 308.6
##
## MCMC diagnostics
## mcse Rhat n_eff
## (Intercept) 7.7 1.0 4151
## chandoanshocknhiemtrung 11.1 1.0 4136
## chandoanxogan 11.3 1.0 4453
## sigma 2.0 1.0 4447
## log-posterior 0.0 1.0 1427
##
## For each parameter, mcse is Monte Carlo standard error, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence Rhat=1).
prior_summary(m.da)
## Priors for model 'm.da'
## ------
## Intercept (after predictors centered)
## Specified prior:
## ~ normal(location = 322, scale = 2.5)
## Adjusted prior:
## ~ normal(location = 322, scale = 340)
##
## Coefficients
## Specified prior:
## ~ normal(location = [0,0], scale = [2.5,2.5])
## Adjusted prior:
## ~ normal(location = [0,0], scale = [707.47,737.91])
##
## Auxiliary (sigma)
## Specified prior:
## ~ exponential(rate = 1)
## Adjusted prior:
## ~ exponential(rate = 0.0073)
## ------
## See help('prior_summary.stanreg') for more details
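The "Adjusted prior" scales come from rstanarm's default autoscaling (2.5 times sd(y) for the intercept, additionally divided by sd(x) for each coefficient); a quick check against the data, using the group labels shown above:

sd_y = sd(dongmau$tck1st)
2.5 * sd_y                                                          # intercept scale, about 340
2.5 * sd_y / sd(as.numeric(dongmau$chandoan == "shocknhiemtrung"))  # about 707
2.5 * sd_y / sd(as.numeric(dongmau$chandoan == "xogan"))            # about 738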
post.r2.m.da = bayes_R2(m.da)
summary(post.r2.m.da)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0002059 0.7550779 0.9350035 0.8240120 0.9892565 1.0000000
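Because m.da was run with prior_PD = TRUE, the R2 draws above describe the prior rather than the posterior. A minimal sketch of the corresponding posterior fit (object name is illustrative; refresh = 0 just suppresses the sampler log):

m.da.post = stan_glm(tck1st ~ chandoan, data = dongmau, refresh = 0)
summary(m.da.post)
posterior_interval(m.da.post, prob = 0.95)   # 95% credible intervals
summary(bayes_R2(m.da.post))                 # posterior R-squared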
library(nnet); require(brms)
## Loading required package: brms
## Loading 'brms' package (version 2.20.4). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
##
## Attaching package: 'brms'
## The following objects are masked from 'package:rstanarm':
##
## dirichlet, exponential, get_y, lasso, ngrps
## The following object is masked from 'package:stats':
##
## ar
# multinom() treats tck1st as a nominal outcome with one category per distinct value, hence the per-value coefficient rows and NaN standard errors below
m.2 = multinom(tck1st ~ chandoan, data = dongmau)
## # weights: 372 (276 variable)
## initial value 421.531753
## iter 10 value 323.622807
## iter 20 value 319.613088
## iter 30 value 319.590047
## final value 319.590028
## converged
summary(m.2)
## Warning in sqrt(diag(vc)): NaNs produced
## Call:
## multinom(formula = tck1st ~ chandoan, data = dongmau)
##
## Coefficients:
## (Intercept) chandoanshocknhiemtrung chandoanxogan
## 115.573 -48.65432 -30.79723 48.65439
## 126.559 -22.01389 72.46810 -27.23003
## 141.879 -22.01388 72.46811 -27.23003
## 146.397 -48.65429 -30.79723 48.65442
## 156.641 -48.65428 -30.79723 48.65443
## 166.004 -48.65435 -30.79723 48.65436
## 170.105 -48.65435 -30.79723 48.65436
## 174.416 -22.01388 72.46810 -27.23003
## 188.307 33.11988 -46.72115 -49.38600
## 195.077 -22.01388 72.46810 -27.23003
## 198.757 33.11988 -46.72115 -49.38600
## 212.563 -48.65434 -30.79723 48.65437
## 214.184 -48.65434 -30.79723 48.65437
## 216.285 -48.65434 -30.79723 48.65437
## 216.934 -48.65434 -30.79723 48.65437
## 222.462 -48.65435 -30.79723 48.65436
## 224.259 -48.65427 -30.79723 48.65444
## 224.547 33.11988 -46.72115 -49.38600
## 226.97 33.11988 -46.72115 -49.38600
## 230.662 33.11988 -46.72115 -49.38600
## 232.754 33.11988 -46.72115 -49.38600
## 236.494 33.11988 -46.72115 -49.38600
## 238.336 33.11988 -46.72115 -49.38600
## 242.188 -22.01388 72.46810 -27.23003
## 243.385 33.11988 -46.72115 -49.38600
## 248.273 -48.65434 -30.79723 48.65437
## 248.286 33.11988 -46.72115 -49.38600
## 248.707 -48.65435 -30.79723 48.65436
## 256.367 33.11988 -46.72115 -49.38600
## 257.016 -48.65433 -30.79723 48.65438
## 257.281 33.11988 -46.72115 -49.38600
## 257.49 33.11988 -46.72115 -49.38600
## 258.079 33.11988 -46.72115 -49.38600
## 260.441 33.11988 -46.72115 -49.38600
## 261.813 -48.65434 -30.79723 48.65437
## 263.79 -48.65434 -30.79723 48.65437
## 265.239 33.11988 -46.72115 -49.38600
## 275.28 33.11988 -46.72115 -49.38600
## 279.88 -48.65439 -30.79723 48.65432
## 281.012 -22.01387 72.46811 -27.23003
## 282.262 33.11988 -46.72115 -49.38600
## 283.358 33.11988 -46.72115 -49.38600
## 291.704 33.11988 -46.72115 -49.38600
## 292.727 33.11988 -46.72115 -49.38600
## 292.751 -48.65440 -30.79723 48.65431
## 295.139 33.11988 -46.72115 -49.38600
## 296.141 33.11988 -46.72115 -49.38600
## 298.364 -48.65439 -30.79723 48.65432
## 299.059 -22.01388 72.46810 -27.23003
## 299.362 33.11988 -46.72115 -49.38600
## 299.876 33.11988 -46.72115 -49.38600
## 307.204 33.11988 -46.72115 -49.38600
## 307.857 -22.01387 72.46811 -27.23003
## 313.172 -22.01388 72.46810 -27.23003
## 317.241 -22.01387 72.46812 -27.23003
## 318.942 -48.65439 -30.79723 48.65432
## 329.934 -48.65442 -30.79723 48.65429
## 331.483 -22.01388 72.46810 -27.23003
## 332.894 -22.01387 72.46812 -27.23003
## 333.085 -22.01389 72.46810 -27.23003
## 333.094 -22.01388 72.46810 -27.23003
## 337.244 -48.65442 -30.79723 48.65429
## 337.48 33.11988 -46.72115 -49.38600
## 337.741 33.11988 -46.72115 -49.38600
## 338.479 -22.01389 72.46809 -27.23003
## 348.561 -22.01385 72.46813 -27.23003
## 357.583 33.11988 -46.72115 -49.38600
## 361.822 -22.01388 72.46810 -27.23003
## 368.229 33.11988 -46.72115 -49.38600
## 370.558 33.11988 -46.72115 -49.38600
## 382.452 -22.01389 72.46809 -27.23003
## 401.949 -22.01386 72.46812 -27.23003
## 406.774 -48.65443 -30.79723 48.65428
## 407.087 -22.01388 72.46811 -27.23003
## 412.589 -48.65443 -30.79723 48.65428
## 416.469 -22.01389 72.46809 -27.23003
## 423.73 -22.01390 72.46809 -27.23003
## 425.193 -22.01393 72.46806 -27.23003
## 453.294 -22.01390 72.46809 -27.23003
## 467.159 -22.01391 72.46807 -27.23003
## 483.342 -22.01390 72.46809 -27.23003
## 506.116 -22.01391 72.46807 -27.23003
## 511.718 -48.65435 -30.79723 48.65436
## 518.452 -22.01393 72.46806 -27.23003
## 529.056 -22.01392 72.46807 -27.23003
## 535.7 -48.65437 -30.79723 48.65434
## 544.843 33.11988 -46.72115 -49.38600
## 565.907 -22.01392 72.46807 -27.23003
## 594.786 -22.01393 72.46806 -27.23003
## 702.818 -48.65435 -30.79723 48.65436
## 808.405 -22.01391 72.46808 -27.23003
## 849.882 -22.01393 72.46806 -27.23003
##
## Std. Errors:
## (Intercept) chandoanshocknhiemtrung chandoanxogan
## 115.573 0.7070930 NaN 7.070930e-01
## 126.559 0.4923642 4.923642e-01 NaN
## 141.879 0.4923605 4.923605e-01 7.652773e-06
## 146.397 0.7070841 1.269046e-05 7.070841e-01
## 156.641 0.7070791 NaN 7.070791e-01
## 166.004 0.7071037 NaN 7.071037e-01
## 170.105 0.7071038 1.252201e-05 7.071038e-01
## 174.416 0.4923621 4.923621e-01 5.837152e-06
## 188.307 300.9819240 1.172057e-05 3.365058e+03
## 195.077 0.4923617 4.923617e-01 NaN
## 198.757 300.9819240 NaN 3.365058e+03
## 212.563 0.7071026 3.818811e-06 7.071026e-01
## 214.184 0.7071004 NaN 7.071004e-01
## 216.285 0.7071001 8.223406e-06 7.071001e-01
## 216.934 0.7071010 2.650556e-06 7.071010e-01
## 222.462 0.7071037 NaN 7.071037e-01
## 224.259 0.7070761 NaN 7.070761e-01
## 224.547 300.9819240 7.546290e-06 3.365058e+03
## 226.97 300.9819240 NaN 3.365058e+03
## 230.662 300.9819240 2.656620e-05 3.365058e+03
## 232.754 300.9819240 5.073577e-06 3.365058e+03
## 236.494 300.9819240 2.789580e-06 3.365058e+03
## 238.336 300.9819240 3.117514e-05 3.365058e+03
## 242.188 0.4923611 4.923611e-01 2.631290e-06
## 243.385 300.9819240 NaN 3.365058e+03
## 248.273 0.7071010 9.788949e-06 7.071010e-01
## 248.286 300.9819240 1.154506e-05 3.365058e+03
## 248.707 0.7071035 4.341190e-06 7.071035e-01
## 256.367 300.9819240 9.440645e-06 3.365058e+03
## 257.016 0.7070994 5.719719e-06 7.070994e-01
## 257.281 300.9819240 NaN 3.365058e+03
## 257.49 300.9819240 5.418528e-06 3.365058e+03
## 258.079 300.9819240 NaN 3.365058e+03
## 260.441 300.9819240 1.070475e-06 3.365058e+03
## 261.813 0.7071012 4.678173e-06 7.071012e-01
## 263.79 0.7070999 NaN 7.070999e-01
## 265.239 300.9819240 4.428544e-06 3.365058e+03
## 275.28 300.9819240 NaN 3.365058e+03
## 279.88 0.7071207 NaN 7.071207e-01
## 281.012 0.4923571 4.923571e-01 NaN
## 282.262 300.9819240 1.595658e-06 3.365058e+03
## 283.358 300.9819240 NaN 3.365058e+03
## 291.704 300.9819240 NaN 3.365058e+03
## 292.727 300.9819240 1.155678e-12 3.365058e+03
## 292.751 0.7071243 NaN 7.071243e-01
## 295.139 300.9819240 1.914460e-12 3.365058e+03
## 296.141 300.9819240 NaN 3.365058e+03
## 298.364 0.7071175 1.100012e-48 7.071175e-01
## 299.059 0.4923615 4.923615e-01 1.784572e-13
## 299.362 300.9819240 2.634419e-19 3.365058e+03
## 299.876 300.9819240 2.682592e-19 3.365058e+03
## 307.204 300.9819240 2.670185e-19 3.365058e+03
## 307.857 0.4923573 4.923573e-01 1.285242e-13
## 313.172 0.4923615 4.923615e-01 1.999354e-14
## 317.241 0.4923550 4.923550e-01 NaN
## 318.942 0.7071208 2.135141e-48 7.071208e-01
## 329.934 0.7071304 2.357249e-48 7.071304e-01
## 331.483 0.4923618 4.923618e-01 NaN
## 332.894 0.4923528 4.923528e-01 2.669332e-13
## 333.085 0.4923646 4.923646e-01 2.974348e-13
## 333.094 0.4923612 4.923612e-01 1.609684e-13
## 337.244 0.7071289 2.255402e-48 7.071289e-01
## 337.48 300.9819240 2.633732e-19 3.365058e+03
## 337.741 300.9819240 2.697459e-19 3.365058e+03
## 338.479 0.4923663 4.923663e-01 NaN
## 348.561 0.4923477 4.923477e-01 1.716611e-13
## 357.583 300.9819240 2.671205e-19 3.365058e+03
## 361.822 0.4923613 4.923613e-01 4.896137e-14
## 368.229 300.9819240 2.677762e-19 3.365058e+03
## 370.558 300.9819240 2.718654e-19 3.365058e+03
## 382.452 0.4923663 4.923663e-01 2.218428e-13
## 401.949 0.4923512 4.923512e-01 1.982719e-13
## 406.774 0.7071317 1.804019e-48 7.071317e-01
## 407.087 0.4923599 4.923599e-01 3.073899e-13
## 412.589 0.7071347 1.594123e-48 7.071347e-01
## 416.469 0.4923668 4.923668e-01 1.894405e-13
## 423.73 0.4923677 4.923677e-01 NaN
## 425.193 0.4923824 4.923824e-01 NaN
## 453.294 0.4923678 4.923678e-01 1.959853e-13
## 467.159 0.4923753 4.923753e-01 NaN
## 483.342 0.4923678 4.923678e-01 1.721581e-13
## 506.116 0.4923753 4.923753e-01 1.733862e-13
## 511.718 0.7071050 1.688959e-48 7.071050e-01
## 518.452 0.4923825 4.923825e-01 1.472799e-13
## 529.056 0.4923794 4.923794e-01 2.302823e-13
## 535.7 0.7071132 1.931807e-48 7.071132e-01
## 544.843 300.9819240 2.652998e-19 3.365058e+03
## 565.907 0.4923769 4.923769e-01 2.373366e-13
## 594.786 0.4923824 4.923824e-01 1.452461e-13
## 702.818 0.7071041 1.990696e-48 7.071041e-01
## 808.405 0.4923745 4.923745e-01 NaN
## 849.882 0.4923829 4.923829e-01 3.038411e-13
##
## Residual Deviance: 639.1801
## AIC: 1191.18
library(bayesplot) ;library(ggplot2)
## This is bayesplot version 1.10.0
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
## * Does _not_ affect other ggplot2 plots
## * See ?bayesplot_theme_set for details on theme setting
##
## Attaching package: 'bayesplot'
## The following object is masked from 'package:brms':
##
## rhat
# Density of each coagulation marker by diagnosis group
p = ggplot(dongmau, aes(x = tcks))
p + geom_density(aes(fill = chandoan), alpha = 0.5) + theme_light(base_size = 12)
p = ggplot(dongmau, aes(x = tckpt))
p + geom_density(aes(fill = chandoan), alpha = 0.5) + theme_light(base_size = 12)
p = ggplot(dongmau, aes(x = tck1st))
p + geom_density(aes(fill = chandoan), alpha = 0.5) + theme_light(base_size = 12)
p = ggplot(dongmau, aes(x = tck2nd))
p + geom_density(aes(fill = chandoan), alpha = 0.5) + theme_light(base_size = 12)
p = ggplot(dongmau, aes(x = tckDER))
p + geom_density(aes(fill = chandoan), alpha = 0.5) + theme_light(base_size = 12)
# Load libraries
library(rstan) ; library(shinystan) ; library(rstanarm) ; library(brms)
## Warning: package 'rstan' was built under R version 4.3.2
## Loading required package: StanHeaders
##
## rstan version 2.32.3 (Stan version 2.26.1)
## For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores()).
## To avoid recompilation of unchanged Stan programs, we recommend calling
## rstan_options(auto_write = TRUE)
## For within-chain threading using `reduce_sum()` or `map_rect()` Stan functions,
## change `threads_per_chain` option:
## rstan_options(threads_per_chain = 1)
## Do not specify '-march=native' in 'LOCAL_CPPFLAGS' or a Makevars file
##
## Attaching package: 'rstan'
## The following object is masked from 'package:coda':
##
## traceplot
## The following object is masked from 'package:tidyr':
##
## extract
## Loading required package: shiny
## Warning: package 'shiny' was built under R version 4.3.2
##
## This is shinystan version 2.6.0
set.seed(123)
prior0 = get_prior(tcks ~ chandoan, family = gaussian, data=dongmau)
bf.1 = brm(data=dongmau, tcks ~ chandoan, prior0,
family = gaussian, chains = 2, iter = 100)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 2.5e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.25 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: WARNING: There aren't enough warmup iterations to fit the
## Chain 1: three stages of adaptation as currently configured.
## Chain 1: Reducing each adaptation stage to 15%/75%/10% of
## Chain 1: the given number of warmup iterations:
## Chain 1: init_buffer = 7
## Chain 1: adapt_window = 38
## Chain 1: term_buffer = 5
## Chain 1:
## Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 1: Iteration: 100 / 100 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.004 seconds (Warm-up)
## Chain 1: 0.003 seconds (Sampling)
## Chain 1: 0.007 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1.3e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: WARNING: There aren't enough warmup iterations to fit the
## Chain 2: three stages of adaptation as currently configured.
## Chain 2: Reducing each adaptation stage to 15%/75%/10% of
## Chain 2: the given number of warmup iterations:
## Chain 2: init_buffer = 7
## Chain 2: adapt_window = 38
## Chain 2: term_buffer = 5
## Chain 2:
## Chain 2: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 2: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 2: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 2: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 2: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 2: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 2: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 2: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 2: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 2: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 2: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 2: Iteration: 100 / 100 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.006 seconds (Warm-up)
## Chain 2: 0.003 seconds (Sampling)
## Chain 2: 0.009 seconds (Total)
## Chain 2:
## Warning: The largest R-hat is 1.07, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
bf.1$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=100; warmup=50; thin=1;
## post-warmup draws per chain=50, total post-warmup draws=100.
##
## mean se_mean sd 2.5% 25% 50% 75%
## b_Intercept 31.06 0.06 0.48 30.18 30.76 31.00 31.36
## b_chandoanshocknhiemtrung 0.96 0.09 0.63 -0.23 0.55 0.96 1.29
## b_chandoanxogan 0.48 0.11 0.71 -0.77 0.04 0.40 1.02
## sigma 2.95 0.02 0.18 2.65 2.84 2.93 3.06
## lprior -4.18 0.01 0.05 -4.30 -4.21 -4.17 -4.14
## lp__ -235.32 0.17 1.10 -238.19 -235.71 -235.16 -234.48
## 97.5% n_eff Rhat
## b_Intercept 31.97 62 0.99
## b_chandoanshocknhiemtrung 2.17 45 1.00
## b_chandoanxogan 2.09 46 1.02
## sigma 3.32 57 1.01
## lprior -4.09 50 1.02
## lp__ -234.00 44 1.00
##
## Samples were drawn using NUTS(diag_e) at Tue Nov 21 09:20:13 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
pairs(bf.1)
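The low-ESS and R-hat warnings above come from the deliberately short run (2 chains, iter = 100); a sketch of a longer run that should resolve them (object name is illustrative):

bf.1_long = brm(data = dongmau, tcks ~ chandoan, prior0,
                family = gaussian, chains = 4, iter = 2000, seed = 123)
bf.1_long$fit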
# Removing the intercept (tcks ~ chandoan - 1) gives one coefficient per diagnosis
# group (cell means), so hypothesis() below can compare the groups directly.
prior1 = get_prior(tcks ~ chandoan - 1, family = gaussian, data = dongmau)
bf.2 = brm(data = dongmau, tcks ~ chandoan - 1, prior1,
           family = gaussian, chains = 2, iter = 100)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 2.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: WARNING: There aren't enough warmup iterations to fit the
## Chain 1: three stages of adaptation as currently configured.
## Chain 1: Reducing each adaptation stage to 15%/75%/10% of
## Chain 1: the given number of warmup iterations:
## Chain 1: init_buffer = 7
## Chain 1: adapt_window = 38
## Chain 1: term_buffer = 5
## Chain 1:
## Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 1: Iteration: 100 / 100 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.005 seconds (Warm-up)
## Chain 1: 0.002 seconds (Sampling)
## Chain 1: 0.007 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 6e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.06 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: WARNING: There aren't enough warmup iterations to fit the
## Chain 2: three stages of adaptation as currently configured.
## Chain 2: Reducing each adaptation stage to 15%/75%/10% of
## Chain 2: the given number of warmup iterations:
## Chain 2: init_buffer = 7
## Chain 2: adapt_window = 38
## Chain 2: term_buffer = 5
## Chain 2:
## Chain 2: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 2: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 2: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 2: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 2: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 2: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 2: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 2: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 2: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 2: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 2: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 2: Iteration: 100 / 100 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.003 seconds (Warm-up)
## Chain 2: 0.003 seconds (Sampling)
## Chain 2: 0.006 seconds (Total)
## Chain 2:
## Warning: The largest R-hat is 1.06, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
bf.2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=100; warmup=50; thin=1;
## post-warmup draws per chain=50, total post-warmup draws=100.
##
## mean se_mean sd 2.5% 25% 50% 75%
## b_chandoanbinhthuong 31.07 0.07 0.57 29.95 30.71 31.09 31.50
## b_chandoanshocknhiemtrung 32.06 0.05 0.49 31.11 31.84 32.03 32.31
## b_chandoanxogan 31.56 0.07 0.59 30.39 31.26 31.51 31.83
## sigma 2.95 0.02 0.18 2.58 2.83 2.92 3.03
## lprior -1.97 0.00 0.05 -2.10 -2.00 -1.97 -1.94
## lp__ -233.55 0.24 1.44 -236.48 -234.45 -233.18 -232.34
## 97.5% n_eff Rhat
## b_chandoanbinhthuong 32.12 73 0.99
## b_chandoanshocknhiemtrung 33.17 82 1.00
## b_chandoanxogan 32.75 64 1.01
## sigma 3.38 144 0.98
## lprior -1.87 143 0.98
## lp__ -231.74 36 1.04
##
## Samples were drawn using NUTS(diag_e) at Tue Nov 21 09:21:02 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
pairs(bf.2)
hypothesis(bf.2, "chandoanxogan > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanxogan)-(... > 0 0.49 0.76 -0.57 1.67 2.45
## Post.Prob Star
## 1 0.71
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "chandoanshocknhiemtrung > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanshocknhi... > 0 0.99 0.78 -0.29 2.22 10.11
## Post.Prob Star
## 1 0.91
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
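The same one-sided comparisons can be read straight off the posterior draws; a minimal sketch with as_draws_df() (re-exported by brms), whose proportions should match the Post.Prob values above:

draws = as_draws_df(bf.2)
mean(draws$b_chandoanshocknhiemtrung > draws$b_chandoanbinhthuong)  # about 0.91
mean(draws$b_chandoanxogan > draws$b_chandoanbinhthuong)            # about 0.71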
plot(bf.2, ignore_prior = T, theme = ggplot2::theme())
marginal_effects(bf.2, probs=c(0.05,0.95), conditions=chandoan)
## Warning: Method 'marginal_effects' is deprecated. Please use
## 'conditional_effects' instead.
## Warning: Argument 'probs' is deprecated. Please use 'prob' instead.
## Warning: The following variables in 'conditions' are not part of the model:
## 'conditions'
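Per the deprecation warnings, the current equivalent call is conditional_effects() with prob; the conditions argument is dropped because the warning notes it is not part of the model:

conditional_effects(bf.2, prob = 0.90)   # 90% interval, i.e. the 5-95% band requested above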
#launch_shinystan(bf.2, rstudio = getOption("shinystan.rstudio"))
prior1 = get_prior(tckpt ~ chandoan-1, family = gaussian, data=dongmau)
bf.2 = brm(data = dongmau, tckpt ~ chandoan-1, prior1,
family = gaussian, chains = 2, iter = 100)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 2.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: WARNING: There aren't enough warmup iterations to fit the
## Chain 1: three stages of adaptation as currently configured.
## Chain 1: Reducing each adaptation stage to 15%/75%/10% of
## Chain 1: the given number of warmup iterations:
## Chain 1: init_buffer = 7
## Chain 1: adapt_window = 38
## Chain 1: term_buffer = 5
## Chain 1:
## Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 1: Iteration: 100 / 100 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.002 seconds (Warm-up)
## Chain 1: 0.002 seconds (Sampling)
## Chain 1: 0.004 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 6e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.06 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: WARNING: There aren't enough warmup iterations to fit the
## Chain 2: three stages of adaptation as currently configured.
## Chain 2: Reducing each adaptation stage to 15%/75%/10% of
## Chain 2: the given number of warmup iterations:
## Chain 2: init_buffer = 7
## Chain 2: adapt_window = 38
## Chain 2: term_buffer = 5
## Chain 2:
## Chain 2: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 2: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 2: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 2: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 2: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 2: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 2: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 2: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 2: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 2: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 2: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 2: Iteration: 100 / 100 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.001 seconds (Warm-up)
## Chain 2: 0.001 seconds (Sampling)
## Chain 2: 0.002 seconds (Total)
## Chain 2:
## Warning: The largest R-hat is 1.21, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
bf.2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=100; warmup=50; thin=1;
## post-warmup draws per chain=50, total post-warmup draws=100.
##
## mean se_mean sd 2.5% 25% 50% 75% 97.5%
## b_chandoanbinhthuong 1.02 0.00 0.02 0.99 1.01 1.02 1.03 1.05
## b_chandoanshocknhiemtrung 1.06 0.00 0.02 1.03 1.05 1.06 1.07 1.10
## b_chandoanxogan 1.04 0.00 0.02 0.99 1.02 1.04 1.05 1.07
## sigma 0.10 0.00 0.01 0.09 0.09 0.10 0.11 0.12
## lprior -1.23 0.00 0.00 -1.23 -1.23 -1.23 -1.22 -1.22
## lp__ 79.71 0.16 1.42 76.32 79.07 79.97 80.81 81.54
## n_eff Rhat
## b_chandoanbinhthuong 200 1.01
## b_chandoanshocknhiemtrung 112 0.99
## b_chandoanxogan 198 0.99
## sigma 14 1.23
## lprior 14 1.23
## lp__ 78 0.98
##
## Samples were drawn using NUTS(diag_e) at Tue Nov 21 09:21:48 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
pairs(bf.2)
# Hypothesis tests
hypothesis(bf.2, "chandoanxogan > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanxogan)-(... > 0 0.01 0.03 -0.03 0.06 1.86
## Post.Prob Star
## 1 0.65
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "chandoanshocknhiemtrung > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanshocknhi... > 0 0.04 0.02 0 0.08 19
## Post.Prob Star
## 1 0.95
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(bf.2, ignore_prior = T, theme = ggplot2::theme())
marginal_effects(bf.2, probs=c(0.05,0.95), conditions=chandoan)
#launch_shinystan(bf.2, rstudio = getOption("shinystan.rstudio"))
## tck1st
prior1 = get_prior(tck1st~ chandoan-1, family = gaussian, data=dongmau)
bf.2 = brm(data = dongmau, tck1st ~ chandoan-1, prior1,
family = gaussian, chains = 2, iter = 100)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 2.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: WARNING: There aren't enough warmup iterations to fit the
## Chain 1: three stages of adaptation as currently configured.
## Chain 1: Reducing each adaptation stage to 15%/75%/10% of
## Chain 1: the given number of warmup iterations:
## Chain 1: init_buffer = 7
## Chain 1: adapt_window = 38
## Chain 1: term_buffer = 5
## Chain 1:
## Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 1: Iteration: 100 / 100 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.054 seconds (Warm-up)
## Chain 1: 0.137 seconds (Sampling)
## Chain 1: 0.191 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 7e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.07 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: WARNING: There aren't enough warmup iterations to fit the
## Chain 2: three stages of adaptation as currently configured.
## Chain 2: Reducing each adaptation stage to 15%/75%/10% of
## Chain 2: the given number of warmup iterations:
## Chain 2: init_buffer = 7
## Chain 2: adapt_window = 38
## Chain 2: term_buffer = 5
## Chain 2:
## Chain 2: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 2: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 2: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 2: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 2: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 2: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 2: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 2: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 2: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 2: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 2: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 2: Iteration: 100 / 100 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.126 seconds (Warm-up)
## Chain 2: 0.247 seconds (Sampling)
## Chain 2: 0.373 seconds (Total)
## Chain 2:
## Warning: There were 64 transitions after warmup that exceeded the maximum treedepth. Increase max_treedepth above 10. See
## https://mc-stan.org/misc/warnings.html#maximum-treedepth-exceeded
## Warning: Examine the pairs() plot to diagnose sampling problems
## Warning: The largest R-hat is 1.14, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
bf.2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=100; warmup=50; thin=1;
## post-warmup draws per chain=50, total post-warmup draws=100.
##
## mean se_mean sd 2.5% 25% 50% 75%
## b_chandoanbinhthuong 281.49 3.93 28.10 237.11 259.04 281.93 300.81
## b_chandoanshocknhiemtrung 397.36 2.34 22.08 358.21 381.60 396.65 412.74
## b_chandoanxogan 276.55 3.54 22.95 236.71 258.90 278.20 295.32
## sigma 126.08 1.04 8.96 110.13 118.88 124.86 132.26
## lprior -5.80 0.01 0.11 -6.04 -5.87 -5.78 -5.71
## lp__ -583.74 0.32 1.59 -587.17 -584.61 -583.36 -582.66
## 97.5% n_eff Rhat
## b_chandoanbinhthuong 340.74 51 1.02
## b_chandoanshocknhiemtrung 440.08 89 1.00
## b_chandoanxogan 315.80 42 1.00
## sigma 145.42 75 1.00
## lprior -5.61 74 1.00
## lp__ -581.77 24 1.09
##
## Samples were drawn using NUTS(diag_e) at Tue Nov 21 09:22:31 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
pairs(bf.2)
# Hypothesis tests
hypothesis(bf.2, "chandoanxogan > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanxogan)-(... > 0 -4.94 37.38 -64.03 50.94 0.89
## Post.Prob Star
## 1 0.47
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "chandoanshocknhiemtrung > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanshocknhi... > 0 115.87 37.69 64.07 163.17 Inf
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(bf.2, ignore_prior = T, theme = ggplot2::theme())
marginal_effects(bf.2, probs=c(0.05,0.95), conditions=chandoan)
## Warning: Method 'marginal_effects' is deprecated. Please use
## 'conditional_effects' instead.
## Warning: Argument 'probs' is deprecated. Please use 'prob' instead.
## Warning: The following variables in 'conditions' are not part of the model:
## 'conditions'
#launch_shinystan(bf.2, rstudio = getOption("shinystan.rstudio"))
prior1 = get_prior(tck2nd ~ chandoan-1, family = gaussian, data=dongmau)
bf.2 = brm(data = dongmau, tck2nd ~ chandoan-1, prior1,
family = gaussian, chains = 2, iter = 100)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000103 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.03 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: WARNING: There aren't enough warmup iterations to fit the
## Chain 1: three stages of adaptation as currently configured.
## Chain 1: Reducing each adaptation stage to 15%/75%/10% of
## Chain 1: the given number of warmup iterations:
## Chain 1: init_buffer = 7
## Chain 1: adapt_window = 38
## Chain 1: term_buffer = 5
## Chain 1:
## Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 1: Iteration: 100 / 100 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.088 seconds (Warm-up)
## Chain 1: 0.169 seconds (Sampling)
## Chain 1: 0.257 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 7e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.07 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: WARNING: There aren't enough warmup iterations to fit the
## Chain 2: three stages of adaptation as currently configured.
## Chain 2: Reducing each adaptation stage to 15%/75%/10% of
## Chain 2: the given number of warmup iterations:
## Chain 2: init_buffer = 7
## Chain 2: adapt_window = 38
## Chain 2: term_buffer = 5
## Chain 2:
## Chain 2: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 2: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 2: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 2: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 2: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 2: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 2: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 2: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 2: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 2: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 2: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 2: Iteration: 100 / 100 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.105 seconds (Warm-up)
## Chain 2: 0.176 seconds (Sampling)
## Chain 2: 0.281 seconds (Total)
## Chain 2:
## Warning: There were 84 transitions after warmup that exceeded the maximum treedepth. Increase max_treedepth above 10. See
## https://mc-stan.org/misc/warnings.html#maximum-treedepth-exceeded
## Warning: There were 1 chains where the estimated Bayesian Fraction of Missing Information was low. See
## https://mc-stan.org/misc/warnings.html#bfmi-low
## Warning: Examine the pairs() plot to diagnose sampling problems
## Warning: The largest R-hat is 1.43, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
bf.2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=100; warmup=50; thin=1;
## post-warmup draws per chain=50, total post-warmup draws=100.
##
## mean se_mean sd 2.5% 25% 50% 75%
## b_chandoanbinhthuong 781.70 11.05 55.48 688.30 750.01 782.21 813.69
## b_chandoanshocknhiemtrung 1035.65 27.72 90.36 765.46 1015.99 1061.58 1086.03
## b_chandoanxogan 813.06 33.87 95.40 615.29 761.72 832.04 877.47
## sigma 325.11 5.87 31.21 279.98 305.30 324.04 336.45
## lprior -6.73 0.03 0.14 -7.15 -6.78 -6.72 -6.64
## lp__ -670.85 1.50 5.26 -687.48 -670.43 -669.33 -668.34
## 97.5% n_eff Rhat
## b_chandoanbinhthuong 885.68 25 1.02
## b_chandoanshocknhiemtrung 1129.45 11 1.14
## b_chandoanxogan 948.17 8 1.46
## sigma 417.08 28 1.02
## lprior -6.53 28 1.02
## lp__ -667.67 12 1.12
##
## Samples were drawn using NUTS(diag_e) at Tue Nov 21 09:23:16 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
pairs(bf.2)
# Hypothesis tests
hypothesis(bf.2, "chandoanxogan > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanxogan)-(... > 0 31.37 87.37 -141.05 142.05 2.7
## Post.Prob Star
## 1 0.73
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "chandoanshocknhiemtrung > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanshocknhi... > 0 253.95 83.47 76.1 358.56 49
## Post.Prob Star
## 1 0.98 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(bf.2, ignore_prior = T, theme = ggplot2::theme())
marginal_effects(bf.2, probs=c(0.05,0.95), conditions=chandoan)
## Warning: Method 'marginal_effects' is deprecated. Please use
## 'conditional_effects' instead.
## Warning: Argument 'probs' is deprecated. Please use 'prob' instead.
## Warning: The following variables in 'conditions' are not part of the model:
## 'conditions'
#launch_shinystan(bf.2, rstudio = getOption("shinystan.rstudio"))
## tckDER
prior1 = get_prior(tckDER ~ chandoan-1, family = gaussian, data=dongmau)
bf.2 = brm(data = dongmau, tckDER ~ chandoan-1, prior1,
family = gaussian, chains = 2, iter = 100)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 7e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.7 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: WARNING: There aren't enough warmup iterations to fit the
## Chain 1: three stages of adaptation as currently configured.
## Chain 1: Reducing each adaptation stage to 15%/75%/10% of
## Chain 1: the given number of warmup iterations:
## Chain 1: init_buffer = 7
## Chain 1: adapt_window = 38
## Chain 1: term_buffer = 5
## Chain 1:
## Chain 1: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 1: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 1: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 1: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 1: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 1: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 1: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 1: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 1: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 1: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 1: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 1: Iteration: 100 / 100 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.084 seconds (Warm-up)
## Chain 1: 0.131 seconds (Sampling)
## Chain 1: 0.215 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 6e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.06 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: WARNING: There aren't enough warmup iterations to fit the
## Chain 2: three stages of adaptation as currently configured.
## Chain 2: Reducing each adaptation stage to 15%/75%/10% of
## Chain 2: the given number of warmup iterations:
## Chain 2: init_buffer = 7
## Chain 2: adapt_window = 38
## Chain 2: term_buffer = 5
## Chain 2:
## Chain 2: Iteration: 1 / 100 [ 1%] (Warmup)
## Chain 2: Iteration: 10 / 100 [ 10%] (Warmup)
## Chain 2: Iteration: 20 / 100 [ 20%] (Warmup)
## Chain 2: Iteration: 30 / 100 [ 30%] (Warmup)
## Chain 2: Iteration: 40 / 100 [ 40%] (Warmup)
## Chain 2: Iteration: 50 / 100 [ 50%] (Warmup)
## Chain 2: Iteration: 51 / 100 [ 51%] (Sampling)
## Chain 2: Iteration: 60 / 100 [ 60%] (Sampling)
## Chain 2: Iteration: 70 / 100 [ 70%] (Sampling)
## Chain 2: Iteration: 80 / 100 [ 80%] (Sampling)
## Chain 2: Iteration: 90 / 100 [ 90%] (Sampling)
## Chain 2: Iteration: 100 / 100 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.065 seconds (Warm-up)
## Chain 2: 0.142 seconds (Sampling)
## Chain 2: 0.207 seconds (Total)
## Chain 2:
## Warning: There were 53 transitions after warmup that exceeded the maximum treedepth. Increase max_treedepth above 10. See
## https://mc-stan.org/misc/warnings.html#maximum-treedepth-exceeded
## Warning: Examine the pairs() plot to diagnose sampling problems
## Warning: The largest R-hat is 1.05, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
bf.2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=100; warmup=50; thin=1;
## post-warmup draws per chain=50, total post-warmup draws=100.
##
## mean se_mean sd 2.5% 25% 50% 75%
## b_chandoanbinhthuong 349.77 2.70 21.45 309.75 332.43 349.37 366.76
## b_chandoanshocknhiemtrung 333.50 2.84 21.65 293.30 315.48 333.73 348.39
## b_chandoanxogan 300.06 3.97 19.31 270.01 289.16 297.34 308.73
## sigma 124.15 0.95 9.41 108.95 116.05 123.88 130.27
## lprior -5.76 0.01 0.11 -5.98 -5.83 -5.75 -5.67
## lp__ -580.72 0.15 1.10 -583.03 -581.52 -580.62 -579.76
## 97.5% n_eff Rhat
## b_chandoanbinhthuong 385.83 63 1.01
## b_chandoanshocknhiemtrung 375.82 58 1.03
## b_chandoanxogan 341.27 24 1.03
## sigma 143.27 98 0.99
## lprior -5.59 98 0.99
## lp__ -579.07 53 0.99
##
## Samples were drawn using NUTS(diag_e) at Tue Nov 21 09:23:59 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
pairs(bf.2)
# Hypothesis tests
hypothesis(bf.2, "chandoanxogan > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanxogan)-(... > 0 -49.72 28.84 -98.74 2.18 0.08
## Post.Prob Star
## 1 0.07
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "chandoanshocknhiemtrung > chandoanbinhthuong", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (chandoanshocknhi... > 0 -16.28 31.65 -64.05 45.6 0.33
## Post.Prob Star
## 1 0.25
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
plot(bf.2, ignore_prior = T, theme = ggplot2::theme())
marginal_effects(bf.2, probs = c(0.05, 0.95), conditions = chandoan)
#launch_shinystan(bf.2, rstudio = getOption("shinystan.rstudio"))