# load libraries
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.2 ──
## ✔ ggplot2 3.3.6 ✔ purrr 0.3.5
## ✔ tibble 3.1.8 ✔ dplyr 1.0.10
## ✔ tidyr 1.2.1 ✔ stringr 1.4.1
## ✔ readr 2.1.3 ✔ forcats 0.5.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
library(dplyr)
library(table1)
##
## Attaching package: 'table1'
##
## The following objects are masked from 'package:base':
##
## units, units<-
thuan = read.csv("D:/OneDrive - UMP/00 - 2021 - 2022/00 LVT/23102022THUAN.csv", header = TRUE)
names(thuan)
## [1] "id" "gioi" "nam" "thang" "thamnien" "sinhnam"
## [7] "tuoi" "congviec" "dantoc" "uwes1" "uwes2" "uwes3"
## [13] "uwes4" "uwes5" "uwes6" "uwes7" "uwes8" "uwes9"
## [19] "uwes_tol" "gjs1" "gjs2" "gjs3" "gjs4" "gjs5"
## [25] "gjs6" "gjs7" "gjs8" "gjs9" "gjs10" "gjs_tol"
## [31] "wss1" "wss2" "wss3" "wss4" "wss5" "wss6"
## [37] "wss7" "wss8" "wss_tol" "brcs1" "brcs2" "brcs3"
## [43] "brcs4" "brcs_tol" "brs1" "brs2" "brs3" "brs4"
## [49] "brs5" "brs6" "brs_tol"
attach(thuan)
set.seed(12345)
library(BayesFactor)
## Loading required package: coda
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
## ************
## Welcome to BayesFactor 0.9.12-4.4. If you have questions, please contact Richard Morey (richarddmorey@gmail.com).
##
## Type BFManual() to open the manual.
## ************
bf = ttestBF(formula = uwes_tol ~ gioi, data=thuan)
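# Printing the BayesFactor object shows the Bayes factor for the sex
# difference itself (illustrative; output not reproduced here):
bf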
mcmc = posterior(bf, iter = 10000)
# Summary
summary(mcmc)
##
## Iterations = 1:10000
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 10000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## mu 37.5719 0.7232 0.007232 0.008258
## beta (1 - 2) 3.0545 1.4153 0.014153 0.017048
## sig2 155.6661 11.9600 0.119600 0.119600
## delta 0.2453 0.1138 0.001138 0.001367
## g 3.2200 86.0171 0.860171 0.860171
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## mu 36.17116 37.0878 37.5598 38.0520 39.0027
## beta (1 - 2) 0.21792 2.1280 3.0624 3.9791 5.8857
## sig2 134.06199 147.3031 154.9149 163.2828 180.6724
## delta 0.01741 0.1705 0.2452 0.3203 0.4695
## g 0.07589 0.2046 0.4071 0.9656 11.4939
table(mcmc[, 2] < 0) # draws where males score lower than females
##
## FALSE TRUE
## 9826 174
# plot the posterior of the group difference
plot(mcmc[, 2])
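# The posterior can also be summarised directly: the share of draws with a
# positive difference, plus 95% highest-density intervals (an illustrative
# sketch; HPDinterval() comes from coda, loaded with BayesFactor):
mean(mcmc[, 2] > 0)           # posterior probability that males score higher
HPDinterval(mcmc, prob = 0.95)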
library(rstan)
## Loading required package: StanHeaders
##
## rstan version 2.26.13 (Stan version 2.26.1)
## For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores()).
## To avoid recompilation of unchanged Stan programs, we recommend calling
## rstan_options(auto_write = TRUE)
## For within-chain threading using `reduce_sum()` or `map_rect()` Stan functions,
## change `threads_per_chain` option:
## rstan_options(threads_per_chain = 1)
## Do not specify '-march=native' in 'LOCAL_CPPFLAGS' or a Makevars file
##
## Attaching package: 'rstan'
## The following object is masked from 'package:coda':
##
## traceplot
## The following object is masked from 'package:tidyr':
##
## extract
library(shinystan)
## Loading required package: shiny
##
## This is shinystan version 2.6.0
library(rstanarm)
## Loading required package: Rcpp
## This is rstanarm version 2.21.3
## - See https://mc-stan.org/rstanarm/articles/priors for changes to default priors!
## - Default priors may change, so it's safest to specify priors, even if equivalent to the defaults.
## - For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores())
##
## Attaching package: 'rstanarm'
## The following object is masked from 'package:rstan':
##
## loo
library(psych)
##
## Attaching package: 'psych'
## The following object is masked from 'package:rstanarm':
##
## logit
## The following object is masked from 'package:rstan':
##
## lookup
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
library(brms)
## Loading 'brms' package (version 2.18.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
##
## Attaching package: 'brms'
## The following object is masked from 'package:psych':
##
## cs
## The following objects are masked from 'package:rstanarm':
##
## dirichlet, exponential, get_y, lasso, ngrps
## The following object is masked from 'package:rstan':
##
## loo
## The following object is masked from 'package:stats':
##
## ar
m1 = stan_glm(uwes_tol ~ thamnien, data=thuan)
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.048 seconds (Warm-up)
## Chain 1: 0.065 seconds (Sampling)
## Chain 1: 0.113 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.045 seconds (Warm-up)
## Chain 2: 0.066 seconds (Sampling)
## Chain 2: 0.111 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.047 seconds (Warm-up)
## Chain 3: 0.064 seconds (Sampling)
## Chain 3: 0.111 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.048 seconds (Warm-up)
## Chain 4: 0.067 seconds (Sampling)
## Chain 4: 0.115 seconds (Total)
## Chain 4:
summary(m1)
##
## Model Info:
## function: stan_glm
## family: gaussian [identity]
## formula: uwes_tol ~ thamnien
## algorithm: sampling
## sample: 4000 (posterior sample size)
## priors: see help('prior_summary')
## observations: 345
## predictors: 2
##
## Estimates:
## mean sd 10% 50% 90%
## (Intercept) 35.0 1.0 33.7 34.9 36.2
## thamnien 0.3 0.1 0.2 0.3 0.5
## sigma 12.4 0.5 11.8 12.4 13.0
##
## Fit Diagnostics:
## mean sd 10% 50% 90%
## mean_PPD 37.0 0.9 35.8 37.0 38.2
##
## The mean_ppd is the sample average posterior predictive distribution of the outcome variable (for details see help('summary.stanreg')).
##
## MCMC diagnostics
## mcse Rhat n_eff
## (Intercept) 0.0 1.0 3472
## thamnien 0.0 1.0 3447
## sigma 0.0 1.0 3782
## mean_PPD 0.0 1.0 3944
## log-posterior 0.0 1.0 1888
##
## For each parameter, mcse is Monte Carlo standard error, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence Rhat=1).
prior_summary(m1)
## Priors for model 'm1'
## ------
## Intercept (after predictors centered)
## Specified prior:
## ~ normal(location = 37, scale = 2.5)
## Adjusted prior:
## ~ normal(location = 37, scale = 31)
##
## Coefficients
## Specified prior:
## ~ normal(location = 0, scale = 2.5)
## Adjusted prior:
## ~ normal(location = 0, scale = 5.3)
##
## Auxiliary (sigma)
## Specified prior:
## ~ exponential(rate = 1)
## Adjusted prior:
## ~ exponential(rate = 0.08)
## ------
## See help('prior_summary.stanreg') for more details
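# The defaults above are rescaled from the data; to pin them down, priors can
# be passed explicitly (a sketch with assumed values mirroring the adjusted
# priors in the summary above):
m1b = stan_glm(uwes_tol ~ thamnien, data = thuan,
               prior_intercept = normal(37, 31),
               prior = normal(0, 5.3),
               prior_aux = exponential(0.08))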
post.r2 = bayes_R2(m1)
summary(post.r2)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 4.510e-06 1.591e-02 2.600e-02 2.840e-02 3.851e-02 1.104e-01
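# A 95% credible interval and the full posterior distribution of R-squared
# (illustrative):
quantile(post.r2, probs = c(0.025, 0.975))
hist(post.r2, breaks = 50, main = "Posterior of Bayesian R-squared")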
library(MCMCpack)
## Loading required package: MASS
##
## Attaching package: 'MASS'
## The following object is masked from 'package:dplyr':
##
## select
## ##
## ## Markov Chain Monte Carlo Package (MCMCpack)
## ## Copyright (C) 2003-2022 Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park
## ##
## ## Support provided by the U.S. National Science Foundation
## ## (Grants SES-0350646 and SES-0350613)
## ##
##
## Attaching package: 'MCMCpack'
## The following objects are masked from 'package:brms':
##
## ddirichlet, rdirichlet
library(broom)
posterior = MCMCregress(formula= uwes_tol ~ thamnien,
b0=0, B0=0.01,
data=thuan, verbose=10000)
##
##
## MCMCregress iteration 1 of 11000
## beta =
## 36.01760
## 0.30841
## sigma2 = 159.45579
##
##
## MCMCregress iteration 10001 of 11000
## beta =
## 34.32700
## 0.37637
## sigma2 = 185.57493
plot(posterior, col = "red")
raftery.diag(posterior)
##
## Quantile (q) = 0.025
## Accuracy (r) = +/- 0.005
## Probability (s) = 0.95
##
## Burn-in Total Lower bound Dependence
## (M) (N) (Nmin) factor (I)
## (Intercept) 2 3710 3746 0.990
## thamnien 2 3620 3746 0.966
## sigma2 2 3834 3746 1.020
summary(posterior)
##
## Iterations = 1001:11000
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 10000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## (Intercept) 34.6722 0.9427 0.009427 0.009427
## thamnien 0.3659 0.1137 0.001137 0.001137
## sigma2 154.2307 11.8974 0.118974 0.118974
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## (Intercept) 32.822 34.0365 34.6774 35.3076 36.5228
## thamnien 0.142 0.2897 0.3647 0.4407 0.5904
## sigma2 132.635 145.9139 153.6650 161.9784 178.9890
apply(posterior, 2, quantile, probs=c(0.025, 0.5, 0.975))
## (Intercept) thamnien sigma2
## 2.5% 32.82197 0.1419582 132.6354
## 50% 34.67737 0.3647385 153.6650
## 97.5% 36.52283 0.5903872 178.9890
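# With the weak prior precision B0 = 0.01, the posterior should track ordinary
# least squares closely; a quick cross-check (illustrative):
ols = lm(uwes_tol ~ thamnien, data = thuan)
cbind(coef(ols), confint(ols))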
library(nnet)
require(brms)
m.2 = multinom(uwes_tol ~ congviec, data=thuan)
## # weights: 135 (88 variable)
## initial value 1313.298559
## iter 10 value 1192.550584
## iter 20 value 1180.379734
## iter 30 value 1177.437923
## iter 40 value 1176.437853
## iter 50 value 1176.083368
## iter 60 value 1175.839225
## iter 70 value 1175.457473
## iter 80 value 1175.447267
## iter 90 value 1175.444488
## iter 90 value 1175.444477
## iter 90 value 1175.444477
## final value 1175.444477
## converged
summary(m.2)
## Call:
## multinom(formula = uwes_tol ~ congviec, data = thuan)
##
## Coefficients:
## (Intercept) congviec
## 9 -11.802196 11.48491
## 11 -13.330988 12.27014
## 12 -14.430939 12.27063
## 13 -12.428488 12.13496
## 14 -12.218544 12.33142
## 15 -14.431633 12.27073
## 16 -16.307623 12.99968
## 17 -14.734660 12.83139
## 18 -12.821879 12.27062
## 19 -11.862282 12.10030
## 21 -11.285963 11.67566
## 22 -12.428478 12.13496
## 23 -10.885513 11.48474
## 24 -11.360386 11.78897
## 25 -11.375413 11.96112
## 26 -10.244013 11.32643
## 27 -10.059978 11.84251
## 28 -14.174160 12.68000
## 29 -14.838550 12.60902
## 30 -10.741226 11.69512
## 31 -10.590375 10.78143
## 32 -11.676064 11.91965
## 33 -10.590420 10.78139
## 34 -11.360388 11.78896
## 35 -10.590360 10.78148
## 36 -12.310590 12.15784
## 37 -11.669421 11.84251
## 38 -10.590376 10.78142
## 39 -12.081661 11.91970
## 40 -12.310590 12.15784
## 41 -11.676069 11.91963
## 42 -11.038414 11.64577
## 43 -10.284129 11.68825
## 44 -10.982829 11.91962
## 45 -9.560441 11.77332
## 46 -11.201880 12.08869
## 47 -10.538170 11.76785
## 48 -9.900087 11.60057
## 49 -10.741276 11.69515
## 50 -8.692600 10.90007
## 51 -11.298056 12.04168
## 52 -11.676056 11.91966
## 53 85.921193 -85.22807
## 54 -9.550469 11.48472
##
## Std. Errors:
## (Intercept) congviec
## 9 1.6593275 0.8672873
## 11 1.5970757 0.6858691
## 12 2.6644417 0.9357633
## 13 1.2308873 0.6273468
## 14 0.9646266 0.5680285
## 15 2.6650426 0.9358858
## 16 2.7955761 0.8172192
## 17 1.6513998 0.6399863
## 18 1.2814758 0.6239966
## 19 1.0160414 0.5894883
## 21 1.1367563 0.6586965
## 22 1.2308879 0.6273474
## 23 1.1235685 0.6808996
## 24 1.0716464 0.6283952
## 25 0.9473311 0.5862872
## 26 0.9872611 0.6567511
## 27 0.6957408 0.5459377
## 28 1.5221040 0.6306523
## 29 2.2266187 0.7633107
## 30 0.9163891 0.5995924
## 31 1.7147185 1.1071689
## 32 1.0923462 0.6198191
## 33 1.7148013 1.1072258
## 34 1.0716505 0.6283966
## 35 1.7146371 1.1071079
## 36 1.1506534 0.6097790
## 37 1.1593038 0.6437246
## 38 1.7147232 1.1071724
## 39 1.2864492 0.6647908
## 40 1.1506532 0.6097790
## 41 1.0923608 0.6198237
## 42 1.0564629 0.6403225
## 43 0.7969131 0.5725448
## 44 0.8552766 0.5713121
## 45 0.6470797 0.5395338
## 46 0.8214753 0.5573379
## 47 0.8197553 0.5728962
## 48 0.7516624 0.5671124
## 49 0.9163865 0.5995894
## 50 0.7730542 0.6179751
## 51 0.8725575 0.5678478
## 52 1.0923302 0.6198141
## 53 0.6123573 0.6123573
## 54 0.7280620 0.5673165
##
## Residual Deviance: 2350.889
## AIC: 2526.889
exp(coefficients(m.2))
## (Intercept) congviec
## 9 7.488096e-06 9.723742e+04
## 11 1.623399e-06 2.132336e+05
## 12 5.404092e-07 2.133370e+05
## 13 4.002913e-06 1.862718e+05
## 14 4.938030e-06 2.267078e+05
## 15 5.400345e-07 2.133584e+05
## 16 8.273498e-08 4.422722e+05
## 17 3.988582e-07 3.737678e+05
## 18 2.701026e-06 2.133356e+05
## 19 7.051417e-06 1.799251e+05
## 21 1.254783e-05 1.176721e+05
## 22 4.002953e-06 1.862709e+05
## 23 1.872758e-05 9.722058e+04
## 24 1.164789e-05 1.317901e+05
## 25 1.147416e-05 1.565477e+05
## 26 3.556982e-05 8.298592e+04
## 27 4.275698e-05 1.390393e+05
## 28 6.986191e-07 3.212581e+05
## 29 3.595007e-07 2.992471e+05
## 30 2.163439e-05 1.199851e+05
## 31 2.515697e-05 4.811873e+04
## 32 8.494739e-06 1.501885e+05
## 33 2.515586e-05 4.811703e+04
## 34 1.164787e-05 1.317896e+05
## 35 2.515736e-05 4.812116e+04
## 36 4.503796e-06 1.905816e+05
## 37 8.551354e-06 1.390391e+05
## 38 2.515695e-05 4.811859e+04
## 39 5.662408e-06 1.501969e+05
## 40 4.503797e-06 1.905817e+05
## 41 8.494693e-06 1.501864e+05
## 42 1.607229e-05 1.142076e+05
## 43 3.417114e-05 1.191631e+05
## 44 1.699097e-05 1.501842e+05
## 45 7.046171e-05 1.297448e+05
## 46 1.364852e-05 1.778491e+05
## 47 2.650519e-05 1.290360e+05
## 48 5.017030e-05 1.091596e+05
## 49 2.163331e-05 1.199886e+05
## 50 1.678231e-04 5.418016e+04
## 51 1.239700e-05 1.696814e+05
## 52 8.494805e-06 1.501907e+05
## 53 2.065855e+37 9.680974e-38
## 54 7.116790e-05 9.721914e+04
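# multinom() reports no p-values; Wald z statistics and two-sided p-values can
# be derived from the coefficient and standard-error matrices (a sketch):
z = summary(m.2)$coefficients / summary(m.2)$standard.errors
p = 2 * pnorm(abs(z), lower.tail = FALSE)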
table1(~ uwes_tol | factor(congviec), data=thuan)
library(bayesplot)
library(ggplot2)
p = ggplot(thuan, aes(x = uwes_tol, group = congviec))
p + geom_density(aes(fill = congviec), alpha = 0.5) +
  theme_light(base_size = 15)
library(rstan)
library(shinystan)
library(rstanarm)
library(psych)
library(brms)
set.seed(123)
# model with the intercept included
prior0 = get_prior(uwes1 ~ congviec, family = gaussian, data=thuan)
bayesfit1 = brm(data=thuan, uwes1 ~ congviec, prior0,
family = gaussian, chains=2, iter= 10000)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 5.3e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.53 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 10000 [ 0%] (Warmup)
## Chain 1: Iteration: 1000 / 10000 [ 10%] (Warmup)
## Chain 1: Iteration: 2000 / 10000 [ 20%] (Warmup)
## Chain 1: Iteration: 3000 / 10000 [ 30%] (Warmup)
## Chain 1: Iteration: 4000 / 10000 [ 40%] (Warmup)
## Chain 1: Iteration: 5000 / 10000 [ 50%] (Warmup)
## Chain 1: Iteration: 5001 / 10000 [ 50%] (Sampling)
## Chain 1: Iteration: 6000 / 10000 [ 60%] (Sampling)
## Chain 1: Iteration: 7000 / 10000 [ 70%] (Sampling)
## Chain 1: Iteration: 8000 / 10000 [ 80%] (Sampling)
## Chain 1: Iteration: 9000 / 10000 [ 90%] (Sampling)
## Chain 1: Iteration: 10000 / 10000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.109 seconds (Warm-up)
## Chain 1: 0.138 seconds (Sampling)
## Chain 1: 0.247 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 8e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 10000 [ 0%] (Warmup)
## Chain 2: Iteration: 1000 / 10000 [ 10%] (Warmup)
## Chain 2: Iteration: 2000 / 10000 [ 20%] (Warmup)
## Chain 2: Iteration: 3000 / 10000 [ 30%] (Warmup)
## Chain 2: Iteration: 4000 / 10000 [ 40%] (Warmup)
## Chain 2: Iteration: 5000 / 10000 [ 50%] (Warmup)
## Chain 2: Iteration: 5001 / 10000 [ 50%] (Sampling)
## Chain 2: Iteration: 6000 / 10000 [ 60%] (Sampling)
## Chain 2: Iteration: 7000 / 10000 [ 70%] (Sampling)
## Chain 2: Iteration: 8000 / 10000 [ 80%] (Sampling)
## Chain 2: Iteration: 9000 / 10000 [ 90%] (Sampling)
## Chain 2: Iteration: 10000 / 10000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.115 seconds (Warm-up)
## Chain 2: 0.139 seconds (Sampling)
## Chain 2: 0.254 seconds (Total)
## Chain 2:
summary(bayesfit1)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: uwes1 ~ congviec
## Data: thuan (Number of observations: 345)
## Draws: 2 chains, each with iter = 10000; warmup = 5000; thin = 1;
## total post-warmup draws = 10000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 4.54 0.17 4.20 4.88 1.00 10267 7311
## congviec -0.18 0.07 -0.31 -0.05 1.00 9993 7358
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.43 0.06 1.33 1.54 1.00 9555 7171
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
bayesfit1$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=10000; warmup=5000; thin=1;
## post-warmup draws per chain=5000, total post-warmup draws=10000.
##
## mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff
## b_Intercept 4.54 0.00 0.17 4.20 4.42 4.54 4.65 4.88 10238
## b_congviec -0.18 0.00 0.07 -0.31 -0.23 -0.18 -0.14 -0.05 9965
## sigma 1.43 0.00 0.06 1.33 1.39 1.43 1.47 1.54 9529
## lprior -9.23 0.00 0.01 -9.24 -9.23 -9.23 -9.22 -9.21 9922
## lp__ -621.33 0.02 1.21 -624.44 -621.89 -621.02 -620.44 -619.93 4579
## Rhat
## b_Intercept 1
## b_congviec 1
## sigma 1
## lprior 1
## lp__ 1
##
## Samples were drawn using NUTS(diag_e) at Wed Nov 2 07:42:19 2022.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
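# A posterior predictive check shows whether the Gaussian model reproduces the
# observed uwes1 distribution (a sketch using brms::pp_check):
pp_check(bayesfit1, ndraws = 100)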
prior1 = get_prior(uwes_tol ~ congviec - 1, family = gaussian, data=thuan)
bayesfit2 = brm(data = thuan, uwes_tol ~ congviec-1, prior1,
family = gaussian, chains = 2, iter = 20000)
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 2.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 1: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 1: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 1: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 1: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 1: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 1: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 1: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 1: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 1: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 1: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 1: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.192 seconds (Warm-up)
## Chain 1: 0.22 seconds (Sampling)
## Chain 1: 0.412 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 8e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 2: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 2: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 2: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 2: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 2: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 2: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 2: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 2: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 2: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 2: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 2: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.195 seconds (Warm-up)
## Chain 2: 0.216 seconds (Sampling)
## Chain 2: 0.411 seconds (Total)
## Chain 2:
# Summary
summary(bayesfit2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: uwes_tol ~ congviec - 1
## Data: thuan (Number of observations: 345)
## Draws: 2 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup draws = 20000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## congviec 12.03 0.44 11.16 12.90 1.00 18527 12875
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 22.17 0.85 20.60 23.90 1.00 19131 12890
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
pairs(bayesfit2)
bayesfit2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=20000; warmup=10000; thin=1;
## post-warmup draws per chain=10000, total post-warmup draws=20000.
##
## mean se_mean sd 2.5% 25% 50% 75% 97.5%
## b_congviec 12.03 0.00 0.44 11.16 11.74 12.03 12.33 12.90
## sigma 22.17 0.01 0.85 20.60 21.58 22.14 22.72 23.90
## lprior -4.32 0.00 0.08 -4.49 -4.37 -4.32 -4.26 -4.17
## lp__ -1560.03 0.01 1.00 -1562.69 -1560.43 -1559.73 -1559.32 -1559.06
## n_eff Rhat
## b_congviec 18583 1
## sigma 19008 1
## lprior 19017 1
## lp__ 8797 1
##
## Samples were drawn using NUTS(diag_e) at Wed Nov 2 07:43:08 2022.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
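# Directional posterior probabilities for the coefficient are available via
# brms::hypothesis() (a sketch):
hypothesis(bayesfit2, "congviec > 0")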
# plots
plot(bayesfit2, ignore_prior = TRUE, theme = ggplot2::theme())
launch_shinystan(bayesfit2, rstudio = getOption("shinystan.rstudio"))
##
## Launching ShinyStan interface... for large models this may take some time.
##
## Listening on http://127.0.0.1:4168
## Warning: Error in as.POSIXlt.character: character string is not in a standard
## unambiguous format
## Warning in renderWidget(instance): Ignoring explicitly provided widget ID
## "wr9uzUwXgV"; Shiny doesn't use them
marginal_effects(bayesfit2, probs = c(0.05, 0.95))
## Warning: Method 'marginal_effects' is deprecated. Please use
## 'conditional_effects' instead.
## Warning: Argument 'probs' is deprecated. Please use 'prob' instead.
marginal_effects(bayesfit2, probs=c(0.05,0.95), conditions=congviec)
## Warning: Method 'marginal_effects' is deprecated. Please use
## 'conditional_effects' instead.
## Warning: Argument 'probs' is deprecated. Please use 'prob' instead.
## Warning: The following variables in 'conditions' are not part of the model:
## 'conditions'
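# The non-deprecated equivalent of the two calls above (illustrative):
conditional_effects(bayesfit2, prob = 0.90)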
head(thuan)
## id gioi nam thang thamnien sinhnam tuoi congviec dantoc uwes1 uwes2 uwes3
## 1 1 2 9 0 9.0 1987 35 1 1 2 3 3
## 2 2 2 0 3 0.3 1994 28 5 1 3 3 3
## 3 4 2 1 0 1.0 1996 26 3 1 3 3 2
## 4 5 2 2 9 2.9 1986 36 2 1 5 6 5
## 5 6 2 1 6 1.6 1984 38 4 1 6 6 6
## 6 7 2 1 8 1.8 1996 26 5 1 4 3 5
## uwes4 uwes5 uwes6 uwes7 uwes8 uwes9 uwes_tol gjs1 gjs2 gjs3 gjs4 gjs5 gjs6
## 1 3 3 3 3 3 3 26 5 4 3 2 2 3
## 2 3 3 3 4 3 3 28 4 4 4 4 3 4
## 3 2 1 4 2 2 2 21 3 4 2 2 2 3
## 4 5 5 5 5 5 3 44 5 5 5 5 3 5
## 5 6 5 6 6 5 5 51 4 4 4 4 4 4
## 6 5 3 3 5 3 1 32 4 4 4 4 3 2
## gjs7 gjs8 gjs9 gjs10 gjs_tol wss1 wss2 wss3 wss4 wss5 wss6 wss7 wss8 wss_tol
## 1 3 3 3 3 31 3 3 4 3 2 2 3 3 23
## 2 1 3 4 4 35 1 2 4 3 3 4 4 4 25
## 3 1 2 3 2 24 3 4 4 4 4 2 2 2 25
## 4 1 4 5 5 43 3 1 2 2 2 5 5 5 25
## 5 3 4 4 4 39 2 3 3 2 2 4 4 3 23
## 6 2 4 4 4 35 3 3 3 2 2 4 3 4 24
## brcs1 brcs2 brcs3 brcs4 brcs_tol brs1 brs2 brs3 brs4 brs5 brs6 brs_tol
## 1 3 4 4 4 15 4 4 2 2 2 2 16
## 2 4 3 2 4 13 2 4 2 4 4 4 20
## 3 3 4 4 2 13 2 4 2 4 4 5 21
## 4 4 4 4 4 16 4 4 3 3 3 2 19
## 5 3 3 3 3 12 3 3 3 3 3 3 18
## 6 4 3 3 3 13 4 3 2 2 3 2 16