library(factoextra)
## Loading required package: ggplot2
## Welcome! Want to learn more? See two factoextra-related books at https://goo.gl/ve3WBa
library(tidyverse)
## ── Attaching packages ──────────────────────────────── tidyverse 1.3.2 ──
## ✔ tibble 3.1.8 ✔ dplyr 1.0.10
## ✔ tidyr 1.2.1 ✔ stringr 1.4.1
## ✔ readr 2.1.3 ✔ forcats 0.5.2
## ✔ purrr 0.3.5
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag() masks stats::lag()
library(dplyr)
library(table1)
##
## Attaching package: 'table1'
##
## The following objects are masked from 'package:base':
##
## units, units<-
thuan = read.csv("D:\\OneDrive - UMP\\000 -2021-2022\\00 LVT\\bvlvt02112022.csv", header=T)
thuan$congviec = recode(thuan$congviec, "1"="AA", "2"="BB", "3"="CC",
                        "4"="DD", "5"="EE", .default = NA_character_)
thuan$gioi = recode(thuan$gioi, "1"="nam", "2"="nu", .default = NA_character_ )
attach(thuan) # makes the item columns visible by name for the data.frame() calls below
names(thuan)
## [1] "id" "gioi" "thang" "nam" "thamnien" "a3"
## [7] "tuoi" "congviec" "a5" "uwes1" "uwes2" "uwes3"
## [13] "uwes4" "uwes5" "uwes6" "uwes7" "uwes8" "uwes9"
## [19] "uwes_tol" "gjs1" "gjs2" "gjs3" "gjs4" "gjs5"
## [25] "gjs6" "gjs7" "gjs8" "gjs9" "gjs10" "gjs_tol"
## [31] "wss1" "wss2" "wss3" "wss4" "wss5" "wss6"
## [37] "wss7" "wss8" "wss_tol" "brcs1" "brcs2" "brcs3"
## [43] "brcs4" "brcs_tol" "brs1" "brs2" "brs3" "brs4"
## [49] "brs5" "brs6" "brs_tol"
library(table1)
table1(~ factor(uwes1)+ factor(uwes2)+ factor(uwes3)
+ factor(uwes4)+ factor(uwes5) + factor(uwes6)
+ factor(uwes7)+ factor(uwes8)
+ factor(uwes9), data=thuan, transpose = F)
|   | Overall (N=532) |
|---|---|
| factor(uwes1) | |
| 0 | 8 (1.5%) |
| 1 | 12 (2.3%) |
| 2 | 30 (5.6%) |
| 3 | 108 (20.3%) |
| 4 | 42 (7.9%) |
| 5 | 190 (35.7%) |
| 6 | 142 (26.7%) |
| factor(uwes2) | |
| 0 | 4 (0.8%) |
| 1 | 8 (1.5%) |
| 2 | 24 (4.5%) |
| 3 | 112 (21.1%) |
| 4 | 42 (7.9%) |
| 5 | 216 (40.6%) |
| 6 | 126 (23.7%) |
| factor(uwes3) | |
| 0 | 4 (0.8%) |
| 1 | 16 (3.0%) |
| 2 | 26 (4.9%) |
| 3 | 74 (13.9%) |
| 4 | 64 (12.0%) |
| 5 | 184 (34.6%) |
| 6 | 164 (30.8%) |
| factor(uwes4) | |
| 0 | 10 (1.9%) |
| 1 | 14 (2.6%) |
| 2 | 32 (6.0%) |
| 3 | 70 (13.2%) |
| 4 | 76 (14.3%) |
| 5 | 188 (35.3%) |
| 6 | 142 (26.7%) |
| factor(uwes5) | |
| 0 | 18 (3.4%) |
| 1 | 24 (4.5%) |
| 2 | 28 (5.3%) |
| 3 | 86 (16.2%) |
| 4 | 62 (11.7%) |
| 5 | 172 (32.3%) |
| 6 | 142 (26.7%) |
| factor(uwes6) | |
| 0 | 10 (1.9%) |
| 1 | 14 (2.6%) |
| 2 | 28 (5.3%) |
| 3 | 70 (13.2%) |
| 4 | 68 (12.8%) |
| 5 | 200 (37.6%) |
| 6 | 142 (26.7%) |
| factor(uwes7) | |
| 0 | 10 (1.9%) |
| 1 | 8 (1.5%) |
| 2 | 22 (4.1%) |
| 3 | 68 (12.8%) |
| 4 | 48 (9.0%) |
| 5 | 182 (34.2%) |
| 6 | 194 (36.5%) |
| factor(uwes8) | |
| 0 | 14 (2.6%) |
| 1 | 14 (2.6%) |
| 2 | 30 (5.6%) |
| 3 | 76 (14.3%) |
| 4 | 84 (15.8%) |
| 5 | 190 (35.7%) |
| 6 | 124 (23.3%) |
| factor(uwes9) | |
| 0 | 20 (3.8%) |
| 1 | 26 (4.9%) |
| 2 | 38 (7.1%) |
| 3 | 90 (16.9%) |
| 4 | 70 (13.2%) |
| 5 | 178 (33.5%) |
| 6 | 110 (20.7%) |
table1(~ factor(gjs1) + factor(gjs2) + factor(gjs3)
+ factor(gjs4) + factor(gjs5) +
factor(gjs6) + factor(gjs7) +
factor(gjs8) + factor(gjs9) +
factor(gjs10) , data=thuan)
|   | Overall (N=532) |
|---|---|
| factor(gjs1) | |
| 1 | 4 (0.8%) |
| 2 | 26 (4.9%) |
| 3 | 94 (17.7%) |
| 4 | 274 (51.5%) |
| 5 | 134 (25.2%) |
| factor(gjs2) | |
| 1 | 4 (0.8%) |
| 2 | 2 (0.4%) |
| 3 | 54 (10.2%) |
| 4 | 310 (58.3%) |
| 5 | 162 (30.5%) |
| factor(gjs3) | |
| 1 | 8 (1.5%) |
| 2 | 14 (2.6%) |
| 3 | 110 (20.7%) |
| 4 | 268 (50.4%) |
| 5 | 132 (24.8%) |
| factor(gjs4) | |
| 1 | 14 (2.6%) |
| 2 | 38 (7.1%) |
| 3 | 100 (18.8%) |
| 4 | 248 (46.6%) |
| 5 | 132 (24.8%) |
| factor(gjs5) | |
| 1 | 16 (3.0%) |
| 2 | 36 (6.8%) |
| 3 | 142 (26.7%) |
| 4 | 224 (42.1%) |
| 5 | 114 (21.4%) |
| factor(gjs6) | |
| 1 | 14 (2.6%) |
| 2 | 46 (8.6%) |
| 3 | 100 (18.8%) |
| 4 | 240 (45.1%) |
| 5 | 132 (24.8%) |
| factor(gjs7) | |
| 1 | 44 (8.3%) |
| 2 | 118 (22.2%) |
| 3 | 110 (20.7%) |
| 4 | 168 (31.6%) |
| 5 | 92 (17.3%) |
| factor(gjs8) | |
| 1 | 4 (0.8%) |
| 2 | 38 (7.1%) |
| 3 | 86 (16.2%) |
| 4 | 252 (47.4%) |
| 5 | 152 (28.6%) |
| factor(gjs9) | |
| 1 | 4 (0.8%) |
| 2 | 4 (0.8%) |
| 3 | 120 (22.6%) |
| 4 | 290 (54.5%) |
| 5 | 114 (21.4%) |
| factor(gjs10) | |
| 1 | 2 (0.4%) |
| 2 | 24 (4.5%) |
| 3 | 108 (20.3%) |
| 4 | 254 (47.7%) |
| 5 | 144 (27.1%) |
table1(~ factor(wss1) + factor(wss2) +
factor(wss3) + factor(wss4) +
factor(wss5) + factor(wss6) +
factor(wss7) + factor(wss8),
data=thuan, transpose = T)
| Overall (N=532) | 1 | 2 | 3 | 4 | 5 |
|---|---|---|---|---|---|
| factor(wss1) | 42 (7.9%) | 162 (30.5%) | 234 (44.0%) | 68 (12.8%) | 26 (4.9%) |
| factor(wss2) | 94 (17.7%) | 172 (32.3%) | 172 (32.3%) | 62 (11.7%) | 32 (6.0%) |
| factor(wss3) | 68 (12.8%) | 176 (33.1%) | 202 (38.0%) | 62 (11.7%) | 24 (4.5%) |
| factor(wss4) | 90 (16.9%) | 142 (26.7%) | 216 (40.6%) | 58 (10.9%) | 26 (4.9%) |
| factor(wss5) | 114 (21.4%) | 136 (25.6%) | 196 (36.8%) | 64 (12.0%) | 22 (4.1%) |
| factor(wss6) | 26 (4.9%) | 66 (12.4%) | 170 (32.0%) | 218 (41.0%) | 52 (9.8%) |
| factor(wss7) | 22 (4.1%) | 104 (19.5%) | 232 (43.6%) | 130 (24.4%) | 44 (8.3%) |
| factor(wss8) | 12 (2.3%) | 82 (15.4%) | 230 (43.2%) | 166 (31.2%) | 42 (7.9%) |
table1(~ factor(brcs1)+ factor(brcs2)+ factor(brcs3)+ factor(brcs4), data=thuan)
|   | Overall (N=532) |
|---|---|
| factor(brcs1) | |
| 2 | 2 (0.4%) |
| 3 | 216 (40.6%) |
| 4 | 236 (44.4%) |
| 5 | 78 (14.7%) |
| factor(brcs2) | |
| 2 | 2 (0.4%) |
| 3 | 176 (33.1%) |
| 4 | 266 (50.0%) |
| 5 | 88 (16.5%) |
| factor(brcs3) | |
| 2 | 2 (0.4%) |
| 3 | 164 (30.8%) |
| 4 | 274 (51.5%) |
| 5 | 92 (17.3%) |
| factor(brcs4) | |
| 2 | 8 (1.5%) |
| 3 | 170 (32.0%) |
| 4 | 250 (47.0%) |
| 5 | 104 (19.5%) |
table1(~ factor(brs1)+ factor(brs2)+ factor(brs3)+
factor(brs4)+ factor(brs5)+ factor(brs6), data=thuan)
|   | Overall (N=532) |
|---|---|
| factor(brs1) | |
| 2 | 24 (4.5%) |
| 3 | 104 (19.5%) |
| 4 | 316 (59.4%) |
| 5 | 88 (16.5%) |
| factor(brs2) | |
| 2 | 56 (10.5%) |
| 3 | 200 (37.6%) |
| 4 | 198 (37.2%) |
| 5 | 78 (14.7%) |
| factor(brs3) | |
| 2 | 66 (12.4%) |
| 3 | 156 (29.3%) |
| 4 | 244 (45.9%) |
| 5 | 66 (12.4%) |
| factor(brs4) | |
| 1 | 20 (3.8%) |
| 2 | 130 (24.4%) |
| 3 | 164 (30.8%) |
| 4 | 138 (25.9%) |
| 5 | 80 (15.0%) |
| factor(brs5) | |
| 1 | 10 (1.9%) |
| 2 | 44 (8.3%) |
| 3 | 164 (30.8%) |
| 4 | 224 (42.1%) |
| 5 | 90 (16.9%) |
| factor(brs6) | |
| 1 | 18 (3.4%) |
| 2 | 106 (19.9%) |
| 3 | 182 (34.2%) |
| 4 | 144 (27.1%) |
| 5 | 82 (15.4%) |
library(factoextra)
library(ggdendro)
##
## Attaching package: 'ggdendro'
## The following object is masked from 'package:table1':
##
## label
library(dendextend)
##
## ---------------------
## Welcome to dendextend version 1.16.0
## Type citation('dendextend') for how to cite the package.
##
## Type browseVignettes(package = 'dendextend') for the package vignette.
## The github page is: https://github.com/talgalili/dendextend/
##
## Suggestions and bug-reports can be submitted at: https://github.com/talgalili/dendextend/issues
## You may ask questions at stackoverflow, use the r and dendextend tags:
## https://stackoverflow.com/questions/tagged/dendextend
##
## To suppress this message use: suppressPackageStartupMessages(library(dendextend))
## ---------------------
##
## Attaching package: 'dendextend'
## The following object is masked from 'package:ggdendro':
##
## theme_dendro
## The following object is masked from 'package:stats':
##
## cutree
library(tidyverse)
library(cluster)
library(cluster.datasets)
#gioi + thamnien + a3 + tuoi + congviec + a5 +
uwes = data.frame(uwes1, uwes2, uwes3, uwes4, uwes5, uwes6, uwes7, uwes8, uwes9)
gjs = data.frame(gjs1, gjs2, gjs3, gjs4, gjs5, gjs6, gjs7, gjs8, gjs9, gjs10)
wss = data.frame(wss1, wss2, wss3, wss4, wss5, wss6, wss7, wss8)
brcs = data.frame(brcs1, brcs2, brcs3, brcs4)
brs = data.frame(brs1, brs2, brs3, brs4, brs5, brs6)
OME = data.frame(uwes1, uwes2, uwes3, uwes4, uwes5, uwes6, uwes7, uwes8, uwes9,
gjs1, gjs2, gjs3, gjs4, gjs5, gjs6, gjs7, gjs8, gjs9, gjs10,
wss1, wss2, wss3, wss4, wss5, wss6, wss7, wss8,
brs1, brs2, brs3, brs4, brs5, brs6, thamnien)
ome.tp = data.frame(uwes_tol, gjs_tol, wss_tol, brcs_tol, brs_tol)
library(psych)
##
## Attaching package: 'psych'
## The following objects are masked from 'package:ggplot2':
##
## %+%, alpha
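# quick cross-check (a sketch, separate from the omega output below): raw
# Cronbach's alpha per subscale from the item data frames built above;
# check.keys = TRUE flags items that correlate negatively with the total
alpha(uwes, check.keys = TRUE)$total$raw_alpha
alpha(gjs, check.keys = TRUE)$total$raw_alpha
alpha(wss, check.keys = TRUE)$total$raw_alpha
alpha(brcs, check.keys = TRUE)$total$raw_alpha
alpha(brs, check.keys = TRUE)$total$raw_alpha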
omega(ome.tp)
## Loading required namespace: GPArotation
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
## Warning in fac(r = r, nfactors = nfactors, n.obs = n.obs, rotate = rotate, : An
## ultra-Heywood case was detected. Examine the results carefully
## Warning in cov2cor(t(w) %*% r %*% w): diag(.) had 0 or NA entries; non-finite
## result is doubtful
## Omega
## Call: omegah(m = m, nfactors = nfactors, fm = fm, key = key, flip = flip,
## digits = digits, title = title, sl = sl, labels = labels,
## plot = plot, n.obs = n.obs, rotate = rotate, Phi = Phi, option = option,
## covar = covar)
## Alpha: 0.67
## G.6: 0.69
## Omega Hierarchical: 0.65
## Omega H asymptotic: 0.83
## Omega Total 0.78
##
## Schmid Leiman Factor loadings greater than 0.2
## g F1* F2* F3* h2 u2 p2
## uwes_tol 0.61 0.49 0.62 0.38 0.60
## gjs_tol 0.65 0.48 0.65 0.35 0.65
## wss_tol- -0.52 0.28 0.72 0.00
## brcs_tol 0.76 0.56 0.44 1.04
## brs_tol 0.61 0.25 0.49 0.51 0.76
##
## With Sums of squares of:
## g F1* F2* F3*
## 1.74 0.47 0.35 0.00
##
## general/max 3.67 max/min = Inf
## mean percent general = 0.61 with sd = 0.38 and cv of 0.62
## Explained Common Variance of the general factor = 0.68
##
## The degrees of freedom are -2 and the fit is 0
## The number of observations was 532 with Chi Square = 0 with prob < NA
## The root mean square of the residuals is 0
## The df corrected root mean square of the residuals is NA
##
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 5 and the fit is 0.23
## The number of observations was 532 with Chi Square = 121.96 with prob < 1.2e-24
## The root mean square of the residuals is 0.11
## The df corrected root mean square of the residuals is 0.16
##
## RMSEA index = 0.21 and the 10 % confidence intervals are 0.179 0.243
## BIC = 90.58
##
## Measures of factor score adequacy
## g F1* F2* F3*
## Correlation of scores with factors 0.86 0.66 0.60 0
## Multiple R square of scores with factors 0.74 0.44 0.36 0
## Minimum correlation of factor score estimates 0.48 -0.13 -0.28 -1
##
## Total, General and Subset omega for each subset
## g F1* F2* F3*
## Omega total for total scores and subscales 0.78 0.78 0.58 NA
## Omega general for total scores and subscales 0.65 0.49 0.54 NA
## Omega group for total scores and subscales 0.10 0.29 0.05 NA
omega(uwes)
## Omega
## Call: omegah(m = m, nfactors = nfactors, fm = fm, key = key, flip = flip,
## digits = digits, title = title, sl = sl, labels = labels,
## plot = plot, n.obs = n.obs, rotate = rotate, Phi = Phi, option = option,
## covar = covar)
## Alpha: 0.96
## G.6: 0.97
## Omega Hierarchical: 0.94
## Omega H asymptotic: 0.96
## Omega Total 0.98
##
## Schmid Leiman Factor loadings greater than 0.2
## g F1* F2* F3* h2 u2 p2
## uwes1 0.87 0.77 0.23 0.97
## uwes2 0.91 0.84 0.16 0.98
## uwes3 0.92 0.86 0.14 0.98
## uwes4 0.92 0.86 0.14 0.98
## uwes5 0.88 0.48 1.00 0.00 0.77
## uwes6 0.88 0.79 0.21 0.98
## uwes7 0.83 0.69 0.31 0.99
## uwes8 0.77 0.55 0.89 0.11 0.66
## uwes9 0.68 0.62 0.85 0.15 0.54
##
## With Sums of squares of:
## g F1* F2* F3*
## 6.54 0.06 0.70 0.25
##
## general/max 9.39 max/min = 11.4
## mean percent general = 0.87 with sd = 0.17 and cv of 0.2
## Explained Common Variance of the general factor = 0.87
##
## The degrees of freedom are 12 and the fit is 0.13
## The number of observations was 532 with Chi Square = 66.08 with prob < 1.7e-09
## The root mean square of the residuals is 0.01
## The df corrected root mean square of the residuals is 0.02
## RMSEA index = 0.092 and the 10 % confidence intervals are 0.071 0.114
## BIC = -9.24
##
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 27 and the fit is 0.92
## The number of observations was 532 with Chi Square = 483.4 with prob < 4.1e-85
## The root mean square of the residuals is 0.06
## The df corrected root mean square of the residuals is 0.07
##
## RMSEA index = 0.178 and the 10 % confidence intervals are 0.165 0.193
## BIC = 313.93
##
## Measures of factor score adequacy
## g F1* F2* F3*
## Correlation of scores with factors 0.98 0.22 0.89 0.92
## Multiple R square of scores with factors 0.96 0.05 0.79 0.85
## Minimum correlation of factor score estimates 0.91 -0.90 0.57 0.70
##
## Total, General and Subset omega for each subset
## g F1* F2* F3*
## Omega total for total scores and subscales 0.98 0.96 0.93 0.99
## Omega general for total scores and subscales 0.94 0.95 0.56 0.77
## Omega group for total scores and subscales 0.03 0.01 0.37 0.23
omega(gjs)
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
## Warning in fac(r = r, nfactors = nfactors, n.obs = n.obs, rotate = rotate, : An
## ultra-Heywood case was detected. Examine the results carefully
## Warning in cov2cor(t(w) %*% r %*% w): diag(.) had 0 or NA entries; non-finite
## result is doubtful
## Omega
## Call: omegah(m = m, nfactors = nfactors, fm = fm, key = key, flip = flip,
## digits = digits, title = title, sl = sl, labels = labels,
## plot = plot, n.obs = n.obs, rotate = rotate, Phi = Phi, option = option,
## covar = covar)
## Alpha: 0.93
## G.6: 0.94
## Omega Hierarchical: 0.91
## Omega H asymptotic: 0.96
## Omega Total 0.95
##
## Schmid Leiman Factor loadings greater than 0.2
## g F1* F2* F3* h2 u2 p2
## gjs1 0.74 0.26 0.63 0.37 0.88
## gjs2 0.49 0.45 0.46 0.54 0.51
## gjs3 0.82 0.72 0.28 0.94
## gjs4 0.86 0.74 0.26 0.99
## gjs5 0.88 0.77 0.23 1.00
## gjs6 0.86 0.74 0.26 1.00
## gjs7 0.72 -0.21 0.59 0.41 0.87
## gjs8 0.69 0.48 0.71 0.29 0.68
## gjs9 0.64 0.21 0.46 0.54 0.89
## gjs10 0.85 0.77 0.23 0.94
##
## With Sums of squares of:
## g F1* F2* F3*
## 5.84 0.00 0.39 0.38
##
## general/max 15.07 max/min = Inf
## mean percent general = 0.87 with sd = 0.16 and cv of 0.18
## Explained Common Variance of the general factor = 0.88
##
## The degrees of freedom are 18 and the fit is 0.14
## The number of observations was 532 with Chi Square = 73.73 with prob < 1e-08
## The root mean square of the residuals is 0.02
## The df corrected root mean square of the residuals is 0.03
## RMSEA index = 0.076 and the 10 % confidence intervals are 0.059 0.095
## BIC = -39.25
##
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 35 and the fit is 0.42
## The number of observations was 532 with Chi Square = 220.4 with prob < 9.4e-29
## The root mean square of the residuals is 0.05
## The df corrected root mean square of the residuals is 0.06
##
## RMSEA index = 0.1 and the 10 % confidence intervals are 0.087 0.113
## BIC = 0.72
##
## Measures of factor score adequacy
## g F1* F2* F3*
## Correlation of scores with factors 0.97 0 0.69 0.68
## Multiple R square of scores with factors 0.95 0 0.48 0.47
## Minimum correlation of factor score estimates 0.89 -1 -0.04 -0.06
##
## Total, General and Subset omega for each subset
## g F1* F2* F3*
## Omega total for total scores and subscales 0.95 NA 0.90 0.89
## Omega general for total scores and subscales 0.91 NA 0.88 0.82
## Omega group for total scores and subscales 0.02 NA 0.02 0.07
omega(wss)
## Omega
## Call: omegah(m = m, nfactors = nfactors, fm = fm, key = key, flip = flip,
## digits = digits, title = title, sl = sl, labels = labels,
## plot = plot, n.obs = n.obs, rotate = rotate, Phi = Phi, option = option,
## covar = covar)
## Alpha: 0.86
## G.6: 0.9
## Omega Hierarchical: 0.72
## Omega H asymptotic: 0.77
## Omega Total 0.93
##
## Schmid Leiman Factor loadings greater than 0.2
## g F1* F2* F3* h2 u2 p2
## wss1 0.79 0.22 0.68 0.32 0.91
## wss2 0.83 0.55 1.00 0.00 0.69
## wss3 0.84 0.30 0.80 0.20 0.88
## wss4 0.76 0.26 0.65 0.35 0.89
## wss5 0.76 0.64 0.36 0.91
## wss6 0.34 0.62 0.52 0.48 0.23
## wss7 0.71 0.54 0.46 0.07
## wss8 0.28 0.87 0.84 0.16 0.10
##
## With Sums of squares of:
## g F1* F2* F3*
## 3.40 0.25 1.66 0.35
##
## general/max 2.05 max/min = 6.61
## mean percent general = 0.58 with sd = 0.38 and cv of 0.66
## Explained Common Variance of the general factor = 0.6
##
## The degrees of freedom are 7 and the fit is 0.02
## The number of observations was 532 with Chi Square = 11.96 with prob < 0.1
## The root mean square of the residuals is 0.01
## The df corrected root mean square of the residuals is 0.02
## RMSEA index = 0.036 and the 10 % confidence intervals are 0 0.071
## BIC = -31.98
##
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 20 and the fit is 1.18
## The number of observations was 532 with Chi Square = 622.45 with prob < 5.3e-119
## The root mean square of the residuals is 0.18
## The df corrected root mean square of the residuals is 0.21
##
## RMSEA index = 0.238 and the 10 % confidence intervals are 0.222 0.254
## BIC = 496.92
##
## Measures of factor score adequacy
## g F1* F2* F3*
## Correlation of scores with factors 0.93 0.45 0.92 0.84
## Multiple R square of scores with factors 0.86 0.21 0.86 0.70
## Minimum correlation of factor score estimates 0.72 -0.59 0.71 0.40
##
## Total, General and Subset omega for each subset
## g F1* F2* F3*
## Omega total for total scores and subscales 0.93 0.89 0.83 1.00
## Omega general for total scores and subscales 0.72 0.82 0.10 0.69
## Omega group for total scores and subscales 0.19 0.08 0.73 0.31
omega(brcs)
## Warning in fa.stats(r = r, f = f, phi = phi, n.obs = n.obs, np.obs = np.obs, :
## The estimated weights for the factor scores are probably incorrect. Try a
## different factor score estimation method.
## Warning in fac(r = r, nfactors = nfactors, n.obs = n.obs, rotate = rotate, : An
## ultra-Heywood case was detected. Examine the results carefully
## Warning in cov2cor(t(w) %*% r %*% w): diag(.) had 0 or NA entries; non-finite
## result is doubtful
## Omega
## Call: omegah(m = m, nfactors = nfactors, fm = fm, key = key, flip = flip,
## digits = digits, title = title, sl = sl, labels = labels,
## plot = plot, n.obs = n.obs, rotate = rotate, Phi = Phi, option = option,
## covar = covar)
## Alpha: 0.9
## G.6: 0.89
## Omega Hierarchical: 0.85
## Omega H asymptotic: 0.9
## Omega Total 0.94
##
## Schmid Leiman Factor loadings greater than 0.2
## g F1* F2* F3* h2 u2 p2
## brcs1 0.69 0.54 0.82 0.18 0.59
## brcs2 0.84 0.24 0.73 0.27 0.96
## brcs3 0.78 0.46 -0.23 0.83 0.17 0.73
## brcs4 0.92 0.87 0.13 0.96
##
## With Sums of squares of:
## g F1* F2* F3*
## 2.64 0.00 0.50 0.14
##
## general/max 5.25 max/min = Inf
## mean percent general = 0.81 with sd = 0.18 and cv of 0.23
## Explained Common Variance of the general factor = 0.8
##
## The degrees of freedom are -3 and the fit is 0
## The number of observations was 532 with Chi Square = 0 with prob < NA
## The root mean square of the residuals is 0
## The df corrected root mean square of the residuals is NA
##
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 2 and the fit is 0.3
## The number of observations was 532 with Chi Square = 159.53 with prob < 2.3e-35
## The root mean square of the residuals is 0.1
## The df corrected root mean square of the residuals is 0.17
##
## RMSEA index = 0.385 and the 10 % confidence intervals are 0.336 0.437
## BIC = 146.98
##
## Measures of factor score adequacy
## g F1* F2* F3*
## Correlation of scores with factors 0.96 0 0.82 0.65
## Multiple R square of scores with factors 0.91 0 0.67 0.42
## Minimum correlation of factor score estimates 0.83 -1 0.33 -0.16
##
## Total, General and Subset omega for each subset
## g F1* F2* F3*
## Omega total for total scores and subscales 0.94 NA 0.91 0.9
## Omega general for total scores and subscales 0.85 NA 0.62 0.9
## Omega group for total scores and subscales 0.08 NA 0.28 0.0
omega(brs)
## Omega
## Call: omegah(m = m, nfactors = nfactors, fm = fm, key = key, flip = flip,
## digits = digits, title = title, sl = sl, labels = labels,
## plot = plot, n.obs = n.obs, rotate = rotate, Phi = Phi, option = option,
## covar = covar)
## Alpha: 0.85
## G.6: 0.87
## Omega Hierarchical: 0.6
## Omega H asymptotic: 0.65
## Omega Total 0.93
##
## Schmid Leiman Factor loadings greater than 0.2
## g F1* F2* F3* h2 u2 p2
## brs1 0.42 0.90 1.00 0.00 0.18
## brs2 1.00 1.00 0.00 1.00
## brs3 0.42 0.39 0.33 0.43 0.57 0.40
## brs4 0.61 0.66 0.81 0.19 0.46
## brs5 0.49 0.54 0.54 0.46 0.44
## brs6 0.60 0.72 0.87 0.13 0.41
##
## With Sums of squares of:
## g F1* F2* F3*
## 2.32 1.39 0.94 0.00
##
## general/max 1.67 max/min = 322.82
## mean percent general = 0.48 with sd = 0.27 and cv of 0.57
## Explained Common Variance of the general factor = 0.5
##
## The degrees of freedom are 0 and the fit is 0.02
## The number of observations was 532 with Chi Square = 8.99 with prob < NA
## The root mean square of the residuals is 0.01
## The df corrected root mean square of the residuals is NA
##
## Compare this with the adequacy of just a general factor and no group factors
## The degrees of freedom for just the general factor are 9 and the fit is 1.57
## The number of observations was 532 with Chi Square = 830.03 with prob < 7.3e-173
## The root mean square of the residuals is 0.23
## The df corrected root mean square of the residuals is 0.29
##
## RMSEA index = 0.414 and the 10 % confidence intervals are 0.391 0.439
## BIC = 773.54
##
## Measures of factor score adequacy
## g F1* F2* F3*
## Correlation of scores with factors 1.00 0.94 1 0.09
## Multiple R square of scores with factors 0.99 0.88 1 0.01
## Minimum correlation of factor score estimates 0.98 0.76 1 -0.98
##
## Total, General and Subset omega for each subset
## g F1* F2* F3*
## Omega total for total scores and subscales 0.93 0.87 1.00 1.00
## Omega general for total scores and subscales 0.60 0.40 0.18 0.99
## Omega group for total scores and subscales 0.29 0.47 0.82 0.00
library(NbClust)
OME %>%
  select(uwes1, uwes2, uwes3, uwes4, uwes5, uwes6, uwes7, uwes8, uwes9,
         gjs1, gjs2, gjs3, gjs4, gjs5, gjs6, gjs7, gjs8, gjs9, gjs10,
         wss1, wss2, wss3, wss4, wss5, wss6, wss7, wss8,
         brs1, brs2, brs3, brs4, brs5, brs6) %>%
  dist() %>% hclust() %>% as.dendrogram() -> dend
# dend is already a dendrogram, so the second as.dendrogram() call (whose
# graphical arguments were silently ignored) is dropped here
dend %>%
  color_branches(k = 4) %>%
  color_labels(k = 4) %>%
  plot()
df = scale(OME)
fviz_nbclust(df, kmeans, method="wss") + labs(subtitle = "Elbow method")
fviz_nbclust(df, kmeans, method="silhouette") + labs(subtitle = "Silhouette method")
fviz_nbclust(df, kmeans, nstart=400, method="gap_stat", nboot=20) + labs(subtitle = "Gap statistic")
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
## Warning: did not converge in 10 iterations
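# sketch: NbClust (loaded above but otherwise unused) aggregates ~30 validity
# indices and reports the k most indices vote for; the settings here (Ward
# linkage, k from 2 to 8) are assumptions, and the call can be slow
nb = NbClust(df, distance = "euclidean", min.nc = 2, max.nc = 8,
             method = "ward.D2", index = "all")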
km = kmeans(df, centers=4, nstart=400)
fviz_cluster(km, df, ellipse.type="norm")
# Hopkins statistic: values well above 0.5 suggest the data are clusterable
c1 = get_clust_tendency(df, n=400, graph=T)
c1$hopkins
## [1] 0.7445335
print(c1$plot)
gap = clusGap(df, FUN = kmeans, nstart = 250, K.max = 10, B = 10)
fviz_gap_stat(gap)
km = kmeans(df,4, nstart=250)
fviz_cluster(km, df)
# check how well the clustering fits the data (average silhouette width per cluster)
sil = silhouette(km$cluster, dist(df))
fviz_silhouette(sil)
## cluster size ave.sil.width
## 1 1 138 0.18
## 2 2 58 0.27
## 3 3 202 0.14
## 4 4 134 0.13
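# sketch: attach the k-means labels back to the data and profile the four
# clusters on the total scores already present in thuan
thuan$cluster = factor(km$cluster)
thuan %>%
  group_by(cluster) %>%
  summarise(across(c(uwes_tol, gjs_tol, wss_tol, brcs_tol, brs_tol),
                   ~ mean(.x, na.rm = TRUE)))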
# another approach: PCA
library(FactoMineR)
pca.ome <- PCA(OME, graph = FALSE)
get_eig(pca.ome)
## eigenvalue variance.percent cumulative.variance.percent
## Dim.1 11.97826158 35.2301811 35.23018
## Dim.2 4.77693474 14.0498080 49.27999
## Dim.3 2.73129974 8.0332345 57.31322
## Dim.4 2.45179242 7.2111542 64.52438
## Dim.5 1.51055495 4.4428087 68.96719
## Dim.6 1.05753668 3.1104020 72.07759
## Dim.7 0.87930383 2.5861877 74.66378
## Dim.8 0.86236281 2.5363612 77.20014
## Dim.9 0.78759741 2.3164630 79.51660
## Dim.10 0.66510860 1.9562018 81.47280
## Dim.11 0.65971259 1.9403311 83.41313
## Dim.12 0.49630380 1.4597171 84.87285
## Dim.13 0.47673432 1.4021598 86.27501
## Dim.14 0.44629829 1.3126420 87.58765
## Dim.15 0.41237936 1.2128805 88.80053
## Dim.16 0.38160784 1.1223760 89.92291
## Dim.17 0.34174561 1.0051342 90.92804
## Dim.18 0.30551950 0.8985868 91.82663
## Dim.19 0.29136070 0.8569432 92.68357
## Dim.20 0.27069506 0.7961619 93.47973
## Dim.21 0.25829267 0.7596843 94.23942
## Dim.22 0.23253622 0.6839300 94.92335
## Dim.23 0.21559224 0.6340948 95.55744
## Dim.24 0.18989773 0.5585227 96.11597
## Dim.25 0.17522741 0.5153747 96.63134
## Dim.26 0.17207582 0.5061054 97.13745
## Dim.27 0.16746134 0.4925333 97.62998
## Dim.28 0.14231115 0.4185622 98.04854
## Dim.29 0.13955347 0.4104514 98.45899
## Dim.30 0.13577214 0.3993298 98.85832
## Dim.31 0.11183524 0.3289272 99.18725
## Dim.32 0.10562528 0.3106626 99.49791
## Dim.33 0.08714520 0.2563094 99.75422
## Dim.34 0.08356426 0.2457772 100.00000
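# sketch: by the Kaiser rule (eigenvalue > 1), six components would be
# retained, consistent with the table above
sum(get_eig(pca.ome)$eigenvalue > 1)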
fviz_screeplot(pca.ome, addlabels=T, ylim = c(0, 50))
var <- get_pca_var(pca.ome)
var
## Principal Component Analysis Results for variables
## ===================================================
## Name Description
## 1 "$coord" "Coordinates for the variables"
## 2 "$cor" "Correlations between variables and dimensions"
## 3 "$cos2" "Cos2 for the variables"
## 4 "$contrib" "contributions of the variables"
head(var$coord)
## Dim.1 Dim.2 Dim.3 Dim.4 Dim.5
## uwes1 0.8088926 0.01163001 0.1176360 -0.2780873 0.09543588
## uwes2 0.8154224 -0.04862415 0.1628397 -0.3504950 0.05884299
## uwes3 0.8467865 -0.02913339 0.1558574 -0.3580510 0.02279204
## uwes4 0.8392867 -0.05821053 0.1458257 -0.3431431 0.08006149
## uwes5 0.8457610 -0.03770585 0.1084617 -0.2425841 0.13154022
## uwes6 0.8061857 -0.05892272 0.1805785 -0.3512779 0.06082302
head(var$contrib)
## Dim.1 Dim.2 Dim.3 Dim.4 Dim.5
## uwes1 5.462456 0.002831465 0.5066538 3.154122 0.60295763
## uwes2 5.551004 0.049494249 0.9708476 5.010488 0.22922023
## uwes3 5.986240 0.017767759 0.8893763 5.228850 0.03438981
## uwes4 5.880671 0.070933885 0.7785718 4.802494 0.42433692
## uwes5 5.971749 0.029762420 0.4307087 2.400164 1.14546170
## uwes6 5.425957 0.072680220 1.1938859 5.032895 0.24490604
fviz_pca_var(pca.ome, col.var = "blue")
fviz_pca_var(pca.ome, col.var="contrib",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),repel = T )
## Warning: ggrepel: 17 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
fviz_contrib(pca.ome, choice = "var", axes = 1, top = 10)
fviz_contrib(pca.ome, choice = "var", axes = 2, top = 10)
ind <- get_pca_ind(pca.ome)
ind
## Principal Component Analysis Results for individuals
## ===================================================
## Name Description
## 1 "$coord" "Coordinates for the individuals"
## 2 "$cos2" "Cos2 for the individuals"
## 3 "$contrib" "contributions of the individuals"
head(ind$coord)
## Dim.1 Dim.2 Dim.3 Dim.4 Dim.5
## 1 -4.6654070 -0.90823690 0.4224287 0.67267844 1.2364928
## 2 -4.6654070 -0.90823690 0.4224287 0.67267844 1.2364928
## 3 0.2284159 1.34978452 0.8587337 -0.32759573 1.6970776
## 4 -6.9929796 0.01790024 -0.5273573 0.07830325 0.5583694
## 5 -2.0087876 -0.51964124 -0.1880761 0.59004007 1.3039404
## 6 -2.0329380 -1.08541533 -0.3140483 -1.18757830 0.5933732
fviz_pca_ind(pca.ome, col.ind = "cos2",gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),repel = T)
## Warning: ggrepel: 447 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
fviz_pca_biplot(pca.ome, repel = TRUE)
## Warning: ggrepel: 444 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
## Warning: ggrepel: 14 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
fviz_pca_ind(pca.ome,label = "none", palette = c("#00AFBB", "#E7B800", "#FC4E07"),addEllipses = T)
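# sketch: the same individuals map, coloured by the k-means clusters found
# earlier (habillage takes a grouping factor; a fourth colour added for k = 4)
fviz_pca_ind(pca.ome, label = "none",
             habillage = factor(km$cluster),
             palette = c("#00AFBB", "#E7B800", "#FC4E07", "#7570B3"),
             addEllipses = TRUE)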
library(lavaan)
## This is lavaan 0.6-12
## lavaan is FREE software! Please report any bugs.
##
## Attaching package: 'lavaan'
## The following object is masked from 'package:psych':
##
## cor2cov
library(lavaanPlot)
library(OpenMx)
##
## Attaching package: 'OpenMx'
## The following object is masked from 'package:psych':
##
## tr
# note: in lavaan model syntax, `*` attaches a label to a parameter rather than
# forming an interaction; reusing the label `gioi` below constrains the CH
# loadings of congviec and thamnien to be equal (hence the single equality
# constraint reported in the output)
m1a <- ' UWES =~ uwes1 + uwes2 + uwes3 + uwes4 + uwes5 + uwes6 + uwes7 + uwes8 + uwes9
GJS =~ gjs1 + gjs2 + gjs3 + gjs4 + gjs5 + gjs6 + gjs7 + gjs8 + gjs9 + gjs10
WSS =~ wss1 + wss2 + wss3 + wss4 + wss5 + wss6 + wss7 + wss8
BRCS =~ brcs1 + brcs2 + brcs3 + brcs4
BRS =~ brs1 + brs2 + brs3 + brs4 + brs5 + brs6
CH =~ gioi*congviec + gioi*thamnien + congviec*thamnien '
m1a.1 <- cfa(m1a, data=thuan, std.lv=T)
summary(m1a.1, std=T)
## lavaan 0.6-12 ended normally after 54 iterations
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 93
## Number of equality constraints 1
##
## Number of observations 532
##
## Model Test User Model:
##
## Test statistic 4576.818
## Degrees of freedom 688
## P-value (Chi-square) 0.000
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Latent Variables:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## UWES =~
## uwes1 1.257 0.050 25.195 0.000 1.257 0.868
## uwes2 1.188 0.045 26.685 0.000 1.188 0.899
## uwes3 1.293 0.046 28.268 0.000 1.293 0.929
## uwes4 1.342 0.048 28.133 0.000 1.342 0.926
## uwes5 1.433 0.055 26.104 0.000 1.433 0.887
## uwes6 1.276 0.049 26.267 0.000 1.276 0.890
## uwes7 1.166 0.050 23.375 0.000 1.166 0.828
## uwes8 1.164 0.053 21.799 0.000 1.164 0.791
## uwes9 1.169 0.061 19.090 0.000 1.169 0.720
## GJS =~
## gjs1 0.618 0.031 19.813 0.000 0.618 0.744
## gjs2 0.354 0.028 12.667 0.000 0.354 0.522
## gjs3 0.681 0.030 22.789 0.000 0.681 0.818
## gjs4 0.819 0.034 24.091 0.000 0.819 0.848
## gjs5 0.849 0.034 25.249 0.000 0.849 0.873
## gjs6 0.830 0.035 23.637 0.000 0.830 0.838
## gjs7 0.884 0.046 19.128 0.000 0.884 0.725
## gjs8 0.666 0.034 19.842 0.000 0.666 0.745
## gjs9 0.488 0.029 17.112 0.000 0.488 0.667
## gjs10 0.715 0.029 24.782 0.000 0.715 0.863
## WSS =~
## wss1 0.780 0.034 22.869 0.000 0.780 0.827
## wss2 0.926 0.039 23.738 0.000 0.926 0.848
## wss3 0.848 0.036 23.859 0.000 0.848 0.850
## wss4 0.816 0.039 20.987 0.000 0.816 0.782
## wss5 0.885 0.039 22.521 0.000 0.885 0.819
## wss6 0.361 0.043 8.391 0.000 0.361 0.366
## wss7 0.204 0.043 4.756 0.000 0.204 0.212
## wss8 0.277 0.039 7.044 0.000 0.277 0.310
## BRCS =~
## brcs1 0.573 0.026 22.154 0.000 0.573 0.814
## brcs2 0.562 0.026 22.031 0.000 0.562 0.811
## brcs3 0.607 0.024 25.053 0.000 0.607 0.881
## brcs4 0.617 0.027 22.849 0.000 0.617 0.830
## BRS =~
## brs1 0.235 0.032 7.380 0.000 0.235 0.324
## brs2 0.598 0.034 17.658 0.000 0.598 0.691
## brs3 0.502 0.035 14.274 0.000 0.502 0.584
## brs4 0.976 0.038 25.640 0.000 0.976 0.891
## brs5 0.688 0.035 19.678 0.000 0.688 0.747
## brs6 0.961 0.036 26.342 0.000 0.961 0.906
## CH =~
## congvic (gioi) 0.864 0.143 6.025 0.000 0.864 0.754
## thamnin (gioi) 0.864 0.143 6.025 0.000 0.864 0.171
##
## Covariances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## UWES ~~
## GJS 0.669 0.026 25.867 0.000 0.669 0.669
## WSS -0.250 0.043 -5.772 0.000 -0.250 -0.250
## BRCS 0.545 0.033 16.279 0.000 0.545 0.545
## BRS 0.225 0.044 5.146 0.000 0.225 0.225
## CH 0.068 0.059 1.156 0.248 0.068 0.068
## GJS ~~
## WSS -0.227 0.044 -5.120 0.000 -0.227 -0.227
## BRCS 0.569 0.033 17.300 0.000 0.569 0.569
## BRS 0.251 0.044 5.737 0.000 0.251 0.251
## CH 0.100 0.060 1.655 0.098 0.100 0.100
## WSS ~~
## BRCS -0.071 0.047 -1.490 0.136 -0.071 -0.071
## BRS 0.290 0.044 6.644 0.000 0.290 0.290
## CH -0.176 0.065 -2.725 0.006 -0.176 -0.176
## BRCS ~~
## BRS 0.348 0.042 8.208 0.000 0.348 0.348
## CH 0.155 0.064 2.421 0.015 0.155 0.155
## BRS ~~
## CH 0.173 0.065 2.684 0.007 0.173 0.173
##
## Variances:
## Estimate Std.Err z-value P(>|z|) Std.lv Std.all
## .uwes1 0.516 0.035 14.784 0.000 0.516 0.246
## .uwes2 0.336 0.024 14.211 0.000 0.336 0.192
## .uwes3 0.266 0.020 13.158 0.000 0.266 0.137
## .uwes4 0.298 0.022 13.277 0.000 0.298 0.142
## .uwes5 0.556 0.038 14.467 0.000 0.556 0.213
## .uwes6 0.426 0.030 14.400 0.000 0.426 0.207
## .uwes7 0.623 0.041 15.225 0.000 0.623 0.314
## .uwes8 0.813 0.053 15.482 0.000 0.813 0.375
## .uwes9 1.271 0.081 15.777 0.000 1.271 0.482
## .gjs1 0.308 0.020 15.233 0.000 0.308 0.447
## .gjs2 0.334 0.021 15.985 0.000 0.334 0.727
## .gjs3 0.229 0.016 14.541 0.000 0.229 0.330
## .gjs4 0.262 0.019 14.073 0.000 0.262 0.281
## .gjs5 0.225 0.017 13.509 0.000 0.225 0.238
## .gjs6 0.293 0.021 14.253 0.000 0.293 0.298
## .gjs7 0.704 0.046 15.346 0.000 0.704 0.474
## .gjs8 0.356 0.023 15.228 0.000 0.356 0.446
## .gjs9 0.297 0.019 15.613 0.000 0.297 0.555
## .gjs10 0.175 0.013 13.758 0.000 0.175 0.255
## .wss1 0.280 0.021 13.163 0.000 0.280 0.315
## .wss2 0.336 0.027 12.607 0.000 0.336 0.282
## .wss3 0.276 0.022 12.520 0.000 0.276 0.277
## .wss4 0.424 0.030 14.049 0.000 0.424 0.389
## .wss5 0.384 0.029 13.355 0.000 0.384 0.329
## .wss6 0.843 0.052 16.091 0.000 0.843 0.866
## .wss7 0.877 0.054 16.243 0.000 0.877 0.955
## .wss8 0.722 0.045 16.159 0.000 0.722 0.904
## .brcs1 0.168 0.013 13.063 0.000 0.168 0.338
## .brcs2 0.165 0.013 13.135 0.000 0.165 0.343
## .brcs3 0.107 0.010 10.554 0.000 0.107 0.224
## .brcs4 0.171 0.014 12.609 0.000 0.171 0.311
## .brs1 0.472 0.029 16.144 0.000 0.472 0.895
## .brs2 0.392 0.026 15.000 0.000 0.392 0.523
## .brs3 0.487 0.031 15.573 0.000 0.487 0.659
## .brs4 0.246 0.024 10.257 0.000 0.246 0.205
## .brs5 0.374 0.026 14.474 0.000 0.374 0.441
## .brs6 0.201 0.022 9.265 0.000 0.201 0.178
## .congviec 0.566 0.245 2.306 0.021 0.566 0.431
## .thamnien 24.862 1.544 16.106 0.000 24.862 0.971
## UWES 1.000 1.000 1.000
## GJS 1.000 1.000 1.000
## WSS 1.000 1.000 1.000
## BRCS 1.000 1.000 1.000
## BRS 1.000 1.000 1.000
## CH 1.000 1.000 1.000
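# sketch: the chi-square alone is hard to judge; lavaan's fitMeasures()
# reports the usual approximate fit indices for m1a.1
fitMeasures(m1a.1, c("chisq", "df", "pvalue", "cfi", "tli",
                     "rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "srmr"))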
library(semPlot)
fit <- sem(m1a, data = thuan)
lavaanPlot(model = fit, coefs = T, stand = T, covs = T,
edge_options = list(color="red"),
node_options = list(color="blue"),
sig = .05, digits = 2,
stars = c("regress","latent", "covs"))
library(BayesFactor)
## Loading required package: coda
## Loading required package: Matrix
##
## Attaching package: 'Matrix'
## The following objects are masked from 'package:OpenMx':
##
## %&%, expm
## The following objects are masked from 'package:tidyr':
##
## expand, pack, unpack
## ************
## Welcome to BayesFactor 0.9.12-4.4. If you have questions, please contact Richard Morey (richarddmorey@gmail.com).
##
## Type BFManual() to open the manual.
## ************
bf = ttestBF(formula = uwes_tol ~ gioi, data=thuan)
bf
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 0.2293691 ±0.08%
##
## Against denominator:
## Null, mu1-mu2 = 0
## ---
## Bayes factor type: BFindepSample, JZS
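# sketch: BF10 = 0.23 favours the null, so inverting it gives the evidence for
# "no gender difference" directly (BF01 of about 4.4)
1 / bf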
mcmc = posterior(bf, iter =1000)
summary(mcmc)
##
## Iterations = 1:1000
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 1000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## mu 40.2342 0.49369 0.015612 0.016286
## beta (nam - nu) 1.3428 1.01926 0.032232 0.033818
## sig2 133.5222 8.23227 0.260327 0.263653
## delta 0.1163 0.08806 0.002785 0.002926
## g 3.0685 36.68015 1.159928 1.242858
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## mu 39.29472 39.90925 40.2200 40.5706 41.159
## beta (nam - nu) -0.63234 0.65641 1.3028 2.0335 3.302
## sig2 118.48917 127.42468 133.3207 138.7276 150.696
## delta -0.05435 0.05624 0.1145 0.1765 0.287
## g 0.07140 0.18444 0.3910 0.8625 8.696
table(mcmc[, 2] < 0) # males lower than females (beta < 0)
##
## FALSE TRUE
## 909 91
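# sketch: the same tail area expressed as a posterior probability (91/1000)
mean(mcmc[, 2] < 0)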
plot(mcmc [, 2], col="blue")
bf.lm = lmBF(formula = uwes_tol ~ gioi, data=thuan)
bf.lm
## Bayes factor analysis
## --------------
## [1] gioi : 0.2293691 ±0.08%
##
## Against denominator:
## Intercept only
## ---
## Bayes factor type: BFlinearModel, JZS
summary(bf.lm)
## Bayes factor analysis
## --------------
## [1] gioi : 0.2293691 ±0.08%
##
## Against denominator:
## Intercept only
## ---
## Bayes factor type: BFlinearModel, JZS
mcmc = posterior(bf.lm, iter =1000)
summary(mcmc)
##
## Iterations = 1:1000
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 1000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## mu 40.2653 0.5165 0.01633 0.01633
## gioi-nam 0.6638 0.5057 0.01599 0.01599
## gioi-nu -0.6638 0.5057 0.01599 0.01599
## sig2 133.5450 8.0881 0.25577 0.25577
## g_gioi 1.0309 4.3101 0.13630 0.13630
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## mu 39.26166 39.90849 40.2807 40.6237 41.2716
## gioi-nam -0.33172 0.31820 0.6612 0.9947 1.6328
## gioi-nu -1.63276 -0.99469 -0.6612 -0.3182 0.3317
## sig2 118.59792 127.88214 133.4464 138.6075 150.1646
## g_gioi 0.03914 0.09532 0.1852 0.4303 8.6776
library(rstanarm)
## Loading required package: Rcpp
## This is rstanarm version 2.21.3
## - See https://mc-stan.org/rstanarm/articles/priors for changes to default priors!
## - Default priors may change, so it's safest to specify priors, even if equivalent to the defaults.
## - For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores())
##
## Attaching package: 'rstanarm'
## The following object is masked from 'package:psych':
##
## logit
library(BAS)
library(broom)
library(GGally)
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
m1 = stan_glm(uwes_tol ~ congviec, data=thuan)
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.095 seconds (Warm-up)
## Chain 1: 0.119 seconds (Sampling)
## Chain 1: 0.214 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.089 seconds (Warm-up)
## Chain 2: 0.116 seconds (Sampling)
## Chain 2: 0.205 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.102 seconds (Warm-up)
## Chain 3: 0.148 seconds (Sampling)
## Chain 3: 0.25 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.12 seconds (Warm-up)
## Chain 4: 0.12 seconds (Sampling)
## Chain 4: 0.24 seconds (Total)
## Chain 4:
m1
## stan_glm
## family: gaussian [identity]
## formula: uwes_tol ~ congviec
## observations: 532
## predictors: 5
## ------
## Median MAD_SD
## (Intercept) 37.9 1.0
## congviecBB 3.9 1.2
## congviecCC -0.3 1.7
## congviecDD 5.0 1.8
## congviecEE 0.8 2.3
##
## Auxiliary parameter(s):
## Median MAD_SD
## sigma 11.4 0.3
##
## ------
## * For help interpreting the printed output see ?print.stanreg
## * For info on the priors used see ?prior_summary.stanreg
summary(m1)
##
## Model Info:
## function: stan_glm
## family: gaussian [identity]
## formula: uwes_tol ~ congviec
## algorithm: sampling
## sample: 4000 (posterior sample size)
## priors: see help('prior_summary')
## observations: 532
## predictors: 5
##
## Estimates:
## mean sd 10% 50% 90%
## (Intercept) 37.9 1.0 36.6 37.9 39.1
## congviecBB 3.9 1.2 2.3 3.9 5.4
## congviecCC -0.3 1.7 -2.5 -0.3 1.8
## congviecDD 4.9 1.8 2.6 5.0 7.1
## congviecEE 0.8 2.2 -2.0 0.8 3.6
## sigma 11.4 0.3 11.0 11.4 11.8
##
## Fit Diagnostics:
## mean sd 10% 50% 90%
## mean_PPD 40.1 0.7 39.3 40.1 41.0
##
## The mean_ppd is the sample average posterior predictive distribution of the outcome variable (for details see help('summary.stanreg')).
##
## MCMC diagnostics
## mcse Rhat n_eff
## (Intercept) 0.0 1.0 2634
## congviecBB 0.0 1.0 3110
## congviecCC 0.0 1.0 3082
## congviecDD 0.0 1.0 3899
## congviecEE 0.0 1.0 3734
## sigma 0.0 1.0 4134
## mean_PPD 0.0 1.0 4549
## log-posterior 0.0 1.0 1949
##
## For each parameter, mcse is Monte Carlo standard error, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence Rhat=1).
prior_summary(m1)
## Priors for model 'm1'
## ------
## Intercept (after predictors centered)
## Specified prior:
## ~ normal(location = 40, scale = 2.5)
## Adjusted prior:
## ~ normal(location = 40, scale = 29)
##
## Coefficients
## Specified prior:
## ~ normal(location = [0,0,0,...], scale = [2.5,2.5,2.5,...])
## Adjusted prior:
## ~ normal(location = [0,0,0,...], scale = [58.11,86.31,88.58,...])
##
## Auxiliary (sigma)
## Specified prior:
## ~ exponential(rate = 1)
## Adjusted prior:
## ~ exponential(rate = 0.087)
## ------
## See help('prior_summary.stanreg') for more details
post.r2 = bayes_R2(m1)
summary(post.r2)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.003172 0.027170 0.036769 0.038284 0.047880 0.111365
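# sketch: 95% credible intervals for the coefficients of m1
posterior_interval(m1, prob = 0.95)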
# prior_PD = T draws from the prior predictive distribution only (the outcome
# is not conditioned on), which is why the estimates below are so diffuse
m.da = stan_glm(uwes_tol ~ congviec + gioi*thamnien*congviec + tuoi, data=thuan,
                prior = default_prior_coef(family),
                prior_intercept = default_prior_intercept(family),
                prior_aux = exponential(autoscale = T),
                prior_PD = T,
                adapt_delta = NULL,
                QR = F)
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.077 seconds (Warm-up)
## Chain 1: 0.086 seconds (Sampling)
## Chain 1: 0.163 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 0 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.08 seconds (Warm-up)
## Chain 2: 0.08 seconds (Sampling)
## Chain 2: 0.16 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 0 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.078 seconds (Warm-up)
## Chain 3: 0.084 seconds (Sampling)
## Chain 3: 0.162 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'continuous' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 0 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.074 seconds (Warm-up)
## Chain 4: 0.082 seconds (Sampling)
## Chain 4: 0.156 seconds (Total)
## Chain 4:
summary(m.da)
##
## Model Info:
## function: stan_glm
## family: gaussian [identity]
## formula: uwes_tol ~ congviec + gioi * thamnien * congviec + tuoi
## algorithm: sampling
## sample: 4000 (posterior sample size)
## priors: see help('prior_summary')
## observations: 532
## predictors: 21
##
## Estimates:
## mean sd 10% 50% 90%
## (Intercept) 43.0 150.5 -149.9 42.3 237.8
## congviecBB -1.1 58.2 -77.3 -0.3 72.6
## congviecCC 0.2 88.6 -113.8 0.2 113.5
## congviecDD -0.3 88.6 -114.3 -0.3 112.0
## congviecEE 0.5 120.7 -153.1 -0.1 151.6
## gioinu 0.6 57.6 -71.0 0.6 74.9
## thamnien -0.1 5.8 -7.3 -0.1 7.3
## tuoi -0.1 4.3 -5.6 0.0 5.5
## gioinu:thamnien 0.1 6.1 -7.7 0.2 8.0
## congviecBB:gioinu -0.9 59.9 -76.8 -1.3 74.6
## congviecCC:gioinu -0.4 128.9 -165.3 -1.1 165.3
## congviecDD:gioinu 1.5 156.7 -200.3 -0.4 198.7
## congviecEE:gioinu 0.4 145.0 -187.3 0.4 189.1
## congviecBB:thamnien -0.2 12.7 -16.8 -0.4 15.9
## congviecCC:thamnien 0.1 16.7 -21.4 0.1 22.0
## congviecDD:thamnien 0.1 20.2 -25.5 -0.2 26.0
## congviecEE:thamnien 0.1 7.2 -9.3 0.1 9.3
## congviecBB:gioinu:thamnien -0.4 16.2 -21.4 -0.4 20.9
## congviecCC:gioinu:thamnien 0.2 20.8 -26.4 0.5 26.6
## congviecDD:gioinu:thamnien 0.5 40.2 -52.3 0.8 52.4
## congviecEE:gioinu:thamnien 0.0 7.1 -9.2 0.2 9.1
## sigma 11.5 11.2 1.2 8.1 26.2
##
## MCMC diagnostics
## mcse Rhat n_eff
## (Intercept) 1.8 1.0 6774
## congviecBB 0.7 1.0 6905
## congviecCC 1.1 1.0 6613
## congviecDD 1.1 1.0 6546
## congviecEE 1.5 1.0 6110
## gioinu 0.7 1.0 6225
## thamnien 0.1 1.0 7417
## tuoi 0.1 1.0 7240
## gioinu:thamnien 0.1 1.0 7286
## congviecBB:gioinu 0.7 1.0 7256
## congviecCC:gioinu 1.4 1.0 8319
## congviecDD:gioinu 1.9 1.0 6860
## congviecEE:gioinu 1.7 1.0 7174
## congviecBB:thamnien 0.2 1.0 7137
## congviecCC:thamnien 0.2 1.0 6693
## congviecDD:thamnien 0.2 1.0 8270
## congviecEE:thamnien 0.1 1.0 7820
## congviecBB:gioinu:thamnien 0.2 1.0 6886
## congviecCC:gioinu:thamnien 0.2 1.0 6963
## congviecDD:gioinu:thamnien 0.5 1.0 7589
## congviecEE:gioinu:thamnien 0.1 1.0 6329
## sigma 0.2 1.0 5480
## log-posterior 0.1 1.0 1886
##
## For each parameter, mcse is Monte Carlo standard error, n_eff is a crude measure of effective sample size, and Rhat is the potential scale reduction factor on split chains (at convergence Rhat=1).
prior_summary(m.da)
## Priors for model 'm.da'
## ------
## Intercept (after predictors centered)
## Specified prior:
## ~ normal(location = 40, scale = 2.5)
## Adjusted prior:
## ~ normal(location = 40, scale = 29)
##
## Coefficients
## Specified prior:
## ~ normal(location = [0,0,0,...], scale = [2.5,2.5,2.5,...])
## Adjusted prior:
## ~ normal(location = [0,0,0,...], scale = [58.11,86.31,88.58,...])
##
## Auxiliary (sigma)
## Specified prior:
## ~ exponential(rate = 1)
## Adjusted prior:
## ~ exponential(rate = 0.087)
## ------
## See help('prior_summary.stanreg') for more details
post.r2.m.da = bayes_R2(m.da)
summary(post.r2.m.da)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.4268 0.9826 0.9957 0.9823 0.9992 1.0000
#exp(coefficients(m.da)) # exp(confint(m.da)) # similar summaries can be obtained this way
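# sketch: refit conditioning on the data (prior_PD = FALSE is the default),
# since the bayes_R2 above summarises the prior predictive only
m.da2 = update(m.da, prior_PD = FALSE)
summary(bayes_R2(m.da2))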
library(MCMCpack)
## Loading required package: MASS
##
## Attaching package: 'MASS'
## The following object is masked _by_ '.GlobalEnv':
##
## OME
## The following object is masked from 'package:dplyr':
##
## select
## ##
## ## Markov Chain Monte Carlo Package (MCMCpack)
## ## Copyright (C) 2003-2022 Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park
## ##
## ## Support provided by the U.S. National Science Foundation
## ## (Grants SES-0350646 and SES-0350613)
## ##
##
## Attaching package: 'MCMCpack'
## The following object is masked from 'package:OpenMx':
##
## vech
library(broom)
posterior = MCMCregress(formula= uwes_tol ~ congviec, b0=0, B0=0.01,
data=thuan, verbose=1000)
##
##
## MCMCregress iteration 1 of 11000
## beta =
## 38.95438
## 3.25862
## -0.65702
## 3.82715
## -2.07595
## sigma2 = 133.28743
##
##
## MCMCregress iteration 1001 of 11000
## beta =
## 36.88854
## 5.40816
## -1.22882
## 6.94356
## 5.25650
## sigma2 = 138.17597
##
##
## MCMCregress iteration 2001 of 11000
## beta =
## 37.34851
## 4.43741
## -0.67343
## 5.05991
## 4.18612
## sigma2 = 128.20106
##
##
## MCMCregress iteration 3001 of 11000
## beta =
## 37.49809
## 4.24768
## 2.31207
## 5.72699
## 0.07868
## sigma2 = 126.97501
##
##
## MCMCregress iteration 4001 of 11000
## beta =
## 36.20236
## 5.17951
## -1.08205
## 4.69871
## 1.24844
## sigma2 = 135.10380
##
##
## MCMCregress iteration 5001 of 11000
## beta =
## 37.30842
## 4.71932
## -0.51615
## 4.55272
## 2.85432
## sigma2 = 126.28800
##
##
## MCMCregress iteration 6001 of 11000
## beta =
## 36.79915
## 4.48369
## 0.41630
## 4.51543
## 2.12132
## sigma2 = 133.26854
##
##
## MCMCregress iteration 7001 of 11000
## beta =
## 36.62721
## 4.89230
## 0.16056
## 4.72504
## 2.80937
## sigma2 = 111.80068
##
##
## MCMCregress iteration 8001 of 11000
## beta =
## 37.60595
## 3.27644
## 1.57067
## 3.59207
## -2.42057
## sigma2 = 140.61759
##
##
## MCMCregress iteration 9001 of 11000
## beta =
## 37.11475
## 5.13868
## 1.55237
## 6.57697
## 2.84493
## sigma2 = 135.85121
##
##
## MCMCregress iteration 10001 of 11000
## beta =
## 36.85370
## 6.02170
## -0.94371
## 6.13341
## 0.82122
## sigma2 = 126.40923
plot(posterior, col= "red")
raftery.diag(posterior)
##
## Quantile (q) = 0.025
## Accuracy (r) = +/- 0.005
## Probability (s) = 0.95
##
## Burn-in Total Lower bound Dependence
## (M) (N) (Nmin) factor (I)
## (Intercept) 2 3680 3746 0.982
## congviecBB 2 3802 3746 1.010
## congviecCC 2 3741 3746 0.999
## congviecDD 2 3834 3746 1.020
## congviecEE 2 3710 3746 0.990
## sigma2 2 3680 3746 0.982
summary(posterior)
##
## Iterations = 1001:11000
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 10000
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## (Intercept) 37.585220 0.9548 0.009548 0.009548
## congviecBB 4.099785 1.2125 0.012125 0.012125
## congviecCC -0.009223 1.6601 0.016601 0.016601
## congviecDD 5.098020 1.6895 0.016895 0.016895
## congviecEE 1.074506 2.1865 0.021865 0.022144
## sigma2 130.424244 8.0903 0.080903 0.080903
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## (Intercept) 35.702 36.9448 37.582004 38.234 39.461
## congviecBB 1.696 3.2801 4.113517 4.909 6.414
## congviecCC -3.243 -1.1264 -0.001409 1.107 3.214
## congviecDD 1.849 3.9312 5.084197 6.243 8.430
## congviecEE -3.314 -0.3648 1.077564 2.510 5.353
## sigma2 115.265 124.9167 130.113319 135.557 147.256
apply(posterior, 2, quantile, probs=c(0.025, 0.5, 0.975))
## (Intercept) congviecBB congviecCC congviecDD congviecEE sigma2
## 2.5% 35.70248 1.695555 -3.242690215 1.849044 -3.314267 115.2649
## 50% 37.58200 4.113517 -0.001408582 5.084197 1.077564 130.1133
## 97.5% 39.46135 6.413691 3.214152577 8.430052 5.353425 147.2561
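The intervals above are equal-tailed. coda's HPDinterval() instead returns the narrowest interval containing 95% of the posterior mass, which is a useful cross-check for skewed parameters such as sigma2; a one-line sketch:
HPDinterval(posterior, prob = 0.95)  # highest posterior density intervals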
library(nnet)
require(brms)
## Loading required package: brms
## Loading 'brms' package (version 2.18.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
##
## Attaching package: 'brms'
## The following objects are masked from 'package:MCMCpack':
##
## ddirichlet, rdirichlet
## The following objects are masked from 'package:rstanarm':
##
## dirichlet, exponential, get_y, lasso, ngrps
## The following object is masked from 'package:psych':
##
## cs
## The following object is masked from 'package:stats':
##
## ar
# Multinomial logit on the raw total score: multinom() treats every distinct
# uwes_tol value as its own nominal category, hence the 210 parameters below
m.2 = multinom(uwes_tol ~ congviec, data = thuan)
## # weights: 258 (210 variable)
## initial value 2000.958462
## iter 10 value 1648.574858
## iter 20 value 1573.039108
## iter 30 value 1561.982468
## iter 40 value 1560.030397
## iter 50 value 1557.877417
## iter 60 value 1557.692569
## iter 70 value 1557.637142
## iter 80 value 1557.629256
## final value 1557.628872
## converged
summary(m.2)
## Warning in sqrt(diag(vc)): NaNs produced
## Call:
## multinom(formula = uwes_tol ~ congviec, data = thuan)
##
## Coefficients:
## (Intercept) congviecBB congviecCC congviecDD congviecEE
## 4 -1.020607e-04 -35.718952 19.172306 -19.8789973 -9.0803859
## 9 -2.660375e+01 38.514248 -2.569582 -0.2549817 0.4421150
## 10 -1.095017e-04 -41.048907 -29.621361 -22.2837786 -8.2569823
## 14 -2.660375e+01 38.514248 -2.569582 -0.2549817 0.4421150
## 15 -2.940663e+01 41.317323 -9.107669 -6.4978207 64.1604153
## 18 -1.095017e-04 -41.048907 -29.621361 -22.2837786 -8.2569823
## 19 -2.660375e+01 38.514248 -2.569582 -0.2549817 0.4421150
## 20 -1.082178e-04 -37.347641 -27.319007 21.2810783 -9.0436800
## 21 -3.363748e+01 45.547802 -7.496265 54.9187231 -1.7228803
## 22 6.930376e-01 -40.368834 17.786000 -27.2376099 -14.9658300
## 23 -1.899663e-04 11.910660 19.172470 -19.1189838 -9.5600365
## 24 -1.388822e-04 11.910659 18.479327 -19.9811662 -9.6471472
## 25 -1.114447e-04 11.910595 -25.306455 -19.7831209 33.6554969
## 26 6.930062e-01 11.910804 -32.014260 -26.2304377 -14.5062884
## 27 1.609285e+00 12.380901 16.869522 -33.0939824 32.0460523
## 28 -3.710485e+01 50.114281 55.583717 58.3853743 -3.9994490
## 29 -2.660375e+01 38.514248 -2.569582 -0.2549817 0.4421150
## 30 -3.565540e+01 47.566168 54.134676 57.6294611 -4.1677510
## 31 -3.153585e+01 -1.693606 50.014930 -1.1338836 0.1378777
## 32 1.386061e+00 11.623442 -37.418655 -32.1804420 -21.2161692
## 33 6.929671e-01 11.217522 -33.430924 -27.4037995 -14.9584860
## 34 6.929833e-01 11.910969 17.785878 -25.3378835 -14.6328881
## 35 1.480421e-05 13.009094 18.479599 -19.1605159 -9.4757937
## 36 1.609345e+00 11.687552 17.563098 20.3648867 32.0457335
## 37 -3.683675e+01 48.747222 56.009117 -8.5259920 70.4917673
## 38 -3.231798e+01 44.228490 50.797225 -4.1788204 -1.5983787
## 39 6.931899e-01 -43.610384 -35.170636 -28.8174602 -14.8055082
## 40 6.931335e-01 11.217746 -31.164027 21.6864928 -14.7113719
## 41 -3.129368e+01 43.897491 49.773188 -4.8450555 -2.1903719
## 42 6.931749e-01 12.316069 -31.195904 20.5881594 -14.7932889
## 43 6.932481e-01 13.296832 -32.361852 -26.2024414 -14.4614565
## 44 1.386151e+00 -39.386033 -35.819129 20.5880563 32.2694854
## 45 1.386084e+00 13.297173 19.038975 21.8409170 33.3679729
## 46 1.098582e+00 10.812088 18.073887 20.1823999 -18.6410926
## 47 1.098810e+00 13.008994 -36.068048 -30.2105940 -18.3926044
## 48 -9.729509e-05 13.009450 -24.095644 21.9743861 -9.7155563
## 49 -1.509637e-04 13.520236 -25.182844 21.2811726 -10.1323810
## 50 6.930185e-01 12.316319 18.479081 21.6866414 -16.2905335
## 51 -3.625380e+01 48.857762 54.732458 58.2280390 -4.3790747
## 52 1.791680e+00 12.064857 17.786093 19.4891660 -25.6373993
## 53 -3.129392e+01 43.897317 49.773120 -4.8450555 -2.1903719
## 54 1.609366e+00 13.073867 -56.659693 20.3648892 33.6553633
##
## Std. Errors:
## (Intercept) congviecBB congviecCC congviecDD congviecEE
## 4 0.9999792 5.790644e-14 8.426963e-01 4.838881e-14 NaN
## 9 0.4580379 4.580379e-01 NaN 2.629645e-13 1.058308e-13
## 10 0.9999811 NaN 8.042607e-14 1.057609e-13 9.598539e-14
## 14 0.4580379 4.580379e-01 NaN NaN NaN
## 15 0.4999216 5.106816e-01 1.541841e-14 NaN 4.325946e-01
## 18 0.9999811 4.684536e-14 9.761089e-15 6.308333e-14 NaN
## 19 0.4580379 4.580379e-01 NaN 3.929610e-14 NaN
## 20 0.9999807 NaN 5.958965e-15 9.520202e-01 NaN
## 21 0.5301336 5.433450e-01 NaN 5.640066e-01 2.197052e-14
## 22 0.8660008 NaN 8.427119e-01 NaN 4.550272e-15
## 23 1.0000011 9.794753e-01 8.402711e-01 1.275250e-15 NaN
## 24 0.9999883 9.794514e-01 9.628707e-01 NaN NaN
## 25 0.9999814 9.800911e-01 2.057787e-15 NaN 8.952190e-01
## 26 0.8660053 7.056084e-01 6.183705e-17 NaN NaN
## 27 0.7745700 4.269545e-01 7.591616e-01 2.186898e-16 7.150989e-01
## 28 0.5319993 4.113830e-01 5.705950e-01 5.834044e-01 3.681431e-16
## 29 0.4580379 4.580379e-01 NaN 3.968289e-16 4.353436e-16
## 30 0.5378102 5.747721e-01 5.755947e-01 4.834546e-01 2.610153e-16
## 31 0.4608407 5.747580e-16 4.608407e-01 1.536419e-16 NaN
## 32 0.7905507 5.512777e-01 NaN 7.863494e-17 1.421851e-17
## 33 0.8660109 8.548130e-01 2.025028e-18 NaN 4.094768e-21
## 34 0.8660086 7.041334e-01 8.415473e-01 2.972904e-20 5.936603e-21
## 35 0.9999499 8.055459e-01 9.627380e-01 1.014571e-17 7.140943e-19
## 36 0.7745662 4.898335e-01 5.953060e-01 5.853734e-01 7.151172e-01
## 37 0.5371769 5.719025e-01 4.646177e-01 7.754315e-29 5.748419e-01
## 38 0.5165800 5.506451e-01 5.525654e-01 7.195207e-25 3.943948e-29
## 39 0.8659788 6.841841e-24 4.227323e-23 8.262700e-22 4.551223e-21
## 40 0.8659869 8.538899e-01 2.419063e-21 6.326366e-01 5.431748e-21
## 41 0.4892241 4.457613e-01 5.296500e-01 1.040092e-24 6.141732e-29
## 42 0.8659809 6.472361e-01 2.342546e-21 8.297551e-01 5.002508e-21
## 43 0.8659703 5.685764e-01 7.159239e-22 1.179829e-20 6.679488e-21
## 44 0.7905436 9.052780e-22 4.272062e-23 6.045413e-01 7.283170e-01
## 45 0.7905488 4.188776e-01 4.670463e-01 4.577082e-01 5.348187e-01
## 46 0.8164613 8.066846e-01 6.425082e-01 7.848227e-01 1.506040e-22
## 47 0.8164381 4.891202e-01 2.538575e-23 2.982563e-22 1.835092e-22
## 48 0.9999779 8.064163e-01 1.697718e-18 8.290164e-01 5.506197e-19
## 49 0.9999913 7.669966e-01 5.742673e-19 9.505916e-01 3.650390e-19
## 50 0.8660035 6.456660e-01 6.967072e-01 6.313852e-01 1.171183e-21
## 51 0.5229913 4.462874e-01 5.632490e-01 4.689759e-01 3.482099e-32
## 52 0.7637309 4.187387e-01 5.164041e-01 7.381728e-01 2.497936e-25
## 53 0.4892523 4.458272e-01 5.297191e-01 1.019967e-24 6.020471e-29
## 54 0.7745648 3.917580e-01 4.724023e-32 5.859143e-01 4.689042e-01
##
## Residual Deviance: 3115.258
## AIC: 3535.258
# exp(coefficients(m.2)) # exp(confint(m.2))
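The NaN standard errors are a symptom of over-parameterization: treating uwes_tol as nominal gives 42 non-reference score levels x 5 terms = 210 coefficients estimated from 532 observations, and many level-by-job cells are empty or perfectly separated (note the coefficients around +/-40). If a flat table of the estimates is still wanted, broom (loaded earlier) can provide one; a sketch, keeping in mind that the inferential columns inherit the unstable standard errors:
tidy(m.2, exponentiate = TRUE)  # one row per (score level, term) pair, relative-risk-ratio scale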
library(bayesplot)
## This is bayesplot version 1.10.0
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
## * Does _not_ affect other ggplot2 plots
## * See ?bayesplot_theme_set for details on theme setting
library(ggplot2)
# Density of the UWES total by job type
p = ggplot(thuan, aes(x = uwes_tol, group = congviec))
p + geom_density(aes(fill = congviec), alpha = 0.5) + theme_light(base_size = 12)
library(rstan)
## Loading required package: StanHeaders
##
## rstan version 2.26.13 (Stan version 2.26.1)
## For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores()).
## To avoid recompilation of unchanged Stan programs, we recommend calling
## rstan_options(auto_write = TRUE)
## For within-chain threading using `reduce_sum()` or `map_rect()` Stan functions,
## change `threads_per_chain` option:
## rstan_options(threads_per_chain = 1)
## Do not specify '-march=native' in 'LOCAL_CPPFLAGS' or a Makevars file
##
## Attaching package: 'rstan'
## The following object is masked from 'package:coda':
##
## traceplot
## The following object is masked from 'package:psych':
##
## lookup
## The following object is masked from 'package:tidyr':
##
## extract
library(shinystan)
## Loading required package: shiny
##
## This is shinystan version 2.6.0
library(rstanarm)
library(psych)
library(brms)
set.seed(123)
# Default brms priors. Two caveats: congviec*gioi already expands to both
# main effects plus the interaction, and the priors are requested for
# uwes_tol while the model below is fit to uwes1, so the default intercept
# prior is centred on the wrong scale (the data dominate here, but formula
# and prior should refer to the same response)
prior0 = get_prior(uwes_tol ~ congviec + gioi + congviec*gioi, family = gaussian, data = thuan)
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
# Gaussian model for item uwes1 with a job-type x gender interaction;
# two short chains (iter = 1000, half warmup) keep the run fast
bf.1 = brm(data = thuan, uwes1 ~ congviec + gioi + congviec*gioi,
           prior = prior0, family = gaussian, chains = 2, iter = 1000)
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000132 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.32 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 1000 [ 0%] (Warmup)
## Chain 1: Iteration: 100 / 1000 [ 10%] (Warmup)
## Chain 1: Iteration: 200 / 1000 [ 20%] (Warmup)
## Chain 1: Iteration: 300 / 1000 [ 30%] (Warmup)
## Chain 1: Iteration: 400 / 1000 [ 40%] (Warmup)
## Chain 1: Iteration: 500 / 1000 [ 50%] (Warmup)
## Chain 1: Iteration: 501 / 1000 [ 50%] (Sampling)
## Chain 1: Iteration: 600 / 1000 [ 60%] (Sampling)
## Chain 1: Iteration: 700 / 1000 [ 70%] (Sampling)
## Chain 1: Iteration: 800 / 1000 [ 80%] (Sampling)
## Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling)
## Chain 1: Iteration: 1000 / 1000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.086 seconds (Warm-up)
## Chain 1: 0.075 seconds (Sampling)
## Chain 1: 0.161 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1.8e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 1000 [ 0%] (Warmup)
## Chain 2: Iteration: 100 / 1000 [ 10%] (Warmup)
## Chain 2: Iteration: 200 / 1000 [ 20%] (Warmup)
## Chain 2: Iteration: 300 / 1000 [ 30%] (Warmup)
## Chain 2: Iteration: 400 / 1000 [ 40%] (Warmup)
## Chain 2: Iteration: 500 / 1000 [ 50%] (Warmup)
## Chain 2: Iteration: 501 / 1000 [ 50%] (Sampling)
## Chain 2: Iteration: 600 / 1000 [ 60%] (Sampling)
## Chain 2: Iteration: 700 / 1000 [ 70%] (Sampling)
## Chain 2: Iteration: 800 / 1000 [ 80%] (Sampling)
## Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling)
## Chain 2: Iteration: 1000 / 1000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.118 seconds (Warm-up)
## Chain 2: 0.069 seconds (Sampling)
## Chain 2: 0.187 seconds (Total)
## Chain 2:
summary(bf.1)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: uwes1 ~ congviec + gioi + congviec * gioi
## Data: thuan (Number of observations: 532)
## Draws: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup draws = 1000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 4.21 0.16 3.88 4.52 1.00 538 689
## congviecBB 0.39 0.26 -0.09 0.89 1.00 586 584
## congviecCC 0.09 0.28 -0.45 0.67 1.00 603 617
## congviecDD 0.74 0.27 0.23 1.26 1.00 713 595
## congviecEE 0.42 0.48 -0.50 1.35 1.00 804 727
## gioinu -0.37 0.24 -0.86 0.09 1.00 508 637
## congviecBB:gioinu 0.43 0.33 -0.20 1.07 1.00 552 611
## congviecCC:gioinu 0.36 0.44 -0.48 1.20 1.00 623 509
## congviecDD:gioinu -0.18 0.46 -1.05 0.76 1.00 816 739
## congviecEE:gioinu 0.03 0.57 -1.17 1.05 1.00 683 706
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 1.43 0.04 1.35 1.52 1.00 1097 594
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
bf.1$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=1000; warmup=500; thin=1;
## post-warmup draws per chain=500, total post-warmup draws=1000.
##
## mean se_mean sd 2.5% 25% 50% 75%
## b_Intercept 4.21 0.01 0.16 3.88 4.10 4.21 4.32
## b_congviecBB 0.39 0.01 0.26 -0.09 0.22 0.39 0.56
## b_congviecCC 0.09 0.01 0.28 -0.45 -0.09 0.10 0.29
## b_congviecDD 0.74 0.01 0.27 0.23 0.55 0.74 0.92
## b_congviecEE 0.42 0.02 0.48 -0.50 0.09 0.41 0.73
## b_gioinu -0.37 0.01 0.24 -0.86 -0.53 -0.36 -0.20
## b_congviecBB:gioinu 0.43 0.01 0.33 -0.20 0.23 0.43 0.64
## b_congviecCC:gioinu 0.36 0.02 0.44 -0.48 0.07 0.36 0.65
## b_congviecDD:gioinu -0.18 0.02 0.46 -1.05 -0.47 -0.20 0.14
## b_congviecEE:gioinu 0.03 0.02 0.57 -1.17 -0.35 0.02 0.43
## sigma 1.43 0.00 0.04 1.35 1.40 1.43 1.46
## lprior -9.36 0.00 0.00 -9.37 -9.36 -9.36 -9.36
## lp__ -954.03 0.11 2.35 -959.42 -955.41 -953.61 -952.35
## 97.5% n_eff Rhat
## b_Intercept 4.52 531 1.00
## b_congviecBB 0.89 565 1.00
## b_congviecCC 0.67 593 1.00
## b_congviecDD 1.26 711 1.00
## b_congviecEE 1.35 795 1.00
## b_gioinu 0.09 503 1.00
## b_congviecBB:gioinu 1.07 530 1.00
## b_congviecCC:gioinu 1.20 604 1.00
## b_congviecDD:gioinu 0.76 817 1.00
## b_congviecEE:gioinu 1.05 671 1.00
## sigma 1.52 1069 1.00
## lprior -9.35 1422 1.00
## lp__ -950.60 427 1.01
##
## Samples were drawn using NUTS(diag_e) at Fri Nov 18 22:05:36 2022.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
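All Rhat values sit at 1.00-1.01 and the bulk/tail effective sample sizes are in the hundreds, so the two short chains look adequate. A posterior predictive check is a natural next step; a minimal sketch with brms's pp_check():
pp_check(bf.1, ndraws = 100)  # overlay 100 simulated uwes1 distributions on the observed one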
# Cell-means parameterization: dropping the intercept (- 1) yields one
# coefficient per job-type mean instead of contrasts against AA
prior1 = get_prior(uwes_tol ~ congviec - 1, family = gaussian, data = thuan)
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
# Same cell-means model fit with brms
bf.2 = brm(data = thuan, uwes_tol ~ congviec - 1, prior = prior1,
           family = gaussian, chains = 2, iter = 1000)
## Found more than one class "family" in cache; using the first, from namespace 'lme4'
## Also defined by 'MatrixModels'
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 3.3e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.33 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 1000 [ 0%] (Warmup)
## Chain 1: Iteration: 100 / 1000 [ 10%] (Warmup)
## Chain 1: Iteration: 200 / 1000 [ 20%] (Warmup)
## Chain 1: Iteration: 300 / 1000 [ 30%] (Warmup)
## Chain 1: Iteration: 400 / 1000 [ 40%] (Warmup)
## Chain 1: Iteration: 500 / 1000 [ 50%] (Warmup)
## Chain 1: Iteration: 501 / 1000 [ 50%] (Sampling)
## Chain 1: Iteration: 600 / 1000 [ 60%] (Sampling)
## Chain 1: Iteration: 700 / 1000 [ 70%] (Sampling)
## Chain 1: Iteration: 800 / 1000 [ 80%] (Sampling)
## Chain 1: Iteration: 900 / 1000 [ 90%] (Sampling)
## Chain 1: Iteration: 1000 / 1000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.14 seconds (Warm-up)
## Chain 1: 0.04 seconds (Sampling)
## Chain 1: 0.18 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 9e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 1000 [ 0%] (Warmup)
## Chain 2: Iteration: 100 / 1000 [ 10%] (Warmup)
## Chain 2: Iteration: 200 / 1000 [ 20%] (Warmup)
## Chain 2: Iteration: 300 / 1000 [ 30%] (Warmup)
## Chain 2: Iteration: 400 / 1000 [ 40%] (Warmup)
## Chain 2: Iteration: 500 / 1000 [ 50%] (Warmup)
## Chain 2: Iteration: 501 / 1000 [ 50%] (Sampling)
## Chain 2: Iteration: 600 / 1000 [ 60%] (Sampling)
## Chain 2: Iteration: 700 / 1000 [ 70%] (Sampling)
## Chain 2: Iteration: 800 / 1000 [ 80%] (Sampling)
## Chain 2: Iteration: 900 / 1000 [ 90%] (Sampling)
## Chain 2: Iteration: 1000 / 1000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.151 seconds (Warm-up)
## Chain 2: 0.032 seconds (Sampling)
## Chain 2: 0.183 seconds (Total)
## Chain 2:
summary(bf.2)
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: uwes_tol ~ congviec - 1
## Data: thuan (Number of observations: 532)
## Draws: 2 chains, each with iter = 1000; warmup = 500; thin = 1;
## total post-warmup draws = 1000
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## congviecAA 37.87 0.96 36.02 39.87 1.01 1410 741
## congviecBB 41.71 0.75 40.34 43.25 1.01 1547 701
## congviecCC 37.56 1.39 34.77 40.19 1.00 1629 814
## congviecDD 42.75 1.40 39.99 45.54 1.00 1383 650
## congviecEE 38.75 1.99 34.90 42.63 1.00 1430 796
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 11.41 0.33 10.77 12.07 1.00 1562 845
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
bf.2$fit
## Inference for Stan model: anon_model.
## 2 chains, each with iter=1000; warmup=500; thin=1;
## post-warmup draws per chain=500, total post-warmup draws=1000.
##
## mean se_mean sd 2.5% 25% 50% 75% 97.5%
## b_congviecAA 37.87 0.03 0.96 36.02 37.22 37.90 38.49 39.87
## b_congviecBB 41.71 0.02 0.75 40.34 41.20 41.71 42.19 43.25
## b_congviecCC 37.56 0.03 1.39 34.77 36.65 37.57 38.52 40.19
## b_congviecDD 42.75 0.04 1.40 39.99 41.86 42.78 43.64 45.54
## b_congviecEE 38.75 0.05 1.99 34.90 37.40 38.73 40.14 42.63
## sigma 11.41 0.01 0.33 10.77 11.19 11.39 11.63 12.07
## lprior -3.32 0.00 0.03 -3.37 -3.34 -3.32 -3.30 -3.27
## lp__ -2050.63 0.08 1.65 -2054.68 -2051.58 -2050.38 -2049.36 -2048.25
## n_eff Rhat
## b_congviecAA 1393 1
## b_congviecBB 1490 1
## b_congviecCC 1611 1
## b_congviecDD 1368 1
## b_congviecEE 1513 1
## sigma 1501 1
## lprior 1500 1
## lp__ 470 1
##
## Samples were drawn using NUTS(diag_e) at Fri Nov 18 22:06:26 2022.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
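As a sanity check, these cell means agree with the MCMCregress contrasts earlier: the intercept 37.59 plus congviecBB 4.10 gives 41.69, matching b_congviecBB = 41.71 here, and 37.59 + 5.10 = 42.69 matches b_congviecDD = 42.75 within Monte Carlo error.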
pairs(bf.2)  # pairwise posterior scatterplots of the cell means and sigma
plot(bf.2, ignore_prior = TRUE, theme = ggplot2::theme())  # trace and density plots
marginal_effects(bf.2, probs = c(0.05, 0.95), conditions = congviec)  # deprecated call; current form sketched after the warnings
## Warning: Method 'marginal_effects' is deprecated. Please use
## 'conditional_effects' instead.
## Warning: Argument 'probs' is deprecated. Please use 'prob' instead.
## Warning: The following variables in 'conditions' are not part of the model:
## 'conditions'
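As the warnings say, marginal_effects() is deprecated and the conditions argument was not interpreted as intended. The current equivalent, requesting the same 90% band through prob, is sketched below:
conditional_effects(bf.2, prob = 0.90)  # posterior mean of each job type with 90% credible bands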
# launch_shinystan(bf.1, rstudio = getOption("shinystan.rstudio"))
hypothesis(bf.2, "congviecAA > congviecCC", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (congviecAA)-(con... > 0 0.31 1.75 -2.6 3.17 1.24
## Post.Prob Star
## 1 0.55
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "congviecBB > congviecCC", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (congviecBB)-(con... > 0 4.16 1.6 1.5 6.91 999
## Post.Prob Star
## 1 1 *
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
hypothesis(bf.2, "congviecEE > congviecCC", digits = 4)
## Hypothesis Tests for class b:
## Hypothesis Estimate Est.Error CI.Lower CI.Upper Evid.Ratio
## 1 (congviecEE)-(con... > 0 1.19 2.39 -2.81 5.11 2.26
## Post.Prob Star
## 1 0.69
## ---
## 'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
## '*': For one-sided hypotheses, the posterior probability exceeds 95%;
## for two-sided hypotheses, the value tested against lies outside the 95%-CI.
## Posterior probabilities of point hypotheses assume equal prior probabilities.
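The same comparisons can be read directly off the posterior draws, which makes the Post.Prob column transparent; a sketch using the draws accessor that brms re-exports from the posterior package:
draws = as_draws_df(bf.2)
mean(draws$b_congviecBB > draws$b_congviecCC)  # ~1.00, as in Post.Prob above
mean(draws$b_congviecEE > draws$b_congviecCC)  # ~0.69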
# END