library(sparklyr)
## Warning: package 'sparklyr' was built under R version 4.4.2
##
## Attaching package: 'sparklyr'
## The following object is masked from 'package:stats':
##
## filter
# Enter the data into R
digits = 1
data <- data.frame(
Authors = c("Audras-Torrent (2021) Autistic", "Wechsler (2014b) Autistic",
"Wechsler (2014a) Autistic", "Kuenzel (2021) Autistic",
"Stephenson (2021) Autistic", "Becker (2021a) ADHD",
"Becker (2021b) ADHD", "Pauls (2018) ADHD",
"Wechsler (2014c) ADHD"),
N = c(121, 30, 32, 98, 349, 13, 62, 103, 48),
VCI_mean = c(100.8, 80.4, 102.5, 98, 95.5, 105.8, 97.47, 93.36, 97.8),
VCI_SD = c(20.2, 18.2, 14.4, 21.48, 17.16, 9.62, 14.37, 15.88, 11.4),
VSI_mean = c(102.2, 82.8, 100.7, 97, 97.05, 106, 99.18, 90.33, 97.3),
VSI_SD = c(14.6, 22.2, 17.1, 17.78, 16.8, 12.89, 13.63, 14.63, 16.7),
FRI_mean = c(102, 84.3, 100.9, 97, 95.86, 106.77, 97.9, 89.65, 97.6),
FRI_SD = c(14, 20.6, 17.5, 15.56, 17.84, 12.93, 13.98, 13.97, 13.4),
WMI_mean = c(94, 77.6, 95.4, 88, 88.71, 100.77, 93.48, 88.78, 94.8),
WMI_SD = c(13.6, 19.4, 16.8, 16.67, 16.39, 12.14, 13.28, 14.48, 13.3),
PSI_mean = c(93.2, 75.8, 89.4, 89, 84.15, 94.69, 90.48, 92.4, 94.2),
PSI_SD = c(14, 19, 18.4, 22.22, 16.77, 12.64, 12.3, 14.22, 13.9),
FSIQ_mean = c(98.9, 76.3, 98.3, 94, 90.41, 101.38, 93.87, 88.82, 95.6),
FSIQ_SD = c(15.7, 19.1, 17.4, 21.85, 17.53, 12.64, 12.98, 14.31, 11.7)
)
data$VCI_SE <- data$VCI_SD / sqrt(data$N)
data$VSI_SE <- data$VSI_SD / sqrt(data$N)
data$FRI_SE <- data$FRI_SD / sqrt(data$N)
data$WMI_SE <- data$WMI_SD / sqrt(data$N)
data$PSI_SE <- data$PSI_SD / sqrt(data$N)
data$FSIQ_SE <- data$FSIQ_SD / sqrt(data$N)
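As a quick check, the first study's standard error (VCI SD 20.2, N = 121) reproduces the sigma value reported in the bayesmeta output further below:
# Sanity check: SE = SD / sqrt(N) for Audras-Torrent (2021), VCI
20.2 / sqrt(121)   # = 1.836364, matching the sigma column in the VCI output
data$VCI_SE[1]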
library(bayesmeta)
## Loading required package: forestplot
## Warning: package 'forestplot' was built under R version 4.4.2
## Loading required package: grid
## Loading required package: checkmate
## Loading required package: abind
## Warning: package 'abind' was built under R version 4.4.1
## Loading required package: metafor
## Warning: package 'metafor' was built under R version 4.4.1
## Loading required package: Matrix
## Loading required package: metadat
## Loading required package: numDeriv
##
## Loading the 'metafor' package (version 4.6-0). For an
## introduction to the package please type: help(metafor)
## Loading required package: mvtnorm
## Warning: package 'mvtnorm' was built under R version 4.4.2
##
## Attaching package: 'bayesmeta'
## The following object is masked from 'package:stats':
##
## convolve
library(bayesplot)
## This is bayesplot version 1.11.1
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
## * Does _not_ affect other ggplot2 plots
## * See ?bayesplot_theme_set for details on theme setting
# Function that runs a Bayesian random-effects meta-analysis for one index
meta_analysis_bayesian <- function(data, mean_col, se_col, variable_name) {
  # bayesmeta evaluates the posterior by numerical integration (no MCMC),
  # so only the estimates, their standard errors, and labels are needed here.
  # The default (improper) uniform priors on mu and tau are used, as
  # reported in the output below.
  meta_model <- bayesmeta(
    y      = data[[mean_col]],
    sigma  = data[[se_col]],
    labels = data$Authors
  )
  cat("\nAnalysis results for variable:", variable_name, "\n")
  print(summary(meta_model))
  forestplot(meta_model, digits = 1,
             title = paste("Bayesian meta-analysis of", variable_name),
             xlab  = "Quoted estimate; shrinkage estimate",
             xlim  = c(80, 120),
             col   = fpColors(box = "blue", lines = "red", summary = "green")
  )
  return(meta_model)
}
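The results below use bayesmeta's default improper uniform priors on mu and tau, as shown in the printed summaries. If an explicit heterogeneity prior is preferred, bayesmeta takes a tau.prior argument; a minimal sketch (not used for the results reported here), refitting the VCI data with a Jeffreys prior:
# Sketch only: same VCI data, but with an explicit Jeffreys prior on tau.
# The printed results in this document come from the default uniform prior.
meta_VCI_jeffreys <- bayesmeta(
  y         = data$VCI_mean,
  sigma     = data$VCI_SE,
  labels    = data$Authors,
  tau.prior = "Jeffreys"
)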
meta_VCI <- meta_analysis_bayesian(data, "VCI_mean", "VCI_SE", "VCI")
##
## Analysis results for variable: VCI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 100.80 1.8363636
## Wechsler (2014b) Autistic 80.40 3.3228502
## Wechsler (2014a) Autistic 102.50 2.5455844
## Kuenzel (2021) Autistic 98.00 2.1698077
## Stephenson (2021) Autistic 95.50 0.9185537
## Becker (2021a) ADHD 105.80 2.6681079
## Becker (2021b) ADHD 97.47 1.8249918
## Pauls (2018) ADHD 93.36 1.5647029
## Wechsler (2014c) ADHD 97.80 1.6454483
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.642135 97.00790
## ML marginal 6.137108 97.02398
## MAP joint 5.642135 97.00790
## MAP marginal 6.137101 97.02422
##
## marginal posterior summary:
## tau mu theta
## mode 6.137101 97.024215 97.031564
## median 6.941423 96.990935 96.992859
## mean 7.472308 96.973601 96.973601
## sd 2.808224 2.755521 8.449936
## 95% lower 3.093212 91.413100 79.895798
## 95% upper 12.984751 102.484267 113.996674
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9384059
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.642135 97.00790
## ML marginal 6.137108 97.02398
## MAP joint 5.642135 97.00790
## MAP marginal 6.137101 97.02422
##
## marginal posterior summary:
## tau mu
## mode 6.137101 97.024215
## median 6.941423 96.990935
## mean 7.472308 96.973601
## sd 2.808224 2.755521
## 95% lower 3.093212 91.413100
## 95% upper 12.984751 102.484267
##
## (quoted intervals are shortest credible intervals.)
meta_VSI <- meta_analysis_bayesian(data, "VSI_mean", "VSI_SE", "VSI")
##
## Analysis results for variable: VSI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 102.20 1.3272727
## Wechsler (2014b) Autistic 82.80 4.0531469
## Wechsler (2014a) Autistic 100.70 3.0228815
## Kuenzel (2021) Autistic 97.00 1.7960512
## Stephenson (2021) Autistic 97.05 0.8992834
## Becker (2021a) ADHD 106.00 3.5750428
## Becker (2021b) ADHD 99.18 1.7310117
## Pauls (2018) ADHD 90.33 1.4415367
## Wechsler (2014c) ADHD 97.30 2.4104374
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.911203 97.14463
## ML marginal 5.397338 97.15185
## MAP joint 4.911957 97.14285
## MAP marginal 5.397341 97.15205
##
## marginal posterior summary:
## tau mu theta
## mode 5.397341 97.15205 97.16107
## median 6.171423 97.11950 97.12213
## mean 6.673515 97.10136 97.10136
## sd 2.633444 2.51768 7.60600
## 95% lower 2.595118 92.01487 81.68685
## 95% upper 11.813634 102.13620 112.45689
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9265767
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.911203 97.14463
## ML marginal 5.397338 97.15185
## MAP joint 4.911957 97.14285
## MAP marginal 5.397341 97.15205
##
## marginal posterior summary:
## tau mu
## mode 5.397341 97.15205
## median 6.171423 97.11950
## mean 6.673515 97.10136
## sd 2.633444 2.51768
## 95% lower 2.595118 92.01487
## 95% upper 11.813634 102.13620
##
## (quoted intervals are shortest credible intervals.)
meta_FRI <- meta_analysis_bayesian(data, "FRI_mean", "FRI_SE", "FRI")
##
## Analysis results for variable: FRI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 102.00 1.2727273
## Wechsler (2014b) Autistic 84.30 3.7610282
## Wechsler (2014a) Autistic 100.90 3.0935922
## Kuenzel (2021) Autistic 97.00 1.5717974
## Stephenson (2021) Autistic 95.86 0.9549533
## Becker (2021a) ADHD 106.77 3.5861368
## Becker (2021b) ADHD 97.90 1.7754618
## Pauls (2018) ADHD 89.65 1.3765050
## Wechsler (2014c) ADHD 97.60 1.9341234
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.958736 96.89960
## ML marginal 5.420331 96.89136
## MAP joint 4.960887 96.89958
## MAP marginal 5.420329 96.89127
##
## marginal posterior summary:
## tau mu theta
## mode 5.420329 96.891271 96.888944
## median 6.181766 96.895952 96.895611
## mean 6.676858 96.897190 96.897190
## sd 2.574177 2.501372 7.583901
## 95% lower 2.705716 91.874772 81.581268
## 95% upper 11.699984 101.923851 112.217594
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9314985
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.958736 96.89960
## ML marginal 5.420331 96.89136
## MAP joint 4.960887 96.89958
## MAP marginal 5.420329 96.89127
##
## marginal posterior summary:
## tau mu
## mode 5.420329 96.891271
## median 6.181766 96.895952
## mean 6.676858 96.897190
## sd 2.574177 2.501372
## 95% lower 2.705716 91.874772
## 95% upper 11.699984 101.923851
##
## (quoted intervals are shortest credible intervals.)
meta_WMI <- meta_analysis_bayesian(data, "WMI_mean", "WMI_SE", "WMI")
##
## Analysis results for variable: WMI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 94.00 1.2363636
## Wechsler (2014b) Autistic 77.60 3.5419392
## Wechsler (2014a) Autistic 95.40 2.9698485
## Kuenzel (2021) Autistic 88.00 1.6839243
## Stephenson (2021) Autistic 88.71 0.8773366
## Becker (2021a) ADHD 100.77 3.3670302
## Becker (2021b) ADHD 93.48 1.6865617
## Pauls (2018) ADHD 88.78 1.4267568
## Wechsler (2014c) ADHD 94.80 1.9196896
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.663912 91.32215
## ML marginal 5.155844 91.30997
## MAP joint 4.664074 91.32339
## MAP marginal 5.155805 91.30992
##
## marginal posterior summary:
## tau mu theta
## mode 5.155805 91.309922 91.30316
## median 5.866173 91.313953 91.31358
## mean 6.333346 91.313374 91.31337
## sd 2.544876 2.388393 7.23600
## 95% lower 2.314030 86.503307 76.65196
## 95% upper 11.323852 96.121553 105.97208
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9274184
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.663912 91.32215
## ML marginal 5.155844 91.30997
## MAP joint 4.664074 91.32339
## MAP marginal 5.155805 91.30992
##
## marginal posterior summary:
## tau mu
## mode 5.155805 91.309922
## median 5.866173 91.313953
## mean 6.333346 91.313374
## sd 2.544876 2.388393
## 95% lower 2.314030 86.503307
## 95% upper 11.323852 96.121553
##
## (quoted intervals are shortest credible intervals.)
meta_PSI <- meta_analysis_bayesian(data, "PSI_mean", "PSI_SE", "PSI")
##
## Analysis results for variable: PSI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 93.20 1.2727273
## Wechsler (2014b) Autistic 75.80 3.4689095
## Wechsler (2014a) Autistic 89.40 3.2526912
## Kuenzel (2021) Autistic 89.00 2.2445590
## Stephenson (2021) Autistic 84.15 0.8976775
## Becker (2021a) ADHD 94.69 3.5057052
## Becker (2021b) ADHD 90.48 1.5621016
## Pauls (2018) ADHD 92.40 1.4011382
## Wechsler (2014c) ADHD 94.20 2.0062922
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.484582 89.48527
## ML marginal 4.897355 89.50126
## MAP joint 4.485070 89.48601
## MAP marginal 4.897354 89.50157
##
## marginal posterior summary:
## tau mu theta
## mode 4.897354 89.501570 89.514939
## median 5.593034 89.458005 89.461895
## mean 6.044482 89.434507 89.434507
## sd 2.347872 2.292042 6.883301
## 95% lower 2.423531 84.802810 75.492194
## 95% upper 10.621827 93.998117 103.297677
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9155832
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.484582 89.48527
## ML marginal 4.897355 89.50126
## MAP joint 4.485070 89.48601
## MAP marginal 4.897354 89.50157
##
## marginal posterior summary:
## tau mu
## mode 4.897354 89.501570
## median 5.593034 89.458005
## mean 6.044482 89.434507
## sd 2.347872 2.292042
## 95% lower 2.423531 84.802810
## 95% upper 10.621827 93.998117
##
## (quoted intervals are shortest credible intervals.)
meta_FSIQ <- meta_analysis_bayesian(data, "FSIQ_mean", "FSIQ_SE", "FSIQ")
##
## Analysis results for variable: FSIQ
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 98.90 1.4272727
## Wechsler (2014b) Autistic 76.30 3.4871669
## Wechsler (2014a) Autistic 98.30 3.0759145
## Kuenzel (2021) Autistic 94.00 2.2071833
## Stephenson (2021) Autistic 90.41 0.9383594
## Becker (2021a) ADHD 101.38 3.5057052
## Becker (2021b) ADHD 93.87 1.6484616
## Pauls (2018) ADHD 88.82 1.4100062
## Wechsler (2014c) ADHD 95.60 1.6887495
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.826747 93.15122
## ML marginal 6.336101 93.15822
## MAP joint 5.825598 93.14903
## MAP marginal 6.336095 93.15834
##
## marginal posterior summary:
## tau mu theta
## mode 6.336095 93.15834 93.162254
## median 7.180328 93.14033 93.141364
## mean 7.735180 93.13098 93.130976
## sd 2.903899 2.85773 8.748224
## 95% lower 3.233576 87.37887 75.472031
## 95% upper 13.415834 98.85606 110.760173
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9454267
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.826747 93.15122
## ML marginal 6.336101 93.15822
## MAP joint 5.825598 93.14903
## MAP marginal 6.336095 93.15834
##
## marginal posterior summary:
## tau mu
## mode 6.336095 93.15834
## median 7.180328 93.14033
## mean 7.735180 93.13098
## sd 2.903899 2.85773
## 95% lower 3.233576 87.37887
## 95% upper 13.415834 98.85606
##
## (quoted intervals are shortest credible intervals.)
# Fit all six indices in a loop and keep the fitted models in a named list
variables <- list(
list(mean_col = "VCI_mean", se_col = "VCI_SE", name = "VCI"),
list(mean_col = "VSI_mean", se_col = "VSI_SE", name = "VSI"),
list(mean_col = "FRI_mean", se_col = "FRI_SE", name = "FRI"),
list(mean_col = "WMI_mean", se_col = "WMI_SE", name = "WMI"),
list(mean_col = "PSI_mean", se_col = "PSI_SE", name = "PSI"),
list(mean_col = "FSIQ_mean", se_col = "FSIQ_SE", name = "FSIQ")
)
models <- list()
for (var in variables) {
models[[var$name]] <- meta_analysis_bayesian(data,
var$mean_col,
var$se_col,
var$name)
}
##
## Analysis results for variable: VCI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 100.80 1.8363636
## Wechsler (2014b) Autistic 80.40 3.3228502
## Wechsler (2014a) Autistic 102.50 2.5455844
## Kuenzel (2021) Autistic 98.00 2.1698077
## Stephenson (2021) Autistic 95.50 0.9185537
## Becker (2021a) ADHD 105.80 2.6681079
## Becker (2021b) ADHD 97.47 1.8249918
## Pauls (2018) ADHD 93.36 1.5647029
## Wechsler (2014c) ADHD 97.80 1.6454483
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.642135 97.00790
## ML marginal 6.137108 97.02398
## MAP joint 5.642135 97.00790
## MAP marginal 6.137101 97.02422
##
## marginal posterior summary:
## tau mu theta
## mode 6.137101 97.024215 97.031564
## median 6.941423 96.990935 96.992859
## mean 7.472308 96.973601 96.973601
## sd 2.808224 2.755521 8.449936
## 95% lower 3.093212 91.413100 79.895798
## 95% upper 12.984751 102.484267 113.996674
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9384059
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.642135 97.00790
## ML marginal 6.137108 97.02398
## MAP joint 5.642135 97.00790
## MAP marginal 6.137101 97.02422
##
## marginal posterior summary:
## tau mu
## mode 6.137101 97.024215
## median 6.941423 96.990935
## mean 7.472308 96.973601
## sd 2.808224 2.755521
## 95% lower 3.093212 91.413100
## 95% upper 12.984751 102.484267
##
## (quoted intervals are shortest credible intervals.)
##
## Analysis results for variable: VSI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 102.20 1.3272727
## Wechsler (2014b) Autistic 82.80 4.0531469
## Wechsler (2014a) Autistic 100.70 3.0228815
## Kuenzel (2021) Autistic 97.00 1.7960512
## Stephenson (2021) Autistic 97.05 0.8992834
## Becker (2021a) ADHD 106.00 3.5750428
## Becker (2021b) ADHD 99.18 1.7310117
## Pauls (2018) ADHD 90.33 1.4415367
## Wechsler (2014c) ADHD 97.30 2.4104374
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.911203 97.14463
## ML marginal 5.397338 97.15185
## MAP joint 4.911957 97.14285
## MAP marginal 5.397341 97.15205
##
## marginal posterior summary:
## tau mu theta
## mode 5.397341 97.15205 97.16107
## median 6.171423 97.11950 97.12213
## mean 6.673515 97.10136 97.10136
## sd 2.633444 2.51768 7.60600
## 95% lower 2.595118 92.01487 81.68685
## 95% upper 11.813634 102.13620 112.45689
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9265767
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.911203 97.14463
## ML marginal 5.397338 97.15185
## MAP joint 4.911957 97.14285
## MAP marginal 5.397341 97.15205
##
## marginal posterior summary:
## tau mu
## mode 5.397341 97.15205
## median 6.171423 97.11950
## mean 6.673515 97.10136
## sd 2.633444 2.51768
## 95% lower 2.595118 92.01487
## 95% upper 11.813634 102.13620
##
## (quoted intervals are shortest credible intervals.)
##
## Analysis results for variable: FRI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 102.00 1.2727273
## Wechsler (2014b) Autistic 84.30 3.7610282
## Wechsler (2014a) Autistic 100.90 3.0935922
## Kuenzel (2021) Autistic 97.00 1.5717974
## Stephenson (2021) Autistic 95.86 0.9549533
## Becker (2021a) ADHD 106.77 3.5861368
## Becker (2021b) ADHD 97.90 1.7754618
## Pauls (2018) ADHD 89.65 1.3765050
## Wechsler (2014c) ADHD 97.60 1.9341234
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.958736 96.89960
## ML marginal 5.420331 96.89136
## MAP joint 4.960887 96.89958
## MAP marginal 5.420329 96.89127
##
## marginal posterior summary:
## tau mu theta
## mode 5.420329 96.891271 96.888944
## median 6.181766 96.895952 96.895611
## mean 6.676858 96.897190 96.897190
## sd 2.574177 2.501372 7.583901
## 95% lower 2.705716 91.874772 81.581268
## 95% upper 11.699984 101.923851 112.217594
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9314985
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.958736 96.89960
## ML marginal 5.420331 96.89136
## MAP joint 4.960887 96.89958
## MAP marginal 5.420329 96.89127
##
## marginal posterior summary:
## tau mu
## mode 5.420329 96.891271
## median 6.181766 96.895952
## mean 6.676858 96.897190
## sd 2.574177 2.501372
## 95% lower 2.705716 91.874772
## 95% upper 11.699984 101.923851
##
## (quoted intervals are shortest credible intervals.)
##
## Analysis results for variable: WMI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 94.00 1.2363636
## Wechsler (2014b) Autistic 77.60 3.5419392
## Wechsler (2014a) Autistic 95.40 2.9698485
## Kuenzel (2021) Autistic 88.00 1.6839243
## Stephenson (2021) Autistic 88.71 0.8773366
## Becker (2021a) ADHD 100.77 3.3670302
## Becker (2021b) ADHD 93.48 1.6865617
## Pauls (2018) ADHD 88.78 1.4267568
## Wechsler (2014c) ADHD 94.80 1.9196896
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.663912 91.32215
## ML marginal 5.155844 91.30997
## MAP joint 4.664074 91.32339
## MAP marginal 5.155805 91.30992
##
## marginal posterior summary:
## tau mu theta
## mode 5.155805 91.309922 91.30316
## median 5.866173 91.313953 91.31358
## mean 6.333346 91.313374 91.31337
## sd 2.544876 2.388393 7.23600
## 95% lower 2.314030 86.503307 76.65196
## 95% upper 11.323852 96.121553 105.97208
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9274184
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.663912 91.32215
## ML marginal 5.155844 91.30997
## MAP joint 4.664074 91.32339
## MAP marginal 5.155805 91.30992
##
## marginal posterior summary:
## tau mu
## mode 5.155805 91.309922
## median 5.866173 91.313953
## mean 6.333346 91.313374
## sd 2.544876 2.388393
## 95% lower 2.314030 86.503307
## 95% upper 11.323852 96.121553
##
## (quoted intervals are shortest credible intervals.)
##
## Analysis results for variable: PSI
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 93.20 1.2727273
## Wechsler (2014b) Autistic 75.80 3.4689095
## Wechsler (2014a) Autistic 89.40 3.2526912
## Kuenzel (2021) Autistic 89.00 2.2445590
## Stephenson (2021) Autistic 84.15 0.8976775
## Becker (2021a) ADHD 94.69 3.5057052
## Becker (2021b) ADHD 90.48 1.5621016
## Pauls (2018) ADHD 92.40 1.4011382
## Wechsler (2014c) ADHD 94.20 2.0062922
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.484582 89.48527
## ML marginal 4.897355 89.50126
## MAP joint 4.485070 89.48601
## MAP marginal 4.897354 89.50157
##
## marginal posterior summary:
## tau mu theta
## mode 4.897354 89.501570 89.514939
## median 5.593034 89.458005 89.461895
## mean 6.044482 89.434507 89.434507
## sd 2.347872 2.292042 6.883301
## 95% lower 2.423531 84.802810 75.492194
## 95% upper 10.621827 93.998117 103.297677
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9155832
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 4.484582 89.48527
## ML marginal 4.897355 89.50126
## MAP joint 4.485070 89.48601
## MAP marginal 4.897354 89.50157
##
## marginal posterior summary:
## tau mu
## mode 4.897354 89.501570
## median 5.593034 89.458005
## mean 6.044482 89.434507
## sd 2.347872 2.292042
## 95% lower 2.423531 84.802810
## 95% upper 10.621827 93.998117
##
## (quoted intervals are shortest credible intervals.)
##
## Analysis results for variable: FSIQ
## 'bayesmeta' object.
## data (9 estimates):
## y sigma
## Audras-Torrent (2021) Autistic 98.90 1.4272727
## Wechsler (2014b) Autistic 76.30 3.4871669
## Wechsler (2014a) Autistic 98.30 3.0759145
## Kuenzel (2021) Autistic 94.00 2.2071833
## Stephenson (2021) Autistic 90.41 0.9383594
## Becker (2021a) ADHD 101.38 3.5057052
## Becker (2021b) ADHD 93.87 1.6484616
## Pauls (2018) ADHD 88.82 1.4100062
## Wechsler (2014c) ADHD 95.60 1.6887495
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.826747 93.15122
## ML marginal 6.336101 93.15822
## MAP joint 5.825598 93.14903
## MAP marginal 6.336095 93.15834
##
## marginal posterior summary:
## tau mu theta
## mode 6.336095 93.15834 93.162254
## median 7.180328 93.14033 93.141364
## mean 7.735180 93.13098 93.130976
## sd 2.903899 2.85773 8.748224
## 95% lower 3.233576 87.37887 75.472031
## 95% upper 13.415834 98.85606 110.760173
##
## (quoted intervals are shortest credible intervals.)
##
## relative heterogeneity I^2 (posterior median): 0.9454267
## 'bayesmeta' object.
##
## 9 estimates:
## Audras-Torrent (2021) Autistic, Wechsler (2014b) Autistic, Wechsler (2014a) Autistic, Kuenzel (2021) Autistic, Stephenson (2021) Autistic, Becker (2021a) ADHD, Becker (2021b) ADHD, Pauls (2018) ADHD, Wechsler (2014c) ADHD
##
## tau prior (improper):
## uniform(min=0, max=Inf)
##
## mu prior (improper):
## uniform(min=-Inf, max=Inf)
##
## ML and MAP estimates:
## tau mu
## ML joint 5.826747 93.15122
## ML marginal 6.336101 93.15822
## MAP joint 5.825598 93.14903
## MAP marginal 6.336095 93.15834
##
## marginal posterior summary:
## tau mu
## mode 6.336095 93.15834
## median 7.180328 93.14033
## mean 7.735180 93.13098
## sd 2.903899 2.85773
## 95% lower 3.233576 87.37887
## 95% upper 13.415834 98.85606
##
## (quoted intervals are shortest credible intervals.)
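Since the loop stores every fitted model in the named list models, the pooled estimates can be collected into a single table; a minimal sketch, assuming each bayesmeta object exposes the printed marginal posterior summary as its summary element:
# Sketch: gather the pooled posterior median and 95% credible interval of mu
# for each index from the fitted models list (assumes the $summary matrix
# has the row/column names shown in the printed summaries above).
pooled <- do.call(rbind, lapply(names(models), function(nm) {
  s <- models[[nm]]$summary
  data.frame(Index   = nm,
             mu      = s["median", "mu"],
             lower95 = s["95% lower", "mu"],
             upper95 = s["95% upper", "mu"])
}))
pooled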
# end of the meta-analysis section #
library(tidyr)
## Warning: package 'tidyr' was built under R version 4.4.1
##
## Attaching package: 'tidyr'
## The following objects are masked from 'package:Matrix':
##
## expand, pack, unpack
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(ggplot2)
## Warning: package 'ggplot2' was built under R version 4.4.2
# Tag each study with its diagnostic group, taken from the study label
# (the original comparison against a non-existent "Author1" labelled every row ADHD)
data <- data %>%
  mutate(nhom = case_when(grepl("Autistic", Authors) ~ "Autistic",
                          TRUE ~ "ADHD"))
data_long <- data %>%
pivot_longer(cols = starts_with(c("VCI", "VSI", "FRI", "WMI", "PSI", "FSIQ")),
names_to = c("Index", ".value"),
names_sep = "_")
ggplot(data_long, aes(x = Index, y = mean, group = Authors, color = Authors)) +
  geom_line(linetype = "dotdash", linewidth = 0.5) +
  geom_point(size = 4, shape = 18) +
  geom_errorbar(aes(ymin = mean - SD, ymax = mean + SD),
                width = 0.3,
                linewidth = 0.5,
                color = "blue",
                linetype = "solid") +
  labs(title = "Comparison of index scores",
       x = "Figure 7: Comparison of index scores across studies",
       y = "Mean ± standard deviation") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
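Because each study now carries its diagnostic group in nhom, the same comparison can also be split into one panel per group; a small sketch reusing data_long:
# Sketch: facet the index comparison by diagnostic group (Autistic vs ADHD)
ggplot(data_long, aes(x = Index, y = mean, group = Authors, color = Authors)) +
  geom_line(linetype = "dotdash", linewidth = 0.5) +
  geom_point(size = 3, shape = 18) +
  facet_wrap(~ nhom) +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))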
library(bayesmeta)
library(bayesplot)
library(gridExtra)
##
## Attaching package: 'gridExtra'
## The following object is masked from 'package:dplyr':
##
## combine
library(grid)
forestplot_grob <- function(meta_model, title) {
  # Draw the forest plot, then capture the current grid page as a grob
  # so the individual plots can be arranged with gridExtra.
  forestplot(meta_model, digits = 1,
             title = title,
             xlab = "Quoted estimate; shrinkage estimate",
             col = fpColors(box = "blue", lines = "red", summary = "green"))
  return(grid.grab())
}
p1 <- forestplot_grob(meta_VCI, "Figure 1: Bayesian meta-analysis of VCI")
p2 <- forestplot_grob(meta_VSI, "Figure 2: Bayesian meta-analysis of VSI")
p3 <- forestplot_grob(meta_FRI, "Figure 3: Bayesian meta-analysis of FRI")
p4 <- forestplot_grob(meta_WMI, "Figure 4: Bayesian meta-analysis of WMI")
p5 <- forestplot_grob(meta_PSI, "Figure 5: Bayesian meta-analysis of PSI")
p6 <- forestplot_grob(meta_FSIQ, "Figure 6: Bayesian meta-analysis of FSIQ")
grid.arrange(p1)
grid.arrange(p2)
grid.arrange(p3)
grid.arrange(p4)
grid.arrange(p5)
grid.arrange(p6)
# grid.arrange(p5, p6, ncol = 2)
# Simulate individual-level index scores for each diagnostic group
# (630 = total autistic N across the included studies; 226 = total ADHD N)
Tu_ky <- list(
vci = rnorm(630, mean = 95.7, sd = 7.3),
vsi = rnorm(630, mean = 96.6, sd = 5.8),
fri = rnorm(630, mean = 96.5, sd = 5.4),
wmi = rnorm(630, mean = 89.1, sd = 5.4),
psi = rnorm(630, mean = 86.6, sd = 5.5),
fsiq = rnorm(630, mean = 91.8, sd = 7.6)
)
ADHD <- list(
vci = rnorm(226, mean = 98.3, sd = 4.7),
vsi = rnorm(226, mean = 96.6, sd = 6.5),
fri = rnorm(226, mean = 96.8, sd = 7.1),
wmi = rnorm(226, mean = 93.7, sd = 4.55),
psi = rnorm(226, mean = 92.3, sd = 4.5),
fsiq = rnorm(226, mean = 94.1, sd = 4.8)
)
data <- data.frame(
  vci  = c(Tu_ky$vci, ADHD$vci),
  vsi  = c(Tu_ky$vsi, ADHD$vsi),
  fri  = c(Tu_ky$fri, ADHD$fri),
  wmi  = c(Tu_ky$wmi, ADHD$wmi),
  psi  = c(Tu_ky$psi, ADHD$psi),
  fsiq = c(Tu_ky$fsiq, ADHD$fsiq),
  # one group label per simulated observation (630 Tu_ky, then 226 ADHD);
  # the original rep() calls without a times argument were silently recycled
  nhom = factor(c(rep("Tu_ky", 630), rep("ADHD", 226)))
)
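A quick check that the simulated data set has the intended group sizes and roughly the intended group means (up to sampling noise):
# Group sizes should be 630 (Tu_ky) and 226 (ADHD)
table(data$nhom)
# Per-group means should be close to the simulation parameters above
aggregate(cbind(vci, vsi, fri, wmi, psi, fsiq) ~ nhom, data = data, FUN = mean)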
library(brms)
## Warning: package 'brms' was built under R version 4.4.2
## Loading required package: Rcpp
## Warning: package 'Rcpp' was built under R version 4.4.2
## Loading 'brms' package (version 2.22.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
##
## Attaching package: 'brms'
## The following object is masked from 'package:bayesplot':
##
## rhat
## The following object is masked from 'package:stats':
##
## ar
variables <- c("vci", "vsi", "fri", "wmi", "psi", "fsiq")
# Fit one Bayesian Gaussian regression per index, comparing the two groups
models <- lapply(variables, function(var) {
brm(
as.formula(paste(var, "~ nhom")),
data = data,
family = gaussian(),
prior = c(
prior(normal(0, 10), class = "b"),
prior(normal(0, 10), class = "Intercept")
),
iter = 2000,
warmup = 1000,
chains = 2
)
})
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 6.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.68 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.048 seconds (Warm-up)
## Chain 1: 0.039 seconds (Sampling)
## Chain 1: 0.087 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 2.9e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.29 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.045 seconds (Warm-up)
## Chain 2: 0.036 seconds (Sampling)
## Chain 2: 0.081 seconds (Total)
## Chain 2:
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 5.5e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.55 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.054 seconds (Warm-up)
## Chain 1: 0.033 seconds (Sampling)
## Chain 1: 0.087 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1.6e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.16 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.066 seconds (Warm-up)
## Chain 2: 0.043 seconds (Sampling)
## Chain 2: 0.109 seconds (Total)
## Chain 2:
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 4.6e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.46 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.096 seconds (Warm-up)
## Chain 1: 0.048 seconds (Sampling)
## Chain 1: 0.144 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1.8e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.073 seconds (Warm-up)
## Chain 2: 0.049 seconds (Sampling)
## Chain 2: 0.122 seconds (Total)
## Chain 2:
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 5.2e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.52 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.062 seconds (Warm-up)
## Chain 1: 0.028 seconds (Sampling)
## Chain 1: 0.09 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1.3e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.13 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.07 seconds (Warm-up)
## Chain 2: 0.069 seconds (Sampling)
## Chain 2: 0.139 seconds (Total)
## Chain 2:
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000124 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.24 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.181 seconds (Warm-up)
## Chain 1: 0.081 seconds (Sampling)
## Chain 1: 0.262 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 2.2e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.22 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.123 seconds (Warm-up)
## Chain 2: 0.089 seconds (Sampling)
## Chain 2: 0.212 seconds (Total)
## Chain 2:
## Compiling Stan program...
## Start sampling
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000124 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.24 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.158 seconds (Warm-up)
## Chain 1: 0.07 seconds (Sampling)
## Chain 1: 0.228 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 2.3e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.23 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.155 seconds (Warm-up)
## Chain 2: 0.088 seconds (Sampling)
## Chain 2: 0.243 seconds (Total)
## Chain 2:
lapply(models, summary)
## [[1]]
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: vci ~ nhom
## Data: data (Number of observations: 856)
## Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 2000
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 96.57 0.31 95.99 97.17 1.00 2098 1448
## nhomTu_ky -0.39 0.43 -1.23 0.47 1.00 2515 1611
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 6.46 0.16 6.17 6.79 1.00 2248 1408
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
##
## [[2]]
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: vsi ~ nhom
## Data: data (Number of observations: 856)
## Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 2000
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 96.05 0.28 95.49 96.60 1.00 1987 1448
## nhomTu_ky -0.18 0.40 -0.96 0.63 1.00 1899 1444
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 5.97 0.15 5.69 6.27 1.00 1900 1374
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
##
## [[3]]
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: fri ~ nhom
## Data: data (Number of observations: 856)
## Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 2000
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 96.72 0.29 96.15 97.28 1.00 2162 1581
## nhomTu_ky -0.48 0.40 -1.27 0.30 1.00 2075 1581
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 5.86 0.13 5.60 6.13 1.00 1827 1475
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
##
## [[4]]
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: wmi ~ nhom
## Data: data (Number of observations: 856)
## Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 2000
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 90.41 0.26 89.91 90.92 1.00 1926 1557
## nhomTu_ky -0.35 0.37 -1.06 0.38 1.00 1920 1601
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 5.57 0.14 5.31 5.84 1.00 1830 1213
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
##
## [[5]]
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: psi ~ nhom
## Data: data (Number of observations: 856)
## Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 2000
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 88.01 0.28 87.47 88.56 1.00 2277 1580
## nhomTu_ky 0.11 0.40 -0.68 0.88 1.00 2225 1351
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 5.75 0.14 5.48 6.04 1.00 2116 1434
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
##
## [[6]]
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: fsiq ~ nhom
## Data: data (Number of observations: 856)
## Draws: 2 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup draws = 2000
##
## Regression Coefficients:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 92.45 0.32 91.82 93.05 1.00 2249 1668
## nhomTu_ky -0.03 0.46 -0.90 0.84 1.00 2173 1557
##
## Further Distributional Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 6.71 0.17 6.39 7.03 1.00 1954 1235
##
## Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
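Beyond the coefficient tables, the posterior probability of a group difference can be read from the draws directly; a minimal sketch using brms::hypothesis() on the first (vci) model, with the coefficient name nhomTu_ky taken from the summaries above:
# Sketch: one-sided posterior probability that the Tu_ky group scores lower on vci
hypothesis(models[[1]], "nhomTu_ky < 0")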