============================================================================================================
Part I: matrix data structure, ‘R2jags’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1018077 (unreliable estimates)
Part II: ragged data structure, ‘R2jags’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1021087
Part III: matrix data structure, ‘rstan’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1021097
Part IV: ragged data structure, ‘rstan’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1022575
Part V: modeling fixed effects, https://rpubs.com/sherloconan/1070217
In Loftus and Masson (1994), to-be-recalled 20-word lists were presented at a rate of 1, 2, or 5 seconds per word. Suppose that this hypothetical experiment is run as a balanced one-way between-subjects design with three conditions.
dat <- matrix(c(10,6,11,22,16,15,1,12,9,8,
                13,8,14,23,18,17,1,15,12,9,
                13,8,14,25,20,17,4,17,12,12), ncol=3,
              dimnames=list(NULL, paste0("Level",1:3))) #wide data format
# dat <- dat * 100
df <- data.frame("Level"=factor(rep(paste0("Level",1:3), each=10)),
                 "Response"=c(dat)) #long data format
Alternatively, test a simulated data set.
m1 <- 100; sd <- 20; n <- 24
m2 <- m1 - pwr::pwr.t.test(n=n, power=.8)$d * sd #83.47737
set.seed(277)
dat <- matrix(c(rnorm(n, m1, sd),
                rnorm(n, m2, sd)), ncol=2,
              dimnames=list(NULL, paste0("Level",1:2)))
# dat <- dat * 100
df <- data.frame("Level"=factor(rep(paste0("Level",1:2), each=n)),
                 "Response"=c(dat))
## The Bayes factors will be computed for the simulated data (in the second chunk).
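As a quick sanity check (not part of the original analysis), the observed group means of the simulated data should land near the population values of 100 and 83.48.
colMeans(dat) #observed means of Level1 and Level2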
Method: Constructing the Jeffreys-Zellner-Siow (JZS) Bayes factor.
\[\begin{align*} \tag{1} \mathcal{M}_1:\ Y_{ij}&=\mu+\sigma_\epsilon d_i+\epsilon_{ij}\\ \text{versus}\quad\mathcal{M}_0:\ Y_{ij}&=\mu+\epsilon_{ij},\qquad\epsilon_{ij}\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,\sigma_\epsilon^2) \text{ for } i=1,\dotsb,a;\ j=1,\dotsb,n. \end{align*}\]
\[\begin{equation} \tag{2} \pi(\mu,\sigma_\epsilon^2)\propto 1/\ \sigma_\epsilon^{2} \end{equation}\]
\[\begin{equation} \tag{3} d_i\mid g\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g) \end{equation}\]
\[\begin{equation} \tag{4} g\sim\text{Scale-inv-}\chi^2(1,h^2) \end{equation}\]
(JZS_BF <- anovaBF(Response~Level, data=df, whichRandom="Level", progress=F))
## Bayes factor analysis
## --------------
## [1] Level : 15.57637 ±0%
##
## Against denominator:
## Intercept only
## ---
## Bayes factor type: BFlinearModel, JZS
Note: Random effects are assumed. See the discussion here.
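For comparison, omitting the whichRandom argument treats Level as a fixed effect instead; that analysis is the subject of Part V. A minimal sketch (output not shown):
anovaBF(Response~Level, data=df, progress=F) #fixed-effect version; see Part V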
Markov chain Monte Carlo (MCMC) diagnostics. Plots of iterations versus sampled values for each variable in the chain.
set.seed(277)
draws <- posterior(JZS_BF, iterations=1e5, progress=F)
summary(draws)
##
## Iterations = 1:1e+05
## Thinning interval = 1
## Number of chains = 1
## Sample size per chain = 1e+05
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## mu 95.263 40.55 0.1282 0.1206
## Level-Level1 9.402 40.56 0.1283 0.1205
## Level-Level2 -9.648 40.55 0.1282 0.1206
## sig2 468.706 101.48 0.3209 0.3415
## g_Level 8.382 396.02 1.2523 1.6672
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## mu 40.0456 84.4686 95.087 105.916 149.69
## Level-Level1 -44.7282 -1.2284 9.296 20.156 65.37
## Level-Level2 -64.3941 -20.1608 -9.278 1.291 45.60
## sig2 309.8889 397.2583 454.970 525.217 705.63
## g_Level 0.1857 0.5092 1.031 2.485 27.46
plot(draws, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
Specify the full model \(\mathcal{M}_1\) in (1). Compile the model in JAGS. Note: JAGS does not have a diag() function, but does have a rep() function.
| Variable | Description |
|---|---|
| `mu` | condition mean vector (\(\mu_i=\mu+t_i\)) |
| `err_prec * R` | homoscedastic precision matrix of the error vector |
| `R` | identity matrix of size `nC` |
| `nS` | number of subjects in the balanced group |
| `nC` | number of conditions |
| `t` | treatment effect vector (\(t_i=\sigma_\epsilon d_i\)) |
| `mu_overall` | grand mean \(\mu\) |
| `lwr` | lower bound for the grand mean |
| `upr` | upper bound for the grand mean |
| `gt_prec` | precision of the standardized treatment effect \(d_i\) |
model.full <- "model {
for (j in 1:nS) {
Y[j, 1:nC] ~ dmnorm(mu[1:nC], err_prec * R)
}
for (i in 1:nC) {
mu[i] <- mu_overall + t[i]
t[i] ~ dnorm(0, err_prec * gt_prec)
}
mu_overall ~ dunif(lwr, upr)
##ALSO TRIED mu_overall ~ dnorm(0, 0.0001)
#gt_var ~ Scale-inv-chi-squared(df, h^2) <=>
#gt_var ~ Inv-gamma(df/2, scale = df*(h^2)/2) <=>
#gt_prec ~ Gamma(df/2, rate = df*(h^2)/2)
gt_prec ~ dgamma(.5, .5 * ht * ht)
err_prec ~ dgamma(gamma_shape, gamma_rate) #Jeffreys prior for the precision: p(𝜏) ∝ 1/𝜏
}"
datalist <- list("nC"=ncol(dat), "nS"=nrow(dat), "R"=diag(ncol(dat)), "Y"=dat,
                 "ht"=1, "lwr"=mean(dat)-6*sd(dat), "upr"=mean(dat)+6*sd(dat),
                 "gamma_shape"=0.001, "gamma_rate"=0.001)
Take a parameter vector, e.g., c(err_prec = 0.002, gt_prec = 0.916, mu_overall = 113.580, "mu[1]" = 107.985, "mu[2]" = 91.516, "t[1]" = -4.565, "t[2]" = -21.038) for \(a=2\), and the data as input, and return the log of the unnormalized posterior density (i.e., a scalar value).
log.posterior.full <- function(pars, data) {
  mu_overall <- pars["mu_overall"] #grand mean
  mu <- pars[c(4:(3+data$nC))] #condition means
  trt <- pars[c((4+data$nC):(3+2*data$nC))] #treatment effect
  gt_prec <- pars["gt_prec"] #precision of the standardized treatment effect
  gt_sd <- 1/sqrt(gt_prec) #standard deviation of the standardized treatment effect
  err_prec <- pars["err_prec"] #precision of the error term
  err_sd <- 1/sqrt(err_prec) #standard deviation of the error term
  Sigma <- diag(rep(1/err_prec, data$nC)) #covariance matrix of the error vector
  #log of the unnormalized posterior density
  dunif(mu_overall, data$lwr, data$upr, log=T) +
    sum(dnorm(trt, 0, err_sd * gt_sd, log=T)) +
    dgamma(gt_prec, 0.5, rate=0.5 * data$ht * data$ht, log=T) +
    dgamma(err_prec, data$gamma_shape, rate=data$gamma_rate, log=T) +
    sum(mvtnorm::dmvnorm(data$Y, mu, Sigma, log=T))
}
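For example, evaluating the function at the parameter vector quoted above returns a single finite value (the exact number depends on the data):
pars.eg <- c(err_prec=0.002, gt_prec=0.916, mu_overall=113.580,
             "mu[1]"=107.985, "mu[2]"=91.516, "t[1]"=-4.565, "t[2]"=-21.038)
log.posterior.full(pars.eg, datalist) #a scalar log density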
We did the debugging; see the discussion here.
The R2jags::jags (v 0.7-1) object sorts the parameters alphabetically, uppercase letters first (e.g., B, C, a, b, c).
And bridgesampling::bridge_sampler (v 1.1-2) somehow ignores all the parameters before "deviance". The GitHub version (e.g., v 1.1-5) may have fixed it.
devtools::install_github("quentingronau/bridgesampling@master")
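To confirm which build is installed:
packageVersion("bridgesampling") #e.g., the GitHub build 1.1-5 mentioned above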
Create named vectors with lower and upper bounds (lb and ub) for parameters.
ub.full <- rep(Inf, 3+2*datalist$nC)
names(ub.full) <- c("err_prec", "gt_prec", "mu_overall",
                    paste0("mu[",1:datalist$nC,"]"), paste0("t[",1:datalist$nC,"]"))
ub.full[3] <- datalist$upr
lb.full <- -ub.full
lb.full[c(1,2)] <- 0
lb.full[3] <- datalist$lwr
lb.full #lower bounds
## err_prec gt_prec mu_overall mu[1] mu[2] t[1] t[2]
## 0.0000 0.0000 -45.0145 -Inf -Inf -Inf -Inf
ub.full #upper bounds
## err_prec gt_prec mu_overall mu[1] mu[2] t[1] t[2]
## Inf Inf 235.3221 Inf Inf Inf Inf
Specify the null model \(\mathcal{M}_0\) in (1). Compile the model in JAGS. Note: JAGS does not have a diag() function, but does have a rep() function.
| Variable | Description |
|---|---|
| `err_prec * R` | homoscedastic precision matrix of the error vector |
| `R` | identity matrix of size `nC` |
| `nS` | number of subjects in the balanced group |
| `nC` | number of conditions |
| `mu_overall` | grand mean \(\mu\) |
| `lwr` | lower bound for the grand mean |
| `upr` | upper bound for the grand mean |
model.null <- "model {
for (j in 1:nS) {
Y[j, 1:nC] ~ dmnorm(rep(mu_overall, nC), err_prec * R)
}
mu_overall ~ dunif(lwr, upr)
err_prec ~ dgamma(gamma_shape, gamma_rate) #Jeffreys prior for the precision: p(𝜏) ∝ 1/𝜏
}"
Take a parameter vector and the data as input and return the log of the unnormalized posterior density (i.e., a scalar value).
log.posterior.null <- function(pars, data) {
  mu_overall <- pars["mu_overall"] #grand mean
  err_prec <- pars["err_prec"] #precision of the error term
  err_var <- 1/err_prec #variance of the error term
  Sigma <- diag(rep(err_var, data$nC)) #covariance matrix of the error vector
  #log of the unnormalized posterior density
  dunif(mu_overall, data$lwr, data$upr, log=T) +
    dgamma(err_prec, data$gamma_shape, rate=data$gamma_rate, log=T) +
    sum(mvtnorm::dmvnorm(data$Y, rep(mu_overall, data$nC), Sigma, log=T))
}
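As with the full model, evaluating the function at an arbitrary admissible point (illustrative values, not from the original post) should return a scalar:
log.posterior.null(c(err_prec=0.002, mu_overall=100), datalist[-5]) #a scalar log density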
Create named vectors with lower and upper bounds (lb and ub) for parameters.
(ub.null <- c("err_prec"=Inf, "mu_overall"=datalist$upr)) #upper bounds
## err_prec mu_overall
## Inf 235.3221
(lb.null <- c("err_prec"=0, "mu_overall"=datalist$lwr)) #lower bounds
## err_prec mu_overall
## 0.0000 -45.0145
Compute log marginal likelihoods for full and null models via bridge sampling. Then, \(\textit{BF}_{10}=\frac{p(\text{Data}\ \mid\ \mathcal{M}_1)}{p(\text{Data}\ \mid\ \mathcal{M}_0)}\).
BFBS <- function(seed=277, diagnostics=F) {
  set.seed(seed)
  jags.full <- jags(model.file=textConnection(model.full), data=datalist,
                    n.chains=2, n.iter=100000, n.burnin=50000, quiet=T, progress.bar="none",
                    inits=list(list("mu_overall"=mean(datalist$Y)*1.01),
                               list("mu_overall"=mean(datalist$Y)*0.99)),
                    parameters.to.save=c("err_prec", "gt_prec", "mu", "mu_overall", "t"))
  set.seed(seed)
  jags.null <- jags(model.file=textConnection(model.null),
                    data=datalist[-5], #drop "ht", which the null model does not use
                    n.chains=2, n.iter=100000, n.burnin=50000, quiet=T, progress.bar="none",
                    inits=list(list("mu_overall"=mean(datalist$Y)*1.01),
                               list("mu_overall"=mean(datalist$Y)*0.99)),
                    parameters.to.save=c("err_prec", "mu_overall"))
  set.seed(seed)
  bridge.full <- bridge_sampler(samples=jags.full, log_posterior=log.posterior.full,
                                data=datalist, lb=lb.full, ub=ub.full, silent=T)
  set.seed(seed)
  bridge.null <- bridge_sampler(samples=jags.null, log_posterior=log.posterior.null,
                                data=datalist[-5], lb=lb.null, ub=ub.null, silent=T)
  if (diagnostics) {
    list("bf"=exp(bridge.full$logml - bridge.null$logml),
         "pererr.M1"=error_measures(bridge.full)$cv,
         "pererr.M0"=error_measures(bridge.null)$cv,
         "jags.M1"=as.mcmc(jags.full), "jags.M0"=as.mcmc(jags.null))
  } else {
    list("bf"=exp(bridge.full$logml - bridge.null$logml),
         "pererr.M1"=error_measures(bridge.full)$cv,
         "pererr.M0"=error_measures(bridge.null)$cv)
  }
}
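Note that bridgesampling also exports a bf() helper, so the ratio inside BFBS() could equivalently be computed as below; the explicit exp() of the logml difference is kept above for transparency.
# bf(bridge.full, bridge.null) #equivalent to exp(bridge.full$logml - bridge.null$logml)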
BFBS() #ESTIMATE OF THE BAYES FACTOR SEEMS UNRELIABLE (AND SLOWER THAN PART II)
## $bf
## [1] 0.8208816
##
## $pererr.M1
## [1] 0.03183901
##
## $pererr.M0
## [1] 0.002921359
Check the post here.
Check the Monte Carlo error of estimating the Bayes factor.
num <- 50; BF <- PerErr.M1 <- PerErr.M0 <- numeric(num)
for (i in 1:num) {
  result <- BFBS(seed=i)
  BF[i] <- result$bf
  PerErr.M1[i] <- result$pererr.M1
  PerErr.M0[i] <- result$pererr.M0
}
range(PerErr.M1); range(PerErr.M0)
## [1] 0.01747061 0.04293378
## [1] 0.002238879 0.003902842
hist(BF, prob=T, col="white", yaxt="n", ylab="",
     xlab="Bayes factor estimates", main="")
lines(density(BF), col="red", lwd=2)
MCMC diagnostics. Plots of iterations versus sampled values for each variable in the chain.
MCMC <- BFBS(diagnostics=T); MCMC.M1 <- MCMC$jags.M1; MCMC.M0 <- MCMC$jags.M0
plot(MCMC.M1, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
plot(MCMC.M0, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
library(R2jags) #install JAGS first: http://mcmc-jags.sourceforge.net/
# devtools::install_github("quentingronau/bridgesampling@master")
library(bridgesampling)
require(pwr)
require(mvtnorm)
library(BayesFactor)
run.this <- F #if TRUE, test the hypothetical data; if FALSE, test the simulated data
if (run.this) {
  dat <- matrix(c(10,6,11,22,16,15,1,12,9,8,
                  13,8,14,23,18,17,1,15,12,9,
                  13,8,14,25,20,17,4,17,12,12), ncol=3,
                dimnames=list(NULL, paste0("Level",1:3))) #wide data format
  df <- data.frame("Level"=factor(rep(paste0("Level",1:3), each=10)),
                   "Response"=c(dat)) #long data format
} else {
  m1 <- 100; sd <- 20; n <- 24
  m2 <- m1 - pwr::pwr.t.test(n=n, power=.8)$d * sd #83.47737
  set.seed(277)
  dat <- matrix(c(rnorm(n, m1, sd),
                  rnorm(n, m2, sd)), ncol=2,
                dimnames=list(NULL, paste0("Level",1:2)))
  df <- data.frame("Level"=factor(rep(paste0("Level",1:2), each=n)),
                   "Response"=c(dat))
}
(JZS_BF <- anovaBF(Response~Level, data=df, whichRandom="Level", progress=F)) #Random effects are assumed.
# set.seed(277)
# draws <- posterior(JZS_BF, iterations=1e5, progress=F)
# summary(draws)
# plot(draws, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
{
  model.full <- "model {
    for (j in 1:nS) {
      Y[j, 1:nC] ~ dmnorm(mu[1:nC], err_prec * R)
    }
    for (i in 1:nC) {
      mu[i] <- mu_overall + t[i]
      t[i] ~ dnorm(0, err_prec * gt_prec)
    }
    mu_overall ~ dunif(lwr, upr)
    ##ALSO TRIED mu_overall ~ dnorm(0, 0.0001)
    #gt_var ~ Scale-inv-chi-squared(df, h^2) <=>
    #gt_var ~ Inv-gamma(df/2, scale = df*(h^2)/2) <=>
    #gt_prec ~ Gamma(df/2, rate = df*(h^2)/2)
    gt_prec ~ dgamma(.5, .5 * ht * ht)
    err_prec ~ dgamma(gamma_shape, gamma_rate) #approximates the Jeffreys prior for the precision: p(𝜏) ∝ 1/𝜏
  }"
  model.null <- "model {
    for (j in 1:nS) {
      Y[j, 1:nC] ~ dmnorm(rep(mu_overall, nC), err_prec * R)
    }
    mu_overall ~ dunif(lwr, upr)
    err_prec ~ dgamma(gamma_shape, gamma_rate) #approximates the Jeffreys prior for the precision: p(𝜏) ∝ 1/𝜏
  }"
} #JAGS models
datalist <- list("nC"=ncol(dat), "nS"=nrow(dat), "R"=diag(ncol(dat)), "Y"=dat,
                 "ht"=1, "lwr"=mean(dat)-6*sd(dat), "upr"=mean(dat)+6*sd(dat),
                 "gamma_shape"=0.001, "gamma_rate"=0.001)
{
  log.posterior.full <- function(pars, data) {
    mu_overall <- pars["mu_overall"] #grand mean
    mu <- pars[c(4:(3+data$nC))] #condition means
    trt <- pars[c((4+data$nC):(3+2*data$nC))] #treatment effect
    gt_prec <- pars["gt_prec"] #precision of the standardized treatment effect
    gt_sd <- 1/sqrt(gt_prec) #standard deviation of the standardized treatment effect
    err_prec <- pars["err_prec"] #precision of the error term
    err_sd <- 1/sqrt(err_prec) #standard deviation of the error term
    Sigma <- diag(rep(1/err_prec, data$nC)) #covariance matrix of the error vector
    #log of the unnormalized posterior density
    dunif(mu_overall, data$lwr, data$upr, log=T) +
      sum(dnorm(trt, 0, err_sd * gt_sd, log=T)) +
      dgamma(gt_prec, 0.5, rate=0.5 * data$ht * data$ht, log=T) +
      dgamma(err_prec, data$gamma_shape, rate=data$gamma_rate, log=T) +
      sum(mvtnorm::dmvnorm(data$Y, mu, Sigma, log=T))
  }
  log.posterior.null <- function(pars, data) {
    mu_overall <- pars["mu_overall"] #grand mean
    err_prec <- pars["err_prec"] #precision of the error term
    err_var <- 1/err_prec #variance of the error term
    Sigma <- diag(rep(err_var, data$nC)) #covariance matrix of the error vector
    #log of the unnormalized posterior density
    dunif(mu_overall, data$lwr, data$upr, log=T) +
      dgamma(err_prec, data$gamma_shape, rate=data$gamma_rate, log=T) +
      sum(mvtnorm::dmvnorm(data$Y, rep(mu_overall, data$nC), Sigma, log=T))
  }
} #log posterior functions
{
  ub.full <- rep(Inf, 3+2*datalist$nC)
  names(ub.full) <- c("err_prec", "gt_prec", "mu_overall",
                      paste0("mu[",1:datalist$nC,"]"), paste0("t[",1:datalist$nC,"]"))
  ub.full[3] <- datalist$upr
  lb.full <- -ub.full
  lb.full[c(1,2)] <- 0
  lb.full[3] <- datalist$lwr
  ub.null <- c("err_prec"=Inf, "mu_overall"=datalist$upr)
  lb.null <- c("err_prec"=0, "mu_overall"=datalist$lwr)
} #bounds for parameters
BFBS <- function(seed=277, diagnostics=F) {
  set.seed(seed)
  jags.full <- jags(model.file=textConnection(model.full), data=datalist,
                    n.chains=2, n.iter=100000, n.burnin=50000, quiet=T, progress.bar="none",
                    inits=list(list("mu_overall"=mean(datalist$Y)*1.01),
                               list("mu_overall"=mean(datalist$Y)*0.99)),
                    parameters.to.save=c("err_prec", "gt_prec", "mu", "mu_overall", "t"))
  set.seed(seed)
  jags.null <- jags(model.file=textConnection(model.null),
                    data=datalist[-5], #drop "ht", which the null model does not use
                    n.chains=2, n.iter=100000, n.burnin=50000, quiet=T, progress.bar="none",
                    inits=list(list("mu_overall"=mean(datalist$Y)*1.01),
                               list("mu_overall"=mean(datalist$Y)*0.99)),
                    parameters.to.save=c("err_prec", "mu_overall"))
  set.seed(seed)
  bridge.full <- bridge_sampler(samples=jags.full, log_posterior=log.posterior.full,
                                data=datalist, lb=lb.full, ub=ub.full, silent=T)
  set.seed(seed)
  bridge.null <- bridge_sampler(samples=jags.null, log_posterior=log.posterior.null,
                                data=datalist[-5], lb=lb.null, ub=ub.null, silent=T)
  if (diagnostics) {
    list("bf"=exp(bridge.full$logml - bridge.null$logml),
         "pererr.M1"=error_measures(bridge.full)$cv,
         "pererr.M0"=error_measures(bridge.null)$cv,
         "jags.M1"=as.mcmc(jags.full), "jags.M0"=as.mcmc(jags.null))
  } else {
    list("bf"=exp(bridge.full$logml - bridge.null$logml),
         "pererr.M1"=error_measures(bridge.full)$cv,
         "pererr.M0"=error_measures(bridge.null)$cv)
  }
}
BFBS() #ESTIMATE OF THE BAYES FACTOR SEEMS UNRELIABLE
# MCMC <- BFBS(diagnostics=T)
# MCMC.M1 <- MCMC$jags.M1
# MCMC.M0 <- MCMC$jags.M0
# plot(MCMC.M1, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)
# plot(MCMC.M0, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)