============================================================================================================
Part I: matrix data structure, ‘R2jags’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1018077 (unreliable estimates)

Part II: ragged data structure, ‘R2jags’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1021087

Part III: matrix data structure, ‘rstan’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1021097

Part IV: ragged data structure, ‘rstan’, & ‘bridgesampling’, https://rpubs.com/sherloconan/1022575

Part V: modeling fixed effects, https://rpubs.com/sherloconan/1070217

1. Getting Started

 

In Loftus and Masson (1994), to-be-recalled 20-word lists were presented at a rate of 1, 2, or 5 seconds per word. Suppose that the hypothetical experiment is run as a balanced one-way between-subjects design with three conditions (ten subjects per condition).

dat <- matrix(c(10,6,11,22,16,15,1,12,9,8,
                13,8,14,23,18,17,1,15,12,9,
                13,8,14,25,20,17,4,17,12,12), ncol=3,
              dimnames=list(NULL, paste0("Level",1:3))) #wide data format
# dat <- dat * 100

df <- data.frame("Level"=factor(rep(paste0("Level",1:3), each=10)),
                 "Response"=c(dat)) #long data format

 

Alternatively, test a simulated data set.

m1 <- 100; sd <- 20; n <- 24
m2 <- m1 - pwr::pwr.t.test(n=n, power=.8)$d * sd #83.47737
set.seed(277)
dat <- matrix(c(rnorm(n, m1, sd),
                rnorm(n, m2, sd)), ncol=2,
              dimnames=list(NULL, paste0("Level",1:2)))
# dat <- dat * 100

df <- data.frame("Level"=factor(rep(paste0("Level",1:2), each=n)),
                 "Response"=c(dat))
## The Bayes factors will be computed for the simulated data (in the second chunk).

 

Method: Constructing the Jeffreys-Zellner-Siow (JZS) Bayes factor.

\[\begin{align*} \tag{1} \mathcal{M}_1:\ Y_{ij}&=\mu+\sigma_\epsilon d_i+\epsilon_{ij}\\ \text{versus}\quad\mathcal{M}_0:\ Y_{ij}&=\mu+\epsilon_{ij},\qquad\epsilon_{ij}\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,\sigma_\epsilon^2) \text{ for } i=1,\dotsb,a;\ j=1,\dotsb,n. \end{align*}\]

\[\begin{equation} \tag{2} \pi(\mu,\sigma_\epsilon^2)\propto 1/\ \sigma_\epsilon^{2} \end{equation}\]

\[\begin{equation} \tag{3} d_i^\star\mid g\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g) \end{equation}\]

\[\begin{equation} \tag{4} g\sim\text{Scale-inv-}\chi^2(1,h^2) \end{equation}\]

\[\begin{equation} \tag{5} (d_1^\star,\dotsb,d_{a-1}^\star)=(d_1,\dotsb,d_a)\cdot\mathbf{Q} \end{equation}\]

\[\begin{equation} \tag{6} \mathbf{I}_a-\frac{1}{a}\mathbf{J}_a=\mathbf{Q}\cdot\mathbf{Q}^\top \end{equation}\]

\(\mathbf{Q}\) is an \(a\times(a-1)\) matrix whose columns are the \(a-1\) unit-length eigenvectors corresponding to the nonzero eigenvalues of the left-hand side of (6). For example, the projected main effect is \(d^\star=(d_1-d_2)/\sqrt{2}\) when \(a=2\). In the other direction (given \(d_1+d_2=0\)), \(d_1=d^\star/\sqrt{2}\) and \(d_2=-d^\star/\sqrt{2}\).
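As a quick numerical illustration (base R only; here with \(a=3\)), the columns of \(\mathbf{Q}\) can be taken as the unit eigenvectors of the centering matrix in (6):

a <- 3
S <- diag(a) - matrix(1, a, a)/a  # I_a - (1/a) J_a, the left-hand side of (6)
Q <- eigen(S)$vectors[, 1:(a-1)]  # unit eigenvectors of the a-1 nonzero eigenvalues
range(Q %*% t(Q) - S)             # ~0: Q satisfies (6)
round(crossprod(Q), 10)           # identity: the columns are orthonormal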

(JZS_BF <- anovaBF(Response~Level, data=df, progress=F))
## Bayes factor analysis
## --------------
## [1] Level : 17.5748 ±0%
## 
## Against denominator:
##   Intercept only 
## ---
## Bayes factor type: BFlinearModel, JZS
ttestBF(x=df$Response[df$Level=="Level1"], y=df$Response[df$Level=="Level2"]) #Same estimates when a=2
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 17.5748 ±0%
## 
## Against denominator:
##   Null, mu1-mu2 = 0 
## ---
## Bayes factor type: BFindepSample, JZS

Note: Fixed effects are assumed (see Part V on modeling fixed effects). With \(a=2\), the two-sample effect size is \(\delta=(\mu_1-\mu_2)/\sigma_\epsilon=\sqrt{2}\,d^\star\), so the fixed-effect scale \(h=0.5\) corresponds to ttestBF's default Cauchy scale \(r=0.5\sqrt{2}\approx0.707\), which is why the two estimates agree.

 

Markov chain Monte Carlo (MCMC) diagnostics: plots of iteration number versus sampled value for each variable in the chain.

set.seed(277)
draws <- posterior(JZS_BF, iterations=1e5, progress=F)
summary(draws)
## 
## Iterations = 1:1e+05
## Thinning interval = 1 
## Number of chains = 1 
## Sample size per chain = 1e+05 
## 
## 1. Empirical mean and standard deviation for each variable,
##    plus standard error of the mean:
## 
##                 Mean      SD Naive SE Time-series SE
## mu            95.152   3.137 0.009919       0.009899
## Level-Level1   8.871   3.150 0.009962       0.011177
## Level-Level2  -8.871   3.150 0.009962       0.011177
## sig2         475.052 103.102 0.326037       0.341818
## g_Level        2.961  50.387 0.159337       0.159337
## 
## 2. Quantiles for each variable:
## 
##                   2.5%      25%      50%     75%   97.5%
## mu            88.98314  93.0632  95.1624  97.233 101.331
## Level-Level1   2.73197   6.7557   8.8379  10.978  15.102
## Level-Level2 -15.10248 -10.9780  -8.8379  -6.756  -2.732
## sig2         314.51113 401.8004 461.5993 532.429 716.150
## g_Level        0.06614   0.2058   0.4338   1.084  12.514
plot(draws, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)

   

2A. Full Stan Model

 

The model specification (1)–(6) and the projection matrix \(\mathbf{Q}\) are as given in Section 1.

computeQ <- function(C) {
  #' reduced parametrization for the fixed treatment effects
  S <- diag(C) - matrix(1, nrow = C, ncol = C) / C
  e <- qr(S)
  Q <- qr.Q(e) %*% diag(sign(diag(qr.R(e))))
  Q[, which(abs(e$qraux) > 1e-3), drop = FALSE]
}
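For example, with two conditions the single column of computeQ(2) is proportional to \((1,-1)/\sqrt{2}\), matching \(d^\star=(d_1-d_2)/\sqrt{2}\) up to sign:

(Q <- computeQ(2))                                 # ±(1, -1)/sqrt(2)
round(crossprod(Q), 10)                            # 1: the column has unit length
range(Q %*% t(Q) - (diag(2) - matrix(1, 2, 2)/2))  # ~0: Q satisfies (6)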

 

Specify the full model \(\mathcal{M}_1\) in (1). stancodeM1 declares the reduced (fixed) treatment effects tf (i.e., \(\sigma_\epsilon d_i^\star\)) and recovers the full effects t = P * tf in the transformed parameters block. Compile and fit the model in Stan.

Note that to compute the (log) marginal likelihood for a Stan model, the model must be specified in a particular way. Instead of using "~" signs to specify distributions, we need to use the (log) density functions directly. The reason is that the "~" sign drops constant terms that are not needed for sampling from the posterior; for computing the marginal likelihood, however, these constants must be retained. For instance, instead of writing y ~ normal(mu, sigma); we would need to write target += normal_lpdf(y | mu, sigma); (Gronau, 2021). For the same reason, mu is given an explicit uniform_lpdf term below: a bounded parameter otherwise carries an implicit uniform prior whose normalizing constant would be missing from the target density.

stancodeM1 <- "
data {
  int<lower=1> N;        // number of subjects in the balanced group
  int<lower=2> C;        // number of conditions
  // vector[C] Y[N];        // responses [removed features]
  array[N] vector[C] Y;  // responses [Stan 2.26+ syntax for array declarations]
  matrix[C,C-1] P;       // projecting C-1 fixed effects into C
  real<lower=0> ht;      // scale parameter of the variance of the standardized reduced treatment effect
  real lwr;              // lower bound for the grand mean
  real upr;              // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  real<lower=0> sigma;               // standard deviation of the error
  real<lower=0> gt;                  // variance of the standardized reduced treatment effect
  vector[C-1] tf;                    // reduced treatment effect
}

transformed parameters {
  real<lower=0> eta;    // standard deviation of the reduced treatment effect
  vector[C] t;          // treatment effect
  eta = sigma * sqrt(gt);
  t = P * tf;
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += -log(sigma);                            // Jeffreys prior
  target += scaled_inv_chi_square_lpdf(gt | 1, ht); // Rouder et al. (2012)
  target += normal_lpdf(tf | 0, eta);
  for (i in 1:N) {
    target += normal_lpdf(Y[i] | mu + t, sigma);
  }
}"

stanmodelM1 <- stan_model(model_code=stancodeM1)
stanfitM1 <- sampling(stanmodelM1, data=list("N"=nrow(dat), "C"=ncol(dat), "Y"=dat, 
                                             "P"=computeQ(ncol(dat)), "ht"=.5,
                                             "lwr"=mean(dat)-6*sd(dat),
                                             "upr"=mean(dat)+6*sd(dat)),
                      iter=50000, warmup=10000, chains=4, seed=277, refresh=0)
## Warning: There were 2 divergent transitions after warmup. See
## https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.
## Warning: Examine the pairs() plot to diagnose sampling problems
rstan::summary(stanfitM1)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
               mean  se_mean        sd       2.5%        25%        50%        75%      97.5%      n_eff  Rhat
mu          95.1477   0.0082    3.1387    88.9933    93.0672    95.1424    97.2254   101.3541  144999.42     1
sigma       21.6619   0.0062    2.2968    17.7308    20.0464    21.4647    23.0673    26.6934  138429.98     1
gt           5.1012   1.1778  370.0124     0.0659     0.2064     0.4336     1.0737    12.6775   98685.66     1
tf[1]       12.5497   0.0120    4.4360     3.9274     9.5610    12.5303    15.5030    21.3025  136526.96     1
eta         21.1835   0.1987   43.6881     5.5480     9.7897    14.1783    22.2785    76.4993   48359.52     1
t[1]         8.8740   0.0085    3.1367     2.7771     6.7606     8.8602    10.9623    15.0631  136526.96     1
t[2]        -8.8740   0.0085    3.1367   -15.0631   -10.9623    -8.8602    -6.7606    -2.7771  136526.96     1
lp__      -222.7786   0.0059    1.4869  -226.5116  -223.5153  -222.4411  -221.6837  -220.9253   64504.57     1

   

2B. Null Stan Model

 

The model specification (1)–(6) is as given in Section 1.

 

Specify the null model \(\mathcal{M}_0\) in (1). Compile and fit the model in Stan.

stancodeM0 <- "
data {
  int<lower=1> N;        // total number of subjects
  vector[N] Y;           // responses
  real lwr;              // lower bound for the grand mean
  real upr;              // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  real<lower=0> sigma;               // standard deviation of the error
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += -log(sigma);             // Jeffreys prior
  target += normal_lpdf(Y | mu, sigma);
}"

stanmodelM0 <- stan_model(model_code=stancodeM0)
stanfitM0 <- sampling(stanmodelM0, data=list("N"=length(dat), "Y"=c(dat),
                                             "lwr"=mean(dat)-6*sd(dat),
                                             "upr"=mean(dat)+6*sd(dat)),
                      iter=50000, warmup=10000, chains=4, seed=277, refresh=0)

rstan::summary(stanfitM0)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
              mean  se_mean      sd       2.5%        25%        50%        75%      97.5%      n_eff  Rhat
mu         95.1550   0.0096  3.4545    88.3366    92.8554    95.1628    97.4477   101.9427  129408.41     1
sigma      23.7295   0.0073  2.5147    19.4183    21.9589    23.5224    25.2621    29.2509  118611.49     1
lp__     -221.2673   0.0038  1.0285  -224.0292  -221.6629  -220.9494  -220.5351  -220.2686   71402.53     1

   

3. Bayes Factor via Bridge Sampling

 

Compute log marginal likelihoods for full and null models via bridge sampling. Then, \(\textit{BF}_{10}=\frac{p(\text{Data}\ \mid\ \mathcal{M}_1)}{p(\text{Data}\ \mid\ \mathcal{M}_0)}\).

M1 <- bridge_sampler(stanfitM1, silent=T)
summary(M1)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -220.846
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 7.632878e-07
##  Coefficient of Variation: 0.0008736634
##  Percentage Error: 0%
## 
## Note:
## All error measures are approximate.
M0 <- bridge_sampler(stanfitM0, silent=T)
summary(M0)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -223.7116
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 9.081812e-08
##  Coefficient of Variation: 0.0003013604
##  Percentage Error: 0%
## 
## Note:
## All error measures are approximate.
bf(M1, M0)
## Estimated Bayes factor in favor of M1 over M0: 17.55912
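Equivalently, since each bridge_sampler object stores its estimate in logml, the Bayes factor is the exponentiated difference of the two log marginal likelihoods (this is how BFBS in the All-in-One R Script computes it):

exp(M1$logml - M0$logml)  # 17.55912, identical to bf(M1, M0)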

 

Check the Monte Carlo error of the Bayes factor estimate by repeating the whole procedure (model fitting plus bridge sampling) 50 times with different seeds.

num <- 50; BF <- PerErr.M1 <- PerErr.M0 <- numeric(num)
system.time(
for (i in 1:num) { #roughly 24 min of run time
  result <- BFBS(seed=i) #defined in All-in-One R Script
  BF[i] <- result$bf
  PerErr.M1[i] <- result$pererr.M1
  PerErr.M0[i] <- result$pererr.M0
})


range(PerErr.M1); range(PerErr.M0)
## [1] 0.0008621659 0.0008943735
## [1] 0.0002913398 0.0003053285

hist(BF,prob=T,col="white",yaxt="n",ylab="",
     xlab="Bayes factor estimates",main="")
lines(density(BF),col="red",lwd=2)
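Optionally, summarize the 50 estimates numerically (a small addition; it assumes the loop above has been run):

mean(BF); sd(BF)  # center and Monte Carlo spread of the Bayes factor estimates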

   

All-in-One R Script

 

library(rstan)
# devtools::install_github("quentingronau/bridgesampling@master")
library(bridgesampling)
library(BayesFactor)

run.this=F #if TRUE, test the hypothetical data; if FALSE, test the simulated data
if (run.this) {
  dat <- matrix(c(10,6,11,22,16,15,1,12,9,8,
                  13,8,14,23,18,17,1,15,12,9,
                  13,8,14,25,20,17,4,17,12,12), ncol=3,
                dimnames=list(NULL, paste0("Level",1:3))) #wide data format
  
  df <- data.frame("Level"=factor(rep(paste0("Level",1:3), each=10)),
                   "Response"=c(dat)) #long data format
} else {
  m1 <- 100; sd <- 20; n <- 24
  m2 <- m1 - pwr::pwr.t.test(n=n, power=.8)$d * sd #83.47737
  set.seed(277)
  dat <- matrix(c(rnorm(n, m1, sd),
                  rnorm(n, m2, sd)), ncol=2,
                dimnames=list(NULL, paste0("Level",1:2)))
  
  df <- data.frame("Level"=factor(rep(paste0("Level",1:2), each=n)),
                   "Response"=c(dat))
}

(JZS_BF <- anovaBF(Response~Level, data=df, progress=F))
# ttestBF(x=df$Response[df$Level=="Level1"], y=df$Response[df$Level=="Level2"]) #Same estimates when a=2
# set.seed(277)
# draws <- posterior(JZS_BF, iterations=1e5, progress=F)
# summary(draws)
# plot(draws, cex.lab=1.5, cex.axis=1.5, cex.main=1.5, cex.sub=1.5)


computeQ <- function(C) {
  #' reduced parametrization for the fixed treatment effects
  S <- diag(C) - matrix(1, nrow = C, ncol = C) / C
  e <- qr(S)
  Q <- qr.Q(e) %*% diag(sign(diag(qr.R(e))))
  Q[, which(abs(e$qraux) > 1e-3), drop = FALSE]
}

{
  stancodeM1 <- "
  data {
    int<lower=1> N;        // number of subjects in the balanced group
    int<lower=2> C;        // number of conditions
    // vector[C] Y[N];        // responses [removed features]
    array[N] vector[C] Y;  // responses [Stan 2.26+ syntax for array declarations]
    matrix[C,C-1] P;       // projecting C-1 fixed effects into C
    real<lower=0> ht;      // scale parameter of the variance of the standardized reduced treatment effect
    real lwr;              // lower bound for the grand mean
    real upr;              // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    real<lower=0> sigma;               // standard deviation of the error
    real<lower=0> gt;                  // variance of the standardized reduced treatment effect
    vector[C-1] tf;                    // reduced treatment effect
  }

  transformed parameters {
    real<lower=0> eta;    // standard deviation of the reduced treatment effect
    vector[C] t;          // treatment effect
    eta = sigma * sqrt(gt);
    t = P * tf;
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += -log(sigma);                            // Jeffreys prior
    target += scaled_inv_chi_square_lpdf(gt | 1, ht); // Rouder et al. (2012)
    target += normal_lpdf(tf | 0, eta);
    for (i in 1:N) {
      target += normal_lpdf(Y[i] | mu + t, sigma);
    }
}"

  stanmodelM1 <- stan_model(model_code=stancodeM1)

  stancodeM0 <- "
  data {
    int<lower=1> N;        // total number of subjects
    vector[N] Y;           // responses
    real lwr;              // lower bound for the grand mean
    real upr;              // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    real<lower=0> sigma;               // standard deviation of the error
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += -log(sigma);             // Jeffreys prior
    target += normal_lpdf(Y | mu, sigma);
  }"
  
  stanmodelM0 <- stan_model(model_code=stancodeM0)
} #Stan models


BFBS <- function(seed=277, diagnostics=F) {
  stanfitM0 <- sampling(stanmodelM0, data=list("N"=length(dat), "Y"=c(dat),
                                               "lwr"=mean(dat)-6*sd(dat),
                                               "upr"=mean(dat)+6*sd(dat)),
                        iter=50000, warmup=10000, chains=4,
                        seed=seed, refresh=0)
  stanfitM1 <- sampling(stanmodelM1, data=list("N"=nrow(dat), "C"=ncol(dat), "Y"=dat,
                                               "P"=computeQ(ncol(dat)), "ht"=.5,
                                               "lwr"=mean(dat)-6*sd(dat), "upr"=mean(dat)+6*sd(dat)),
                        iter=50000, warmup=10000, chains=4,
                        seed=seed, refresh=0)
  
  set.seed(seed)
  M0 <- bridge_sampler(stanfitM0, silent=T)
  
  set.seed(seed)
  M1 <- bridge_sampler(stanfitM1, silent=T)
  
  if (diagnostics) {
    list("bf"=exp(M1$logml - M0$logml),
         "pererr.M1"=error_measures(M1)$cv,
         "pererr.M0"=error_measures(M0)$cv,
         "stan.M1"=stanfitM1, "stan.M0"=stanfitM0)
  } else {
    list("bf"=exp(M1$logml - M0$logml),
         "pererr.M1"=error_measures(M1)$cv,
         "pererr.M0"=error_measures(M0)$cv)
  }
}

BFBS()


# MCMC <- BFBS(diag=T)
# rstan::summary(MCMC$stan.M1)[[1]]
# rstan::summary(MCMC$stan.M0)[[1]]

   

4. \(\times100\)

 

All the responses are multiplied by 100; the JZS Bayes factor should be unaffected by this rescaling.

if (run.this) { #hypothetical
  dat <- dat * 100
  df <- data.frame("Level"=factor(rep(paste0("Level",1:3), each=10)),
                   "Response"=c(dat))
} else { #simulated
  dat <- dat * 100
  df <- data.frame("Level"=factor(rep(paste0("Level",1:2), each=n)),
                   "Response"=c(dat))
}
## The Bayes factors will be computed for the simulated data (in the else-statement).

 

(JZS_BF <- anovaBF(Response~Level, data=df, progress=F))
## Bayes factor analysis
## --------------
## [1] Level : 17.5748 ±0%
## 
## Against denominator:
##   Intercept only 
## ---
## Bayes factor type: BFlinearModel, JZS
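This invariance is expected under the priors (2)–(4): rescaling the responses by \(c\) rescales \(\mu\), \(\sigma_\epsilon\), and the uniform bounds on \(\mu\) (which are computed from the data) by \(c\), while the standardized effects \(d_i\) and \(g\) are untouched. Sketching the change of variables, each marginal likelihood is multiplied by the same factor, so the ratio is unchanged:

\[\begin{equation*} m(c\,Y\mid\mathcal{M})=c^{-N}\,m(Y\mid\mathcal{M}) \quad\Longrightarrow\quad \textit{BF}_{10}=\frac{m(c\,Y\mid\mathcal{M}_1)}{m(c\,Y\mid\mathcal{M}_0)}=\frac{m(Y\mid\mathcal{M}_1)}{m(Y\mid\mathcal{M}_0)}, \end{equation*}\]

where \(N\) is the total number of observations. With \(N=48\) and \(c=100\), both log marginal likelihoods should shift by \(-N\log c\approx-221.05\) relative to Section 3 (e.g., \(-220.85-221.05\approx-441.90\) for \(\mathcal{M}_1\)), which the estimates below confirm.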

   

Compute log marginal likelihoods for full and null models via bridge sampling.

stanfitM1 <- sampling(stanmodelM1, data=list("N"=nrow(dat), "C"=ncol(dat), "Y"=dat, 
                                             "P"=computeQ(ncol(dat)), "ht"=.5,
                                             "lwr"=mean(dat)-6*sd(dat),
                                             "upr"=mean(dat)+6*sd(dat)),
                      iter=50000, warmup=10000, chains=4, seed=277, refresh=0)
## Warning: There were 8 divergent transitions after warmup. See
## https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.
## Warning: Examine the pairs() plot to diagnose sampling problems
rstan::summary(stanfitM1)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
             mean  se_mean         sd        2.5%         25%         50%         75%       97.5%      n_eff    Rhat
mu      9514.9527   0.8609   315.5928   8894.3144   9305.2899   9514.7799   9725.0819  10135.9677  134382.77  1.0000
sigma   2166.0133   0.6348   229.3674   1771.8823   2004.7855   2146.6428   2306.4131   2670.1726  130547.31  1.0000
gt         4.4915   0.7549   201.4105      0.0660      0.2064      0.4365      1.0774     12.5433   71175.15  1.0000
tf[1]   1255.9417   1.2117   442.3748    393.3307    957.4231   1254.5898   1550.1576   2132.9278  133291.32  1.0000
eta     2123.4853  18.2790  3984.4435    554.9997    980.5741   1422.8264   2230.0872   7627.0673   47514.88  1.0001
t[1]     888.0849   0.8568   312.8062    278.1268    677.0004    887.1289   1096.1270   1508.2077  133291.32  1.0000
t[2]    -888.0849   0.8568   312.8062  -1508.2077  -1096.1270   -887.1289   -677.0004   -278.1268  133291.32  1.0000
lp__    -448.4353   0.0059     1.4947   -452.1995   -449.1693   -448.0947   -447.3388   -446.5783   65172.07  1.0000
M1 <- bridge_sampler(stanfitM1, silent=T)
summary(M1)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -441.8976
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 5.231275e-06
##  Coefficient of Variation: 0.002287198
##  Percentage Error: 0%
## 
## Note:
## All error measures are approximate.
stanfitM0 <- sampling(stanmodelM0, data=list("N"=length(dat), "Y"=c(dat),
                                             "lwr"=mean(dat)-6*sd(dat),
                                             "upr"=mean(dat)+6*sd(dat)),
                      iter=50000, warmup=10000, chains=4, seed=277, refresh=0)
rstan::summary(stanfitM0)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
            mean  se_mean        sd       2.5%        25%        50%        75%       97.5%     n_eff  Rhat
mu      9515.487   0.9086  345.4563  8837.8582  9285.5813  9514.4101  9745.2412  10195.9477  144567.7     1
sigma   2374.411   0.6965  250.2960  1946.0886  2197.8732  2353.1920  2527.4301   2923.2654  129134.6     1
lp__    -442.311   0.0038    1.0161  -445.0409  -442.7037  -442.0015  -441.5866   -441.3168   72172.0     1
M0 <- bridge_sampler(stanfitM0, silent=T)
summary(M0)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -444.7601
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 8.615725e-08
##  Coefficient of Variation: 0.0002935256
##  Percentage Error: 0%
## 
## Note:
## All error measures are approximate.
bf(M1, M0) #Bayes factor
## Estimated Bayes factor in favor of M1 over M0: 17.50401

The estimate agrees with anovaBF (17.5748) and with the unscaled analysis in Section 3 (17.55912) up to Monte Carlo error, confirming the scale invariance of the JZS Bayes factor.