Part 1: Using JAGS, https://rpubs.com/sherloconan/1015445 (unreliable estimates)

Part 2: Using Stan, https://rpubs.com/sherloconan/1024130

The paradox on real data, https://rpubs.com/sherloconan/1033617

The paradox on simulated data, https://rpubs.com/sherloconan/1036792

Three ways to standardize effects, https://rpubs.com/sherloconan/1071855

Three ways to standardize effects (continued), https://rpubs.com/sherloconan/1071863

GLS and BIC approximation, https://rpubs.com/sherloconan/1093319

MORE: https://osf.io/5dm28/

1. Getting Started

 

The data set (long format) consists of four columns:

  • RT, the dependent variable: the number of seconds it took to complete a puzzle;

  • ID, a participant identifier;

  • shape and color, two factors that describe the type of puzzle solved.

shape and color each have two levels, and each of the 12 participants completed puzzles in each combination of shape and color. The design is thus a \(2\times2\) within-subject factorial (Morey, 2022). Let shape be factor A (with levels \(\alpha_i\)) and color be factor B (with levels \(\beta_j\)) for \(i,j\in\{1,2\}\).
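For orientation, here is a quick look at the long-format data (a minimal sketch; output omitted; puzzles ships with the BayesFactor package, and head() and xtabs() are base R):

library(BayesFactor)             # provides the 'puzzles' data set (and anovaBF/lmBF below)
data(puzzles)
head(puzzles)                    # columns: RT, ID, shape, color
xtabs(~ shape + color, puzzles)  # 12 observations per cell, one per participant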

library(knitr); library(kableExtra); library(magrittr)  # kable(), kable_styling()/kable_classic(), and %>%
library(rstan); library(bridgesampling)                 # stan_model()/sampling() and bridge_sampler()/bf(), used later

data(puzzles)
dat <- matrix(puzzles$RT, ncol=4,
              dimnames=list(paste0("s",1:12),
                            c("round&mono","square&mono","round&color","square&color"))) #wide data format
cov(dat) %>% #sample covariance matrix
  kable(digits=3) %>% kable_styling(full_width=F)
             round&mono square&mono round&color square&color
round&mono        4.727       3.273       4.545        4.545
square&mono       3.273       6.000       5.909        4.727
round&color       4.545       5.909       7.636        5.273
square&color      4.545       4.727       5.273        7.455

 

Method 1: Compare the model without each effect to the full model in (1).

\[\begin{equation} \tag{1} \mathcal{M}_{\mathrm{full}}:\ Y_{ijk}=\mu+s_k+\alpha_i+\beta_j+(\alpha\beta)_{ij}+\epsilon_{ijk},\quad \epsilon_{ijk}\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,\ \sigma_\epsilon^2) \end{equation}\]

set.seed(277)
anovaBF(RT ~ shape * color + ID, data=puzzles, whichRandom="ID", whichModels="top", progress=F)
## Bayes factor top-down analysis
## --------------
## When effect is omitted from shape + color + shape:color + ID , BF is...
## [1] Omit color:shape : 2.780721  ±4.21%
## [2] Omit color       : 0.280169  ±11.35%
## [3] Omit shape       : 0.2494565 ±3.3%
## 
## Against denominator:
##   RT ~ shape + color + shape:color + ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

Method 2: Include the random slopes in (2) and test each effect again.

\[\begin{equation} \tag{2} \mathcal{M}_{\mathrm{full}}^{'}:\ Y_{ijk}=\mu+s_k+\alpha_i+\beta_j+(\alpha\beta)_{ij}+(\alpha s)_{ik}+(\beta s)_{jk}+\epsilon_{ijk},\quad \epsilon_{ijk}\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,\ \sigma_\epsilon^2) \end{equation}\]

  • Assume \(\alpha_i\), \(\beta_j\), and \((\alpha\beta)_{ij}\) are the fixed effects; \(s_k\) are the subject-specific random effects.

The model that only omits AB in (2) versus the full (2).

randomEffects <- c("ID")

set.seed(277)
full <-   lmBF(RT~shape+color+ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)

set.seed(277)
omitAB <- lmBF(RT~shape+color+ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitAB / full
## Bayes factor analysis
## --------------
## [1] shape + color + ID + shape:ID + color:ID : 2.664999 ±3.24%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that only omits A in (2) versus the full (2).

set.seed(277)
omitA <-  lmBF(RT~      color+ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitA / full
## Bayes factor analysis
## --------------
## [1] color + ID + shape:color + shape:ID + color:ID : 0.5335185 ±3.21%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that only omits B in (2) versus the full (2).

set.seed(277)
omitB <-  lmBF(RT~shape+      ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitB / full
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:color + shape:ID + color:ID : 0.4508712 ±3.2%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

Method 3:

The model that excludes A, AB, and A:subj in (2) versus the full (2) that drops AB (i.e., an additive model).

set.seed(277)
notA <-   lmBF(RT~      color+ID+                     color:ID, puzzles, whichRandom=randomEffects, progress=F)
notA / omitAB
## Bayes factor analysis
## --------------
## [1] color + ID + color:ID : 1.264948 ±1.75%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that excludes B, AB, and B:subj in (2) versus the full (2) that drops AB.

set.seed(277)
notB <-   lmBF(RT~shape+      ID+            shape:ID         , puzzles, whichRandom=randomEffects, progress=F)
notB / omitAB
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:ID : 2.993029 ±1.76%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

Method 4:

The model that excludes A and AB in (2) versus the full (2) that drops AB. Keep all random slopes.

set.seed(277)
exclA <-  lmBF(RT~      color+ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
exclA / omitAB
## Bayes factor analysis
## --------------
## [1] color + ID + shape:ID + color:ID : 0.5450039 ±1.76%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that excludes B and AB in (2) versus the full (2) that drops AB. Keep all random slopes.

set.seed(277)
exclB <-  lmBF(RT~shape+      ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
exclB / omitAB
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:ID + color:ID : 0.4599249 ±1.75%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

     

Method 1’:

  • Assume \(\alpha_i\), \(\beta_j\), \((\alpha\beta)_{ij}\), and \(s_k\) are the random effects.

randomEffects <- c("ID","shape","color")

set.seed(277)
full <-   lmBF(RT~shape+color+ID+shape:color                  , puzzles, whichRandom=randomEffects, progress=F)

set.seed(277)
omitAB <- lmBF(RT~shape+color+ID                              , puzzles, whichRandom=randomEffects, progress=F)
omitAB / full
## Bayes factor analysis
## --------------
## [1] shape + color + ID : 5.749677 ±7.08%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color 
## ---
## Bayes factor type: BFlinearModel, JZS
set.seed(277)
omitA <-  lmBF(RT~      color+ID+shape:color                  , puzzles, whichRandom=randomEffects, progress=F)
omitA / full
## Bayes factor analysis
## --------------
## [1] color + ID + shape:color : 1.600381 ±5.1%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color 
## ---
## Bayes factor type: BFlinearModel, JZS
set.seed(277)
omitB <-  lmBF(RT~shape+      ID+shape:color                  , puzzles, whichRandom=randomEffects, progress=F)
omitB / full
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:color : 1.600381 ±5.1%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color 
## ---
## Bayes factor type: BFlinearModel, JZS

 

Method 2’:

The model that only omits AB in (2) versus the full (2). All are random effects.

set.seed(277)
full <-   lmBF(RT~shape+color+ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)

set.seed(277)
omitAB <- lmBF(RT~shape+color+ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitAB / full
## Bayes factor analysis
## --------------
## [1] shape + color + ID + shape:ID + color:ID : 6.419993 ±6.59%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that only omits A in (2) versus the full (2). All are random effects.

set.seed(277)
omitA <-  lmBF(RT~      color+ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitA / full
## Bayes factor analysis
## --------------
## [1] color + ID + shape:color + shape:ID + color:ID : 1.845765 ±5.97%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that only omits B in (2) versus the full (2). All are random effects.

set.seed(277)
omitB <-  lmBF(RT~shape+      ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitB / full
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:color + shape:ID + color:ID : 1.839681 ±6.13%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:color + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

Method 3’:

The model that excludes A, AB, and A:subj in (2) versus the full (2) that drops AB. All are random effects.

set.seed(277)
notA <-   lmBF(RT~      color+ID+                     color:ID, puzzles, whichRandom=randomEffects, progress=F)
notA / omitAB
## Bayes factor analysis
## --------------
## [1] color + ID + color:ID : 1.245677 ±5.15%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that excludes B, AB, and B:subj in (2) versus the full (2) that drops AB. All are random effects.

set.seed(277)
notB <-   lmBF(RT~shape+      ID+            shape:ID         , puzzles, whichRandom=randomEffects, progress=F)
notB / omitAB
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:ID : 3.031622 ±5.16%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

Method 4’:

The model that excludes A and AB in (2) versus the full (2) that drops AB. All are random effects. Keep all random slopes.

set.seed(277)
exclA <-  lmBF(RT~      color+ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
exclA / omitAB
## Bayes factor analysis
## --------------
## [1] color + ID + shape:ID + color:ID : 0.6674978 ±5.45%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

 

The model that excludes B and AB in (2) versus the full (2) that drops AB. All are random effects. Keep all random slopes.

set.seed(277)
exclB <-  lmBF(RT~shape+      ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
exclB / omitAB
## Bayes factor analysis
## --------------
## [1] shape + ID + shape:ID + color:ID : 0.5530826 ±5.43%
## 
## Against denominator:
##   RT ~ shape + color + ID + shape:ID + color:ID 
## ---
## Bayes factor type: BFlinearModel, JZS

   

2. New (Full) Model

 

Specify the full model in (3), which has no explicit terms for the subject-specific random effects at all. Compile the model in Stan. \(\alpha_i\), \(\beta_j\), and \((\alpha\beta)_{ij}\) are coded as random effects (in Sections 2, 3.1, 3.2, & 3.3) for \(i,j\in\{1,2\}\).

\[\begin{equation} \tag{3} \mathcal{M}_{\mathrm{full}}^{*}:\ Y_{ijk}=\mu+\alpha_i+\beta_j+(\alpha\beta)_{ij}+\epsilon_{ijk}^{*} \end{equation}\]

\[\begin{equation} \pi(\mu)\propto 1 \end{equation}\]

\[\begin{equation} \alpha_i\mid g_A\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g_A),\quad\beta_j\mid g_B\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g_B),\quad(\alpha\beta)_{ij}\mid g_{AB}\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g_{AB}) \end{equation}\]

\[\begin{equation} g_A\sim\text{Scale-inv-}\chi^2(1,h_A^2),\quad g_B\sim\text{Scale-inv-}\chi^2(1,h_B^2),\quad g_{AB}\sim\text{Scale-inv-}\chi^2(1,h_{AB}^2) \end{equation}\]

\[\begin{equation} \boldsymbol{\epsilon}_k=(\epsilon_{11k}^{*},\epsilon_{21k}^{*},\epsilon_{12k}^{*},\epsilon_{22k}^{*})^{\top}\overset{\text{i.i.d.}}{\sim}\boldsymbol{\mathcal{N}}_4(\mathbf{0},\mathbf{\Sigma}) \end{equation}\]

\[\begin{equation} \mathbf{\Sigma}\sim\mathcal{W}^{-1}(\mathbf{I}_{4},5) \end{equation}\]

 

Note: the Wishart prior is assumed for the precision matrix \(\mathbf{\Omega}\) (which is the inverse of the covariance matrix) of the error term; thus, the inverse-Wishart prior is assumed for the covariance matrix \(\mathbf{\Sigma}\) of the error term. ChatGPT (GPT-3.5) may confuse them (See pic.).
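To make the distinction concrete, a draw of \(\mathbf{\Sigma}\sim\mathcal{W}^{-1}(\mathbf{I}_4,5)\) can be simulated by inverting a Wishart draw of the precision matrix; a minimal sketch in R (base rWishart; not part of the Stan model):

set.seed(277)
Omega <- rWishart(1, df=5, Sigma=diag(4))[,,1]  # precision matrix draw, Omega ~ Wishart(I4, 5)
Sigma <- solve(Omega)                           # implied covariance draw, Sigma ~ inv-Wishart(I4, 5)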

model.full <- "
data {
  int<lower=1> nS;        // number of subjects
  int<lower=2> nA;        // number of levels of the treatment effect A
  int<lower=2> nB;        // number of levels of the treatment effect B
  // vector[nA*nB] Y[nS];        // responses [removed features]
  array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
  matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[nA*nB] Sigma;           // error covariance matrix
  real<lower=0> gA;                  // variance of the treatment effect A
  real<lower=0> gB;                  // variance of the treatment effect B
  real<lower=0> gAB;                 // variance of the interaction effect AB
  vector[nA] tA;                     // treatment effect A
  vector[nB] tB;                     // treatment effect B
  matrix[nA,nB] tAB;                 // interaction effect AB
}

transformed parameters {
  matrix[nA,nB] muS;     // condition means
  for (i in 1:nA) {
    for (j in 1:nB) {
      muS[i,j] = mu + tA[i] + tB[j] + tAB[i,j];
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gA | 1, hA);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
  target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
  target += normal_lpdf(tA | 0, sqrt(gA));
  target += normal_lpdf(tB | 0, sqrt(gB));
  target += normal_lpdf(to_vector(tAB) | 0, sqrt(gAB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma); // a1b1, a2b1, a1b2, a2b2
  }
}
"

stanmodel.full <- stan_model(model_code=model.full)
datalist <- list("nA"=2, "nB"=2, "nS"=nrow(dat), "I"=diag(4), "Y"=dat,
                 "hA"=1, "hB"=1, "hAB"=1,
                 "lwr"=mean(dat)-6*sd(dat), "upr"=mean(dat)+6*sd(dat))
stanfit.full <- sampling(stanmodel.full, data=datalist,
                         iter=10000, warmup=1000, chains=2, seed=277, refresh=0)

rstan::summary(stanfit.full)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 44.9370 0.0479 2.2442 40.1594 43.9369 45.0037 46.0577 49.1852 2197.728 1.0007
Sigma[1,1] 4.7913 0.0280 2.2235 2.1471 3.3103 4.2590 5.6642 10.4947 6314.159 1.0006
Sigma[1,2] 3.2741 0.0289 2.0368 0.6419 1.9380 2.8506 4.0982 8.4359 4954.976 1.0007
Sigma[1,3] 4.5362 0.0344 2.4524 1.4973 2.9173 3.9786 5.5179 10.8893 5082.029 1.0010
Sigma[1,4] 4.5266 0.0334 2.4321 1.5124 2.9072 3.9914 5.5015 10.7885 5298.355 1.0009
Sigma[2,1] 3.2741 0.0289 2.0368 0.6419 1.9380 2.8506 4.0982 8.4359 4954.976 1.0007
Sigma[2,2] 6.0748 0.0378 2.8273 2.7076 4.1940 5.4380 7.2296 13.1355 5597.777 1.0002
Sigma[2,3] 5.8845 0.0407 2.9332 2.3353 3.9356 5.2161 7.0648 13.2749 5182.526 1.0003
Sigma[2,4] 4.7403 0.0380 2.7083 1.4050 2.9761 4.1306 5.8246 11.4325 5090.010 1.0005
Sigma[3,1] 4.5362 0.0344 2.4524 1.4973 2.9173 3.9786 5.5179 10.8893 5082.029 1.0010
Sigma[3,2] 5.8845 0.0407 2.9332 2.3353 3.9356 5.2161 7.0648 13.2749 5182.526 1.0003
Sigma[3,3] 7.6885 0.0476 3.5481 3.4345 5.3196 6.8367 9.1091 16.8830 5556.459 1.0004
Sigma[3,4] 5.2716 0.0431 3.0164 1.5082 3.2883 4.6008 6.5030 12.8149 4903.622 1.0008
Sigma[4,1] 4.5266 0.0334 2.4321 1.5124 2.9072 3.9914 5.5015 10.7885 5298.355 1.0009
Sigma[4,2] 4.7403 0.0380 2.7083 1.4050 2.9761 4.1306 5.8246 11.4325 5090.010 1.0005
Sigma[4,3] 5.2716 0.0431 3.0164 1.5082 3.2883 4.6008 6.5030 12.8149 4903.622 1.0008
Sigma[4,4] 7.5247 0.0460 3.5017 3.3626 5.1839 6.6753 8.9607 16.4599 5801.913 1.0008
gA 5.9908 0.7664 41.5611 0.1712 0.5051 1.0857 2.7301 32.1692 2940.679 1.0010
gB 5.7037 0.8034 74.0960 0.1705 0.4957 1.0499 2.6619 27.8130 8506.189 1.0000
gAB 1.4607 0.0730 4.1918 0.1423 0.3603 0.6588 1.3391 7.3803 3298.813 1.0001
tA[1] 0.4219 0.0368 1.6280 -2.3495 -0.2341 0.3162 0.9402 3.7947 1953.407 1.0009
tA[2] -0.2575 0.0356 1.5836 -3.1431 -0.8695 -0.2683 0.2959 2.8576 1976.582 1.0007
tB[1] 0.3637 0.0292 1.4526 -2.2082 -0.2315 0.3088 0.8944 3.2038 2475.732 1.0005
tB[2] -0.3157 0.0294 1.4684 -3.1466 -0.8848 -0.2915 0.2512 2.3307 2498.172 1.0007
tAB[1,1] 0.2629 0.0117 0.9090 -1.4385 -0.2218 0.2245 0.7169 2.1466 6004.470 1.0002
tAB[1,2] -0.0246 0.0126 0.9023 -1.8194 -0.4857 -0.0245 0.4399 1.7490 5139.517 1.0007
tAB[2,1] -0.0028 0.0126 0.8999 -1.8338 -0.4527 0.0010 0.4549 1.7418 5079.759 1.0005
tAB[2,2] -0.2847 0.0137 0.9246 -2.2178 -0.7281 -0.2340 0.2158 1.3557 4564.726 1.0010
muS[1,1] 45.9855 0.0046 0.6167 44.7511 45.5971 45.9817 46.3801 47.2032 17880.647 1.0000
muS[1,2] 45.0186 0.0055 0.7647 43.4886 44.5299 45.0244 45.5031 46.5445 19482.126 0.9999
muS[2,1] 45.0403 0.0048 0.6723 43.6908 44.6124 45.0404 45.4673 46.3972 20000.999 1.0000
muS[2,2] 44.0791 0.0056 0.7662 42.5782 43.5865 44.0760 44.5580 45.6343 18619.701 0.9999
lp__ -132.9434 0.0810 4.5799 -143.4452 -135.6268 -132.4647 -129.6626 -125.4633 3199.313 1.0002

 

Note that to compute the (log) marginal likelihood for a Stan model, we need to specify the model in a certain way. Instead of using “~” signs to specify distributions, we must use the (log) density functions directly. The reason is that the “~” notation drops constant terms that are not needed for sampling from the posterior; for computing the marginal likelihood, however, these constants must be retained. For instance, instead of writing y ~ normal(mu, sigma); we would write target += normal_lpdf(y | mu, sigma); (Gronau, 2021). See also the discussion regarding the implicit uniform prior when computing the log marginal likelihood.

 

The computation of marginal likelihoods based on bridge sampling requires many more posterior draws than usual. A good conservative rule of thumb is perhaps 10-fold more draws (bridge_sampler.brmsfit {brms}).
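For example, if the error measures reported in Section 4 look too large, one option is to refit with roughly ten times the post-warmup draws before bridge sampling (a hedged sketch; the object names stanfit.full.long and M.full.long and the iteration counts are illustrative only):

stanfit.full.long <- sampling(stanmodel.full, data=datalist,   # ~10x the post-warmup draws used above
                              iter=91000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.full.long <- bridge_sampler(stanfit.full.long, silent=T)
summary(M.full.long)   # check whether the error measures shrink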

 

Tip: Knit the R Markdown in a fresh session without ‘rstan’ already loaded. Otherwise, the compiler generates clang/linker messages such as: ld: warning: -undefined dynamic_lookup may not work with chained fixups.

   

3.1 New (Omitted AB) Model

 

Specify the additive model in (4).

\[\begin{equation} \tag{4} \mathcal{M}_{\text{omit-}\textit{AB}}^{*}:\ Y_{ijk}=\mu+\alpha_i+\beta_j+\epsilon_{ijk}^{*} \end{equation}\]

model.omitAB <- "
data {
  int<lower=1> nS;        // number of subjects
  int<lower=2> nA;        // number of levels of the treatment effect A
  int<lower=2> nB;        // number of levels of the treatment effect B
  // vector[nA*nB] Y[nS];        // responses [removed features]
  array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[nA*nB] Sigma;           // error covariance matrix
  real<lower=0> gA;                  // variance of the treatment effect A
  real<lower=0> gB;                  // variance of the treatment effect B
  vector[nA] tA;                     // treatment effect A
  vector[nB] tB;                     // treatment effect B
}

transformed parameters {
  matrix[nA,nB] muS;     // condition means
  for (i in 1:nA) {
    for (j in 1:nB) {
      muS[i,j] = mu + tA[i] + tB[j];
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gA | 1, hA);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
  target += normal_lpdf(tA | 0, sqrt(gA));
  target += normal_lpdf(tB | 0, sqrt(gB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.omitAB <- stan_model(model_code=model.omitAB)
stanfit.omitAB <- sampling(stanmodel.omitAB, data=datalist[-8],
                           iter=10000, warmup=1000, chains=2, seed=277, refresh=0)

rstan::summary(stanfit.omitAB)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.0543 0.0421 1.9612 41.3287 44.1240 45.0769 46.0311 48.7157 2170.582 1.0006
Sigma[1,1] 4.8381 0.0352 2.3428 2.1278 3.3081 4.2900 5.7153 10.7775 4425.712 1.0001
Sigma[1,2] 3.3772 0.0368 2.1890 0.7040 1.9810 2.8934 4.2003 8.8701 3531.286 1.0000
Sigma[1,3] 4.6472 0.0440 2.6494 1.5421 2.9439 4.0334 5.6120 11.2931 3624.301 1.0001
Sigma[1,4] 4.5758 0.0417 2.5848 1.5068 2.9031 3.9739 5.5503 11.1129 3836.275 1.0000
Sigma[2,1] 3.3772 0.0368 2.1890 0.7040 1.9810 2.8934 4.2003 8.8701 3531.286 1.0000
Sigma[2,2] 6.0573 0.0442 2.9075 2.6717 4.1441 5.3653 7.1928 13.2973 4331.537 1.0001
Sigma[2,3] 5.8949 0.0499 3.0836 2.3044 3.9075 5.1658 7.0104 13.6406 3816.952 1.0002
Sigma[2,4] 4.8698 0.0459 2.8378 1.4789 3.0329 4.2417 5.9285 12.0604 3821.200 1.0001
Sigma[3,1] 4.6472 0.0440 2.6494 1.5421 2.9439 4.0334 5.6120 11.2931 3624.301 1.0001
Sigma[3,2] 5.8949 0.0499 3.0836 2.3044 3.9075 5.1658 7.0104 13.6406 3816.952 1.0002
Sigma[3,3] 7.7184 0.0597 3.7674 3.4055 5.2990 6.8078 9.0791 17.1578 3976.827 1.0001
Sigma[3,4] 5.4224 0.0532 3.2109 1.6041 3.3682 4.7082 6.6138 13.5169 3644.931 1.0001
Sigma[4,1] 4.5758 0.0417 2.5848 1.5068 2.9031 3.9739 5.5503 11.1129 3836.275 1.0000
Sigma[4,2] 4.8698 0.0459 2.8378 1.4789 3.0329 4.2417 5.9285 12.0604 3821.200 1.0001
Sigma[4,3] 5.4224 0.0532 3.2109 1.6041 3.3682 4.7082 6.6138 13.5169 3644.931 1.0001
Sigma[4,4] 7.6088 0.0547 3.6954 3.3155 5.1823 6.7402 8.9940 17.0792 4563.934 0.9999
gA 4.7096 0.5290 26.8028 0.1850 0.5231 1.0406 2.5309 25.8774 2567.171 1.0005
gB 4.5255 0.5753 50.4389 0.1880 0.5190 1.0387 2.4829 24.5949 7686.118 1.0006
tA[1] 0.4862 0.0353 1.3962 -1.8966 -0.0686 0.4338 0.9550 3.0698 1562.437 1.0016
tA[2] -0.4251 0.0353 1.3946 -2.8651 -0.9677 -0.4552 0.0640 2.0798 1558.106 1.0017
tB[1] 0.4476 0.0243 1.2835 -1.9886 -0.0253 0.4553 0.9549 2.7942 2794.149 1.0000
tB[2] -0.4914 0.0244 1.2851 -2.9631 -0.9699 -0.4658 0.0183 1.8395 2779.405 0.9999
muS[1,1] 45.9880 0.0043 0.5948 44.8121 45.6107 45.9896 46.3631 47.1652 18820.254 1.0000
muS[1,2] 45.0491 0.0053 0.7050 43.6605 44.5986 45.0453 45.4945 46.4522 17792.693 1.0000
muS[2,1] 45.0767 0.0045 0.6046 43.9078 44.6877 45.0745 45.4576 46.2852 18436.522 1.0000
muS[2,2] 44.1378 0.0056 0.7464 42.6854 43.6601 44.1232 44.6039 45.6596 17872.267 1.0000
lp__ -126.0832 0.0591 3.7576 -134.4823 -128.3485 -125.6413 -123.3640 -119.9853 4045.665 1.0000

   

3.2 New (Omitted A) Model

 

New Method 2:

Specify the model in (5.1), which violates the principle of marginality.

\[\begin{equation} \tag{5.1} \mathcal{M}_{\text{omit-}\textit{A}}^{*}:\ Y_{ijk}=\mu+\beta_j+(\alpha\beta)_{ij}+\epsilon_{ijk}^{*} \end{equation}\]

model.omitA <- "
data {
  int<lower=1> nS;        // number of subjects
  int<lower=2> nA;        // number of levels of the treatment effect A
  int<lower=2> nB;        // number of levels of the treatment effect B
  // vector[nA*nB] Y[nS];        // responses [removed features]
  array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
  matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[nA*nB] Sigma;           // error covariance matrix
  real<lower=0> gB;                  // variance of the treatment effect B
  real<lower=0> gAB;                 // variance of the interaction effect AB
  vector[nB] tB;                     // treatment effect B
  matrix[nA,nB] tAB;                 // interaction effect AB
}

transformed parameters {
  matrix[nA,nB] muS;     // condition means
  for (i in 1:nA) {
    for (j in 1:nB) {
      muS[i,j] = mu + tB[j] + tAB[i,j];
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
  target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
  target += normal_lpdf(tB | 0, sqrt(gB));
  target += normal_lpdf(to_vector(tAB) | 0, sqrt(gAB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.omitA <- stan_model(model_code=model.omitA)
stanfit.omitA <- sampling(stanmodel.omitA, data=datalist[-6],
                          iter=10000, warmup=1000, chains=2, seed=277, refresh=0)

rstan::summary(stanfit.omitA)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.0409 0.0373 1.7276 41.8456 44.2396 45.0566 45.8863 48.3319 2150.781 1.0002
Sigma[1,1] 4.8199 0.0317 2.2655 2.1698 3.3220 4.2999 5.6967 10.5014 5096.303 1.0009
Sigma[1,2] 3.2946 0.0326 2.0310 0.6766 1.9450 2.8719 4.1627 8.3987 3889.031 1.0008
Sigma[1,3] 4.5805 0.0387 2.4880 1.5331 2.9307 4.0294 5.5727 10.7103 4141.816 1.0010
Sigma[1,4] 4.5257 0.0372 2.4719 1.5302 2.9181 3.9785 5.4868 10.7584 4414.125 1.0007
Sigma[2,1] 3.2946 0.0326 2.0310 0.6766 1.9450 2.8719 4.1627 8.3987 3889.031 1.0008
Sigma[2,2] 6.0840 0.0406 2.7973 2.7096 4.2228 5.4533 7.2367 13.0474 4750.690 1.0002
Sigma[2,3] 5.8965 0.0445 2.9058 2.3425 3.9364 5.2487 7.1101 13.2290 4262.434 1.0004
Sigma[2,4] 4.7830 0.0415 2.7030 1.4491 3.0066 4.2008 5.8551 11.5280 4251.485 1.0008
Sigma[3,1] 4.5805 0.0387 2.4880 1.5331 2.9307 4.0294 5.5727 10.7103 4141.816 1.0010
Sigma[3,2] 5.8965 0.0445 2.9058 2.3425 3.9364 5.2487 7.1101 13.2290 4262.434 1.0004
Sigma[3,3] 7.7294 0.0526 3.5433 3.4609 5.3317 6.9085 9.2041 16.8557 4538.601 1.0006
Sigma[3,4] 5.3069 0.0472 3.0166 1.5421 3.3206 4.6413 6.5478 12.9154 4086.396 1.0010
Sigma[4,1] 4.5257 0.0372 2.4719 1.5302 2.9181 3.9785 5.4868 10.7584 4414.125 1.0007
Sigma[4,2] 4.7830 0.0415 2.7030 1.4491 3.0066 4.2008 5.8551 11.5280 4251.485 1.0008
Sigma[4,3] 5.3069 0.0472 3.0166 1.5421 3.3206 4.6413 6.5478 12.9154 4086.396 1.0010
Sigma[4,4] 7.5719 0.0502 3.5718 3.4044 5.2280 6.7378 8.9282 16.6939 5072.188 1.0009
gB 5.8355 0.7172 39.8825 0.1722 0.5075 1.0952 2.7894 32.4249 3092.270 1.0006
gAB 1.4465 0.0416 3.1958 0.1748 0.4236 0.7452 1.4209 7.0424 5905.312 1.0001
tB[1] 0.3513 0.0376 1.5657 -2.6077 -0.2849 0.2807 0.9032 3.4317 1737.428 1.0003
tB[2] -0.3108 0.0358 1.5491 -3.3636 -0.8947 -0.2795 0.2727 2.5347 1872.319 1.0003
tAB[1,1] 0.5394 0.0122 0.8174 -1.0063 0.0862 0.5198 0.9743 2.1857 4524.128 1.0002
tAB[1,2] 0.2498 0.0100 0.7856 -1.3100 -0.1752 0.2512 0.6905 1.7696 6198.454 1.0001
tAB[2,1] -0.2903 0.0120 0.8088 -1.9067 -0.7191 -0.2750 0.1519 1.2725 4535.968 1.0004
tAB[2,2] -0.5567 0.0101 0.8018 -2.2431 -0.9701 -0.5153 -0.0883 0.9172 6288.800 1.0003
muS[1,1] 45.9316 0.0043 0.6137 44.7084 45.5376 45.9335 46.3263 47.1406 20188.194 1.0000
muS[1,2] 44.9799 0.0052 0.7748 43.4634 44.4708 44.9759 45.4815 46.5156 21780.937 1.0000
muS[2,1] 45.1019 0.0046 0.6839 43.7436 44.6588 45.0953 45.5399 46.4632 22075.567 1.0000
muS[2,2] 44.1733 0.0054 0.7724 42.6921 43.6624 44.1622 44.6661 45.7481 20319.532 1.0002
lp__ -128.7220 0.0693 3.9867 -137.7551 -131.0930 -128.2635 -125.8452 -122.2603 3307.417 1.0006

   

New Method 4:

Specify the model in (5.2), which follows the principle of marginality.

\[\begin{equation} \tag{5.2} \mathcal{M}_{\text{not-}\textit{A}}^{*}:\ Y_{ijk}=\mu+\beta_j+\epsilon_{ijk}^{*} \end{equation}\]

model.notA <- "
data {
  int<lower=1> nS;        // number of subjects
  int<lower=2> nA;        // number of levels of the treatment effect A
  int<lower=2> nB;        // number of levels of the treatment effect B
  // vector[nA*nB] Y[nS];        // responses [removed features]
  array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[nA*nB] Sigma;           // error covariance matrix
  real<lower=0> gB;                  // variance of the treatment effect B
  vector[nB] tB;                     // treatment effect B
}

transformed parameters {
  matrix[nA,nB] muS;     // condition means
  for (i in 1:nA) {
    for (j in 1:nB) {
      muS[i,j] = mu + tB[j];
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
  target += normal_lpdf(tB | 0, sqrt(gB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.notA <- stan_model(model_code=model.notA)
stanfit.notA <- sampling(stanmodel.notA, data=datalist[-c(6,8)],
                         iter=10000, warmup=1000, chains=2, seed=277, refresh=0)

rstan::summary(stanfit.notA)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.1748 0.0383 1.5853 42.2659 44.4289 45.1903 45.9323 48.0861 1713.076 1.0004
Sigma[1,1] 5.0933 0.0320 2.4852 2.2230 3.4359 4.5066 6.0535 11.3721 6048.445 1.0006
Sigma[1,2] 3.2550 0.0329 2.1949 0.4613 1.8532 2.7939 4.1341 8.7680 4437.357 1.0002
Sigma[1,3] 4.8193 0.0394 2.7149 1.5880 3.0529 4.1824 5.8863 11.6861 4745.783 1.0005
Sigma[1,4] 4.3791 0.0376 2.6014 1.1630 2.6657 3.8076 5.4435 11.0294 4775.714 1.0003
Sigma[2,1] 3.2550 0.0329 2.1949 0.4613 1.8532 2.7939 4.1341 8.7680 4437.357 1.0002
Sigma[2,2] 6.3944 0.0436 3.1039 2.7768 4.3241 5.6605 7.5653 14.5122 5079.376 1.0001
Sigma[2,3] 5.8856 0.0452 3.0892 2.2044 3.8470 5.1745 7.0841 13.7788 4666.663 1.0004
Sigma[2,4] 5.3374 0.0458 3.1776 1.5723 3.2387 4.5977 6.5612 13.4604 4811.970 1.0001
Sigma[3,1] 4.8193 0.0394 2.7149 1.5880 3.0529 4.1824 5.8863 11.6861 4745.783 1.0005
Sigma[3,2] 5.8856 0.0452 3.0892 2.2044 3.8470 5.1745 7.0841 13.7788 4666.663 1.0004
Sigma[3,3] 7.8773 0.0535 3.7575 3.4451 5.3946 7.0091 9.3385 17.3732 4940.208 1.0006
Sigma[3,4] 5.4089 0.0487 3.2943 1.4040 3.2653 4.6946 6.6846 13.6531 4575.136 1.0002
Sigma[4,1] 4.3791 0.0376 2.6014 1.1630 2.6657 3.8076 5.4435 11.0294 4775.714 1.0003
Sigma[4,2] 5.3374 0.0458 3.1776 1.5723 3.2387 4.5977 6.5612 13.4604 4811.970 1.0001
Sigma[4,3] 5.4089 0.0487 3.2943 1.4040 3.2653 4.6946 6.6846 13.6531 4575.136 1.0002
Sigma[4,4] 8.2511 0.0560 4.0972 3.4526 5.5066 7.2694 9.8289 18.8650 5360.863 1.0000
gB 5.5166 0.7921 47.4010 0.1702 0.4746 0.9673 2.3235 32.2468 3581.115 1.0015
tB[1] 0.3883 0.0377 1.3978 -2.1827 -0.1070 0.3676 0.8707 2.9297 1371.960 1.0003
tB[2] -0.3757 0.0376 1.3984 -2.9962 -0.8649 -0.3693 0.1200 2.1033 1384.899 1.0003
muS[1,1] 45.5631 0.0061 0.7052 44.1980 45.1021 45.5597 46.0141 46.9621 13217.075 1.0000
muS[1,2] 44.7991 0.0076 0.8534 43.1295 44.2457 44.7920 45.3526 46.4862 12636.857 1.0000
muS[2,1] 45.5631 0.0061 0.7052 44.1980 45.1021 45.5597 46.0141 46.9621 13217.075 1.0000
muS[2,2] 44.7991 0.0076 0.8534 43.1295 44.2457 44.7920 45.3526 46.4862 12636.857 1.0000
lp__ -125.2033 0.0519 3.2768 -132.6707 -127.1011 -124.7933 -122.8421 -120.0217 3981.995 1.0016

   

3.3 New (Omitted B) Model

 

New Method 2:

Specify the model in (6.1), which violates the principle of marginality.

\[\begin{equation} \tag{6.1} \mathcal{M}_{\text{omit-}\textit{B}}^{*}:\ Y_{ijk}=\mu+\alpha_i+(\alpha\beta)_{ij}+\epsilon_{ijk}^{*} \end{equation}\]

datalist[["Y"]] <- dat[,c(1,3,2,4)] # reorder columns so that color varies fastest and plays the role of A
stanfit.omitB <- sampling(stanmodel.omitA, data=datalist[-6], # reusing the omit-A model therefore omits the color main effect
                          iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
rstan::summary(stanfit.omitB)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.1024 0.0372 1.7156 41.7044 44.3118 45.1202 45.9302 48.3645 2132.035 1.0006
Sigma[1,1] 4.7752 0.0273 2.2304 2.1114 3.2926 4.2630 5.6464 10.5090 6676.550 1.0001
Sigma[1,2] 4.5108 0.0330 2.4433 1.5132 2.8755 3.9848 5.4914 10.7049 5496.861 1.0001
Sigma[1,3] 3.2599 0.0270 2.0008 0.6363 1.9406 2.8526 4.1089 8.3304 5474.374 1.0000
Sigma[1,4] 4.5240 0.0324 2.4658 1.5448 2.8939 3.9773 5.4774 10.8027 5797.976 0.9999
Sigma[2,1] 4.5108 0.0330 2.4433 1.5132 2.8755 3.9848 5.4914 10.7049 5496.861 1.0001
Sigma[2,2] 7.6489 0.0462 3.5215 3.4166 5.2887 6.8480 9.0493 16.6971 5798.230 1.0000
Sigma[2,3] 5.8466 0.0385 2.8808 2.3534 3.9084 5.1988 7.0245 13.2073 5612.313 1.0000
Sigma[2,4] 5.2815 0.0410 3.0234 1.5257 3.3105 4.6261 6.5172 12.9138 5434.580 1.0001
Sigma[3,1] 3.2599 0.0270 2.0008 0.6363 1.9406 2.8526 4.1089 8.3304 5474.374 1.0000
Sigma[3,2] 5.8466 0.0385 2.8808 2.3534 3.9084 5.1988 7.0245 13.2073 5612.313 1.0000
Sigma[3,3] 6.0347 0.0349 2.7553 2.7379 4.1920 5.4078 7.1398 12.9694 6246.392 1.0001
Sigma[3,4] 4.7502 0.0352 2.6785 1.4033 2.9757 4.1687 5.8262 11.5360 5802.117 1.0003
Sigma[4,1] 4.5240 0.0324 2.4658 1.5448 2.8939 3.9773 5.4774 10.8027 5797.976 0.9999
Sigma[4,2] 5.2815 0.0410 3.0234 1.5257 3.3105 4.6261 6.5172 12.9138 5434.580 1.0001
Sigma[4,3] 4.7502 0.0352 2.6785 1.4033 2.9757 4.1687 5.8262 11.5360 5802.117 1.0003
Sigma[4,4] 7.5726 0.0455 3.5939 3.3563 5.2209 6.7315 8.9453 16.6880 6231.997 1.0000
gB 6.0483 0.8342 66.9731 0.1716 0.4949 1.0401 2.6402 31.9598 6444.873 1.0004
gAB 1.5306 0.0671 4.0408 0.1856 0.4421 0.7758 1.4772 7.1123 3621.843 0.9999
tB[1] 0.3101 0.0351 1.5143 -2.4820 -0.2935 0.2606 0.8601 3.3513 1858.665 1.0012
tB[2] -0.3000 0.0359 1.5307 -3.3022 -0.8701 -0.2658 0.2966 2.5696 1820.904 1.0009
tAB[1,1] 0.6242 0.0124 0.8263 -0.8348 0.1666 0.5760 1.0238 2.3252 4443.371 1.0006
tAB[1,2] 0.2947 0.0113 0.8288 -1.3958 -0.1380 0.3027 0.7375 1.8654 5352.228 1.0004
tAB[2,1] -0.2767 0.0123 0.8115 -1.7605 -0.7149 -0.3036 0.1267 1.3635 4348.923 1.0004
tAB[2,2] -0.5766 0.0112 0.8349 -2.3046 -1.0113 -0.5466 -0.1221 0.9698 5525.801 1.0003
muS[1,1] 46.0367 0.0045 0.6263 44.8025 45.6272 46.0340 46.4372 47.2850 18986.751 1.0000
muS[1,2] 45.0971 0.0048 0.6856 43.7440 44.6508 45.0959 45.5381 46.4648 20171.934 1.0000
muS[2,1] 45.1359 0.0054 0.7741 43.6209 44.6403 45.1307 45.6369 46.6658 20727.247 1.0000
muS[2,2] 44.2258 0.0056 0.7913 42.6746 43.7114 44.2101 44.7285 45.8316 19966.527 1.0000
lp__ -128.7577 0.0664 4.0734 -138.0179 -131.1657 -128.3020 -125.8331 -122.1384 3759.369 1.0000

   

New Method 4:

Specify the model in (6.2), which follows the principle of marginality.

\[\begin{equation} \tag{6.2} \mathcal{M}_{\text{not-}\textit{B}}^{*}:\ Y_{ijk}=\mu+\alpha_i+\epsilon_{ijk}^{*} \end{equation}\]

stanfit.notB <- sampling(stanmodel.notA, data=datalist[-c(6,8)], # reusing the not-A model on the reordered data keeps only the shape main effect
                         iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
rstan::summary(stanfit.notB)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 46.1346 0.0174 1.4000 43.4521 45.3920 46.1269 46.8658 48.8727 6437.206 1.0002
Sigma[1,1] 5.1625 0.0395 2.6256 2.2251 3.4375 4.5317 6.1505 11.7787 4415.145 1.0001
Sigma[1,2] 5.2788 0.0560 3.3286 1.5136 3.1422 4.5073 6.4896 13.5024 3539.235 1.0001
Sigma[1,3] 3.7744 0.0441 2.5890 0.7410 2.1386 3.1940 4.7217 10.1706 3446.627 1.0001
Sigma[1,4] 5.3406 0.0591 3.5658 1.2000 3.0865 4.5190 6.6431 14.2174 3645.593 1.0001
Sigma[2,1] 5.2788 0.0560 3.3286 1.5136 3.1422 4.5073 6.4896 13.5024 3539.235 1.0001
Sigma[2,2] 9.5311 0.0809 4.9696 3.8695 6.2516 8.3524 11.4169 21.8817 3770.323 1.0002
Sigma[2,3] 6.9682 0.0651 3.9033 2.5692 4.4169 6.0381 8.4268 16.7734 3597.372 1.0003
Sigma[2,4] 7.7703 0.0831 4.8980 2.1982 4.6107 6.6305 9.5738 20.0238 3472.210 1.0002
Sigma[3,1] 3.7744 0.0441 2.5890 0.7410 2.1386 3.1940 4.7217 10.1706 3446.627 1.0001
Sigma[3,2] 6.9682 0.0651 3.9033 2.5692 4.4169 6.0381 8.4268 16.7734 3597.372 1.0003
Sigma[3,3] 6.7733 0.0545 3.4744 2.8700 4.5257 5.9441 8.0452 15.5087 4058.062 1.0003
Sigma[3,4] 6.3224 0.0696 4.1706 1.6614 3.6323 5.3505 7.8408 16.5792 3595.003 1.0002
Sigma[4,1] 5.3406 0.0591 3.5658 1.2000 3.0865 4.5190 6.6431 14.2174 3645.593 1.0001
Sigma[4,2] 7.7703 0.0831 4.8980 2.1982 4.6107 6.6305 9.5738 20.0238 3472.210 1.0002
Sigma[4,3] 6.3224 0.0696 4.1706 1.6614 3.6323 5.3505 7.8408 16.5792 3595.003 1.0002
Sigma[4,4] 10.7079 0.0922 5.8356 4.1818 6.9515 9.3285 12.8528 25.1048 4002.129 1.0000
gB 3.6558 0.2377 21.3914 0.1566 0.4397 0.8948 2.1792 21.2815 8096.869 0.9999
tB[1] 0.2697 0.0156 1.2033 -2.0985 -0.2130 0.2583 0.7663 2.6295 5963.683 1.0002
tB[2] -0.3046 0.0154 1.2020 -2.7443 -0.7760 -0.2764 0.2080 1.9637 6054.782 1.0001
muS[1,1] 46.4043 0.0086 0.8016 44.8324 45.8906 46.4111 46.9146 47.9920 8596.118 1.0000
muS[1,2] 45.8300 0.0082 0.7832 44.2780 45.3288 45.8291 46.3325 47.3985 9226.334 1.0000
muS[2,1] 46.4043 0.0086 0.8016 44.8324 45.8906 46.4111 46.9146 47.9920 8596.118 1.0000
muS[2,2] 45.8300 0.0082 0.7832 44.2780 45.3288 45.8291 46.3325 47.3985 9226.334 1.0000
lp__ -127.6715 0.0472 3.2022 -134.9644 -129.5605 -127.2718 -125.3452 -122.5582 4607.570 1.0004

   

4. Bayes Factor via Bridge Sampling

 

Compute log marginal likelihoods for competing models via bridge sampling. Then, \(\textit{BF}_{10}=\frac{p(\text{Data}\ \mid\ \mathcal{M}_1)}{p(\text{Data}\ \mid\ \mathcal{M}_0)}\).

set.seed(277)
M.full <- bridge_sampler(stanfit.full, silent=T)
summary(M.full)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -121.528
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.0004260025
##  Coefficient of Variation: 0.02063983
##  Percentage Error: 2%
## 
## Note:
## All error measures are approximate.
set.seed(277)
M.omitAB <- bridge_sampler(stanfit.omitAB, silent=T)
summary(M.omitAB)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -120.2143
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.000197953
##  Coefficient of Variation: 0.01406958
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
bf(M.omitAB, M.full) #New Method of testing AB
## Estimated Bayes factor in favor of M.omitAB over M.full: 3.71982
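Equivalently, this Bayes factor is just the exponentiated difference of the two log marginal likelihood estimates (logml() is the extractor exported by the bridgesampling package):

exp(logml(M.omitAB) - logml(M.full))  # should reproduce bf(M.omitAB, M.full), roughly 3.72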

   

set.seed(277)
M.omitA <- bridge_sampler(stanfit.omitA, silent=T)
summary(M.omitA)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -121.158
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.0001989138
##  Coefficient of Variation: 0.01410368
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
bf(M.omitA, M.full) #New Method 2 of testing A
## Estimated Bayes factor in favor of M.omitA over M.full: 1.44768

   

set.seed(277)
M.notA <- bridge_sampler(stanfit.notA, silent=T)
summary(M.notA)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -121.7739
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.0001194225
##  Coefficient of Variation: 0.01092806
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
bf(M.notA, M.omitAB) #New Method 4 of testing A
## Estimated Bayes factor in favor of M.notA over M.omitAB: 0.21021

   

set.seed(277)
M.omitB <- bridge_sampler(stanfit.omitB, silent=T)
summary(M.omitB)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -121.1627
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.0001911197
##  Coefficient of Variation: 0.0138246
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
bf(M.omitB, M.full) #New Method 2 of testing B
## Estimated Bayes factor in favor of M.omitB over M.full: 1.44094

   

set.seed(277)
M.notB <- bridge_sampler(stanfit.notB, silent=T)
summary(M.notB)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -123.448
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.0001233827
##  Coefficient of Variation: 0.01110778
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
bf(M.notB, M.omitAB) #New Method 4 of testing B
## Estimated Bayes factor in favor of M.notB over M.omitAB: 0.03941

     

Check the Monte Carlo error of estimating the Bayes factor.

num <- 50; BF <- PerErr.full <- PerErr.omit <- numeric(num)
system.time(
for (i in 1:num) { #roughly 43 min of run time
  result <- BFBS(seed=i) #defined in All-in-One R Script
  BF[i] <- result$bf
  PerErr.full[i] <- result$pererr.full
  PerErr.omit[i] <- result$pererr.omit
})


range(PerErr.full); range(PerErr.omit)
## [1] 0.01579613 0.02618247
## [1] 0.01265831 0.01735461

hist(BF,prob=T,col="white",yaxt="n",ylab="",
     xlab="Bayes factor (omitted AB) estimates",main="")
lines(density(BF),col="red",lwd=2)

   

5. Modeling Fixed Effects

 

Specify the full model in (3). Compile the model (which only works for the \(2\times2\) design) in Stan. \(\alpha_i\), \(\beta_j\), and \((\alpha\beta)_{ij}\) are now coded as fixed effects by projecting each set of \(a\) main effects onto \(a-1\) parameters, with the property that the marginal prior on all \(a\) main effects is identical.

\[\begin{equation} \tag{3} \mathcal{M}_{\mathrm{full}}^{*}:\ Y_{ijk}=\mu+\alpha_i+\beta_j+(\alpha\beta)_{ij}+\epsilon_{ijk}^{*} \end{equation}\]

\[\begin{equation} \pi(\mu)\propto 1 \end{equation}\]

\[\begin{equation} \tag{$\star$} (\alpha_1^\star,\dotsb,\alpha_{a-1}^\star)=(\alpha_1,\dotsb,\alpha_a)\cdot\mathbf{Q},\quad\mathbf{I}_a-a^{-1}\mathbf{J}_a=\mathbf{Q}\cdot\mathbf{Q}^\top,\quad\dotsb \end{equation}\]

\[\begin{equation} \alpha_i^\star\mid g_A\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g_A),\quad\beta_j^\star\mid g_B\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g_B),\quad(\alpha\beta)_{ij}^\star\mid g_{AB}\overset{\text{i.i.d.}}{\sim}\mathcal{N}(0,g_{AB}) \end{equation}\]

\[\begin{equation} g_A\sim\text{Scale-inv-}\chi^2(1,h_A^2),\quad g_B\sim\text{Scale-inv-}\chi^2(1,h_B^2),\quad g_{AB}\sim\text{Scale-inv-}\chi^2(1,h_{AB}^2) \end{equation}\]

\[\begin{equation} \boldsymbol{\epsilon}_k=(\epsilon_{11k}^{*},\epsilon_{21k}^{*},\epsilon_{12k}^{*},\epsilon_{22k}^{*})^{\top}\overset{\text{i.i.d.}}{\sim}\boldsymbol{\mathcal{N}}_4(\mathbf{0},\mathbf{\Sigma}) \end{equation}\]

\[\begin{equation} \mathbf{\Sigma}\sim\mathcal{W}^{-1}(\mathbf{I}_{4},5) \end{equation}\]

 

\(\mathbf{Q}\) is an \(a\times(a-1)\) matrix whose columns are the \(a-1\) unit-length eigenvectors corresponding to the nonzero eigenvalues of \(\mathbf{I}_a-a^{-1}\mathbf{J}_a\). For example, the projected main effect is \(\alpha^\star=(\alpha_1-\alpha_2)/\sqrt{2}\) when \(a=2\). In the other direction (given \(\alpha_1+\alpha_2=0\)), \(\alpha_1=\alpha^\star/\sqrt{2}\) and \(\alpha_2=-\alpha^\star/\sqrt{2}\). The same reduced parametrization applies to \(\beta_j\).

As for the interaction effects, four variables are projected into one. \((\alpha\beta)^\star=((\alpha\beta)_{11}-(\alpha\beta)_{21}-(\alpha\beta)_{12}+(\alpha\beta)_{22})/2\). In the other direction (given \((\alpha\beta)_{11}+(\alpha\beta)_{12}=0\), \((\alpha\beta)_{21}+(\alpha\beta)_{22}=0\), and \((\alpha\beta)_{11}+(\alpha\beta)_{21}=0\)), \((\alpha\beta)_{11}=(\alpha\beta)^\star/2\), \((\alpha\beta)_{21}=-(\alpha\beta)^\star/2\), \((\alpha\beta)_{12}=-(\alpha\beta)^\star/2\), and \((\alpha\beta)_{22}=(\alpha\beta)^\star/2\).
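As a cross-check of this parametrization, \(\mathbf{Q}\) can be constructed numerically; a minimal sketch in R for \(a=2\) (eigen() may return the eigenvector with either sign):

a <- 2
C <- diag(a) - matrix(1/a, a, a)              # I_a - a^{-1} J_a
Q <- eigen(C)$vectors[, 1:(a-1), drop=FALSE]  # unit eigenvector(s) of the nonzero eigenvalue(s)
Q                                             # here +/- c(1, -1)/sqrt(2)
alpha <- c(1.5, -1.5)                         # any sum-to-zero main effects
drop(alpha %*% Q)                             # projected effect, (alpha1 - alpha2)/sqrt(2) up to sign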

model.full.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hA;       // (square root) scale parameter of the variance of the projected treatment effect A
  real<lower=0> hB;       // (square root) scale parameter of the variance of the projected treatment effect B
  real<lower=0> hAB;      // (square root) scale parameter of the variance of the projected interaction effect AB
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gA;                  // variance of the projected treatment effect A
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real<lower=0> gAB;                 // variance of the projected interaction effect AB
  real tAfix;                        // projected treatment effect A
  real tBfix;                        // projected treatment effect B
  real tABfix;                       // projected interaction effect AB
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,i+1) * tAfix / sqrt(2) 
                    + pow(-1,j+1) * tBfix / sqrt(2)
                    + pow(-1,i+j) * tABfix / 2;
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gA | 1, hA);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tAfix | 0, sqrt(gA));
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  target += normal_lpdf(tABfix | 0, sqrt(gAB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"
stanmodel.full.fixed <- stan_model(model_code=model.full.fixed)
datalist.fixed <- list("nS"=nrow(dat), "I"=diag(4), "Y"=dat,
                       "hA"=.5, "hB"=.5, "hAB"=.5,
                       "lwr"=mean(dat)-6*sd(dat),
                       "upr"=mean(dat)+6*sd(dat))

stanfit.full.fixed <- sampling(stanmodel.full.fixed, data=datalist.fixed,
                               iter=10000, warmup=1000, chains=2, seed=277, refresh=0)

set.seed(277)
M.full.fixed <- bridge_sampler(stanfit.full.fixed, silent=T)

   

  • Proposing a New (Omitted Fixed AB) Model

model.omitAB.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hA;       // (square root) scale parameter of the variance of the projected treatment effect A
  real<lower=0> hB;       // (square root) scale parameter of the variance of the projected treatment effect B
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gA;                  // variance of the projected treatment effect A
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real tAfix;                        // projected treatment effect A
  real tBfix;                        // projected treatment effect B
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,i+1) * tAfix / sqrt(2) 
                    + pow(-1,j+1) * tBfix / sqrt(2);
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gA | 1, hA);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tAfix | 0, sqrt(gA));
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.omitAB.fixed <- stan_model(model_code=model.omitAB.fixed)
stanfit.omitAB.fixed <- sampling(stanmodel.omitAB.fixed, data=datalist.fixed[-6],
                                 iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.omitAB.fixed <- bridge_sampler(stanfit.omitAB.fixed, silent=T)
bf(M.omitAB.fixed, M.full.fixed)
## Estimated Bayes factor in favor of M.omitAB.fixed over M.full.fixed: 2.02006

   

  • Proposing a New (Omitted Fixed A) Model

 

New Method 2, which violates the principle of marginality.

model.omitA.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hB;       // (square root) scale parameter of the variance of the projected treatment effect B
  real<lower=0> hAB;      // (square root) scale parameter of the variance of the projected interaction effect AB
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real<lower=0> gAB;                 // variance of the projected interaction effect AB
  real tBfix;                        // projected treatment effect B
  real tABfix;                       // projected interaction effect AB
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,j+1) * tBfix / sqrt(2)
                    + pow(-1,i+j) * tABfix / 2;
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  target += normal_lpdf(tABfix | 0, sqrt(gAB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.omitA.fixed <- stan_model(model_code=model.omitA.fixed)
stanfit.omitA.fixed <- sampling(stanmodel.omitA.fixed, data=datalist.fixed[-4],
                                iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.omitA.fixed <- bridge_sampler(stanfit.omitA.fixed, silent=T)
bf(M.omitA.fixed, M.full.fixed)
## Estimated Bayes factor in favor of M.omitA.fixed over M.full.fixed: 0.22845

   

New Method 4, which follows the principle of marginality.

model.notA.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hB;       // (square root) scale parameter of the variance of the projected treatment effect B
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real tBfix;                        // projected treatment effect B
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,j+1) * tBfix / sqrt(2);
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.notA.fixed <- stan_model(model_code=model.notA.fixed)
stanfit.notA.fixed <- sampling(stanmodel.notA.fixed, data=datalist.fixed[-c(4,6)],
                               iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.notA.fixed <- bridge_sampler(stanfit.notA.fixed, silent=T)
bf(M.notA.fixed, M.omitAB.fixed)
## Estimated Bayes factor in favor of M.notA.fixed over M.omitAB.fixed: 0.20387

   

  • Proposing a New (Omitted Fixed B) Model

 

New Method 2, which violates the principle of marginality.

datalist.fixed[["Y"]] <- dat[,c(1,3,2,4)] # reorder columns so that color varies fastest and plays the role of A
stanfit.omitB.fixed <- sampling(stanmodel.omitA.fixed, data=datalist.fixed[-4], # reusing the omit-A model therefore omits the color main effect
                                iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.omitB.fixed <- bridge_sampler(stanfit.omitB.fixed, silent=T)
bf(M.omitB.fixed, M.full.fixed)
## Estimated Bayes factor in favor of M.omitB.fixed over M.full.fixed: 0.05270

   

New Method 4, which follows the principle of marginality.

stanfit.notB.fixed <- sampling(stanmodel.notA.fixed, data=datalist.fixed[-c(4,6)], # reusing the not-A model on the reordered data keeps only the shape main effect
                               iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.notB.fixed <- bridge_sampler(stanfit.notB.fixed, silent=T)
bf(M.notB.fixed, M.omitAB.fixed)
## Estimated Bayes factor in favor of M.notB.fixed over M.omitAB.fixed: 0.04318

   

summary(M.full.fixed)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -120.6754
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.000100883
##  Coefficient of Variation: 0.01004405
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
summary(M.omitAB.fixed)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -119.9723
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 7.695205e-05
##  Coefficient of Variation: 0.008772232
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
summary(M.omitA.fixed)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -122.1518
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 9.712311e-05
##  Coefficient of Variation: 0.009855106
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
summary(M.notA.fixed)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -121.5626
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 7.089621e-05
##  Coefficient of Variation: 0.008419988
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
summary(M.omitB.fixed)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -123.6185
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 0.0001089034
##  Coefficient of Variation: 0.01043568
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
summary(M.notB.fixed)
## 
## Bridge sampling log marginal likelihood estimate 
## (method = "normal", repetitions = 1):
## 
##  -123.1148
## 
## Error Measures:
## 
##  Relative Mean-Squared Error: 7.976624e-05
##  Coefficient of Variation: 0.008931195
##  Percentage Error: 1%
## 
## Note:
## All error measures are approximate.
rstan::summary(stanfit.full.fixed)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.1410 0.0066 0.6573 43.8873 44.7138 45.1274 45.5521 46.4863 10069.083 0.9999
Sigma[1,1] 4.8175 0.0310 2.2598 2.1424 3.3205 4.3041 5.6724 10.6146 5324.422 1.0002
Sigma[1,2] 3.3160 0.0323 2.1012 0.6603 1.9683 2.8724 4.1485 8.6207 4237.071 1.0004
Sigma[1,3] 4.5737 0.0381 2.5216 1.5344 2.9152 4.0134 5.5447 11.0895 4386.016 1.0003
Sigma[1,4] 4.5458 0.0379 2.5423 1.5079 2.8845 3.9672 5.5429 10.8350 4494.758 1.0002
Sigma[2,1] 3.3160 0.0323 2.1012 0.6603 1.9683 2.8724 4.1485 8.6207 4237.071 1.0004
Sigma[2,2] 6.0850 0.0448 2.8813 2.6883 4.1754 5.4303 7.1899 13.4502 4139.055 1.0004
Sigma[2,3] 5.8791 0.0457 2.9870 2.3054 3.9062 5.2010 7.0366 13.5036 4273.050 1.0004
Sigma[2,4] 4.8313 0.0456 2.8576 1.4185 2.9891 4.1768 5.9232 11.9332 3919.453 1.0003
Sigma[3,1] 4.5737 0.0381 2.5216 1.5344 2.9152 4.0134 5.5447 11.0895 4386.016 1.0003
Sigma[3,2] 5.8791 0.0457 2.9870 2.3054 3.9062 5.2010 7.0366 13.5036 4273.050 1.0004
Sigma[3,3] 7.6766 0.0533 3.6220 3.4354 5.2813 6.8311 9.0757 16.9747 4615.646 1.0002
Sigma[3,4] 5.3402 0.0490 3.1402 1.5312 3.2917 4.6579 6.5664 13.2086 4107.469 1.0002
Sigma[4,1] 4.5458 0.0379 2.5423 1.5079 2.8845 3.9672 5.5429 10.8350 4494.758 1.0002
Sigma[4,2] 4.8313 0.0456 2.8576 1.4185 2.9891 4.1768 5.9232 11.9332 3919.453 1.0003
Sigma[4,3] 5.3402 0.0490 3.1402 1.5312 3.2917 4.6579 6.5664 13.2086 4107.469 1.0002
Sigma[4,4] 7.6504 0.0557 3.7574 3.3936 5.2171 6.7628 9.0579 17.0939 4548.152 1.0002
gA 2.8916 0.3375 35.9392 0.0604 0.1974 0.4223 1.0722 13.9539 11342.369 1.0000
gB 4.0769 0.8844 91.9400 0.0706 0.2208 0.4601 1.1212 12.1348 10806.827 1.0001
gAB 1.7941 0.2444 25.6529 0.0404 0.1193 0.2487 0.6255 7.0619 11019.405 1.0001
tAfix 0.5653 0.0023 0.2588 0.0415 0.3983 0.5700 0.7395 1.0603 12800.717 1.0000
tBfix 0.6122 0.0019 0.2003 0.2012 0.4837 0.6158 0.7451 0.9989 10624.291 1.0001
tABfix 0.0125 0.0031 0.3480 -0.6799 -0.2021 0.0093 0.2242 0.7221 12465.659 1.0002
muS[1,1] 45.9799 0.0055 0.6267 44.7499 45.5767 45.9780 46.3765 47.2366 12798.153 0.9999
muS[1,2] 45.1016 0.0077 0.7742 43.6056 44.5995 45.0884 45.5922 46.6756 10011.810 0.9999
muS[2,1] 45.1679 0.0065 0.6745 43.8756 44.7314 45.1557 45.5857 46.5622 10802.743 0.9999
muS[2,2] 44.3146 0.0081 0.7853 42.8270 43.8005 44.2927 44.8068 45.9254 9343.367 1.0000
lp__ -125.5465 0.0501 3.5217 -133.7251 -127.5970 -125.1267 -123.0216 -119.9153 4947.860 1.0002
rstan::summary(stanfit.omitAB.fixed)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.1288 0.0056 0.6310 43.9076 44.7260 45.1173 45.5235 46.4109 12706.778 1.0000
Sigma[1,1] 4.7535 0.0276 2.1826 2.1502 3.2902 4.2447 5.6117 10.3240 6245.961 1.0004
Sigma[1,2] 3.2675 0.0291 2.0186 0.6904 1.9555 2.8381 4.0762 8.3975 4813.247 1.0002
Sigma[1,3] 4.5275 0.0341 2.4277 1.5505 2.9073 3.9716 5.5158 10.7159 5082.729 1.0003
Sigma[1,4] 4.4695 0.0330 2.3902 1.5063 2.8792 3.9482 5.4250 10.4899 5258.063 1.0002
Sigma[2,1] 3.2675 0.0291 2.0186 0.6904 1.9555 2.8381 4.0762 8.3975 4813.247 1.0002
Sigma[2,2] 5.9709 0.0375 2.8097 2.6777 4.0989 5.3108 7.0432 13.1039 5610.344 0.9999
Sigma[2,3] 5.7795 0.0406 2.9054 2.3213 3.8242 5.1076 6.9247 13.1964 5125.286 0.9999
Sigma[2,4] 4.7869 0.0381 2.7172 1.4665 2.9982 4.1932 5.8477 11.8310 5095.896 0.9999
Sigma[3,1] 4.5275 0.0341 2.4277 1.5505 2.9073 3.9716 5.5158 10.7159 5082.729 1.0003
Sigma[3,2] 5.7795 0.0406 2.9054 2.3213 3.8242 5.1076 6.9247 13.1964 5125.286 0.9999
Sigma[3,3] 7.5745 0.0472 3.4951 3.3931 5.2114 6.7666 8.9883 16.4423 5477.866 1.0000
Sigma[3,4] 5.3043 0.0431 3.0092 1.6048 3.3149 4.6214 6.5059 13.0231 4875.053 1.0000
Sigma[4,1] 4.4695 0.0330 2.3902 1.5063 2.8792 3.9482 5.4250 10.4899 5258.063 1.0002
Sigma[4,2] 4.7869 0.0381 2.7172 1.4665 2.9982 4.1932 5.8477 11.8310 5095.896 0.9999
Sigma[4,3] 5.3043 0.0431 3.0092 1.6048 3.3149 4.6214 6.5059 13.0231 4875.053 1.0000
Sigma[4,4] 7.5458 0.0464 3.4975 3.3762 5.1968 6.7754 8.9018 16.7145 5687.877 1.0000
gA 1.9776 0.1310 13.0721 0.0610 0.1999 0.4289 1.0795 11.0098 9958.970 0.9999
gB 7.4943 4.0782 318.5492 0.0711 0.2195 0.4593 1.1633 13.3140 6101.207 1.0002
tAfix 0.5697 0.0021 0.2534 0.0624 0.4014 0.5726 0.7391 1.0598 14279.938 0.9999
tBfix 0.6175 0.0017 0.1966 0.2087 0.4960 0.6233 0.7472 0.9880 12997.034 0.9999
muS[1,1] 45.9683 0.0048 0.5978 44.7820 45.5883 45.9672 46.3488 47.1514 15587.680 1.0000
muS[1,2] 45.0950 0.0062 0.7118 43.7061 44.6366 45.0875 45.5423 46.5277 13097.237 1.0000
muS[2,1] 45.1627 0.0053 0.6039 43.9963 44.7712 45.1518 45.5365 46.4054 13126.765 0.9999
muS[2,2] 44.2894 0.0071 0.7547 42.8567 43.7916 44.2748 44.7528 45.8650 11327.051 0.9999
lp__ -122.8226 0.0464 3.2932 -130.3131 -124.7879 -122.4356 -120.4366 -117.5032 5029.982 1.0001
rstan::summary(stanfit.omitA.fixed)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.2574 0.0088 0.7890 43.7144 44.7448 45.2583 45.7675 46.8315 8097.684 1.0003
Sigma[1,1] 5.0967 0.0373 2.7164 2.2099 3.4221 4.4604 5.9700 11.8838 5297.578 1.0002
Sigma[1,2] 3.2790 0.0357 2.3763 0.3943 1.8320 2.7973 4.1393 8.9494 4424.611 1.0002
Sigma[1,3] 4.7790 0.0440 2.9268 1.5613 2.9810 4.1119 5.7603 11.9380 4421.942 1.0002
Sigma[1,4] 4.4093 0.0416 2.8492 1.1198 2.6466 3.7992 5.4136 11.4270 4682.629 1.0001
Sigma[2,1] 3.2790 0.0357 2.3763 0.3943 1.8320 2.7973 4.1393 8.9494 4424.611 1.0002
Sigma[2,2] 6.5276 0.0448 3.2724 2.8081 4.3685 5.7434 7.7568 14.6226 5328.418 1.0000
Sigma[2,3] 6.0337 0.0479 3.3132 2.2552 3.9003 5.2501 7.2361 14.2871 4782.193 1.0001
Sigma[2,4] 5.4248 0.0480 3.3588 1.5510 3.2790 4.6042 6.6785 13.7811 4889.910 0.9999
Sigma[3,1] 4.7790 0.0440 2.9268 1.5613 2.9810 4.1119 5.7603 11.9380 4421.942 1.0002
Sigma[3,2] 6.0337 0.0479 3.3132 2.2552 3.9003 5.2501 7.2361 14.2871 4782.193 1.0001
Sigma[3,3] 7.9419 0.0578 3.9988 3.5007 5.3899 6.9871 9.3427 18.0964 4791.667 1.0002
Sigma[3,4] 5.4599 0.0527 3.5518 1.2583 3.2257 4.6413 6.8036 14.2304 4539.763 1.0000
Sigma[4,1] 4.4093 0.0416 2.8492 1.1198 2.6466 3.7992 5.4136 11.4270 4682.629 1.0001
Sigma[4,2] 5.4248 0.0480 3.3588 1.5510 3.2790 4.6042 6.6785 13.7811 4889.910 0.9999
Sigma[4,3] 5.4599 0.0527 3.5518 1.2583 3.2257 4.6413 6.8036 14.2304 4539.763 1.0000
Sigma[4,4] 8.5294 0.0603 4.4231 3.5867 5.6375 7.4556 10.1841 19.6041 5375.406 1.0000
gB 5.2267 2.3155 159.5574 0.0569 0.1714 0.3591 0.9125 11.2414 4748.546 1.0004
gAB 2.3275 0.5215 43.5371 0.0414 0.1258 0.2678 0.7044 8.6694 6968.773 1.0002
tBfix 0.4843 0.0022 0.2268 0.0341 0.3360 0.4853 0.6331 0.9252 10436.991 1.0000
tABfix 0.0427 0.0040 0.4077 -0.7677 -0.1998 0.0361 0.2802 0.8791 10202.503 1.0001
muS[1,1] 45.6213 0.0076 0.7311 44.1875 45.1503 45.6201 46.0891 47.0828 9278.057 1.0003
muS[1,2] 44.8936 0.0104 0.9142 43.1020 44.3013 44.8881 45.4883 46.7200 7734.594 1.0002
muS[2,1] 45.5786 0.0085 0.7805 44.0218 45.0782 45.5756 46.0772 47.1339 8469.740 1.0002
muS[2,2] 44.9363 0.0097 0.8829 43.2282 44.3543 44.9318 45.5077 46.7092 8205.360 1.0002
lp__ -126.2423 0.0451 3.2857 -133.7117 -128.1665 -125.8452 -123.8817 -120.9666 5296.880 1.0003
rstan::summary(stanfit.notA.fixed)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 45.2631 0.0083 0.7595 43.7774 44.7791 45.2613 45.7484 46.7804 8440.268 1.0000
Sigma[1,1] 4.9768 0.0281 2.3213 2.1994 3.4176 4.4300 5.8861 11.1013 6832.141 1.0001
Sigma[1,2] 3.1720 0.0301 2.0467 0.4081 1.8434 2.7632 4.0606 8.1823 4612.807 1.0000
Sigma[1,3] 4.6545 0.0351 2.5014 1.5594 2.9999 4.0865 5.6746 10.9684 5078.968 1.0001
Sigma[1,4] 4.2782 0.0340 2.4747 1.0672 2.6298 3.7691 5.3409 10.4932 5302.071 1.0000
Sigma[2,1] 3.1720 0.0301 2.0467 0.4081 1.8434 2.7632 4.0606 8.1823 4612.807 1.0000
Sigma[2,2] 6.3554 0.0417 3.0019 2.7985 4.3500 5.6584 7.5627 13.8692 5188.271 1.0000
Sigma[2,3] 5.8288 0.0435 2.9582 2.2005 3.8696 5.1515 7.0470 13.1932 4626.576 1.0001
Sigma[2,4] 5.3538 0.0454 3.0907 1.6270 3.3340 4.6614 6.5911 12.9858 4637.742 0.9999
Sigma[3,1] 4.6545 0.0351 2.5014 1.5594 2.9999 4.0865 5.6746 10.9684 5078.968 1.0001
Sigma[3,2] 5.8288 0.0435 2.9582 2.2005 3.8696 5.1515 7.0470 13.1932 4626.576 1.0001
Sigma[3,3] 7.6998 0.0491 3.5216 3.4423 5.3200 6.8897 9.1657 16.5559 5149.284 1.0003
Sigma[3,4] 5.3865 0.0469 3.1790 1.3866 3.2943 4.7033 6.6972 13.2208 4601.711 1.0001
Sigma[4,1] 4.2782 0.0340 2.4747 1.0672 2.6298 3.7691 5.3409 10.4932 5302.071 1.0000
Sigma[4,2] 5.3538 0.0454 3.0907 1.6270 3.3340 4.6614 6.5911 12.9858 4637.742 0.9999
Sigma[4,3] 5.3865 0.0469 3.1790 1.3866 3.2943 4.7033 6.6972 13.2208 4601.711 1.0001
Sigma[4,4] 8.3854 0.0564 4.1380 3.5225 5.6295 7.4075 9.9652 18.7306 5373.661 1.0000
gB 2.1272 0.2398 20.2222 0.0560 0.1731 0.3672 0.9155 10.5541 7114.122 0.9999
tBfix 0.4889 0.0024 0.2232 0.0463 0.3442 0.4899 0.6344 0.9308 8793.730 0.9999
muS[1,1] 45.6088 0.0072 0.6952 44.2404 45.1654 45.6074 46.0601 46.9993 9224.097 1.0000
muS[1,2] 44.9174 0.0095 0.8488 43.2532 44.3671 44.9199 45.4542 46.6265 8010.281 1.0000
muS[2,1] 45.6088 0.0072 0.6952 44.2404 45.1654 45.6074 46.0601 46.9993 9224.097 1.0000
muS[2,2] 44.9174 0.0095 0.8488 43.2532 44.3671 44.9199 45.4542 46.6265 8010.281 1.0000
lp__ -123.5102 0.0416 2.9945 -130.2652 -125.2681 -123.1251 -121.3640 -118.7708 5193.325 1.0000
rstan::summary(stanfit.omitB.fixed)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 46.1062 0.0088 0.7731 44.5893 45.6032 46.1061 46.6004 47.6667 7780.766 1.0001
Sigma[1,1] 5.2290 0.0336 2.6150 2.2209 3.4783 4.5763 6.2281 12.0991 6064.148 1.0001
Sigma[1,2] 5.2777 0.0465 3.2434 1.4737 3.1368 4.4894 6.5087 13.6936 4857.261 1.0002
Sigma[1,3] 3.8194 0.0368 2.5816 0.6714 2.1428 3.2045 4.8417 10.3749 4910.171 1.0001
Sigma[1,4] 5.4374 0.0501 3.5478 1.1610 3.0602 4.6078 6.8489 14.7496 5016.767 1.0002
Sigma[2,1] 5.2777 0.0465 3.2434 1.4737 3.1368 4.4894 6.5087 13.6936 4857.261 1.0002
Sigma[2,2] 9.5040 0.0693 4.9585 3.8259 6.2035 8.2803 11.3695 22.5012 5124.308 1.0004
Sigma[2,3] 7.1284 0.0568 3.9693 2.5876 4.4935 6.1298 8.6193 17.3956 4875.406 1.0006
Sigma[2,4] 7.8080 0.0708 4.8591 2.1229 4.5696 6.6143 9.6856 20.5381 4715.297 1.0004
Sigma[3,1] 3.8194 0.0368 2.5816 0.6714 2.1428 3.2045 4.8417 10.3749 4910.171 1.0001
Sigma[3,2] 7.1284 0.0568 3.9693 2.5876 4.4935 6.1298 8.6193 17.3956 4875.406 1.0006
Sigma[3,3] 6.9790 0.0498 3.5900 2.9043 4.5905 6.0472 8.3265 16.3621 5192.297 1.0005
Sigma[3,4] 6.4968 0.0615 4.2250 1.5595 3.7277 5.4386 8.1066 17.6446 4719.221 1.0005
Sigma[4,1] 5.4374 0.0501 3.5478 1.1610 3.0602 4.6078 6.8489 14.7496 5016.767 1.0002
Sigma[4,2] 7.8080 0.0708 4.8591 2.1229 4.5696 6.6143 9.6856 20.5381 4715.297 1.0004
Sigma[4,3] 6.4968 0.0615 4.2250 1.5595 3.7277 5.4386 8.1066 17.6446 4719.221 1.0005
Sigma[4,4] 11.1849 0.0816 5.9556 4.2681 7.1782 9.7222 13.4648 26.6484 5323.830 1.0004
gB 4.4513 2.4654 167.6356 0.0441 0.1378 0.2930 0.7437 9.5626 4623.444 1.0003
gAB 2.3678 0.6136 63.9804 0.0428 0.1270 0.2742 0.7240 9.1832 10872.592 1.0001
tBfix 0.3249 0.0030 0.3013 -0.2363 0.1240 0.3137 0.5150 0.9431 9766.838 1.0006
tABfix 0.0852 0.0045 0.4368 -0.7774 -0.1792 0.0682 0.3365 1.0129 9379.698 1.0002
muS[1,1] 46.3785 0.0092 0.8166 44.7770 45.8425 46.3748 46.9080 48.0045 7944.362 1.0000
muS[1,2] 45.8339 0.0095 0.8457 44.1599 45.2917 45.8325 46.3749 47.5382 7930.742 1.0003
muS[2,1] 46.2933 0.0098 0.8683 44.5608 45.7307 46.2934 46.8586 48.0313 7821.221 1.0000
muS[2,2] 45.9191 0.0087 0.7919 44.3758 45.4059 45.9150 46.4256 47.5064 8205.253 1.0002
lp__ -128.5782 0.0456 3.2176 -135.8299 -130.5182 -128.2114 -126.2480 -123.3809 4984.355 1.0001
rstan::summary(stanfit.notB.fixed)[[1]] %>% kable(digits=4) %>% kable_classic(full_width=F)
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
mu 46.1257 0.0097 0.7643 44.6130 45.6387 46.1245 46.6091 47.6576 6215.759 1.0004
Sigma[1,1] 5.1773 0.0352 2.6005 2.1911 3.4632 4.5443 6.1162 12.0951 5442.973 1.0004
Sigma[1,2] 5.2915 0.0511 3.2543 1.5004 3.1380 4.5022 6.5149 13.9723 4060.900 1.0009
Sigma[1,3] 3.8559 0.0398 2.6066 0.7433 2.1519 3.2344 4.8255 10.7380 4297.415 1.0011
Sigma[1,4] 5.3640 0.0544 3.5411 1.1029 3.0499 4.5144 6.6930 14.8780 4237.813 1.0007
Sigma[2,1] 5.2915 0.0511 3.2543 1.5004 3.1380 4.5022 6.5149 13.9723 4060.900 1.0009
Sigma[2,2] 9.5314 0.0797 4.8733 3.8600 6.2616 8.3284 11.4463 22.2564 3741.184 1.0008
Sigma[2,3] 7.1402 0.0643 3.8981 2.5737 4.5070 6.1943 8.6825 17.3085 3675.454 1.0009
Sigma[2,4] 7.9000 0.0802 4.8410 2.1882 4.6242 6.7029 9.8147 20.7525 3646.747 1.0007
Sigma[3,1] 3.8559 0.0398 2.6066 0.7433 2.1519 3.2344 4.8255 10.7380 4297.415 1.0011
Sigma[3,2] 7.1402 0.0643 3.8981 2.5737 4.5070 6.1943 8.6825 17.3085 3675.454 1.0009
Sigma[3,3] 6.9664 0.0537 3.5036 2.8656 4.6142 6.1211 8.3340 16.0749 4252.640 1.0008
Sigma[3,4] 6.5962 0.0674 4.1701 1.6859 3.8156 5.5710 8.1876 17.6799 3832.836 1.0010
Sigma[4,1] 5.3640 0.0544 3.5411 1.1029 3.0499 4.5144 6.6930 14.8780 4237.813 1.0007
Sigma[4,2] 7.9000 0.0802 4.8410 2.1882 4.6242 6.7029 9.8147 20.7525 3646.747 1.0007
Sigma[4,3] 6.5962 0.0674 4.1701 1.6859 3.8156 5.5710 8.1876 17.6799 3832.836 1.0010
Sigma[4,4] 11.0775 0.0867 5.8380 4.2290 7.0950 9.6181 13.3784 26.2842 4530.283 1.0005
gB 1.6384 0.1625 17.8051 0.0440 0.1352 0.2872 0.7285 8.3503 12003.485 1.0002
tBfix 0.3278 0.0030 0.2984 -0.2421 0.1299 0.3209 0.5207 0.9284 9892.924 1.0000
muS[1,1] 46.3575 0.0101 0.8038 44.7457 45.8464 46.3571 46.8721 47.9612 6329.851 1.0003
muS[1,2] 45.8939 0.0097 0.7818 44.3713 45.3858 45.8902 46.3866 47.4778 6477.936 1.0004
muS[2,1] 46.3575 0.0101 0.8038 44.7457 45.8464 46.3571 46.8721 47.9612 6329.851 1.0003
muS[2,2] 45.8939 0.0097 0.7818 44.3713 45.3858 45.8902 46.3866 47.4778 6477.936 1.0004
lp__ -125.8371 0.0396 2.9811 -132.6273 -127.6327 -125.4346 -123.6928 -121.1686 5662.348 1.0002

   

6. Summary

 

\[\begin{align*} \mathcal{M}_{\mathrm{full}}:&\ Y_{ijk}=\mu+s_k+\alpha_i+\beta_j+(\alpha\beta)_{ij}+\epsilon_{ijk}\tag{1}\\ \mathcal{M}_{\mathrm{full}}^{'}:&\ Y_{ijk}=\mu+s_k+\alpha_i+\beta_j+(\alpha\beta)_{ij}+(\alpha s)_{ik}+(\beta s)_{jk}+\epsilon_{ijk}\tag{2}\\ \mathcal{M}_{\mathrm{full}}^{*}:&\ Y_{ijk}=\mu+\alpha_i+\beta_j+(\alpha\beta)_{ij}+\epsilon_{ijk}^{*}\tag{3} \end{align*}\]

 


Method              Classical  JASP    1      2      3      4      New 2  New 4
fixed effect AB     1          2.66    2.78   2.66   2.66   2.66   2.03   2.03
fixed effect A      0.019      0.55    0.25   0.53   1.26   0.55   0.23   0.20
fixed effect B      0.0033     0.46    0.28   0.45   2.99   0.46   0.05   0.04
random effect AB    NA                 5.75   6.42   6.42   6.42   3.92   3.92
random effect A     NA                 1.60   1.85   1.25   0.67   1.53   0.21
random effect B     NA                 1.60   1.84   3.03   0.55   1.48   0.04

Note: Factor A is shape and factor B is color. The Classical column reports p-values; all remaining columns report Bayes factors for the reduced model against the full (or the additive) model. Stan results are exactly reproducible only when several configurations, such as the computer hardware and the C++ compiler, are identical (Stan Development Team, 2023, chap. 22), so the Bayes factor values may differ slightly when this R Markdown is knitted on a different operating system or with a different ‘rstan’ version.

 

  • Classical: Repeated-measures ANOVA

  • JASP: The reduced model excludes each effect term and the interaction in (2). Version 0.16.3 or later seems to match Method 4.

  • Method 1: Compare the model without each effect to the full model in (1). BayesFactor version 0.9.12-4.6.

  • Method 2: Include the random slopes in (2) and test each effect again. The reduced model only omits each effect term in (2).

  • Method 3: The reduced model excludes each effect term, the interaction, and the random slope for such an effect in (2).

  • Method 4: The reduced model excludes each effect term and the interaction in (2).

  • New Method 2: The full model in (3) has no explicit terms for the subject-specific random effects. The reduced model only omits each effect term in (3).

  • New Method 4: The reduced model excludes each effect term and the interaction in (3). (See the sketch right after this list.)
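As a cross-check, the New 2 and New 4 entries for the fixed effects are, up to Monte Carlo error, ratios of the bridge-sampling marginal likelihoods reported above. A minimal sketch for factor A, assuming the M.*.fixed objects from the earlier code are still in the workspace:

exp(M.omitA.fixed$logml - M.full.fixed$logml)   #New Method 2 for factor A, approximately 0.23
exp(M.notA.fixed$logml - M.omitAB.fixed$logml)  #New Method 4 for factor A, approximately 0.20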

 

JASP Results

Transferring the ‘withmain’ Bayes factors to the (type-II) ‘top’ ones: 2.66 ≈ 3.600/1.351, 0.55 ≈ 1.962/3.600, 0.46 ≈ 1.656/3.600
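Since the ‘withmain’ Bayes factors are all taken against the same denominator model (as the numbers above indicate), each ‘top’ Bayes factor is simply a ratio of two of them; for the interaction, for example,

\[\mathit{BF}_{\text{omit }AB\text{ vs. full}}=\frac{\mathit{BF}_{\text{omit }AB\text{ vs. denominator}}}{\mathit{BF}_{\text{full vs. denominator}}}\approx\frac{3.600}{1.351}\approx 2.66.\]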

 

\(\mathit{BF}_\mathcal{M}=\frac{p(\mathcal{M}\ \mid\ \mathrm{data})\ /\ (1-p(\mathcal{M}\ \mid\ \mathrm{data}))}{p(\mathcal{M})\ /\ (1-p(\mathcal{M}))}\)

 

\(\mathit{BF}_{10}=\frac{p(\mathcal{M}_1\ \mid\ \mathrm{data})\ /\ p(\mathcal{M}_1)}{p(\mathcal{M}_0\ \mid\ \mathrm{data})\ /\ p(\mathcal{M}_0)}\)
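As a purely made-up numerical illustration of the first formula (the probabilities are placeholders, not values taken from the JASP output): with \(p(\mathcal{M})=0.2\) and \(p(\mathcal{M}\mid\mathrm{data})=0.4\),

\[\mathit{BF}_\mathcal{M}=\frac{0.4\,/\,0.6}{0.2\,/\,0.8}=\frac{2/3}{1/4}\approx 2.67.\]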

   

All-in-One R Script

 

library(rstan)
# devtools::install_github("quentingronau/bridgesampling@master")
library(bridgesampling)
library(BayesFactor)

data(puzzles)
dat <- matrix(puzzles$RT, ncol=4,
              dimnames=list(paste0("s",1:12),
                            c("round&mono","square&mono","round&color","square&color"))) #wide data format


set.seed(277)
(JZS_BF <- anovaBF(RT ~ shape * color + ID, data=puzzles, whichRandom="ID", whichModels="top", progress=F))


# randomEffects <- c("ID","shape","color")
randomEffects <- c("ID")


## Method 1
set.seed(277)
full <-   lmBF(RT~shape+color+ID+shape:color                  , puzzles, whichRandom=randomEffects, progress=F)

set.seed(277)
omitAB <- lmBF(RT~shape+color+ID                              , puzzles, whichRandom=randomEffects, progress=F)
omitAB / full

set.seed(277)
omitA <-  lmBF(RT~      color+ID+shape:color                  , puzzles, whichRandom=randomEffects, progress=F)
omitA / full

set.seed(277)
omitB <-  lmBF(RT~shape+      ID+shape:color                  , puzzles, whichRandom=randomEffects, progress=F)
omitB / full


## Method 2
set.seed(277)
full <-   lmBF(RT~shape+color+ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)

set.seed(277)
omitAB <- lmBF(RT~shape+color+ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitAB / full

set.seed(277)
omitA <-  lmBF(RT~      color+ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitA / full

set.seed(277)
omitB <-  lmBF(RT~shape+      ID+shape:color+shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
omitB / full


## Method 3
set.seed(277)
notA <-   lmBF(RT~      color+ID+                     color:ID, puzzles, whichRandom=randomEffects, progress=F)
notA / omitAB

set.seed(277)
notB <-   lmBF(RT~shape+      ID+            shape:ID         , puzzles, whichRandom=randomEffects, progress=F)
notB / omitAB


## Method 4
set.seed(277)
exclA <-  lmBF(RT~      color+ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
exclA / omitAB

set.seed(277)
exclB <-  lmBF(RT~shape+      ID+            shape:ID+color:ID, puzzles, whichRandom=randomEffects, progress=F)
exclB / omitAB


# #DATASET: https://www.kaggle.com/datasets/sherloconan/alternative-covariance-structure-of-the-error-term?select=UpDown3.txt
# df <- read.table("~/Documents/UVic/Project Meeting/paradox/UpDown3.txt",header=T,stringsAsFactors=T)
# dat <- matrix(df$RT,ncol=4,byrow=T,
#               dimnames=list(paste0("s",1:31),
#                             c("onehand&A","twohands&A","onehand&M","twohands&M"))) #factor A: resp; factor B: align


{
  model.full <- "
  data {
    int<lower=1> nS;        // number of subjects
    int<lower=2> nA;        // number of levels of the treatment effect A
    int<lower=2> nB;        // number of levels of the treatment effect B
    // vector[nA*nB] Y[nS];        // responses [removed features]
    array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
    real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
    real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
    real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
    matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
    real lwr;               // lower bound for the grand mean
    real upr;               // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    cov_matrix[nA*nB] Sigma;           // error covariance matrix
    real<lower=0> gA;                  // variance of the treatment effect A
    real<lower=0> gB;                  // variance of the treatment effect B
    real<lower=0> gAB;                 // variance of the interaction effect AB
    vector[nA] tA;                     // treatment effect A
    vector[nB] tB;                     // treatment effect B
    matrix[nA,nB] tAB;                 // interaction effect AB
  }

  transformed parameters {
    matrix[nA,nB] muS;     // condition means
    for (i in 1:nA) {
      for (j in 1:nB) {
        muS[i,j] = mu + tA[i] + tB[j] + tAB[i,j];
      }
    }
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += scaled_inv_chi_square_lpdf(gA | 1, hA);
    target += scaled_inv_chi_square_lpdf(gB | 1, hB);
    target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
    target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
    target += normal_lpdf(tA | 0, sqrt(gA));
    target += normal_lpdf(tB | 0, sqrt(gB));
    target += normal_lpdf(to_vector(tAB) | 0, sqrt(gAB));
    for (k in 1:nS) {
      target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
    }
  }
"
  stanmodel.full <- stan_model(model_code=model.full)
  
  model.omitAB <- "
  data {
    int<lower=1> nS;        // number of subjects
    int<lower=2> nA;        // number of levels of the treatment effect A
    int<lower=2> nB;        // number of levels of the treatment effect B
    // vector[nA*nB] Y[nS];        // responses [removed features]
    array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
    real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
    real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
    matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
    real lwr;               // lower bound for the grand mean
    real upr;               // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    cov_matrix[nA*nB] Sigma;           // error covariance matrix
    real<lower=0> gA;                  // variance of the treatment effect A
    real<lower=0> gB;                  // variance of the treatment effect B
    vector[nA] tA;                     // treatment effect A
    vector[nB] tB;                     // treatment effect B
  }

  transformed parameters {
    matrix[nA,nB] muS;     // condition means
    for (i in 1:nA) {
      for (j in 1:nB) {
        muS[i,j] = mu + tA[i] + tB[j];
      }
    }
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += scaled_inv_chi_square_lpdf(gA | 1, hA);
    target += scaled_inv_chi_square_lpdf(gB | 1, hB);
    target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
    target += normal_lpdf(tA | 0, sqrt(gA));
    target += normal_lpdf(tB | 0, sqrt(gB));
    for (k in 1:nS) {
      target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
    }
  }
"
  stanmodel.omitAB <- stan_model(model_code=model.omitAB)
  
  model.omitA <- "
  data {
    int<lower=1> nS;        // number of subjects
    int<lower=2> nA;        // number of levels of the treatment effect A
    int<lower=2> nB;        // number of levels of the treatment effect B
    // vector[nA*nB] Y[nS];        // responses [removed features]
    array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
    real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
    real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
    matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
    real lwr;               // lower bound for the grand mean
    real upr;               // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    cov_matrix[nA*nB] Sigma;           // error covariance matrix
    real<lower=0> gB;                  // variance of the treatment effect B
    real<lower=0> gAB;                 // variance of the interaction effect AB
    vector[nB] tB;                     // treatment effect B
    matrix[nA,nB] tAB;                 // interaction effect AB
  }

  transformed parameters {
    matrix[nA,nB] muS;     // condition means
    for (i in 1:nA) {
      for (j in 1:nB) {
        muS[i,j] = mu + tB[j] + tAB[i,j];
      }
    }
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += scaled_inv_chi_square_lpdf(gB | 1, hB);
    target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
    target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
    target += normal_lpdf(tB | 0, sqrt(gB));
    target += normal_lpdf(to_vector(tAB) | 0, sqrt(gAB));
    for (k in 1:nS) {
      target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
    }
  }
"
  stanmodel.omitA <- stan_model(model_code=model.omitA)
  
  model.notA <- "
  data {
    int<lower=1> nS;        // number of subjects
    int<lower=2> nA;        // number of levels of the treatment effect A
    int<lower=2> nB;        // number of levels of the treatment effect B
    // vector[nA*nB] Y[nS];        // responses [removed features]
    array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
    real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
    matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
    real lwr;               // lower bound for the grand mean
    real upr;               // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    cov_matrix[nA*nB] Sigma;           // error covariance matrix
    real<lower=0> gB;                  // variance of the treatment effect B
    vector[nB] tB;                     // treatment effect B
  }

  transformed parameters {
    matrix[nA,nB] muS;     // condition means
    for (i in 1:nA) {
      for (j in 1:nB) {
        muS[i,j] = mu + tB[j];
      }
    }
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += scaled_inv_chi_square_lpdf(gB | 1, hB);
    target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
    target += normal_lpdf(tB | 0, sqrt(gB));
    for (k in 1:nS) {
      target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
    }
  }
"
  stanmodel.notA <- stan_model(model_code=model.notA)
  
  model.omitB <- "
  data {
    int<lower=1> nS;        // number of subjects
    int<lower=2> nA;        // number of levels of the treatment effect A
    int<lower=2> nB;        // number of levels of the treatment effect B
    // vector[nA*nB] Y[nS];        // responses [removed features]
    array[nS] vector[nA*nB] Y;  // responses [Stan 2.26+ syntax for array declarations]
    real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
    real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
    matrix[nA*nB,nA*nB] I;  // scale matrix of the inverse-Wishart prior
    real lwr;               // lower bound for the grand mean
    real upr;               // upper bound for the grand mean
  }

  parameters {
    real<lower=lwr, upper=upr> mu;     // grand mean
    cov_matrix[nA*nB] Sigma;           // error covariance matrix
    real<lower=0> gA;                  // variance of the treatment effect A
    real<lower=0> gAB;                 // variance of the interaction effect AB
    vector[nA] tA;                     // treatment effect A
    matrix[nA,nB] tAB;                 // interaction effect AB
  }

  transformed parameters {
    matrix[nA,nB] muS;     // condition means
    for (i in 1:nA) {
      for (j in 1:nB) {
        muS[i,j] = mu + tA[i] + tAB[i,j];
      }
    }
  }

  model {
    target += uniform_lpdf(mu | lwr, upr);
    target += scaled_inv_chi_square_lpdf(gA | 1, hA);
    target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
    target += inv_wishart_lpdf(Sigma | nA*nB+1, I);
    target += normal_lpdf(tA | 0, sqrt(gA));
    target += normal_lpdf(to_vector(tAB) | 0, sqrt(gAB));
    for (k in 1:nS) {
      target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
    }
  }
"
  stanmodel.omitB <- stan_model(model_code=model.omitB)
} #Stan models


BFBS <- function(seed=277, omit="AB", diagnostics=F, n.iter=10000, n.burnin=1000,
                 hA=1, hB=1, hAB=1, data=dat) {
  
  datalist <- list("nA"=2, "nB"=2, "nS"=nrow(data), "I"=diag(4), "Y"=data,
                   "hA"=hA, "hB"=hB, "hAB"=hAB,
                   "lwr"=mean(data)-6*sd(data), "upr"=mean(data)+6*sd(data))
  if (omit=="notA") {
    stanfit.full <- sampling(stanmodel.omitAB, data=datalist[-8],
                             iter=n.iter, warmup=n.burnin, chains=2, seed=seed, refresh=0)
  } else {
    stanfit.full <- sampling(stanmodel.full, data=datalist,
                           iter=n.iter, warmup=n.burnin, chains=2, seed=seed, refresh=0)
  }
  
  if (omit=="AB") {
    stanfit.omit <- sampling(stanmodel.omitAB, data=datalist[-8],
                             iter=n.iter, warmup=n.burnin, chains=2, seed=seed, refresh=0)
  } else if (omit=="A") {
    stanfit.omit <- sampling(stanmodel.omitA, data=datalist[-6],
                             iter=n.iter, warmup=n.burnin, chains=2, seed=seed, refresh=0)
  } else if (omit=="notA") {
    stanfit.omit <- sampling(stanmodel.notA, data=datalist[-c(6,8)],
                             iter=n.iter, warmup=n.burnin, chains=2, seed=seed, refresh=0)
  } else if (omit=="B") {
    stanfit.omit <- sampling(stanmodel.omitB, data=datalist[-7],
                             iter=n.iter, warmup=n.burnin, chains=2, seed=seed, refresh=0)
  } else {return(NULL)}
  
  set.seed(seed)
  logML.full <- bridge_sampler(stanfit.full, silent=T)
  
  set.seed(seed)
  logML.omit <- bridge_sampler(stanfit.omit, silent=T)
  
  if (diagnostics) {
    list("bf"=exp(logML.omit$logml - logML.full$logml),
         "pererr.full"=error_measures(logML.full)$cv,
         "pererr.omit"=error_measures(logML.omit)$cv,
         "stanfit.full"=stanfit.full, "stanfit.omit"=stanfit.omit)
  } else {
    list("bf"=exp(logML.omit$logml - logML.full$logml),
         "pererr.full"=error_measures(logML.full)$cv,
         "pererr.omit"=error_measures(logML.omit)$cv)
  }
}

BFBS()


# BFBS(data=dat[,c(1,3,2,4)])
# MCMC <- BFBS(diag=T)
# rstan::summary(MCMC$stanfit.full)[[1]]
# rstan::summary(MCMC$stanfit.omit)[[1]]
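# Usage sketch (not in the original script): one BFBS() call per comparison that
# the 'omit' argument supports, under the default seed and prior scales.
# BFBS(omit="AB")$bf    # omit the interaction (reduced vs. full), same as BFBS() above
# BFBS(omit="A")$bf     # omit factor A only (violates marginality)
# BFBS(omit="notA")$bf  # omit factor A and the interaction (vs. the additive model)
# BFBS(omit="B")$bf     # omit factor B only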


num <- 50; BF <- PerErr.full <- PerErr.omit <- numeric(num)
system.time(
  for (i in 1:num) { #roughly 43 min of run time
    result <- BFBS(seed=i)
    BF[i] <- result$bf
    PerErr.full[i] <- result$pererr.full
    PerErr.omit[i] <- result$pererr.omit
  })

bf <- c(4.05647210548262,3.99419348691491,4.09949693967847,3.92878661912672,3.99346501969941,4.1041977415027,4.01187854024637,3.85262577393865,3.93498613022074,3.7959632067108,4.06273998871494,3.86612903376162,3.91669558490761,3.92666677237002,4.01556215448371,4.08630732676226,3.88046822429843,3.72550155503768,3.86042880980317,3.8324611039173,3.81374112157872,3.86796086097283,3.79699321356372,3.86859354664543,3.90203078090476,3.89034672364113,3.96044534960275,4.02858796476192,3.97144086796937,3.9940119621657,3.73634963463427,4.09346812624026,3.93332471789049,4.06223087398158,4.02565452655435,3.8421857771291,4.03985169808534,4.21403970656319,3.78081959663943,3.98815402559567,3.88689203403628,3.98423957029962,3.86265923891633,3.86058720330999,3.94699831001704,3.81781977057256,4.07450593127947,4.07830256912448,3.73186618797362,3.94338997899317)
pererr.full <- c(0.0172196117880734,0.0170345113842662,0.0179809716041862,0.0185990743099796,0.0171536276721085,0.0164671902593211,0.0179489003002355,0.0173734213263208,0.0177290613263816,0.0199503074729354,0.0172021924497536,0.0196874018703012,0.0179513215986035,0.0160792946284345,0.0261824659846771,0.0187934074119955,0.0162978787204508,0.0182594243710483,0.0183313084800472,0.0170976912708907,0.016946984722067,0.0179195591378035,0.0205100539918351,0.0167399564142454,0.016833777996574,0.0173066089185493,0.0206933657519296,0.0196094027233308,0.0178569572635722,0.0179315266586975,0.017775138664447,0.0157961314022224,0.0182256624991207,0.0170120101984966,0.017398653144724,0.016531036242842,0.0167728282910102,0.0188375727737249,0.022680216883963,0.0187263177939863,0.0180876008568488,0.0169167757530576,0.0170801957262873,0.0169563852012543,0.0175382520688494,0.0233873851390953,0.0182587766016287,0.016424604376866,0.0177290704726127,0.0200079953879853)
pererr.omit <- c(0.015364069594792,0.0141872909121512,0.0134198986608653,0.0129788776041122,0.0140943671776956,0.0136505907466585,0.0147413047257379,0.0142159032532239,0.0135242004008789,0.0126583094819708,0.0136330010887258,0.014790755219527,0.017354614681238,0.0130105948181429,0.0140545283812849,0.0154755827101834,0.0139585887542481,0.0142634663268628,0.0157833224837662,0.0148191000932366,0.0140327424339935,0.0145012976828302,0.0151994553981779,0.0133733316779908,0.0137276642626478,0.0131931689478851,0.0154935659507819,0.0134917103937086,0.0127786422417655,0.0142739744406493,0.0152448596004383,0.0166337364055373,0.0155611431864445,0.0142243931582313,0.0130412881409578,0.0157796872849447,0.0155288354790176,0.01473337640433,0.0137710817930144,0.0139942231444543,0.0148273483659526,0.0132881441498138,0.0144139902201374,0.0132214262847577,0.0151863824494358,0.0172596500242996,0.0136941698170868,0.015377412270964,0.0136345845973714,0.0165631485379363)
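# Optional summary of the 50 stored replications (the hard-coded bf and pererr.*
# vectors appear to be saved results from a previous run of the loop above):
c(mean=mean(bf), sd=sd(bf), min=min(bf), max=max(bf))
summary(pererr.full)
summary(pererr.omit)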





model.full.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gA;                  // variance of the projected treatment effect A
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real<lower=0> gAB;                 // variance of the projected interaction effect AB
  real tAfix;                        // projected treatment effect A
  real tBfix;                        // projected treatment effect B
  real tABfix;                       // projected interaction effect AB
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,i+1) * tAfix / sqrt(2) 
                    + pow(-1,j+1) * tBfix / sqrt(2)
                    + pow(-1,i+j) * tABfix / 2;
    }
  }
}

model {
  // mu ~ implicit uniform prior
  // target += uniform_lpdf(mu | -1000, 1000);
  // target += normal_lpdf(mu | 0, 100);
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gA | 1, hA);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tAfix | 0, sqrt(gA));
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  target += normal_lpdf(tABfix | 0, sqrt(gAB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"
stanmodel.full.fixed <- stan_model(model_code=model.full.fixed)
datalist.fixed <- list("nS"=nrow(dat), "I"=diag(4), "Y"=dat,
                       "hA"=.5, "hB"=.5, "hAB"=.5,
                       "lwr"=mean(dat)-6*sd(dat),
                       "upr"=mean(dat)+6*sd(dat))

stanfit.full.fixed <- sampling(stanmodel.full.fixed, data=datalist.fixed,
                               iter=10000, warmup=1000, chains=2, seed=277, refresh=0)

set.seed(277)
M.full.fixed <- bridge_sampler(stanfit.full.fixed, silent=T)


model.omitAB.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hA;       // (square root) scale parameter of the variance of the treatment effect A
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gA;                  // variance of the projected treatment effect A
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real tAfix;                        // projected treatment effect A
  real tBfix;                        // projected treatment effect B
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,i+1) * tAfix / sqrt(2) 
                    + pow(-1,j+1) * tBfix / sqrt(2);
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gA | 1, hA);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tAfix | 0, sqrt(gA));
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.omitAB.fixed <- stan_model(model_code=model.omitAB.fixed)
stanfit.omitAB.fixed <- sampling(stanmodel.omitAB.fixed, data=datalist.fixed[-6],
                                 iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.omitAB.fixed <- bridge_sampler(stanfit.omitAB.fixed, silent=T)
bf(M.omitAB.fixed, M.full.fixed)


model.omitA.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  real<lower=0> hAB;      // (square root) scale parameter of the variance of the interaction effect AB
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real<lower=0> gAB;                 // variance of the projected interaction effect AB
  real tBfix;                        // projected treatment effect B
  real tABfix;                       // projected interaction effect AB
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,j+1) * tBfix / sqrt(2)
                    + pow(-1,i+j) * tABfix / 2;
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += scaled_inv_chi_square_lpdf(gAB | 1, hAB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  target += normal_lpdf(tABfix | 0, sqrt(gAB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.omitA.fixed <- stan_model(model_code=model.omitA.fixed)
stanfit.omitA.fixed <- sampling(stanmodel.omitA.fixed, data=datalist.fixed[-4],
                                iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.omitA.fixed <- bridge_sampler(stanfit.omitA.fixed, silent=T)
bf(M.omitA.fixed, M.full.fixed)


model.notA.fixed <- "
data {
  int<lower=1> nS;        // number of subjects
  // vector[4] Y[nS];        // responses [removed features]
  array[nS] vector[4] Y;  // responses [Stan 2.26+ syntax for array declarations]
  real<lower=0> hB;       // (square root) scale parameter of the variance of the treatment effect B
  matrix[4,4] I;          // scale matrix of the inverse-Wishart prior
  real lwr;               // lower bound for the grand mean
  real upr;               // upper bound for the grand mean
}

parameters {
  real<lower=lwr, upper=upr> mu;     // grand mean
  cov_matrix[4] Sigma;               // error covariance matrix
  real<lower=0> gB;                  // variance of the projected treatment effect B
  real tBfix;                        // projected treatment effect B
}

transformed parameters {
  matrix[2,2] muS;       // condition means
  for (i in 1:2) {
    for (j in 1:2) {
      muS[i,j] = mu + pow(-1,j+1) * tBfix / sqrt(2);
    }
  }
}

model {
  target += uniform_lpdf(mu | lwr, upr);
  target += scaled_inv_chi_square_lpdf(gB | 1, hB);
  target += inv_wishart_lpdf(Sigma | 5, I);
  target += normal_lpdf(tBfix | 0, sqrt(gB));
  for (k in 1:nS) {
    target += multi_normal_lpdf(Y[k] | to_vector(muS), Sigma);
  }
}
"

stanmodel.notA.fixed <- stan_model(model_code=model.notA.fixed)
stanfit.notA.fixed <- sampling(stanmodel.notA.fixed, data=datalist.fixed[-c(4,6)],
                               iter=10000, warmup=1000, chains=2, seed=277, refresh=0)
set.seed(277)
M.notA.fixed <- bridge_sampler(stanfit.notA.fixed, silent=T)
bf(M.notA.fixed, M.omitAB.fixed)