Bayes Model of Full Opioid Tree in JAGS

Here we look at the full opioid tree, to compare with the subtree of the non-healthcare-attended overdoses. For reference, the subtree is as follows:

[Figure: Opioid Pathways tree with data errors]

The version of the full pathways tree used to construct this model is the one used in the manuscript:

[Figure: Full opioid pathways tree as defined by PHAC]

The subtree used in the earlier model is a modification of the subtree rooted at node \(A\) in the full tree above; it further splits node \(D\) into separate data sources (BC Coroners data, Vital stats), and adds a node for data errors to incorporate uncertainty in the recorded data.

The full Bayesian model is constructed from the tree above with some modifications, namely adjustments to preserve tree structure (a tree is an acyclic graph in which any two vertices are connected by exactly one path; this is not the case above, where, for example, a cycle beginning and ending at \(O\) is possible). Tree structure can be recovered by aggregating data into nodes with more general definitions wherever needed, so that children are unique (i.e., no two sibling nodes point to the same child). In particular, we combine nodes \(H, I, J\) into a single new node with children \(K, L, M, N, O\), each of which may contain individuals from any of the three previously defined parents. The resulting tree and the corresponding graphical model are shown below; the same labelling is used in the JAGS model.

[Figure: Full opioid pathways tree for modeling, incorporating data errors]

[Figure: DAG for full opioid tree model, incorporating data errors]

This tree creates issues in the modeling. I suspect this is because we claim there are data errors (the latent nodes \(U, R, I\), in particular), yet the observed child counts sum exactly to the observed parent, so the latent sibling is forced to be zero.
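As a quick check of this forced-zero issue, using the observed counts supplied in the data list further below (the object names here are just for illustration):

# observed total at node B and its observed children E, F, G, H
B.obs    <- 34113
EFGH.obs <- c(16922, 1390, 15328, 473)
B.obs - sum(EFGH.obs)   # 0: nothing remains for the latent data-error node I

Since the remainder is exactly zero, the latent data-error node (EFGHI[5] in the code below) is deterministically zero whenever its parent and all of its siblings are observed. The model in this case looks as follows: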

## fullopioidJAGS.mod

data {
p.params <- c(alphap, betap);
q.params <- c(alphaq, betaq);
r.params <- c(alphar, betar, gammar, deltar, psir);
s.params <- c(alphas, betas, gammas);
t.params <- c(alphat, betat, gammat, deltat, psit, zetat);
u.params <- c(alphau, betau, gammau);
}

model {
Z ~ dnegbin(alphaZ, betaZ) T(Lz,Uz);
p ~ dbeta(p.params[1], p.params[2]);
AB[1] ~ dbinom(p, Z); 

q ~ dbeta(q.params[1], q.params[2]);
D ~ dbinom(q, AB[1]);
C <- AB[1] - D;

r ~ ddirch(r.params);
# split B into (E, F, G, H, I): sequential binomials out of the remaining count,
# with the Dirichlet probabilities renormalized at each step
B.bin[1] <- AB[2];
r.bin[1] <- r[1];
for (i in 2:5){
    B.bin[i] <- B.bin[i-1] - EFGHI[i-1]
    r.bin[i] <- r[i]/(sum(r[i:5]))
}
for (i in 1:4){
    EFGHI[i] ~ dbinom(r.bin[i], B.bin[i])
}
EFGHI[5] <- B.bin[1] - sum(EFGHI[1:4])   # remainder: the latent data-error node

s ~ ddirch(s.params);
D.bin[1] <- D;
s.bin[1] <- s[1];
D.bin[2] <- D.bin[1] - JKL[1];
s.bin[2] <- s[2]/(sum(s[2:3]));
for (i in 1:2){
    JKL[i] ~ dbinom(s.bin[i], D.bin[i])
}
JKL[3] <- D.bin[1] - sum(JKL[1:2]);

t ~ ddirch(t.params);
G.bin[1] <- EFGHI[3];
t.bin[1] <- t[1];
for (i in 2:6){
    G.bin[i] <- G.bin[i-1] - MNOPQR[i-1]
    t.bin[i] <- t[i]/(sum(t[i:6]))
}
for (i in 1:5){
    MNOPQR[i] ~ dbinom(t.bin[i], G.bin[i])
}
MNOPQR[6] <- G.bin[1] - sum(MNOPQR[1:5])

u ~ ddirch(u.params);
P.bin[1] <- MNOPQR[4];
u.bin[1] <- u[1];
P.bin[2] <- P.bin[1] - STU[1];
u.bin[2] <- u[2]/(sum(u[2:3]));
for (i in 1:2){
    STU[i] ~ dbinom(u.bin[i], P.bin[i])
}
STU[3] <- P.bin[1] - sum(STU[1:2]);

}
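Each split in the tree divides a parent count among its children. The JAGS code writes this as a chain of conditional binomials rather than a single multinomial; as a sketch of the equivalence for the split of \(B\) into \((E, F, G, H, I)\) with Dirichlet-distributed probabilities \(r = (r_1, \dots, r_5)\),

\[
(E, F, G, H, I) \sim \text{Multinomial}(B,\, r)
\]

is equivalent to drawing

\[
E \sim \text{Binomial}(B,\, r_1), \qquad
F \sim \text{Binomial}\!\left(B - E,\ \frac{r_2}{r_2 + r_3 + r_4 + r_5}\right), \qquad \dots
\]

with the last child taken as the remainder, \(I = B - (E + F + G + H)\). This is what the B.bin and r.bin recursions implement, and the same construction is used for the \(D\), \(G\), and \(P\) splits.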

The output from running the model with the above DAG and all data at all levels is the following:

# Create the data
library(gtools)
set.seed(19)

# PHAC has supplied prior bounds on A (11994,22742), which combined with HC
# attended cohort number provides a lower bound on Z.  An upper bound on Z was
# set by the same method, supposing HC data misses no more than 15% of all
# HC attended overdoses (34113*1.15 + 22742)
op.dat <- list("STU" = c(2270, 106, NA),
               "MNOPQR" = c(11678, 199, 1030, 2376, 45, NA),
               "JKL" = c(173, 2279, NA),
               "EFGHI" = c(16922, 1390, 15328, 473, NA),
               "AB" = c(NA, 34113),
               "alphau" = 30,     # dirichlet parameters for prior on u
               "betau" = 30,     # last parameter to sample U
               "gammau" = 3,    
               "alphat" = 5,     # dirichlet parameters for prior on t
               "betat" = 5,      # last parameter to sample R
               "gammat" = 5,     
               "deltat" = 5,     
               "psit" = 5,       
               "zetat" = 1,      
               "alphas" = 5,     # dirichlet parameters for prior on s
               "betas" = 5,      # last parameter samples L
               "gammas" = 1,     
               "alphar" = 5,     # dirichlet parameters for prior on r
               "betar" = 5,      # last parameter to sample I
               "gammar" = 5,     
               "deltar" = 5,     
               "psir" = 1,       
               "alphaq" = 3,     # beta parameters for prior on q
               "betaq" = 20,     # samples D
               "alphap" = 20,      # beta parameters for prior on p
               "betap" = 30,       # samples A
               "alphaZ" = 0.5,     # parameter for NegBin prior on Z
               "betaZ" = 50000,    # size parameter for NegBin prior on Z
               "Lz" = 11994 + 34113,    # lower bound on Z
               "Uz" = round(34113*1.15 + 22742))    #upper bound on Z

# define parameters whose posteriors we are interested in
mod.params <- c("Z",  "AB", "D", "C", "JKL", "MNOPQR", "EFGHI",
                "STU", "p", "q", "r", "s", "t", "u")

# define the starting values for JAGS
# this can be done with a function as follows, which generates random
# start values for each parameter.  for more than 1 chain, random values will
# be drawn for each chain
mod.inits <- function(){ # change A initialization and add others
  list("Z" = round(runif(1, 11994+34113, round(34113*1.13 + 22742))),
       "AB" = c(round(runif(1, 11994, 22742)), NA),
       "D" = round(runif(1, 2279+173, 10000)), 
       "p" = runif(1,0,1), 
       "q" = as.vector(rbeta(1,1,1)),
       "r" = as.vector(rdirichlet(1, c(1,1,1,1,1))),
       "s" = as.vector(rdirichlet(1, c(1,1,1))),
       "t" = as.vector(rdirichlet(1, c(1,1,1,1,1,1))),
       "u" = as.vector(rdirichlet(1, c(1,1,1))))
}

# Generate list of initial values to match number of chains
numchains <- 6
mod.initial <- list()
i <- 1
while(i <= numchains){
  mod.initial[[i]] <- mod.inits()
  i = i+1
}
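The while loop above works fine; an equivalent, slightly more compact way to collect one set of initial values per chain (a sketch, same behaviour) is:

# one call to mod.inits() per chain, gathered into a list
mod.initial <- replicate(numchains, mod.inits(), simplify = FALSE)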

# before using R2jags, we load the package and set a random seed
library(R2jags)
## Loading required package: rjags
## Loading required package: coda
## Linked to JAGS 4.3.0
## Loaded modules: basemod,bugs
## 
## Attaching package: 'R2jags'
## The following object is masked from 'package:coda':
## 
##     traceplot
# now fit the model in JAGS
mod.fit <- jags(data = op.dat, inits = mod.initial, parameters.to.save = mod.params, 
                n.chains = numchains, n.iter = 100000, n.burnin = 10000, 
                model.file = "fullopioidJAGS.mod")
## module glm loaded
## Compiling data graph
##    Resolving undeclared variables
##    Allocating nodes
##    Initializing
##    Reading data back into data table
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 13
##    Unobserved stochastic nodes: 9
##    Total graph size: 143
## 
## Initializing model
print(mod.fit)
## Inference for Bugs model at "fullopioidJAGS.mod", fit using jags,
##  6 chains, each with 1e+05 iterations (first 10000 discarded), n.thin = 90
##  n.sims = 6000 iterations saved
##             mu.vect  sd.vect      2.5%       25%       50%       75%     97.5%
## AB[1]     20657.283 3339.990 14316.000 18342.750 20599.500 22939.000 27301.150
## AB[2]     34113.000    0.000 34113.000 34113.000 34113.000 34113.000 34113.000
## C         17922.413 3326.115 11658.725 15636.750 17833.500 20193.750 24637.075
## D          2734.870  288.179  2459.000  2531.000  2649.000  2840.000  3513.025
## EFGHI[1]  16922.000    0.000 16922.000 16922.000 16922.000 16922.000 16922.000
## EFGHI[2]   1390.000    0.000  1390.000  1390.000  1390.000  1390.000  1390.000
## EFGHI[3]  15328.000    0.000 15328.000 15328.000 15328.000 15328.000 15328.000
## EFGHI[4]    473.000    0.000   473.000   473.000   473.000   473.000   473.000
## EFGHI[5]      0.000    0.000     0.000     0.000     0.000     0.000     0.000
## JKL[1]      173.000    0.000   173.000   173.000   173.000   173.000   173.000
## JKL[2]     2279.000    0.000  2279.000  2279.000  2279.000  2279.000  2279.000
## JKL[3]      282.870  288.179     7.000    79.000   197.000   388.000  1061.025
## MNOPQR[1] 11678.000    0.000 11678.000 11678.000 11678.000 11678.000 11678.000
## MNOPQR[2]   199.000    0.000   199.000   199.000   199.000   199.000   199.000
## MNOPQR[3]  1030.000    0.000  1030.000  1030.000  1030.000  1030.000  1030.000
## MNOPQR[4]  2376.000    0.000  2376.000  2376.000  2376.000  2376.000  2376.000
## MNOPQR[5]    45.000    0.000    45.000    45.000    45.000    45.000    45.000
## MNOPQR[6]     0.000    0.000     0.000     0.000     0.000     0.000     0.000
## STU[1]     2270.000    0.000  2270.000  2270.000  2270.000  2270.000  2270.000
## STU[2]      106.000    0.000   106.000   106.000   106.000   106.000   106.000
## STU[3]        0.000    0.000     0.000     0.000     0.000     0.000     0.000
## Z         50004.960  309.320 49406.875 49790.000 50002.000 50212.000 50616.050
## p             0.413    0.067     0.286     0.367     0.412     0.459     0.547
## q             0.136    0.026     0.095     0.117     0.133     0.151     0.195
## r[1]          0.496    0.003     0.491     0.494     0.496     0.498     0.501
## r[2]          0.041    0.001     0.039     0.040     0.041     0.042     0.043
## r[3]          0.449    0.003     0.444     0.447     0.449     0.451     0.454
## r[4]          0.014    0.001     0.013     0.014     0.014     0.014     0.015
## r[5]          0.000    0.000     0.000     0.000     0.000     0.000     0.000
## s[1]          0.065    0.008     0.048     0.061     0.066     0.070     0.079
## s[2]          0.840    0.075     0.648     0.801     0.859     0.898     0.927
## s[3]          0.095    0.081     0.003     0.032     0.074     0.137     0.301
## t[1]          0.761    0.003     0.754     0.759     0.761     0.763     0.768
## t[2]          0.013    0.001     0.012     0.013     0.013     0.014     0.015
## t[3]          0.067    0.002     0.064     0.066     0.067     0.069     0.072
## t[4]          0.155    0.003     0.150     0.153     0.155     0.157     0.161
## t[5]          0.003    0.000     0.002     0.003     0.003     0.004     0.004
## t[6]          0.000    0.000     0.000     0.000     0.000     0.000     0.000
## u[1]          0.943    0.005     0.933     0.940     0.943     0.946     0.952
## u[2]          0.056    0.005     0.047     0.053     0.056     0.059     0.065
## u[3]          0.001    0.001     0.000     0.001     0.001     0.002     0.003
## deviance    105.897    7.904    92.639   100.291   105.192   110.781   123.367
##            Rhat n.eff
## AB[1]     1.017   220
## AB[2]     1.000     1
## C         1.014   270
## D         1.039   130
## EFGHI[1]  1.000     1
## EFGHI[2]  1.000     1
## EFGHI[3]  1.000     1
## EFGHI[4]  1.000     1
## EFGHI[5]  1.000     1
## JKL[1]    1.000     1
## JKL[2]    1.000     1
## JKL[3]    1.043   140
## MNOPQR[1] 1.000     1
## MNOPQR[2] 1.000     1
## MNOPQR[3] 1.000     1
## MNOPQR[4] 1.000     1
## MNOPQR[5] 1.000     1
## MNOPQR[6] 1.000     1
## STU[1]    1.000     1
## STU[2]    1.000     1
## STU[3]    1.000     1
## Z         1.001  6000
## p         1.017   220
## q         1.006  1200
## r[1]      1.001  6000
## r[2]      1.001  6000
## r[3]      1.001  6000
## r[4]      1.001  6000
## r[5]      1.042   150
## s[1]      1.024   190
## s[2]      1.039   130
## s[3]      1.034   140
## t[1]      1.001  6000
## t[2]      1.001  6000
## t[3]      1.002  3500
## t[4]      1.001  6000
## t[5]      1.003  1700
## t[6]      1.096    52
## u[1]      1.001  6000
## u[2]      1.001  5700
## u[3]      1.001  6000
## deviance  1.003  1200
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule, pD = var(deviance)/2)
## pD = 31.1 and DIC = 137.0
## DIC is an estimate of expected predictive error (lower deviance is better).

The traceplots and density plots are as follows (density plots for the latent nodes in the healthcare branch of the tree could not be produced because their sampled values are too sparse, being essentially constant at zero):

mod.fit.mcmc <- as.mcmc(mod.fit)
library(mcmcplots)
## Registered S3 method overwritten by 'mcmcplots':
##   method        from  
##   as.mcmc.rjags R2jags
denplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]")) 

denplot(mod.fit.mcmc, parms = c("p", "q","r", "s", "t","u"))

traplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]"))

traplot(mod.fit.mcmc, parms = c("p", "q", "r"))

traplot(mod.fit.mcmc, parms = c("s", "t","u"))

traplot(mod.fit.mcmc, parms = c("EFGHI[5]", "MNOPQR[6]", "STU[3]"))

Now we re-run the model with observed values at the leaves only. In particular, this means removing the observations for nodes \(B, G, P\). To make the sequential binomials run in JAGS, it was more straightforward to place the latent nodes at the last indices of the for-loops. This results in the following modified DAG and a relabelling of the nodes in the pathways tree; a comparison of the resulting data vectors is sketched below.

[Figure: Full opioid pathways tree for modeling, incorporating data errors and node relabelling for computational reasons]
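To make the relabelling concrete, compare the EFGHI data vectors passed to the two models (values taken from the two op.dat lists; the object names are only for illustration):

# first model: parent B observed; node G observed at index 3; data-error node latent at index 5
EFGHI.model1 <- c(16922, 1390, 15328, 473, NA)
# second model: observed children occupy indices 1-3; the latent parent of the
# MNOPQR split (formerly G) and the data-error node sit at indices 4 and 5
EFGHI.model2 <- c(16922, 1390, 473, NA, NA)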

[Figure: DAG for full opioid tree model, incorporating data errors. Nodes are relabelled for computational reasons and therefore do not correspond to those used above with observed parent nodes.]

In this case, the model has the following form in JAGS:

## fullopioidJAGS_option2.mod

data {
p.params <- c(alphap, betap);
q.params <- c(alphaq, betaq);
r.params <- c(alphar, betar, gammar, deltar, psir);
s.params <- c(alphas, betas, gammas);
t.params <- c(alphat, betat, gammat, deltat, psit, zetat);
u.params <- c(alphau, betau, gammau);
}

model {
Z ~ dnegbin(alphaZ, betaZ) T(Lz,Uz);
p ~ dbeta(p.params[1], p.params[2]);
AB[2] ~ dbinom(1-p, Z);
AB[1] <- Z - AB[2];

q ~ dbeta(q.params[1], q.params[2]);
D ~ dbinom(q, AB[1]);
C <- AB[1] - D;

r ~ ddirch(r.params);
B.bin[1] <- Z - AB[1];
r.bin[1] <- r[1];
for (i in 2:5){
    B.bin[i] <- B.bin[i-1] - EFGHI[i-1]
    r.bin[i] <- r[i]/(sum(r[i:5]))
}
for (i in 1:4){
    EFGHI[i] ~ dbinom(r.bin[i], B.bin[i])
}
EFGHI[5] <- B.bin[1] - sum(EFGHI[1:4])


s ~ ddirch(s.params);
D.bin[1] <- D;
s.bin[1] <- s[1];
D.bin[2] <- D.bin[1] - JKL[1];
s.bin[2] <- s[2]/(sum(s[2:3]));
for (i in 1:2){
    JKL[i] ~ dbinom(s.bin[i], D.bin[i])
}
JKL[3] <- D.bin[1] - sum(JKL[1:2]);

t ~ ddirch(t.params);
G.bin[1] <- EFGHI[4];   # under the relabelling, the parent of the MNOPQR split is the latent node at index 4
t.bin[1] <- t[1];
for (i in 2:6){
    G.bin[i] <- G.bin[i-1] - MNOPQR[i-1]
    t.bin[i] <- t[i]/(sum(t[i:6]))
}
for (i in 1:5){
    MNOPQR[i] ~ dbinom(t.bin[i], G.bin[i])
}
MNOPQR[6] <- G.bin[1] - sum(MNOPQR[1:5])

u ~ ddirch(u.params);
P.bin[1] <- MNOPQR[5];   # under the relabelling, the parent of the STU split is the latent node at index 5
u.bin[1] <- u[1];
P.bin[2] <- P.bin[1] - STU[1];
u.bin[2] <- u[2]/(sum(u[2:3]));
for (i in 1:2){
    STU[i] ~ dbinom(u.bin[i], P.bin[i])
}
STU[3] <- P.bin[1] - sum(STU[1:2]);

}

The output of this model is as follows:

# Create the data
library(gtools)
set.seed(19)

# PHAC has supplied prior bounds on A (11994,22742), which combined with HC
# attended cohort number provides a lower bound on Z.  An upper bound on Z was
# set by the same method, supposing HC data misses no more than 15% of all
# HC attended overdoses (34113*1.15 + 22742)
op.dat <- list("STU" = c(2270, 106, NA),
               "MNOPQR" = c(11678, 199, 1030, 45, NA, NA),
               "JKL" = c(173, 2279, NA),
               "EFGHI" = c(16922, 1390,  473, NA, NA),
               "AB" = c(NA, NA),
               "alphau" = 30,     # dirichlet parameters for prior on u
               "betau" = 30,     # last parameter to sample U
               "gammau" = 3,    
               "alphat" = 5,     # dirichlet parameters for prior on t
               "betat" = 5,      # last parameter to sample R
               "gammat" = 5,     
               "deltat" = 5,     
               "psit" = 5,       
               "zetat" = 1,      
               "alphas" = 5,     # dirichlet parameters for prior on s
               "betas" = 5,      # last parameter samples L
               "gammas" = 1,     
               "alphar" = 5,     # dirichlet parameters for prior on r
               "betar" = 5,      # last parameter to sample I
               "gammar" = 5,     
               "deltar" = 5,     
               "psir" = 1,       
               "alphaq" = 3,     # beta parameters for prior on q
               "betaq" = 20,     # samples D
               "alphap" = 20,      # beta parameters for prior on p
               "betap" = 30,       # samples A
               "alphaZ" = 0.5,     # parameter for NegBin prior on Z
               "betaZ" = 50000,    # size parameter for NegBin prior on Z
               "Lz" = 11994 + 34113,    # lower bound on Z
               "Uz" = round(34113*1.15 + 22742))    #upper bound on Z

# define parameters whose posteriors we are interested in
mod.params <- c("Z",  "AB", "D", "C", "JKL", "MNOPQR", "EFGHI",
                "STU", "p", "q", "r", "s", "t", "u")

# define the starting values for JAGS
# this can be done with a function as follows, which generates random
# start values for each parameter.  for more than 1 chain, random values will
# be drawn for each chain
mod.inits <- function(){ 
  list("Z" = round(runif(1, 11994+34113, round(34113*1.13 + 22742))),
       "AB" = c(NA, round(runif(1, 34185, 35000))),
       "EFGHI" = c(NA, NA, NA, round(runif(1, 15352, 15400)), NA),
       "MNOPQR" = c(NA, NA, NA, NA, round(runif(1, 2376, 2400)), NA),
       #"STU" = c(NA, NA, ),
       "D" = round(runif(1, 2279+173, 10000)), 
       "p" = runif(1,0,1), 
       "q" = as.vector(rbeta(1,1,1)),
       "r" = as.vector(rdirichlet(1, c(1,1,1,1,1))),
       "s" = as.vector(rdirichlet(1, c(1,1,1))),
       "t" = as.vector(rdirichlet(1, c(1,1,1,1,1,1))),
       "u" = as.vector(rdirichlet(1, c(1,1,1))))
}

# Generate list of initial values to match number of chains
numchains <- 6
mod.initial <- list()
i <- 1
while(i <= numchains){
  mod.initial[[i]] <- mod.inits()
  i = i+1
}

# before using R2jags, we load the package and set a random seed
library(R2jags)

# now fit the model in JAGS
mod.fit <- jags(data = op.dat, inits = mod.initial, parameters.to.save = mod.params, 
                n.chains = numchains, n.iter = 100000, n.burnin = 10000, 
                model.file = "fullopioidJAGS_option2.mod")
## Compiling data graph
##    Resolving undeclared variables
##    Allocating nodes
##    Initializing
##    Reading data back into data table
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 11
##    Unobserved stochastic nodes: 11
##    Total graph size: 146
## 
## Initializing model
print(mod.fit)
## Inference for Bugs model at "fullopioidJAGS_option2.mod", fit using jags,
##  6 chains, each with 1e+05 iterations (first 10000 discarded), n.thin = 90
##  n.sims = 6000 iterations saved
##             mu.vect sd.vect      2.5%       25%       50%       75%     97.5%
## AB[1]     14654.344 888.191 12246.625 14261.750 14811.500 15260.000 15918.150
## AB[2]     35391.114 848.951 34379.000 34798.750 35194.000 35730.250 37831.150
## C         11978.839 920.107  9532.925 11546.000 12134.000 12610.000 13324.050
## D          2675.505 235.484  2459.000  2521.000  2601.000  2744.000  3339.000
## EFGHI[1]  16922.000   0.000 16922.000 16922.000 16922.000 16922.000 16922.000
## EFGHI[2]   1390.000   0.000  1390.000  1390.000  1390.000  1390.000  1390.000
## EFGHI[3]    473.000   0.000   473.000   473.000   473.000   473.000   473.000
## EFGHI[4]  15764.502 324.477 15408.975 15538.000 15667.000 15900.000 16664.025
## EFGHI[5]    841.612 731.852    25.000   359.750   650.000  1123.250  2958.050
## JKL[1]      173.000   0.000   173.000   173.000   173.000   173.000   173.000
## JKL[2]     2279.000   0.000  2279.000  2279.000  2279.000  2279.000  2279.000
## JKL[3]      223.505 235.484     7.000    69.000   149.000   292.000   887.000
## MNOPQR[1] 11678.000   0.000 11678.000 11678.000 11678.000 11678.000 11678.000
## MNOPQR[2]   199.000   0.000   199.000   199.000   199.000   199.000   199.000
## MNOPQR[3]  1030.000   0.000  1030.000  1030.000  1030.000  1030.000  1030.000
## MNOPQR[4]    45.000   0.000    45.000    45.000    45.000    45.000    45.000
## MNOPQR[5]  2492.737  71.851  2398.000  2440.000  2478.000  2529.000  2669.000
## MNOPQR[6]   319.764 316.799    17.000    89.000   209.000   453.000  1187.025
## STU[1]     2270.000   0.000  2270.000  2270.000  2270.000  2270.000  2270.000
## STU[2]      106.000   0.000   106.000   106.000   106.000   106.000   106.000
## STU[3]      116.737  71.851    22.000    64.000   102.000   153.000   293.000
## Z         50045.458 317.456 49436.000 49830.000 50043.000 50255.000 50675.050
## p             0.293   0.017     0.245     0.285     0.296     0.305     0.316
## q             0.183   0.020     0.157     0.169     0.178     0.192     0.240
## r[1]          0.478   0.011     0.447     0.473     0.481     0.486     0.494
## r[2]          0.039   0.001     0.036     0.039     0.039     0.040     0.042
## r[3]          0.014   0.001     0.012     0.013     0.014     0.014     0.015
## r[4]          0.445   0.010     0.423     0.440     0.446     0.452     0.463
## r[5]          0.023   0.019     0.001     0.010     0.019     0.032     0.078
## s[1]          0.067   0.007     0.052     0.062     0.067     0.072     0.080
## s[2]          0.856   0.065     0.680     0.829     0.875     0.903     0.927
## s[3]          0.078   0.070     0.003     0.027     0.057     0.107     0.266
## t[1]          0.740   0.015     0.700     0.733     0.744     0.751     0.759
## t[2]          0.013   0.001     0.011     0.012     0.013     0.014     0.015
## t[3]          0.066   0.002     0.061     0.064     0.066     0.067     0.070
## t[4]          0.003   0.000     0.002     0.003     0.003     0.003     0.004
## t[5]          0.158   0.006     0.147     0.155     0.158     0.162     0.170
## t[6]          0.020   0.019     0.001     0.006     0.013     0.029     0.072
## u[1]          0.901   0.025     0.841     0.887     0.905     0.919     0.937
## u[2]          0.053   0.005     0.044     0.050     0.053     0.056     0.063
## u[3]          0.046   0.026     0.010     0.027     0.041     0.061     0.109
## deviance    102.745   6.788    91.616    97.976   101.864   106.909   118.327
##            Rhat n.eff
## AB[1]     1.210    24
## AB[2]     1.262    20
## C         1.193    26
## D         1.015   720
## EFGHI[1]  1.000     1
## EFGHI[2]  1.000     1
## EFGHI[3]  1.000     1
## EFGHI[4]  1.102    58
## EFGHI[5]  1.662    11
## JKL[1]    1.000     1
## JKL[2]    1.000     1
## JKL[3]    1.019   680
## MNOPQR[1] 1.000     1
## MNOPQR[2] 1.000     1
## MNOPQR[3] 1.000     1
## MNOPQR[4] 1.000     1
## MNOPQR[5] 1.009   470
## MNOPQR[6] 1.082    54
## STU[1]    1.000     1
## STU[2]    1.000     1
## STU[3]    1.008   480
## Z         1.001  4800
## p         1.225    23
## q         1.076    57
## r[1]      1.246    21
## r[2]      1.089    47
## r[3]      1.042    90
## r[4]      1.129    37
## r[5]      1.670    11
## s[1]      1.005  1600
## s[2]      1.015   710
## s[3]      1.012   530
## t[1]      1.094    63
## t[2]      1.007   580
## t[3]      1.023   170
## t[4]      1.002  2000
## t[5]      1.034   120
## t[6]      1.081    54
## u[1]      1.008   510
## u[2]      1.002  2700
## u[3]      1.008   490
## deviance  1.001  4700
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule, pD = var(deviance)/2)
## pD = 23.0 and DIC = 125.8
## DIC is an estimate of expected predictive error (lower deviance is better).

When the parent nodes are made latent, the total estimate at \(Z\) changes very little, but the estimate at node \(A\) differs considerably between the two models: individuals that could not be counted as data errors in the HC branch of the first model (where those latent nodes are forced to zero) appear instead to be absorbed by the poorly constrained non-HC branch. In the second model, with the upstream observations removed, we do see data errors in the HC branch, and the total at \(A\) is reduced by approximately the same number, which is why the two models give similar estimates of \(Z\). The second model is also a closer match to the subtree model's estimate of \(A\). The traceplots and density plots are:

mod.fit.mcmc <- as.mcmc(mod.fit)
library(mcmcplots)
denplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]")) 

denplot(mod.fit.mcmc, parms = c("p", "q","r", "s", "t","u"))

traplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]"))

traplot(mod.fit.mcmc, parms = c("p", "q", "r"))

traplot(mod.fit.mcmc, parms = c("s", "t","u"))

traplot(mod.fit.mcmc, parms = c("EFGHI[5]", "MNOPQR[6]", "STU[3]"))

The mixing on the above model needs some work.
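As a sketch (the choice of nodes here is illustrative), the coda functions loaded alongside R2jags can quantify what the traceplots show:

# crude convergence checks on a few of the poorly mixing nodes
gelman.diag(mod.fit.mcmc[, c("AB[1]", "p", "r[5]", "EFGHI[5]")], multivariate = FALSE)
effectiveSize(mod.fit.mcmc[, c("AB[1]", "p", "r[5]", "EFGHI[5]")])

We now try again, running for more iterations and with more chains: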

# Generate list of initial values to match number of chains
numchains <- 12
mod.initial <- list()
i <- 1
while(i <= numchains){
  mod.initial[[i]] <- mod.inits()
  i = i+1
}

# now fit the model in JAGS
mod.fit <- jags(data = op.dat, inits = mod.initial, parameters.to.save = mod.params, 
                n.chains = numchains, n.iter = 300000, n.burnin = 150000, 
                model.file = "fullopioidJAGS_option2.mod")
## Compiling data graph
##    Resolving undeclared variables
##    Allocating nodes
##    Initializing
##    Reading data back into data table
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 11
##    Unobserved stochastic nodes: 11
##    Total graph size: 146
## 
## Initializing model
print(mod.fit)
## Inference for Bugs model at "fullopioidJAGS_option2.mod", fit using jags,
##  12 chains, each with 3e+05 iterations (first 150000 discarded), n.thin = 150
##  n.sims = 12000 iterations saved
##             mu.vect sd.vect      2.5%       25%       50%       75%     97.5%
## AB[1]     14749.885 762.877 12883.950 14336.000 14861.000 15284.000 15906.000
## AB[2]     35294.980 704.049 34390.975 34782.000 35145.500 35649.000 37106.025
## C         12069.917 780.755 10223.850 11623.000 12183.000 12622.000 13292.000
## D          2679.968 234.195  2457.975  2515.000  2609.000  2768.000  3307.000
## EFGHI[1]  16922.000   0.000 16922.000 16922.000 16922.000 16922.000 16922.000
## EFGHI[2]   1390.000   0.000  1390.000  1390.000  1390.000  1390.000  1390.000
## EFGHI[3]    473.000   0.000   473.000   473.000   473.000   473.000   473.000
## EFGHI[4]  15866.458 403.743 15428.000 15584.000 15747.000 16043.250 16893.000
## EFGHI[5]    643.522 576.349    29.000   225.000   458.000   917.000  2160.050
## JKL[1]      173.000   0.000   173.000   173.000   173.000   173.000   173.000
## JKL[2]     2279.000   0.000  2279.000  2279.000  2279.000  2279.000  2279.000
## JKL[3]      227.968 234.195     5.975    63.000   157.000   316.000   855.000
## MNOPQR[1] 11678.000   0.000 11678.000 11678.000 11678.000 11678.000 11678.000
## MNOPQR[2]   199.000   0.000   199.000   199.000   199.000   199.000   199.000
## MNOPQR[3]  1030.000   0.000  1030.000  1030.000  1030.000  1030.000  1030.000
## MNOPQR[4]    45.000   0.000    45.000    45.000    45.000    45.000    45.000
## MNOPQR[5]  2495.603  71.479  2398.000  2443.000  2482.000  2534.000  2672.000
## MNOPQR[6]   418.855 401.340    16.000   136.000   293.500   591.000  1451.025
## STU[1]     2270.000   0.000  2270.000  2270.000  2270.000  2270.000  2270.000
## STU[2]      106.000   0.000   106.000   106.000   106.000   106.000   106.000
## STU[3]      119.603  71.479    22.000    67.000   106.000   158.000   296.000
## Z         50044.865 312.847 49422.975 49833.000 50046.000 50256.000 50652.000
## p             0.295   0.015     0.258     0.287     0.297     0.305     0.316
## q             0.182   0.018     0.158     0.169     0.178     0.191     0.228
## r[1]          0.479   0.010     0.455     0.474     0.481     0.486     0.493
## r[2]          0.040   0.001     0.037     0.039     0.040     0.040     0.042
## r[3]          0.014   0.001     0.012     0.013     0.014     0.014     0.015
## r[4]          0.449   0.010     0.429     0.444     0.450     0.455     0.468
## r[5]          0.018   0.016     0.001     0.006     0.013     0.026     0.058
## s[1]          0.067   0.007     0.052     0.062     0.067     0.071     0.079
## s[2]          0.854   0.065     0.690     0.821     0.872     0.904     0.927
## s[3]          0.079   0.070     0.002     0.025     0.060     0.114     0.257
## t[1]          0.736   0.018     0.691     0.727     0.740     0.748     0.758
## t[2]          0.013   0.001     0.011     0.012     0.013     0.013     0.015
## t[3]          0.065   0.002     0.060     0.064     0.065     0.067     0.070
## t[4]          0.003   0.000     0.002     0.003     0.003     0.003     0.004
## t[5]          0.157   0.006     0.145     0.154     0.157     0.161     0.170
## t[6]          0.026   0.024     0.001     0.009     0.019     0.037     0.086
## u[1]          0.900   0.025     0.840     0.885     0.904     0.918     0.936
## u[2]          0.053   0.005     0.044     0.050     0.053     0.056     0.063
## u[3]          0.047   0.026     0.010     0.028     0.042     0.062     0.110
## deviance    102.761   6.958    91.626    97.738   101.999   106.943   118.615
##            Rhat n.eff
## AB[1]     1.057   140
## AB[2]     1.070   110
## C         1.049   160
## D         1.012   790
## EFGHI[1]  1.000     1
## EFGHI[2]  1.000     1
## EFGHI[3]  1.000     1
## EFGHI[4]  1.060   160
## EFGHI[5]  1.209    44
## JKL[1]    1.000     1
## JKL[2]    1.000     1
## JKL[3]    1.014   770
## MNOPQR[1] 1.000     1
## MNOPQR[2] 1.000     1
## MNOPQR[3] 1.000     1
## MNOPQR[4] 1.000     1
## MNOPQR[5] 1.004  1900
## MNOPQR[6] 1.067   150
## STU[1]    1.000     1
## STU[2]    1.000     1
## STU[3]    1.004  1900
## Z         1.001 12000
## p         1.060   130
## q         1.016   510
## r[1]      1.065   120
## r[2]      1.020   340
## r[3]      1.010   700
## r[4]      1.062   120
## r[5]      1.211    44
## s[1]      1.006  1200
## s[2]      1.012   790
## s[3]      1.005  1300
## t[1]      1.058   170
## t[2]      1.007  1000
## t[3]      1.020   400
## t[4]      1.002  4000
## t[5]      1.027   280
## t[6]      1.089    99
## u[1]      1.004  1900
## u[2]      1.002  5800
## u[3]      1.004  1800
## deviance  1.002  4200
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule, pD = var(deviance)/2)
## pD = 24.2 and DIC = 126.9
## DIC is an estimate of expected predictive error (lower deviance is better).
mod.fit.mcmc <- as.mcmc(mod.fit)
denplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]")) 

denplot(mod.fit.mcmc, parms = c("p", "q","r", "s", "t","u"))

traplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]"))

traplot(mod.fit.mcmc, parms = c("p", "q", "r"))

traplot(mod.fit.mcmc, parms = c("s", "t","u"))

traplot(mod.fit.mcmc, parms = c("EFGHI[5]", "MNOPQR[6]", "STU[3]"))

This is looking a little better, so for now we move on to adjusting the distribution of \(Z\) to see whether it affects the estimation. The above model estimates a mean of \(\hat{Z} = 50042\), with \(95\%\) credible interval \([49431, 50656]\). For comparison with the subtree, we also obtain a mean for the number of healthcare-unattended overdose events, \(\hat{A} = 14641\), \([12535, 15893]\), which is slightly lower than the subtree model's estimate. The traceplots for the data-uncertainty nodes, in particular, still need to be discussed; for now, however, I proceed to examining what effect the prior on \(Z\) has on the final estimate. The summaries just quoted can be extracted directly from the fit, as sketched below.
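A minimal sketch for pulling posterior summaries like these out of the R2jags fit object (row and column names are those produced by print(mod.fit)):

# posterior mean and 95% interval for the root Z and for node A (stored as AB[1])
mod.fit$BUGSoutput$summary[c("Z", "AB[1]"), c("mean", "2.5%", "97.5%")]

Let's first try adjusting the probability parameter of the negative binomial prior on \(Z\) from 0.5 to 0.2, keeping the size parameter equal to \(50000\):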

op.dat <- list("STU" = c(2270, 106, NA),
               "MNOPQR" = c(11678, 199, 1030, 45, NA, NA),
               "JKL" = c(173, 2279, NA),
               "EFGHI" = c(16922, 1390,  473, NA, NA),
               "AB" = c(NA, NA),
               "alphau" = 30,     # dirichlet parameters for prior on u
               "betau" = 30,     # last parameter to sample U
               "gammau" = 3,    
               "alphat" = 5,     # dirichlet parameters for prior on t
               "betat" = 5,      # last parameter to sample R
               "gammat" = 5,     
               "deltat" = 5,     
               "psit" = 5,       
               "zetat" = 1,      
               "alphas" = 5,     # dirichlet parameters for prior on s
               "betas" = 5,      # last parameter samples L
               "gammas" = 1,     
               "alphar" = 5,     # dirichlet parameters for prior on r
               "betar" = 5,      # last parameter to sample I
               "gammar" = 5,     
               "deltar" = 5,     
               "psir" = 1,       
               "alphaq" = 3,     # beta parameters for prior on q
               "betaq" = 20,     # samples D
               "alphap" = 20,      # beta parameters for prior on p
               "betap" = 30,       # samples A
               "alphaZ" = 0.2,     # parameter for NegBin prior on Z
               "betaZ" = 50000,    # size parameter for NegBin prior on Z
               "Lz" = 11994 + 34113,    # lower bound on Z
               "Uz" = round(34113*1.15 + 22742))    #upper bound on Z

# now fit the model in JAGS
mod.fit <- jags(data = op.dat, inits = mod.initial, parameters.to.save = mod.params, 
                n.chains = numchains, n.iter = 300000, n.burnin = 150000, 
                model.file = "fullopioidJAGS_option2.mod")
## Compiling data graph
##    Resolving undeclared variables
##    Allocating nodes
##    Initializing
##    Reading data back into data table
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 11
##    Unobserved stochastic nodes: 11
##    Total graph size: 146
## 
## Initializing model
print(mod.fit)
## Inference for Bugs model at "fullopioidJAGS_option2.mod", fit using jags,
##  12 chains, each with 3e+05 iterations (first 150000 discarded), n.thin = 150
##  n.sims = 12000 iterations saved
##             mu.vect  sd.vect      2.5%       25%       50%       75%     97.5%
## AB[1]     25650.231 1521.538 21628.000 24944.000 26056.000 26763.000 27483.025
## AB[2]     36319.592 1521.594 34487.000 35207.000 35913.500 37026.000 40339.125
## C         22913.510 1549.804 18902.900 22164.750 23288.000 24049.000 24870.025
## D          2736.721  300.370  2458.000  2532.000  2643.000  2843.000  3543.000
## EFGHI[1]  16922.000    0.000 16922.000 16922.000 16922.000 16922.000 16922.000
## EFGHI[2]   1390.000    0.000  1390.000  1390.000  1390.000  1390.000  1390.000
## EFGHI[3]    473.000    0.000   473.000   473.000   473.000   473.000   473.000
## EFGHI[4]  15967.418  538.197 15401.000 15560.000 15818.000 16207.250 17402.000
## EFGHI[5]   1567.174 1443.443    71.000   512.750  1160.000  2189.250  5512.075
## JKL[1]      173.000    0.000   173.000   173.000   173.000   173.000   173.000
## JKL[2]     2279.000    0.000  2279.000  2279.000  2279.000  2279.000  2279.000
## JKL[3]      284.721  300.370     6.000    80.000   191.000   391.000  1091.000
## MNOPQR[1] 11678.000    0.000 11678.000 11678.000 11678.000 11678.000 11678.000
## MNOPQR[2]   199.000    0.000   199.000   199.000   199.000   199.000   199.000
## MNOPQR[3]  1030.000    0.000  1030.000  1030.000  1030.000  1030.000  1030.000
## MNOPQR[4]    45.000    0.000    45.000    45.000    45.000    45.000    45.000
## MNOPQR[5]  2495.598   72.314  2398.000  2443.000  2482.000  2531.000  2680.000
## MNOPQR[6]   519.820  533.482     7.000   106.000   358.500   749.250  1964.025
## STU[1]     2270.000    0.000  2270.000  2270.000  2270.000  2270.000  2270.000
## STU[2]      106.000    0.000   106.000   106.000   106.000   106.000   106.000
## STU[3]      119.598   72.314    22.000    67.000   106.000   155.000   304.000
## Z         61969.823    2.642 61963.000 61969.000 61971.000 61972.000 61972.000
## p             0.414    0.025     0.349     0.402     0.420     0.432     0.444
## q             0.107    0.014     0.091     0.097     0.104     0.113     0.142
## r[1]          0.467    0.019     0.419     0.457     0.471     0.480     0.491
## r[2]          0.038    0.002     0.034     0.037     0.039     0.040     0.041
## r[3]          0.013    0.001     0.012     0.013     0.013     0.014     0.015
## r[4]          0.440    0.019     0.395     0.430     0.443     0.452     0.471
## r[5]          0.042    0.036     0.002     0.015     0.032     0.059     0.137
## s[1]          0.065    0.008     0.049     0.061     0.066     0.071     0.079
## s[2]          0.839    0.077     0.640     0.800     0.861     0.898     0.926
## s[3]          0.095    0.083     0.003     0.032     0.072     0.138     0.310
## t[1]          0.731    0.024     0.670     0.719     0.737     0.749     0.759
## t[2]          0.013    0.001     0.011     0.012     0.013     0.013     0.015
## t[3]          0.065    0.003     0.059     0.063     0.065     0.067     0.070
## t[4]          0.003    0.000     0.002     0.003     0.003     0.003     0.004
## t[5]          0.157    0.007     0.141     0.152     0.157     0.161     0.170
## t[6]          0.032    0.031     0.001     0.007     0.023     0.046     0.112
## u[1]          0.900    0.025     0.837     0.886     0.904     0.918     0.936
## u[2]          0.053    0.005     0.044     0.050     0.053     0.056     0.063
## u[3]          0.047    0.026     0.010     0.028     0.043     0.061     0.114
## deviance    103.094    6.904    91.882    98.074   102.349   107.426   118.181
##            Rhat n.eff
## AB[1]     1.056   140
## AB[2]     1.057   140
## C         1.053   150
## D         1.034   250
## EFGHI[1]  1.000     1
## EFGHI[2]  1.000     1
## EFGHI[3]  1.000     1
## EFGHI[4]  1.130    65
## EFGHI[5]  1.077   100
## JKL[1]    1.000     1
## JKL[2]    1.000     1
## JKL[3]    1.039   250
## MNOPQR[1] 1.000     1
## MNOPQR[2] 1.000     1
## MNOPQR[3] 1.000     1
## MNOPQR[4] 1.000     1
## MNOPQR[5] 1.005  1500
## MNOPQR[6] 1.132    66
## STU[1]    1.000     1
## STU[2]    1.000     1
## STU[3]    1.004  2000
## Z         1.000     1
## p         1.056   140
## q         1.030   240
## r[1]      1.055   140
## r[2]      1.040   180
## r[3]      1.023   310
## r[4]      1.094    83
## r[5]      1.079   100
## s[1]      1.022   360
## s[2]      1.034   250
## s[3]      1.026   290
## t[1]      1.129    66
## t[2]      1.021   320
## t[3]      1.060   120
## t[4]      1.008   920
## t[5]      1.060   130
## t[6]      1.165    53
## u[1]      1.005  1600
## u[2]      1.001  7100
## u[3]      1.004  2100
## deviance  1.002  5700
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule, pD = var(deviance)/2)
## pD = 23.8 and DIC = 126.9
## DIC is an estimate of expected predictive error (lower deviance is better).
mod.fit.mcmc <- as.mcmc(mod.fit)
denplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]")) 

denplot(mod.fit.mcmc, parms = c("p", "q","r", "s", "t","u"))

traplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]"))

traplot(mod.fit.mcmc, parms = c("p", "q", "r"))

traplot(mod.fit.mcmc, parms = c("s", "t","u"))

traplot(mod.fit.mcmc, parms = c("EFGHI[5]", "MNOPQR[6]", "STU[3]"))

As we can see from the above output, this drastically changes the root estimate. The biggest contribution to the increase in \(Z\) is an increase at node \(A\); this is not surprising, as the non-HC branch of the tree has far less observed data and hence more wiggle-room to accommodate an increase in the estimate of \(Z\) (a similar effect to the data uncertainty when the parents were observed). The traceplot of \(Z\) also suggests the sampler repeatedly pushed toward larger values but was cut off by the upper bound imposed on the distribution of \(Z\). How can we tell what is plausible here? Looking at the prior mean implied by the new parameters, as sketched below, suggests the truncation bound is doing most of the work.
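A back-of-the-envelope check, assuming the JAGS parameterization dnegbin(p, r) with (untruncated) mean \(r(1-p)/p\):

# implied untruncated prior mean of Z for the two choices of the probability parameter
r.size <- 50000
c(p0.5 = r.size * (1 - 0.5) / 0.5,   # 50000
  p0.2 = r.size * (1 - 0.2) / 0.2)   # 200000
round(34113 * 1.15 + 22742)          # upper truncation bound Uz: 61972

With the probability parameter at 0.2, the untruncated prior mean (200000) sits far above the truncation bound (61972), so the truncated prior piles its mass at the bound; this is consistent with the posterior for \(Z\) concentrating around 61970. Let's also try running this model for longer, to see whether it simply needs more iterations to move back toward the results given by the first prior on \(Z\):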

mod.fit <- jags(data = op.dat, inits = mod.initial, parameters.to.save = mod.params, 
                n.chains = numchains, n.iter = 750000, n.burnin = 500000, 
                model.file = "fullopioidJAGS_option2.mod")
## Compiling data graph
##    Resolving undeclared variables
##    Allocating nodes
##    Initializing
##    Reading data back into data table
## Compiling model graph
##    Resolving undeclared variables
##    Allocating nodes
## Graph information:
##    Observed stochastic nodes: 11
##    Unobserved stochastic nodes: 11
##    Total graph size: 146
## 
## Initializing model
print(mod.fit)
## Inference for Bugs model at "fullopioidJAGS_option2.mod", fit using jags,
##  12 chains, each with 750000 iterations (first 5e+05 discarded), n.thin = 250
##  n.sims = 12000 iterations saved
##             mu.vect  sd.vect      2.5%       25%       50%       75%     97.5%
## AB[1]     25386.090 1758.713 20974.950 24463.000 25838.500 26700.000 27496.000
## AB[2]     36583.661 1758.703 34473.000 35270.000 36132.000 37507.000 40994.050
## C         22642.088 1772.985 18285.875 21687.750 23071.000 23961.250 24859.000
## D          2744.002  327.783  2458.000  2530.000  2640.000  2844.000  3635.000
## EFGHI[1]  16922.000    0.000 16922.000 16922.000 16922.000 16922.000 16922.000
## EFGHI[2]   1390.000    0.000  1390.000  1390.000  1390.000  1390.000  1390.000
## EFGHI[3]    473.000    0.000   473.000   473.000   473.000   473.000   473.000
## EFGHI[4]  15989.976  537.787 15422.000 15612.000 15832.000 16193.250 17427.100
## EFGHI[5]   1808.685 1699.757    43.000   478.000  1351.000  2647.500  6155.000
## JKL[1]      173.000    0.000   173.000   173.000   173.000   173.000   173.000
## JKL[2]     2279.000    0.000  2279.000  2279.000  2279.000  2279.000  2279.000
## JKL[3]      292.002  327.783     6.000    78.000   188.000   392.000  1183.000
## MNOPQR[1] 11678.000    0.000 11678.000 11678.000 11678.000 11678.000 11678.000
## MNOPQR[2]   199.000    0.000   199.000   199.000   199.000   199.000   199.000
## MNOPQR[3]  1030.000    0.000  1030.000  1030.000  1030.000  1030.000  1030.000
## MNOPQR[4]    45.000    0.000    45.000    45.000    45.000    45.000    45.000
## MNOPQR[5]  2500.174   75.807  2399.000  2445.000  2485.000  2538.000  2691.000
## MNOPQR[6]   537.802  534.943    14.000   159.000   368.000   738.000  1973.000
## STU[1]     2270.000    0.000  2270.000  2270.000  2270.000  2270.000  2270.000
## STU[2]      106.000    0.000   106.000   106.000   106.000   106.000   106.000
## STU[3]      124.174   75.807    23.000    69.000   109.000   162.000   315.000
## Z         61969.751    2.706 61962.000 61969.000 61971.000 61972.000 61972.000
## p             0.410    0.028     0.339     0.395     0.417     0.431     0.444
## q             0.109    0.015     0.091     0.098     0.105     0.115     0.147
## r[1]          0.463    0.021     0.412     0.451     0.468     0.480     0.492
## r[2]          0.038    0.002     0.034     0.037     0.038     0.040     0.042
## r[3]          0.013    0.001     0.011     0.013     0.013     0.014     0.015
## r[4]          0.438    0.021     0.389     0.426     0.441     0.452     0.472
## r[5]          0.048    0.042     0.001     0.014     0.037     0.071     0.150
## s[1]          0.065    0.008     0.048     0.061     0.066     0.071     0.079
## s[2]          0.839    0.081     0.629     0.800     0.861     0.899     0.927
## s[3]          0.096    0.087     0.003     0.031     0.071     0.137     0.323
## t[1]          0.730    0.023     0.669     0.720     0.737     0.747     0.758
## t[2]          0.013    0.001     0.011     0.012     0.013     0.013     0.015
## t[3]          0.065    0.003     0.058     0.063     0.065     0.067     0.070
## t[4]          0.003    0.000     0.002     0.003     0.003     0.003     0.004
## t[5]          0.157    0.007     0.141     0.152     0.157     0.161     0.170
## t[6]          0.033    0.031     0.001     0.010     0.023     0.046     0.114
## u[1]          0.898    0.026     0.835     0.884     0.902     0.917     0.936
## u[2]          0.053    0.005     0.044     0.050     0.053     0.056     0.063
## u[3]          0.049    0.027     0.010     0.029     0.044     0.063     0.115
## deviance    103.103    6.958    91.808    98.131   102.277   107.281   118.881
##            Rhat n.eff
## AB[1]     1.149    58
## AB[2]     1.156    54
## C         1.133    64
## D         1.016   590
## EFGHI[1]  1.000     1
## EFGHI[2]  1.000     1
## EFGHI[3]  1.000     1
## EFGHI[4]  1.051   150
## EFGHI[5]  1.155    56
## JKL[1]    1.000     1
## JKL[2]    1.000     1
## JKL[3]    1.020   600
## MNOPQR[1] 1.000     1
## MNOPQR[2] 1.000     1
## MNOPQR[3] 1.000     1
## MNOPQR[4] 1.000     1
## MNOPQR[5] 1.002  3700
## MNOPQR[6] 1.051   160
## STU[1]    1.000     1
## STU[2]    1.000     1
## STU[3]    1.003  3200
## Z         1.000     1
## p         1.148    58
## q         1.021   350
## r[1]      1.154    55
## r[2]      1.110    72
## r[3]      1.070   100
## r[4]      1.127    66
## r[5]      1.188    48
## s[1]      1.011   770
## s[2]      1.016   580
## s[3]      1.012   600
## t[1]      1.050   160
## t[2]      1.010   710
## t[3]      1.027   260
## t[4]      1.004  1700
## t[5]      1.023   310
## t[6]      1.084    97
## u[1]      1.002  3600
## u[2]      1.001 11000
## u[3]      1.002  3300
## deviance  1.001 11000
## 
## For each parameter, n.eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
## 
## DIC info (using the rule, pD = var(deviance)/2)
## pD = 24.2 and DIC = 127.3
## DIC is an estimate of expected predictive error (lower deviance is better).
mod.fit.mcmc <- as.mcmc(mod.fit)
denplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]")) 

denplot(mod.fit.mcmc, parms = c("p", "q","r", "s", "t","u"))

traplot(mod.fit.mcmc, parms = c("Z", "AB[1]", "D", "JKL[3]"))

traplot(mod.fit.mcmc, parms = c("p", "q", "r"))

traplot(mod.fit.mcmc, parms = c("s", "t","u"))

traplot(mod.fit.mcmc, parms = c("EFGHI[5]", "MNOPQR[6]", "STU[3]"))

Even after three-quarters of a million iterations, this model remains quite certain that \(Z\) is higher, sitting up against the truncated prior's upper bound, and that \(A\) should be adjusted upward accordingly.