The INLA Approach to Bayesian models

The Integrated Nested Laplace Approximation, or INLA, approach is a recently developed, computationally simpler method for fitting Bayesian models (Rue et al., 2009), compared to traditional Markov Chain Monte Carlo (MCMC) approaches. INLA fits models that are classified as latent Gaussian models, which are applicable in many settings (Martino & Rue, 2010). In general, INLA fits additive models of the form:

\(\eta_i = \alpha + \sum_{j=1}^{n_f} f^{(j)}(u_{ij}) + \sum_{k=1}^{n_\beta}\beta_k z_{ki} + \epsilon_i\)

where \(\eta_i\) is the linear predictor of a generalized linear model, composed of functions \(f^{(j)}\) of some covariates u, linear effects \(\beta_k\) of covariates z, and an unstructured residual \(\epsilon_i\) (Rue et al., 2009). Since this model is parameterized as a Bayesian one, we are interested in the posterior marginal distributions of all the model parameters. Rue and Martino (2007) show that the posterior marginal for the random effects (x) in such models can be approximated as:

\(\tilde{p}(x_i|y) = \sum_k \tilde{p}(x_i|\theta_k, y) \tilde{p}(\theta_k|y) \Delta_k\)

via numerical integration (Rue & Martino, 2007; Schrodle & Held, 2011a, 2011b). The posterior distribution of the hyperparameters (\(\theta\)) of the model can also be approximated as:

\(\tilde{p}(\theta | y) \propto \left. \frac{p(x, \theta, y)}{\tilde{p}_G(x| \theta,y)} \right|_{x = x^*(\theta)}\)

where \(\tilde{p}_G\) is a Gaussian approximation to the full conditional distribution of x, and \(x^*(\theta)\) is the mode of the conditional distribution \(p(x|\theta,y)\). Thus, instead of using MCMC to obtain an iterative, sampling-based estimate of the posterior, INLA arrives at it numerically. This method of fitting the spatial models specified above has been presented by numerous authors (Blangiardo & Cameletti, 2015; Blangiardo et al., 2013; Lindgren & Rue, 2015; Martins et al., 2013; Schrodle & Held, 2011a, 2011b), with results comparable to MCMC.
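
Conceptually, the integration over the hyperparameters is just a finite weighted sum over a grid of \(\theta\) values. Below is a minimal, purely illustrative R sketch of that idea; the grid, weights, and conditional densities are made up for illustration and are not INLA's internal machinery:

#toy version of p(x_i|y) = sum_k p(x_i|theta_k, y) * p(theta_k|y) * Delta_k
theta_grid<-seq(0.1, 2, length.out = 20)   #hypothetical grid of hyperparameter values
delta<-diff(theta_grid)[1]                 #equal grid spacing (Delta_k)
w<-dgamma(theta_grid, 2, 2)                #stand-in for p(theta_k|y)
w<-w/sum(w*delta)                          #normalize the weights
xvals<-seq(-3, 3, length.out = 200)
#stand-in for p(x_i|theta_k, y): a normal whose precision is theta_k
cond_dens<-sapply(theta_grid, function(t) dnorm(xvals, mean = 0, sd = 1/sqrt(t)))
post_marg<-cond_dens%*%(w*delta)           #the weighted sum over the grid
plot(xvals, post_marg, type="l", main="Toy numerically integrated marginal")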

Below, I show examples of using INLA to fit Bayesian regression models for data from US counties. One example is a relatively small data set from Texas, while the other covers all US counties. The US county example largely follows the analysis in Sparks et al. (2012).

library(maptools)
## Loading required package: sp
## Checking rgeos availability: TRUE
library(spdep)
## Loading required package: Matrix
library(RColorBrewer)
library(lattice)
library(INLA)
## Loading required package: splines
#read in big US shapefile with lots of stuff in it
usdat<-readShapePoly("~/Google Drive//dem7263/data/output12711.shp")
## Field name: 'SP_ID_1' changed to: 'SP_ID_1.1'
#get outcome, which are the numbers of infant deaths
usdat$infd03<-usdat$f1194803
#get denominator, which are the number of births  
usdat$births03<-usdat$f1254603
#expected number of infant deaths in each county, assuming the national rate applies everywhere
usdat$e<-usdat$births03*(sum(usdat$infd03)/sum(usdat$births03))


#form spatial neighbors
#In INLA, we don't need FIPS codes, we need a simple numeric index for our counties
usdat$struct<-1:nrow(usdat@data)


#!!Be sure to set the row names of the neighbor list to the struct variable we just made!!
us.nb<-poly2nb(usdat, row.names=usdat$struct)

#Save the neighbor list to an external file, INLA needs the list this way!!
nb2INLA(us.nb, file="~/Google Drive//dem7263/usa.gra")
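
As a quick, optional sanity check (not required for the analysis), the graph file can be read back with inla.read.graph() to confirm it has one node per county:

#read the graph back and confirm one node per county
g<-inla.read.graph("~/Google Drive//dem7263/usa.gra")
g$n == nrow(usdat@data)  #should be TRUE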

We can fit the Gaussian (OLS-type) model using the Bayesian framework as:

#US Data
fit.us<-inla(I(infd03/births03)~metro+giniz+hispz+blackz+povz+ne_bad+isowbz, family = "gaussian", data=usdat@data, control.compute = list(dic=T))
summary(fit.us)
## 
## Call:
## c("inla(formula = I(infd03/births03) ~ metro + giniz + hispz + blackz + ",  "    povz + ne_bad + isowbz, family = \"gaussian\", data = usdat@data, ",  "    control.compute = list(dic = T))")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.7684          1.8592          0.6747          3.3023 
## 
## Fixed effects:
##                mean    sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0066 1e-04     0.0063   0.0066     0.0068  0.0066   0
## metro        0.0007 2e-04     0.0003   0.0007     0.0011  0.0007   0
## giniz       -0.0001 1e-04    -0.0003  -0.0001     0.0001 -0.0001   0
## hispz       -0.0004 1e-04    -0.0006  -0.0004    -0.0002 -0.0004   0
## blackz       0.0016 1e-04     0.0014   0.0016     0.0018  0.0016   0
## povz         0.0010 1e-04     0.0007   0.0010     0.0013  0.0010   0
## ne_bad       0.0000 3e-04    -0.0005   0.0000     0.0006  0.0000   0
## isowbz       0.0006 1e-04     0.0004   0.0006     0.0008  0.0006   0
## 
## The model has no random effects
## 
## Model hyperparameters:
##                                              mean       sd 0.025quant
## Precision for the Gaussian observations 193255.36 19374.34  158795.78
##                                          0.5quant 0.975quant      mode
## Precision for the Gaussian observations 191945.80  234876.96 189053.14
## 
## Expected number of effective parameters(std dev): 2332.88(44.31)
## Number of equivalent replicates : 1.313 
## 
## Deviance Information Criterion (DIC) ...: -26244.50
## Effective number of parameters .........: 2326.66
## 
## Marginal log-Likelihood:  12017.41 
## Posterior marginals for linear predictor and fitted values computed
inla.hpdmarginal(p = .95, fit.us$marginals.fixed$giniz)
##                      low         high
## level:0.95 -0.0003258306 0.0001275077
inla.hpdmarginal(p = .95, fit.us$marginals.fixed$ne_bad)
##                      low         high
## level:0.95 -0.0004676027 0.0005546334
inla.hpdmarginal(p = .95, fit.us$marginals.fixed$isowbz)
##                     low         high
## level:0.95 0.0003972515 0.0008076013

Or, as a Poisson model:

#US data
fit.p_us<-inla(infd03~metro+giniz+hispz+blackz+povz+ne_bad+isowbz, family = "poisson",E=e+.0001, data=usdat@data, control.compute = list(dic=T))
summary(fit.p_us)
## 
## Call:
## c("inla(formula = infd03 ~ metro + giniz + hispz + blackz + povz + ",  "    ne_bad + isowbz, family = \"poisson\", data = usdat@data, E = e + ",  "    1e-04, control.compute = list(dic = T))")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.7007          0.9405          0.3361          1.9774 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0348 0.0151     0.0050   0.0348     0.0643  0.0349   0
## metro        0.0649 0.0208     0.0241   0.0649     0.1058  0.0649   0
## giniz       -0.0016 0.0055    -0.0123  -0.0017     0.0092 -0.0018   0
## hispz       -0.0747 0.0075    -0.0893  -0.0747    -0.0600 -0.0747   0
## blackz       0.1389 0.0114     0.1165   0.1389     0.1614  0.1389   0
## povz         0.1448 0.0108     0.1237   0.1448     0.1659  0.1449   0
## ne_bad      -0.2301 0.0189    -0.2672  -0.2301    -0.1931 -0.2300   0
## isowbz      -0.0461 0.0126    -0.0708  -0.0461    -0.0213 -0.0461   0
## 
## The model has no random effects
## 
## The model has no hyperparameters
## 
## Expected number of effective parameters(std dev): 8.445(0.00)
## Number of equivalent replicates : 362.83 
## 
## Deviance Information Criterion (DIC) ...: 10303.27
## Effective number of parameters .........: 8.444
## 
## Marginal log-Likelihood:  -5205.15 
## Posterior marginals for linear predictor and fitted values computed
#or negative binomial on the US data
fit.nb_us<-inla(infd03~metro+giniz+hispz+blackz+povz+ne_bad+isowbz, family = "nbinomial",E=e+.0001, data=usdat@data, control.compute = list(dic=T))
summary(fit.nb_us)
## 
## Call:
## c("inla(formula = infd03 ~ metro + giniz + hispz + blackz + povz + ",  "    ne_bad + isowbz, family = \"nbinomial\", data = usdat@data, ",  "    E = e + 1e-04, control.compute = list(dic = T))")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.6801          2.1633          0.8464          3.6898 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0333 0.0159     0.0020   0.0334     0.0644  0.0334   0
## metro        0.0601 0.0232     0.0146   0.0600     0.1055  0.0600   0
## giniz       -0.0046 0.0100    -0.0242  -0.0045     0.0149 -0.0045   0
## hispz       -0.0757 0.0101    -0.0955  -0.0757    -0.0559 -0.0757   0
## blackz       0.1178 0.0143     0.0897   0.1178     0.1460  0.1177   0
## povz         0.1574 0.0138     0.1303   0.1574     0.1844  0.1575   0
## ne_bad      -0.1969 0.0246    -0.2453  -0.1969    -0.1485 -0.1969   0
## isowbz      -0.0585 0.0164    -0.0907  -0.0585    -0.0262 -0.0586   0
## 
## The model has no random effects
## 
## Model hyperparameters:
##                                                       mean    sd
## size for the nbinomial observations (overdispersion) 52.45 4.545
##                                                      0.025quant 0.5quant
## size for the nbinomial observations (overdispersion)      44.10    52.25
##                                                      0.975quant  mode
## size for the nbinomial observations (overdispersion)      61.95 51.85
## 
## Expected number of effective parameters(std dev): 8.252(0.0051)
## Number of equivalent replicates : 371.32 
## 
## Deviance Information Criterion (DIC) ...: 10268.82
## Effective number of parameters .........: 8.635
## 
## Marginal log-Likelihood:  -5234.51 
## Posterior marginals for linear predictor and fitted values computed

Or a simple, unstructured random county intercept model:

#US model
fit.pre_u<-inla(infd03~metro+giniz+hispz+blackz+povz+ne_bad+isowbz + f(struct, model = "iid"), family = "poisson",E=e+.0001, data=usdat@data, control.compute = list(dic=T))
summary(fit.pre_u)
## 
## Call:
## c("inla(formula = infd03 ~ metro + giniz + hispz + blackz + povz + ",  "    ne_bad + isowbz + f(struct, model = \"iid\"), family = \"poisson\", ",  "    data = usdat@data, E = e + 1e-04, control.compute = list(dic = T))" )
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          0.8219          5.5256          1.0193          7.3668 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0303 0.0155    -0.0002   0.0304     0.0606  0.0304   0
## metro        0.0623 0.0219     0.0193   0.0623     0.1054  0.0623   0
## giniz       -0.0034 0.0078    -0.0187  -0.0034     0.0119 -0.0035   0
## hispz       -0.0753 0.0089    -0.0929  -0.0753    -0.0578 -0.0753   0
## blackz       0.1240 0.0130     0.0984   0.1240     0.1495  0.1240   0
## povz         0.1547 0.0123     0.1305   0.1547     0.1789  0.1547   0
## ne_bad      -0.2093 0.0220    -0.2523  -0.2093    -0.1662 -0.2093   0
## isowbz      -0.0554 0.0147    -0.0843  -0.0554    -0.0265 -0.0555   0
## 
## Random effects:
## Name   Model
##  struct   IID model 
## 
## Model hyperparameters:
##                        mean    sd 0.025quant 0.5quant 0.975quant   mode
## Precision for struct 141.38 26.09      98.76   138.36     200.59 132.13
## 
## Expected number of effective parameters(std dev): 149.09(16.08)
## Number of equivalent replicates : 20.55 
## 
## Deviance Information Criterion (DIC) ...: 10201.46
## Effective number of parameters .........: 149.41
## 
## Marginal log-Likelihood:  -5170.65 
## Posterior marginals for linear predictor and fitted values computed

A more complicated model, using a Besag ICAR (intrinsic conditional autoregressive) random effect:

#US Models
fit.psp1_u<-inla(infd03~metro+giniz+hispz+blackz+povz+ne_bad+isowbz + f(struct, model = "besag", graph = "~/Google Drive//dem7263/usa.gra"), family = "poisson",E=e+.0001, data=usdat@data, control.compute = list(dic=T))
summary(fit.psp1_u)
## 
## Call:
## c("inla(formula = infd03 ~ metro + giniz + hispz + blackz + povz + ",  "    ne_bad + isowbz + f(struct, model = \"besag\", graph = \"~/Google Drive//dem7263/usa.gra\"), ",  "    family = \"poisson\", data = usdat@data, E = e + 1e-04, control.compute = list(dic = T))" )
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          2.0745          9.6045          0.9670         12.6461 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0279 0.0156    -0.0028   0.0279     0.0583  0.0280   0
## metro        0.0581 0.0221     0.0147   0.0581     0.1015  0.0581   0
## giniz        0.0008 0.0080    -0.0148   0.0008     0.0164  0.0007   0
## hispz       -0.0565 0.0117    -0.0793  -0.0565    -0.0334 -0.0566   0
## blackz       0.1073 0.0148     0.0782   0.1073     0.1363  0.1073   0
## povz         0.1475 0.0131     0.1217   0.1476     0.1733  0.1476   0
## ne_bad      -0.1634 0.0243    -0.2110  -0.1634    -0.1157 -0.1634   0
## isowbz      -0.0658 0.0155    -0.0962  -0.0659    -0.0354 -0.0659   0
## 
## Random effects:
## Name   Model
##  struct   Besags ICAR model 
## 
## Model hyperparameters:
##                       mean    sd 0.025quant 0.5quant 0.975quant  mode
## Precision for struct 70.71 14.69      46.84    68.98     104.29 65.51
## 
## Expected number of effective parameters(std dev): 110.90(11.68)
## Number of equivalent replicates : 27.63 
## 
## Deviance Information Criterion (DIC) ...: 10099.51
## Effective number of parameters .........: 111.24
## 
## Marginal log-Likelihood:  -7506.17 
## Posterior marginals for linear predictor and fitted values computed
1/fit.psp1_u$summary.hyperpar$mean
## [1] 0.01414291
#Change the default prior on the spatial variance:

fit.psp2_u<-inla(infd03~metro+giniz+hispz+blackz+povz+ne_bad+isowbz + f(struct, model = "besag", graph = "~/Google Drive//dem7263/usa.gra", hyper = list(theta=list(prior="loggamma", param=c(.5, .005)))), family = "poisson",E=e+.0001, data=usdat@data, control.compute = list(dic=T))
summary(fit.psp2_u)
## 
## Call:
## c("inla(formula = infd03 ~ metro + giniz + hispz + blackz + povz + ",  "    ne_bad + isowbz + f(struct, model = \"besag\", graph = \"~/Google Drive//dem7263/usa.gra\", ",  "    hyper = list(theta = list(prior = \"loggamma\", param = c(0.5, ",  "        0.005)))), family = \"poisson\", data = usdat@data, E = e + ",  "    1e-04, control.compute = list(dic = T))")
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          1.8419          9.3860          0.4710         11.6989 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0278 0.0156    -0.0030   0.0278     0.0582  0.0279   0
## metro        0.0580 0.0221     0.0145   0.0580     0.1014  0.0580   0
## giniz        0.0008 0.0080    -0.0149   0.0008     0.0166  0.0008   0
## hispz       -0.0562 0.0118    -0.0792  -0.0562    -0.0331 -0.0563   0
## blackz       0.1072 0.0149     0.0780   0.1072     0.1363  0.1072   0
## povz         0.1475 0.0132     0.1216   0.1475     0.1734  0.1475   0
## ne_bad      -0.1630 0.0244    -0.2108  -0.1630    -0.1152 -0.1630   0
## isowbz      -0.0659 0.0155    -0.0963  -0.0659    -0.0354 -0.0660   0
## 
## Random effects:
## Name   Model
##  struct   Besags ICAR model 
## 
## Model hyperparameters:
##                       mean    sd 0.025quant 0.5quant 0.975quant  mode
## Precision for struct 68.21 13.86      45.57    66.61      99.79 63.41
## 
## Expected number of effective parameters(std dev): 113.35(11.69)
## Number of equivalent replicates : 27.03 
## 
## Deviance Information Criterion (DIC) ...: 10100.05
## Effective number of parameters .........: 113.67
## 
## Marginal log-Likelihood:  -7501.95 
## Posterior marginals for linear predictor and fitted values computed
1/fit.psp2_u$summary.hyperpar$mean
## [1] 0.01466119

Notice that there is less variation in the US hyperparameter than in the Texas hyperparameter; that is because we have more data in the US example.

Compare the marginal distributions for the precision parameter from each model. The precision is \(1/\sigma^2\), so we need to transform it to the variance scale in order to interpret it as we normally would.

#US example
1/fit.psp1_u$summary.hyperpar$mean
## [1] 0.01414291
um<- inla.tmarginal(
        function(x) 1/x,
        fit.psp1_u$marginals.hyperpar$'Precision for struct')

plot(um, type="l", main="Posterior distribution for model hyperparameter - US data")

1/fit.psp2_u$summary.hyperpar$mean
## [1] 0.01466119
um2<- inla.tmarginal(
        function(x) 1/x,
        fit.psp2_u$marginals.hyperpar$'Precision for struct')
lines(um2, lty=3, lwd=3)

The two different priors show very similar results in terms of the posterior distribution of the county level variance.

Let’s have a look at the raw versus fitted SMR (standardized mortality ratio) for infant mortality:

##US example
usdat$smr<-usdat$infd03/usdat$e
usdat$fittedsmr<-fit.psp2_u$summary.fitted.values$mean
plot(usdat$smr, usdat$fittedsmr)

brks<-quantile(c(usdat$smr, usdat$fittedsmr), p=seq(0, 1,length.out = 9), na.rm=T)

brks
##        0%     12.5%       25%     37.5%       50%     62.5%       75% 
## 0.0000000 0.6108312 0.8172425 0.9131458 1.0001000 1.1017729 1.2424355 
##     87.5%      100% 
## 1.5388163 8.6480283
spplot(usdat, c("smr", "fittedsmr"), col.regions=brewer.pal(n=9, "RdBu"), at=brks , main="Raw and Fitted SMR")

spplot(usdat, "fittedsmr", col.regions=brewer.pal(n=9, "RdBu"), at=brks, main="US County Fitted SMR")

usdat$spre<-fit.psp2_u$summary.random$struct$mean
spplot(usdat, "spre", col.regions=brewer.pal(n=9, "RdBu"), at=quantile(usdat$spre, p=seq(0, 1,length.out = 9)), main="US County Fitted Spatial Random Effect")

The Besag, York and Mollié (BYM) model. This model has two random effects: one that is spatially correlated (ICAR) and one that is iid normal. In INLA, when using the BYM model, the first n rows of the random effect summary are the combined spatial and “nonspatial” effects, while rows n+1 to 2n contain the spatially structured component alone (see the short check after the maps below).

#US data
fit.us_bym<-inla(infd03~metro+giniz+hispz+blackz+povz+ne_bad+isowbz + f(struct, model = "bym", graph = "~/Google Drive//dem7263/usa.gra"), family = "poisson",E=e+.0001, data=usdat@data, control.compute = list(dic=T))
summary(fit.us_bym)
## 
## Call:
## c("inla(formula = infd03 ~ metro + giniz + hispz + blackz + povz + ",  "    ne_bad + isowbz + f(struct, model = \"bym\", graph = \"~/Google Drive//dem7263/usa.gra\"), ",  "    family = \"poisson\", data = usdat@data, E = e + 1e-04, control.compute = list(dic = T))" )
## 
## Time used:
##  Pre-processing    Running inla Post-processing           Total 
##          2.6308         51.6637          0.6827         54.9771 
## 
## Fixed effects:
##                mean     sd 0.025quant 0.5quant 0.975quant    mode kld
## (Intercept)  0.0278 0.0156    -0.0029   0.0279     0.0583  0.0280   0
## metro        0.0579 0.0221     0.0145   0.0579     0.1014  0.0579   0
## giniz        0.0009 0.0080    -0.0149   0.0008     0.0166  0.0008   0
## hispz       -0.0569 0.0117    -0.0798  -0.0570    -0.0338 -0.0571   0
## blackz       0.1071 0.0148     0.0779   0.1071     0.1362  0.1071   0
## povz         0.1476 0.0132     0.1217   0.1476     0.1734  0.1477   0
## ne_bad      -0.1636 0.0244    -0.2115  -0.1636    -0.1156 -0.1636   0
## isowbz      -0.0659 0.0155    -0.0964  -0.0659    -0.0354 -0.0660   0
## 
## Random effects:
## Name   Model
##  struct   BYM model 
## 
## Model hyperparameters:
##                                             mean      sd 0.025quant
## Precision for struct (iid component)     3290.53 2277.74     694.23
## Precision for struct (spatial component)   73.54   15.71      47.99
##                                          0.5quant 0.975quant    mode
## Precision for struct (iid component)      2733.32    9210.81 1758.50
## Precision for struct (spatial component)    71.70     109.41   68.07
## 
## Expected number of effective parameters(std dev): 115.82(14.48)
## Number of equivalent replicates : 26.46 
## 
## Deviance Information Criterion (DIC) ...: 10102.84
## Effective number of parameters .........: 116.38
## 
## Marginal log-Likelihood:  -4689.67 
## Posterior marginals for linear predictor and fitted values computed
1/fit.us_bym$summary.hyperpar$mean
## [1] 0.0003039025 0.0135981835
usdat$bym_est<-fit.us_bym$summary.fitted.values$mean
spplot(usdat, "bym_est", col.regions=brewer.pal(n=9, "RdBu"), at=quantile(usdat$bym_est, p=seq(0, 1,length.out = 9)), main="US County Fitted SMR from BYM model")
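
As noted above, the BYM random effect summary has 2n rows. A small check (assuming n is the number of counties) separates the combined effect from the spatially structured piece and maps the latter:

#the BYM random effect has 2n rows: 1:n are the combined (spatial + iid) effects,
#(n+1):(2n) are the spatially structured component alone
n<-nrow(usdat@data)
re<-fit.us_bym$summary.random$struct
usdat$bym_combined<-re$mean[1:n]
usdat$bym_spatial<-re$mean[(n+1):(2*n)]
spplot(usdat, "bym_spatial", col.regions=brewer.pal(n=9, "RdBu"), at=quantile(usdat$bym_spatial, p=seq(0, 1,length.out = 9)), main="US County BYM Spatially Structured Effect")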

Exceedence probabilities

Here we calculate exceedence probabilities; in this case, I calculate the posterior probability that a county has an SMR greater than 1.25.

a2<-1.25
usdat$inlaprob<-unlist(lapply(fit.us_bym$marginals.fitted.values, function(X){
  1-inla.pmarginal(a2, X)
}))
hist(usdat$inlaprob)

usdat$exceed.cut<-cut(usdat$inlaprob,breaks =  c(0,0.2,0.5,0.8,.95,1), include.lowest = T)

spplot(usdat,"exceed.cut" , col.regions=brewer.pal(n=6, name="Blues"), key.space="bottom", main= expression(paste("Exceedence Probability Infant SMR ","Pr( ",theta," >1.25"," )") ))


INLA for Non-Aggregate Data

Above, we saw how to use INLA to fit a Bayesian regression model to areal data (US counties). This example focuses on how to use INLA to fit a Bayesian multi-level model, where the outcome is observed at the individual level and we may or may not have information available at a higher level of aggregation.

Age-period-cohort models

In this example, we use data from the Integrated Health Interview Series (IHIS) to fit an age-period-cohort (APC) model. Discussion of these models can be found in recent texts on the subject. In addition to the APC structure, hierarchical models could also consider county or city of residence, or some other geography.

Here, we load the IHIS data and do some recodes.

library(readstata13)
library(car)

dat<- read.dta13("~/Google Drive/dem7903_App_Hier/data/ihis_00008.dta", convert.factors = F)
names(dat)
##  [1] "year"         "serial"       "strata"       "psu"         
##  [5] "nhishid"      "hhweight"     "nhispid"      "hhx"         
##  [9] "fmx"          "px"           "perweight"    "sampweight"  
## [13] "fweight"      "astatflg"     "cstatflg"     "pernum"      
## [17] "age"          "sex"          "marstat"      "marst"       
## [21] "birthyr"      "racea"        "hispeth"      "yrsinus"     
## [25] "hispyn"       "usborn"       "citizen"      "racenew"     
## [29] "racebr"       "educrec2"     "educrec1"     "educ"        
## [33] "health"       "bmicalc"      "alcstat1"     "smokestatus2"
## [37] "mod10fwk"     "vig10fwk"     "nhisiid"
names(dat)<-tolower(names(dat))
dat<-dat[dat$age>=18,]
dat$goodhealth<-recode(dat$health, recodes="1:3=0; 4:5=1; else=NA")
dat$bmi_clean<-ifelse(dat$bmicalc%in%c(0,996),NA, dat$bmicalc)
dat$obese<-ifelse(dat$bmi_clean>=30,1,0)
dat$hisp<-ifelse(dat$hispeth==10, 0, ifelse(dat$hispeth %in%c(90:93), NA, 1))
dat$race<-recode(dat$racea, recodes="100=1; 200=2; 300:340=3; 400:434=4; 500:600=5; else=NA")
dat$race_eth<-factor(ifelse(dat$hisp==1, "Hispanic",
                     ifelse(dat$hisp==0&dat$race==1, "NHWhite", 
                            ifelse(dat$hisp==0&dat$race==2, "NHBlack",
                                   ifelse(dat$hisp==0&dat$race==3, "NHNAHN",
                                          ifelse(dat$hisp==0&dat$race==4, "NHAsian", 
                                                 ifelse(dat$hisp==0&dat$race==5, "NHAsian", NA)))))))
dat$educ<-recode(dat$educrec2, recodes="10:41='lths'; 42='hs';50:53='somecoll'; 54:60='colledu'; else=NA", as.factor.result=T, levels = c("lths", "hs", "somecoll", "colledu"))
dat$male<-recode(dat$sex, recodes="1=1;2=0")
dat$agec<-cut(dat$age, breaks =seq(15, 100, 5))
dat$currsmoke<-recode(dat$smokestatus2, recodes="0=NA;90=NA; 10:13=1; else=0", as.factor.result = F)
dat$currdrink<-recode(dat$alcstat1, recodes="0='never'; 1:2='former'; 3='current'; else=NA", as.factor.result = T)
dat$birth_year<-ifelse(dat$birthyr>=9997, NA, dat$birthyr)
dat$age2<-dat$age^2
dat<-subset(dat, complete.cases(goodhealth,bmi_clean, educ, currsmoke, currdrink, birth_year))
dat$bmiz<- scale(dat$bmi_clean)
dat$cohort<- cut(dat$birth_year, breaks=seq(1915, 2000, 5))
set.seed(1115)
samps<-sample(1:nrow(dat), size = 75000, replace = F) #take a sample of subjects, because the NHIS is huge.
dat<- dat[samps,]

Below, we fit the model using INLA. The APC model has three random effects: one for age, one for period (year of survey), and one for cohort (birth year). For a continuous outcome, the model structure would be:

\(y_{ijkl} = \mu_{jkl} + \sum_c \beta_c x_{ci} + e_{ijkl}\) with \(\mu_{jkl} = \mu + u_j + v_k + \tau_l\)

\(u_j \sim N(0, \sigma^2_u)\)

\(v_k \sim N(0, \sigma^2_v)\)

\(\tau_l \sim N(0, \sigma^2_\tau)\)

library(lme4)
#use lmer to find good starting values for INLA
fit_in1.mer<- lmer(bmiz~ race_eth+educ+male+currsmoke+currdrink+(1|agec)+(1|year)+(1|cohort), data=dat)


#INLA parameterizes variances on the log-precision scale, so taking log(1/variance) gives a value we can feed to INLA as an initial value
vc<-as.data.frame(VarCorr(fit_in1.mer))
vc$init<-log(1/vc$vcov); vc
##        grp        var1 var2       vcov      sdcor       init
## 1   cohort (Intercept) <NA> 0.00170954 0.04134659 6.37153073
## 2     year (Intercept) <NA> 0.00578871 0.07608357 5.15184586
## 3     agec (Intercept) <NA> 0.04411133 0.21002697 3.12103867
## 4 Residual        <NA> <NA> 0.92036819 0.95935822 0.08298148
#Feed the init values to the hyperparameter inits in each f()

fit_in1<- inla(bmiz~ race_eth+educ+male+currsmoke+currdrink+f(agec, model="iid",hyper = list(prec=list(initial=vc$init[3])))+f(year, model="iid",hyper = list(prec=list(initial=vc$init[2])) )+f(cohort, model="iid",hyper = list(prec=list(initial=vc$init[1]))), family = "gaussian", data=dat, num.threads = 2, verbose = F,  control.inla=list(strategy='gaussian'), control.family = list(hyper = list(prec=list(initial=vc$init[4]))))

#Posterior summaries for our fixed effects
round(fit_in1$summary.fixed, 3)
##                   mean     sd 0.025quant 0.5quant 0.975quant   mode kld
## (Intercept)      0.064  0.061     -0.056    0.064      0.185  0.064   0
## race_ethNHAsian -0.491  0.019     -0.528   -0.491     -0.453 -0.491   0
## race_ethNHBlack  0.176  0.013      0.151    0.176      0.202  0.176   0
## race_ethNHNAHN   0.200  0.044      0.114    0.200      0.286  0.200   0
## race_ethNHWhite -0.092  0.010     -0.113   -0.092     -0.072 -0.092   0
## educhs          -0.036  0.011     -0.058   -0.036     -0.013 -0.036   0
## educsomecoll    -0.051  0.012     -0.074   -0.051     -0.028 -0.051   0
## educcolledu     -0.277  0.013     -0.302   -0.277     -0.253 -0.277   0
## male             0.125  0.007      0.111    0.125      0.139  0.125   0
## currsmoke       -0.166  0.009     -0.184   -0.166     -0.148 -0.166   0
## currdrinkformer  0.066  0.008      0.050    0.066      0.081  0.066   0
## currdrinknever   0.000 31.623    -62.086   -0.001     62.034  0.000   0
plot( fit_in1$summary.random$agec$mean, type="l",xaxt="n", ylab="Age Random Effect (IID)", main="Age random effect", ylim=c(-1,1))
axis(1, at=1:17, labels=levels(dat$agec))
lines(fit_in1$summary.random$agec$`0.025quant`, col=1, lty=5)
lines(fit_in1$summary.random$agec$`0.975quant`, col=1, lty=5)
abline(h=0, lwd=2)
legend("topright", legend = c("Posterior Mean", "95% Credible Interval"), lty=c(1,5))

plot(fit_in1$summary.random$year$mean, type="l",xaxt="n", ylab="Period Random Effect (IID)", main="Period random effect", ylim=c(-.3, .3))
yrs<-unique(dat$year); yrs<-yrs[order(yrs)]
axis(1, at=1:length(yrs), labels=yrs)
lines(fit_in1$summary.random$year$`0.025quant`, col=1, lty=5)
lines(fit_in1$summary.random$year$`0.975quant`, col=1, lty=5)
abline(h=0, lwd=2)
legend("topright", legend = c("Posterior Mean", "95% Credible Interval"), lty=c(1,5))

plot(fit_in1$summary.random$cohort$mean, type="l", xaxt="n", ylab="Cohort Random Effect (IID)", main="Cohort random effect", ylim=c(-.2, .2))
coh<-unique(dat$cohort); coh<-coh[order(coh)]
axis(1, at=1:length(coh), labels=coh)
lines(fit_in1$summary.random$cohort$`0.025quant`, col=1, lty=5)
lines(fit_in1$summary.random$cohort$`0.975quant`, col=1, lty=5)
abline(h=0, lwd=2)
legend("topright", legend = c("Posterior Mean", "95% Credible Interval"), lty=c(1,5))

Plot the marginals for the variance terms:

m1<- inla.tmarginal(
        function(x) 1/x,
        fit_in1$marginals.hyperpar$`Precision for agec`)
m2<- inla.tmarginal(
        function(x) 1/x,
        fit_in1$marginals.hyperpar$`Precision for year`)

m3<- inla.tmarginal(
        function(x) 1/x,
        fit_in1$marginals.hyperpar$`Precision for cohort`)


plot(m1, type="l", main="Posterior distribution for age variance")

plot(m2, type="l", main="Posterior distribution for year variance")

plot(m3, type="l", main="Posterior distribution for cohort variance")
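
If numeric summaries of these variances are wanted in addition to the plots, the transformed marginals can be summarized directly; a small optional addition using INLA's marginal utilities:

#posterior summaries (mean, sd, quantiles) of each variance component
inla.zmarginal(m1) #age variance
inla.zmarginal(m2) #period variance
inla.zmarginal(m3) #cohort variance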