Chapter 9 - Markov Chain Monte Carlo

This week has been an informal introduction to Markov chain Monte Carlo (MCMC) estimation. The goal has been to introduce the purpose and approach of MCMC algorithms. The major algorithms introduced were Metropolis, Gibbs sampling, and Hamiltonian Monte Carlo, each with its own advantages and disadvantages. The ulam function in the rethinking package was also introduced; it uses the Stan (mc-stan.org) Hamiltonian Monte Carlo engine to fit models as they are defined in this book. Finally, general advice about diagnosing poor MCMC fits was illustrated with a couple of pathological examples.

Place each answer inside the code chunk (grey box). Each code chunk should contain a text response or code that completes/answers the question or activity requested. Make sure to include plots if the question requests them.

Finally, upon completion, name your final output .html file as YourName_ANLY505-Year-Semester.html, publish the assignment to your RPubs account, and submit the link to Canvas. Each question is worth 5 points.

Questions

8-1. Re-estimate the terrain ruggedness model from the chapter, but now using a uniform prior for the standard deviation, sigma. The uniform prior should be dunif(0,1). Visualize the priors. Use ulam to estimate the posterior distribution of sigma. Visualize the posterior distribution of sigma for both models. Do not use a ‘pairs’ plot. Does the different prior have any detectable influence on the posterior distribution of sigma? Why or why not?

library(rethinking)

data(rugged)
d = rugged
d$log_gdp = log(d$rgdppc_2000)
dd = d[ complete.cases(d$rgdppc_2000) , ]

dd$log_gdp_std = dd$log_gdp / mean(dd$log_gdp)
dd$rugged_std = dd$rugged / max(dd$rugged)

dd$cid = ifelse(dd$cont_africa==1, 1, 2)

dat_slim = list(
  log_gdp_std = dd$log_gdp_std,
  rugged_std = dd$rugged_std,
  cid = as.integer( dd$cid )
)
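
The question also asks to visualize the two candidate priors for sigma before fitting. One way to do this is a minimal sketch with base R density curves (the plotting range and colors are just illustrative choices):

# Compare the two priors for sigma: Exponential(1) vs Uniform(0, 1)
curve( dexp(x, rate=1) , from=0 , to=2 , lwd=2 ,
       xlab="sigma" , ylab="prior density" )
curve( dunif(x, 0, 1) , from=0 , to=2 , n=501 , add=TRUE ,
       lwd=2 , lty=2 , col="blue" )
legend( "topright" , legend=c("dexp(1)","dunif(0,1)") ,
        lwd=2 , lty=c(1,2) , col=c("black","blue") )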

model1 = ulam(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 ) ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , chains=4, cores = 4)
precis(model1, depth=2)
##             mean         sd         5.5%       94.5%    n_eff     Rhat4
## a[1]   0.8865485 0.01619488  0.861080923  0.91230902 2169.069 1.0031947
## a[2]   1.0506702 0.01008351  1.035216048  1.06699294 3200.183 0.9993874
## b[1]   0.1314476 0.07788550  0.007517365  0.25635589 1961.418 0.9997367
## b[2]  -0.1431186 0.05428872 -0.230142883 -0.05442691 1986.678 1.0002017
## sigma  0.1115717 0.00609983  0.102336265  0.12173812 2332.777 0.9992276
# Posterior of sigma under the dexp(1) prior (the question asks for no pairs plot)
post1 = extract.samples(model1)
dens( post1$sigma , xlab="sigma" , main="sigma posterior, dexp(1) prior" )

model2 = ulam(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 ) ,
    sigma ~ dunif(0, 1 )
  ), data=dat_slim , chains=4, cores = 4 )
precis(model2, depth=2)
##             mean          sd        5.5%       94.5%    n_eff     Rhat4
## a[1]   0.8871594 0.015580337  0.86165067  0.91114732 2231.100 0.9997811
## a[2]   1.0504244 0.010161913  1.03430555  1.06721078 3378.399 0.9991585
## b[1]   0.1331131 0.073978781  0.01784638  0.25117352 2152.798 1.0002878
## b[2]  -0.1418801 0.056605678 -0.23434206 -0.05051204 2147.940 0.9982133
## sigma  0.1114289 0.006128343  0.10211794  0.12147693 2545.388 0.9987717
# Posterior of sigma under the dunif(0,1) prior, overlaid on the dexp(1) result
post2 = extract.samples(model2)
dens( post2$sigma , xlab="sigma" , main="sigma posterior, dunif(0,1) vs dexp(1) prior" )
dens( post1$sigma , add=TRUE , lty=2 , col="blue" )

#Comparing the two precis tables and the sigma density plots, the different prior has
#no detectable influence on the posterior distribution of sigma. With 170 countries in
#the data, the likelihood dominates: both dexp(1) and dunif(0,1) are essentially flat
#over the narrow region (roughly 0.10 to 0.12) where the posterior concentrates, so
#swapping one weak prior for the other barely moves the posterior.

8-2. Modify the terrain ruggedness model again. This time, change the prior for b[cid] to dexp(0.3). Plot the joint posterior. Do not use a ‘pairs’ plot. What does this do to the posterior distribution? Can you explain it?

model3 = ulam(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dexp(0.3) ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , chains=4, cores = 4 )

precis(model3 , depth=2)
##             mean          sd         5.5%      94.5%    n_eff     Rhat4
## a[1]  0.88736248 0.016716959 0.8605738460 0.91499290 1835.525 0.9997306
## a[2]  1.04813482 0.010533240 1.0306648022 1.06471895 2007.221 0.9992118
## b[1]  0.14922314 0.072398935 0.0390826695 0.26897419 1379.343 0.9993071
## b[2]  0.01823428 0.016902094 0.0009501633 0.05073293 1566.752 1.0018190
## sigma 0.11410318 0.006436402 0.1042891021 0.12480139 1625.370 0.9992047
# Joint posterior of the two slopes (no pairs plot)
post3 = extract.samples(model3)
plot( post3$b[,1] , post3$b[,2] , col=col.alpha("black",0.3) ,
      xlab="b[1] (Africa)" , ylab="b[2] (not Africa)" )

#This prior does change the posterior. dexp(0.3) places zero probability on negative
#values, so b[2] (the slope for non-African nations), which was clearly negative under
#the Normal prior (about -0.14), is now forced to be positive and piles up just above
#zero (mean about 0.02). b[1] is largely unaffected because its posterior was already
#positive. In effect, the exponential prior acts as a positivity constraint on the slopes.
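
To see the constraint directly, the dexp(0.3) prior itself can be plotted; a quick sketch (the range is chosen just for display):

# The dexp(0.3) prior puts all of its mass on positive slope values
curve( dexp(x, rate=0.3) , from=0 , to=15 , lwd=2 ,
       xlab="b" , ylab="prior density" )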

8-3. Re-estimate one of the Stan models from the chapter, but at different numbers (at least 5) of warmup iterations. Be sure to use the same number of sampling iterations in each case. Compare the n_eff values. How much warmup is enough?

Stan_model_100 = map2stan(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 )  ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , warmup = 100, iter = 4000, chains=4, cores = 4 )

precis(Stan_model_100,depth=2)
##             mean          sd        5.5%       94.5%     n_eff     Rhat4
## a[1]   0.8867481 0.016161828  0.86107009  0.91272679 17155.388 0.9998874
## a[2]   1.0505612 0.010132651  1.03436340  1.06693112 18142.889 1.0002068
## b[1]   0.1332726 0.075489369  0.01251663  0.25392739  7458.406 1.0006111
## b[2]  -0.1419964 0.056504994 -0.23104392 -0.05290946  9506.919 1.0002504
## sigma  0.1116030 0.006158997  0.10226769  0.12181566  8954.895 1.0002278
Stan_model_200 = map2stan(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 )  ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , warmup = 200, iter = 4000, chains=4, cores = 4 )

Stan_model_300 = map2stan(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 )  ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , warmup = 300, iter = 4000, chains=4, cores = 4 )

Stan_model_400 = map2stan(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 )  ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , warmup = 400, iter = 4000, chains=4, cores = 4 )

Stan_model_500 = map2stan(
  alist(
    log_gdp_std ~ dnorm( mu , sigma ) ,
    mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
    a[cid] ~ dnorm( 1 , 0.1 ) ,
    b[cid] ~ dnorm( 0 , 0.3 )  ,
    sigma ~ dexp( 1 )
  ), data=dat_slim , warmup = 500, iter = 4000, chains=4, cores = 4 )
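
One way to line up the effective sample sizes is to pull the n_eff column out of each precis table. The sketch below assumes precis() returns a data-frame-like table with an n_eff column, as in the output shown above; the list and variable names are just for illustration.

# Collect n_eff for every parameter across the five warmup settings
fits = list( `100`=Stan_model_100 , `200`=Stan_model_200 , `300`=Stan_model_300 ,
             `400`=Stan_model_400 , `500`=Stan_model_500 )
n_eff_table = sapply( fits , function(f) precis(f, depth=2)$n_eff )
rownames(n_eff_table) = rownames( precis(Stan_model_100, depth=2) )
round(n_eff_table)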

#As warmup increases, n_eff approaches (and for some parameters exceeds) the number of
#post-warmup samples, but the gains level off quickly: the effective sample sizes are
#already large with only 100 warmup iterations and are stable well before 500. So 400
#warmup iterations are certainly enough for this simple model, and even fewer would do.

8-4. Run the model below and then inspect the posterior distribution and explain what it is accomplishing.

mp = ulam(
 alist(
   a ~ dnorm(0,1),
   b ~ dcauchy(0,1)
 ), data=list(y=1) , chains=1 )
## 
## SAMPLING FOR MODEL '3bd3f4d287e9cccab124308e5415245c' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 9e-06 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:   1 / 1000 [  0%]  (Warmup)
## Chain 1: Iteration: 100 / 1000 [ 10%]  (Warmup)
## Chain 1: Iteration: 200 / 1000 [ 20%]  (Warmup)
## Chain 1: Iteration: 300 / 1000 [ 30%]  (Warmup)
## Chain 1: Iteration: 400 / 1000 [ 40%]  (Warmup)
## Chain 1: Iteration: 500 / 1000 [ 50%]  (Warmup)
## Chain 1: Iteration: 501 / 1000 [ 50%]  (Sampling)
## Chain 1: Iteration: 600 / 1000 [ 60%]  (Sampling)
## Chain 1: Iteration: 700 / 1000 [ 70%]  (Sampling)
## Chain 1: Iteration: 800 / 1000 [ 80%]  (Sampling)
## Chain 1: Iteration: 900 / 1000 [ 90%]  (Sampling)
## Chain 1: Iteration: 1000 / 1000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.004112 seconds (Warm-up)
## Chain 1:                0.004259 seconds (Sampling)
## Chain 1:                0.008371 seconds (Total)
## Chain 1:
precis(mp)
traceplot(mp)

Compare the samples for the parameters a and b. Plot the trace plots. Can you explain the different trace plots? If you are unfamiliar with the Cauchy distribution, you should look it up. The key feature to attend to is that it has no expected value. Can you connect this fact to the trace plot?
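
One way to compare the samples for a and b directly is to extract them and summarize both; a small sketch (variable names are illustrative):

# Extract the samples; with no likelihood in the model these are draws from the priors
post = extract.samples(mp)
precis( data.frame( a=post$a , b=post$b ) )
# The Cauchy draws for b span a far wider range than the Normal draws for a
range(post$a)
range(post$b)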

#Because this model has no likelihood (the data y=1 is never used), the samples are just
#draws from the two priors. The trace for a looks healthy: the Normal(0,1) prior keeps the
#samples centered near 0, mostly between -2 and 2, and the chain mixes well. The trace for
#b is erratic: the Cauchy(0,1) prior has such heavy tails that it has no expected value, so
#the chain occasionally makes extreme excursions (values above 30 and below -50 here), and
#the running mean of the samples never settles down.

8-5. Recall the divorce rate example from Chapter 5. Repeat that analysis, using ulam this time, fitting models m5.1, m5.2, and m5.3. Use compare to compare the models on the basis of WAIC or PSIS. To use WAIC or PSIS with ulam, you need to add the argument log_lik=TRUE. Explain the model comparison results.

data(WaffleDivorce)
data = WaffleDivorce

data$Divorce_sd=standardize(data$Divorce)
data$Marriage_sd=standardize(data$Marriage)
data$MedianAgeMarriage_sd=standardize(data$MedianAgeMarriage)

d_trim = list(D = data$Divorce_sd, M = data$Marriage_sd, A = data$MedianAgeMarriage_sd)

m5.1 = ulam(
  alist(
    D ~ dnorm(mu, sigma),
    mu <- a + bA * A,
    a ~ dnorm(0, 0.2),
    bA ~ dnorm(0, 0.5),
    sigma ~ dexp(1)
  ),
  data = d_trim,
  chains = 4, 
  cores = 4,
  log_lik = TRUE
)

m5.2 = ulam(
  alist(
    D ~ dnorm(mu, sigma),
    mu <- a + bM * M,
    a ~ dnorm(0, 0.2),
    bM ~ dnorm(0, 0.5),
    sigma ~ dexp(1)
  ),
  data = d_trim,
  chains = 4, 
  cores = 4,
  log_lik = TRUE 
)

m5.3 = ulam(
  alist(
    D ~ dnorm(mu, sigma),
    mu <- a + bA * A + bM * M,
    a ~ dnorm(0, 0.2),
    bA ~ dnorm(0, 0.5),
    bM ~ dnorm(0, 0.5),
    sigma ~ dexp(1)
  ),
  data = d_trim,
  chains = 4, 
  cores = 4,
  log_lik = TRUE 
)
set.seed(77)
compare( m5.1 , m5.2 , m5.3 , func=WAIC )
##          WAIC        SE     dWAIC       dSE    pWAIC      weight
## m5.1 125.7473 12.534651  0.000000        NA 3.599905 0.736036102
## m5.3 127.8047 12.589800  2.057449 0.6531786 4.816174 0.263105332
## m5.2 139.2548  9.886594 13.507542 9.1218067 2.944743 0.000858566
#Model m5.1, which uses only median age at marriage (A) as a predictor, has the lowest
#WAIC, but it is not really distinguishable from m5.3: dWAIC is only about 2 and the two
#models share almost all of the Akaike weight, with m5.3 paying a small penalty (larger
#pWAIC) for an extra parameter that does not improve prediction. m5.2, which uses marriage
#rate alone, clearly performs worse than both. This matches the Chapter 5 conclusion that
#marriage rate adds no predictive information about divorce once median age at marriage is
#in the model.
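
As a cross-check, the same comparison can be run with PSIS, and the compare table can be plotted; a short sketch (both should tell the same story here):

compare( m5.1 , m5.2 , m5.3 , func=PSIS )
plot( compare( m5.1 , m5.2 , m5.3 , func=WAIC ) )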