# Log odds ratios Y for the 22 studies, with Vtheta holding the corresponding standard errors sigma_j
Y = c(0.028, -0.741, -0.541, -0.246, 0.069, -0.584, -0.512, -0.079, -0.424,
-0.335, -0.213, -0.039, -0.593, 0.282, -0.321, -0.135, 0.141, 0.322, 0.444,
-0.218, -0.591, -0.608)
Vtheta = c(0.85, 0.483, 0.565, 0.138, 0.281, 0.676, 0.139, 0.204, 0.274, 0.117,
0.195, 0.229, 0.425, 0.205, 0.298, 0.261, 0.364, 0.553, 0.717, 0.26, 0.257,
0.272)
### First simulate tau by rejection sampling
# Conditional posterior variance and mean of mu given tau (flat prior on mu)
Vmu = function(tau) 1/(sum(1/(Vtheta^2 + tau^2)))
Mmu = function(tau) sum((1/(Vtheta^2 + tau^2)) * Y) * Vmu(tau)
# Unnormalized marginal posterior density of tau
ptau = function(tau) {
    sqrt(Vmu(tau)) * prod((Vtheta^2 + tau^2)^(-1/2) *
        exp(-(Y - Mmu(tau))^2/(2 * (Vtheta^2 + tau^2))))
}
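For reference, ptau is the unnormalized marginal posterior density of tau under the non-informative (uniform) hyperprior, written with sigma_j = Vtheta[j]:

\[
p(\tau \mid y) \propto V_\mu(\tau)^{1/2} \prod_{j=1}^{22} (\sigma_j^2 + \tau^2)^{-1/2}
\exp\!\left( -\frac{(y_j - \hat{\mu}(\tau))^2}{2(\sigma_j^2 + \tau^2)} \right),
\]

where \hat{\mu}(\tau) and V_\mu(\tau) are Mmu(tau) and Vmu(tau) above.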
Rejection sampling of the hyperparameter tau.
# M must dominate the unnormalized density ptau on (0, 1); a bound taken from a
# grid evaluation is enough (its exact value only affects the acceptance rate)
M = 1.1 * max(sapply(seq(0, 1, length.out = 1000), ptau))
U = c()
while (length(U) < 1000) {
    u1 = runif(1)  # proposal tau ~ Uniform(0, 1)
    u2 = runif(1)
    if (ptau(u1)/M > u2) {
        U = c(U, u1)
    }
}
plot(density(U), xlim = c(0.015, 0.4), ylim = c(0, 14), xlab = "Tau", main = "density")
lines(c(0, 0), c(0, 14))
TAU = U
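A minimal sketch of a grid-based cross-check (the 1000-point grid on (0, 1] is an arbitrary choice): evaluate ptau on a grid, use the values as sampling weights, and overlay the resulting density estimate on the plot above; it should roughly agree with the rejection-sampling density.
tau_grid = seq(0.001, 1, length.out = 1000)   # assumed grid over (0, 1]
w_grid = sapply(tau_grid, ptau)
TAU_grid = sample(tau_grid, 1000, replace = TRUE, prob = w_grid)
lines(density(TAU_grid), lty = 2)             # overlay on the density plot above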
############# Plot the estimated theta_j as a function of tau
# Conditional posterior means E(theta_j | tau, y) over a grid of tau values
THETA = matrix(nrow = 1000, ncol = 22)
for (j in c(1:22)) {
    response = c()
    for (x in seq(0, 0.6, length.out = 1000)) {
        # precision-weighted average of the study estimate Y[j] and the pooled mean Mmu(x)
        NEW = ((1/Vtheta[j]^2) * Y[j] + (1/x^2) * Mmu(x))/((1/Vtheta[j]^2) + (1/x^2))
        response = c(response, NEW)
    }
    THETA[, j] = response
}
plot(x = seq(0, 0.6, length.out = 1000), y = THETA[, 1], type = "l", xlab = "Hyper-parameter Tau",
xlim = c(0, 0.3), ylim = c(-0.45, -0.09), ylab = "Estimated Treatment Effects",
col = 1)
for (j in c(2:22)) {
    lines(x = seq(0, 0.6, length.out = 1000), y = THETA[, j], col = j)
}
legend("topleft", legend = c(1:22), fill = c(1:22), cex = 0.4)
Now plot the same curves, but instead of tau = seq(0, 0.6, length.out = 1000), use the tau values generated by rejection sampling.
for (j in c(1:22)) {
    response = c()
    for (x in TAU) {
        # conditional posterior mean E(theta_j | tau = x, y), as above
        NEW = ((1/Vtheta[j]^2) * Y[j] + (1/x^2) * Mmu(x))/((1/Vtheta[j]^2) + (1/x^2))
        response = c(response, NEW)
    }
    THETA[, j] = response
}
# TAU is an unordered sample, so plot points rather than connected lines
plot(x = TAU, y = THETA[, 1], xlab = "Hyper-parameter Tau", xlim = c(0, 0.3),
    ylim = c(-0.45, -0.09), ylab = "Estimated Treatment Effects", col = 1,
    cex = 0.1)
for (j in c(2:22)) {
    points(x = TAU, y = THETA[, j], col = j, cex = 0.1)
}
legend("topleft", legend = c(1:22), fill = c(1:22), cex = 0.4)
Plot the posterior standard deviation of theta_j against tau.
SD = matrix(nrow = 1000, ncol = 22)
for (j in c(1:22)) {
    response = c()
    for (x in seq(0, 0.6, length.out = 1000)) {
        # conditional posterior sd of theta_j given tau = x
        NEW = sqrt(1/((1/Vtheta[j]^2) + (1/x^2)))
        response = c(response, NEW)
    }
    SD[, j] = response
}
plot(x = seq(0, 0.6, length.out = 1000), y = SD[, 1], type = "l", xlab = "Hyper-parameter Tau",
    ylim = c(0, 0.3), xlim = c(0, 0.3), ylab = "Posterior SD", col = 1)
for (j in c(2:22)) {
    lines(x = seq(0, 0.6, length.out = 1000), y = SD[, j], col = j)
}
legend("topleft", legend = c(1:22), fill = c(1:22), cex = 0.4)
# Posterior means of theta_j: average the conditional means over the sampled tau
means = apply(THETA, 2, mean)
# Posterior medians of theta_j (values taken as given)
median = c(24, 28, 26, 25, 21, 26, 36, 21, 28, 29, 24, 21, 28, 12, 26, 23, 21,
    23, 23, 25, 31, 31) * (-0.01)
plot(x = c(1:22), y = median, cex = 0.5, pch = 6, xlab = "studies", ylab = "mean vs. median")
points(x = c(1:22), means, cex = 0.5, pch = 7)
legend("topleft", legend = c("mean", "median"), pch = c(7, 6), cex = 0.5)
The larger the deviation between the posterior means and the posterior medians, the more skewed the posterior distribution tends to be.
I use the given point estimates Y as crude estimates.
plot(x = c(1:22), y = Y, cex = 0.5, pch = 6, xlab = "studies", ylab = "crude estimate vs. median")
points(x = c(1:22), median, cex = 0.5, pch = 7)
legend("topleft", legend = c("crude", "median"), pch = c(6, 7), cex = 0.5)
Draw a new theta_j from a sampled pair (mu, tau).
tau = TAU[1]  # one sampled value of tau
mu_mean = Mmu(tau)
vu = sqrt(Vmu(tau))
mu = rnorm(1, mu_mean, vu)    # draw mu | tau, y
thetanew = rnorm(1, mu, tau)  # draw a new theta | mu, tau
# To draw y for the new study I need its sampling standard deviation, which is
# not given here; I use mean(Vtheta) in its place.
sigmathetanew = mean(Vtheta)
ynew = rnorm(200, thetanew, sigmathetanew)
library(MASS)
truehist(ynew)
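The draw above conditions on a single (tau, mu, thetanew). A sketch of the full posterior predictive distribution, keeping the same mean(Vtheta) assumption for the new study's sampling standard deviation, repeats the simulation over all sampled tau:
ynewall = sapply(TAU, function(taui) {
    mui = rnorm(1, Mmu(taui), sqrt(Vmu(taui)))   # mu | tau, y
    thetai = rnorm(1, mui, taui)                 # new theta | mu, tau
    rnorm(1, thetai, sigmathetanew)              # new y | theta, assumed sd mean(Vtheta)
})
truehist(ynewall, xlab = "log odds for a new study")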
The hierarchical model here is straightforward.
At the first level, we choose a conjugate normal population distribution for the study effects theta_j under the normal sampling model for the y_j.
At the second level, we can place a non-informative prior directly on p(mu, tau), since the marginal likelihood has a closed form in the normal case. The textbook, however, simplifies the computation further by factoring the posterior as p(mu, tau | y) = p(mu | tau, y) p(tau | y), and that is the approach taken in my computation.
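For reference, the model and the factorization that Vmu, Mmu, and ptau implement can be written as

\[
y_j \mid \theta_j \sim \mathrm{N}(\theta_j, \sigma_j^2), \qquad
\theta_j \mid \mu, \tau \sim \mathrm{N}(\mu, \tau^2), \qquad
p(\mu, \tau) \propto 1,
\]
\[
p(\mu, \tau \mid y) = p(\mu \mid \tau, y)\, p(\tau \mid y), \qquad
\mu \mid \tau, y \sim \mathrm{N}\!\left(\hat{\mu}(\tau), V_\mu(\tau)\right),
\]
\[
\hat{\mu}(\tau) = V_\mu(\tau) \sum_{j=1}^{22} \frac{y_j}{\sigma_j^2 + \tau^2}, \qquad
V_\mu(\tau)^{-1} = \sum_{j=1}^{22} \frac{1}{\sigma_j^2 + \tau^2},
\]

with p(\tau \mid y) as coded in ptau, so sampling tau from its marginal posterior and then mu and the theta_j from their conditional posteriors reproduces draws from the joint posterior.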