# ============================================================
# 4. LEVEL-2 MATRIX (Z1 & Z2 PER CITY)
# ============================================================
Z_tmp <- bayes_dat %>%
  group_by(kota_f) %>%
  summarise(
    Z1_s = mean(Z1_s),
    Z2_s = mean(Z2_s),
    .groups = "drop"
  )
Z_tmp <- Z_tmp[order(as.integer(Z_tmp$kota_f)), ]
Z_mat <- as.matrix(Z_tmp[, c("Z1_s", "Z2_s")])
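# Optional sanity check: after the sort above, the integer codes of kota_f
# should run 1..J, so that row j of Z_mat lines up with city j in the Stan
# data built below.
stopifnot(identical(as.integer(Z_tmp$kota_f), seq_len(nrow(Z_tmp))))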
# ============================================================
# 5. MIXTURE COMPONENTS AS SPECIFIED IN THE PROBLEM
#    X1 ≤ 0.1 → COMPONENT 1
#    X1 > 0.1 → COMPONENT 2
# ============================================================
comp <- ifelse(bayes_dat$X1 <= 0.1, 1L, 2L)
table(comp)
## comp
## 1 2
## 233 148
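# A quick illustrative look at the component split, overall and per city; comp
# is not part of the no-mixture data below and is kept for the mixture
# specification.
prop.table(table(comp))
table(bayes_dat$kota_f, comp)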
# ============================================================
# 6. DATA FOR THE HIERARCHICAL MODEL WITHOUT MIXTURE
# ============================================================
# (contents: Y, X1_s, X2_s, X3_s, X4_s, X5_s, kota_f, Z1_s, Z2_s)
N <- nrow(bayes_dat)
J <- length(unique(bayes_dat$kota_f))
R <- 5L # X1..X5
M <- 2L # Z1, Z2
X_mat_nomix <- as.matrix(
  bayes_dat[, c("X1_s", "X2_s", "X3_s", "X4_s", "X5_s")]
)
stan_data_nomix <- list(
  N    = N,
  J    = J,
  R    = R,
  M    = M,
  Y    = bayes_dat$Y,
  X    = X_mat_nomix,
  kota = as.integer(bayes_dat$kota_f),
  Z    = Z_mat  # matrix of Z1_s & Z2_s per city
)
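# Defensive dimension checks before sampling (assuming the .stan file declares
# X as an N x R matrix, Z as J x M, and kota as an index in 1..J):
stopifnot(
  nrow(X_mat_nomix) == N, ncol(X_mat_nomix) == R,
  nrow(Z_mat)       == J, ncol(Z_mat)       == M,
  all(stan_data_nomix$kota %in% seq_len(J))
)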
# ============================================================
# 7. RUN STAN: HIERARCHICAL MODEL WITHOUT MIXTURE
# ============================================================
fit_hier_nomix <- stan(
  file    = "hier_skewnormal.stan",  # model without the component index k
  data    = stan_data_nomix,
  chains  = 4,
  warmup  = 1000,
  iter    = 2000,
  seed    = 123,
  control = list(
    adapt_delta   = 0.90,
    max_treedepth = 20
  )
)
## Warning: There were 924 divergent transitions after warmup. See
## https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
## to find out why this is a problem and how to eliminate them.
## Warning: Examine the pairs() plot to diagnose sampling problems
## Warning: The largest R-hat is 1.3, indicating chains have not mixed.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#r-hat
## Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#bulk-ess
## Warning: Tail Effective Samples Size (ESS) is too low, indicating posterior variances and tail quantiles may be unreliable.
## Running the chains for more iterations may help. See
## https://mc-stan.org/misc/warnings.html#tail-ess
saveRDS(fit_hier_nomix, "fit_hier_nomix_skew.rds")
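# With 924 divergent transitions and Rhat up to 1.3, these draws should not be
# trusted as they stand. A minimal retry sketch, assuming the same model file;
# a non-centered parameterization of beta0/beta inside hier_skewnormal.stan is
# usually the more effective remedy.
fit_hier_nomix_retry <- stan(
  file    = "hier_skewnormal.stan",
  data    = stan_data_nomix,
  chains  = 4,
  warmup  = 2000,
  iter    = 4000,
  seed    = 123,
  control = list(adapt_delta = 0.99, max_treedepth = 20)
)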
# ============================================================
# 8. SUMMARY OF LEVEL-1 & LEVEL-2 PARAMETERS
# ============================================================
print(
  fit_hier_nomix,
  pars = c(
    "alpha",     # skewness (scalar)
    "sigma",     # residual sd (scalar)
    "beta0",     # per-city intercepts (length J)
    "beta",      # per-city slopes for X1..X5 (J x R)
    "gamma0_0",  # global level-2 intercept
    "gamma0",    # effect of Z on the intercepts (length M)
    "gamma",     # effect of Z on the slopes (R x M)
    "tau0", "tau"
  ),
  probs = c(0.025, 0.5, 0.975)
)
## Inference for Stan model: anon_model.
## 4 chains, each with iter=2000; warmup=1000; thin=1;
## post-warmup draws per chain=1000, total post-warmup draws=4000.
##
## mean se_mean sd 2.5% 50% 97.5% n_eff Rhat
## alpha 4.54 0.02 0.75 3.17 4.56 6.26 1333 1.00
## sigma 11.52 0.05 0.52 10.57 11.45 12.62 116 1.03
## beta0[1] 46.67 0.04 0.98 44.55 46.56 48.70 747 1.01
## beta0[2] 41.53 0.03 0.98 39.70 41.37 43.79 791 1.01
## beta0[3] 41.01 0.10 0.74 39.45 41.08 42.37 58 1.04
## beta0[4] 44.64 0.05 0.99 42.53 44.58 46.59 480 1.02
## beta0[5] 42.65 0.03 0.93 40.82 42.64 44.73 1149 1.00
## beta0[6] 41.25 0.25 1.39 38.77 41.17 43.90 31 1.06
## beta0[7] 40.98 0.04 0.98 38.72 40.93 42.84 488 1.02
## beta0[8] 44.24 0.13 0.91 42.33 44.29 45.94 47 1.06
## beta0[9] 52.41 0.21 2.81 48.39 51.67 59.99 180 1.03
## beta[1,1] 1.37 0.05 0.90 -0.27 1.45 3.44 363 1.01
## beta[1,2] 0.00 0.04 1.07 -2.26 -0.09 2.18 862 1.01
## beta[1,3] 1.04 0.02 0.49 -0.02 1.09 1.99 720 1.01
## beta[1,4] -0.58 0.03 0.76 -2.45 -0.47 0.72 785 1.01
## beta[1,5] -0.23 0.06 1.10 -2.41 -0.16 2.14 380 1.01
## beta[2,1] -0.85 0.03 1.05 -2.84 -0.98 1.42 1135 1.01
## beta[2,2] 0.60 0.39 1.80 -2.30 0.50 4.68 22 1.09
## beta[2,3] 0.77 0.07 1.21 -1.66 1.01 3.07 275 1.02
## beta[2,4] 0.05 0.14 0.80 -1.55 0.12 1.46 31 1.08
## beta[2,5] 0.79 0.36 1.84 -2.35 0.79 4.44 26 1.09
## beta[3,1] -1.47 0.03 0.72 -3.01 -1.59 0.02 570 1.02
## beta[3,2] 0.93 0.05 0.96 -0.84 0.81 3.05 437 1.03
## beta[3,3] 0.98 0.03 0.88 -0.81 0.97 2.86 661 1.00
## beta[3,4] -0.05 0.04 0.57 -1.24 -0.11 1.06 258 1.02
## beta[3,5] 0.65 0.30 1.29 -1.47 0.69 3.22 18 1.13
## beta[4,1] 0.44 0.09 0.76 -1.26 0.54 1.74 71 1.05
## beta[4,2] 2.57 0.14 1.08 0.36 2.65 4.46 58 1.05
## beta[4,3] 2.34 0.27 1.20 -0.04 2.35 4.27 19 1.11
## beta[4,4] -0.21 0.03 0.78 -1.49 -0.37 1.79 680 1.01
## beta[4,5] -0.21 0.16 1.33 -3.33 0.00 1.88 66 1.06
## beta[5,1] -0.83 0.03 0.96 -3.25 -0.73 0.95 946 1.00
## beta[5,2] 1.50 0.09 1.04 -0.44 1.53 3.66 121 1.03
## beta[5,3] 0.19 0.10 1.12 -1.86 -0.01 2.70 134 1.03
## beta[5,4] -0.08 0.02 0.78 -1.66 -0.11 1.50 1428 1.01
## beta[5,5] 0.61 0.28 1.40 -1.70 0.55 3.67 26 1.08
## beta[6,1] -1.27 0.12 1.48 -3.93 -1.49 1.94 143 1.04
## beta[6,2] -1.11 0.49 1.60 -4.16 -1.19 1.32 11 1.15
## beta[6,3] 2.58 0.11 1.45 -0.44 2.79 5.36 185 1.02
## beta[6,4] 0.49 0.39 1.16 -1.46 0.61 2.66 9 1.21
## beta[6,5] -0.16 0.32 2.11 -3.83 -0.31 4.06 45 1.06
## beta[7,1] -1.30 0.03 0.97 -3.84 -1.17 0.30 823 1.01
## beta[7,2] 0.92 0.10 1.83 -2.08 0.51 5.40 313 1.02
## beta[7,3] 1.87 0.08 1.49 -0.91 1.99 5.02 309 1.02
## beta[7,4] 0.31 0.16 0.73 -1.12 0.31 1.59 20 1.08
## beta[7,5] 0.73 0.33 1.70 -1.71 0.70 4.34 27 1.09
## beta[8,1] -0.31 0.02 0.78 -2.35 -0.21 1.15 1063 1.01
## beta[8,2] 1.87 0.18 1.37 -0.68 1.98 4.55 60 1.05
## beta[8,3] 0.53 0.09 0.92 -1.43 0.59 2.22 106 1.04
## beta[8,4] -0.24 0.02 0.35 -1.11 -0.19 0.31 379 1.02
## beta[8,5] 0.42 0.07 1.27 -1.97 0.25 3.65 302 1.02
## beta[9,1] 2.08 0.10 1.14 -0.40 2.26 4.26 139 1.04
## beta[9,2] 1.56 0.08 0.97 -0.48 1.66 3.32 156 1.04
## beta[9,3] 2.01 0.04 0.99 0.03 1.87 4.00 733 1.01
## beta[9,4] -0.13 0.04 1.34 -2.98 0.01 2.37 1282 1.01
## beta[9,5] -2.82 0.33 2.70 -9.83 -2.32 0.97 66 1.06
## gamma0_0 42.30 0.38 5.51 22.73 43.99 45.39 216 1.02
## gamma0[1] 0.13 0.12 0.95 -1.97 0.16 1.82 68 1.04
## gamma0[2] 3.26 0.08 1.41 -0.93 3.39 5.25 311 1.01
## gamma[1,1] -0.04 0.03 0.71 -1.45 -0.15 1.39 584 1.01
## gamma[1,2] 1.30 0.03 0.69 -0.21 1.44 2.62 590 1.02
## gamma[2,1] -0.62 0.25 0.91 -2.45 -0.60 0.77 13 1.14
## gamma[2,2] -0.29 0.11 0.86 -1.87 -0.35 1.40 60 1.05
## gamma[3,1] 0.43 0.26 0.92 -1.36 0.42 1.82 12 1.16
## gamma[3,2] 0.37 0.32 1.01 -1.59 0.36 2.05 10 1.17
## gamma[4,1] 0.19 0.14 0.59 -0.75 0.16 1.36 18 1.09
## gamma[4,2] -0.09 0.02 0.67 -1.44 -0.06 1.23 1166 1.01
## gamma[5,1] -0.43 0.03 0.95 -2.45 -0.25 1.42 858 1.01
## gamma[5,2] -1.09 0.37 1.39 -3.91 -1.08 1.08 14 1.19
## tau0 2.28 0.26 3.64 0.18 1.00 14.76 191 1.02
## tau[1] 0.59 0.14 0.61 0.04 0.41 2.13 18 1.12
## tau[2] 1.81 0.07 0.70 0.57 1.81 3.33 116 1.05
## tau[3] 1.99 0.04 0.68 0.72 2.04 3.52 353 1.03
## tau[4] 0.57 0.03 0.43 0.05 0.53 1.65 224 1.02
## tau[5] 0.98 0.06 0.82 0.09 0.80 3.25 189 1.02
##
## Samples were drawn using NUTS(diag_e) at Wed Dec 10 12:22:42 2025.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at
## convergence, Rhat=1).
# Check convergence (Rhat)
range(summary(fit_hier_nomix)$summary[,"Rhat"], na.rm = TRUE)
## [1] 0.9991634 1.3235478
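# Visual overview of all Rhat values via bayesplot (already attached for
# mcmc_trace below); values well above 1.01 flag the non-converged parameters.
rhats_nomix <- summary(fit_hier_nomix)$summary[, "Rhat"]
rhats_nomix <- rhats_nomix[!is.na(rhats_nomix)]
mcmc_rhat(rhats_nomix)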
# ============================================================
# 9. MODEL FIT: LOO, WAIC, BIC, Bayes R^2
# ============================================================
# log-likelihood matrix ('log_lik' must be defined in generated quantities)
log_lik_nomix <- extract_log_lik(fit_hier_nomix)
loo_nomix <- loo(log_lik_nomix)
## Warning: Some Pareto k diagnostic values are too high. See help('pareto-k-diagnostic') for details.
waic_nomix <- waic(log_lik_nomix)
## Warning:
## 10 (2.6%) p_waic estimates greater than 0.4. We recommend trying loo instead.
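# Follow-up on the Pareto k warning: standard loo-package diagnostics show
# which observations are problematic.
print(loo_nomix)
pareto_k_table(loo_nomix)
plot(loo_nomix)  # Pareto k value for each observation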
# BIC-style criterion (an approximation: the pointwise posterior-mean
# log-likelihood summed over observations, with p_loo from LOO playing the
# role of the number of parameters)
logLik_tot_nomix <- apply(log_lik_nomix, 2, mean) |> sum()
p_eff_nomix      <- loo_nomix$estimates["p_loo", "Estimate"]
BIC_nomix        <- -2 * logLik_tot_nomix + p_eff_nomix * log(N)
BIC_nomix
## [1] 2701.9
# Bayes R^2 (using y_rep from Stan; 'y_rep' must be defined in generated quantities)
yrep_nomix <- rstan::extract(fit_hier_nomix)$y_rep  # draws x N
Bayes_R2 <- function(y, yrep) {
  mu_rep <- apply(yrep, 2, mean)      # posterior mean prediction per observation
  var_f  <- var(mu_rep)               # variance of the fitted values
  res    <- sweep(yrep, 2, y)         # residuals per draw (plain yrep - y would recycle y down the rows)
  var_r  <- mean(apply(res, 1, var))  # mean within-draw residual variance
  var_f / (var_f + var_r)
}
Bayes_R2_nomix <- Bayes_R2(bayes_dat$Y, yrep_nomix)
Bayes_R2_nomix
## [1] 0.0002759153
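# Cross-check with a per-draw Bayes R^2 in the spirit of Gelman et al. (2019),
# approximated here with the posterior predictive draws y_rep rather than the
# linear predictor (the model file is not shown):
bayes_R2_draws <- apply(yrep_nomix, 1, function(yr) {
  var(yr) / (var(yr) + var(bayes_dat$Y - yr))
})
quantile(bayes_R2_draws, c(0.025, 0.5, 0.975))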
# ============================================================
# 10. TRACE PLOTS OF KEY PARAMETERS
# ============================================================
mcmc_arr_nomix <- as.array(fit_hier_nomix)
mcmc_trace(
  mcmc_arr_nomix,
  pars = c("alpha", "sigma", "gamma0_0", "tau0")
)
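# The sampler warnings above recommend examining the pairs() plot; an
# illustrative subset of parameters keeps the panel readable.
pairs(fit_hier_nomix, pars = c("alpha", "sigma", "gamma0_0", "tau0"))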
