############################################################

# 1a. EXPLORATORY DATA ANALYSIS (EDA)

############################################################

# Additional descriptive statistics

eda_stats <- data.frame(
  Mean     = mean(x, na.rm = TRUE),
  Median   = median(x, na.rm = TRUE),
  SD       = sd(x, na.rm = TRUE),
  Skewness = skewness(x, na.rm = TRUE),
  Kurtosis = kurtosis(x, na.rm = TRUE)
)
cat("\n=== Statistik Deskriptif Data Eyes ===\n")
## 
## === Statistik Deskriptif Data Eyes ===
print(eda_stats)
##       Mean Median       SD Skewness Kurtosis
## 1 541.5333    540 7.022406 0.198078 1.772935
# Prepare a data.frame for ggplot

df_eyes <- data.frame(Eyes = x)

# Histogram + kernel density curve

ggplot(df_eyes, aes(x = Eyes)) +
  geom_histogram(aes(y = after_stat(density)),
                 bins   = 10,
                 colour = "black",
                 fill   = "lightblue") +
  geom_density(linewidth = 1) +
  labs(title = "Histogram and Kernel Density Curve of the Eyes Data",
       x = "Eyes",
       y = "Density") +
  theme_minimal()

# Histogram + theoretical normal curve (based on the sample mean & sd)

mu_x <- mean(x, na.rm = TRUE)
sd_x <- sd(x, na.rm = TRUE)

ggplot(df_eyes, aes(x = Eyes)) +
  geom_histogram(aes(y = after_stat(density)),
                 bins   = 10,
                 colour = "black",
                 fill   = "lightgray") +
  stat_function(fun = dnorm,
                args = list(mean = mu_x, sd = sd_x),
                linewidth = 1) +
  labs(title = "Histogram of the Eyes Data with a Theoretical Normal Curve",
       x = "Eyes",
       y = "Density") +
  theme_minimal()

# Q-Q plot to check normality

qqnorm(x, main = "Q-Q Plot of the Eyes Data")
qqline(x, col = "red")
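
A formal complement to these visual checks is the Shapiro-Wilk normality test (a minimal sketch; shapiro.test() is in base R's stats package and requires between 3 and 5000 observations):

# Shapiro-Wilk test: a small p-value indicates departure from normality
shapiro.test(x)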

############################################################

# 2. MSN–BURR DENSITY IN R (FOR PLOTS & RESPONSIBILITIES)

############################################################
k_fixed <- 1

dBurr_msn3 <- function(x, alpha, mu, phi, k = k_fixed, log = FALSE) {

  # pdf: f(x|α,μ,φ) = (k/φ) exp(-k (x-μ)/φ) (1 + exp(-k (x-μ)/φ)/α)^(-(α+1))

  alpha <- pmax(alpha, 1e-6)
  phi   <- pmax(phi,   1e-6)

  z <- (x - mu)/phi
  z <- pmin(pmax(z, -700), 700)   # numerical stability: keep exp() in range
  e <- exp(-k * z)
  logf <- log(k) - log(phi) - k * z - (alpha + 1) * log1p(e/alpha)

  if (log) return(logf)
  exp(logf)
}
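
As a quick sanity check on dBurr_msn3(), the density should integrate to 1 over the real line. A minimal sketch with arbitrary illustrative parameter values (alpha = 2, mu = 0, phi = 3, and the default k = 1):

# numerical check that the MSN-Burr pdf integrates to ~1
integrate(dBurr_msn3, lower = -Inf, upper = Inf,
          alpha = 2, mu = 0, phi = 3)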

The Bayesian MSN–Burr Mixture Model

Suppose the observations \(x_1, x_2, \dots, x_N\) are modelled as a mixture of two MSN–Burr components with parameter vector

\[ \theta = (\alpha_1, \phi_1, \mu_1,\; \alpha_2, \phi_2, \mu_2,\; \pi_1), \]

subject to \(\alpha_j > 0\) and \(\phi_j > 0\) for \(j=1,2\), and \(0 < \pi_1 < 1\). The second component has weight \(\pi_2 = 1 - \pi_1\).

MSN–Burr Component PDF

The probability density function (pdf) of an MSN–Burr component is

\[ f_{\text{Burr}}(x \mid \alpha, \mu, \phi, k) = \frac{k}{\phi} \exp\!\left(-k\frac{x-\mu}{\phi}\right) \left( 1 + \frac{\exp\!\left(-k\frac{x-\mu}{\phi}\right)}{\alpha} \right)^{-(\alpha+1)}, \]

where \(\alpha > 0\) is the shape parameter, \(\mu\) the location parameter, \(\phi > 0\) the scale parameter, and \(k > 0\) an index (treated as known/constant throughout this analysis).

MSN–Burr Mixture Likelihood

The mixture density for a single observation \(x_i\) is

\[ f(x_i \mid \theta) = \pi_1\, f_{\text{Burr}}(x_i \mid \alpha_1, \mu_1, \phi_1, k) + (1-\pi_1)\, f_{\text{Burr}}(x_i \mid \alpha_2, \mu_2, \phi_2, k). \]

Assuming the observations are conditionally independent given the parameters, the likelihood for the full data \(\mathbf{x} = (x_1,\dots,x_N)\) is

\[ L(\theta \mid \mathbf{x}) = \prod_{i=1}^N \Big[ \pi_1\, f_{\text{Burr}}(x_i \mid \alpha_1, \mu_1, \phi_1, k) + (1-\pi_1)\, f_{\text{Burr}}(x_i \mid \alpha_2, \mu_2, \phi_2, k) \Big]. \]

The log-likelihood is

\[ \ell(\theta \mid \mathbf{x}) = \sum_{i=1}^N \log \Big[ \pi_1\, f_{\text{Burr}}(x_i \mid \alpha_1, \mu_1, \phi_1, k) + (1-\pi_1)\, f_{\text{Burr}}(x_i \mid \alpha_2, \mu_2, \phi_2, k) \Big]. \]
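
For reference outside Stan, this log-likelihood can be evaluated directly in R. The sketch below is a hypothetical helper (not used by the Stan fits); it combines the two component log-densities from dBurr_msn3() with a two-term log-sum-exp for numerical stability:

# mixture log-likelihood for the two-component MSN-Burr model
loglik_mix_burr <- function(x, alpha1, mu1, phi1, alpha2, mu2, phi2, pi1, k = 1) {
  l1 <- log(pi1)     + dBurr_msn3(x, alpha1, mu1, phi1, k, log = TRUE)
  l2 <- log(1 - pi1) + dBurr_msn3(x, alpha2, mu2, phi2, k, log = TRUE)
  m  <- pmax(l1, l2)                        # log-sum-exp trick
  sum(m + log(exp(l1 - m) + exp(l2 - m)))
}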

Priors

In the Bayesian approach, the priors for the parameters are chosen to be independent:

  • \(\alpha_1, \alpha_2 \sim \text{Gamma}(2, 1)\)
  • \(\phi_1, \phi_2 \sim \text{Cauchy}(0, 5)\), restricted to \(\phi_j > 0\) (half-Cauchy)
  • \(\mu_1, \mu_2 \sim \mathcal{N}(m_0, s_0^2)\) (the Stan implementation uses \(m_0 = \overline{x}\) and \(s_0 = 10\))
  • \(\eta \sim \mathcal{N}(0, 2^2)\), with \[ \pi_1 = \text{logit}^{-1}(\eta) = \frac{1}{1 + e^{-\eta}}, \qquad \pi_2 = 1 - \pi_1. \]

In factored form, the joint prior can be written as

\[ p(\theta) = p(\alpha_1)\,p(\alpha_2)\, p(\phi_1)\,p(\phi_2)\, p(\mu_1)\,p(\mu_2)\, p(\pi_1), \]

or, if the parameter \(\eta\) is used,

\[ p(\alpha_1,\alpha_2,\phi_1,\phi_2,\mu_1,\mu_2,\eta) = p(\alpha_1)\,p(\alpha_2)\, p(\phi_1)\,p(\phi_2)\, p(\mu_1)\,p(\mu_2)\,p(\eta). \]
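
To see what these priors imply before conditioning on the data, one can sample from them directly. A minimal sketch, assuming the \(\phi_j > 0\) constraint turns the Cauchy(0, 5) prior into a half-Cauchy (sampled here via abs(rcauchy())):

# draws from the priors (prior predictive building blocks)
set.seed(1)
S <- 1000
alpha_prior <- rgamma(S, shape = 2, rate = 1)
phi_prior   <- abs(rcauchy(S, location = 0, scale = 5))  # half-Cauchy
mu_prior    <- rnorm(S, mean = mean(x), sd = 10)
pi1_prior   <- plogis(rnorm(S, 0, 2))                    # inv-logit of eta
summary(pi1_prior)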

Posterior

By Bayes' theorem, the posterior of the parameters is

\[ p(\theta \mid \mathbf{x}) = \frac{ L(\theta \mid \mathbf{x})\, p(\theta) }{ \int L(\theta \mid \mathbf{x})\, p(\theta)\, d\theta }, \]

where \(L(\theta \mid \mathbf{x})\) is the mixture likelihood and \(p(\theta)\) is the joint prior.

Up to proportionality (dropping the normalising constant),

\[ p(\theta \mid \mathbf{x}) \propto L(\theta \mid \mathbf{x})\, p(\theta) \]

or, more explicitly,

\[ p(\theta \mid \mathbf{x}) \propto \Bigg\{ \prod_{i=1}^N \Big[ \pi_1\, f_{\text{Burr}}(x_i \mid \alpha_1, \mu_1, \phi_1, k) + (1-\pi_1)\, f_{\text{Burr}}(x_i \mid \alpha_2, \mu_2, \phi_2, k) \Big] \Bigg\} \times p(\alpha_1)\,p(\alpha_2)\, p(\phi_1)\,p(\phi_2)\, p(\mu_1)\,p(\mu_2)\,p(\pi_1). \]
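
Putting the pieces together, the unnormalised log-posterior is a short R function. This is a sketch for illustration only (the actual sampling below is done by Stan); it reuses the hypothetical loglik_mix_burr() helper above, and the half-Cauchy normalising constants are absorbed into the proportionality:

# unnormalised log-posterior: log L(theta | x) + log p(theta)
log_post_burr <- function(alpha1, mu1, phi1, alpha2, mu2, phi2, eta, x, k = 1) {
  pi1 <- plogis(eta)
  loglik_mix_burr(x, alpha1, mu1, phi1, alpha2, mu2, phi2, pi1, k) +
    dgamma(alpha1, 2, 1, log = TRUE) + dgamma(alpha2, 2, 1, log = TRUE) +
    dcauchy(phi1, 0, 5, log = TRUE) + dcauchy(phi2, 0, 5, log = TRUE) +
    dnorm(mu1, mean(x), 10, log = TRUE) + dnorm(mu2, mean(x), 10, log = TRUE) +
    dnorm(eta, 0, 2, log = TRUE)
}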

Normal Mixture PDF

A two-component normal mixture distribution is defined as

\[ f(x) = \pi_1 \, \mathcal{N}(x \mid \mu_1, \sigma_1^2) + \pi_2 \, \mathcal{N}(x \mid \mu_2, \sigma_2^2) \]

with component normal densities

\[ \mathcal{N}(x \mid \mu, \sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}} \exp\!\left( -\frac{(x-\mu)^2}{2\sigma^2} \right) \]

where:

  • \(\pi_1\) and \(\pi_2 = 1 - \pi_1\) are the mixture proportions,
  • \((\mu_1, \sigma_1)\) are the parameters of component 1,
  • \((\mu_2, \sigma_2)\) are the parameters of component 2.
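
In R this density is a one-liner; a minimal sketch (the parameter values in the example call are arbitrary illustrations):

# two-component normal mixture density
dnorm_mix <- function(x, pi1, mu1, sigma1, mu2, sigma2) {
  pi1 * dnorm(x, mu1, sigma1) + (1 - pi1) * dnorm(x, mu2, sigma2)
}
dnorm_mix(540, pi1 = 0.5, mu1 = 538, sigma1 = 3, mu2 = 548, sigma2 = 3)
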
############################################################

# 3. STAN MODEL: MSN–BURR MIXTURE (2 COMPONENTS)

############################################################
burr_mix_stan <- "
functions {
// log-pdf MSN–Burr
real burr_msn_lpdf(real x, real alpha, real mu, real phi, real k) {
real z = (x - mu) / phi;
real e = exp(-k * z);
return log(k) - log(phi) - k * z
- (alpha + 1) * log1p(e / alpha);
}
}
data {
int<lower=1> N;
real x[N];
real<lower=0> k;
}
parameters {
real<lower=0> alpha1;
real<lower=0> phi1;
real          mu1;
real<lower=0> alpha2;
real<lower=0> phi2;
real          mu2;
real          eta;          // logit(pi1)
}
transformed parameters {
real<lower=0,upper=1> pi1;
pi1 = inv_logit(eta);
}
model {
// Priors (weakly-informative)
eta    ~ normal(0, 2);
alpha1 ~ gamma(2, 1);
alpha2 ~ gamma(2, 1);
phi1   ~ cauchy(0, 5);
phi2   ~ cauchy(0, 5);
mu1    ~ normal(mean(x), 10);
mu2    ~ normal(mean(x), 10);

// Likelihood mixture
for (n in 1:N) {
target += log_mix(
pi1,
burr_msn_lpdf(x[n] | alpha1, mu1, phi1, k),
burr_msn_lpdf(x[n] | alpha2, mu2, phi2, k)
);
}
}
generated quantities {
real pi2 = 1 - pi1;
vector[N] log_lik;

for (n in 1:N) {
log_lik[n] = log_mix(
pi1,
burr_msn_lpdf(x[n] | alpha1, mu1, phi1, k),
burr_msn_lpdf(x[n] | alpha2, mu2, phi2, k)
);
}
}
"
writeLines(burr_mix_stan, "burr_mix.stan")

############################################################

# 4. STAN MODEL: NORMAL MIXTURE (FOR COMPARISON)

############################################################
norm_mix_stan <- "
data {
int<lower=1> N;
real x[N];
}
parameters {
real mu1;
real mu2;
real<lower=0> sigma1;
real<lower=0> sigma2;
real eta;                 // logit(pi1)
}
transformed parameters {
real<lower=0,upper=1> pi1;
pi1 = inv_logit(eta);
}
model {
// Priors
eta    ~ normal(0, 2);
mu1    ~ normal(mean(x), 10);
mu2    ~ normal(mean(x), 10);
sigma1 ~ cauchy(0, 5);
sigma2 ~ cauchy(0, 5);

// Likelihood mixture
for (n in 1:N) {
target += log_mix(
pi1,
normal_lpdf(x[n] | mu1, sigma1),
normal_lpdf(x[n] | mu2, sigma2)
);
}
}
generated quantities {
real pi2 = 1 - pi1;
vector[N] log_lik;

for (n in 1:N) {
log_lik[n] = log_mix(
pi1,
normal_lpdf(x[n] | mu1, sigma1),
normal_lpdf(x[n] | mu2, sigma2)
);
}
}
"
writeLines(norm_mix_stan, "norm_mix.stan")

############################################################

# 5. STAN SAMPLING: MSN–BURR

############################################################
stan_data_burr <- list(N = N, x = x, k = 1)

fit_burr <- stan(
  file    = "burr_mix.stan",
  data    = stan_data_burr,
  chains  = 4,
  iter    = 4000,   # 2000 warmup + 2000 post-warmup draws per chain
  warmup  = 2000,
  seed    = 123,
  control = list(adapt_delta = 0.9)
)

print(fit_burr, pars = c("pi1","pi2","alpha1","mu1","phi1","alpha2","mu2","phi2"))
## Inference for Stan model: anon_model.
## 4 chains, each with iter=4000; warmup=2000; thin=1; 
## post-warmup draws per chain=2000, total post-warmup draws=8000.
## 
##          mean se_mean   sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## pi1      0.60    0.03 0.21   0.12   0.46   0.67   0.76   0.88    40 1.11
## pi2      0.40    0.03 0.21   0.12   0.24   0.33   0.54   0.88    40 1.11
## alpha1   2.36    0.04 1.40   0.41   1.37   2.07   3.05   5.79  1561 1.01
## mu1    540.19    1.05 5.68 535.04 536.54 537.43 540.28 551.42    30 1.18
## phi1     2.93    0.15 1.24   0.57   2.14   3.05   3.71   5.17    66 1.07
## alpha2   2.00    0.03 1.38   0.26   1.00   1.70   2.68   5.49  2056 1.00
## mu2    546.82    1.04 5.76 535.61 540.95 549.91 550.63 551.87    30 1.18
## phi2     2.05    0.15 1.22   0.63   1.15   1.63   2.76   5.11    66 1.07
## 
## Samples were drawn using NUTS(diag_e) at Mon Nov 24 21:30:52 2025.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
############################################################

# 6. STAN SAMPLING: NORMAL MIXTURE

############################################################
stan_data_norm <- list(N = N, x = x)

fit_norm <- stan(
  file    = "norm_mix.stan",
  data    = stan_data_norm,
  chains  = 4,
  iter    = 4000,
  warmup  = 2000,
  seed    = 123,
  control = list(adapt_delta = 0.9)
)

print(fit_norm, pars = c("pi1","pi2","mu1","sigma1","mu2","sigma2"))
## Inference for Stan model: anon_model.
## 4 chains, each with iter=4000; warmup=2000; thin=1; 
## post-warmup draws per chain=2000, total post-warmup draws=8000.
## 
##          mean se_mean   sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## pi1      0.51    0.08 0.21   0.16   0.31   0.53   0.70   0.84     8 1.48
## pi2      0.49    0.08 0.21   0.16   0.30   0.47   0.69   0.84     8 1.48
## mu1    543.34    2.73 6.31 535.40 537.58 539.55 550.31 551.47     5 2.17
## sigma1   3.60    0.43 1.55   1.31   2.17   3.69   4.69   6.71    13 1.23
## mu2    544.15    2.71 6.26 535.65 537.80 546.60 550.42 551.56     5 2.11
## sigma2   3.51    0.40 1.56   1.28   2.05   3.57   4.61   6.73    15 1.21
## 
## Samples were drawn using NUTS(diag_e) at Mon Nov 24 21:31:10 2025.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
############################################################

# 7. EXTRACT PARAMETERS (POSTERIOR MEANS)

############################################################
post_burr <- rstan::extract(fit_burr)

pi1_burr   <- mean(post_burr$pi1)
pi2_burr   <- mean(post_burr$pi2)
alpha1_hat <- mean(post_burr$alpha1)
mu1_hat    <- mean(post_burr$mu1)
phi1_hat   <- mean(post_burr$phi1)
alpha2_hat <- mean(post_burr$alpha2)
mu2_hat    <- mean(post_burr$mu2)
phi2_hat   <- mean(post_burr$phi2)

cat("\n=== ESTIMASI MSN–BURR (posterior mean) ===\n")
## 
## === ESTIMASI MSN–BURR (posterior mean) ===
cat("pi1 =", pi1_burr, "pi2 =", pi2_burr, "\n")
## pi1 = 0.6010588 pi2 = 0.3989412
cat("Komp 1: alpha1 =", alpha1_hat, "mu1 =", mu1_hat, "phi1 =", phi1_hat, "\n")
## Komp 1: alpha1 = 2.361093 mu1 = 540.1861 phi1 = 2.926553
cat("Komp 2: alpha2 =", alpha2_hat, "mu2 =", mu2_hat, "phi2 =", phi2_hat, "\n\n")
## Komp 2: alpha2 = 2.001183 mu2 = 546.8159 phi2 = 2.054465
post_norm <- rstan::extract(fit_norm)

pi1_norm   <- mean(post_norm$pi1)
pi2_norm   <- mean(post_norm$pi2)
mu1_n_hat  <- mean(post_norm$mu1)
sigma1_hat <- mean(post_norm$sigma1)
mu2_n_hat  <- mean(post_norm$mu2)
sigma2_hat <- mean(post_norm$sigma2)

cat("=== ESTIMASI MIXTURE NORMAL (posterior mean) ===\n")
## === ESTIMASI MIXTURE NORMAL (posterior mean) ===
cat("pi1 =", pi1_norm, "pi2 =", pi2_norm, "\n")
## pi1 = 0.509517 pi2 = 0.490483
cat("Komp 1: mu1 =", mu1_n_hat, "sigma1 =", sigma1_hat, "\n")
## Komp 1: mu1 = 543.339 sigma1 = 3.598572
cat("Komp 2: mu2 =", mu2_n_hat, "sigma2 =", sigma2_hat, "\n\n")
## Komp 2: mu2 = 544.1473 sigma2 = 3.505495
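
Posterior means alone hide the estimation uncertainty; 95% equal-tailed credible intervals can be read off the draws directly (a minimal sketch for a few MSN–Burr parameters):

# 95% credible intervals from the posterior draws
t(sapply(post_burr[c("pi1", "mu1", "mu2")],
         quantile, probs = c(0.025, 0.975)))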
############################################################

# 8. MIXTURE DENSITY FUNCTIONS

############################################################
f_mix_burr <- function(x) {
  f1 <- dBurr_msn3(x, alpha1_hat, mu1_hat, phi1_hat)
  f2 <- dBurr_msn3(x, alpha2_hat, mu2_hat, phi2_hat)
  pi1_burr * f1 + pi2_burr * f2
}

f_mix_norm <- function(x) {
  pi1_norm * dnorm(x, mu1_n_hat, sigma1_hat) +
    pi2_norm * dnorm(x, mu2_n_hat, sigma2_hat)
}
############################################################

# 9. SKEWNESS PER COMPONENT

############################################################

# --- Burr ---

f1_b <- dBurr_msn3(x, alpha1_hat, mu1_hat, phi1_hat)
f2_b <- dBurr_msn3(x, alpha2_hat, mu2_hat, phi2_hat)

num1_b <- pi1_burr * f1_b
num2_b <- pi2_burr * f2_b
den_b  <- num1_b + num2_b

tau1_b <- num1_b / den_b                      # responsibility of component 1
cluster_burr <- ifelse(tau1_b >= 0.5, 1, 2)   # hard assignment

skew1_burr <- skewness(x[cluster_burr == 1])
skew2_burr <- skewness(x[cluster_burr == 2])

cat("Skewness Burr Komp1 =", skew1_burr, "\n")
## Skewness Burr Komp1 = 0.00436694
cat("Skewness Burr Komp2 =", skew2_burr, "\n\n")
## Skewness Burr Komp2 = -0.555566
# --- Normal ---

f1_n <- dnorm(x, mu1_n_hat, sigma1_hat)
f2_n <- dnorm(x, mu2_n_hat, sigma2_hat)

num1_n <- pi1_norm * f1_n
num2_n <- pi2_norm * f2_n
den_n  <- num1_n + num2_n

tau1_n <- num1_n / den_n                      # responsibility of component 1
cluster_norm <- ifelse(tau1_n >= 0.5, 1, 2)   # hard assignment

skew1_norm <- skewness(x[cluster_norm == 1])
skew2_norm <- skewness(x[cluster_norm == 2])

cat("Skewness Normal Komp1 =", skew1_norm, "\n")
## Skewness Normal Komp1 = 0.00436694
cat("Skewness Normal Komp2 =", skew2_norm, "\n\n")
## Skewness Normal Komp2 = -0.555566
# Skewness summary table

skew_df <- data.frame(
  Model    = c("Burr K1","Burr K2","Normal K1","Normal K2"),
  Skewness = c(skew1_burr, skew2_burr, skew1_norm, skew2_norm)
)
print(skew_df)
##       Model    Skewness
## 1   Burr K1  0.00436694
## 2   Burr K2 -0.55556597
## 3 Normal K1  0.00436694
## 4 Normal K2 -0.55556597
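
As a quick check on how the responsibilities split the observations, the hard cluster sizes under each model can be tabulated:

# hard-assignment cluster sizes
table(cluster_burr)
table(cluster_norm)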
############################################################

# 10. SKEWNESS PLOT (BARPLOT)

############################################################
ggplot(skew_df, aes(x = Model, y = Skewness, fill = Model)) +
  geom_col() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  ylab("Skewness") + xlab("") +
  ggtitle("Skewness per Component\nMSN–Burr Mixture vs Normal Mixture") +
  theme_minimal()

############################################################

# 11. MIXTURE DENSITY PLOTS (NORMAL vs MSN–BURR)

############################################################
grid <- seq(min(x) - 1, max(x) + 1, length.out = 400)

par(mfrow = c(1,2))

# Normal mixture

hist(x, breaks = 10, freq = FALSE,
     main = "Normal Mixture", xlab = "Eyes",
     col = "lightblue", border = "white")
lines(grid, f_mix_norm(grid), lwd = 2)

# MSN–Burr mixture

hist(x, breaks = 10, freq = FALSE,
     main = "MSN–Burr Mixture", xlab = "Eyes",
     col = "lightblue", border = "white")
lines(grid, f_mix_burr(grid), lwd = 2)

par(mfrow = c(1,1))

############################################################

# 12. AIC, BIC, WAIC

############################################################

# Number of free parameters (approx.):

# MSN–Burr: alpha1, phi1, mu1, alpha2, phi2, mu2, eta  = 7

# Normal  : mu1, mu2, sigma1, sigma2, eta              = 5

k_burr <- 7
k_norm <- 5
############################################################

# EXTRACT LOG-LIKELIHOODS & COMPUTE AIC/BIC/WAIC

############################################################

# log_lik_burr & log_lik_norm: matriks S x N

log_lik_burr <- extract_log_lik(
  fit_burr,
  parameter_name = "log_lik",
  merge_chains   = TRUE
)

log_lik_norm <- extract_log_lik(
  fit_norm,
  parameter_name = "log_lik",
  merge_chains   = TRUE
)

# Approximate log-likelihood: posterior mean of each pointwise log-density,
# summed over observations (a plug-in value for AIC/BIC)

loglik_burr_point <- sum(apply(log_lik_burr, 2, mean))
loglik_norm_point <- sum(apply(log_lik_norm, 2, mean))

# AIC: -2 * logLik + 2k

AIC_burr <- -2 * loglik_burr_point + 2 * k_burr
AIC_norm <- -2 * loglik_norm_point + 2 * k_norm

# BIC: -2 * logLik + k * log(N)

BIC_burr <- -2 * loglik_burr_point + k_burr * log(N)
BIC_norm <- -2 * loglik_norm_point + k_norm * log(N)

# WAIC (Widely Applicable Information Criterion)

WAIC_burr <- waic(log_lik_burr)
WAIC_norm <- waic(log_lik_norm)

cat("\n=== AIC & BIC ===\n")
## 
## === AIC & BIC ===
cat("Mixture MSN–Burr:  AIC =", AIC_burr, " BIC =", BIC_burr, "\n")
## Mixture MSN–Burr:  AIC = 325.9823  BIC = 339.0807
cat("Mixture Normal:    AIC =", AIC_norm, " BIC =", BIC_norm, "\n\n")
## Mixture Normal:    AIC = 321.1931  BIC = 330.5491
cat("=== WAIC ===\n")
## === WAIC ===
print(WAIC_burr)
## 
## Computed from 8000 by 48 log-likelihood matrix.
## 
##           Estimate  SE
## elpd_waic   -158.6 3.3
## p_waic         5.1 0.5
## waic         317.2 6.6
print(WAIC_norm)
## 
## Computed from 8000 by 48 log-likelihood matrix.
## 
##           Estimate  SE
## elpd_waic   -158.1 3.1
## p_waic         4.7 0.5
## waic         316.2 6.2
cat("\nCatatan: model lebih baik biasanya punya AIC/BIC/WAIC yang LEBIH KECIL.\n\n")
## 
## Catatan: model lebih baik biasanya punya AIC/BIC/WAIC yang LEBIH KECIL.
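
Beyond comparing the WAIC values by eye, the loo package can contrast the two fits on the elpd scale; a minimal sketch (loo_compare() reports the elpd difference and its standard error, with the better-predicting model in the first row):

loo_compare(WAIC_burr, WAIC_norm)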
############################################################

# 13. CONVERGENCE TRACE PLOTS

############################################################

# Trace plots for the main MSN–Burr model parameters

stan_trace(fit_burr, pars = c("pi1","alpha1","mu1","phi1","alpha2","mu2","phi2"))

# Trace plots for the main normal mixture model parameters

stan_trace(fit_norm, pars = c("pi1","mu1","sigma1","mu2","sigma2"))

#=========================================

# full summary

summ_burr <- summary(fit_burr)$summary

# look at the first 6 rows

head(summ_burr)
##              mean    se_mean       sd        2.5%        25%        50%
## alpha1   2.361093 0.03533585 1.396197   0.4101574   1.370687   2.067622
## phi1     2.926553 0.15263319 1.243462   0.5710909   2.143833   3.047338
## mu1    540.186056 1.04523200 5.679362 535.0410275 536.539656 537.432267
## alpha2   2.001183 0.03044215 1.380215   0.2598344   1.000192   1.699591
## phi2     2.054465 0.15077030 1.221594   0.6271922   1.153784   1.626657
## mu2    546.815860 1.04342214 5.759517 535.6081018 540.953452 549.911417
##               75%      97.5%      n_eff     Rhat
## alpha1   3.045473   5.794700 1561.21204 1.005557
## phi1     3.708019   5.174737   66.36924 1.066900
## mu1    540.284721 551.422529   29.52390 1.181254
## alpha2   2.682263   5.494072 2055.62127 1.002202
## phi2     2.756364   5.109580   65.64809 1.065084
## mu2    550.626365 551.868893   30.46857 1.177410
# keep only the Rhat column

rhat_burr <- summ_burr[, "Rhat"]

# largest R-hat value

max_rhat_burr <- max(rhat_burr, na.rm = TRUE)
max_rhat_burr
## [1] 1.181254
# parameters with R-hat > 1.1

bad_rhat_burr <- rhat_burr[rhat_burr > 1.1]
bad_rhat_burr
##      mu1      mu2      pi1      pi2 
## 1.181254 1.177410 1.113883 1.113883
# their parameter names

names(bad_rhat_burr)
## [1] "mu1" "mu2" "pi1" "pi2"