# Danny Tshitumbu
# ID: 211964756
# Applied Stats 1: Q.6.6.
# Q.6.6.a.
# Standard approach
m1 <- 1e4
Z <- rep(0, m1)
for (i in 1:m1) {
  X <- rpois(25, 2)
  Z[i] <- (mean(X) - 2) / sqrt(2/25)
}
er_rate1 <- (1/m1) * sum(Z >= 1.645) # Estimate
er_rate1
## [1] 0.0513
SE_1 <- sqrt(er_rate1 * (1 - er_rate1) / m1) # Standard error of the estimate
SE_1
## [1] 0.00220609
CI <- c(er_rate1 - 1.645 * SE_1, er_rate1 + 1.645 * SE_1) # 90% Confidence interval
CI
## [1] 0.04767098 0.05492902
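# Optional sketch (not part of the required solution): the same Type 1 error estimate
# can be written without an explicit loop using replicate(), which serves only as a
# quick sanity check on er_rate1; the name Z_check is illustrative.
Z_check <- replicate(m1, (mean(rpois(25, 2)) - 2) / sqrt(2/25))
mean(Z_check >= 1.645)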
# Importance Sampling with unstandardized weights
m2 <- 1e4
Z2 <- rep(0, m2)
for (i in 1:m2) {
  X <- rpois(25, 2.4653)
  W <- dpois(X, 2) / dpois(X, 2.4653) # the unstandardized weights
  Z2[i] <- (mean(X * W) - 2) / sqrt(2/25)
}
er_rate2 <- mean(Z2 >= 1.645) # Estimate
er_rate2
## [1] 1e-04
SE_2 <- sqrt(er_rate2 * (1 - er_rate2) / m2) # Standard error of the estimate
SE_2
## [1] 9.9995e-05
CI_2 <- c(er_rate2 - 1.645 * SE_2, er_rate2 + 1.645 * SE_2) # 90% Confidence interval
CI_2
## [1] -6.449177e-05 2.644918e-04
# Importance Sampling with standardized weights
m3 <- 1e4
Z3 <- rep(0, m3)
for (i in 1:m3) {
  X <- rpois(25, 2.4653)
  W <- exp(0.4653) * (2/2.4653)^X # the weights dpois(X, 2) / dpois(X, 2.4653) in closed form
  Stand_W <- W / sum(W)
  Z3[i] <- (sum(X * Stand_W) - 2) / sqrt(2/25)
}
er_rate3 <- (1/m3) * sum(Z3 >= 1.645) # Estimate
er_rate3
## [1] 0.0582
SE_3 <- sqrt(er_rate3 * (1 - er_rate3) / m3) # Standard error of the estimate
SE_3
## [1] 0.002341213
CI_3 <- c(er_rate3 - 1.645 * SE_3, er_rate3 + 1.645 * SE_3) # 90% Confidence interval
CI_3
## [1] 0.05434871 0.06205129
# Importance Sampling with Control Variate
m4 <- 1e5
Z4 <- rep(0, m4)
for (i in 1:m4) {
  X <- rpois(25, 2.4653)
  W <- dpois(X, 2) / dpois(X, 2.4653) # the unstandardized weights
  X_IS <- mean(X * W)
  lambda <- -cov(X * W, W) / var(W)
  Wstar <- sum(W) / 25
  X_ISCV <- X_IS + lambda * (Wstar - 1)
  Z4[i] <- (X_ISCV - 2) / sqrt(2/25)
}
er_rate4 <- mean(Z4 >= 1.645) # Estimate
er_rate4
## [1] 0
SE_4 <- sqrt(er_rate4 * (1 - er_rate4) / m4) # Standard error of the estimate
SE_4
## [1] 0
CI_4 <- c(er_rate4 - 1.645 * SE_4, er_rate4 + 1.645 * SE_4) # 90% Confidence interval
CI_4
## [1] 0 0
# Antithetic Approach
m5 <- 1e4
Z5 <- rep(0, m5)
for (i in 1:m5) {
  X1 <- rpois(25, 2)
  X2 <- qpois(1 - ppois(X1, 2), 2)
  mu1 <- mean(X1)
  mu2 <- mean(X2)
  mu <- (mu1 + mu2) / 2
  Z5[i] <- (mu - 2) / sqrt(2/25)
}
er_rate5 <- (1/m5) * sum(Z5 >= 1.645) # Estimate
er_rate5
## [1] 0
SE_5 <- sqrt(er_rate5 * (1 - er_rate5) / m5) # Standard error of the estimate
SE_5
## [1] 0
CI_5 <- c(er_rate5 - 1.645 * SE_5, er_rate5 + 1.645 * SE_5) # 90% Confidence interval
CI_5
## [1] 0 0
# Comments/discussion:
# We note that the Type 1 error estimates from the standard MC approach and the
# standardized-weight IS approach are similar, with similar standard errors. However,
# the unstandardized-weight approach gives an estimate and a standard error that are
# both smaller in value than those of the other two methods.
# In conclusion, the unstandardized Importance Sampling (IS) approach gives a smaller
# estimated Type 1 error than the standardized IS approach.
# We also note that the antithetic approach, as well as the IS with control variate,
# gives a near-zero Type 1 error estimate and a near-zero standard error.
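# Optional sketch (not part of the required solution): collecting the five estimates
# and standard errors computed above into one data frame (named summary_a here purely
# for illustration) makes the comparison in the comments easier to read.
summary_a <- data.frame(
  method   = c("Standard MC", "IS (unstandardized)", "IS (standardized)",
               "IS + control variate", "Antithetic"),
  estimate = c(er_rate1, er_rate2, er_rate3, er_rate4, er_rate5),
  std_err  = c(SE_1, SE_2, SE_3, SE_4, SE_5)
)
summary_a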
# Q.6.6.b.
rm(list = ls())
delta <- seq(2.2, 4, length.out = 50)
# Standard method
power <- rep(0, length(delta))
for (j in 1:length(delta)) {
  d <- delta[j]
  m <- 1e3
  Z <- rep(0, m)
  for (i in 1:m) {
    X <- rpois(25, d)
    Z[i] <- (mean(X) - 2) / sqrt(2/25)
  }
  power[j] <- (1/m) * sum(Z >= 1.645)
}
plot(delta, power, type = 'l', main = "Power Curve: Standard Approach")

er_pwer1 <- sqrt(power * (1 - power) / m) # standard error of each power estimate (m replications per point)
C_band1 <- matrix(c(power - 1.645 * er_pwer1, power + 1.645 * er_pwer1), ncol = 2)
require(plotrix)
## Loading required package: plotrix
# Plot with pointwise Confidence bands
plotCI(delta, power, ui = C_band1[, 2], li = C_band1[, 1])

# Importance Sampling with unstandardized weights
power2 <- rep(0, length(delta))
for (j in 1:length(delta)) {
  d <- delta[j]
  m2 <- 1e3
  Z2 <- rep(0, m2)
  for (i in 1:m2) {
    X <- rpois(25, 4) # We take the envelope from Poisson(4)
    W <- exp(4 - d) * (d/4)^X # the weights dpois(X, d) / dpois(X, 4) in closed form
    Z2[i] <- ((1/25) * sum(X * W) - 2) / sqrt(2/25)
  }
  power2[j] <- (1/m2) * sum(Z2 >= 1.645)
}
plot(delta, power2, type = 'l', main = "Power Curve: IS with Unstandardized Weight")

er_pwer2 <- sqrt(power2 * (1 - power2) / m2) # standard error of each power estimate
C_band2 <- matrix(c(power2 - 1.645 * er_pwer2, power2 + 1.645 * er_pwer2), ncol = 2)
# Plot with pointwise confidence bands
plotCI(delta, power2, ui = C_band2[, 2], li = C_band2[, 1])

# Importance Sampling with Standardized Weights
power3 <- rep(0, length(delta))
for (j in 1:length(delta)) {
  d <- delta[j]
  m3 <- 1e3
  Z3 <- rep(0, m3)
  for (i in 1:m3) {
    X <- rpois(25, 4) # We take the envelope from Poisson(4)
    W <- exp(4 - d) * (d/4)^X
    Stand_W <- W / sum(W)
    Z3[i] <- (sum(X * Stand_W) - 2) / sqrt(2/25)
  }
  power3[j] <- (1/m3) * sum(Z3 >= 1.645)
}
plot(delta, power3, type = 'l', main = "Power Curve: IS with Standardized Weight")

er_pwer3 <- sqrt(power3 * (1 - power3) / m3) # standard error of each power estimate
C_band3 <- matrix(c(power3 - 1.645 * er_pwer3, power3 + 1.645 * er_pwer3), ncol = 2)
# Plot of pointwise confidence bands
plotCI(delta, power3, ui = C_band3[, 2], li = C_band3[, 1])

# Importance Sampling with Control variate
power4 <- rep(0, length(delta))
for (j in 1:length(delta)) {
  d <- delta[j]
  m4 <- 1e3
  Z4 <- rep(0, m4)
  for (i in 1:m4) {
    X <- rpois(25, 4) # We take the envelope from Poisson(4)
    W <- exp(4 - d) * (d/4)^X
    X_IS <- mean(X * W)
    lambda <- -cov(X * W, W) / var(W)
    Wstar <- sum(W) / 25
    X_ISCV <- X_IS + lambda * (Wstar - 1)
    Z4[i] <- (X_ISCV - 2) / sqrt(2/25)
  }
  power4[j] <- (1/m4) * sum(Z4 >= 1.645)
}
plot(delta, power4, type = 'l', main = "Power Curve: IS with Control Variate")

er_pwer4 <- sqrt(power4 * (1 - power4) / m4) # standard error of each power estimate
C_band4 <- matrix(c(power4 - 1.645 * er_pwer4, power4 + 1.645 * er_pwer4), ncol = 2)
# Plot with pointwise confidence bands
plotCI(delta, power4, ui = C_band4[, 2], li = C_band4[, 1])

# Comments/discussion:
# From the graphs, the unstandardized IS approach appears more powerful than the
# standard and standardized approaches. Moreover, the unstandardized approach seems
# to have narrower confidence bands.
# Comparing the graphs to the results in (a), the method with the smallest Type 1
# error estimate (the unstandardized IS approach) also appears to be the most powerful;
# that is, a smaller Type 1 error here seems to go together with a higher probability
# of rejecting H0 when Ha is true.
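# Optional sketch (not part of the required solution): overlaying the four power
# curves computed above on one set of axes makes the comparison in the comments
# easier to see (colours and legend labels are illustrative).
plot(delta, power, type = 'l', ylim = c(0, 1), xlab = "delta", ylab = "power",
     main = "Power Curves: All Approaches")
lines(delta, power2, col = "red")
lines(delta, power3, col = "blue")
lines(delta, power4, col = "darkgreen")
legend("bottomright",
       legend = c("Standard", "IS (unstandardized)", "IS (standardized)", "IS + control variate"),
       col = c("black", "red", "blue", "darkgreen"), lty = 1)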