1

(a)

# Basic Monte Carlo estimate of the integral of g(x) = exp(-x)/(1+x^2) over (a,b)
mc_function <- function(n, a, b){
  x <- runif(n, a, b)                    # n uniform draws on (a,b)
  g <- (1/(1 + x^2)) * exp(-x)           # integrand evaluated at the draws
  theta <- (b - a) * mean(g)             # MC estimate of the integral
  theta.se <- (b - a) * sd(g) / sqrt(n)  # standard error of the estimate
  list(theta = theta, theta.se = theta.se)
}

n = 20000
a = 0
b = 1
I = mc_function(n,a,b)
output <- data.frame(MC_approx=I$theta, MC_S.E.=I$theta.se)
print(output)
##   MC_approx     MC_S.E.
## 1 0.5250929 0.001732666
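
As a quick sanity check, the same integral can be evaluated deterministically; a minimal sketch using R's built-in adaptive quadrature:

# Deterministic reference value for the same integral via quadrature
integrate(function(x) exp(-x)/(1 + x^2), lower = 0, upper = 1)

The MC estimate above should agree with this value to within a few standard errors.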

(b)

# Antithetic-variates estimate: each uniform u is paired with 1-u,
# so only n/2 uniforms are generated for n evaluations of g
anti_function <- function(n, a, b){
  u <- runif(n/2, a, b)
  gu1 <- (1/(1 + u^2)) * exp(-u)               # g evaluated at u
  gu2 <- (1/(1 + (1-u)^2)) * exp(-(1-u))       # g evaluated at the antithetic 1-u
  theta.a <- (1/n) * sum(gu1 + gu2)            # average over all n evaluations
  theta.a.se <- (1/sqrt(2*n)) * sd(gu1 + gu2)  # SE of the mean of n/2 pairs
  list(theta.a = theta.a, theta.a.se = theta.a.se)
}

n = 20000
a = 0
b = 1
I1 = anti_function(n,a,b)
output1 <- data.frame(AT_approx=I1$theta.a, AT_S.E.=I1$theta.a.se)
print(output1)
##   AT_approx      AT_S.E.
## 1 0.5249958 0.0003315029

The standard error from the antithetic method is roughly a fifth of the standard error from basic Monte Carlo integration (0.00033 vs. 0.00173).
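
The reduction can be quantified directly; a minimal check, reusing the I and I1 objects computed above:

# Variance-reduction check (I from part (a), I1 from part (b))
I1$theta.a.se / I$theta.se          # observed SE ratio, roughly 0.19
1 - (I1$theta.a.se / I$theta.se)^2  # approximate fraction of variance removed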

(c)

# Correlation between g(U) and g(1-U) for U ~ Uniform(0,1)
u = runif(n/2)
gu1 = (1/(1+u^2))*exp(-u)
gu2 = (1/(1+(1-u)^2))*exp(-(1-u))

cor(gu1, gu2)
## [1] -0.9635651

Let \(\hat{\mu}_{1}, \hat{\mu}_{2}\) be two identically distributed unbiased estimators, each based on \(n/2\) draws, so that \(var\{\hat{\mu}_{i}\}=2\sigma^{2}/n\). The antithetic estimator \[\hat{\mu}_{AS}=\frac{1}{2}\left(\hat{\mu}_{1}+\hat{\mu}_{2}\right)\] has variance \[var\{\hat{\mu}_{AS}\}=\frac{1}{4}\left(var\{\hat{\mu}_{1}\}+var\{\hat{\mu}_{2}\}\right)+\frac{1}{2}cov\{\hat{\mu}_{1},\hat{\mu}_{2}\}=\frac{\sigma^{2}}{n}+\frac{1}{2}cov\{\hat{\mu}_{1},\hat{\mu}_{2}\}.\] If \(cov\{\hat{\mu}_{1},\hat{\mu}_{2}\}<0\), then \(\hat{\mu}_{AS}\) requires only half as many generated uniforms and has a smaller variance than the basic Monte Carlo estimator. Since the correlation in the output above is strongly negative (about \(-0.96\)), the covariance is negative here, and so the antithetic estimator has the smaller variance.
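
This can be checked numerically against the antithetic output; a minimal sketch, reusing gu1, gu2, and n from the chunk above:

# Numerical check of the variance formula; since cov{mu1, mu2} = cov(gu1, gu2)/(n/2),
# the formula sigma^2/n + (1/2)cov{mu1, mu2} reduces to (sigma^2 + cov)/n
sigma2 = var(c(gu1, gu2))          # estimate of sigma^2 = var{g(U)}
sqrt((sigma2 + cov(gu1, gu2))/n)   # should be close to the AT_S.E. reported above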

2

(a)

# Inverse-CDF generator for f(x) = exp(-x)/(1 - exp(-1)) on (0,1):
# solving u = F(x) = (1 - exp(-x))/(1 - exp(-1)) for x gives the line below
gen_function <- function(u){
  x <- log(1/(1 - (1 - exp(-1))*u))
  return(x)
}

u <- runif(n)         # n = 20000, carried over from above
x <- gen_function(u)  # a sample of size n from f
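
A quick graphical check of the generator; a sketch using base R graphics, comparing the sample with the target density:

# Histogram of the generated sample with the target density overlaid
hist(x, breaks = 40, freq = FALSE, main = "Sample from gen_function")
curve(exp(-x)/(1 - exp(-1)), from = 0, to = 1, add = TRUE, col = "red")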

(b)

# Importance-sampling estimate: draw X ~ f via gen_function and average g(X)/f(X)
mc_function_2 <- function(n){
  u <- runif(n)
  x <- gen_function(u)            # X ~ f(x) = exp(-x)/(1 - exp(-1))
  g <- (1/(1 + x^2)) * exp(-x)    # integrand
  f <- exp(-x)/(1 - exp(-1))      # importance density
  theta <- mean(g/f)              # IS estimate of the integral
  theta.se <- sd(g/f) / sqrt(n)   # standard error
  list(theta = theta, theta.se = theta.se)
}

n = 20000
I2 = mc_function_2(n)
output <- data.frame(MC_approx=I2$theta, MC_S.E.=I2$theta.se)
print(output)
##   MC_approx      MC_S.E.
## 1 0.5241854 0.0006892342
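
Relative to the basic Monte Carlo estimate in 1(a), importance sampling with this f already cuts the standard error substantially; a minimal comparison, reusing I and I2:

# SE of importance sampling relative to basic MC (I from 1(a), I2 from above)
I2$theta.se / I$theta.se   # about 0.4 given the outputs shown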

(c)

# Importance sampling combined with antithetic variates: u and 1-u are pushed
# through the same inverse CDF, giving negatively correlated draws x1, x2 from f
anti_function_2 <- function(n){
  u <- runif(n/2)
  x1 <- gen_function(u)
  x2 <- gen_function(1 - u)
  g1 <- (1/(1 + x1^2)) * exp(-x1)
  f1 <- exp(-x1)/(1 - exp(-1))
  g2 <- (1/(1 + x2^2)) * exp(-x2)
  f2 <- exp(-x2)/(1 - exp(-1))
  theta.a <- (1/n) * sum((g1/f1) + (g2/f2))            # mean over n evaluations
  theta.a.se <- (1/sqrt(2*n)) * sd((g1/f1) + (g2/f2))  # SE of the mean of n/2 pairs
  list(theta.a = theta.a, theta.a.se = theta.a.se)
}

n = 20000
I3 = anti_function_2(n)
output1 <- data.frame(AT_approx=I3$theta.a, AT_S.E.=I3$theta.a.se)
print(output1)
##   AT_approx      AT_S.E.
## 1 0.5248493 0.0002386056
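
Putting the four standard errors side by side summarizes the variance reduction; a small sketch, reusing I, I1, I2, and I3 from the chunks above:

# Standard errors of all four estimators of the same integral
data.frame(method = c("basic MC", "antithetic", "importance sampling", "IS + antithetic"),
           SE = c(I$theta.se, I1$theta.a.se, I2$theta.se, I3$theta.a.se))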

3

(a)

library(MASS)
# Hit-or-miss estimate of P(X1 <= a, X2 <= b, X3 <= c) for X ~ N(mu, sig)
hit_function <- function(n, mu, sig, a, b, c){
  x <- mvrnorm(n, mu, sig)                        # n draws from the trivariate normal
  I <- sum(x[,1] <= a & x[,2] <= b & x[,3] <= c)  # number of hits
  theta.hm <- I/n                                 # hit proportion
  theta.hm.se <- sqrt((1/n) * theta.hm * (1 - theta.hm))  # binomial SE
  list(theta.hm = theta.hm, theta.hm.se = theta.hm.se)
}

n = 20000
mu = c(0,0,0)
sig = matrix(c(1, 3/5, 1/3, 3/5, 1, 11/15, 1/3, 11/15, 1), nrow=3)

R <- hit_function(n=n, mu=mu, sig=sig, a=1, b=4, c=2)
R$theta.hm
## [1] 0.8304

(b)

The standard error is the usual binomial one, \(\sqrt{\hat{\theta}(1-\hat{\theta})/n}\), already computed inside hit_function:

R$theta.hm.se
## [1] 0.002653638

(c)

# true value = 0.8279849897
CI_lower = R$theta.hm - 1.96*R$theta.hm.se
CI_upper = R$theta.hm + 1.96*R$theta.hm.se

# 95% Confidence Interval
print(c("CI_lower"=CI_lower, "CI_upper"=CI_upper))
##  CI_lower  CI_upper 
## 0.8251989 0.8356011

Yes: the 95% confidence interval (0.8252, 0.8356) contains the true value 0.8279849897.
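
The quoted true value can itself be reproduced by numerical integration of the trivariate normal; a sketch, assuming the mvtnorm package is installed:

# Cross-check of the true value (assumes mvtnorm is available)
library(mvtnorm)
pmvnorm(lower = rep(-Inf, 3), upper = c(1, 4, 2), mean = mu, sigma = sig)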