# Install if not already installed
if (!requireNamespace("MASS", quietly = TRUE)) install.packages("MASS")
if (!requireNamespace("boot", quietly = TRUE)) install.packages("boot")
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")

library(MASS)    # For generating bivariate normal samples
library(boot)    # For bootstrap resampling
library(ggplot2) # For visualization
library(dplyr)   # For glimpse()

1. Find a real-world (not simulated) data set with at least 1,000 samples and two continuous features X and Y. Assess the normality of X and Y.

Data Source: [life-expectancy-who](https://www.kaggle.com/datasets/kumarajarshi/life-expectancy-who?resource=download)

data <- read.csv("LifeExpectancyData.csv")
glimpse(data)
Rows: 2,938
Columns: 22
$ Country                         <chr> "Afghanistan", "Afghanistan", "Afghanistan", "A…
$ Year                            <int> 2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008,…
$ Status                          <chr> "Developing", "Developing", "Developing", "Deve…
$ Life.expectancy                 <dbl> 65.0, 59.9, 59.9, 59.5, 59.2, 58.8, 58.6, 58.1,…
$ Adult.Mortality                 <int> 263, 271, 268, 272, 275, 279, 281, 287, 295, 29…
$ infant.deaths                   <int> 62, 64, 66, 69, 71, 74, 77, 80, 82, 84, 85, 87,…
$ Alcohol                         <dbl> 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.03,…
$ percentage.expenditure          <dbl> 71.279624, 73.523582, 73.219243, 78.184215, 7.0…
$ Hepatitis.B                     <int> 65, 62, 64, 67, 68, 66, 63, 64, 63, 64, 66, 67,…
$ Measles                         <int> 1154, 492, 430, 2787, 3013, 1989, 2861, 1599, 1…
$ BMI                             <dbl> 19.1, 18.6, 18.1, 17.6, 17.2, 16.7, 16.2, 15.7,…
$ under.five.deaths               <int> 83, 86, 89, 93, 97, 102, 106, 110, 113, 116, 11…
$ Polio                           <int> 6, 58, 62, 67, 68, 66, 63, 64, 63, 58, 58, 5, 4…
$ Total.expenditure               <dbl> 8.16, 8.18, 8.13, 8.52, 7.87, 9.20, 9.42, 8.33,…
$ Diphtheria                      <int> 65, 62, 64, 67, 68, 66, 63, 64, 63, 58, 58, 5, …
$ HIV.AIDS                        <dbl> 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.…
$ GDP                             <dbl> 584.25921, 612.69651, 631.74498, 669.95900, 63.…
$ Population                      <dbl> 33736494, 327582, 31731688, 3696958, 2978599, 2…
$ thinness..1.19.years            <dbl> 17.2, 17.5, 17.7, 17.9, 18.2, 18.4, 18.6, 18.8,…
$ thinness.5.9.years              <dbl> 17.3, 17.5, 17.7, 18.0, 18.2, 18.4, 18.7, 18.9,…
$ Income.composition.of.resources <dbl> 0.479, 0.476, 0.470, 0.463, 0.454, 0.448, 0.434…
$ Schooling                       <dbl> 10.1, 10.0, 9.9, 9.8, 9.5, 9.2, 8.9, 8.7, 8.4, …
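
Before picking X and Y, it can help to see how many missing values the candidate columns contain, since na.omit() below silently drops incomplete rows. A minimal check, assuming the data frame loaded above:

# Count missing values in the two candidate columns
colSums(is.na(data[, c("percentage.expenditure", "Life.expectancy")]))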
Y <- data$Life.expectancy
X <- data$percentage.expenditure  
df <- na.omit(data.frame(X = X, Y = Y))

# Visualize distributions
par(mfrow = c(2, 2))
hist(df$X, main = "Histogram of X", xlab = "percentage.expenditure")
hist(df$Y, main = "Histogram of Y", xlab = "Life.expectancy")
qqnorm(df$X); qqline(df$X, col = "red")
qqnorm(df$Y); qqline(df$Y, col = "red")

Comments: The histogram of X is highly right-skewed, and its Q-Q plot deviates strongly from the reference line, so X is clearly non-normal. The histogram of Y is more symmetric, and most points in its Q-Q plot fall close to the line, but there are deviations in both tails, suggesting mild skewness.
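
The visual assessment can be backed up with a formal test. A sketch using shapiro.test(), which is applicable here because the sample size is below its 5,000-observation limit:

# Shapiro-Wilk normality tests (shapiro.test() accepts 3 to 5,000 observations)
shapiro.test(df$X)  # expected to strongly reject normality, given the heavy right skew
shapiro.test(df$Y)  # expected to reject as well, since the test is sensitive to tail deviations at this n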

2. Compute the theoretical 95% confidence interval estimate for the means of X, Y and their correlation.

n <- nrow(df)
alpha <- 0.05

# Means and standard errors
mean_X <- mean(df$X)
mean_Y <- mean(df$Y)
se_X <- sd(df$X) / sqrt(n)  # standard error of the mean: sd / sqrt(n)
se_Y <- sd(df$Y) / sqrt(n)

z_crit <- qnorm(1 - alpha/2)

# 95% CI for means
ci_X <- c(mean_X - z_crit * se_X, mean_X + z_crit * se_X)
ci_Y <- c(mean_Y - z_crit * se_Y, mean_Y + z_crit * se_Y)

# Correlation and Fisher's z CI
r <- cor(df$X, df$Y)
z <- atanh(r)
se_r <- 1 / sqrt(n - 3)
z_ci <- c(z - z_crit * se_r, z + z_crit * se_r)
r_ci <- tanh(z_ci)

# Print results
cat("95% CI for mean of X:", ci_X, "\n")
95% CI for mean of X: 738.9885 741.6539 
cat("95% CI for mean of Y:", ci_Y, "\n")
95% CI for mean of Y: 69.21856 69.23131 
cat("95% CI for correlation:", r_ci, "\n")
95% CI for correlation: 0.3504878 0.4123831 
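
As a cross-check on the hand-computed formulas, R's built-in t.test() and cor.test() return comparable intervals (t-based for the means, Fisher-z for the Pearson correlation); with n this large the t and z critical values are nearly identical:

# Built-in cross-checks for the intervals above
t.test(df$X)$conf.int          # t-based 95% CI for the mean of X
t.test(df$Y)$conf.int          # t-based 95% CI for the mean of Y
cor.test(df$X, df$Y)$conf.int  # Fisher-z 95% CI for the correlation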

3. Use the bootstrapping method to obtain the 95% CI estimates for the means of X, Y and their correlation with B = 500, 1000, 5000.

# Function for bootstrapping mean
boot_mean <- function(data, indices, col) {
  return(mean(data[indices, col]))
}

# Function for bootstrapping correlation
boot_cor <- function(data, indices) {
  d <- data[indices, ]
  return(cor(d$X, d$Y))
}

# Bootstrapping for each B
for (B in c(500, 1000, 5000)) {
  set.seed(123)
  boot_X <- boot(data = df, statistic = function(d, i) boot_mean(d, i, "X"), R = B)
  boot_Y <- boot(data = df, statistic = function(d, i) boot_mean(d, i, "Y"), R = B)
  boot_corr <- boot(data = df, statistic = boot_cor, R = B)
  
  # Percentile CIs
  ci_X_boot <- boot.ci(boot_X, type = "perc")$percent[4:5]
  ci_Y_boot <- boot.ci(boot_Y, type = "perc")$percent[4:5]
  ci_corr_boot <- boot.ci(boot_corr, type = "perc")$percent[4:5]
  
  cat("\nBootstrap B =", B, "\n")
  cat("  95% CI for mean of X:", ci_X_boot, "\n")
  cat("  95% CI for mean of Y:", ci_Y_boot, "\n")
  cat("  95% CI for correlation:", ci_corr_boot, "\n")
}

Bootstrap B = 500 
  95% CI for mean of X: 669.9998 812.4581 
  95% CI for mean of Y: 68.89326 69.5792 
  95% CI for correlation: 0.3611118 0.4024863 

Bootstrap B = 1000 
  95% CI for mean of X: 668.6017 820.5948 
  95% CI for mean of Y: 68.89212 69.55466 
  95% CI for correlation: 0.3636607 0.4029201 

Bootstrap B = 5000 
  95% CI for mean of X: 671.1306 815.2249 
  95% CI for mean of Y: 68.8787 69.56761 
  95% CI for correlation: 0.3628424 0.4020531 
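
The percentile method is the simplest option in boot.ci(); the same objects can also give bias-corrected and accelerated (BCa) intervals, which tend to be more accurate for skewed statistics such as the correlation. A sketch reusing the boot objects left over from the last loop iteration (B = 5000):

# BCa intervals from the B = 5000 run (boot_X, boot_Y, boot_corr persist after the loop)
boot.ci(boot_X, type = "bca")$bca[4:5]
boot.ci(boot_Y, type = "bca")$bca[4:5]
boot.ci(boot_corr, type = "bca")$bca[4:5]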

4. Comment on your results by comparing the estimates from steps 2 and 3. What is the effect of B? Do you think the normality of X and Y also affects the accuracy of the bootstrapping method?

Comment: The parametric CIs printed above for the means are much narrower than the bootstrap CIs, while the bootstrap percentile CIs for the correlation are somewhat narrower than the Fisher-z interval.

As B increases from 500 to 5,000, the bootstrap CIs for means and correlation become more stable and consistent, but the interval widths do not change dramatically.
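
One way to see the effect of B directly is to repeat the bootstrap with different seeds and compare how much the percentile endpoints move between runs; a small sketch for the correlation (the five seeds and the two B values are arbitrary choices for illustration):

# Spread of the percentile endpoints across repeated bootstrap runs
for (B in c(500, 5000)) {
  ends <- t(sapply(1:5, function(s) {
    set.seed(s)
    boot.ci(boot(df, boot_cor, R = B), type = "perc")$percent[4:5]
  }))
  cat("B =", B, "| SD of lower endpoint:", sd(ends[, 1]),
      "| SD of upper endpoint:", sd(ends[, 2]), "\n")
}

Larger B should shrink this run-to-run variation, while the interval width itself stays roughly the same.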

Normality is not required: the bootstrap resamples the observed data directly, so it handles the skewed X reasonably well, especially with a sample this large. Heavy skewness can still reduce the accuracy of simple percentile intervals when the sample is small, so non-normality matters far less for the bootstrap than for the parametric formulas, but it is not entirely irrelevant.

5. If your sample is much smaller (n < 100), what do you expect for the bootstrapping method's accuracy?

Answer: With small samples, bootstrap CIs become less reliable. The empirical distribution of a small sample may not represent the population well, so the resamples can understate the true variability and the resulting intervals tend to have poor coverage, particularly in the tails.
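
One way to get a feel for this is to bootstrap a small random subsample and compare the resulting interval with the full-sample one; a sketch with an arbitrary subsample of 50 rows:

# Bootstrap the correlation on a random subsample of n = 50
set.seed(42)
small_df <- df[sample(nrow(df), 50), ]
boot_small <- boot(small_df, boot_cor, R = 5000)
boot.ci(boot_small, type = "perc")$percent[4:5]  # typically much wider and more seed-dependent than the full-sample CI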
