library(latex2exp)
library(nloptr)
library(psych)  # for tr()

Experiment 1: convergence study of \(\hat{\theta}\) against \(n\).

Linear regression with a Gaussian data-generating process; the model assumes the errors are Gaussian. The purpose of this experiment is to show the convergence of the Gauss-Markov (least-squares) solution \(\hat{\theta}\) to \(\theta_0\) as \(n\) increases.
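
For reference, the estimator computed in the loop below is the Tikhonov-regularized least-squares solution

\[
\hat{\theta}_\lambda = \big(X^\top X + n\lambda I_p\big)^{-1} X^\top y ,
\]

which reduces to the ordinary Gauss-Markov (LS) estimate for \(\lambda = 0\), as used here.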

theta_0 <- c(0, 1)
lambda <- 0
p <- 2
set.seed(123)
sizes <- c(1e3, 5e3, 1e4, 2e4, 3e4, 5e4, 1e5, 2e5, 5e5, 1e6)
m <- length(sizes)
MSE <- rep(0, m)
results_0 <- rep(0, m)
results_1 <- rep(0, m)
for (i in 1:m)
{
  print(i)
  n <- sizes[i]
  x <- rnorm(n)  # random design; a constant x would make X rank-deficient
  eps <- rnorm(n)
  
  y <- theta_0[1] + theta_0[2]*x + eps
  
  X <- cbind(rep(1, n), x)
  
  # ridge normal equations (lambda = 0 here, i.e. ordinary least squares)
  A <- solve(t(X) %*% X + diag(rep(n * lambda, p)))
  b <- t(X) %*% y
  
  hattheta <- A %*% b
  print(hattheta)
  results_0[i] <- hattheta[1]
  results_1[i] <- hattheta[2]
  MSE[i] <- mean(eps^2)  # empirical noise variance
  
}
par(mfrow=c(1,2))
plot(sizes, results_0 - theta_0[1], xlab='n', ylab=TeX('$\\hat{\\theta_0} - \\theta_0$'))
plot(sizes, results_1 - theta_0[2], xlab='n', ylab=TeX('$\\hat{\\theta_1} - \\theta_1$'))

Experiment 2: Convergence of \(E[\|\hat{\theta}-\theta_0\|_2^2]\)

\(n\) trials per experiment, \(m\) experiments.
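
Recall the standard result (with \(\lambda = 0\)): conditional on \(X\), \(\mathrm{Cov}(\hat{\theta}) = \sigma^2 (X^\top X)^{-1}\), so

\[
E\big[\|\hat{\theta}-\theta_0\|_2^2\big] = \sigma^2\,\mathrm{tr}\big((X^\top X)^{-1}\big) \approx \frac{\sigma^2 p}{n}
\]

for an intercept plus a standard-normal covariate. With \(\sigma^2 = 1\), \(p = 2\), and \(n = 10^4\), the EMSE should level off near \(2\times 10^{-4}\).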

m <- 10000
MSE <- rep(0,m)
n <- 10000
x <- rnorm(n)
# loop over experiments
for (i in 1:m){
  #print(i)
  eps <- rnorm(n)
  y <- theta_0[1] + theta_0[2]*x + eps
  X <- cbind(rep(1, n), x)
  A <- solve(t(X) %*% X + diag(rep(n * lambda, p)))
  b <- t(X) %*% y
  
  hattheta <- A %*% b
  
  MSE[i] <- norm(hattheta - theta_0, type='2')^2
}
# plotting of the expected MSE
check_points <- seq(1,10)*1000
EMSE <- rep(0, length(check_points))
for (i in 1:10)
{
  EMSE[i]<-mean(MSE[1:check_points[i]])
  
}
plot(check_points, EMSE, xlab='number of experiments', ylab='EMSE')
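
As a sanity check, the approximation above predicts the curve should level off near \(p/n = 2\times 10^{-4}\); we can overlay it:

# dashed reference line at the theoretical EMSE = sigma^2 * p / n (sigma^2 = 1)
abline(h = p / n, lty = 2)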

Experiment 3: Show that Tikhonov regularization reduces the EMSE
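
The \(\lambda\) selector below implements generalized cross-validation (GCV), following Wahba. Writing \(A(\lambda) = X(X^\top X + n\lambda I)^{-1}X^\top\) for the influence matrix, the GCV score is

\[
V(\lambda) = \frac{\tfrac{1}{n}\,\|(I - A(\lambda))\,y\|_2^2}{\big[\tfrac{1}{n}\,\mathrm{tr}\big(I - A(\lambda)\big)\big]^2},
\]

and f below computes exactly this (the two \(1/n\) factors collapse into the single leading factor of \(n\)).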

f <- function(lambda){
  # GCV score: b is the residual-forming matrix I - A(lambda); tr() is from psych
  A <- solve(t(X) %*% X + diag(rep(n * lambda, p)))
  b <- diag(rep(1, n)) - X %*% A %*% t(X)
  lambda_prime <- n * norm(b %*% y, type='2')^2 / tr(b)^2
  return(lambda_prime)
}
jac <- function(param){
  # gradient placeholder; unused since COBYLA is derivative-free
  return(0)
}
get.lambda<-function(param0){
  res<- nloptr::nloptr( x0=param0,
                 eval_f=f,
                 #eval_grad_f=jac,
                 lb = c(epsilon),
                 ub = c(999),
                 opts = list("algorithm"='NLOPT_LN_COBYLA', "maxeval" = 30, "xtol_rel" = 1e-16, "ftol_rel"= 1e-66, "ftol_abs"=1e-6,   "print_level"=1,"check_derivatives" = TRUE,"check_derivatives_print" = "all"))
  lambda_0<-res$solution 
  return(lambda_0)
 
}

Compute \(\lambda_0>0\) using the GCV estimate

epsilon <- 1e-3
p <- 2  # dimensionality of the parameters
# Ensure that lambda > 0
param0 <- 1.0
n <- 1000
x <- rnorm(n)
eps <- rnorm(n)
y <- theta_0[1] + theta_0[2]*x + eps
X <- cbind(rep(1, n), x)
lambda_0 <- get.lambda(param0)
Skipping derivative checker because algorithm 'NLOPT_LN_COBYLA' does not use gradients.
iteration: 1
    f(x) = 1.187594
iteration: 2
    f(x) = 1.325575
iteration: 3
    f(x) = 0.995559
iteration: 4
    f(x) = 0.957927
iteration: 5
    f(x) = 0.959041
iteration: 6
    f(x) = 0.957926
    ... (iterations 7-58 omitted; f(x) settles at 0.957926)
print(lambda_0)
[1] 0.00201856

Now that we have \(\lambda_0\), let’s check that \(\hat{\theta}_{\lambda}\) reduces the EMSE compared to the Gauss-Markov estimate (LS). We’ll need to minimize the negative log-likelihood with a ridge penalty, as in Eq. 1.3 of Wahba.
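
Concretely, the criterion minimized below (the Gaussian negative log-likelihood plus a ridge term, up to constants) is

\[
J(\theta) = \frac{1}{n}\sum_{i=1}^{n}\big(y_i - \theta_1 - \theta_2 x_i\big)^2 + \lambda\,\|\theta\|_2^2 ,
\]

with \(\theta_1\) the intercept and \(\theta_2\) the slope, matching theta[1] and theta[2] in objfun.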

Let’s review some closed-form results to tighten up the variance analysis.

\(\hat{\beta}_1 = C_{xy}/(\hat\sigma^2 + \lambda)\)

\(\hat{\beta}_0 = (\bar{y} - \hat\beta_1\bar{x})/(1+\lambda)\)
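
These are the first-order conditions of the penalized criterion above, with \(C_{xy} = x^\top y/n\) and \(\hat\sigma^2 = x^\top x/n\): setting both partial derivatives to zero gives \(\hat\beta_0(1+\lambda) = \bar{y} - \hat\beta_1\bar{x}\) and \(\hat\beta_1(\hat\sigma^2 + \lambda) = C_{xy} - \hat\beta_0\bar{x}\), where the \(\hat\beta_0\bar{x}\) term is dropped since \(\bar{x}\approx 0\) for a standard-normal covariate. This is what get.theta.lambda implements below.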

get.theta.lambda <- function(lambda){
  theta.lambda <- rep(0, 2)  # closed-form ridge estimate (intercept, slope)
  theta.lambda[2] <- (x %*% y)/(x %*% x + n*lambda)                    # slope
  theta.lambda[1] <- (mean(y) - theta.lambda[2]*mean(x))/(1 + lambda)  # intercept
  return(theta.lambda)
}

Check against numerical results

objfun <- function(theta){
  # penalized LS criterion: (1/n)||y - theta1 - theta2*x||^2 + lambda_0*||theta||^2
  f <- (1/n)*norm(y - (theta[1] + theta[2]*x), type='2')^2 + lambda_0*norm(theta, type='2')^2
  return(f)
}
get.theta.lambda.num <- function(theta_init){
  res <- nloptr::nloptr(x0 = theta_init,
                 eval_f = objfun,
                 lb = c(-10, -10),
                 ub = c(10, 10),
                 opts = list("algorithm"='NLOPT_LN_COBYLA', "maxeval" = 500, "xtol_rel" = 1e-16, "ftol_rel"= 1e-16, "ftol_abs"=1e-16, "print_level"=0))
  theta.lambda.num <- res$solution
  return(theta.lambda.num)
}
print('numerical optimization')
[1] "numerical optimization"
print(get.theta.lambda.num(theta_0))
[1] 0.03840644 1.02183998
print('closed-form solution')
[1] "closed-form solution"
print(get.theta.lambda(lambda_0))
[1] 0.0382921 1.0197277

Let’s see that this estimate of \(\hat{\theta}_{\lambda}\) really does reduce the EMSE. I’m not re-estimating \(\lambda_0\) on every trial, to keep the computational cost down.

# n trial per experiment
# m experiments
m <- 1000
MSE <- rep(0,m)
MSE.reg <-rep(0,m)
n <- 10000
x <- rnorm(n)
X<-cbind(rep(1,n),x)


# loop over experiments
for (i in 1:m){
  #print(i)
  eps <- rnorm(n)
  y<-theta_0[1] + theta_0[2]*x + eps
  

  A <- solve(t(X) %*% X + diag(rep(n * lambda, p)))  # lambda = 0 here: plain LS
  b <- t(X) %*% y
  
  hattheta <- A %*% b
  
  MSE[i]<-norm(hattheta-theta_0, type='2')^2
  #lambda_0<-get.lambda(param0)
  theta.lambda<-get.theta.lambda(lambda_0)
  #print(lambda_0)
  #print(theta.lambda)
  MSE.reg[i]<-norm(theta.lambda-theta_0, type='2')^2
}
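
Before plotting, a quick numeric summary of the overall averages makes the comparison concrete:

# average squared estimation error over all m runs, for both estimators
cat(sprintf('LS EMSE:       %.3e\n', mean(MSE)))
cat(sprintf('Tikhonov EMSE: %.3e\n', mean(MSE.reg)))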

It’s good practice to separate the plotting from the estimation, so that figures can be touched up without re-running the experiments.

# plotting of the expected MSE
check_points <- seq(1,10)*m/10
EMSE <- rep(0, length(check_points))
EMSE.reg <- rep(0, length(check_points))
for (i in 1:10)
{
  EMSE[i]<-mean(MSE[1:check_points[i]])
  EMSE.reg[i]<-mean(MSE.reg[1:check_points[i]])
  
}
plot(check_points, EMSE, xlab='number of experiments', ylab='EMSE')
points(check_points, EMSE.reg, col='red')
legend('topright', legend=c('LS', 'Tikhonov'), col=c('black', 'red'), pch=1)
