Ex 1.

Find the first and second derivatives of \(f(x) = \frac{3x^4 - 4x^3}{12}\) (the same function minimized in Ex 2):
\(f'(x) = x^3 - x^2\)
\(f''(x) = 3x^2 - 2x\)
Newton's update \(x_{k+1} = x_k - \frac{f'(x_k)}{f''(x_k)}\) becomes \(x_{k+1} = x_k - \frac{x_k^3 - x_k^2}{3x_k^2 - 2x_k}\).

values <- c(0,1,2,3,4,5,6,7,8,9,10)  # starting values to try

# one Newton update: x - f'(x)/f''(x)
my_function <- function(x){
  x - (x^3 - x^2)/(3*x^2 - 2*x)
}

# apply a single update to each starting value
for (i in values) {
  result <- my_function(i)
  print(result)
}
## [1] NaN
## [1] 1
## [1] 1.5
## [1] 2.142857
## [1] 2.8
## [1] 3.461538
## [1] 4.125
## [1] 4.789474
## [1] 5.454545
## [1] 6.12
## [1] 6.785714

A single update from \(x = 0\) returns NaN because \(f''(0) = 0\). Iterating the update from any of the positive starting values converges to \(x = 1\), which is the minimizer since \(f'(1) = 0\) and \(f''(1) = 1 > 0\).
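To confirm, here is a minimal sketch that iterates the same update until successive iterates agree; the helper newton_min and its tolerance are illustrative additions, and the starting values avoid points where \(f''(x) = 0\):

# iterate x - f'(x)/f''(x) until the step size falls below tol
newton_min <- function(x0, tol = 1e-8, max_iter = 100) {
  x <- x0
  for (i in 1:max_iter) {
    x_new <- x - (x^3 - x^2)/(3*x^2 - 2*x)
    if (abs(x_new - x) < tol) break
    x <- x_new
  }
  x_new
}
sapply(c(0.8, 2, 5, 10), newton_min)  # each run approaches 1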

Ex 2.

# f(x) = (3x^4 - 4x^3)/12, whose derivative is the x^3 - x^2 used in Ex 1
f <- function(x) {
  (3*x^4 - 4*x^3) / 12
}
# search for the minimum over [-10, 10]
Min <- optimize(f, interval = c(-10,10), tol = 0.01)
Min
## $minimum
## [1] 1.000417
## 
## $objective
## [1] -0.08333325
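This matches the analytic answer: \(f'(x) = x^3 - x^2 = x^2(x - 1) = 0\) gives critical points \(x = 0\) and \(x = 1\); only \(x = 1\) is a minimum, with \(f(1) = \frac{3 - 4}{12} = -\frac{1}{12} \approx -0.0833\).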

Ex 3.

Using Newton’s method to minimize a multivariate function: the Rosenbrock function \(f(x, y) = (x - 1)^2 + 100(y - x^2)^2\).

# define the function f(x,y)
f <- function(x,y) {
  return((x-1)^2 + 100*(y-x^2)^2)
}

# define the gradient of f(x,y)
grad_f <- function(x,y) {
  return(c(2*(x-1) - 400*x*(y-x^2), 200*(y-x^2)))
}

# define the Hessian of f(x,y)
# (d^2f/dx^2 = 2 - 400*(y - x^2) + 800*x^2; the cross term is -400x)
hess_f <- function(x,y) {
  return(matrix(c(2 - 400*(y-x^2) + 800*x^2, -400*x, -400*x, 200), nrow=2, ncol=2))
}

# initialize the point (x_0, y_0) and iteration counter
x <- c(0, 0)
iter <- 0

# repeat until convergence or maximum number of iterations is reached
while (iter < 100) {
  # calculate the gradient and Hessian at the current point
  grad <- grad_f(x[1], x[2])
  hess <- hess_f(x[1], x[2])
  
  # calculate the search direction using the Hessian
  d <- -solve(hess) %*% grad
  
  # backtracking (Armijo) line search: halve alpha until the step
  # gives sufficient decrease relative to the directional derivative grad'd
  alpha <- 1
  while (f(x[1]+alpha*d[1], x[2]+alpha*d[2]) > f(x[1], x[2]) + 0.5*alpha*t(grad)%*%d) {
    alpha <- alpha/2
  }
  
  # update the current point
  x_new <- x + alpha*d
  
  # check for convergence in function value, keeping the accepted step
  if (abs(f(x_new[1], x_new[2]) - f(x[1], x[2])) < 1e-6) {
    x <- x_new
    break
  }
  
  # update the iteration counter and current point
  iter <- iter + 1
  x <- x_new
}

# print the minimum value and the minimizing point
cat("Minimum value of f(x,y) =", f(x[1], x[2]), "\n")
## Minimum value of f(x,y) = 0.03901471
cat("Minimizing point =", x, "\n")
## Minimizing point = 0.8024786 0.6439695
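For reference, the global minimum of the Rosenbrock function is \(f(1, 1) = 0\); the \(10^{-6}\) function-value tolerance can stop the iteration before the iterates reach \((1, 1)\), since \(f\) changes very slowly along its curved valley.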

Ex 4.

Minimizing the same Rosenbrock function with optimr(). The call below fails: the expression 100(x[2]-x[1]^2)^2 is parsed as an attempt to call 100 as a function, because R has no implicit multiplication.

library(optimr)
my_func <- function(x) {
  return((x[1]-1)^2 + 100(x[2]-x[1]^2)^2)  # bug: missing * after 100
}
optimr(c(-10,10), my_func, method = "L-BFGS-B")
## Error in fn(par, ...) : attempt to apply non-function
## $convergence
## [1] 9999
## 
## $par
## [1] NA NA
## 
## $counts
## [1] NA NA
## 
## $message
## [1] "optim method failure\n"