knitr::include_graphics("D:/Sem5/Optimization/UTS/gambar1.jpg")

knitr::include_graphics("D:/Sem5/Optimization/UTS/gambar2.jpg")

knitr::include_graphics("D:/Sem5/Optimization/UTS/gambar3.jpg")

1. Solve these problems using a mathematical model and R packages (please explain it step by step).

1.1 Call lpSolve Package in R

library(lpSolve)

1.2 Input the coefficients of the objective function

fc <- c(4,3)

1.3 Make the matrix

# Create the constraint matrix
fm <- matrix (c(1,0,
                0,1,
                1,2,
                -2,4,
                -2,1), nrow=5, byrow=TRUE)
fm
##      [,1] [,2]
## [1,]    1    0
## [2,]    0    1
## [3,]    1    2
## [4,]   -2    4
## [5,]   -2    1
# Right-hand side for the constraints
fv <- c(0,2,25,-8,-5)
fv
## [1]  0  2 25 -8 -5

1.4 Make the directions of the constraints

# Direction of the constraints
constraints_direction <- c(">=", ">=", "<=", "<=", "<=")
constraints_direction
## [1] ">=" ">=" "<=" "<=" "<="

1.5 Find the Optimal Solution

# Find the optimal solution
optimum <-  lp(direction="max",
               objective.in = fc,
               const.mat = fm,
               const.dir = constraints_direction,
               const.rhs = fv,
               all.int = T)

str(optimum)
## List of 28
##  $ direction       : int 1
##  $ x.count         : int 2
##  $ objective       : num [1:2] 4 3
##  $ const.count     : int 5
##  $ constraints     : num [1:4, 1:5] 1 0 2 0 0 1 2 2 1 2 ...
##   ..- attr(*, "dimnames")=List of 2
##   .. ..$ : chr [1:4] "" "" "const.dir.num" "const.rhs"
##   .. ..$ : NULL
##  $ int.count       : int 2
##  $ int.vec         : int [1:2] 1 2
##  $ bin.count       : int 0
##  $ binary.vec      : int 0
##  $ num.bin.solns   : int 1
##  $ objval          : num 90
##  $ solution        : num [1:2] 21 2
##  $ presolve        : int 0
##  $ compute.sens    : int 0
##  $ sens.coef.from  : num 0
##  $ sens.coef.to    : num 0
##  $ duals           : num 0
##  $ duals.from      : num 0
##  $ duals.to        : num 0
##  $ scale           : int 196
##  $ use.dense       : int 0
##  $ dense.col       : int 0
##  $ dense.val       : num 0
##  $ dense.const.nrow: int 0
##  $ dense.ctr       : num 0
##  $ use.rw          : int 0
##  $ tmp             : chr "Nobody will ever look at this"
##  $ status          : int 0
##  - attr(*, "class")= chr "lp"

1.6 Check the optimization status

# Print status: 0 = success, 2 = no feasible solution
print(optimum$status)
## [1] 0

1.7 Find the best values for x and y

# Display the optimum values for x, y
best_sol <- optimum$solution
names(best_sol) <- c("x", "y") 
print(best_sol)
##  x  y 
## 21  2

1.8 Find the total profit

# Check the value of objective function at optimal point
print(paste("Maximum Solution: ", optimum$objval, sep=""))
## [1] "Maximum Solution: 90"
# Remove the model objects from the workspace
rm(optimum, constraints_direction, best_sol)

2. Solve these problems using a mathematical model and R packages (please explain it step by step).

2.1 Call lpSolve Package in R

library(lpSolve)

2.2 Input the coefficients of the objective function

fc2 <- c(30000,45000)

2.3 Make the matrix

# Create the constraint matrix
fm2 <- matrix (c(3,4,
                5,6,
                1.5,3), nrow=3, byrow=TRUE)
fm2
##      [,1] [,2]
## [1,]  3.0    4
## [2,]  5.0    6
## [3,]  1.5    3
# Right-hand side for the constraints
fv2 <- c(30,60,21)
fv2
## [1] 30 60 21

2.4 Make the directions of the constraints

# Direction of the constraints
constraints_direction <- c("<=", "<=", "<=")
constraints_direction
## [1] "<=" "<=" "<="

2.5 Find the Optimal Solution

# Find the optimal solution
optimum <-  lp(direction="max",
               objective.in = fc2,
               const.mat = fm2,
               const.dir = constraints_direction,
               const.rhs = fv2,
               all.int = T)

str(optimum)
## List of 28
##  $ direction       : int 1
##  $ x.count         : int 2
##  $ objective       : num [1:2] 30000 45000
##  $ const.count     : int 3
##  $ constraints     : num [1:4, 1:3] 3 4 1 30 5 6 1 60 1.5 3 ...
##   ..- attr(*, "dimnames")=List of 2
##   .. ..$ : chr [1:4] "" "" "const.dir.num" "const.rhs"
##   .. ..$ : NULL
##  $ int.count       : int 2
##  $ int.vec         : int [1:2] 1 2
##  $ bin.count       : int 0
##  $ binary.vec      : int 0
##  $ num.bin.solns   : int 1
##  $ objval          : num 330000
##  $ solution        : num [1:2] 2 6
##  $ presolve        : int 0
##  $ compute.sens    : int 0
##  $ sens.coef.from  : num 0
##  $ sens.coef.to    : num 0
##  $ duals           : num 0
##  $ duals.from      : num 0
##  $ duals.to        : num 0
##  $ scale           : int 196
##  $ use.dense       : int 0
##  $ dense.col       : int 0
##  $ dense.val       : num 0
##  $ dense.const.nrow: int 0
##  $ dense.ctr       : num 0
##  $ use.rw          : int 0
##  $ tmp             : chr "Nobody will ever look at this"
##  $ status          : int 0
##  - attr(*, "class")= chr "lp"

2.6 Check the optimization status

# Print status: 0 = success, 2 = no feasible solution
print(optimum$status)
## [1] 0

2.7 Find the best values for x and y

# Display the optimum values for x, y
best_sol <- optimum$solution
names(best_sol) <- c("x", "y") 
print(best_sol)
## x y 
## 2 6

2.8 Find the total profit

# Check the value of objective function at optimal point
print(paste("Maximum Profit: ", optimum$objval, sep=""))
## [1] "Maximum Profit: 330000"
# Remove the model objects from the workspace
rm(optimum, constraints_direction, best_sol)

3. Please figure out how to optimize the cost effectively to blend your sausages.

The ingredients that I have:

Ingredient      <- c("Chicken","Wheat","Starch")
Cost            <- c(4.32,2.46,1.86)
Availability    <- c(30,20,17)
Sausage         <- data.frame(Ingredient,Cost,Availability)
Sausage
##   Ingredient Cost Availability
## 1    Chicken 4.32           30
## 2      Wheat 2.46           20
## 3     Starch 1.86           17
So we need to express the variables above in a mathematical way:

\(es\) = Economy Sausage

\(ps\) = Premium Sausage

\(c\) = Chicken

\(w\) = Wheat

\(s\) = Starch

The objective function, if we want to minimize the cost, is: \[MIN(Cost) = 4.32(c_{es} + c_{ps}) + 2.46(w_{es} + w_{ps}) + 1.86(s_{es} + s_{ps})\]

And the constraints are:

~Wheat availability: \(w_{es} + w_{ps} ≤ 20\)

~Chicken availability: \(c_{es} + c_{ps} ≤ 53\)

~Starch availability: \(s_{es} + s_{ps} ≤ 17\)

~Proportion of chicken in Economy Sausage (at least 40%): \(c_{es} ≥ 0.4 (c_{es} + w_{es} + s_{es})\)

~Proportion of chicken in Premium Sausage (at least 60%): \(c_{ps} ≥ 0.6 (c_{ps} + w_{ps} + s_{ps})\)

~Proportion of starch in Economy Sausage (at most 25%): \(s_{es} ≤ 0.25 (c_{es} + w_{es} + s_{es})\)

~Proportion of starch in Premium Sausage (at most 25%): \(s_{ps} ≤ 0.25 (c_{ps} + w_{ps} + s_{ps})\)

~Total weight of Economy Sausage (350 sausages at 0.05 kg each): \(c_{es} + w_{es} + s_{es} = 0.05 × 350\)

~Total weight of Premium Sausage (500 sausages at 0.05 kg each): \(c_{ps} + w_{ps} + s_{ps} = 0.05 × 500\)
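
With the model fully specified, it can be solved with lpSolve as in the earlier problems. Below is a minimal sketch, assuming the constraints exactly as listed above, the variable order \(c_{es}, w_{es}, s_{es}, c_{ps}, w_{ps}, s_{ps}\), and the percentage constraints rewritten with all variables on the left-hand side:

library(lpSolve)

# Decision variables (kg): x = (c_es, w_es, s_es, c_ps, w_ps, s_ps)
cost3 <- c(4.32, 2.46, 1.86, 4.32, 2.46, 1.86)

# One row per constraint above; e.g. c_es >= 0.4*(c_es + w_es + s_es)
# becomes 0.6*c_es - 0.4*w_es - 0.4*s_es >= 0
fm3 <- matrix(c(1,     0,     0,     1,     0,     0,      # chicken availability
                0,     1,     0,     0,     1,     0,      # wheat availability
                0,     0,     1,     0,     0,     1,      # starch availability
                0.6,  -0.4,  -0.4,   0,     0,     0,      # >= 40% chicken (economy)
                0,     0,     0,     0.4,  -0.6,  -0.6,    # >= 60% chicken (premium)
               -0.25, -0.25,  0.75,  0,     0,     0,      # <= 25% starch (economy)
                0,     0,     0,    -0.25, -0.25,  0.75,   # <= 25% starch (premium)
                1,     1,     1,     0,     0,     0,      # economy batch weight
                0,     0,     0,     1,     1,     1),     # premium batch weight
              nrow = 9, byrow = TRUE)
fd3 <- c("<=", "<=", "<=", ">=", ">=", "<=", "<=", "=", "=")
fv3 <- c(53, 20, 17, 0, 0, 0, 0, 0.05 * 350, 0.05 * 500)

# Minimize the blending cost (lp() treats all variables as nonnegative)
optimum3 <- lp(direction = "min", objective.in = cost3,
               const.mat = fm3, const.dir = fd3, const.rhs = fv3)
optimum3$solution  # kg of each ingredient per sausage type
optimum3$objval    # minimum total blending cost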

4. Please visualize a contour plot, and tell me how coordinate descent, steepest descent, Newton's method, and conjugate gradient work!

Define the function from the question:

function4 <- function(x, y) {x^2 + y^2 + 3*x*y}  # function from the question
n <- 30
# Evaluate the function on a 30 x 30 grid over [-1.5, 1.5]^2
xpts <- seq(-1.5, 1.5, len = n)
ypts <- seq(-1.5, 1.5, len = n)
gr <- expand.grid(x = xpts, y = ypts)
feval <- with(gr, matrix(function4(x, y), nrow = n, ncol = n))
# Contour plot with the starting point (-1, -1) marked; the horizontal line
# y = -1 shows the first one-dimensional slice used by coordinate descent
par(mar = c(5, 4, 1, 1))
contour(xpts, ypts, feval, nlevels = 20, xlab = "x", ylab = "y")
points(-1, -1, pch = 19, cex = 2)
abline(h = -1)

For the explanations, I use the material from the e-books that Mr Bakti shared in the previous weeks.

4.1 How coordinate descent works

The idea behind coordinate descent methods is simple. If \(f\) is a \(k\)-dimensional function, we can minimize \(f\) by successively minimizing each of the individual dimensions of \(f\) in a cyclic fashion, while holding the values of \(f\) in the other dimensions fixed.

This approach is sometimes referred to as cyclic coordinate descent. The primary advantage of this approach is that it takes an arbitrarily complex \(k\)-dimensional problem and reduces it to a collection of \(k\) one-dimensional problems. The disadvantage is that convergence can often be painfully slow, particularly in problems where \(f\) is not well-behaved. In statistics, a popular version of this algorithm is known as backfitting and is used to fit generalized additive models.

In short, coordinate descent is simple: it consecutively minimizes the function along each individual dimension, while holding the values of the other dimensions constant.
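
As a minimal sketch of the mechanics (using the convex function \(x^2 + y^2 + xy\) for illustration, since function4 above has an indefinite Hessian, i.e. a saddle point rather than a minimum):

# Cyclic coordinate descent: minimize over one coordinate at a time,
# holding the other fixed (convex illustrative example, not function4)
f <- function(x, y) x^2 + y^2 + x * y
x <- -1; y <- -1                                        # starting point, as in the plot
for (i in 1:15) {
  x <- optimize(function(x) f(x, y), c(-2, 2))$minimum  # 1-D minimization in x
  y <- optimize(function(y) f(x, y), c(-2, 2))$minimum  # 1-D minimization in y
}
round(c(x = x, y = y), 4)                               # close to the minimum at (0, 0)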

4.2 How steepest descent works

Perhaps the most obvious direction to choose when attempting to minimize a function \(f\) starting at \(x_n\) is the direction of steepest descent, or \(-f'(x_n)\).

Steepest descent is a simple gradient method for minimizing a function. Its convergence toward the optimal solution can be very slow: when certain parameters are highly correlated with each other, the algorithm takes many small zig-zag steps to reach the minimum.
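
A minimal fixed-step sketch on the same convex example (the step size \(\alpha = 0.1\) is an assumption for illustration):

# Steepest descent: repeatedly step in the direction of the negative gradient
f    <- function(p) p[1]^2 + p[2]^2 + p[1] * p[2]
grad <- function(p) c(2 * p[1] + p[2], 2 * p[2] + p[1])  # analytic gradient

p <- c(-1, -1)                        # starting point
alpha <- 0.1                          # fixed step size (assumed)
for (i in 1:100) p <- p - alpha * grad(p)
round(p, 6)                           # approaches the minimum at (0, 0)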

4.3 How Newton's method works

Newton's method originated as a method for estimating the zeros of a function, i.e. solving \(f(x) = 0\). It uses tangent lines to estimate where the graph of the function intersects the x-axis.

Newton’s method makes a quadratic approximation to the target function \(f\) at each step of the algorithm. This follows the “optimization transfer” principle mentioned earlier, whereby we take a complex function \(f\), replace it with a simpler function \(g\) that is easier to optimize, and then optimize the simpler function repeatedly until convergence to the solution.
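
A one-step sketch on the same convex example (for a quadratic function, the quadratic approximation is exact, so a single Newton step lands on the minimum):

# Newton's method for optimization: p_new = p - H^{-1} * grad f(p)
grad <- function(p) c(2 * p[1] + p[2], 2 * p[2] + p[1])  # gradient of x^2 + y^2 + x*y
H    <- matrix(c(2, 1, 1, 2), nrow = 2)                  # constant Hessian

p <- c(-1, -1)
p <- p - solve(H, grad(p))  # Newton step
p                           # exactly (0, 0), the minimum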

4.4 How conjugate gradient works

The conjugate gradient method is a steepest-descent-type approach “with a twist”. With steepest descent, we begin minimizing a function \(f\) at \(x_0\) by traveling in the direction of the negative gradient \(-f'(x_0)\). We then continue in the direction of the negative gradient evaluated at each successive point until convergence.

The conjugate gradient approach begins in the same manner, but diverges from steepest descent after the first step. In subsequent steps, the direction of travel must be conjugate to the direction most recently traveled.
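
In R, a quick way to see it in action is the built-in optim() with method = "CG" (same convex example as above):

# Conjugate gradient via R's general-purpose optimizer
f    <- function(p) p[1]^2 + p[2]^2 + p[1] * p[2]
grad <- function(p) c(2 * p[1] + p[2], 2 * p[2] + p[1])

res <- optim(par = c(-1, -1), fn = f, gr = grad, method = "CG")
res$par  # approximately (0, 0)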