##      [,1] [,2]
## [1,]    1    0
## [2,]    0    1
## [3,]    1    2
## [4,]   -2    4
## [5,]   -2    1
## [1]  0  2 25 -8 -5
# Direction of the constraints
constraints_direction <- c(">=", ">=", "<=", "<=", "<=")
constraints_direction
## [1] ">=" ">=" "<=" "<=" "<="
# Find the optimal solution
optimum <- lp(direction = "max",
              objective.in = fc,
              const.mat = fm,
              const.dir = constraints_direction,
              const.rhs = fv,
              all.int = TRUE)
str(optimum)
## List of 28
## $ direction : int 1
## $ x.count : int 2
## $ objective : num [1:2] 4 3
## $ const.count : int 5
## $ constraints : num [1:4, 1:5] 1 0 2 0 0 1 2 2 1 2 ...
## ..- attr(*, "dimnames")=List of 2
## .. ..$ : chr [1:4] "" "" "const.dir.num" "const.rhs"
## .. ..$ : NULL
## $ int.count : int 2
## $ int.vec : int [1:2] 1 2
## $ bin.count : int 0
## $ binary.vec : int 0
## $ num.bin.solns : int 1
## $ objval : num 90
## $ solution : num [1:2] 21 2
## $ presolve : int 0
## $ compute.sens : int 0
## $ sens.coef.from : num 0
## $ sens.coef.to : num 0
## $ duals : num 0
## $ duals.from : num 0
## $ duals.to : num 0
## $ scale : int 196
## $ use.dense : int 0
## $ dense.col : int 0
## $ dense.val : num 0
## $ dense.const.nrow: int 0
## $ dense.ctr : num 0
## $ use.rw : int 0
## $ tmp : chr "Nobody will ever look at this"
## $ status : int 0
## - attr(*, "class")= chr "lp"
## [1] 0
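The status value of 0 above indicates that lpSolve found an optimal solution. As a small sketch (using only fields shown in the str() output above), the result can be read off the returned lp object:

# Status 0 means an optimal solution was found
optimum$status
# Optimal values of the decision variables (here 21 and 2)
optimum$solution
# Value of the objective function at the optimum (here 90)
optimum$objval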
##      [,1] [,2]
## [1,]  3.0    4
## [2,]  5.0    6
## [3,]  1.5    3
## [1] 30 60 21
## [1] "<=" "<=" "<="
# Find the optimal solution
optimum <- lp(direction = "max",
              objective.in = fc2,
              const.mat = fm2,
              const.dir = constraints_direction,
              const.rhs = fv2,
              all.int = TRUE)
str(optimum)
## List of 28
## $ direction : int 1
## $ x.count : int 2
## $ objective : num [1:2] 30000 45000
## $ const.count : int 3
## $ constraints : num [1:4, 1:3] 3 4 1 30 5 6 1 60 1.5 3 ...
## ..- attr(*, "dimnames")=List of 2
## .. ..$ : chr [1:4] "" "" "const.dir.num" "const.rhs"
## .. ..$ : NULL
## $ int.count : int 2
## $ int.vec : int [1:2] 1 2
## $ bin.count : int 0
## $ binary.vec : int 0
## $ num.bin.solns : int 1
## $ objval : num 330000
## $ solution : num [1:2] 2 6
## $ presolve : int 0
## $ compute.sens : int 0
## $ sens.coef.from : num 0
## $ sens.coef.to : num 0
## $ duals : num 0
## $ duals.from : num 0
## $ duals.to : num 0
## $ scale : int 196
## $ use.dense : int 0
## $ dense.col : int 0
## $ dense.val : num 0
## $ dense.const.nrow: int 0
## $ dense.ctr : num 0
## $ use.rw : int 0
## $ tmp : chr "Nobody will ever look at this"
## $ status : int 0
## - attr(*, "class")= chr "lp"
## [1] 0
The ingredients that I have:
Ingredient <- c("Chicken","Wheat","Starch")
Cost <- c(4.32,2.46,1.86)
Availability <- c(30,20,17)
Sausage <- data.frame(Ingredient,Cost,Availability)
Sausage
##   Ingredient Cost Availability
## 1    Chicken 4.32           30
## 2      Wheat 2.46           20
## 3     Starch 1.86           17
\(es\) = Economy Sausage
\(ps\) = Premium Sausage
\(c\) = Chicken
\(w\) = Wheat
\(s\) = Starch
The objective function, if we want to minimize the cost, is: \[MIN(Cost) = 4.32(c_{es} + c_{ps}) + 2.46(w_{es} + w_{ps}) + 1.86(s_{es} + s_{ps})\]
And the constraints are (a sketch of the lpSolve code for this formulation follows the list):
~Wheat = \(w_{es} + w_{ps} ≤ 20\)
~Chicken = \(c_{es} + c_{ps} ≤ 53\)
~Starch = \(s_{es} + s_{ps} ≤ 17\)
~Ingredient of Chicken in Economy Sausage = \(c_{es} ≥ 0.4 (c_{es} + w_{es} + s_{es})\)
~Ingredient of Chicken in Premium Sausage = \(c_{ps} ≥ 0.6 (c_{ps} + w_{ps} + s_{ps})\)
~Ingredient of Starch in Economy Sausage = \(s_{es} ≥ 0.25 (c_{es} + w_{es} + s_{es})\)
~Ingredient of Starch in Premium Sausage = \(s_{ps} ≥ 0.25 (c_{ps} + w_{ps} + s_{ps})\)
~The Profit of Economy Sausage = \(0.05 × 350\)
~The Profit of Premium Sausage = \(0.05 × 500\)
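As a sketch of how this formulation could be passed to lpSolve::lp() (my own code, not from the original post): the variable ordering, the names cost, con, dir, and rhs, and the reading of the last two list items as the total weight in kilograms that each sausage type must reach (0.05 kg per sausage) are all assumptions.

library(lpSolve)

# Decision variables (assumed order): c_es, w_es, s_es, c_ps, w_ps, s_ps
# = kilograms of chicken, wheat, and starch in each sausage type
cost <- c(4.32, 2.46, 1.86, 4.32, 2.46, 1.86)

# One row per constraint, in the same order as the list above; the last two
# rows are the assumed total-weight requirements (0.05 kg x number of sausages)
con <- rbind(
  c(1, 0, 0, 1, 0, 0),              # chicken: c_es + c_ps <= 53
  c(0, 1, 0, 0, 1, 0),              # wheat:   w_es + w_ps <= 20
  c(0, 0, 1, 0, 0, 1),              # starch:  s_es + s_ps <= 17
  c(0.6, -0.4, -0.4, 0, 0, 0),      # c_es >= 0.4 * (c_es + w_es + s_es)
  c(0, 0, 0, 0.4, -0.6, -0.6),      # c_ps >= 0.6 * (c_ps + w_ps + s_ps)
  c(-0.25, -0.25, 0.75, 0, 0, 0),   # s_es >= 0.25 * (c_es + w_es + s_es)
  c(0, 0, 0, -0.25, -0.25, 0.75),   # s_ps >= 0.25 * (c_ps + w_ps + s_ps)
  c(1, 1, 1, 0, 0, 0),              # economy sausage total weight
  c(0, 0, 0, 1, 1, 1)               # premium sausage total weight
)
dir <- c("<=", "<=", "<=", ">=", ">=", ">=", ">=", "=", "=")
rhs <- c(53, 20, 17, 0, 0, 0, 0, 0.05 * 350, 0.05 * 500)

sausage_lp <- lp(direction = "min", objective.in = cost,
                 const.mat = con, const.dir = dir, const.rhs = rhs)
sausage_lp$solution   # kilograms of each ingredient
sausage_lp$objval     # minimum total cost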
Define an example function to illustrate the optimization methods:
function4 <- function(x, y) {x^2 + y^2 + 3*x*y}
n <- 30
xpts <- seq(-1.5, 1.5, len = n)
ypts <- seq(-1.5, 1.5, len = n)
gr <- expand.grid(x = xpts, y = ypts)
feval <- with(gr, matrix(function4(x, y), nrow = n, ncol = n))
par(mar = c(5, 4, 1, 1))
contour(xpts, ypts, feval, nlevels = 20, xlab = "x", ylab = "y")
points(-1, -1, pch = 19, cex = 2)
abline(h = -1)

For the explanation, I took the material that Mr Bakti gave me from the e-books in the previous weeks.
The idea behind coordinate descent methods is simple. If \(f\) is a \(k\)-dimensional function, we can minimize \(f\) by successively minimizing each of the individual dimensions of \(f\) in a cyclic fashion, while holding the values of \(f\) in the other dimensions fixed.
This approach is sometimes referred to as cyclic coordinate descent. The primary advantage of this approach is that it takes an arbitrarily complex \(k\)-dimensional problem and reduces it to a collection of \(k\) one-dimensional problems. The disadvantage is that convergence can often be painfully slow, particularly in problems where \(f\) is not well-behaved. In statistics, a popular version of this algorithm is known as backfitting and is used to fit generalized additive models.
So the idea of coordinate descent is simple: the method successively minimizes the function along each individual dimension, while holding the values of the other dimensions fixed.
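As an illustrative sketch (my own code, not from the course material), cyclic coordinate descent can be written as repeated one-dimensional optimize() calls. It is applied here to an assumed convex test function f_demo rather than function4 above, since the coordinate-wise minimizations need a well-behaved function to converge:

# Convex test function assumed for this demo (minimum at (0, 0))
f_demo <- function(x, y) x^2 + y^2 + x * y

# Cyclic coordinate descent: minimize along each coordinate in turn,
# holding the other coordinate fixed, until the point stops moving
coord_descent <- function(f, x0, tol = 1e-8, max_iter = 100) {
  x <- x0
  for (i in seq_len(max_iter)) {
    x_old <- x
    x[1] <- optimize(function(u) f(u, x[2]), interval = c(-10, 10))$minimum
    x[2] <- optimize(function(v) f(x[1], v), interval = c(-10, 10))$minimum
    if (sum((x - x_old)^2) < tol) break
  }
  x
}
coord_descent(f_demo, c(-1, -1))   # converges towards (0, 0)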
Perhaps the most obvious direction to choose when attempting to minimize a function \(f\) starting at \(x_n\) is the direction of steepest descent, or \(-f'(x_n)\).
Steepest descent is a simple gradient method for minimizing a function. Its convergence towards the optimal solution can be slow; in particular, when parameters are highly correlated with one another, the steepest descent algorithm can take many steps to reach the minimum.
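A minimal sketch of the update rule \(x_{n+1} = x_n - \alpha f'(x_n)\); the step size, starting point, and quadratic test function below are illustrative choices of mine:

# Quadratic test function and its gradient (assumed for this demo)
f_sd    <- function(x) x[1]^2 + x[2]^2 + x[1] * x[2]
grad_sd <- function(x) c(2 * x[1] + x[2], 2 * x[2] + x[1])

steepest_descent <- function(f, grad, x0, step = 0.1, tol = 1e-8, max_iter = 1000) {
  x <- x0
  for (i in seq_len(max_iter)) {
    g <- grad(x)
    if (sum(g^2) < tol) break   # stop when the gradient is (almost) zero
    x <- x - step * g           # move in the direction of steepest descent
  }
  x
}
steepest_descent(f_sd, grad_sd, c(-1, -1))   # converges towards (0, 0)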
Newton's method is a method for estimating a zero of a function, i.e. a point where \(f(x) = 0\). This method uses tangents to estimate the intersection of the graph of the function with the x-axis.
Newton’s method makes a quadratic approximation to the target function \(f\) at each step of the algorithm. This follows the “optimization transfer” principle mentioned earlier, whereby we take a complex function \(f\), replace it with a simpler function \(g\) that is easier to optimize, and then optimize the simpler function repeatedly until convergence to the solution.
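A minimal sketch of the root-finding iteration \(x_{n+1} = x_n - f(x_n)/f'(x_n)\); the test function \(f(x) = x^2 - 2\) and the starting point are illustrative choices, not from the original post:

# Newton's method for a root of f(x) = 0, following the tangent line to the x-axis
newton_root <- function(f, fprime, x0, tol = 1e-10, max_iter = 50) {
  x <- x0
  for (i in seq_len(max_iter)) {
    step <- f(x) / fprime(x)
    x <- x - step
    if (abs(step) < tol) break
  }
  x
}
newton_root(function(x) x^2 - 2, function(x) 2 * x, x0 = 1)   # ~ 1.414214
# Applied to f'(x) instead of f(x), the same update minimizes f via the
# quadratic approximation described above: x_{n+1} = x_n - f'(x_n) / f''(x_n)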
The conjugate gradient method is the steepest descent approach “with a twist”. With steepest descent, we begin minimizing a function \(f\) at \(x_0\) by traveling in the direction of the negative gradient \(-f'(x_0)\). We then continue in the direction of the negative gradient evaluated at each successive point until convergence.
The conjugate gradient approach begins in the same manner, but diverges from steepest descent after the first step. In subsequent steps, the direction of travel must be conjugate to the direction most recently traveled.
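Base R's optim() provides a conjugate gradient implementation via method = "CG"; the test function and starting point below are illustrative choices of mine, not from the original post:

# Conjugate gradient minimization of an assumed quadratic test function
f_cg    <- function(x) x[1]^2 + x[2]^2 + x[1] * x[2]
grad_cg <- function(x) c(2 * x[1] + x[2], 2 * x[2] + x[1])

optim(par = c(-1, -1), fn = f_cg, gr = grad_cg, method = "CG")$par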