This is adapted from the example code of the R package ‘nnls’ (non-negative least squares regression).

Here, given a matrix A and a response vector y, the goal is to find:

\(\arg\min_x \|Ax - y\|_2\) subject to \(x \ge 0\)

where x is the vector of unknowns (the decision variable).
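
For a single response vector the call is simply nnls(A, y), and coef() extracts the fitted x; here is a minimal sketch (the tiny system A_small / y_small below is made up purely for illustration):

library(nnls)
## a tiny made-up system: 3 observations, 2 coefficients
A_small <- matrix(c(1, 0,
                    1, 1,
                    1, 2), nrow = 3, byrow = TRUE)
y_small <- c(1, 0, -1)
fit <- nnls(A_small, y_small)
coef(fit)                                       # fitted x, constrained to be >= 0
sqrt(sum((A_small %*% coef(fit) - y_small)^2))  # residual norm ||Ax - y||_2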

## simulate a matrix A
## with 3 columns, each containing an exponential decay 
library(nnls)
t <- seq(0, 2, by = .04)
k <- c(.5, .6, 1)
A <- matrix(nrow = 51, ncol = 3)
Acolfunc <- function(k, t) exp(-k*t)
for(i in 1:3) A[,i] <- Acolfunc(k[i],t)

## simulate a matrix X
## with 3 columns, each containing a Gaussian shape 
## the Gaussian shapes are non-negative
X <- matrix(nrow = 51, ncol = 3)
wavenum <- seq(18000,28000, by=200)
location <- c(25000, 22000, 20000) 
delta <- c(3000,3000,3000)
Xcolfunc <- function(wavenum, location, delta)
  exp( - log(2) * (2 * (wavenum - location)/delta)^2)
for(i in 1:3) X[,i] <- Xcolfunc(wavenum, location[i], delta[i])

## set seed for reproducibility
set.seed(3300)

## simulated data is the product of A and X with some
## spherical Gaussian noise added 
matdat <- A %*% t(X) + .005 * rnorm(nrow(A) * nrow(X))
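
## note (for orientation): matdat is 51 x 51 (time points by wavenumbers),
## and its i-th column equals A %*% X[i, ] plus noise; that is why, below,
## regressing each column of matdat on A recovers one row of X.
## quick check on the noise-free part (should be ~0 up to floating point):
max(abs((A %*% t(X))[, 1] - A %*% X[1, ]))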

## estimate the rows of X using the NNLS criterion
nnls_sol <- function(matdat, A) {
  X <- matrix(0, nrow = 51, ncol = 3)
  for(i in 1:ncol(matdat))
    X[i,] <- coef(nnls(A, matdat[,i]))
  X
}
X_nnls <- nnls_sol(matdat,A) 
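
## sanity check (optional): NNLS coefficients are non-negative by construction
all(X_nnls >= 0)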

## can solve the same problem with L-BFGS-B algorithm
## but need starting values for x 
bfgs_sol <- function(matdat, A) {
  ## minimize ||A x - b||^2 with the box constraint x >= 0 via optim()
  startval <- rep(0, ncol(A))
  fn1 <- function(par1, b, A) sum((b - A %*% par1)^2)
  X <- matrix(0, nrow = 51, ncol = 3)
  for(i in 1:ncol(matdat))
    X[i,] <- optim(startval, fn = fn1, b = matdat[,i], A = A,
                   lower = rep(0, 3), method = "L-BFGS-B")$par
  X
}
X_bfgs <- bfgs_sol(matdat,A) 
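
## both solvers target the same constrained least-squares problem, so their
## coefficient estimates should agree closely; a quick numeric check:
max(abs(X_nnls - X_bfgs))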

## compare the two methods graphically
## par(mfrow = c(1, 2))   # optional: show both plots side by side
matplot(X_nnls, type = "b", pch = 20, main = "NNLS")
abline(0, 0, col = grey(.6))

matplot(X_bfgs, type = "b", pch = 20, main = "L-BFGS-B")
abline(0, 0, col = grey(.6))

## the RMS deviation (RMSD/RMSE) from the true X is smaller under NNLS
## than under L-BFGS-B
sqrt(sum((X - X_nnls)^2))
## [1] 1.017027
sqrt(sum((X - X_bfgs)^2))
## [1] 1.102575
sqrt(sum((X - X_nnls)^2)) < sqrt(sum((X - X_bfgs)^2))
## [1] TRUE
## and L-BFGS-B is much slower 
system.time(nnls_sol(matdat,A))
##    user  system elapsed 
##   0.001   0.000   0.001
system.time(bfgs_sol(matdat,A))
##    user  system elapsed 
##   0.015   0.001   0.015
## can also solve the same problem by reformulating it as a
## quadratic program (this requires the quadprog package; if you
## have quadprog installed, uncomment the lines below that start
## with a single "#")
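
## for reference, the mapping to solve.QP: quadprog::solve.QP minimizes
##   (1/2) x' D x - d' x   subject to   t(Amat) x >= bvec,
## and ||Ax - y||^2 = x'(A'A)x - 2(A'y)'x + const, so Dmat = A'A,
## dvec = A'y, Amat = I (identity) and bvec = 0 give the same minimizer
## as the non-negative least squares problem.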

# library(quadprog)

# quadprog_sol <- function(matdat, A) {
#  X <- matrix(0, nrow = 51, ncol = 3)
#  bvec <- rep(0, ncol(A))
#  Dmat <- crossprod(A,A)
#  Amat <- diag(ncol(A))
#  for(i in 1:ncol(matdat)) { 
#    dvec <- crossprod(A,matdat[,i]) 
#    X[i,] <- solve.QP(dvec = dvec, bvec = bvec, Dmat=Dmat,
#                      Amat=Amat)$solution
#  }
#  X
# }
# X_quadprog <- quadprog_sol(matdat,A) 

## the RMS deviation under NNLS is about the same as under quadprog 
# sqrt(sum((X - X_nnls)^2))
# sqrt(sum((X - X_quadprog)^2))

## and quadprog requires about the same amount of time 
# system.time(nnls_sol(matdat,A))
# system.time(quadprog_sol(matdat,A))

