# Parameters
alpha <- 20
beta <- -0.05
theta <- 10
# Sample some points along x axis
n <- 100
x <- seq(n)
# Make y = f(x) + Gaussian_noise
data.df <- data.frame(x = x,
y = alpha * exp(beta * x) + theta + rnorm(n))
# plot data
plot(data.df$x, data.df$y)
The constant \(\theta\) adds complexity to the problem. Subtracting it and taking logs linearizes the model:
\[ y = \alpha e^{\beta x} + \theta \]
\[ y - \theta = \alpha e^{\beta x} \]
\[ \log(y - \theta) = \log(\alpha) + \beta x \]
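As a quick sanity check (a sketch using the true \(\theta\) from the simulation above), plotting \(\log(y - \theta)\) against \(x\) should look roughly linear wherever the exponential term still dominates the noise; points pushed below \(\theta\) by noise must be dropped first:
# Visual check of the linearization with the known theta;
# keep only points where y > theta so log() is defined
ok <- data.df$y > theta
plot(data.df$x[ok], log(data.df$y[ok] - theta))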
When fitting with nls() (nonlinear least squares, which uses a Gauss-Newton algorithm by default), the key is to provide a set of good starting parameters.
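To see why (a quick sketch, not part of the original workflow): handing nls() arbitrary starting values on the same data typically aborts with a singular-gradient or step-factor error, which try() lets us observe without stopping the script:
# Naive starting values usually make the fit fail outright
# (the exact error message depends on the simulated data)
try(nls(y ~ alpha * exp(beta * x) + theta,
        data = data.df,
        start = list(alpha = 1, beta = 1, theta = 1)))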
The strategy is to pick an empirical \(\theta\) first, then estimate \(\alpha\) and \(\beta\) with a linear model on the transformed data.
# Pick a rough theta: it must lie below min(y) so that log(y - theta.0)
# is defined (halving min(y) assumes the data are positive)
theta.0 <- min(data.df$y) * 0.5
# Estimate the remaining parameters with a linear model on log(y - theta.0)
model.0 <- lm(log(y - theta.0) ~ x, data = data.df)
alpha.0 <- exp(coef(model.0)[1])
beta.0 <- coef(model.0)[2]
# Starting parameters
start <- list(alpha = alpha.0, beta = beta.0, theta = theta.0)
start
## $alpha
## (Intercept)
## 18.15366
##
## $beta
## x
## -0.01384194
##
## $theta
## [1] 3.885767
model <- nls(y ~ alpha * exp(beta * x) + theta, data = data.df, start = start)
# Plot fitted curve
plot(data.df$x, data.df$y)
lines(data.df$x, predict(model, list(x = data.df$x)), col = 'skyblue', lwd = 3)
summary(model)
##
## Formula: y ~ alpha * exp(beta * x) + theta
##
## Parameters:
## Estimate Std. Error t value Pr(>|t|)
## alpha 20.238189 0.491026 41.22 <2e-16 ***
## beta -0.046922 0.002508 -18.71 <2e-16 ***
## theta 9.740478 0.225244 43.24 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.078 on 97 degrees of freedom
##
## Number of iterations to convergence: 6
## Achieved convergence tolerance: 6.368e-06
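As an alternative worth noting (not used in this post), the stats package ships self-starting models: SSasymp() encodes the same curve as Asym + (R0 - Asym) * exp(-exp(lrc) * x), so nls() can compute its own starting values and no manual start list is needed:
# Self-starting fit of the same curve;
# mapping: theta = Asym, alpha = R0 - Asym, beta = -exp(lrc)
model.ss <- nls(y ~ SSasymp(x, Asym, R0, lrc), data = data.df)
coef(model.ss)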
# Likelihood profile (tau) vs. each parameter
plot(profile(model))
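The same profiles drive likelihood-based confidence intervals: confint() on an nls fit profiles each parameter (delegating to the recommended MASS package) instead of relying on the Wald standard errors printed by summary():
# Profile-likelihood confidence intervals for alpha, beta and theta
confint(model)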
# Parameters
alpha <- -20
beta <- -0.05
theta <- 30
# Sample some points along x axis
n <- 100
x <- seq(n)
# Make y = f(x) + Gaussian_noise
data.df <- data.frame(x = x,
y = alpha * exp(beta * x) + theta + rnorm(n))
# plot data
plot(data.df$x, data.df$y)
# Prepare good starting values: alpha is now negative, so theta must lie above max(y)
theta.0 <- max(data.df$y) * 1.1
model.0 <- lm(log(theta.0 - y) ~ x, data = data.df)
alpha.0 <- -exp(coef(model.0)[1])
beta.0 <- coef(model.0)[2]
start <- list(alpha = alpha.0, beta = beta.0, theta = theta.0)
# Fit the model
model <- nls(y ~ alpha * exp(beta * x) + theta, data = data.df, start = start)
# Add fitted curve
plot(data.df$x, data.df$y)
lines(data.df$x, predict(model, list(x = data.df$x)), col = 'skyblue', lwd = 3)
summary(model)
##
## Formula: y ~ alpha * exp(beta * x) + theta
##
## Parameters:
## Estimate Std. Error t value Pr(>|t|)
## alpha -20.046670 0.490268 -40.89 <2e-16 ***
## beta -0.052837 0.002621 -20.16 <2e-16 ***
## theta 29.615340 0.188005 157.52 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.01 on 97 degrees of freedom
##
## Number of iterations to convergence: 7
## Achieved convergence tolerance: 2.978e-07
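To avoid repeating the heuristic for each sign of \(\alpha\), one could wrap it in a small helper. exp_start() below is a hypothetical function (not from the original post) that places theta.0 on the correct side of the data depending on the apparent trend, then backs out alpha.0 and beta.0 with lm() exactly as above:
# Hypothetical helper: guess nls() starting values for
# y = alpha * exp(beta * x) + theta, for either sign of alpha
exp_start <- function(x, y) {
  decaying <- y[1] > y[length(y)]   # crude trend check; noisy endpoints may fool it
  if (decaying) {
    theta.0 <- min(y) * 0.5         # theta must lie below min(y); assumes min(y) > 0
    fit <- lm(log(y - theta.0) ~ x)
    alpha.0 <- exp(coef(fit)[[1]])
  } else {
    theta.0 <- max(y) * 1.1         # theta must lie above max(y); assumes max(y) > 0
    fit <- lm(log(theta.0 - y) ~ x)
    alpha.0 <- -exp(coef(fit)[[1]])
  }
  list(alpha = alpha.0, beta = coef(fit)[[2]], theta = theta.0)
}
# Usage:
# start <- exp_start(data.df$x, data.df$y)
# nls(y ~ alpha * exp(beta * x) + theta, data = data.df, start = start)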
Reference: http://stats.stackexchange.com/questions/32824/how-to-minimize-residual-sum-of-squares-of-an-exponential-fit/32832