These examples are essentially lifted, with slightly different starting values, from the NCEAS Non-Linear Modeling Working Group and from Mixed-Effects Models in S and S-PLUS by José Pinheiro and Douglas Bates. Both are wonderful resources for fitting non-linear models. All examples use the Orange dataset, available through the base datasets package.
library(nlme)
library(brms) # at least 0.8
library(ggplot2)
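For reference, the Orange data contain the grouping factor Tree along with age and circumference measurements for each tree:
str(Orange)
head(Orange)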
# three-parameter logistic growth curve:
# phi1 = asymptote, phi2 = age at inflection, phi3 = growth scale
f1 <- circumference ~ phi1 / (1 + exp(-(age - phi2)/phi3))
n1 <- nls(f1,
data = Orange,
start = list(phi1 = 200, phi2 = 700, phi3 = 350))
prior_1 <- c(set_prior("normal(200, 50)", nlpar = "phi1"),
set_prior("normal(700, 50)", nlpar = "phi2"),
set_prior("normal(350, 50)", nlpar = "phi3"))
n1_b <- brm(f1,
data = Orange,
nonlinear = phi1 + phi2 + phi3 ~ 1,
prior = prior_1,
chains = 3)
summary(n1)
##
## Formula: circumference ~ phi1/(1 + exp(-(age - phi2)/phi3))
##
## Parameters:
## Estimate Std. Error t value Pr(>|t|)
## phi1 192.7 20.2 9.52 7.5e-11 ***
## phi2 728.8 107.3 6.79 1.1e-07 ***
## phi3 353.5 81.5 4.34 0.00013 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 23.4 on 32 degrees of freedom
##
## Number of iterations to convergence: 3
## Achieved convergence tolerance: 2.91e-06
summary(n1_b)
## Family: gaussian (identity)
## Formula: circumference ~ phi1/(1 + exp(-(age - phi2)/phi3))
## Data: Orange (Number of observations: 35)
## Samples: 3 chains, each with iter = 2000; warmup = 1000; thin = 1;
## total post-warmup samples = 3000
## WAIC: Not computed
##
## Fixed Effects:
## Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## phi1_Intercept 190.2 9.11 173.3 208.9 954 1
## phi2_Intercept 709.5 42.58 626.0 794.2 1108 1
## phi3_Intercept 349.6 39.13 274.8 428.0 1006 1
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## sigma(circumference) 23.78 2.95 18.8 30.38 1211 1
##
## Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample
## is a crude measure of effective sample size, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
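As a quick visual check, we can overlay the two fitted curves on the raw data (this assumes the first column returned by fitted() on the brms fit holds the posterior mean):
or_fit <- Orange
or_fit$nls_fit <- fitted(n1)
or_fit$brms_fit <- fitted(n1_b)[, 1]  # assumes column 1 is the posterior mean
ggplot(or_fit, aes(x = age)) +
  geom_point(aes(y = circumference)) +
  geom_line(aes(y = nls_fit, colour = "nls")) +
  geom_line(aes(y = brms_fit, colour = "brms")) +
  labs(colour = "model") + ylab("circumference") +
  ggtitle("Fitted logistic growth curves")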
The following models are the same as above, except that phi1 now gets a tree-level random effect.
n2 <- nlme(f1,
data = Orange,
fixed = phi1 + phi2 + phi3 ~ 1,
random = phi1 ~ 1,
groups = ~ Tree,
start = coef(n1))
# prior_1 plus a prior on the random-effect sd; is there a way to update a `prior_frame`?
prior_2 <- rbind(prior_1, set_prior("cauchy(30,2)", class = "sd"))
n2_b <- brm(f1,
data = Orange,
nonlinear = list(phi1 ~ (1|Tree),
phi2 ~ 1,
phi3 ~ 1),
prior = prior_2,
chains = 3,
iter = 4000)
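ranef() works on both fits, so we can eyeball the tree-level deviations in phi1 (the output formats differ between nlme and brms):
ranef(n2)
ranef(n2_b)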
# pull the posterior estimates and interval bounds for the three fixed effects
n2_b_coef <- stanplot(n2_b)$data[1:3, c(4, 3, 8)]
names(n2_b_coef) <- c('lower', 'est.', 'upper')
n2_b_coef$grp <- "brms"
n2_b_coef$name <- c('phi1', 'phi2', 'phi3')
n2_coef <- data.frame(intervals(n2)$fixed)
n2_coef$grp <- "nlme"
n2_coef$name <- c('phi1', 'phi2', 'phi3')
coefs <- rbind(n2_b_coef, n2_coef)
names(coefs)[2] <- 'est'
ggplot(coefs, aes(x = name, y = est, colour = grp)) +
geom_pointrange(position = position_dodge(width = .3),
aes(ymin = lower, ymax = upper)) +
xlab('coef') + ylab('estimate') +
ggtitle('Fixed Effects')
The following models include two (uncorrelated) random effects:
n3 <- nlme(f1,
data = Orange,
fixed = phi1 + phi2 + phi3 ~ 1,
random = pdDiag(phi1 + phi2 ~ 1),
groups = ~ Tree,
start = coef(n1))
n3_b <- brm(f1,
data = Orange,
nonlinear = list(phi1 ~ (1|Tree),
phi2 ~ (1|Tree),
phi3 ~ 1),
prior = prior_2,
chains = 3,
iter = 4000)
Just the random-effect variance components:
VarCorr(n3)
## Tree = pdDiag(list(phi1 ~ 1,phi2 ~ 1))
## Variance StdDev
## phi1 1012.13 31.814
## phi2 502.72 22.421
## Residual 59.27 7.699
VarCorr(n3_b)
## Estimate Group Name Std.Dev Cov
## mean phi1_Tree Intercept 31.48 1018.17
## phi2_Tree Intercept 30.77 1033.57
## RESIDUAL circumference 8.21 68.86
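To check whether the second random effect earns its keep, we could compare the two brms fits by WAIC (assuming your brms version supports passing multiple models to WAIC()):
WAIC(n2_b, n3_b)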
Say we treated some of the trees with poison, and some with water. We’ll generate some fake trees to add to our data.
set.seed(1)
bad_or <- Orange
good_or <- Orange
# five new trees, numbered 6-10
bad_or$Tree <- as.factor(as.numeric(levels(bad_or$Tree))[bad_or$Tree] + 5)
# shrink the poisoned trees by 15, plus a single random offset shared by all rows
bad_or$circumference <- bad_or$circumference - 15 + round(rnorm(1, 0, 5))
bad_or$treat <- 'poison'
good_or$treat <- 'water'
or2 <- rbind(good_or, bad_or)
or2$treat <- factor(or2$treat)
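A quick plot makes the gap between the watered and poisoned trees visible before fitting anything:
ggplot(or2, aes(x = age, y = circumference, colour = treat, group = Tree)) +
  geom_line() +
  ggtitle("Real and fake (poisoned) trees")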
These models are nearly identical to n2 and n2_b. The get_prior() function is a tremendous help for finding the names of coefficients/parameters to set priors on.
print(get_prior(f1,
data = or2,
nonlinear = list(phi1 ~ (1|Tree),
phi2 ~ 1,
phi3 ~ 0 + treat)))
## prior class coef group nlpar bound
## 1 b
## 2 b phi1
## 3 b phi2
## 4 b phi3
## 5 b Intercept phi1
## 6 b Intercept phi2
## 7 b treatpoison phi3
## 8 b treatwater phi3
## 9 student_t(3, 0, 58) sd
## 10 sd Tree group
## 11 sd Intercept Tree group
## 12 student_t(3, 0, 58) sigma
## 13 sigma circumference
n4 <- nlme(f1,
data = or2,
fixed = list(phi1 ~ 1,
phi2 ~ 1,
phi3 ~ 0 + treat),
random = phi1 ~ 1,
groups = ~ Tree,
start = c(190, 700, 300, 300)) # phi1, phi2,
# phi3(poison), phi3(water)
prior_4 <- c(set_prior("normal(200, 50)", nlpar = "phi1"),
set_prior("normal(700, 50)", nlpar = "phi2"),
set_prior("normal(350, 50)", nlpar = "phi3", coef = "treatwater"),
set_prior("normal(300, 50)", nlpar = "phi3", coef = "treatpoison"),
set_prior("cauchy(30,2)", class = "sd"))
n4_b <- brm(f1,
data = or2,
nonlinear = list(phi1 ~ (1|Tree),
phi2 ~ 1,
phi3 ~ 0 + treat),
prior = prior_4,
chains = 4,
iter = 4000)
Fixed effects:
fixef(n4)
## phi1 phi2 phi3.treatpoison phi3.treatwater
## 178.9 747.4 272.0 357.3
t(fixef(n4_b))
## phi1_Intercept phi2_Intercept phi3_treatpoison phi3_treatwater
## mean 179.2 744.4 275.2 355.6
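A rough sketch of the posterior-mean fitted curves shows the treatment contrast, again assuming the first column of fitted() is the posterior mean:
or2$fit <- fitted(n4_b)[, 1]  # assumes column 1 is the posterior mean, rows in data order
ggplot(or2, aes(x = age, colour = treat, group = Tree)) +
  geom_point(aes(y = circumference)) +
  geom_line(aes(y = fit)) +
  ggtitle("Posterior-mean fits by treatment")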
We can also test whether the poison treatment has an effect compared to the water treatment.
# two-sided hypothesis
hyp <- hypothesis(n4_b, "abs(phi3_treatpoison - phi3_treatwater) > 0")
print(hyp)
plot(hyp)
## Hypothesis Tests for class b:
## Estimate Est.Error l-95% CI u-95% CI Evid.Ratio
## abs(phi3_treatpoi... > 0 80.49 22.83 42.91 Inf Inf *
## ---
## '*': The expected value under the hypothesis lies outside the 95% CI.
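A one-sided version of the same comparison gives an evidence ratio for the poison estimate being smaller than the water estimate:
# one-sided: posterior evidence that the poisoned trees have a lower phi3
hyp_1s <- hypothesis(n4_b, "phi3_treatpoison - phi3_treatwater < 0")
print(hyp_1s)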