library(lme4)
## Loading required package: Matrix
library(tidyverse)
## -- Attaching packages -------------------------------------------------------------- tidyverse 1.2.1 --
## v ggplot2 3.1.0     v purrr   0.2.5
## v tibble  1.4.2     v dplyr   0.7.7
## v tidyr   0.8.2     v stringr 1.3.1
## v readr   1.1.1     v forcats 0.3.0
## -- Conflicts ----------------------------------------------------------------- tidyverse_conflicts() --
## x tidyr::expand() masks Matrix::expand()
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
# the example from the ?nlmer help page: only the asymptote varies by Tree
startvec <- c(Asym = 200, xmid = 725, scal = 350)
(nm1 <- nlmer(circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym|Tree,
              Orange, start = startvec))
## Nonlinear mixed model fit by maximum likelihood  ['nlmerMod']
## Formula: circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym | Tree
##    Data: Orange
##       AIC       BIC    logLik  deviance  df.resid 
##  273.1438  280.9205 -131.5719  263.1438        30 
## Random effects:
##  Groups   Name Std.Dev.
##  Tree     Asym 31.646  
##  Residual       7.843  
## Number of obs: 35, groups:  Tree, 5
## Fixed Effects:
##  Asym   xmid   scal  
## 192.1  727.9  348.1
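
As a quick aside (my addition, not from the original output), coef() combines the fixed effects with each tree's random effect, so you can inspect the per-tree asymptote estimates:

# per-tree coefficients: Asym varies by tree; xmid and scal are shared
coef(nm1)[["Tree"]]
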
# store the tree-specific fitted values for plotting
Orange$fitted <- fitted(nm1)

# raw data with each tree's model-implied growth curve
ggplot(Orange) +
  aes(x = age, y = circumference) +
  geom_point() +
  geom_line(aes(y = fitted, group = Tree))

library(brms)
## Loading required package: Rcpp
## Loading 'brms' package (version 2.6.0). Useful instructions
## can be found by typing help('brms'). A more detailed introduction
## to the package is available through vignette('brms_overview').
## Run theme_set(theme_default()) to use the default bayesplot theme.
## 
## Attaching package: 'brms'
## The following object is masked from 'package:lme4':
## 
##     ngrps
# Unlike the nlmer fit above, let all three curve parameters vary by Tree
formula <- bf(
  circumference ~ asym * inv(1 + exp((mid - age) / scale)),
  mid ~ 1 + (1 | Tree),
  scale ~ 1 + (1 | Tree),
  asym ~ 1 + (1 | Tree),
  sigma ~ 1,
  nl = TRUE)
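
This is the same logistic curve that SSlogis() fits: asym / (1 + exp((mid - age) / scale)). Note that inv() is Stan's reciprocal function, not an R function, so an R sanity check of the parameterization has to write the division out by hand (arbitrary values; my addition):

# confirm the hand-written formula matches the SSlogis() parameterization
all.equal(
  as.numeric(SSlogis(1000, Asym = 200, xmid = 725, scal = 350)),
  200 / (1 + exp((725 - 1000) / 350)))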

# Quick-and-dirty priors. brms requires a prior on every non-linear
# parameter just to fit the model.
prior1 <- c(
  prior(normal(200, 50), nlpar = "asym"),
  prior(normal(750, 200), nlpar = "mid"),
  # the population-average scale parameter is constrained to be positive
  prior(normal(300, 100), lb = 0, nlpar = "scale"))
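
If you are unsure which parameters need priors, brms can list them all; get_prior() takes the same formula and data (output omitted here):

# list every parameter in the model that can receive a prior
get_prior(formula, data = Orange, family = gaussian())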

# eyeball each prior by sampling from it
hist(rnorm(1000, 200, 50))   # asym

hist(rnorm(1000, 750, 200))  # mid

hist(rnorm(1000, 300, 100))  # scale

fit <- brm(
  formula,
  data = Orange,
  prior = prior1,
  family = gaussian(),
  iter = 2000,
  chains = 1,
  control = list(adapt_delta = 0.90, max_treedepth = 15))
## Compiling the C++ model
## Start sampling
## 
## SAMPLING FOR MODEL '3ed2af72b5cce9a8a6f0814d5bd2bb0f' NOW (CHAIN 1).
## Chain 1: Gradient evaluation took 0.001 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 10 seconds.
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1:  Elapsed Time: 2.489 seconds (Warm-up)
## Chain 1:                0.976 seconds (Sampling)
## Chain 1:                3.465 seconds (Total)
fit
##  Family: gaussian 
##   Links: mu = identity; sigma = log 
## Formula: circumference ~ asym * inv(1 + exp((mid - age)/scale)) 
##          mid ~ 1 + (1 | Tree)
##          scale ~ 1 + (1 | Tree)
##          asym ~ 1 + (1 | Tree)
##          sigma ~ 1
##    Data: Orange (Number of observations: 35) 
## Samples: 1 chains, each with iter = 2000; warmup = 1000; thin = 1;
##          total post-warmup samples = 1000
## 
## Group-Level Effects: 
## ~Tree (Number of levels: 5) 
##                     Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## sd(mid_Intercept)      45.05     36.00     0.92   140.09        598 1.00
## sd(scale_Intercept)    38.60     29.55     2.55   105.31        633 1.00
## sd(asym_Intercept)     45.24     19.56    20.21    96.24        451 1.00
## 
## Population-Level Effects: 
##                 Estimate Est.Error l-95% CI u-95% CI Eff.Sample Rhat
## sigma_Intercept     2.06      0.15     1.81     2.37        809 1.00
## mid_Intercept     738.38     45.94   653.95   829.49        690 1.01
## scale_Intercept   357.38     34.57   292.40   428.93        759 1.00
## asym_Intercept    195.87     20.35   154.68   241.16        364 1.00
## 
## Samples were drawn using sampling(NUTS). For each parameter, Eff.Sample 
## is a crude measure of effective sample size, and Rhat is the potential 
## scale reduction factor on split chains (at convergence, Rhat = 1).
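
Before plotting, it is worth checking convergence and model fit. These are standard brms helpers, included here as a suggestion (output omitted):

# trace plots and marginal posterior densities for each parameter
plot(fit)

# posterior predictive check: simulated data versus observed circumferences
pp_check(fit)
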
library(tidybayes)
## NOTE: As of tidybayes version 1.0, several functions, arguments, and output column names
##       have undergone significant name changes in order to adopt a unified naming scheme.
##       See help('tidybayes-deprecated') for more information.
# a smooth grid of 80 ages per tree for drawing fitted curves
grid <- Orange %>%
  group_by(Tree) %>%
  tidyr::expand(age = modelr::seq_range(age, n = 80)) %>%
  ungroup()

# draw 100 posterior samples of the conditional mean at each grid point
fits <- grid %>%
  add_fitted_draws(fit, n = 100)
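
Each row of fits is one posterior draw of the fitted mean at one grid point; the .draw and .value columns below index and hold those draws (a quick peek, output omitted):

# one row per posterior draw per grid point
head(fits)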

# spaghetti plot: 100 posterior fitted curves per tree over the raw data
ggplot(Orange) +
  aes(x = age, y = circumference) +
  geom_line(
    aes(group = interaction(Tree, .draw), y = .value),
    data = fits, alpha = .3) +
  geom_point(color = "red") +
  facet_wrap("Tree")