library(broom)
library(betareg)
library(magrittr)   # provides the %>% pipe used below

data("GasolineYield", package = "betareg")

# approximately 50/50 random train/test split
set.seed(52352)
GasolineYield$rgroup <- sample(1:100,
   size = nrow(GasolineYield), replace = TRUE)
GTrain <- subset(GasolineYield, rgroup <= 50)
GTest  <- subset(GasolineYield, rgroup > 50)

# beta regression: separate linear predictors for the mean (logit link)
# and the precision (log link) of yield
gy <- betareg(yield ~ gravity + pressure + temp | gravity + pressure + temp,
   data = GTrain)
print(summary(gy))

Call:
betareg(formula = yield ~ gravity + pressure + temp | gravity + 
    pressure + temp, data = GTrain)

Standardized weighted residuals 2:
    Min      1Q  Median      3Q     Max 
-1.5654 -1.1867 -0.2438  0.7150  1.8804 

Coefficients (mean model with logit link):
              Estimate Std. Error z value Pr(>|z|)    
(Intercept) -6.0444634  0.2053709 -29.432  < 2e-16 ***
gravity      0.0272482  0.0045070   6.046 1.49e-09 ***
pressure     0.1043289  0.0098801  10.559  < 2e-16 ***
temp         0.0090788  0.0002334  38.906  < 2e-16 ***

Phi coefficients (precision model with log link):
              Estimate Std. Error z value Pr(>|z|)    
(Intercept) -15.862671   3.712458  -4.273 1.93e-05 ***
gravity       0.343234   0.075916   4.521 6.15e-06 ***
pressure      0.216113   0.170884   1.265 0.205986    
temp          0.022489   0.005784   3.888 0.000101 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 

Type of estimator: ML (maximum likelihood)
Log-likelihood: 41.36 on 8 Df
Pseudo R-squared: 0.9262
Number of iterations: 50 (BFGS) + 2 (Fisher scoring) 
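# The mean-model coefficients are on the logit scale; a quick sketch of how
# to read them on the odds scale (assumes coef() with model = "mean", which
# betareg supports): each unit increase multiplies the yield odds by exp(coef).
exp(coef(gy, model = "mean"))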
# predict the mean yield on the held-out data and compare against observed yield
GTest$model <- predict(gy, newdata = GTest)
library(ggplot2)
ggplot(data = GTest, aes(x = model, y = yield)) +
   geom_point() + geom_abline(slope = 1)
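# One simple numeric summary of the holdout fit (a sketch; RMSE and squared
# correlation are one reasonable choice, not the only one), using the
# GTest$model and GTest$yield columns created above:
holdoutRMSE <- sqrt(mean((GTest$yield - GTest$model)^2))
holdoutR2   <- cor(GTest$yield, GTest$model)^2
c(RMSE = holdoutRMSE, R.squared = holdoutR2)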

summary(GasolineYield)%>%as.data.frame.array()
# predictive standard deviation from the fitted mean/precision model,
# shown as horizontal error bars around each prediction
GTest$modelErr <- sqrt(predict(gy, newdata = GTest,
   type = 'variance'))
ggplot(data = GTest, aes(x = model, y = yield)) +
    geom_point() +
    geom_errorbarh(aes(xmin = model - modelErr, xmax = model + modelErr)) +
    geom_abline(slope = 1) + theme_bw()

# betareg() needs a response strictly inside (0,1); exact 0s or 1s can first
# be pulled slightly inward ("collared"), as in this not-run sketch:
#d$yCollared <- pmin(pmax(1/dim(d)[[1]],d$y),1-1/dim(d)[[1]])
#bm <- betareg(yCollared ~ x1 + x2, data = d, link='logit')
#predict(bm,newdata=d,type='response')
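# A runnable version of the collaring idea above; the data frame d and the
# columns x1, x2, y are synthetic, made up purely for illustration:
set.seed(1)
d <- data.frame(x1 = rnorm(100), x2 = rnorm(100))
d$y <- plogis(0.5 * d$x1 - 0.3 * d$x2 + rnorm(100, sd = 0.5))
d$y[1:2] <- c(0, 1)                          # force boundary values
n <- nrow(d)
d$yCollared <- pmin(pmax(1/n, d$y), 1 - 1/n) # pull 0/1 inside (0,1)
bm <- betareg(yCollared ~ x1 + x2, data = d, link = 'logit')
head(predict(bm, newdata = d, type = 'response'))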
# linear model
data("ReadingSkills", package = "betareg")
rs_ols <- lm(qlogis(accuracy) ~ dyslexia * iq, data = ReadingSkills)
coef(rs_ols)
(Intercept)    dyslexia          iq dyslexia:iq 
  1.6010683  -1.2056303   0.3594491  -0.4228629 
#summary(rs_ols)
broom::tidy(rs_ols)
broom::augment(rs_ols)
broom::confint_tidy(rs_ols)
broom::glance(rs_ols)
# plot accuracy by iq for the linear and beta regression models on the same
# graph (see the sketch after the beta regression fit below)
summary(ReadingSkills)%>%as.data.frame.array()
# Reading accuracy: beta regression with the mean depending on dyslexia * iq
# and the precision depending on dyslexia + iq
rs_beta <- betareg(accuracy ~ dyslexia * iq | dyslexia + iq,
   data = ReadingSkills)
#summary(rs_beta)
broom::tidy(rs_beta)
broom::augment(rs_beta)
broom::confint_tidy(rs_beta)
broom::glance(rs_beta)
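# The plot requested above: observed accuracy versus iq with fitted mean
# curves from the linear model (back-transformed with plogis) and from the
# beta regression, split by dyslexia group. A sketch; pred_grid and the
# ols_fit / beta_fit columns are introduced here only for illustration.
pred_grid <- expand.grid(
  iq = seq(min(ReadingSkills$iq), max(ReadingSkills$iq), length.out = 100),
  dyslexia = levels(ReadingSkills$dyslexia))
pred_grid$ols_fit  <- plogis(predict(rs_ols, newdata = pred_grid))
pred_grid$beta_fit <- predict(rs_beta, newdata = pred_grid)
ggplot(ReadingSkills, aes(x = iq, y = accuracy, colour = dyslexia)) +
  geom_point() +
  geom_line(data = pred_grid, aes(y = ols_fit,  linetype = "linear (logit LM)")) +
  geom_line(data = pred_grid, aes(y = beta_fit, linetype = "beta regression")) +
  labs(linetype = "model") + theme_bw()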
# Fit a beta regression tree: in each node, accuracy's mean and precision
# depend on iq; partitioning is done over dyslexia and the noise variables
# x1, x2, x3.
# Partitioning variables: dyslexia plus further random noise variables.
set.seed(148)
ReadingSkills$x1 <- rnorm(nrow(ReadingSkills))
ReadingSkills$x2 <- runif(nrow(ReadingSkills))
ReadingSkills$x3 <- factor(rnorm(nrow(ReadingSkills)) > 0)
rs_tree <- betatree(accuracy ~ iq | iq, ~ dyslexia + x1 + x2 + x3,
   data = ReadingSkills, minsplit = 10)
plot(rs_tree)
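# Optionally inspect the subgroup models directly; a sketch, assuming that
# coef() on the betatree object returns the mean and precision coefficients
# for each terminal node:
coef(rs_tree)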

# Latent class beta regression
# Setup: no dyslexia information available.
# Look for k = 3 clusters: two different accuracy ~ iq relationships, plus a
# component for near-ideal scores of 0.99. Fit the beta mixture regression:
rs_mix <- betamix(accuracy ~ iq, data = ReadingSkills, k = 3,
   nstart = 10, extra_components = extraComponent(
      type = "uniform", coef = 0.99, delta = 0.01))
#plot(rs_mix)
summary(rs_mix)%>%as.list.data.frame()
$Comp.1
$Comp.1$mean
            Estimate Std. Error z value  Pr(>|z|)    
(Intercept)  1.40342    0.26332  5.3296 9.841e-08 ***
iq           0.82502    0.21630  3.8142 0.0001366 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

$Comp.1$precision
            Estimate Std. Error z value  Pr(>|z|)    
(Intercept)  2.68509    0.45435  5.9097 3.427e-09 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1


$Comp.2
$Comp.2$mean
             Estimate Std. Error z value  Pr(>|z|)    
(Intercept)  0.502523   0.082476  6.0930 1.108e-09 ***
iq          -0.048414   0.112923 -0.4287    0.6681    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

$Comp.2$precision
            Estimate Std. Error z value Pr(>|z|)    
(Intercept)  4.25160    0.74737  5.6888 1.28e-08 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1


<S4 Type Object>
attr(,"coef")
     model.1_Comp.1.mean.(Intercept)               model.1_Comp.1.mean.iq 
                          1.40341860                           0.82501991 
model.1_Comp.1.precision.(Intercept)      model.1_Comp.2.mean.(Intercept) 
                          2.68508810                           0.50252278 
              model.1_Comp.2.mean.iq model.1_Comp.2.precision.(Intercept) 
                         -0.04841425                           4.25159860 
            concomitant_Comp.2.alpha             concomitant_Comp.3.alpha 
                          0.22619882                          -0.11691598 
attr(,"vcov")
                                     model.1_Comp.1.mean.(Intercept)
model.1_Comp.1.mean.(Intercept)                          0.069339384
model.1_Comp.1.mean.iq                                   0.023275136
model.1_Comp.1.precision.(Intercept)                     0.035205631
model.1_Comp.2.mean.(Intercept)                          0.002982662
model.1_Comp.2.mean.iq                                   0.011279190
model.1_Comp.2.precision.(Intercept)                    -0.088001379
concomitant_Comp.2.alpha                                 0.073747796
concomitant_Comp.3.alpha                                 0.028579092
                                     model.1_Comp.1.mean.iq
model.1_Comp.1.mean.(Intercept)                0.0232751356
model.1_Comp.1.mean.iq                         0.0467872938
model.1_Comp.1.precision.(Intercept)           0.0132884104
model.1_Comp.2.mean.(Intercept)                0.0019218952
model.1_Comp.2.mean.iq                         0.0009261736
model.1_Comp.2.precision.(Intercept)          -0.0062429381
concomitant_Comp.2.alpha                       0.0109269996
concomitant_Comp.3.alpha                      -0.0097450835
                                     model.1_Comp.1.precision.(Intercept)
model.1_Comp.1.mean.(Intercept)                               0.035205631
model.1_Comp.1.mean.iq                                        0.013288410
model.1_Comp.1.precision.(Intercept)                          0.206435828
model.1_Comp.2.mean.(Intercept)                              -0.002243594
model.1_Comp.2.mean.iq                                        0.002902676
model.1_Comp.2.precision.(Intercept)                         -0.032650305
concomitant_Comp.2.alpha                                      0.026958858
concomitant_Comp.3.alpha                                      0.026407866
                                     model.1_Comp.2.mean.(Intercept)
model.1_Comp.1.mean.(Intercept)                          0.002982662
model.1_Comp.1.mean.iq                                   0.001921895
model.1_Comp.1.precision.(Intercept)                    -0.002243594
model.1_Comp.2.mean.(Intercept)                          0.006802271
model.1_Comp.2.mean.iq                                   0.003429996
model.1_Comp.2.precision.(Intercept)                    -0.015683613
concomitant_Comp.2.alpha                                 0.017485750
concomitant_Comp.3.alpha                                 0.009825237
                                     model.1_Comp.2.mean.iq
model.1_Comp.1.mean.(Intercept)                0.0112791896
model.1_Comp.1.mean.iq                         0.0009261736
model.1_Comp.1.precision.(Intercept)           0.0029026756
model.1_Comp.2.mean.(Intercept)                0.0034299958
model.1_Comp.2.mean.iq                         0.0127516057
model.1_Comp.2.precision.(Intercept)          -0.0541056271
concomitant_Comp.2.alpha                       0.0471233962
concomitant_Comp.3.alpha                       0.0277798735
                                     model.1_Comp.2.precision.(Intercept)
model.1_Comp.1.mean.(Intercept)                              -0.088001379
model.1_Comp.1.mean.iq                                       -0.006242938
model.1_Comp.1.precision.(Intercept)                         -0.032650305
model.1_Comp.2.mean.(Intercept)                              -0.015683613
model.1_Comp.2.mean.iq                                       -0.054105627
model.1_Comp.2.precision.(Intercept)                          0.558558393
concomitant_Comp.2.alpha                                     -0.359911609
concomitant_Comp.3.alpha                                     -0.213023950
                                     concomitant_Comp.2.alpha
model.1_Comp.1.mean.(Intercept)                    0.07374780
model.1_Comp.1.mean.iq                             0.01092700
model.1_Comp.1.precision.(Intercept)               0.02695886
model.1_Comp.2.mean.(Intercept)                    0.01748575
model.1_Comp.2.mean.iq                             0.04712340
model.1_Comp.2.precision.(Intercept)              -0.35991161
concomitant_Comp.2.alpha                           0.54591750
concomitant_Comp.3.alpha                           0.33439361
                                     concomitant_Comp.3.alpha
model.1_Comp.1.mean.(Intercept)                   0.028579092
model.1_Comp.1.mean.iq                           -0.009745084
model.1_Comp.1.precision.(Intercept)              0.026407866
model.1_Comp.2.mean.(Intercept)                   0.009825237
model.1_Comp.2.mean.iq                            0.027779874
model.1_Comp.2.precision.(Intercept)             -0.213023950
concomitant_Comp.2.alpha                          0.334393608
concomitant_Comp.3.alpha                          0.343926987
attr(,"k")
[1] 3
attr(,"components")
attr(,"components")[[1]]
attr(,"components")[[1]]$Comp.1
attr(,"components")[[1]]$Comp.1$mean
            Estimate Std. Error z value  Pr(>|z|)    
(Intercept)  1.40342    0.26332  5.3296 9.841e-08 ***
iq           0.82502    0.21630  3.8142 0.0001366 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

attr(,"components")[[1]]$Comp.1$precision
            Estimate Std. Error z value  Pr(>|z|)    
(Intercept)  2.68509    0.45435  5.9097 3.427e-09 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1


attr(,"components")[[1]]$Comp.2
attr(,"components")[[1]]$Comp.2$mean
             Estimate Std. Error z value  Pr(>|z|)    
(Intercept)  0.502523   0.082476  6.0930 1.108e-09 ***
iq          -0.048414   0.112923 -0.4287    0.6681    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

attr(,"components")[[1]]$Comp.2$precision
            Estimate Std. Error z value Pr(>|z|)    
(Intercept)  4.25160    0.74737  5.6888 1.28e-08 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1



attr(,"concomitant")
attr(,"concomitant")$Comp.2
      Estimate Std. Error z value Pr(>|z|)
prior  0.22620    0.73886  0.3061   0.7595

attr(,"concomitant")$Comp.3
      Estimate Std. Error z value Pr(>|z|)
prior -0.11692    0.58645 -0.1994    0.842

attr(,"call")
refit(object$flexmix, ...)
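# How well do the mixture components line up with the (held-out) dyslexia
# groups? A sketch, assuming the clusters() accessor from flexmix applies to
# betamix fits:
table(flexmix::clusters(rs_mix), ReadingSkills$dyslexia)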