Getting ready…

library(tidyverse)
-- Attaching packages ------------------------------------------------------------- tidyverse 1.3.0 --
v ggplot2 3.3.3     v purrr   0.3.4
v tibble  3.0.6     v dplyr   1.0.4
v tidyr   1.1.2     v stringr 1.4.0
v readr   1.4.0     v forcats 0.5.1
-- Conflicts ---------------------------------------------------------------- tidyverse_conflicts() --
x dplyr::filter() masks stats::filter()
x dplyr::lag()    masks stats::lag()
library(broom)

fl <- read_csv('isbell_et_al_2019_firstlast.csv')

-- Column specification ------------------------------------------------------------------------------
cols(
  .default = col_double(),
  Student.Identification = col_character(),
  Language = col_character()
)
i Use `spec()` for the full column specifications.

Exploring the data

Getting a sense of the data: the outcome goes from an ordinal/continuous measure (netchange) to a dichotomous one (Decrease).

table(fl$netchange)

 -3  -2  -1   0   1   2   3 
  1  15 105 323 280  84   6 
table(fl$Decrease)

  0   1 
693 121 
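
The counts line up: the 121 cases with netchange < 0 (1 + 15 + 105) are exactly the 121 cases with Decrease = 1. If the dichotomous column weren't already in the CSV, it could be derived along these lines (a sketch using the column names above):

fl <- fl %>% mutate(Decrease = if_else(netchange < 0, 1, 0)) # 1 = net proficiency decrease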

Taking a look at how some predictors associate with the outcome:

ggplot(data = fl, aes(x = factor(start.prof), fill = factor(Decrease, levels = c(0, 1)))) +
  geom_bar(stat = "count") +
  coord_flip()
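
Since the start.prof groups differ a lot in size, a proportional version of the same plot can make the association easier to judge — a sketch swapping raw counts for stacked proportions:

ggplot(data = fl, aes(x = factor(start.prof), fill = factor(Decrease, levels = c(0, 1)))) +
  geom_bar(position = "fill") + # stacked proportions instead of counts
  coord_flip() +
  labs(y = "Proportion")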

And another…

ggplot(data = fl, aes(x = factor(Time), fill = factor(Decrease, levels = c(0, 1)))) +
  geom_bar(stat = "count") +
  coord_flip()

Modeling

An intercept-only model

mod0 <- glm(Decrease ~ 1, family = "binomial", data = fl)
summary(mod0)

Call:
glm(formula = Decrease ~ 1, family = "binomial", data = fl)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-0.5673  -0.5673  -0.5673  -0.5673   1.9525  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -1.74524    0.09853  -17.71   <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 684.34  on 813  degrees of freedom
Residual deviance: 684.34  on 813  degrees of freedom
AIC: 686.34

Number of Fisher Scoring iterations: 4

Converting the logit to odds…

exp(mod0$coefficients)
(Intercept) 
  0.1746032 

Quick check:
p = 121/(121+693) = .1486
odds = p/(1-p) = .1486/(1-.1486) = .1745 (matches our intercept)

An intercept-only model in logistic regression doesn't give you the grand mean the way linear regression does; instead, the exponentiated intercept gives you the overall odds of the outcome in the data.
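
The same check in code (since Decrease is coded 0/1, its mean is the proportion of 1s):

p <- mean(fl$Decrease) # 121/814 = .1486
p / (1 - p)            # odds = .1745, matching exp(coef) above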

Now to a model with predictors:

mod1 <- glm(Decrease ~ all.course + motiv + start.prof, family = "binomial", data = fl)
summary(mod1)

Call:
glm(formula = Decrease ~ all.course + motiv + start.prof, family = "binomial", 
    data = fl)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-1.4942  -0.5820  -0.4322  -0.3128   2.4740  

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept) -2.84787    0.48715  -5.846 5.04e-09 ***
all.course  -0.10139    0.03640  -2.785  0.00535 ** 
motiv       -0.22241    0.09081  -2.449  0.01432 *  
start.prof   0.63563    0.08285   7.672 1.69e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 684.34  on 813  degrees of freedom
Residual deviance: 617.62  on 810  degrees of freedom
AIC: 625.62

Number of Fisher Scoring iterations: 5
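
The drop in deviance from the intercept-only model (684.34 - 617.62 = 66.72 on 3 df) is the overall likelihood-ratio test of the model, which can be run directly as a model comparison:

anova(mod0, mod1, test = "Chisq") # chi-square test on the deviance difference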

And now to do some nifty conversions to odds ratios, including CIs…

coef <- tidy(mod1, conf.int = TRUE)
coef <- mutate(coef, OR = exp(estimate),
               `2.5%` = exp(conf.low),
               `97.5%` = exp(conf.high))
coef

Converting to odds ratios is nice because effects and CIs have a clear interpretation: an OR less than 1 is a decrease in the odds, and an OR greater than 1 is an increase. If a CI crosses 1, the effect is not statistically different from the null, which in logistic regression is an OR of 1 (1:1 odds, i.e. a 50% chance, or 0 on the logit scale).
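
For example, using the summary above, exp(0.63563) ≈ 1.89 for start.prof: each one-unit increase in starting proficiency multiplies the odds of a decrease by roughly 1.89, holding the other predictors constant.

exp(0.63563) # OR for start.prof ≈ 1.888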

How about an effect plot?

coef %>% ggplot(aes(x = term, y = OR)) +
  geom_point() +
  geom_errorbar(aes(ymin = `2.5%`, ymax = `97.5%`), width = .2) +
  geom_hline(yintercept = 1, linetype = 2) +
  labs(x = NULL, y = 'Odds of Decrease') +
  coord_flip() +
  theme_minimal()

It’s also useful to check the predictive accuracy of the model:

# check predicted probabilities
fl$pred.prob <- fitted(mod1) # note - fitted() returns probabilities for logistic reg; predict() gives logits unless type = "response"
fl$pred.class <- cut(fl$pred.prob, breaks = c(-Inf, .5, Inf), labels = c(0, 1)) # classify at a .5 cutoff

table(fl$Decrease, fl$pred.class)
   
      0   1
  0 683  10
  1 115   6
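
Overall accuracy is (683 + 6)/814 ≈ .846, which is actually a touch below the .851 (693/814) you'd get by always predicting 0 — a reminder that with a rare outcome, raw accuracy is a weak benchmark. In code:

mean(as.character(fl$pred.class) == as.character(fl$Decrease)) # ≈ .846; as.character() avoids factor/numeric comparison quirks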

What about R²? The rms package reports a pseudo-R² (Nagelkerke R²) when fitting the same logistic regression with lrm():

library(rms)
mod1b <- lrm(Decrease ~ all.course + motiv + start.prof, data = fl)
mod1b$stats
         Obs    Max Deriv   Model L.R.         d.f.            P            C          Dxy 
8.140000e+02 2.437147e-06 6.672132e+01 3.000000e+00 2.142730e-14 7.348813e-01 4.697626e-01 
       Gamma        Tau-a           R2        Brier            g           gr           gp 
4.735235e-01 1.190452e-01 1.384067e-01 1.174376e-01 9.255851e-01 2.523344e+00 1.094682e-01 
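
Note that Model L.R. (66.72) matches the glm() deviance drop above. To pull just the Nagelkerke R² out of that named vector:

mod1b$stats["R2"] # ≈ .138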