library(tidyverse)
library(igraph)
library(lme4)
library(sjstats)
library(rtweet)
library(rTAGS) # install with devtools::install_github('bretsw/rTAGS')
library(brms)
# source('prepare-data.R') # this creates the two files with 'to-analyze' in their names, as read below
For notes on this one-time OSF setup, see this walkthrough: http://centerforopenscience.github.io/osfr/articles/auth.html
First, you must generate an API token from an OSF account that has been added to the data repository. Read how to do this here: https://developer.osf.io/#tag/General-Usage
Then, add the OSF API token to the .Renviron file, which can be opened with the following command. Here, the file is edited at the user level, although this could also be set at the project level.
usethis::edit_r_environ(scope='user')
Open the .Renviron file and add a single line, using this exact text but replacing <token> with your OSF API token:
OSF_PAT="<token>"
Save the file, quit R, and restart in a new session. Continue running the R script from here.
Now, install the osfr package and load the library:
library(osfr)
Upon loading the osfr package, you should see this message:
Automatically registered OSF personal access token.
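If that message does not appear, a quick check (using base R's Sys.getenv()) is whether the token is visible in the environment; an empty result means the .Renviron edit did not take effect:
nchar(Sys.getenv("OSF_PAT")) > 0 # should be TRUE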
Now you are able to retrieve and download the relevant datasets with this code:
# all-ngsschat-tweets.csv
osf_retrieve_file("https://osf.io/k2w6t/") %>%
osf_download(path = "ngsschat-tweets-14-15.rds", overwrite = TRUE)
# orig-pre.csv
osf_retrieve_file("https://osf.io/nj8yk/") %>%
osf_download(path = "orig-pre-14.csv", overwrite = TRUE)
# orig-post.csv
osf_retrieve_file("https://osf.io/ngwpt/") %>%
osf_download(path = "orig-post-15.csv", overwrite = TRUE)
# users-to-analyze.csv
osf_retrieve_file("https://osf.io/jz7p6/") %>%
osf_download(path = "users-to-analyze.csv", overwrite = TRUE)
# edgelist-to-analyze.csv
osf_retrieve_file("https://osf.io/sbyn9/") %>%
osf_download(path = "edgelist-to-analyze.csv", overwrite = TRUE)
orig <- read_rds("ngsschat-tweets-14-15.rds") # original data
orig_pre <- read_csv("orig-pre-14.csv") # data for the year before
orig_post <- read_csv("orig-post-15.csv") # data for the year after
users <- read_csv("users-to-analyze.csv") # processed user data
edge <- read_csv("edgelist-to-analyze.csv") # processed edgelist data
ts_plot(orig) +
geom_vline(xintercept = as.POSIXct(as.Date("2014-08-01"))) +
geom_vline(xintercept = as.POSIXct(as.Date("2015-07-31"))) +
theme_bw() +
xlab("Day") +
ylab("Number of Tweets including #NGSSchat per day") +
xlim(c(as.POSIXct(as.Date("2012-01-01")), as.POSIXct(as.Date("2017-12-31")))) +
geom_rect(aes(xmin = as.POSIXct(as.Date(c("2014-08-01"))), xmax = as.POSIXct(as.Date(c("2015-07-31"))),
ymin = -Inf,
ymax = Inf),
fill = "cyan3", alpha = 0.01)
library(leaflet) # needed for the interactive map below; mapsapi calls are namespaced
# maps_api_key must be set to your own Google Maps API key before this step, e.g.
# maps_api_key <- Sys.getenv("GOOGLE_MAPS_API_KEY")
users$geocoded_location <- mapsapi::mp_geocode(users$location, key = maps_api_key)
locs <- mapsapi::mp_get_points(users$geocoded_location)
# locs <- locs %>%
# bind_cols(screen_name = users$screen_name) %>%
# bind_cols(group = users$group) %>%
# bind_cols(n_tweets = users$n_tweets)
leaflet() %>%
addProviderTiles("CartoDB") %>%
addCircleMarkers(data = locs)
users <- select(users, -account_lang, -user_id) # account_lang appears to be all NA; user_id is not needed for the analysis
# overall
orig %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
left_join(users) %>%
semi_join(users) %>%
filter(n_tweets >= 2) %>%
count(screen_name) %>%
rename(n_tweets = n) # 191 users with at least two tweets
## # A tibble: 191 x 2
## screen_name n_tweets
## <chr> <int>
## 1 21stscied 13
## 2 2footgiraffe 58
## 3 achavez_science 3
## 4 adchempages 28
## 5 aeolani 8
## 6 ajollygal 2
## 7 aliciajohal 5
## 8 all4ed 2
## 9 alynnmeyer 34
## 10 amycoyote 9
## # … with 181 more rows
orig %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
left_join(users) %>%
semi_join(users) %>%
filter(n_tweets >= 2) %>%
count(screen_name) %>%
summarize(median_n = median(n),
mean_n = mean(n),
sd_n = sd(n))
## # A tibble: 1 x 3
## median_n mean_n sd_n
## <int> <dbl> <dbl>
## 1 12 33.3 90.7
# by group
du <- orig %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
left_join(users) %>%
semi_join(users) %>%
filter(n_tweets >= 2) %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is.na(group))
n_tweeters_by_group <- du %>%
filter(!is_retweet) %>%
count(group, screen_name) %>%
count(group) %>%
rename(n_tweeters = n) %>%
mutate(n_prop = n_tweeters / sum(n_tweeters)) %>%
arrange(desc(n_tweeters))
n_tweets_by_group <- du %>%
filter(!is_retweet) %>%
count(group, screen_name) %>%
group_by(group) %>%
summarize(sum_n_tweets = sum(n))
# this is individual engagement by group - probably what we want to report
fin_df <- du %>%
filter(!is_retweet) %>%
count(group, screen_name) %>%
group_by(group) %>%
summarize(mean_n_tweets = mean(n),
sd_n_tweets = sd(n)) %>%
right_join(n_tweeters_by_group) %>%
right_join(n_tweets_by_group) %>%
select(group, sum_n_tweets, n_tweeters, mean_n_tweets, sd_n_tweets) %>%
arrange(desc(sum_n_tweets))
fin_df
## # A tibble: 6 x 5
## group sum_n_tweets n_tweeters mean_n_tweets sd_n_tweets
## <chr> <int> <int> <dbl> <dbl>
## 1 Teacher 2653 78 34.0 90.9
## 2 Administrator 2421 46 52.6 135.
## 3 Researcher 600 14 42.9 54.9
## 4 Other 441 22 20.0 29.2
## 5 Unclear 184 23 8 7.53
## 6 Organization 67 8 8.38 4.50
chisq.test(fin_df$sum_n_tweets) # there are sig diffs across sum_n_tweets
##
## Chi-squared test for given probabilities
##
## data: fin_df$sum_n_tweets
## X-squared = 6350.8, df = 5, p-value < 2.2e-16
cst <- chisq.test(fin_df$sum_n_tweets)
cst$stdres
## [1] 53.53971 45.73744 -15.50365 -20.85089 -29.49392 -33.42869
cst <- chisq.test(fin_df$sum_n_tweets[1:2])
cst$stdres # there does seem to be a diff in n between teachers and admin
## [1] 3.256962 -3.256962
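As a rough effect-size companion to the goodness-of-fit tests above, Cohen's w can be computed as the square root of the chi-squared statistic divided by the total count; a minimal sketch for the six-group test:
# Cohen's w = sqrt(X-squared / N) for the six-group goodness-of-fit test
cst_all <- chisq.test(fin_df$sum_n_tweets)
sqrt(unname(cst_all$statistic) / sum(fin_df$sum_n_tweets))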
dc <- edge %>%
filter(interaction_type == "conversing")
g <- graph_from_data_frame(dc)
m <- as_adjacency_matrix(g, sparse = FALSE) # sender is row, receiver is column
t <- m %>%
as.data.frame() %>%
rownames_to_column("sender") %>%
gather(receiver, val, -sender) %>%
as_tibble()
tt <- add_users_data(t, users)
tt <- mutate(tt,
code_sender = ifelse(is.na(code_sender), 11, code_sender),
code_receiver = ifelse(is.na(code_receiver), 11, code_receiver)) %>%
# filter(code_sender != 11 & code_receiver !=11) %>%
mutate(group_sender = recode(code_sender,
`1` = "Teacher",
`2` = "Administrator",
`3` = "Administrator",
`4` = "Researcher",
`5` = "Other",
`8` = "Other",
`9` = "Other",
`10` = "Other",
`6` = "Organization",
`7` = "Organization",
`11` = "Other"),
group_receiver = recode(code_receiver,
`1` = "Teacher",
`2` = "Administrator",
`3` = "Administrator",
`4` = "Researcher",
`5` = "Other",
`8` = "Other",
`9` = "Other",
`10` = "Other",
`6` = "Organization",
`7` = "Organization",
`11` = "Other"))
tt$group_receiver <- fct_relevel(as.factor(tt$group_receiver), "Other")
tt$group_sender <- fct_relevel(as.factor(tt$group_sender), "Other")
tt$dic <- ifelse(tt$val > 0, 1, 0)
tt$same <- ifelse(tt$group_sender == tt$group_receiver, 1, 0)
mc1 <- glmer(val ~ 1 +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(mc1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula: val ~ 1 + (1 | sender) + (1 | receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 40543.6 40572.6 -20268.8 40537.6 117646
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.663 -0.130 -0.054 -0.021 77.211
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 5.478 2.341
## sender (Intercept) 3.363 1.834
## Number of obs: 117649, groups: receiver, 343; sender, 242
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -6.0960 0.1825 -33.41 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
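The null model is mainly useful for seeing how variance splits across the two random effects; a minimal sketch of that partition (it uses only the sender and receiver variance components and ignores residual Poisson variation, so it is not a full ICC):
# Share of random-effect variance attributable to receivers vs. senders
vc <- as.data.frame(VarCorr(mc1))
setNames(vc$vcov / sum(vc$vcov), vc$grp)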
mc1 <- glmer(val ~ 1 +
group_sender +
group_receiver +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(mc1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula: val ~ 1 + group_sender + group_receiver + (1 | sender) + (1 |
## receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 40541.5 40648.0 -20259.8 40519.5 117638
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.663 -0.130 -0.054 -0.021 76.264
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 5.240 2.289
## sender (Intercept) 3.218 1.794
## Number of obs: 117649, groups: receiver, 343; sender, 242
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -7.08159 0.33262 -21.290 < 2e-16 ***
## group_senderAdministrator 0.75128 0.34436 2.182 0.02914 *
## group_senderOrganization -0.06468 0.55269 -0.117 0.90685
## group_senderResearcher 0.80974 0.50047 1.618 0.10567
## group_senderTeacher 0.55141 0.29699 1.857 0.06335 .
## group_receiverAdministrator 0.81328 0.36367 2.236 0.02533 *
## group_receiverOrganization -0.12432 0.56143 -0.221 0.82475
## group_receiverResearcher 1.08194 0.52675 2.054 0.03998 *
## group_receiverTeacher 0.93176 0.31947 2.917 0.00354 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) grp_sA grp_sO grp_sR grp_sT grp_rA grp_rO grp_rR
## grp_sndrAdm -0.451
## grp_sndrOrg -0.277 0.261
## grp_sndrRsr -0.317 0.299 0.182
## grp_sndrTch -0.528 0.501 0.307 0.348
## grp_rcvrAdm -0.466 0.000 0.006 0.008 0.004
## grp_rcvrOrg -0.301 0.007 0.015 0.010 0.010 0.264
## grp_rcvrRsr -0.319 0.011 0.008 0.014 0.014 0.281 0.166
## grp_rcvrTch -0.536 0.004 0.006 0.010 0.006 0.476 0.303 0.318
mc2 <- glmer(val ~ 1 +
group_sender +
group_receiver +
same +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(mc2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula: val ~ 1 + group_sender + group_receiver + same + (1 | sender) +
## (1 | receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 40527.5 40643.6 -20251.7 40503.5 117637
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.937 -0.130 -0.054 -0.021 76.956
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 5.240 2.289
## sender (Intercept) 3.218 1.794
## Number of obs: 117649, groups: receiver, 343; sender, 242
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -7.07247 0.32919 -21.485 < 2e-16 ***
## group_senderAdministrator 0.72645 0.34246 2.121 0.0339 *
## group_senderOrganization -0.06262 0.55323 -0.113 0.9099
## group_senderResearcher 0.80756 0.50731 1.592 0.1114
## group_senderTeacher 0.52726 0.29710 1.775 0.0760 .
## group_receiverAdministrator 0.78719 0.36250 2.172 0.0299 *
## group_receiverOrganization -0.11801 0.54673 -0.216 0.8291
## group_receiverResearcher 1.08248 0.52898 2.046 0.0407 *
## group_receiverTeacher 0.90449 0.31922 2.833 0.0046 **
## same 0.09268 0.02276 4.072 4.66e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) grp_sA grp_sO grp_sR grp_sT grp_rA grp_rO grp_rR grp_rT
## grp_sndrAdm -0.449
## grp_sndrOrg -0.263 0.249
## grp_sndrRsr -0.300 0.292 0.165
## grp_sndrTch -0.523 0.497 0.293 0.339
## grp_rcvrAdm -0.462 0.000 -0.002 -0.005 -0.001
## grp_rcvrOrg -0.278 -0.006 -0.008 -0.013 -0.009 0.260
## grp_rcvrRsr -0.317 0.015 0.002 -0.004 0.007 0.278 0.171
## grp_rcvrTch -0.532 0.001 -0.001 -0.006 0.000 0.475 0.294 0.319
## same 0.006 -0.017 0.001 -0.001 -0.019 -0.017 0.002 0.000 -0.021
mc3 <- glmer(val ~ 1 +
scale(years_on_twitter_sender) + scale(n_tweets_sender) + group_sender +
scale(years_on_twitter_receiver) + scale(n_tweets_receiver) + group_receiver +
same +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(mc3)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula:
## val ~ 1 + scale(years_on_twitter_sender) + scale(n_tweets_sender) +
## group_sender + scale(years_on_twitter_receiver) + scale(n_tweets_receiver) +
## group_receiver + same + (1 | sender) + (1 | receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 40446.0 40600.8 -20207.0 40414.0 117633
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -10.952 -0.131 -0.055 -0.021 74.854
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 4.811 2.193
## sender (Intercept) 2.148 1.466
## Number of obs: 117649, groups: receiver, 343; sender, 242
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -6.56657 0.30480 -21.544 < 2e-16 ***
## scale(years_on_twitter_sender) 0.23896 0.09695 2.465 0.0137 *
## scale(n_tweets_sender) 1.01325 0.11139 9.096 < 2e-16 ***
## group_senderAdministrator 0.26663 0.29009 0.919 0.3580
## group_senderOrganization -0.32156 0.46800 -0.687 0.4920
## group_senderResearcher 0.35424 0.42204 0.839 0.4013
## group_senderTeacher 0.22885 0.24962 0.917 0.3593
## scale(years_on_twitter_receiver) 0.26976 0.12549 2.150 0.0316 *
## scale(n_tweets_receiver) 0.39515 0.12521 3.156 0.0016 **
## group_receiverAdministrator 0.54141 0.35993 1.504 0.1325
## group_receiverOrganization -0.27930 0.54344 -0.514 0.6073
## group_receiverResearcher 0.85909 0.52429 1.639 0.1013
## group_receiverTeacher 0.73673 0.31367 2.349 0.0188 *
## same 0.09268 0.02276 4.071 4.68e-05 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
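Because the conversing models are nested (group predictors, then the same-group term, then the individual covariates), a likelihood-ratio comparison is a reasonable check; note that mc1 at this point holds the group-only model, since the intercept-only fit above was assigned to the same name:
# Likelihood-ratio tests across the nested conversing models
anova(mc1, mc2, mc3)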
mcb <- brm(val ~ 1 +
scale(years_on_twitter_sender) + scale(n_tweets_sender) + group_sender +
scale(years_on_twitter_receiver) + scale(n_tweets_receiver) + group_receiver +
same +
(1|sender) + (1|receiver),
family = 'poisson',
data = tt)
summary(mcb)
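The Bayesian fit prints no output here; after sampling finishes, it is worth checking convergence and fit before interpreting estimates. A minimal sketch using functions exported by brms:
# Rhat values close to 1 indicate the chains have converged
rhat(mcb)
# Posterior predictive check of the outcome distribution
pp_check(mcb)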
dc <- edge %>%
filter(interaction_type == "endorsing")
g <- graph_from_data_frame(dc)
m <- as_adjacency_matrix(g, sparse = FALSE) # sender is row, receiver is column
t <- m %>%
as.data.frame() %>%
rownames_to_column("sender") %>%
gather(receiver, val, -sender) %>%
as_tibble()
tt <- add_users_data(t, users)
tt <- mutate(tt,
code_sender = ifelse(is.na(code_sender), 11, code_sender),
code_receiver = ifelse(is.na(code_receiver), 11, code_receiver)) %>%
# filter(code_sender != 11 & code_receiver !=11) %>%
mutate(group_sender = recode(code_sender,
`1` = "Teacher",
`2` = "Administrator",
`3` = "Administrator",
`4` = "Researcher",
`5` = "Other",
`8` = "Other",
`9` = "Other",
`10` = "Other",
`6` = "Organization",
`7` = "Organization",
`11` = "Other"),
group_receiver = recode(code_receiver,
`1` = "Teacher",
`2` = "Administrator",
`3` = "Administrator",
`4` = "Researcher",
`5` = "Other",
`8` = "Other",
`9` = "Other",
`10` = "Other",
`6` = "Organization",
`7` = "Organization",
`11` = "Other"))
tt$group_receiver <- fct_relevel(as.factor(tt$group_receiver), "Other")
tt$group_sender <- fct_relevel(as.factor(tt$group_sender), "Other")
tt$same <- ifelse(tt$group_sender == tt$group_receiver, 1, 0)
me1 <- glmer(val ~ 1 +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(me1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula: val ~ 1 + (1 | sender) + (1 | receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 1170.4 1190.3 -582.2 1164.4 5473
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.7834 -0.1148 -0.0673 -0.0448 16.8208
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 1.247 1.117
## sender (Intercept) 2.304 1.518
## Number of obs: 5476, groups: receiver, 74; sender, 60
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -5.4311 0.3333 -16.3 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
me1 <- glmer(val ~ 1 +
group_sender +
group_receiver +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(me1)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula: val ~ 1 + group_sender + group_receiver + (1 | sender) + (1 |
## receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 1176.1 1248.8 -577.0 1154.1 5465
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.7789 -0.1160 -0.0665 -0.0451 19.9912
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 1.013 1.006
## sender (Intercept) 2.296 1.515
## Number of obs: 5476, groups: receiver, 74; sender, 60
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -6.0227 0.6156 -9.784 < 2e-16 ***
## group_senderAdministrator -0.1998 0.6947 -0.288 0.77364
## group_senderOrganization -0.1160 1.2097 -0.096 0.92359
## group_senderResearcher 0.1430 0.8431 0.170 0.86534
## group_senderTeacher -0.3790 0.6448 -0.588 0.55667
## group_receiverAdministrator 1.2593 0.4783 2.633 0.00847 **
## group_receiverOrganization 1.2765 0.7366 1.733 0.08309 .
## group_receiverResearcher 1.4320 0.5709 2.508 0.01213 *
## group_receiverTeacher 0.6082 0.4653 1.307 0.19111
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) grp_sA grp_sO grp_sR grp_sT grp_rA grp_rO grp_rR
## grp_sndrAdm -0.520
## grp_sndrOrg -0.286 0.273
## grp_sndrRsr -0.429 0.388 0.225
## grp_sndrTch -0.552 0.510 0.299 0.420
## grp_rcvrAdm -0.480 0.000 0.000 0.000 0.000
## grp_rcvrOrg -0.307 0.000 0.000 0.000 0.000 0.400
## grp_rcvrRsr -0.410 0.000 0.000 0.000 0.000 0.517 0.333
## grp_rcvrTch -0.493 0.000 0.000 0.000 0.000 0.634 0.412 0.531
me2 <- glmer(val ~ 1 +
group_sender +
group_receiver +
same +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(me2)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula: val ~ 1 + group_sender + group_receiver + same + (1 | sender) +
## (1 | receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 1175.2 1254.5 -575.6 1151.2 5464
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.7193 -0.1155 -0.0663 -0.0439 16.9580
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 1.013 1.006
## sender (Intercept) 2.296 1.515
## Number of obs: 5476, groups: receiver, 74; sender, 60
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -6.0324 0.6121 -9.856 < 2e-16 ***
## group_senderAdministrator -0.3148 0.6987 -0.451 0.65230
## group_senderOrganization -0.1243 1.2102 -0.103 0.91821
## group_senderResearcher 0.1189 0.8435 0.141 0.88788
## group_senderTeacher -0.4421 0.6460 -0.684 0.49375
## group_receiverAdministrator 1.2617 0.4785 2.637 0.00837 **
## group_receiverOrganization 1.3101 0.7371 1.777 0.07552 .
## group_receiverResearcher 1.4519 0.5712 2.542 0.01102 *
## group_receiverTeacher 0.4916 0.4705 1.045 0.29605
## same 0.3344 0.1933 1.730 0.08368 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) grp_sA grp_sO grp_sR grp_sT grp_rA grp_rO grp_rR grp_rT
## grp_sndrAdm -0.511
## grp_sndrOrg -0.285 0.273
## grp_sndrRsr -0.426 0.388 0.225
## grp_sndrTch -0.546 0.512 0.299 0.420
## grp_rcvrAdm -0.475 -0.015 -0.004 -0.006 -0.008
## grp_rcvrOrg -0.304 -0.008 -0.005 -0.004 -0.007 0.400
## grp_rcvrRsr -0.406 -0.009 -0.004 -0.010 -0.008 0.517 0.334
## grp_rcvrTch -0.477 0.008 -0.003 -0.003 -0.007 0.625 0.403 0.521
## same -0.020 -0.105 -0.005 -0.020 -0.053 0.012 0.031 0.025 -0.144
me3 <- glmer(val ~ 1 +
scale(years_on_twitter_sender) + scale(n_tweets_sender) + group_sender +
scale(years_on_twitter_receiver) + scale(n_tweets_receiver) + group_receiver +
same +
(1|sender) + (1|receiver),
control=glmerControl(optimizer="bobyqa"),
family = 'poisson',
data = tt)
summary(me3)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: poisson ( log )
## Formula:
## val ~ 1 + scale(years_on_twitter_sender) + scale(n_tweets_sender) +
## group_sender + scale(years_on_twitter_receiver) + scale(n_tweets_receiver) +
## group_receiver + same + (1 | sender) + (1 | receiver)
## Data: tt
## Control: glmerControl(optimizer = "bobyqa")
##
## AIC BIC logLik deviance df.resid
## 1169.6 1275.3 -568.8 1137.6 5460
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -1.7739 -0.1163 -0.0681 -0.0453 16.4855
##
## Random effects:
## Groups Name Variance Std.Dev.
## receiver (Intercept) 0.7202 0.8486
## sender (Intercept) 1.9440 1.3943
## Number of obs: 5476, groups: receiver, 74; sender, 60
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -5.41590 0.59367 -9.123 < 2e-16 ***
## scale(years_on_twitter_sender) 0.13860 0.24328 0.570 0.56887
## scale(n_tweets_sender) 0.63440 0.25803 2.459 0.01395 *
## group_senderAdministrator -0.77999 0.68730 -1.135 0.25643
## group_senderOrganization -0.51334 1.16416 -0.441 0.65925
## group_senderResearcher -0.30623 0.86604 -0.354 0.72364
## group_senderTeacher -0.81576 0.64628 -1.262 0.20686
## scale(years_on_twitter_receiver) -0.03787 0.15792 -0.240 0.81050
## scale(n_tweets_receiver) 0.37935 0.12436 3.050 0.00229 **
## group_receiverAdministrator 0.93067 0.45636 2.039 0.04142 *
## group_receiverOrganization 1.11212 0.66521 1.672 0.09456 .
## group_receiverResearcher 1.32572 0.56028 2.366 0.01797 *
## group_receiverTeacher 0.28634 0.44594 0.642 0.52080
## same 0.33433 0.19332 1.729 0.08374 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
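As with the conversing models, the endorsing models are nested, so the same likelihood-ratio comparison applies (me1 at this point is the group-only model):
# Likelihood-ratio tests across the nested endorsing models
anova(me1, me2, me3)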
meb <- brm(val ~ 1 +
scale(years_on_twitter_sender) + scale(n_tweets_sender) + group_sender +
scale(years_on_twitter_receiver) + scale(n_tweets_receiver) + group_receiver +
same +
(1|sender) + (1|receiver),
family = 'poisson',
data = tt)
summary(meb)
n_days <- orig_pre %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
count(screen_name, day) %>%
count(screen_name) %>%
select(screen_name, pre_n_days = n)
orig_pre <- orig_pre %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
count(screen_name) %>%
select(screen_name, pre_n = n) %>%
left_join(n_days)
n_days <- orig_post %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
count(screen_name, day) %>%
count(screen_name) %>%
select(screen_name, post_n_days = n)
orig_post <- orig_post %>%
mutate(screen_name = tolower(screen_name)) %>%
filter(!is_retweet) %>%
count(screen_name) %>%
select(screen_name, post_n = n) %>%
left_join(n_days)
d_for_influence <- users %>%
left_join(orig_pre) %>%
left_join(orig_post) %>%
distinct(screen_name, .keep_all = TRUE) %>%
select(screen_name, pre_n, pre_n_days, n_tweets, post_n, post_n_days) %>%
filter(n_tweets > 1) %>%
mutate_if(is.numeric, replace_na, 0) # replace missing counts with 0, leaving screen_name untouched
orig_pre <- rename(orig_pre, sender = screen_name)
influence_endorsing <- edge %>%
filter(interaction_type == "endorsing") %>%
count(sender, receiver) %>%
left_join(orig_pre) %>%
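# exposure: number of endorsing interactions with each sender, weighted by
# that sender's tweet count in the year before the focal year (pre_n)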
mutate(exposure = n * pre_n) %>%
group_by(receiver) %>%
summarize(exposure_sum_end = sum(exposure, na.rm = TRUE)) %>%
rename(screen_name = receiver) %>%
right_join(d_for_influence) %>%
mutate(exposure_sum_end = replace_na(exposure_sum_end, 0)) %>%
left_join(users) %>%
mutate(group= ifelse(group %in% c("Other", "Unclear", "Uncoded"), "Other", group))
influence_conversing <- edge %>%
filter(interaction_type == "conversing") %>%
count(sender, receiver) %>%
left_join(orig_pre) %>%
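# exposure: number of conversing interactions with each sender, weighted by
# that sender's tweet count in the year before the focal year (pre_n)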
mutate(exposure = n * pre_n) %>%
group_by(receiver) %>%
summarize(exposure_sum_conv = sum(exposure, na.rm = TRUE)) %>%
rename(screen_name = receiver) %>%
right_join(d_for_influence) %>%
mutate(exposure_sum_conv = replace_na(exposure_sum_conv, 0)) %>%
left_join(users) %>%
mutate(group= ifelse(group %in% c("Other", "Unclear", "Uncoded"), "Other", group))
influence <- influence_endorsing %>%
left_join(influence_conversing) %>%
left_join(users) %>%
mutate(group = ifelse(group %in% c("Other", "Unclear", "Uncoded"), "Other", group))
influence$group <- fct_relevel(as.factor(influence$group), "Other")
influence %>%
select(pre_n,
pre_n_days,
post_n,
post_n_days,
n_tweets,
n_days,
exposure_sum_end,
exposure_sum_conv,
years_on_twitter) %>%
corrr::correlate() %>%
corrr::shave() %>%
corrr::fashion()
## rowname pre_n pre_n_days post_n post_n_days n_tweets n_days
## 1 pre_n
## 2 pre_n_days .72
## 3 post_n .86 .56
## 4 post_n_days .40 .45 .70
## 5 n_tweets .92 .61 .94 .56
## 6 n_days .51 .63 .67 .79 .69
## 7 exposure_sum_end -.01 .01 .07 .17 .11 .22
## 8 exposure_sum_conv .04 .20 .17 .42 .21 .49
## 9 years_on_twitter .03 .08 .01 .05 .06 .12
## exposure_sum_end exposure_sum_conv years_on_twitter
## 1
## 2
## 3
## 4
## 5
## 6
## 7
## 8 .57
## 9 .00 .18
influence <- mutate(influence,
code = ifelse(is.na(code), 11, code)) %>%
mutate(code_category = recode(code,
`1` = "Teacher",
`2` = "Administrator",
`3` = "Administrator",
`4` = "Researcher",
`5` = "Other",
`8` = "Other",
`9` = "Other",
`10` = "Unclear",
`6` = "Organization",
`7` = "Organization",
`11` = "Unclear")) %>%
mutate(code_category = as.factor(code_category),
code_category = fct_relevel(code_category, "Researcher"))
m0 <- glm(post_n ~ 1,
data = influence,
family = 'poisson')
summary(m0)
##
## Call:
## glm(formula = post_n ~ 1, family = "poisson", data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -6.982 -6.982 -6.982 -2.469 74.711
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.19339 0.01286 248.3 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423 on 247 degrees of freedom
## Residual deviance: 23423 on 247 degrees of freedom
## AIC: 23934
##
## Number of Fisher Scoring iterations: 7
m1 <- glm(post_n ~ 1 +
code_category,
data = influence,
family = 'poisson')
summary(m1)
##
## Call:
## glm(formula = post_n ~ 1 + code_category, family = "poisson",
## data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -9.530 -7.134 -4.979 -2.482 66.041
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.31952 0.04613 71.965 < 2e-16 ***
## code_categoryAdministrator 0.49633 0.05043 9.842 < 2e-16 ***
## code_categoryOrganization -0.20771 0.07451 -2.788 0.00531 **
## code_categoryOther -0.80240 0.07078 -11.337 < 2e-16 ***
## code_categoryTeacher -0.08293 0.05046 -1.644 0.10025
## code_categoryUnclear -2.16806 0.09737 -22.267 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423 on 247 degrees of freedom
## Residual deviance: 21174 on 242 degrees of freedom
## AIC: 21695
##
## Number of Fisher Scoring iterations: 7
m2 <- glm(post_n ~ 1 +
scale(n_tweets) +
scale(years_on_twitter) +
scale(n_days) +
code_category,
data = influence,
family = 'poisson')
summary(m2)
##
## Call:
## glm(formula = post_n ~ 1 + scale(n_tweets) + scale(years_on_twitter) +
## scale(n_days) + code_category, family = "poisson", data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -9.5813 -3.9681 -3.0296 0.0528 22.4589
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.538328 0.053685 47.282 < 2e-16 ***
## scale(n_tweets) 0.121054 0.004479 27.030 < 2e-16 ***
## scale(years_on_twitter) -0.070628 0.016677 -4.235 2.28e-05 ***
## scale(n_days) 0.748193 0.010513 71.169 < 2e-16 ***
## code_categoryAdministrator -0.008767 0.054661 -0.160 0.873
## code_categoryOrganization -0.306031 0.076294 -4.011 6.04e-05 ***
## code_categoryOther 0.096083 0.076164 1.262 0.207
## code_categoryTeacher -0.227305 0.055881 -4.068 4.75e-05 ***
## code_categoryUnclear -1.099022 0.103011 -10.669 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423.1 on 247 degrees of freedom
## Residual deviance: 5733.8 on 239 degrees of freedom
## AIC: 6260.6
##
## Number of Fisher Scoring iterations: 7
m2 <- glm(post_n ~ 1 +
scale(years_on_twitter) +
scale(n_tweets) +
scale(n_days) +
scale(pre_n) +
code_category,
data = influence,
family = 'poisson')
summary(m2)
##
## Call:
## glm(formula = post_n ~ 1 + scale(years_on_twitter) + scale(n_tweets) +
## scale(n_days) + scale(pre_n) + code_category, family = "poisson",
## data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -10.665 -3.872 -2.704 -0.306 21.250
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.43429 0.05460 44.587 < 2e-16 ***
## scale(years_on_twitter) -0.05316 0.01673 -3.178 0.001485 **
## scale(n_tweets) 0.49234 0.03568 13.798 < 2e-16 ***
## scale(n_days) 0.64336 0.01448 44.436 < 2e-16 ***
## scale(pre_n) -0.31536 0.02993 -10.537 < 2e-16 ***
## code_categoryAdministrator 0.04431 0.05507 0.805 0.421008
## code_categoryOrganization -0.29306 0.07628 -3.842 0.000122 ***
## code_categoryOther 0.17993 0.07638 2.356 0.018488 *
## code_categoryTeacher -0.06530 0.05740 -1.138 0.255298
## code_categoryUnclear -0.99286 0.10351 -9.592 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423.1 on 247 degrees of freedom
## Residual deviance: 5619.2 on 238 degrees of freedom
## AIC: 6148.1
##
## Number of Fisher Scoring iterations: 7
m3 <- glm(post_n ~ 1 +
scale(years_on_twitter) +
scale(n_tweets) +
scale(n_days) +
scale(pre_n) +
code_category +
exposure_sum_conv,
data = influence,
family = 'poisson')
summary(m3)
##
## Call:
## glm(formula = post_n ~ 1 + scale(years_on_twitter) + scale(n_tweets) +
## scale(n_days) + scale(pre_n) + code_category + exposure_sum_conv,
## family = "poisson", data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -8.7126 -3.8042 -2.7984 -0.4321 21.9750
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.196e+00 5.886e-02 37.319 < 2e-16 ***
## scale(years_on_twitter) -9.346e-02 1.692e-02 -5.524 3.31e-08 ***
## scale(n_tweets) 5.067e-01 3.503e-02 14.466 < 2e-16 ***
## scale(n_days) 6.092e-01 1.463e-02 41.632 < 2e-16 ***
## scale(pre_n) -3.070e-01 2.946e-02 -10.421 < 2e-16 ***
## code_categoryAdministrator 1.766e-01 5.596e-02 3.156 0.0016 **
## code_categoryOrganization 8.420e-05 8.059e-02 0.001 0.9992
## code_categoryOther 3.980e-01 7.920e-02 5.024 5.05e-07 ***
## code_categoryTeacher 1.137e-01 5.972e-02 1.903 0.0570 .
## code_categoryUnclear -7.916e-01 1.054e-01 -7.513 5.77e-14 ***
## exposure_sum_conv 1.643e-05 1.394e-06 11.785 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423.1 on 247 degrees of freedom
## Residual deviance: 5487.4 on 237 degrees of freedom
## AIC: 6018.3
##
## Number of Fisher Scoring iterations: 7
m4 <- glm(post_n ~ 1 +
scale(years_on_twitter) +
scale(n_tweets) +
scale(n_days) +
scale(pre_n) +
code_category +
exposure_sum_end,
data = influence,
family = 'poisson')
summary(m4)
##
## Call:
## glm(formula = post_n ~ 1 + scale(years_on_twitter) + scale(n_tweets) +
## scale(n_days) + scale(pre_n) + code_category + exposure_sum_end,
## family = "poisson", data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -9.2262 -3.7847 -2.7177 -0.1993 21.9250
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.279e+00 5.910e-02 38.567 < 2e-16 ***
## scale(years_on_twitter) -3.872e-02 1.679e-02 -2.306 0.0211 *
## scale(n_tweets) 4.984e-01 3.566e-02 13.976 < 2e-16 ***
## scale(n_days) 6.411e-01 1.439e-02 44.552 < 2e-16 ***
## scale(pre_n) -3.160e-01 2.998e-02 -10.540 < 2e-16 ***
## code_categoryAdministrator 1.362e-01 5.653e-02 2.410 0.0159 *
## code_categoryOrganization -1.432e-01 7.926e-02 -1.807 0.0708 .
## code_categoryOther 3.333e-01 7.946e-02 4.195 2.73e-05 ***
## code_categoryTeacher 6.598e-02 6.070e-02 1.087 0.2771
## code_categoryUnclear -8.324e-01 1.060e-01 -7.851 4.14e-15 ***
## exposure_sum_end 5.807e-04 7.432e-05 7.814 5.54e-15 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423.1 on 247 degrees of freedom
## Residual deviance: 5562.8 on 237 degrees of freedom
## AIC: 6093.6
##
## Number of Fisher Scoring iterations: 7
m5 <- glm(post_n ~ 1 +
scale(years_on_twitter) +
scale(n_tweets) +
scale(n_days) +
scale(pre_n) +
code_category +
exposure_sum_conv +
exposure_sum_end,
data = influence,
family = 'poisson')
summary(m5)
##
## Call:
## glm(formula = post_n ~ 1 + scale(years_on_twitter) + scale(n_tweets) +
## scale(n_days) + scale(pre_n) + code_category + exposure_sum_conv +
## exposure_sum_end, family = "poisson", data = influence)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -8.7508 -3.7990 -2.7934 -0.4234 21.9990
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 2.193e+00 5.963e-02 36.779 < 2e-16 ***
## scale(years_on_twitter) -9.139e-02 1.790e-02 -5.105 3.30e-07 ***
## scale(n_tweets) 5.071e-01 3.507e-02 14.460 < 2e-16 ***
## scale(n_days) 6.097e-01 1.471e-02 41.451 < 2e-16 ***
## scale(pre_n) -3.077e-01 2.953e-02 -10.418 < 2e-16 ***
## code_categoryAdministrator 1.786e-01 5.625e-02 3.175 0.0015 **
## code_categoryOrganization 1.775e-03 8.075e-02 0.022 0.9825
## code_categoryOther 4.017e-01 7.994e-02 5.026 5.01e-07 ***
## code_categoryTeacher 1.173e-01 6.066e-02 1.935 0.0530 .
## code_categoryUnclear -7.870e-01 1.062e-01 -7.411 1.26e-13 ***
## exposure_sum_conv 1.601e-05 1.828e-06 8.760 < 2e-16 ***
## exposure_sum_end 3.540e-05 1.003e-04 0.353 0.7242
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 23423.1 on 247 degrees of freedom
## Residual deviance: 5487.2 on 236 degrees of freedom
## AIC: 6020.1
##
## Number of Fisher Scoring iterations: 7
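These single-level models are plain Poisson GLMs, so it is worth checking for overdispersion before leaning on the standard errors; a minimal sketch of the Pearson dispersion ratio for the final model (values much larger than 1 suggest a quasi-Poisson or negative binomial specification would be more conservative):
# Pearson chi-squared divided by residual df; values near 1 indicate no overdispersion
sum(residuals(m5, type = "pearson")^2) / df.residual(m5)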
sessionInfo()
## R version 3.5.3 (2019-03-11)
## Platform: x86_64-apple-darwin15.6.0 (64-bit)
## Running under: macOS Mojave 10.14.2
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] brms_2.8.0 Rcpp_1.0.1 rTAGS_0.1.0
## [4] googlesheets_0.3.0 rtweet_0.6.9 sjstats_0.17.3
## [7] lme4_1.1-21 Matrix_1.2-15 igraph_1.2.2
## [10] forcats_0.3.0 stringr_1.4.0 dplyr_0.8.0.1
## [13] purrr_0.3.2 readr_1.3.1 tidyr_0.8.3
## [16] tibble_2.1.1 ggplot2_3.1.1 tidyverse_1.2.1
##
## loaded via a namespace (and not attached):
## [1] TH.data_1.0-10 minqa_1.2.4 colorspace_1.4-1
## [4] modeltools_0.2-22 ggridges_0.5.1 rsconnect_0.8.13
## [7] sjlabelled_1.0.16 estimability_1.3 snakecase_0.9.2
## [10] markdown_0.9 base64enc_0.1-3 rstudioapi_0.9.0
## [13] rstan_2.18.2 glmmTMB_0.2.3 DT_0.5
## [16] fansi_0.4.0 mvtnorm_1.0-8 lubridate_1.7.4
## [19] coin_1.2-2 xml2_1.2.0 bridgesampling_0.6-0
## [22] codetools_0.2-16 splines_3.5.3 knitr_1.21
## [25] shinythemes_1.1.2 sjmisc_2.7.7 zeallot_0.1.0
## [28] bayesplot_1.6.0 jsonlite_1.6 nloptr_1.2.1
## [31] broom_0.5.2 shiny_1.2.0 compiler_3.5.3
## [34] httr_1.4.0 emmeans_1.3.2 backports_1.1.4
## [37] assertthat_0.2.1 lazyeval_0.2.2 cli_1.1.0
## [40] later_0.8.0 prettyunits_1.0.2 htmltools_0.3.6
## [43] tools_3.5.3 coda_0.19-2 gtable_0.3.0
## [46] glue_1.3.1 corrr_0.3.0.9000 reshape2_1.4.3
## [49] cellranger_1.1.0 vctrs_0.1.0 nlme_3.1-137
## [52] crosstalk_1.0.0 xfun_0.4 ps_1.3.0
## [55] rvest_0.3.2 miniUI_0.1.1.1 mime_0.6
## [58] gtools_3.8.1 stringdist_0.9.5.1 MASS_7.3-51.1
## [61] zoo_1.8-4 scales_1.0.0 colourpicker_1.0
## [64] hms_0.4.2 promises_1.0.1 Brobdingnag_1.2-6
## [67] parallel_3.5.3 sandwich_2.5-0 inline_0.3.15
## [70] shinystan_2.5.0 pwr_1.2-2 TMB_1.7.15
## [73] yaml_2.2.0 gridExtra_2.3 StanHeaders_2.18.1
## [76] loo_2.1.0 stringi_1.4.3 dygraphs_1.1.1.6
## [79] pkgbuild_1.0.2 boot_1.3-20 rlang_0.3.4
## [82] pkgconfig_2.0.2 matrixStats_0.54.0 evaluate_0.13
## [85] lattice_0.20-38 prediction_0.3.6.2 labeling_0.3
## [88] rstantools_1.5.1 htmlwidgets_1.3 processx_3.2.1
## [91] tidyselect_0.2.5 plyr_1.8.4 magrittr_1.5
## [94] R6_2.4.0 generics_0.0.2 multcomp_1.4-8
## [97] pillar_1.4.0 haven_2.0.0 withr_2.1.2
## [100] xts_0.11-2 survival_2.43-3 abind_1.4-5
## [103] modelr_0.1.2 crayon_1.3.4 utf8_1.1.4
## [106] rmarkdown_1.11 grid_3.5.3 readxl_1.2.0
## [109] data.table_1.12.2 callr_3.1.1 threejs_0.3.1
## [112] digest_0.6.19 xtable_1.8-3 httpuv_1.5.1
## [115] stats4_3.5.3 munsell_0.5.0 shinyjs_1.0