## add 'developer/' to packages to be installed from github
x <- c("RColorBrewer", "corrplot", "ggplot2", "readxl", "ranger", "caret", "pbapply", "viridis", "MCMCglmm", "MuMIn", "MASS", "smacof", "vegan", "kableExtra", "tidyr", "knitr", "tuneRanger", "brms", "bayestestR", "loo", "bayesplot", "ggalt")

aa <- lapply(x, function(y) {

  # get package name
  pkg <- strsplit(y, "/")[[1]]
  pkg <- pkg[length(pkg)]

  # check if installed, if not then install
  if (!pkg %in% installed.packages()[, "Package"]) {
    if (grepl("/", y)) devtools::install_github(y, force = TRUE) else
      install.packages(y)
  }

  # load package
  try(require(pkg, character.only = TRUE), silent = TRUE)
})
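The loop above treats any entry containing "/" as a GitHub reference of the form "developer/package": such entries are installed with devtools::install_github(), plain names with install.packages(), and only the part after the last "/" is passed to require(). A minimal sketch of how a GitHub-style entry would be parsed ("maRce10/warbleR" is a hypothetical illustration, not a package used in this analysis):

# hypothetical GitHub-style entry, shown only to illustrate the parsing in the loop above
y <- "maRce10/warbleR"
pkg <- strsplit(y, "/")[[1]]
pkg <- pkg[length(pkg)]   # "warbleR": the name checked against installed.packages() and loaded
grepl("/", y)             # TRUE, so devtools::install_github(y) would be used instead of install.packages(y)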
cols <- viridis(10, alpha = 0.6)
extract_proximity_oob <- function(fit, olddata) {

  pred = predict(fit, olddata, type = "terminalNodes")$predictions
  prox = matrix(NA, nrow(pred), nrow(pred))
  ntree = ncol(pred)
  n = nrow(prox)

  if (is.null(fit$inbag.counts)) {
    stop("call ranger with keep.inbag = TRUE")
  }

  # Get inbag counts
  inbag = simplify2array(fit$inbag.counts)

  for (i in 1:n) {
    for (j in 1:n) {
      # Use only trees where both obs are OOB
      tree_idx = inbag[i, ] == 0 & inbag[j, ] == 0
      prox[i, j] = sum(pred[i, tree_idx] == pred[j, tree_idx]) / sum(tree_idx)
    }
  }

  return(prox)
}
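extract_proximity_oob() returns, for every pair of observations, the proportion of trees in which both observations are out-of-bag and fall in the same terminal node, which is why the forest must be fitted with keep.inbag = TRUE. A minimal usage sketch on a toy dataset (iris is used here purely as an illustration, not as part of this analysis):

# illustrative call only; the analysis applies the function to the tuned forests fitted below
toy.rf <- ranger(Species ~ ., data = iris, num.trees = 500, keep.inbag = TRUE)
toy.prox <- extract_proximity_oob(fit = toy.rf, olddata = iris)
dim(toy.prox)  # 150 x 150 matrix of pairwise OOB proximities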
## find inflection points in z vs distance
traj.dat <- read.csv("./data/raw/trajectories pooled data.csv", stringsAsFactors = FALSE)

# use butterworth filtered height
traj.dat$Z <- traj.dat$bw.Z

traj.list <- split(traj.dat, f = traj.dat$file)

out <- lapply(traj.list, function(Y) {

  ts <- Y$Z

  # a <- c(1:10, 9:0)
  # as.numeric(c(FALSE, diff(diff(a) > 0) == -1))
  # count up-down inflections
  Y$inflections <- as.numeric(c(FALSE, diff(diff(ts) > 0) == -1, FALSE))

  Y$infl.time <- ifelse(Y$inflections > 0, Y$time, NA)
  Y$infl.dist <- ifelse(Y$inflections > 0, Y$distance, NA)
  Y$infl.height <- ifelse(Y$inflections > 0, Y$Z, NA)

  return(Y)
})

traj.infl <- do.call(rbind, out)
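The inflection counter inside the lapply() call marks positions where the sign of the height increments switches from rising to falling (up-down inflections, i.e. local maxima in Z). A worked version of the commented toy example (illustrative only, not analysis data):

# the toy vector rises to 10 and then falls, so it contains a single up-down inflection
a <- c(1:10, 9:0)
as.numeric(c(FALSE, diff(diff(a) > 0) == -1, FALSE))  # a single 1, at the position of the peak (a == 10)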
# head(traj.infl)

dscnt.cutoff <- -0.11

summ.l <- lapply(unique(traj.infl$file), function(x){

  X <- traj.infl[traj.infl$file == x, ]

  # distance to entry at last inflection
  dist.last.infl <- X$distance[which.max(X$infl.time)]

  # height to entry at last inflection
  height.last.infl <- X$Z[which.max(X$infl.time)]

  # mean speed after last inflection
  speed.last.infl <- mean(X$speed[which.max(X$infl.time):nrow(X)], na.rm = TRUE)

  # mean bw speed after last inflection
  bw.speed.last.infl <- mean(X$bw.speed[which.max(X$infl.time):nrow(X)], na.rm = TRUE)

  # mean acceleration after last inflection
  accel.last.infl <- mean(X$acceleration[which.max(X$infl.time):nrow(X)], na.rm = TRUE)

  # mean bw acceleration after last inflection
  bw.accel.last.infl <- mean(X$bw.acceleration[which.max(X$infl.time):nrow(X)], na.rm = TRUE)

  # mean angle in x,y after last inflection
  xy.angle.last.infl <- mean(X$ang.xy[(which.max(X$infl.time) - 3):nrow(X)], na.rm = TRUE)

  # mean angle in y,z after last inflection
  yz.angle.last.infl <- mean(X$ang.yz[(which.max(X$infl.time) - 3):nrow(X)], na.rm = TRUE)

  # distance to entry at start of descent phase
  dist.str.dscnt <- min(X$distance[X$time >= dscnt.cutoff], na.rm = TRUE)

  # height to entry at start of descent phase
  height.strt.dscnt <- X$Z[which(X$time >= dscnt.cutoff)[1]]

  # mean speed during descent phase
  speed.dscnt <- mean(X$speed[X$time >= dscnt.cutoff], na.rm = TRUE)

  # mean bw speed during descent phase
  bw.speed.dscnt <- mean(X$bw.speed[X$time >= dscnt.cutoff], na.rm = TRUE)

  # mean acceleration at descent phase
  accel.dscnt <- mean(X$acceleration[X$time >= dscnt.cutoff], na.rm = TRUE)

  # mean bw acceleration at descent phase
  bw.accel.dscnt <- mean(X$bw.acceleration[X$time >= dscnt.cutoff], na.rm = TRUE)

  # mean angle in x,y during descent phase
  xy.angle.dscnt <- mean(X$ang.xy[(which(X$time >= dscnt.cutoff) - 3)[1]:nrow(X)], na.rm = TRUE)

  # mean angle in y,z during descent phase
  yz.angle.dscnt <- mean(X$ang.yz[(which(X$time >= dscnt.cutoff) - 3)[1]:nrow(X)], na.rm = TRUE)

  # number of inflections at descent
  num.infl.dscnt <- sum(X$inflections[X$time >= dscnt.cutoff], na.rm = TRUE)

  df <- data.frame(event = x,
                   leave.type = X$leave.type[1],
                   dist.last.infl,
                   height.last.infl,
                   bw.speed.last.infl,
                   bw.accel.last.infl,
                   xy.angle.last.infl,
                   yz.angle.last.infl,
                   dist.str.dscnt,
                   height.strt.dscnt,
                   bw.speed.dscnt,
                   bw.accel.dscnt,
                   num.infl.dscnt,
                   xy.angle.dscnt,
                   yz.angle.dscnt)

  return(df)
})

summ.traj <- do.call(rbind, summ.l)
write.csv(summ.traj, "./data/processed/flying_trajectory_descriptors.csv", row.names = FALSE)
summ.traj <- read.csv("./data/processed/flying_trajectory_descriptors.csv", stringsAsFactors = FALSE)

summ.traj2 <- summ.traj[summ.traj$leave.type < 3, -1]

summ.traj2$leave.type <- as.factor(summ.traj2$leave.type)
# run random forest
pca.trans <- princomp(summ.traj2[, -1], cor = TRUE)

trans.traj <- preProcess(summ.traj2[, -1], method = c("center", "scale", "BoxCox", "corr"))

summ.traj3 <- predict(trans.traj, summ.traj2)

summ.traj3$leave.type <- as.numeric(summ.traj3$leave.type)
summ.traj3$leave.type[summ.traj3$leave.type == 1] <- "Truncate"
summ.traj3$leave.type[summ.traj3$leave.type == 2] <- "Acuminate"
summ.traj3$leave.type <- as.factor(summ.traj3$leave.type)
print("variables")
names(summ.traj3)
# tune random forest
rf_task <- makeClassifTask(data = summ.traj3, target = "leave.type")

# Estimate runtime
estimateTimeTuneRanger(rf_task)

# Tuning
tuning.results <- tuneRanger(rf_task, measure = list(multiclass.brier), num.trees = 10000, num.threads = 10, iters = 1000, save.file.path = NULL, show.info = FALSE)

# Model with the new tuned hyperparameters
rf <- ranger(leave.type ~ ., data = summ.traj3, num.trees = 10000, importance = "impurity", keep.inbag = TRUE, mtry = tuning.results$recommended.pars$mtry, min.node.size = tuning.results$recommended.pars$min.node.size, sample.fraction = tuning.results$recommended.pars$sample.fraction)

rf.error <- rf$prediction.error

pboptions(type = "timer")

rnd.error <- pbsapply(1:10000, cl = 10, function(x){

  Y <- summ.traj3
  Y$leave.type <- sample(Y$leave.type)

  rf <- ranger(leave.type ~ ., data = Y, num.trees = 10000, importance = "impurity", mtry = tuning.results$recommended.pars$mtry, min.node.size = tuning.results$recommended.pars$min.node.size, sample.fraction = tuning.results$recommended.pars$sample.fraction)

  rf.error <- rf$prediction.error

  return(rf.error)
})

prx.mat <- extract_proximity_oob(fit = rf, olddata = summ.traj3)

diss <- dist(t(prx.mat)) ## Euclidean distances
fit <- mds(diss, ndim = 2) ## 2D interval MDS

set.seed(123)
rf.mds <- bootmds(fit, prx.mat, method.dat = "euclidean", nrep = 50)

rf.random.res <- list(rf.error = rf.error, rnd.error = rnd.error, rf.model = rf, prx.mat = prx.mat, rf.mds = rf.mds, tuning.results = tuning.results, leave.type = summ.traj3$leave.type)

saveRDS(rf.random.res, "./data/processed/Random forest randomization test resutls.RDS")

rf.random.res <- readRDS("./data/processed/Random forest randomization test resutls.RDS")
print("Tunning parameters")
## [1] "Tunning parameters"
rf.random.res$tuning.results
## Recommended parameter settings:
## mtry min.node.size sample.fraction
## 1 1 7 0.8673217
## Results:
## multiclass.brier exec.time
## 1 0.3881766 0.1731429
print("Confusion matrix")
## [1] "Confusion matrix"
rf.random.res$rf.model$confusion.matrix
## predicted
## true Acuminate Truncate
## Acuminate 21 3
## Truncate 7 7
print("Random forest error")
## [1] "Random forest error"
(rf.error <- rf.random.res$rf.error)
## [1] 0.2631579
rnd.error <- rf.random.res$rnd.error

hist(rnd.error, main = " Random error 10000 iterations", col = cols[5], xlab = "Classification error")
abline(v = rf.error, col = cols[10], lty = 2, lwd = 3)
#p value
print("p-value")
## [1] "p-value"
sum(rnd.error < rf.error) / length(rnd.error)
## [1] 0.0067
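The p-value reported above is the proportion of permutation runs in which shuffling the leaf-type labels produced a forest with a lower out-of-bag classification error than the observed model; with N = 10000 randomizations, as in the code above, this corresponds to \[p = \frac{1}{N}\sum_{i=1}^{N} I\left(e^{rand}_{i} < e_{obs}\right)\]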
mds.shp <- data.frame(leave.type = as.character(rf.random.res$leave.type), rf.random.res$rf.mds$conf, stringsAsFactors = FALSE)
ggplot(mds.shp, aes(x = D1, y = D2, color = leave.type, shape = leave.type)) +
geom_point(size = 7)+
scale_shape_manual(values = c(15, 19)) +
scale_color_manual(values = viridis(10, alpha = 0.6)[c(4, 8)]) +
theme_classic() +
labs(x = "Dimension 1", y = "Dimension 2", color = "Leaf type", shape = "Leaf type") +
theme(text = element_text(size=20), legend.position = c(0.15, 0.9), legend.text = element_text(face="italic"))
dis <- vegdist(summ.traj3[, -1], method = "euclidean")

## Calculate multivariate dispersions
mod <- betadisper(dis, summ.traj3$leave.type)
anova(mod)
## Analysis of Variance Table
##
## Response: Distances
## Df Sum Sq Mean Sq F value Pr(>F)
## Groups 1 11.775 11.7750 6.8016 0.01318 *
## Residuals 36 62.324 1.7312
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
pcoas <- as.data.frame(mod$vectors)
pcoas$leaf.type <- summ.traj3$leave.type
# original graph
# plot(mod, main = "")
# converted to ggplot
ggplot(pcoas, aes(x = PCoA1, y = PCoA2, color = leaf.type)) +
geom_point(size = 3) +
scale_color_manual(values = viridis(10, alpha = 0.6)[c(4, 8)]) +
scale_fill_manual(values = viridis(10, alpha = 0.6)[c(4, 8)]) +
theme_classic() +
labs(x = "MDS dimension 1", y = "MDS dimension 2", color = "Leaf type") +
geom_encircle(aes(fill = leaf.type), s_shape = 1, expand = 0,
alpha = 0.2, show.legend = FALSE)
# ggsave(filename = "~/Dropbox/Manuscripts/Entrada refugio Thyroptera/Figuras/multivariate_dispersion_flight.jpeg", dpi = 300, width = 6, height = 4)
lvs.msrs <- readxl::read_excel("./data/raw/Leaf measures.xls")

cm <- cor(lvs.msrs[, sapply(lvs.msrs, is.numeric)])

corrplot.mixed(cm, lower = "number", upper = "ellipse", tl.col = "black", lower.col = cols, upper.col = cols, tl.pos = "lt", tl.cex = 1.4)

lvs.msrs <- readxl::read_excel("./data/raw/Leaf measures.xls")

pp <- preProcess(x = as.matrix(lvs.msrs[, -1]), method = c("center", "scale", "BoxCox", "corr"))

trans.lvs.msrs <- data.frame(Species = lvs.msrs$Species, predict(pp, as.matrix(lvs.msrs[, -1])), stringsAsFactors = FALSE)

trans.lvs.msrs$Species[trans.lvs.msrs$Species == 2] <- "Calathea lutea"
# trans.lvs.msrs$rdn.var <- rnorm(nrow(trans.lvs.msrs))

trans.lvs.msrs$Species <- as.factor(trans.lvs.msrs$Species)
# tune random forest
rf_task <- makeClassifTask(data = trans.lvs.msrs, target = "Species")

# Estimate runtime
estimateTimeTuneRanger(rf_task)

# Tuning
tuning.results <- tuneRanger(rf_task, measure = list(multiclass.brier), num.trees = 10000, num.threads = 10, iters = 1000, save.file.path = NULL, show.info = FALSE)

# Model with the new tuned hyperparameters
lvs.rf <- ranger(Species ~ ., data = trans.lvs.msrs, num.trees = 10000, importance = "impurity", keep.inbag = TRUE, mtry = tuning.results$recommended.pars$mtry, min.node.size = tuning.results$recommended.pars$min.node.size, sample.fraction = tuning.results$recommended.pars$sample.fraction)

lvs.rf <- ranger(as.factor(Species) ~ ., data = trans.lvs.msrs, num.trees = 10000, importance = "permutation", keep.inbag = TRUE)

imp_pvals <- importance_pvalues(x = lvs.rf, formula = as.factor(Species) ~ ., data = trans.lvs.msrs, method = "altman", num.permutations = 1000)

prx.mat <- extract_proximity_oob(fit = lvs.rf, olddata = trans.lvs.msrs)

diss <- dist(t(prx.mat)) ## Euclidean distances
fit <- mds(diss, ndim = 2) ## 2D interval MDS

set.seed(123)
rf.mds <- bootmds(fit, prx.mat, method.dat = "euclidean", nrep = 50)

pboptions(type = "timer")

rnd.error <- pbsapply(1:10000, cl = 20, function(x){

  Y <- trans.lvs.msrs
  Y$Species <- sample(Y$Species)

  rf <- ranger(Species ~ ., data = Y, num.trees = 10000, keep.inbag = FALSE, mtry = tuning.results$recommended.pars$mtry, min.node.size = tuning.results$recommended.pars$min.node.size, sample.fraction = tuning.results$recommended.pars$sample.fraction)

  rf.error <- rf$prediction.error

  return(rf.error)
})

rf.random.res <- list(rf.error = lvs.rf$prediction.error, rnd.error = rnd.error, rf.model = lvs.rf, prx.mat = prx.mat, rf.mds = rf.mds, trans.lvs.msrs = trans.lvs.msrs, imp_pvals = imp_pvals, tuning.results = tuning.results)

saveRDS(rf.random.res, "./data/processed/Random forest leaf shape randomization test results.RDS")

rf.random.res <- readRDS("./data/processed/Random forest leaf shape randomization test results.RDS")
print("Tunning parameters")
## [1] "Tunning parameters"
rf.random.res$tuning.results
## Recommended parameter settings:
## mtry min.node.size sample.fraction
## 1 2 2 0.8987571
## Results:
## multiclass.brier exec.time
## 1 0.04946783 0.2914808
print("Confusion matrix")
## [1] "Confusion matrix"
rf.random.res$rf.model$confusion.matrix
## predicted
## true Calathea lutea Heliconia imbricata
## Calathea lutea 25 0
## Heliconia imbricata 2 32
print("Random forest error")
## [1] "Random forest error"
(rf.error <- rf.random.res$rf.error)
## [1] 0.03389831
hist(rf.random.res$rnd.error, main = " Random error 10000 iterations", col = cols[5], xlab = "Classification error", xlim = c(0, 0.8))
abline(v = rf.error, col = cols[9], lty = 2, lwd = 3)
#p value
print("p-value")
## [1] "p-value"
sum(rnd.error < rf.error) / length(rnd.error)
## [1] 0
mds.shp <- data.frame(Species = rf.random.res$trans.lvs.msrs$Species, rf.random.res$rf.mds$conf, stringsAsFactors = FALSE)

mds.shp$Species[mds.shp$Species == 2] <- "Calathea lutea"
ggplot(mds.shp, aes(x = D1, y = D2, color = Species, shape = Species)) +
geom_point(size = 7)+
scale_shape_manual(values = c(15, 19)) +
scale_color_manual(values = viridis(10, alpha = 0.6)[c(4, 8)]) +
theme_classic() +
labs(x = "Dimension 1", y = "Dimension 2") +
theme(text = element_text(size=15), legend.position = c(0.4, 0.9), legend.text = element_text(face="italic"))
# ggsave(filename = "~/Dropbox/Manuscripts/Entrada refugio Thyroptera/Figuras/random_forest_space_leaf_shape.jpeg", dpi = 300, width = 6, height = 4)
imp <- data.frame(var = names(ranger::importance(rf.random.res$rf.model)), imp = ranger::importance(rf.random.res$rf.model), stringsAsFactors = FALSE)

imp <- imp[order(-imp$imp), ]

imp$var <- factor(imp$var, levels = imp$var[order(imp$imp)])

rf.pval <- rf.random.res$imp_pvals
rf.pval <- data.frame(rf.pval, var = rownames(rf.pval), p = ifelse(rf.random.res$imp_pvals[, "pvalue"] < 0.05, "*", ""))
ggplot(imp, aes(x = var, y = imp)) +
geom_bar(stat = "identity", fill = viridis(10)[6] ) +
theme_classic(base_size = 22) +
labs(y = "Importance", x = "Variable") +
scale_x_discrete(labels=c("Roost height", "Leaf length", "Tip length", "Tip length/\ncircumference", "Tip\ncircumference")) +
geom_text(col = "black", size = 10, data = rf.pval, mapping = aes(x = var, y = importance + 0.01, label = p)) +
coord_flip()
# ggsave(filename = "~/Dropbox/Manuscripts/Entrada refugio Thyroptera/Figuras/random_forest_leaf_shape_importance.jpeg", dpi = 300, width = 6, height = 4)
print("Importance p values")
## [1] "Importance p values"
rf.random.res$imp_pvals
## importance pvalue
## Leaf.length 0.013404988 0.173826174
## Tip.length 0.054664114 0.003996004
## Tip.circumference 0.239664183 0.000999001
## Ratio.of.tip.length.to.circumference 0.126814860 0.000999001
## Roost.height 0.002688351 0.409590410
lvs.msrs <- readxl::read_excel("./data/raw/Leaf measures.xls")

lvs.msrs$Species[lvs.msrs$Species == 2] <- "Calathea lutea"

lvs.msrs.gat <- gather(lvs.msrs, key = "variable", value = "value", names(lvs.msrs)[-1])

lvs.msrs.gat$Species <- ifelse(lvs.msrs.gat$Species == "Calathea lutea", "C. lutea", "H. imbricata")

used.vars <- gsub("\\.", " ", names(rf.random.res$trans.lvs.msrs))

lvs.msrs.gat <- lvs.msrs.gat[lvs.msrs.gat$variable %in% used.vars, ]

lvs.msrs.gat$variable[lvs.msrs.gat$variable == "Tip circumference"] <- "Tip\ncircumference"

lvs.msrs.gat$variable[lvs.msrs.gat$variable == "Ratio of tip length to circumference"] <- "Tip length/\ncircumference"

lvs.msrs.gat$variable <- factor(lvs.msrs.gat$variable, levels = c("Leaf length", "Roost height", "Tip length", "Tip\ncircumference", "Tip length/\ncircumference"))

sig <- data.frame(variable = unique(lvs.msrs.gat$variable), label = c("", "*", "*", "*", ""), x = rep(1.5, 5), y = c(0, 34.5, 42, 0.98, 200))

agg.msrs <- aggregate(value ~ Species + variable, data = lvs.msrs.gat, FUN = mean)
ggplot(lvs.msrs.gat, aes(x = Species, y = value)) +
geom_violin(fill =cols[7]) +
geom_point(size = 5, data = agg.msrs, aes(x = Species, y = value), col = "black", pch = 21, fill = "white") +
geom_text(col = "black", size = 6, data = sig, mapping = aes(x = x, y = y, label = label)) +
facet_wrap(~ variable, nrow = 1, scales = "free_y") +
theme_classic() +
labs(y = "Value") +
theme(axis.text.x = element_text(face = "italic", angle = 45, hjust = 1))
# ggsave(filename = "~/Dropbox/Manuscripts/Entrada refugio Thyroptera/Figuras/violin_plots_leaf_shape_parameters.jpeg", dpi = 300, width = 7, height = 3)
## Species preference
sp.pref <- read.csv("leave_preference_data.csv", stringsAsFactors = FALSE)

table(sp.pref$Lugar)
names(sp.pref)

usadas <- sp.pref[, c("him.usadas", "clu.usadas", "otros.usadas")]
disp <- sp.pref[, c("him.disponibles", "clu.disponibles", "otros.disponible")]

names(disp) <- names(usadas) <- c("Him", "Clu", "otros")

pref <- compana(usadas, disp, test = "randomisation")

pref
print(pref$rm, quote = FALSE)

sp.pref <- sp.pref[sp.pref$Lugar %in% c("Esquinas", "Finca Km23", "Lecheria", "Naranjal", "Urena"), ]

### aggregating
sp.pref.agg <- aggregate(cbind(him.usadas, clu.usadas, otros.usadas, him.disponibles, clu.disponibles, otros.disponible) ~ Lugar, data = sp.pref, FUN = sum)

usadas <- sp.pref.agg[, c("him.usadas", "clu.usadas", "otros.usadas")]
disp <- sp.pref.agg[, c("him.disponibles", "clu.disponibles", "otros.disponible")]

names(disp) <- names(usadas) <- c("Him", "Clu", "otros")

pref <- compana(usadas, disp, test = "randomisation", alpha = 0.05, nrep = 10000)

print(pref$rm, quote = FALSE)
(pref)
# without "others"
pref <- compana(usadas[, -3], disp[, -3], test = "randomisation", alpha = 0.05, nrep = 10000)
print(pref$rm, quote = FALSE)
(pref)
df.pref <- data.frame(species = rep(c("H. imbricata", "C. lutea", "Others"), each = 5), site = rep(sp.pref.agg$Lugar, 3), used = unlist(usadas), available = unlist(disp))

agg.df.pref <- aggregate(cbind(used, available) ~ species, data = df.pref, FUN = sum)

agg.df.pref$site <- "All"

df.pref <- rbind(df.pref, agg.df.pref)

df.pref.gg <- data.frame(df.pref[, c("species", "site")], type = rep(c("used", "available"), each = nrow(df.pref)), count = c(df.pref$used, df.pref$available), stringsAsFactors = FALSE, row.names = NULL)

df.pref.gg[order(df.pref.gg$species, df.pref.gg$type, df.pref.gg$site), ]

df.pref.gg$species <- factor(df.pref.gg$species, levels = unique(df.pref.gg$species))
ggplot(df.pref.gg, aes(species, count, fill = type)) +
geom_bar(stat = "identity", position = "dodge") +
scale_fill_manual(values = viridis(10)[c(3, 8)]) +
facet_wrap(~site, scales = "free_y") +
theme_classic() +
labs(x = "Species", y = "Frequency") +
theme(legend.justification=c(0,0), legend.position=c(0.87, 0.8), text = element_text(size=20))
ggsave(filename = "available_and_used_leaves.jpeg", dpi = 100, width = 20 * 1.7, height = 20, units = "cm")
# pref$random.res
<- read.csv("./data/raw/leave_preference_data.csv", stringsAsFactors = FALSE)
sp.pref
$Lugar <- gsub("Urena", "Ureña", sp.pref$Lugar)
sp.pref
<- sp.pref[sp.pref$clu.disponibles != 0 & sp.pref$him.disponibles != 0, ]
sp.pref
<- sp.pref[, c("him.usadas", "clu.usadas", "otros.usadas")]
usadas <- sp.pref[, c("him.disponibles", "clu.disponibles", "otros.disponible")]
disp
<- data.frame(site = sp.pref$Lugar, disp = unlist(disp, use.names = FALSE), used = unlist(usadas, use.names = FALSE), area = sp.pref$area, densidad = sp.pref$Densidad, species = rep(c("Him", "Clu", "other"), each = nrow(sp.pref)))
use.df
# Selecting between with and without interaction
<- use.df
cc.use.df <- cc.use.df[complete.cases(cc.use.df), ]
cc.use.df <- cc.use.df[cc.use.df$species != "other", ]
cc.use.df <- droplevels(cc.use.df)
cc.use.df
levels(cc.use.df$species) <- c("C. lutea", "H. imbricata")
<- cor(cc.use.df[,sapply(cc.use.df, is.numeric)])
cm
corrplot.mixed(cm, lower = "number", upper = "ellipse", tl.col = cols, lower.col = cols, upper.col = cols)
Roost abundance and usage by site:
use.df$site <- gsub(" Km23", "", use.df$site)

plot_df_l <- lapply(unique(use.df$site), function(x){

  X <- use.df[use.df$site == x, ]

  df <- data.frame(Site = x,
                   Species = c("H. imbricata", "C. lutea", "Others"),
                   Type = rep(c("Available", "Used"), each = 3),
                   Frequency = c(sapply(unique(use.df$species), function(y) sum(cc.use.df$disp[cc.use.df$specie == y])), sapply(unique(use.df$species), function(y) sum(cc.use.df$used[cc.use.df$specie == y]))))

  return(df)
})

plot_df <- do.call(rbind, plot_df_l)

all_df <- aggregate(Frequency ~ Species + Type, plot_df, sum)
all_df$Site <- "All"

# plot_df <- rbind(plot_df[plot_df$Site != "Bolivar", ], all_df)
plot_df <- rbind(plot_df, all_df)

plot_df$Species <- factor(plot_df$Species, levels = c("H. imbricata", "C. lutea", "Others"))
ggplot(plot_df, aes(x = Species, y = Frequency, fill = Type, group = Type)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Species", y = "Number of individuals") +
facet_wrap( ~ Site, scales = "free_y", nrow = 2) +
scale_fill_viridis_d(begin = 0.15, end = 0.8, alpha = 0.7) +
theme_classic()+
theme(legend.position = c(0.87, 0.2), axis.text.x = element_text(face = "italic", angle = 45, hjust = 1))
# ggsave(filename = "~/Dropbox/Manuscripts/Entrada refugio Thyroptera/Figuras/roosts_per_site.jpeg", dpi = 300, width = 7, height = 4)
Density by species:
agg.dens <- aggregate(densidad ~ species, data = cc.use.df, mean)
ggplot(cc.use.df, aes(x = species, y = densidad)) + geom_violin(fill =cols[5]) +
geom_point(size = 6, data = agg.dens, fill = "white", pch = 21, col = "black") +
theme_classic() +
labs(y = "Density") +
theme(text = element_text(size=20))
cc.use.df$densidad_sc <- scale(cc.use.df$densidad)
Area and density were highly correlated, so area was excluded.
Variation in density was very similar between C. lutea and H. imbricata, so no interaction between density and species was included.

Bayesian generalized linear mixed models were used to fit binomial models of furled leaf occupancy, with site as a random effect and plant species and density as predictors. We tested three models:
only species as predictor: \[Occupancy \sim species + (1 | site)\]
only density as predictor: \[Occupancy \sim density + (1 | site)\]
both species and density as predictors: \[Occupancy \sim species + density + (1 | site)\]
iterations <- 20000
burnin <- 10000

priors <- c(set_prior("normal(0, 1.5)", class = "Intercept"), set_prior("normal(0, 1.5)", class = "b"))

mod.sp.dns <- brm(used | trials(disp) ~ species + densidad + (1 | site), data = cc.use.df, family = binomial, save_pars = save_pars(all = TRUE), prior = priors, iter = iterations, warmup = burnin, cores = 3, chains = 3, silent = 2, control = list(adapt_delta = 0.99))

check_prior(mod.sp.dns)

mod.sp <- brm(used | trials(disp) ~ species + (1 | site), data = cc.use.df, family = binomial, save_pars = save_pars(all = TRUE), prior = priors, iter = iterations, warmup = burnin, cores = 3, chains = 3, silent = 2, control = list(adapt_delta = 0.99))

mod.dns <- brm(used | trials(disp) ~ densidad + (1 | site), data = cc.use.df, family = binomial, save_pars = save_pars(all = TRUE), prior = priors, iter = iterations, warmup = burnin, cores = 3, chains = 3, silent = 2, control = list(adapt_delta = 0.99))

models <- list(mod.sp.dns = mod.sp.dns, mod.sp = mod.sp, mod.dns = mod.dns)

saveRDS(models, "./data/processed/brms_models_leave_occupancy.RDS")

attach(usage_models <- readRDS("./data/processed/brms_models_leave_occupancy.RDS"))

# calculate average posteriors and model
post_avrg_sp <- posterior_average(mod.sp.dns, mod.sp, weights = "loo")
post_avrg_dns <- posterior_average(mod.sp.dns, mod.dns, weights = "loo")
post_avrg_intcpt <- posterior_average(mod.sp.dns, mod.sp, mod.dns, weights = "loo")

post_avg <- cbind(post_avrg_intcpt[, "b_Intercept"], post_avrg_sp[, "b_speciesHim"], post_avrg_dns[, "b_densidad"])
colnames(post_avg) <- c("b_Intercept", "b_speciesHim", "b_densidad")
print("priors")
## [1] "priors"
check_prior(mod.sp.dns)
## Parameter Prior_Quality
## 1 b_Intercept informative
## 2 b_speciesHim informative
## 3 b_densidad uninformative
describe_prior(mod.sp.dns)
## Parameter Prior_Distribution Prior_Location Prior_Scale Prior_df
## 1 b_Intercept normal 0 1.5 NA
## 2 b_speciesHim normal 0 1.5 NA
## 3 b_densidad normal 0 1.5 NA
## 4 sd_site__Intercept student_t 0 2.5 3
print("model results")
## [1] "model results"
describe_posterior(as.data.frame(post_avg), ci_method = "HDI")
## Summary of Posterior Distribution
##
## Parameter | Median | 95% CI | pd | ROPE | % in ROPE
## -----------------------------------------------------------------------------
## b_Intercept | -2.64 | [-3.18, -2.04] | 100% | [-0.10, 0.10] | 0%
## b_speciesHim | 1.30 | [ 0.93, 1.66] | 100% | [-0.10, 0.10] | 0%
## b_densidad | 3.70e-04 | [-0.01, 0.01] | 53.52% | [-0.10, 0.10] | 100%
Leaf occupancy by species:
landing_model <- brm(Land ~ Apex + (1 | Bat), data = land_data, family = "bernoulli", save_pars = save_pars(all = TRUE), iter = iterations, warmup = burnin, cores = 3, chains = 3, silent = 2)

saveRDS(landing_model, "./data/processed/brms_model_landing.RDS")

landing_model <- readRDS("./data/processed/brms_model_landing.RDS")
print("priors")
## [1] "priors"
check_prior(landing_model)
## Parameter Prior_Quality
## 1 b_Intercept informative
## 2 b_Apextruncate not determinable
describe_prior(landing_model)
## Parameter Prior_Distribution Prior_Location Prior_Scale Prior_df
## 1 b_Intercept student_t 0 2.5 3
## 2 b_Apextruncate uniform NA NA NA
## 3 sd_Bat__Intercept student_t 0 2.5 3
print("model results")
## [1] "model results"
describe_posterior(landing_model, ci_method = "HDI")
## Summary of Posterior Distribution
##
## Parameter | Median | 95% CI | pd | ROPE | % in ROPE | Rhat | ESS
## ---------------------------------------------------------------------------------------------
## (Intercept) | -1.59 | [-3.99, 0.16] | 97.48% | [-0.18, 0.18] | 2.75% | 1.000 | 9669.00
## Apextruncate | 3.28 | [ 1.01, 6.23] | 99.97% | [-0.18, 0.18] | 0% | 1.000 | 11590.00
agg.lat <- aggregate(Time ~ Land + Apex, data = land_data, FUN = mean)
#Plot for time to enter by apex and landing
ggplot(land_data, aes(x=Land, y=Time, fill=Land)) +
geom_violin(alpha = 0.7) +
geom_point(size = 5, data = agg.lat, aes(y = Time, x = Land), col = "black", pch = 21, fill = "white") +
facet_wrap(~Apex) +
theme(strip.text = element_text(face="bold", size=9),
strip.background = element_rect(colour="black",size=1))+
scale_fill_viridis_d(begin = 0.2, end = 0.8, alpha = 0.8) +
labs( x = "Type of landing", y = "Time from landing\nto hiding") + theme(legend.position = "none", panel.background = element_blank(), panel.border = element_rect(colour = "black", fill=NA, size=1)) + theme_classic(base_size = 22)
# ggsave(filename = "~/Dropbox/Manuscripts/Entrada refugio Thyroptera/Figuras/landing_time_by_apex_type_and_landing_type.jpeg", dpi = 300, width = 7, height = 4)
only landing type as predictor: \[Latency \sim landing.type + (1 | individual)\]
only tip type as predictor: \[Latency \sim tip.type + (1 | individual)\]
mod.land <- brm(Time ~ Land + (1 | Bat), data = land_data, save_pars = save_pars(all = TRUE), iter = iterations, warmup = burnin, cores = 3, chains = 3, silent = 2, control = list(adapt_delta = 0.99))

mod.ap <- brm(Time ~ Apex + (1 | Bat), data = land_data, save_pars = save_pars(all = TRUE), iter = iterations, warmup = burnin, cores = 3, chains = 3, silent = 2, control = list(adapt_delta = 0.99))

latency_models <- list(mod.land = mod.land, mod.ap = mod.ap)
saveRDS(latency_models, "./data/processed/brms_models_leaf_entry_timing.RDS")
attach(latency_models <- readRDS("./data/processed/brms_models_leaf_entry_timing.RDS"))
print("priors")
## [1] "priors"
describe_prior(mod.land)
## Parameter Prior_Distribution Prior_Location Prior_Scale Prior_df
## 1 b_Intercept student_t 380 226.8 3
## 2 b_Landodd uniform NA NA NA
## 3 sd_Bat__Intercept student_t 0 226.8 3
## 4 sigma student_t 0 226.8 3
describe_prior(mod.ap)
## Parameter Prior_Distribution Prior_Location Prior_Scale Prior_df
## 1 b_Intercept student_t 380 226.8 3
## 2 b_Apextruncate uniform NA NA NA
## 3 sd_Bat__Intercept student_t 0 226.8 3
## 4 sigma student_t 0 226.8 3
print("model results")
## [1] "model results"
describe_posterior(mod.land, ci_method = "HDI")
## Summary of Posterior Distribution
##
## Parameter | Median | 95% CI | pd | ROPE | % in ROPE | Rhat | ESS
## -------------------------------------------------------------------------------------------------
## (Intercept) | 387.98 | [161.79, 615.13] | 99.92% | [-40.26, 40.26] | 0% | 1.000 | 21397.00
## Landodd | 205.51 | [-88.54, 495.01] | 91.82% | [-40.26, 40.26] | 8.60% | 1.000 | 21443.00
describe_posterior(mod.ap, ci_method = "HDI")
## Summary of Posterior Distribution
##
## Parameter | Median | 95% CI | pd | ROPE | % in ROPE | Rhat | ESS
## ---------------------------------------------------------------------------------------------------
## (Intercept) | 477.07 | [ 248.33, 714.35] | 99.98% | [-40.26, 40.26] | 0% | 1.000 | 19578.00
## Apextruncate | 31.52 | [-245.13, 306.49] | 59.48% | [-40.26, 40.26] | 24.15% | 1.000 | 32938.00
There is a non-significant trend in the latency to hide between odd and normal landings.
There is no effect of leaf shape.
Model diagnostics include: model summary, Rhat values, trace plots, autocorrelation plots, and pairs plots.
color_scheme_set("viridis")
for(x in 2:length(usage_models)){
print(names(usage_models)[x])
mod <- usage_models[[x]]
print("Model Summary:")
print(summary(mod))
print("Rhats:")
print(mcmc_rhat(rhat = rhat(mod)) + yaxis_text(hjust = 0))
print("Traces:")
plot(mod)
print("Autocorrelation:")
print(mcmc_plot(mod, type = "acf"))
print("Pairs:")
if (nrow(mod$prior) > 1)
print(pairs(mod))
}
## [1] "mod.sp"
## [1] "Model Summary:"
## Family: binomial
## Links: mu = logit
## Formula: used | trials(disp) ~ species + (1 | site)
## Data: cc.use.df (Number of observations: 114)
## Samples: 3 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 30000
##
## Group-Level Effects:
## ~site (Number of levels: 6)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.45 0.25 0.14 1.08 1.00 6194 8599
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -2.63 0.28 -3.14 -2.03 1.00 9149 8894
## speciesHim 1.30 0.18 0.94 1.67 1.00 18188 17389
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## [1] "Rhats:"
## [1] "Traces:"
## [1] "Autocorrelation:"
## [1] "Pairs:"
## [1] "mod.dns"
## [1] "Model Summary:"
## Family: binomial
## Links: mu = logit
## Formula: used | trials(disp) ~ densidad + (1 | site)
## Data: cc.use.df (Number of observations: 114)
## Samples: 3 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 30000
##
## Group-Level Effects:
## ~site (Number of levels: 6)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 0.63 0.33 0.25 1.47 1.00 6694 9076
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.44 0.32 -2.09 -0.80 1.00 10030 11001
## densidad -0.00 0.00 -0.01 0.01 1.00 12827 15796
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## [1] "Rhats:"
## [1] "Traces:"
## [1] "Autocorrelation:"
## [1] "Pairs:"
mod <- landing_model
print("Model Summary:")
## [1] "Model Summary:"
print(summary(mod))
## Family: bernoulli
## Links: mu = logit
## Formula: Land ~ Apex + (1 | Bat)
## Data: land_data (Number of observations: 30)
## Samples: 3 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 30000
##
## Group-Level Effects:
## ~Bat (Number of levels: 9)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 1.67 1.22 0.09 4.69 1.00 7508 11966
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept -1.73 1.08 -4.27 0.00 1.00 12039 7805
## Apextruncate 3.45 1.36 1.25 6.65 1.00 14582 8870
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
print("Rhats:")
## [1] "Rhats:"
print(mcmc_rhat(rhat = rhat(mod)) + yaxis_text(hjust = 0))
print("Traces:")
## [1] "Traces:"
plot(mod)
print("Autocorrelation:")
## [1] "Autocorrelation:"
print(mcmc_plot(mod, type = "acf"))
print("Pairs:")
## [1] "Pairs:"
if (nrow(mod$prior) > 1)
print(pairs(mod))
for(x in 1:length(latency_models)){
print(names(latency_models)[x])
mod <- latency_models[[x]]
print("Model Summary:")
print(summary(mod))
print("Rhats:")
print(mcmc_rhat(rhat = rhat(mod)) + yaxis_text(hjust = 0))
print("Traces:")
plot(mod)
print("Autocorrelation:")
print(mcmc_plot(mod, type = "acf"))
print("Pairs:")
if (nrow(mod$prior) > 1)
print(pairs(mod))
}
## [1] "mod.land"
## [1] "Model Summary:"
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: Time ~ Land + (1 | Bat)
## Data: land_data (Number of observations: 30)
## Samples: 3 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 30000
##
## Group-Level Effects:
## ~Bat (Number of levels: 9)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 151.60 109.80 6.75 409.19 1.00 8190 11309
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 388.68 114.16 164.71 619.52 1.00 21850 19255
## Landodd 206.00 148.85 -85.34 499.63 1.00 21572 18507
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 373.27 55.46 279.12 495.03 1.00 17373 18218
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## [1] "Rhats:"
## [1] "Traces:"
## [1] "Autocorrelation:"
## [1] "Pairs:"
## [1] "mod.ap"
## [1] "Model Summary:"
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: Time ~ Apex + (1 | Bat)
## Data: land_data (Number of observations: 30)
## Samples: 3 chains, each with iter = 20000; warmup = 10000; thin = 1;
## total post-warmup samples = 30000
##
## Group-Level Effects:
## ~Bat (Number of levels: 9)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sd(Intercept) 188.56 122.93 9.57 459.22 1.00 7217 10435
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## Intercept 476.51 118.44 240.80 707.85 1.00 20101 18603
## Apextruncate 32.05 140.36 -243.00 310.24 1.00 33148 20854
##
## Family Specific Parameters:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
## sigma 377.78 59.83 276.62 508.22 1.00 12708 16180
##
## Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
## and Tail_ESS are effective sample size measures, and Rhat is the potential
## scale reduction factor on split chains (at convergence, Rhat = 1).
## [1] "Rhats:"
## [1] "Traces:"
## [1] "Autocorrelation:"
## [1] "Pairs:"
R session information
## R version 4.1.0 (2021-05-18)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 20.04.2 LTS
##
## Matrix products: default
## BLAS: /usr/lib/x86_64-linux-gnu/atlas/libblas.so.3.10.3
## LAPACK: /usr/lib/x86_64-linux-gnu/atlas/liblapack.so.3.10.3
##
## locale:
## [1] LC_CTYPE=pt_BR.UTF-8 LC_NUMERIC=C
## [3] LC_TIME=es_CR.UTF-8 LC_COLLATE=pt_BR.UTF-8
## [5] LC_MONETARY=es_CR.UTF-8 LC_MESSAGES=pt_BR.UTF-8
## [7] LC_PAPER=es_CR.UTF-8 LC_NAME=C
## [9] LC_ADDRESS=C LC_TELEPHONE=C
## [11] LC_MEASUREMENT=es_CR.UTF-8 LC_IDENTIFICATION=C
##
## attached base packages:
## [1] parallel stats graphics grDevices utils datasets methods
## [8] base
##
## other attached packages:
## [1] ggalt_0.4.0 bayesplot_1.8.1 loo_2.4.1.9000 bayestestR_0.11.5
## [5] brms_2.15.0 Rcpp_1.0.8 tuneRanger_0.5 lhs_1.1.3
## [9] lubridate_1.7.10 mlrMBO_1.1.5 smoof_1.6.0.2 checkmate_2.0.0
## [13] mlr_2.19.0 ParamHelpers_1.14 knitr_1.37 tidyr_1.1.3
## [17] kableExtra_1.3.1 vegan_2.5-7 permute_0.9-5 smacof_2.1-3
## [21] e1071_1.7-7 colorspace_2.0-2 plotrix_3.8-1 MASS_7.3-54
## [25] MuMIn_1.43.17 MCMCglmm_2.32 ape_5.5 coda_0.19-4
## [29] Matrix_1.3-4 viridis_0.6.2 viridisLite_0.4.0 pbapply_1.5-0
## [33] caret_6.0-88 lattice_0.20-44 ranger_0.13.1 readxl_1.3.1
## [37] ggplot2_3.3.5 corrplot_0.90 RColorBrewer_1.1-2
##
## loaded via a namespace (and not attached):
## [1] ModelMetrics_1.2.2.2 dygraphs_1.1.1.6 data.table_1.14.0
## [4] rpart_4.1-15 inline_0.3.19 doParallel_1.0.16
## [7] generics_0.1.0 callr_3.7.0 mice_3.13.0
## [10] proxy_0.4-26 webshot_0.5.2 xml2_1.3.2
## [13] httpuv_1.6.1 StanHeaders_2.21.0-7 assertthat_0.2.1
## [16] gower_0.2.2 xfun_0.29 hms_1.1.0
## [19] jquerylib_0.1.4 evaluate_0.14 promises_1.2.0.1
## [22] fansi_1.0.0 igraph_1.2.6 DBI_1.1.1
## [25] htmlwidgets_1.5.3 tensorA_0.36.2 stats4_4.1.0
## [28] purrr_0.3.4 ellipsis_0.3.2 heplots_1.3-8
## [31] crosstalk_1.1.1 dplyr_1.0.7 backports_1.3.0
## [34] V8_3.4.2 insight_0.14.5 markdown_1.1
## [37] RcppParallel_5.1.4 vctrs_0.3.8 abind_1.4-5
## [40] withr_2.4.3 xts_0.12.1 prettyunits_1.1.1
## [43] weights_1.0.4 cluster_2.1.2 lazyeval_0.2.2
## [46] crayon_1.4.2 candisc_0.8-5 ellipse_0.4.2
## [49] labeling_0.4.2 recipes_0.1.16 pkgconfig_2.0.3
## [52] nlme_3.1-152 wordcloud_2.6 nnet_7.3-16
## [55] rlang_0.4.12 RJSONIO_1.3-1.4 lifecycle_1.0.1
## [58] miniUI_0.1.1.1 colourpicker_1.1.0 extrafontdb_1.0
## [61] cellranger_1.1.0 tcltk_4.1.0 matrixStats_0.61.0
## [64] datawizard_0.2.1 carData_3.0-4 boot_1.3-28
## [67] zoo_1.8-9 base64enc_0.1-3 gamm4_0.2-6
## [70] ggridges_0.5.3 processx_3.5.2 png_0.1-7
## [73] KernSmooth_2.23-20 pROC_1.17.0.1 stringr_1.4.0
## [76] jpeg_0.1-8.1 shinystan_2.5.0 scales_1.1.1
## [79] magrittr_2.0.1 plyr_1.8.6 gdata_2.18.0
## [82] threejs_0.3.3 compiler_4.1.0 rstantools_2.1.1
## [85] ash_1.0-15 lme4_1.1-27.1 cli_3.1.0
## [88] ps_1.6.0 Brobdingnag_1.2-6 htmlTable_2.2.1
## [91] Formula_1.2-4 mgcv_1.8-36 tidyselect_1.1.1
## [94] stringi_1.7.6 forcats_0.5.1 projpred_2.0.2
## [97] highr_0.9 proj4_1.0-10.1 yaml_2.2.1
## [100] latticeExtra_0.6-29 bridgesampling_1.1-2 grid_4.1.0
## [103] sass_0.4.0 fastmatch_1.1-0 polynom_1.4-0
## [106] tools_4.1.0 rio_0.5.27 rstudioapi_0.13
## [109] foreach_1.5.1 foreign_0.8-81 gridExtra_2.3
## [112] cubature_2.0.4.2 prodlim_2019.11.13 farver_2.1.0
## [115] digest_0.6.29 shiny_1.6.0 lava_1.6.9
## [118] car_3.0-11 broom_0.7.8 later_1.2.0
## [121] mco_1.15.6 httr_1.4.2 rsconnect_0.8.18
## [124] rvest_1.0.0 splines_4.1.0 shinythemes_1.2.0
## [127] plotly_4.10.0 xtable_1.8-4 jsonlite_1.7.2
## [130] nloptr_1.2.2.2 BBmisc_1.11 corpcor_1.6.9
## [133] timeDate_3043.102 rstan_2.21.2 ipred_0.9-11
## [136] R6_2.5.1 Hmisc_4.5-0 pillar_1.6.4
## [139] htmltools_0.5.2 mime_0.11 nnls_1.4
## [142] glue_1.6.0 fastmap_1.1.0 minqa_1.2.4
## [145] DT_0.18 class_7.3-19 codetools_0.2-18
## [148] maps_3.3.0 pkgbuild_1.2.0 mvtnorm_1.1-2
## [151] utf8_1.2.2 bslib_0.2.5.1 tibble_3.1.6
## [154] DiceKriging_1.6.0 curl_4.3.2 gtools_3.9.2
## [157] misc3d_0.9-1 Rttf2pt1_1.3.8 zip_2.2.0
## [160] shinyjs_2.0.0 openxlsx_4.2.4 survival_3.2-11
## [163] parallelMap_1.5.1 rmarkdown_2.9 munsell_0.5.0
## [166] plot3D_1.4 iterators_1.0.13 haven_2.4.1
## [169] reshape2_1.4.4 gtable_0.3.0 extrafont_0.17