Clean session and load packages

#clean session
rm(list = ls())

# detach all non-base packages
out <- sapply(paste0("package:", names(sessionInfo()$otherPkgs)), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = TRUE))

## vector with package names
x <- c("pbapply", "parallel", "ggplot2", "ape", "seqinr", "ips", "gtools", "seqmagick", "ggmsa", "cowplot")

aa <- lapply(x, function(y) {
  
  # check if installed, if not then install 
  if (!y %in% installed.packages()[,"Package"]) 
    install.packages(y) 

  # load package
  try(require(y, character.only = T), silent = T)
})

Functions and parameters

#functions and parameters
knitr::opts_knit$set(root.dir = normalizePath(".."))

knitr::opts_chunk$set(dpi = 50, fig.width = 12) 

# ggplot2 theme
theme_set(theme_classic(base_size = 20))

# split a 20-character segment sequence into runs of consecutive identical segments
split_seqs <- function(x) {

  # flag positions where the segment differs from the previous one
  # (position 1 is instead compared with position 2)
  y <- sapply(1:20, function(y)
    if (y == 1)
      ifelse(substr(x, start = y + 1, stop = y + 1) == substr(x, start = y, stop = y), 0, 1) else
        ifelse(substr(x, start = y - 1, stop = y - 1) == substr(x, start = y, stop = y), 0, 1)
  )

  x2 <- strsplit(x, "")[[1]]

  w1 <- which(y == 1)

  # add a breakpoint after the last position so the final run is included
  w1 <- c(w1, 21)
  if (w1[1] == 1 & w1[2] == 2) w1 <- w1[2:length(w1)]

  # paste together the characters of each run
  sgmts <- lapply(1:length(w1), function(w){
    if (w1[w] == w1[1]) paste(x2[1:(w1[w] - 1)], collapse = "") else
      paste(x2[w1[w - 1]:(w1[w] - 1)], collapse = "")
  })

  return(sgmts)
}
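
# example: a 20-character segment string is split into runs of consecutive identical segments
split_seqs("ffffssssuuuuddddpppp")
# expected: "ffff" "ssss" "uuuu" "dddd" "pppp"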

# write one fasta file per lek from a vector of fasta-formatted lines
# (headers are expected to start with the lek code, e.g. ">BR1_...")
write_lek_fas <- function(x){

  # lek code from each header line (odd lines of the fasta)
  lk <- unlist(strsplit(x[seq(1, length(x), by = 2)], split = "_"))[seq(1, length(x), by = 2)]

  lk <- gsub(">", "", paste0(lk, "_"))

  out <- sapply(unique(lk), USE.NAMES = FALSE, function(y){

    # lines belonging to this lek
    wch.lk <- grep(pattern = y, x = x)

    fl.nms <- tempfile(tmpdir = getwd(), fileext = paste0("song.seq_", y, "SongsSeq.fa"))

    # write the headers and sequences of this lek
    writeLines(x[min(wch.lk):(max(wch.lk) + 1)], con = fl.nms)

    return(basename(fl.nms))
  })

  return(out)
}
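
# illustrative usage (hypothetical input; left commented out because it writes files to the working directory):
# fas_lines <- c(">BR1_A-2008", "ffffssssuuuuddddpppp",
#                ">BR1_B-2009", "ssssffffuuuuddddpppp",
#                ">CCE_A-2008", "uuuuddddppppffffssss")
# write_lek_fas(fas_lines) # returns the base names of the fasta files created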

# get leks with more than 5 song types
leks <- c("BR1", "CCE", "CCL", "HC1", "HC2", "LOC", "SAT", "SJA", "SUR", "TR1", "TR2")

Create fastas for mafft

# create fastas for each lek

###----Alignment of phenotypic sequence data: CSV to fasta format----------------------

dat <- read.csv("./data/raw/segments_by_song_type.csv", stringsAsFactors = FALSE)
yrs <- read.csv("./data/raw/year_range_by_song_type.csv", stringsAsFactors = FALSE)

# are song type IDs unique?
length(dat$song.type) == length(unique(dat$song.type)) # OK

# collapse the 20 segment columns into a single sequence string
dat$seqs <- sapply(1:nrow(dat), function(i) paste(dat[i, 3:22], collapse = ""))
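
# optional sanity check: each sequence should be 20 characters long (one per segment column)
table(nchar(dat$seqs))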

# one row per song type and year
dat.yr <- yrs

dat.yr$song.type.year <- paste(dat.yr$song.type, dat.yr$year, sep = "-")

dat.yr <- dat.yr[dat.yr$lek %in% leks, ]


dat.yr <- merge(dat.yr, dat[, c("song.type", "seqs")], by = "song.type")


# all song type years, ordered by song type and year (includes years before 2008)
dat.all.old <- dat.yr[order(dat.yr$song.type, dat.yr$year), ]

# earliest year per song type (includes years before 2008)
dat.early.old <- dat.all.old[!duplicated(dat.all.old$song.type), ]

# all song type years, keeping only years after 2007
dat.all.new <- dat.all.old[dat.all.old$year > 2007, ]

# earliest year per song type, keeping only years after 2008
dat.early.new <- dat.early.old[dat.early.old$year > 2008, ]

# rest of song type years (without earliest)
dat.rest.old <- dat.all.old[!dat.all.old$song.type.year %in% dat.early.old$song.type.year,]

dat.rest.new <- dat.all.new[!dat.all.new$song.type.year %in% dat.early.new$song.type.year,]


# by lek all song types
for(i in leks){
  
# rest old
write.fasta(sequences = as.list(dat.rest.old$seqs[dat.rest.old$lek == i]), names = dat.rest.old$song.type.year[dat.rest.old$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_rest_old_", i, ".fa"), open = "w", nbchar = 60, as.string = FALSE)

# early old
write.fasta(sequences = as.list(dat.early.old$seqs[dat.early.old$lek == i]), names = dat.early.old$song.type.year[dat.early.old$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_early_old_", i, ".fa"), open = "w", nbchar = 60, as.string = FALSE)

# rest new
write.fasta(sequences = as.list(dat.rest.new$seqs[dat.rest.new$lek == i]), names = dat.rest.new$song.type.year[dat.rest.new$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_rest_new_", i, ".fa"), open = "w", nbchar = 60, as.string = FALSE)

# early new
write.fasta(sequences = as.list(dat.early.new$seqs[dat.early.new$lek == i]), names = dat.early.new$song.type.year[dat.early.new$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_early_new_", i, ".fa"), open = "w", nbchar = 60, as.string = FALSE)
  }
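
# optional check: list the fasta files just written
list.files("./data/processed/song_sequences", pattern = "^song_seqs_.*\\.fa$")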

Aligning with mafft

# alignments of real songs

# whether to include song types from before 2008 ("old") or only newer ones ("new")
use.old.fossils <- c("old", "new")

# type of costs 
type <- c("optimal", "all.equal")

grd.costs <- expand.grid(use.old.fossils = use.old.fossils, type = type)

unq.sgmts <- c("d", "f", "m", "p", "s", "u")

# ASCII hex code of each segment letter (same order as unq.sgmts), used in mafft's text matrix
asc.val <- c("0x64", "0x66", "0x6D", "0x70", "0x73", "0x75")


cost <- matrix(c(0.5, 1.25, 2, 0.5, 1.25, 2, # optimal
               rep(c(1, 1, 1.5), 2)), # equal costs
               nrow = 4, byrow = TRUE)

cost.mat <- data.frame(cost, stringsAsFactors = FALSE)

names(cost.mat)[1:3] <- c("different.category", "same.category", "same.segment")

cost.mat <- data.frame(cost.mat, grd.costs, stringsAsFactors = FALSE)
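
# inspect the cost table: one row per combination of cost scheme and fossil set
cost.mat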


# grid of all pairwise segment combinations
grd.sgm <- expand.grid(sgmt1 = unq.sgmts, sgmt2 = unq.sgmts)

grd.sgm$type <- "different.category"

trlls <- c("f", "m", "s")
tones <- c("d", "p", "u")
grd.sgm$type[grd.sgm$sgmt1 %in% trlls & grd.sgm$sgmt2 %in% trlls] <-  "same.category"
grd.sgm$type[grd.sgm$sgmt1 %in% tones & grd.sgm$sgmt2 %in% tones] <- "same.category"
grd.sgm$type[grd.sgm$sgmt1 == grd.sgm$sgmt2] <- "same.segment"

# build one pairwise cost grid per cost scheme and fossil set combination
cost.grds <- pblapply(1:nrow(cost.mat), cl = 3, function(x){
  
  grd.sgm$cost <- NA
  grd.sgm$cost[grd.sgm$type == "different.category"] <- cost.mat$different.category[x]
  grd.sgm$cost[grd.sgm$type == "same.category"] <- cost.mat$same.category[x]
  grd.sgm$cost[grd.sgm$type == "same.segment"] <- cost.mat$same.segment[x]

  grd.sgm$pair <- paste(grd.sgm$sgmt1, "x", grd.sgm$sgmt2)

  grd.sgm$asc1 <- grd.sgm$sgmt1
  grd.sgm$asc2 <- grd.sgm$sgmt2
  
  for(i in 1:length(unq.sgmts)){
    grd.sgm$asc1 <- gsub(unq.sgmts[i], asc.val[i], grd.sgm$asc1)
    grd.sgm$asc2 <- gsub(unq.sgmts[i], asc.val[i], grd.sgm$asc2)
  }

  grd.sgm$cost.type <- cost.mat$type[x]

  grd.sgm$use.old.fossils <- cost.mat$use.old.fossils[x]
  
  return(grd.sgm)
})

length(cost.grds)
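
# peek at the first cost grid: pairwise segment costs with their ASCII hex codes for mafft
head(cost.grds[[1]])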

# get leks with more than 5 song types
# leks <- names(table(sgmnts$lek))[table(sgmnts$lek)> 5]


pboptions(type = "timer")

yrs <- read.csv("./data/raw/year_range_by_song_type.csv", stringsAsFactors = FALSE)

# exclude all  equal
# cost.grds <- cost.grds[1]

out <- pblapply(1:length(cost.grds), cl = parallel::detectCores() - 1, function(y){
  
  Y <- cost.grds[[y]]
  w <- apply(Y[, c("asc1", "asc2", "cost")], 1, paste,  collapse = " ")
  
  writeLines(w, con = paste0("./data/processed/mafft_alignments/",Y$cost.type[1],"_cost_matrix.txt"))
  
  out <- lapply(leks, function(i){
    
    # skip leks first recorded after 2000 when using old song types
    if (min(yrs$year[yrs$lek == i]) > 2000 & Y$use.old.fossils[1] == "old")
      return(NULL) else
      {      
          cll <- paste0("mafft --localpair --maxiterate 50000 --textmatrix ./data/processed/mafft_alignments/",Y$cost.type[1] ,"_cost_matrix.txt ./data/processed/song_sequences/song_seqs_early_", Y$use.old.fossils[1], "_", i,".fa > ./data/processed/mafft_alignments/", i, "_", Y$cost.type[1], "_early_",  Y$use.old.fossils[1],  "_alignment.fa 2> ./data/processed/mafft_alignments/early_", Y$use.old.fossils[1], "_", i,"_", Y$cost.type[1], "_mafft_output.txt")
  
      system(cll)
    
    # read the first aligned sequence and count the gaps added (aligned length minus the 20 original segments)
    alg <- readLines(paste0("./data/processed/mafft_alignments/",i,"_", Y$cost.type[1], "_early_",  Y$use.old.fossils[1], "_alignment.fa"))[2]
    gaps <- nchar(alg) - 20

    # did mafft report convergence?
    convg <- ifelse(any(grepl("Converged", readLines(paste0("./data/processed/mafft_alignments/early_", Y$use.old.fossils[1], "_", i ,"_", Y$cost.type[1],"_mafft_output.txt")))), "Y", "N")
    
    df2 <- data.frame(lek = i, gaps, convg, cost.type = Y$cost.type[1], use.old.fossils = Y$use.old.fossils[1])
    
    return(df2)
    } 
  })
  
  out <- out[!sapply(out, is.null)]
  
  res <- do.call(rbind, out)
  
  return(res)
})

res <- do.call(rbind, out)

write.csv(res, "./output/aligment_gaps_and_convergence_results.csv")

Add new sequences to mafft alignments

# add the remaining (later-year) sequences to the early mafft alignments

alg <- list.files(path = "./data/processed/mafft_alignments", pattern = "alignment.fa$", full.names = TRUE)

# fasta files with the remaining (later-year) sequences
rest.seqs <- list.files(path = "./data/processed/song_sequences", pattern = "rest", full.names = TRUE)


out <- lapply(leks, function(x){
  
  # get alignments for x lek
  lk.alg <- grep(x, alg, value = TRUE)

  for(w  in lk.alg){
    cost_mat <- ifelse(grepl("optimal", w), "optimal", "all.equal")
    use_old_fossils <- ifelse(grepl("old", w), "old", "new")
  
    cll <- paste0("mafft --anysymbol --keeplength --add ", grep(paste0("new_", x), rest.seqs, value = TRUE)," --reorder ", file.path("./data/processed/mafft_alignments", paste(x, cost_mat, "early", use_old_fossils ,"alignment.fa", sep = "_")), " > ",     file.path("./data/processed/mafft_alignments", paste(x, cost_mat, "all", use_old_fossils ,"alignment.fa", sep = "_")))

  
    system(cll)
    }
  })

Create fastas for prank

# create fastas for each lek

###----Alignment of phenotypic sequence data: CSV to fasta format----------------------

dat <- read.csv("./data/raw/segments_by_song_type.csv", stringsAsFactors = FALSE)
yrs <- read.csv("./data/raw/year_range_by_song_type.csv", stringsAsFactors = FALSE)

# are song type IDs unique?
length(dat$song.type) == length(unique(dat$song.type)) # OK

# collapse the 20 segment columns into a single sequence string
dat$seqs <- sapply(1:nrow(dat), function(i) paste(dat[i, 3:22], collapse = ""))

# replace song segments with bases
dat$seqs <- gsub(pattern = "f", replacement = "C", x = dat$seqs) # fast trill to pyrimidine
dat$seqs <- gsub(pattern = "s", replacement = "T", x = dat$seqs) # slow trill to pyrimidine
dat$seqs <- gsub(pattern = "m", replacement = "Y", x = dat$seqs) # medium trill treated as ambiguous between fast and slow
dat$seqs <- gsub(pattern = "u", replacement = "A", x = dat$seqs) # tone to purine
dat$seqs <- gsub(pattern = "d", replacement = "G", x = dat$seqs) # tone to purine
dat$seqs <- gsub(pattern = "p", replacement = "R", x = dat$seqs) # tone treated as ambiguous between "u" and "d" (purine)


# one row per song type and year
dat.yr <- yrs

dat.yr$song.type.year <- paste(dat.yr$song.type, dat.yr$year, sep = "-")

dat.yr <- dat.yr[dat.yr$lek %in% leks, ]


dat.yr <- merge(dat.yr, dat[, c("song.type", "seqs")], by = "song.type")


# all song type years, ordered by song type and year (includes all years)
dat.all.old <- dat.yr[order(dat.yr$song.type, dat.yr$year), ]

# earliest year per song type (includes all years)
dat.early.old <- dat.all.old[!duplicated(dat.all.old$song.type), ]

# all song type years, keeping only years after 2000
dat.all.new <- dat.all.old[dat.all.old$year > 2000, ]

# earliest year per song type, keeping only years after 2000
dat.early.new <- dat.early.old[dat.early.old$year > 2000, ]

# rest of song type years (without earliest)
dat.rest.old <- dat.all.old[!dat.all.old$song.type.year %in% dat.early.old$song.type.year,]

dat.rest.new <- dat.all.new[!dat.all.new$song.type.year %in% dat.early.new$song.type.year,]


# by lek all song types
for(i in leks){
  
# rest old
write.fasta(sequences = as.list(dat.rest.old$seqs[dat.rest.old$lek == i]), names = dat.rest.old$song.type.year[dat.rest.old$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_rest_old_", i, "_prank.fa"), open = "w", nbchar = 60, as.string = FALSE)

# early old
write.fasta(sequences = as.list(dat.early.old$seqs[dat.early.old$lek == i]), names = dat.early.old$song.type.year[dat.early.old$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_early_old_", i, "_prank.fa"), open = "w", nbchar = 60, as.string = FALSE)

# rest new
write.fasta(sequences = as.list(dat.rest.new$seqs[dat.rest.new$lek == i]), names = dat.rest.new$song.type.year[dat.rest.new$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_rest_new_", i, "_prank.fa"), open = "w", nbchar = 60, as.string = FALSE)

# early new
write.fasta(sequences = as.list(dat.early.new$seqs[dat.early.new$lek == i]), names = dat.early.new$song.type.year[dat.early.new$lek == i], file.out = paste0("./data/processed/song_sequences/song_seqs_early_new_", i, "_prank.fa"), open = "w", nbchar = 60, as.string = FALSE)
  }

Aligning with prank

# fasta files recoded as bases for prank
prank_seqs <- list.files(path = "./data/processed/song_sequences", pattern = "prank")

# get only early ones
prank_seqs_early <- grep("early", prank_seqs, value = TRUE) 

# get rest
prank_seqs_rest <- grep("rest", prank_seqs, value = TRUE) 

out <- pblapply(prank_seqs_early, function(x) {

  # parse the fasta file name (e.g. "song_seqs_early_old_BR1_prank.fa")
  lek <- sapply(strsplit(x, "_", fixed = TRUE), "[[", 5)
  all.fossils <- sapply(strsplit(x, "_", fixed = TRUE), "[[", 4) # "old" or "new"
  old.fossils <- sapply(strsplit(x, "_", fixed = TRUE), "[[", 3) # "early"

  cll <- paste0("prank -d=./data/processed/song_sequences/", x, " -o=./data/processed/mafft_alignments/", lek, "_prank_", old.fossils, "_", all.fossils, "_alignment.fa -iterate=100 -kappa=2")

  system(cll)
  })



# remove best.fas suffix
prank_algn <- list.files(path = "./data/processed/mafft_alignments", pattern = "best.fas", full.names = TRUE)

file.rename(from = prank_algn, to = gsub(".best.fas$", "", prank_algn))

Add new sequences to prank alignments

# add the remaining (later-year) sequences to the early prank alignments

alg <- list.files(path = "./data/processed/mafft_alignments", pattern = "prank", full.names = TRUE)

# fasta files with the remaining (later-year) sequences
rest.seqs <- list.files(path = "./data/processed/song_sequences", pattern = "rest", full.names = TRUE)

rest.seqs <- grep("prank", rest.seqs, value = TRUE)

out <- lapply(leks, function(x){
  
  # get alignments for x lek
  lk.alg <- grep(x, alg, value = TRUE)

  for(w  in lk.alg){
    cost_mat <- "prank"
    use_old_fossils <- ifelse(grepl("old", w), "old", "new")
  
    cll <- paste0("mafft --anysymbol --keeplength --add ", grep(paste0("new_", x), rest.seqs, value = TRUE)," --reorder ", file.path("./data/processed/mafft_alignments", paste(x, cost_mat, "early", use_old_fossils ,"alignment.fa", sep = "_")), " > ",     file.path("./data/processed/mafft_alignments", paste(x, cost_mat, "all", use_old_fossils ,"alignment.fa", sep = "_")))

    system(cll)
    }
  })

Plot alignments

Mafft optimal costs (an example of the resulting text matrix file is shown after the lists below):

  • different category = 0.5 (e.g. trill vs pure tone)
  • same category = 1.25 (e.g. slow trill vs fast trill)
  • same segment = 2 (e.g. slow trill vs slow trill)

Mafft agnostic (all equal) costs:

  • different category = 1.1 (e.g. trill vs pure tone)
  • same category = 1.1 (e.g. slow trill vs fast trill)
  • same segment = 1 (e.g. slow trill vs slow trill)
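
For reference, the file passed to mafft via --textmatrix (written in the alignment chunk above) lists one segment pair per line as two ASCII hex codes followed by their score. Assuming each segment letter is encoded by its ASCII hex value (d = 0x64, f = 0x66, m = 0x6D, p = 0x70, s = 0x73, u = 0x75), the first lines of the optimal matrix look roughly like this:

0x64 0x64 2
0x66 0x64 0.5
0x6D 0x64 0.5
0x70 0x64 1.25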

Prank: a probabilistic multiple alignment program based on an algorithm that handles insertions correctly and avoids over-estimating the number of deletion events.

Make plots

#plot alignments
seqs <- list.files(path = "./data/processed/mafft_alignments", pattern = "\\.fa$", full.names = TRUE)

# keep only the "early" alignments (the "all" alignments repeat song types across years)
seqs <- grep("early", seqs, value = TRUE)

algn_plots <- lapply(seqs, function(i){
  ggmsa(i, color = "Chemistry_AA") + ggtitle(gsub("_|\\.fa", " ", basename(i)))
  })

names(algn_plots) <- basename(seqs)

algn_plots
## (alignment plots: one ggmsa panel per element of algn_plots, i.e. each lek crossed with
## alignment method (all.equal, optimal, prank) and fossil set (early new / early old);
## the all.equal and optimal "old" alignments are absent for BR1, LOC, TR1 and TR2)

Save nexus files

### Convert fasta alignment to nexus
# read all alignments
all.algs <- list.files(pattern = "\\.fa$", path = "./data/processed/mafft_alignments/")

# loop over alignments
for (j in all.algs){

  lek <- substring(j, 1, 3)
  cost_mat <- if (grepl("optimal", j)) "optimal" else
    if (grepl("all.equal", j)) "all.equal" else "prank"

  use_old_fossils <- ifelse(grepl("old", j), "old", "new")

  # paste together call
  cll <- paste0("perl ./source/convertfasta2nex.pl ./data/processed/mafft_alignments/", j, " > ", "./data/processed/nexus/", gsub("\\.fa$", ".nex", j))

  # call perl
  system(cll)
}

# fix nexus header
Nex.file <- list.files(pattern = ".nex$", path = "./data/processed/nexus/", full.names = TRUE)

for (i in Nex.file){
  align <- readLines(i)
  align[4] <- 'format datatype=standard gap=- missing=? symbols="dfmpsu";'

  # change seqs back to sound segments
  if (grepl("prank", i)){

    seqs <- align[7:(length(align) - 2)]
    sep <- tidyr::separate(data.frame(seqs = seqs), col = "seqs", sep = "\t", into = c("song", "seqs"))

    # replace bases with song segments
    sep$seqs <- gsub(pattern = "C", replacement = "f", x = sep$seqs) # pyrimidine to fast trill
    sep$seqs <- gsub(pattern = "T", replacement = "s", x = sep$seqs) # pyrimidine to slow trill
    sep$seqs <- gsub(pattern = "Y", replacement = "m", x = sep$seqs) # ambiguous pyrimidine to medium trill
    sep$seqs <- gsub(pattern = "A", replacement = "u", x = sep$seqs) # purine to tone
    sep$seqs <- gsub(pattern = "G", replacement = "d", x = sep$seqs) # purine to tone
    sep$seqs <- gsub(pattern = "R", replacement = "p", x = sep$seqs) # ambiguous purine to tone

    align[7:(length(align) - 2)] <- paste0(sep$song, "\t", sep$seqs)
  }
  write(align, i)
}
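
# optional check: inspect the header of one converted nexus file
readLines(Nex.file[1], n = 6)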

R session information

## R version 3.6.1 (2019-07-05)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 18.04.4 LTS
## 
## Matrix products: default
## BLAS:   /usr/lib/x86_64-linux-gnu/atlas/libblas.so.3.10.3
## LAPACK: /usr/lib/x86_64-linux-gnu/atlas/liblapack.so.3.10.3
## 
## locale:
##  [1] LC_CTYPE=es_ES.UTF-8       LC_NUMERIC=C              
##  [3] LC_TIME=es_CR.UTF-8        LC_COLLATE=es_ES.UTF-8    
##  [5] LC_MONETARY=es_CR.UTF-8    LC_MESSAGES=es_ES.UTF-8   
##  [7] LC_PAPER=es_CR.UTF-8       LC_NAME=C                 
##  [9] LC_ADDRESS=C               LC_TELEPHONE=C            
## [11] LC_MEASUREMENT=es_CR.UTF-8 LC_IDENTIFICATION=C       
## 
## attached base packages:
## [1] parallel  stats     graphics  grDevices utils     datasets  methods  
## [8] base     
## 
## other attached packages:
## [1] cowplot_1.0.0   ggmsa_0.0.2     seqmagick_0.1.3 gtools_3.8.1   
## [5] ips_0.0.11      seqinr_3.6-1    ape_5.3         ggplot2_3.3.0  
## [9] pbapply_1.4-2  
## 
## loaded via a namespace (and not attached):
##  [1] Rcpp_1.0.3          lattice_0.20-41     tidyr_1.0.0        
##  [4] Biostrings_2.54.0   ggseqlogo_0.1       assertthat_0.2.1   
##  [7] zeallot_0.1.0       digest_0.6.22       R6_2.4.1           
## [10] plyr_1.8.4          backports_1.1.5     stats4_3.6.1       
## [13] evaluate_0.14       pillar_1.4.2        zlibbioc_1.32.0    
## [16] rlang_0.4.1         lazyeval_0.2.2      phangorn_2.5.5     
## [19] S4Vectors_0.24.3    Matrix_1.2-18       rmarkdown_1.17     
## [22] labeling_0.3        stringr_1.4.0       igraph_1.2.4.1     
## [25] munsell_0.5.0       compiler_3.6.1      xfun_0.12          
## [28] pkgconfig_2.0.3     BiocGenerics_0.32.0 htmltools_0.4.0    
## [31] tidyselect_0.2.5    tibble_2.1.3        quadprog_1.5-8     
## [34] IRanges_2.20.2      XML_3.98-1.20       crayon_1.3.4       
## [37] dplyr_0.8.3         withr_2.1.2         MASS_7.3-51.4      
## [40] grid_3.6.1          nlme_3.1-142        jsonlite_1.6       
## [43] gtable_0.3.0        lifecycle_0.1.0     magrittr_1.5       
## [46] scales_1.0.0        tidytree_0.3.2      stringi_1.4.6      
## [49] XVector_0.26.0      ellipsis_0.3.0      vctrs_0.2.0        
## [52] fastmatch_1.1-0     tools_3.6.1         treeio_1.10.0      
## [55] ade4_1.7-13         glue_1.3.1          purrr_0.3.3        
## [58] yaml_2.2.1          colorspace_1.4-1    knitr_1.28