Load Necessary R Packages

library(vcfR)
## 
##    *****       ***   vcfR   ***       *****
##    This is vcfR 1.13.0 
##      browseVignettes('vcfR') # Documentation
##      citation('vcfR') # Citation
##    *****       *****      *****       *****
library(vegan)
## Loading required package: permute
## Loading required package: lattice
## This is vegan 2.6-4
library(ggplot2)
library(ggpubr)

Confirm Your Working Directory

Check the working directory.

getwd()
## [1] "/Users/vinishasant/Documents/Fall2022/BIOSC1540/CompBioCode/FinalProject"

Check the files in the working directory to make sure that the vcf.gz file containing the SNPs is in the same folder.

list.files(pattern = "vcf")
## [1] "my_snps.vcf.gz"  "vcf_num_df.csv"  "vcf_num_df2.csv" "vcf_num.csv"    
## [5] "vcf_scaled.csv"

Setting Up the SNP Data

Loading Data

Load the VCF data into R. my_vcf is an object containing the name of the VCF file.

my_vcf <- "my_snps.vcf.gz"
vcf <- vcfR::read.vcfR(my_vcf, convertNA = T)
## Scanning file to determine attributes.
## File attributes:
##   meta lines: 130
##   header_line: 131
##   variant count: 8213
##   column count: 2513
## Meta line 130 read in.
## All meta lines processed.
## gt matrix initialized.
## Character matrix gt created.
##   Character matrix gt rows: 8213
##   Character matrix gt cols: 2513
##   skip: 0
##   nrows: 8213
##   row_num: 0
## Processed variant 1000
## Processed variant 2000
## Processed variant 3000
## Processed variant 4000
## Processed variant 5000
## Processed variant 6000
## Processed variant 7000
## Processed variant 8000
## Processed variant: 8213
## All variants processed
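
As an optional inspection step (an addition, not part of the original output), the vcfR object can be printed for a summary and its fixed region examined:

vcf #prints a summary of the vcfR object
head(vcf@fix[, 1:5]) #CHROM, POS, ID, REF, ALT for the first variants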

Convert the raw VCF data to genotype scores (allele counts) and store the result in an object called vcf_num.

vcf_num <- vcfR::extract.gt(vcf, element = "GT", IDtoRowNames = F, as.numeric = T, convertNA = T)
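
One optional check (an addition; it assumes the scores encode counts of the alternate allele) is to tabulate the values observed at the first SNP, which is the first row of vcf_num:

table(vcf_num[1, ], useNA = "ifany") #distribution of scores at the first SNP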

Save the CSV file

write.csv(vcf_num, file = "vcf_num.csv", row.names = F)

Confirm the presence of the file in the working directory

list.files(pattern = "csv")
## [1] "1000genomes_people_info2.csv" "vcf_num_df.csv"              
## [3] "vcf_num_df2.csv"              "vcf_num.csv"                 
## [5] "vcf_scaled.csv"

Transposing the Data

Transpose the VCF data to the standard R orientation (samples as rows, SNPs as columns) using t(), then convert it to a dataframe object

vcf_num_t <- t(vcf_num)
vcf_num_df <- data.frame(vcf_num_t)

Get sample names and add this information to the dataframe

sample <- row.names(vcf_num_df)
vcf_num_df <- data.frame(sample, vcf_num_df)

Save the CSV file

write.csv(vcf_num_df, file = "vcf_num_df.csv", row.names = F)

Confirm the presence of the file in the working directory

list.files(pattern = "csv")
## [1] "1000genomes_people_info2.csv" "vcf_num_df.csv"              
## [3] "vcf_num_df2.csv"              "vcf_num.csv"                 
## [5] "vcf_scaled.csv"

Cleaning the Data

Merging the Data with Population Meta Data

Load the population meta data

pop_meta <- read.csv(file = "1000genomes_people_info2.csv")

Check to make sure “sample” appears in both the meta data and the SNP data

names(pop_meta)
## [1] "pop"       "super_pop" "sample"    "sex"       "lat"       "lng"
names(vcf_num_df[1:10])
##  [1] "sample" "X1"     "X2"     "X3"     "X4"     "X5"     "X6"     "X7"    
##  [9] "X8"     "X9"

Merge the meta data with SNP data

vcf_num_df2 <- merge(pop_meta, vcf_num_df, by = "sample")

Verify that the number of rows is unchanged by the merge

nrow(vcf_num_df) == nrow(vcf_num_df2)
## [1] TRUE
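
If the row counts had differed, a hypothetical troubleshooting step (not needed here) would be to list the samples missing from either table:

setdiff(vcf_num_df$sample, pop_meta$sample) #in the SNP data but not the meta data
setdiff(pop_meta$sample, vcf_num_df$sample) #in the meta data but not the SNP data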

Verify the names of the new dataframe

names(vcf_num_df2[1:15])
##  [1] "sample"    "pop"       "super_pop" "sex"       "lat"       "lng"      
##  [7] "X1"        "X2"        "X3"        "X4"        "X5"        "X6"       
## [13] "X7"        "X8"        "X9"

Save the CSV file and confirm its presence in the working directory

write.csv(vcf_num_df2, file = "vcf_num_df2.csv", row.names = F)
list.files(pattern = "csv")
## [1] "1000genomes_people_info2.csv" "vcf_num_df.csv"              
## [3] "vcf_num_df2.csv"              "vcf_num.csv"                 
## [5] "vcf_scaled.csv"

Omitting Invariant Features

invar_omit() function

invar_omit <- function(x){
  cat("Dataframe of dim", dim(x), "processed...\n")
  
  #standard deviation of each column; sd == 0 means the column is invariant
  sds <- apply(x, 2, sd, na.rm = TRUE)
  i_var0 <- which(sds == 0)
  
  cat(length(i_var0), "columns removed \n")
  
  #drop the invariant columns, if any were found
  if(length(i_var0) > 0){
    x <- x[, -i_var0]
  }
  
  return(x)
}
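
To see invar_omit() in action, here is a toy example (illustrative only, not part of the analysis) in which column b is constant and should be dropped:

toy <- data.frame(a = c(1, 2, 3), b = c(5, 5, 5), c = c(0, 1, 0))
invar_omit(toy) #reports a 3 x 3 dataframe processed and 1 column removed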

Check which columns have character data so we can skip them when running invar_omit()

names(vcf_num_df2[1:10])
##  [1] "sample"    "pop"       "super_pop" "sex"       "lat"       "lng"      
##  [7] "X1"        "X2"        "X3"        "X4"

Run the data through the invar_omit() function

vcf_no_invar <- vcf_num_df2
vcf_no_invar <- data.frame(vcf_no_invar[,c(1:6)], invar_omit(vcf_no_invar[,-c(1:6)]))
## Dataframe of dim 2504 8213 processed...
## 1995 columns removed
N_invar_cols <- 1995 #number of invariant columns removed (from the output above)

Removing Low Quality Data

find_Nas() function

find_Nas <- function(x){
  #find the positions of the NAs in x and return them
  NAs_TF <- is.na(x)
  i_NA <- which(NAs_TF == TRUE)
  
  return(i_NA)
}
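
A quick illustrative call (toy data, not part of the analysis) shows that find_Nas() returns the positions of the NAs:

find_Nas(c(10, NA, 30, NA, 50)) #returns 2 and 4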

for() loop to search for NAs

N_rows <- nrow(vcf_no_invar) #number of rows (individuals)
N_SNPs <- ncol(vcf_no_invar) #total number of columns (meta data + SNPs)

N_NA <- rep(x = 0, times = N_rows) #vector to hold output (number of NAs)

for(i in 1:N_rows){
  #for each row, find the location of NAs
  i_NA <- find_Nas(vcf_no_invar[i,])
  
  #determine how many NAs with length()
  N_NA_i <- length(i_NA)
  
  #save the output to our storage vector
  N_NA[i] <- N_NA_i
}
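
For reference, the same counts can be computed without an explicit loop; this vectorized sketch should agree with the loop above:

N_NA_vectorized <- rowSums(is.na(vcf_no_invar)) #count the NAs in each row in one call
all(N_NA_vectorized == N_NA) #TRUE if the two approaches match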

Check to see if any row in the dataframe has >50% NAs

cutoff50 <- N_SNPs*0.5 #absolute number of NAs corresponding to 50%
percent_NA <- N_NA/N_SNPs*100
any(percent_NA > 50)
## [1] FALSE
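
Had any rows exceeded the cutoff, a hypothetical cleanup step (not needed here, since the check returned FALSE) could remove them:

i_bad_rows <- which(percent_NA > 50) #rows with >50% NAs (empty in this data)
if(length(i_bad_rows) > 0){
  vcf_no_invar <- vcf_no_invar[-i_bad_rows, ]
}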

Check the average percent of NAs per row

mean(percent_NA)
## [1] 6.416469e-06
N_meanNA_rows <- mean(percent_NA)

Mean Imputation of NAs

mean_imputation() function

mean_imputation <- function(df){
  
  cat("This may take some time :)")
  n_cols <- ncol(df)
  
  for(i in 1:n_cols){
    #get the current column
    column_i <- df[, i]
    
    #get the mean of the current column
    mean_i <- mean(column_i, na.rm = TRUE)
    
    #get the NAs in the current column
    NAs_i <- which(is.na(column_i))
    
    #count the number of NAs (computed here but not printed)
    N_NAs <- length(NAs_i)
    
    #replace the NAs in the current column
    column_i[NAs_i] <- mean_i
    
    #replace the original column with the updated column
    df[, i] <- column_i
    
  }
  
  return(df)
}
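
To illustrate what mean_imputation() does, here is a toy example (not part of the analysis): the NA in column a is replaced by the mean of the non-missing values, (1 + 3)/2 = 2.

toy_na <- data.frame(a = c(1, NA, 3), b = c(4, 5, 6))
mean_imputation(toy_na) #the NA in column a becomes 2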

Run the mean_imputation() function on all the numeric columns

vcf_noNA <- vcf_no_invar
vcf_noNA[, -c(1:6)] <- mean_imputation(vcf_no_invar[, -c(1:6)])
## This may take some time :)

Preparing for PCA

Scale the numeric features using scale()

vcf_scaled <- vcf_noNA
vcf_scaled[, -c(1:6)] <- scale(vcf_noNA[, -c(1:6)])
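
As an optional check (an addition to the workflow), each scaled SNP column should now have a mean of approximately 0 and a standard deviation of 1; column 7 is the first SNP column here:

round(mean(vcf_scaled[, 7]), 10) #should be 0
round(sd(vcf_scaled[, 7]), 10) #should be 1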

Save the CSV file and confirm its presence in the working directory

write.csv(vcf_scaled, file = "vcf_scaled.csv", row.names = F)
list.files(pattern = "csv")
## [1] "1000genomes_people_info2.csv" "vcf_num_df.csv"              
## [3] "vcf_num_df2.csv"              "vcf_num.csv"                 
## [5] "vcf_scaled.csv"

Run the PCA

Run the PCA analysis using prcomp() on only the numeric features

vcf_pca <- prcomp(vcf_scaled[, -c(1:6)])
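
As an optional sanity check (not in the original pipeline), the score matrix returned by prcomp() should have one row per sample and one column per PC:

dim(vcf_pca$x) #rows = samples, columns = PCs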

PCA Diagnostics

Screeplot

Create a screeplot of the data using screeplot()

screeplot(vcf_pca)

The default screeplot does not provide much guidance on how many PCs we should retain in the analysis.

Explained Variation

PCA_variation() function

PCA_variation <- function(pca_summary, PCs = 2){
  #row 2 of the summary's importance table is "Proportion of Variance"
  var_explained <- pca_summary$importance[2,1:PCs]*100
  var_explained <- round(var_explained, 3)
  return(var_explained)
}
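
To see what PCA_variation() does, here is a small illustrative run on R's built-in USArrests data (an example dataset, not part of this project):

PCA_variation(summary(prcomp(USArrests, scale. = TRUE)), PCs = 2) #percent variation of the first 2 PCs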

Extract the PCA variation data and calculate the percentage of variation explained. As a rule of thumb, only PCs that explain more than 1/N of the total variation (where N is the number of features) are worth retaining, since a PC below that threshold explains less than a single feature would if variation were spread evenly.

vcf_pca_summary <- summary(vcf_pca)

#extracting raw variation data
var_out <- PCA_variation(vcf_pca_summary, PCs = 500)

#calculate the cut off for the rule of thumb: 1/N of the total variation,
#where N is the number of SNP columns used in the PCA
N_columns <- ncol(vcf_scaled[, -c(1:6)])
cut_off <- 1/N_columns*100

#find the PCs which fall below the cutoff
i_cut_off <- which(var_out < cut_off)

i_cut_off <- min(i_cut_off) #first PC below the cutoff
## Warning in min(i_cut_off): no non-missing arguments to min; returning Inf
N_meanNA_rowsPCs <- i_cut_off #save this value (Inf here: none of the first 500 PCs fell below the cutoff)
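
The warning arises because which() returned an empty vector (no PC among the first 500 falls below the cutoff), and min() of an empty vector warns and returns Inf. A guarded version, sketched here, avoids the warning:

i_below <- which(var_out < cut_off) #positions of PCs below the cutoff, if any
first_below <- if(length(i_below) > 0) min(i_below) else NA #NA instead of Inf when none qualify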

Extract the amount of variation explained by the first 3 PCs

var_PC123 <- var_out[c(1,2,3)]

Plot the percentage variation

barplot(var_out, main = "Percent variation (%) Screeplot",
        ylab = "Percent variation (%) explained",
        names.arg = 1:length(var_out))
abline(h = cut_off, col = 2, lwd = 2)
abline(v = i_cut_off)
legend("topright", col = c(2,1), lty = c(1,1), 
       legend = c("Vertical line: cutoff", 
                  "Horizontal line: 1st value below cutoff"))

Plot cumulative percentage variation

cumulative_variation <- cumsum(var_out)
plot(cumulative_variation, type = "l",
     xlab = "Number of PCs", ylab = "Cumulative percent variation (%)")

Plot PCA Results

Calculate Scores

Get the scores using vegan::scores()

vcf_pca_scores <- vegan::scores(vcf_pca)
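
As a quick check (an addition to the workflow), the scores object can be inspected to confirm it has one row per sample and one column per PC:

dim(vcf_pca_scores) #rows = samples, columns = PCs
vcf_pca_scores[1:3, 1:2] #first few samples on PC1 and PC2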

Combine the scores with the population information into a dataframe, and look at the variation explained by the PCs

vcf_pca_scores2 <- data.frame(super_pop = vcf_noNA$super_pop, vcf_pca_scores)

var_PC123[1]
##   PC1 
## 2.739
var_PC123[2]
##   PC2 
## 2.147

Plot PC1 versus PC2

ggpubr::ggscatter(data = vcf_pca_scores2,
                  y = "PC2", x = "PC1",
                  color = "super_pop",
                  shape = "super_pop",
                  main = "PCA Scatterplot",
                  xlab = "PC1 (2.7% of variation)",
                  ylab = "PC2 (2.1% of variation")

Plot PC2 versus PC3

ggpubr::ggscatter(data = vcf_pca_scores2,
                  y = "PC3", x = "PC2",
                  color = "super_pop",
                  shape = "super_pop",
                  main = "PCA Scatterplot",
                  xlab = "PC2 (2.1% of variation)",
                  ylab = "PC3 (1.6% of variation")

Plot PC1 versus PC3

ggpubr::ggscatter(data = vcf_pca_scores2,
                  y = "PC3", x = "PC1",
                  color = "super_pop",
                  shape = "super_pop",
                  main = "PCA Scatterplot",
                  xlab = "PC1 (2.7% of variation)",
                  ylab = "PC3 (1.6% of variation")