Load necessary R packages

library(vcfR)    
## Warning: package 'vcfR' was built under R version 4.2.2
## 
##    *****       ***   vcfR   ***       *****
##    This is vcfR 1.13.0 
##      browseVignettes('vcfR') # Documentation
##      citation('vcfR') # Citation
##    *****       *****      *****       *****
library(vegan)
## Loading required package: permute
## Loading required package: lattice
## This is vegan 2.6-2
library(ggplot2)
library(ggpubr)
library(scatterplot3d)

Confirm working directory and location of vcf file

getwd()
## [1] "C:/Users/saman/Desktop/Comp Bio/Final Project"
list.files()
##  [1] "1000genomes_people_info2-1.csv"                               
##  [2] "5.17947992-18187992.ALL.chr5_GRCh38.genotypes.20170504.vcf.gz"
##  [3] "Final Project.Rmd"                                            
##  [4] "Final Project.Rproj"                                          
##  [5] "Final_Project_Final_Report.Rmd"                               
##  [6] "Final_Project_Workflow.Rmd"                                   
##  [7] "final_report_Epstein.Rmd"                                     
##  [8] "SNPs_cleaned.csv"                                             
##  [9] "vcf_num.csv"                                                  
## [10] "vcf_num_df.csv"                                               
## [11] "vcf_num_df2.csv"                                              
## [12] "vcf_scaled.csv"
list.files(pattern = "vcf")
## [1] "5.17947992-18187992.ALL.chr5_GRCh38.genotypes.20170504.vcf.gz"
## [2] "vcf_num.csv"                                                  
## [3] "vcf_num_df.csv"                                               
## [4] "vcf_num_df2.csv"                                              
## [5] "vcf_scaled.csv"

Load vcf file

vcf <- vcfR::read.vcfR("5.17947992-18187992.ALL.chr5_GRCh38.genotypes.20170504.vcf.gz", convertNA = TRUE)
## Scanning file to determine attributes.
## File attributes:
##   meta lines: 130
##   header_line: 131
##   variant count: 7085
##   column count: 2513
## 
## Meta line 130 read in.
## All meta lines processed.
## gt matrix initialized.
## Character matrix gt created.
##   Character matrix gt rows: 7085
##   Character matrix gt cols: 2513
##   skip: 0
##   nrows: 7085
##   row_num: 0
## 
## Processed variant 1000
## Processed variant 2000
## Processed variant 3000
## Processed variant 4000
## Processed variant 5000
## Processed variant 6000
## Processed variant 7000
## Processed variant: 7085
## All variants processed
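
A quick sanity check, not part of the original output, is to print the vcfR object and the first rows of its fixed fields; vcfR::getFIX() is the standard accessor for those fields.

vcf
head(vcfR::getFIX(vcf))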

Convert vcf data to genotype scores

vcf_num <- vcfR::extract.gt(vcf, 
                            element = "GT",
                            IDtoRowNames = FALSE,
                            as.numeric = TRUE,
                            convertNA = TRUE,
                            return.alleles = FALSE)
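
Before saving, a minimal check of the resulting genotype matrix, not in the original workflow, confirms its dimensions (variants in rows, samples in columns at this stage) and shows how the numeric scores are distributed for the first SNP.

dim(vcf_num)
table(vcf_num[1, ], useNA = "ifany")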

Save this information into a csv file

write.csv(vcf_num, file = "vcf_num.csv", row.names = F)
list.files(pattern = "vcf")
## [1] "5.17947992-18187992.ALL.chr5_GRCh38.genotypes.20170504.vcf.gz"
## [2] "vcf_num.csv"                                                  
## [3] "vcf_num_df.csv"                                               
## [4] "vcf_num_df2.csv"                                              
## [5] "vcf_scaled.csv"

Transpose data

vcf_num_t <- t(vcf_num)

Convert into a dataframe

vcf_num_df <- data.frame(vcf_num_t)

Get sample names

sample_names <- row.names(vcf_num_df)

Add sample names to data frame

vcf_num_df <- data.frame(sample = sample_names, vcf_num_df)

Save to a csv file

getwd()
## [1] "C:/Users/saman/Desktop/Comp Bio/Final Project"
write.csv(vcf_num_df, file = "vcf_num_df.csv", row.names = F)
list.files()
##  [1] "1000genomes_people_info2-1.csv"                               
##  [2] "5.17947992-18187992.ALL.chr5_GRCh38.genotypes.20170504.vcf.gz"
##  [3] "Final Project.Rmd"                                            
##  [4] "Final Project.Rproj"                                          
##  [5] "Final_Project_Final_Report.Rmd"                               
##  [6] "Final_Project_Workflow.Rmd"                                   
##  [7] "final_report_Epstein.Rmd"                                     
##  [8] "SNPs_cleaned.csv"                                             
##  [9] "vcf_num.csv"                                                  
## [10] "vcf_num_df.csv"                                               
## [11] "vcf_num_df2.csv"                                              
## [12] "vcf_scaled.csv"

Clean data

Merge data with population data

pop_meta <- read.csv(file = "1000genomes_people_info2-1.csv")
names(pop_meta)
## [1] "pop"       "super_pop" "sample"    "sex"       "lat"       "lng"
names(vcf_num_df)[1:10]
##  [1] "sample" "X1"     "X2"     "X3"     "X4"     "X5"     "X6"     "X7"    
##  [9] "X8"     "X9"
vcf_num_df2 <- merge(pop_meta, vcf_num_df, by = "sample")
nrow(vcf_num_df) == nrow(vcf_num_df2)
## [1] TRUE
names(vcf_num_df2)[1:15]
##  [1] "sample"    "pop"       "super_pop" "sex"       "lat"       "lng"      
##  [7] "X1"        "X2"        "X3"        "X4"        "X5"        "X6"       
## [13] "X7"        "X8"        "X9"

Save to a csv file

getwd()
## [1] "C:/Users/saman/Desktop/Comp Bio/Final Project"
write.csv(vcf_num_df2, file = "vcf_num_df2.csv", row.names = F)

Remove invariant features

invar_omit <- function(x){
  cat("Dataframe of dim", dim(x), "processed...\n")
  
  # identify invariant columns: their standard deviation is zero
  sds <- apply(x, 2, sd, na.rm = TRUE)
  i_var0 <- which(sds == 0)
  
  cat(length(i_var0), "columns removed\n")
  
  # drop the invariant columns, if any were found
  if(length(i_var0) > 0){
    x <- x[, -i_var0]
  }
  
  return(x)
}
names(vcf_num_df2)[1:10]
##  [1] "sample"    "pop"       "super_pop" "sex"       "lat"       "lng"      
##  [7] "X1"        "X2"        "X3"        "X4"
vcf_noinvar <- vcf_num_df2[, -c(1:6)]

vcf_noinvar <- invar_omit(vcf_noinvar)
## Dataframe of dim 2504 7085 processed...
## 1745 columns removed
vcf_noinvar <- data.frame(vcf_num_df2[, c(1:6)], 
                          vcf_noinvar)
dim(vcf_noinvar)
## [1] 2504 5346
my_meta_N_invar_cols <- 1745
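
An optional follow-up check, not in the original workflow, recomputes the standard deviations of the remaining SNP columns to confirm that no invariant columns are left.

remaining_sds <- apply(vcf_noinvar[, -c(1:6)], 2, sd, na.rm = TRUE)
any(remaining_sds == 0)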

Remove NAs

find_NAs <- function(x){
  # return the indices of the NA values in x
  NAs_TF <- is.na(x)
  i_NA <- which(NAs_TF)
  return(i_NA)
}
N_rows <- nrow(vcf_noinvar)
N_NA <- rep(x = 0, times = N_rows)
N_SNPs <- ncol(vcf_noinvar)
for(i in 1:N_rows){
  i_NA <- find_NAs(vcf_noinvar[i,])
  N_NA_i <- length(i_NA)
  N_NA[i] <- N_NA_i
}
cutoff50 <- N_SNPs*0.5
percent_NA <- N_NA/N_SNPs*100
any(percent_NA > 50)
## [1] FALSE
mean(percent_NA)
## [1] 0
my_meta_N_meanNA_rows <- mean(percent_NA)
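
The row-wise loop above can also be written as a single vectorized call; this sketch, not part of the original code, should reproduce the same per-row NA counts.

N_NA_vectorized <- rowSums(is.na(vcf_noinvar))
all(N_NA_vectorized == N_NA)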

Mean imputation of NAs

mean_imputation <- function(df){
  n_cols <- ncol(df)
  for(i in 1:n_cols){
    column_i <- df[, i]
    mean_i <- mean(column_i, na.rm = TRUE)
    NAs_i <- which(is.na(column_i))
    N_NAs <- length(NAs_i)
    column_i[NAs_i] <- mean_i
    df[, i] <- column_i
  }
  return(df)
}
names(vcf_noinvar)[1:10]
##  [1] "sample"    "pop"       "super_pop" "sex"       "lat"       "lng"      
##  [7] "X1"        "X3"        "X4"        "X5"
vcf_noNA <- vcf_noinvar[, -c(1:6)]

vcf_noNA <- mean_imputation(vcf_noNA)

vcf_noNA <- data.frame(vcf_noinvar[, c(1:6)], 
                       vcf_noNA)
dim(vcf_noNA)
## [1] 2504 5346
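
A quick optional check, not in the original workflow, confirms that the imputation left no missing values in the SNP columns.

sum(is.na(vcf_noNA[, -c(1:6)]))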

Scale data

vcf_scaled <- vcf_noNA
vcf_scaled[ ,-c(1:6)] <- scale(vcf_scaled[, -c(1:6)])
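
After scaling, each SNP column should have mean 0 and standard deviation 1; this spot-check of the first few columns is an optional addition, not part of the original workflow.

round(colMeans(vcf_scaled[, -c(1:6)])[1:5], 6)
apply(vcf_scaled[, 7:11], 2, sd)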

Save to a csv file

write.csv(vcf_scaled, file = "vcf_scaled.csv", row.names = F)

Run PCA

vcf_pca <- prcomp(vcf_scaled[, -c(1:6)])

View data with a scree plot

screeplot(vcf_pca)

Calculate explained variation

PCA_variation <- function(pca_summary, PCs = 2){
  var_explained <- pca_summary$importance[2, 1:PCs]*100
  var_explained <- round(var_explained, 3)
  return(var_explained)
}
vcf_pca_summary <- summary(vcf_pca)
var_out <- PCA_variation(vcf_pca_summary, PCs = 500)

Find the first PC that falls below the cut-off

N_columns <- ncol(vcf_scaled) - 6  # number of SNP columns, excluding the 6 metadata columns
cut_off <- 1/N_columns*100
i_cut_off <- which(var_out < cut_off)
i_cut_off <- min(i_cut_off)
## Warning in min(i_cut_off): no non-missing arguments to min; returning Inf
my_meta_N_meanNA_rowPCs <- i_cut_off
my_meta_var_PC123 <- var_out[c(1,2,3)]
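
The warning above occurs because none of the first 500 PCs explain less than the cut-off, so which() returns an empty vector and min() returns Inf; as a result, abline(v = i_cut_off) in the bar plot below draws nothing. A more defensive version, a sketch rather than the original code (i_cut_off_safe is a new name introduced here for illustration, and the original i_cut_off is left unchanged), guards against that empty case.

i_cut_off_safe <- if (any(var_out < cut_off)) {
  min(which(var_out < cut_off))
} else {
  NA  # no PC falls below the cut-off
}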

Plot percentages with a bar plot

barplot(var_out, 
        main = "Percent variation (%) Scree plot",
        ylab = "Percent variation (%) explained",
        names.arg = 1:length(var_out)
        )
abline(h = cut_off, col = 2, lwd = 2)
abline(v = i_cut_off)
legend("topright",
       col = c(2,1),
       lty = c(1,1),
       legend = c("Vertical line: cutoff",
                  "Horizontal line: 1st value below cut off")
       )

Plot cumulative percentages

cumulative_variation <- cumsum(var_out)
plot(cumulative_variation, type = "l",
     xlab = "Number of PCs",
     ylab = "Cumulative variation explained (%)")
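
For reference, an optional addition not in the original, the total percentage of variation captured by the 500 PCs summarized above is the last cumulative value.

cumulative_variation[length(cumulative_variation)]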

Calculate PCA scores

vcf_pca_scores <- vegan::scores(vcf_pca)
vcf_pca_scores2 <- data.frame(super_pop = vcf_noNA$super_pop, vcf_pca_scores)
my_meta_var_PC123[1]
##   PC1 
## 3.105
my_meta_var_PC123[2]
##  PC2 
## 1.67
my_meta_var_PC123[3]
##   PC3 
## 1.594

Create scatter plots of scores

ggpubr::ggscatter(data = vcf_pca_scores2,
                  y = "PC2",
                  x = "PC1",
                  color = "super_pop",
                  shape = "super_pop",
                  main = "PCA Scatterplot",
                  xlab = "PC1 (3.105% of variation)",
                  ylab = "PC2 (1.67% of variation)"
                  )

ggpubr::ggscatter(data = vcf_pca_scores2,
                  y = "PC3",
                  x = "PC2",
                  color = "super_pop",
                  shape = "super_pop",
                  main = "PCA Scatterplot",
                  xlab = "PC2 (1.67% of variation)",
                  ylab = "PC3 (1.594% of variation)"
                  )

ggpubr::ggscatter(data = vcf_pca_scores2,
                  y = "PC3",
                  x = "PC1",
                  color = "super_pop",
                  shape = "super_pop",
                  main = "PCA Scatterplot",
                  xlab = "PC1 (3.105% of variation)",
                  ylab = "PC3 (1.594% of variation)"
                  )
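
The scatterplot3d package loaded at the start of the workflow is not used above; a minimal sketch of a 3D score plot of PC1-PC3 could look like the following, where the numeric color coding of super-populations is an illustration rather than part of the original analysis.

pop_colors <- as.numeric(factor(vcf_pca_scores2$super_pop))
scatterplot3d(x = vcf_pca_scores2$PC1,
              y = vcf_pca_scores2$PC2,
              z = vcf_pca_scores2$PC3,
              color = pop_colors,
              xlab = "PC1 (3.105% of variation)",
              ylab = "PC2 (1.67% of variation)",
              zlab = "PC3 (1.594% of variation)",
              main = "PCA scores in three dimensions")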