library(vcfR)
##
## ***** *** vcfR *** *****
## This is vcfR 1.13.0
## browseVignettes('vcfR') # Documentation
## citation('vcfR') # Citation
## ***** ***** ***** *****
library(vegan)
## Loading required package: permute
## Loading required package: lattice
## This is vegan 2.6-4
library(ggplot2)
library(ggpubr)
list.files(pattern = "vcf")
## [1] "16.26712210-26952210.ALL.chr16_GRCh38.genotypes.20170504.vcf.gz"
## [2] "7.26000-266000.ALL.chr7_GRCh38.genotypes.20170504.vcf.gz"
## [3] "vcf_num_df2.csv"
## [4] "vcf_scaled.csv"
vcf_data <- vcfR::read.vcfR("16.26712210-26952210.ALL.chr16_GRCh38.genotypes.20170504.vcf.gz", convertNA = TRUE)
## Scanning file to determine attributes.
## File attributes:
## meta lines: 130
## header_line: 131
## variant count: 7941
## column count: 2513
##
## Meta line 130 read in.
## All meta lines processed.
## gt matrix initialized.
## Character matrix gt created.
## Character matrix gt rows: 7941
## Character matrix gt cols: 2513
## skip: 0
## nrows: 7941
## row_num: 0
##
## Processed variant 1000
## Processed variant 2000
## Processed variant 3000
## Processed variant 4000
## Processed variant 5000
## Processed variant 6000
## Processed variant 7000
## Processed variant: 7941
## All variants processed
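A quick way to confirm what was read in (not part of the original output) is to print the vcfR object and peek at its fixed fields:
# Optional sanity check: summarise the vcfR object and show the first variants
vcf_data
head(vcf_data@fix[, 1:5])   # CHROM, POS, ID, REF, ALT for the first rows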
vcf_data_genotype <- vcfR::extract.gt(vcf_data,
                                      element = "GT",
                                      IDtoRowNames = FALSE,
                                      as.numeric = TRUE,
                                      convertNA = TRUE)
# Transpose so samples are rows and SNPs are columns, then add a "sample"
# column (taken from the row names) for merging with the metadata
vcf_data_genotype_t <- t(vcf_data_genotype)
vcf_data_genotype_df <- data.frame(vcf_data_genotype_t)
sample <- row.names(vcf_data_genotype_df)
vcf_data_genotype_df <- data.frame(sample, vcf_data_genotype_df)
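As a quick check (not shown in the original), the transposed table should have one row per sample and one column per SNP plus the added sample column:
# Expected given the counts above: 2504 rows (samples) and 7941 + 1 columns
dim(vcf_data_genotype_df)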
Clean Data
meta_data <- read.csv(file = "1000genomes_people_info2-1.csv")
# Join the population metadata to the genotype table by sample ID
vcf_data_genotype_df_2 <- merge(meta_data, vcf_data_genotype_df, by = "sample")
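A quick check (assumed, not in the original) that the merge kept all genotyped samples; by default merge() silently drops samples missing from either table:
nrow(vcf_data_genotype_df)    # samples with genotypes
nrow(vcf_data_genotype_df_2)  # samples retained after merging with the metadata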
Remove invariant SNPs
# Drop invariant columns: a SNP column with zero standard deviation carries
# no information, so it is removed before scaling.
invar_omit <- function(x){
  # cat("Dataframe of dim", dim(x), "processed...\n")
  sds <- apply(x, 2, sd, na.rm = TRUE)
  i_var0 <- which(sds == 0)
  cat(length(i_var0), "columns removed\n")
  if(length(i_var0) > 0){
    x <- x[, -i_var0]
  }
  return(x)
}
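A toy example (not part of the analysis) showing how invar_omit() behaves: the constant column b has zero standard deviation and is dropped.
toy <- data.frame(a = c(0, 1, 2), b = c(1, 1, 1), c = c(2, 0, 1))
invar_omit(toy)   # prints "1 columns removed" and returns columns a and c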
# Keep the first six (sample/metadata) columns and drop invariant columns
# from the SNP block
vcf_data_no_invar <- data.frame(vcf_data_genotype_df_2[, c(1:6)],
                                invar_omit(vcf_data_genotype_df_2[, -c(1:6)]),
                                row.names = NULL)
## 1953 columns removed
Find NAs
# Return the indices of the missing values in x
find_NAs <- function(x){
  NAs_TF <- is.na(x)
  i_NA <- which(NAs_TF)
  N_NA <- length(i_NA)
  # cat("Results:", N_NA, "NAs present\n.")
  return(i_NA)
}
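A toy example (for illustration only): find_NAs() returns the positions of the missing values in a vector.
find_NAs(c(0, NA, 1, NA))   # expected: 2 4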
# Count the missing genotypes in each sample (row)
N_rows <- nrow(vcf_data_no_invar)
N_NA <- rep(x = 0, times = N_rows)
N_SNPs <- ncol(vcf_data_no_invar)
for (i in 1:N_rows) {
  i_NA <- find_NAs(vcf_data_no_invar[i, ])
  N_NA[i] <- length(i_NA)
}
# Flag any sample with more than 50% missing genotypes (none here)
cutoff50 <- N_SNPs * 0.5
percent_NA <- N_NA / N_SNPs * 100
any(percent_NA > 50)
## [1] FALSE
mean(percent_NA)
## [1] 0
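No sample exceeds the 50% cutoff here (the mean NA percentage is 0 for this slice), so nothing is removed; if some did, a filter along these lines (a sketch, not part of the original code) would drop them before imputation:
# Hypothetical filter: drop samples with more than 50% missing genotypes
i_drop <- which(N_NA > cutoff50)
if (length(i_drop) > 0) {
  vcf_data_no_invar <- vcf_data_no_invar[-i_drop, ]
}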
# Replace the NAs in each column with that column's mean
mean_imputation <- function(df) {
  n_cols <- ncol(df)
  for (i in 1:n_cols) {
    column_i <- df[, i]
    mean_i <- mean(column_i, na.rm = TRUE)
    NAs_i <- which(is.na(column_i))
    column_i[NAs_i] <- mean_i
    df[, i] <- column_i
  }
  return(df)
}
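A toy example (assumed, not in the original): each NA is replaced by its column mean.
mean_imputation(data.frame(x = c(0, NA, 2), y = c(1, 1, NA)))
# expected: x becomes 0, 1, 2 and y becomes 1, 1, 1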
# Mean-impute the SNP columns; the six metadata columns are left untouched
vcf_data_no_NA <- data.frame(vcf_data_no_invar[, c(1:6)],
                             mean_imputation(vcf_data_no_invar[, -c(1:6)]),
                             row.names = NULL)
# Centre and scale each SNP column to mean 0 and standard deviation 1
vcf_scaled <- vcf_data_no_NA
vcf_scaled[, -c(1:6)] <- scale(vcf_data_no_NA[, -c(1:6)])
dim(vcf_scaled)
## [1] 2504 5994
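An optional check (not in the original output) that scaling behaved as expected: the SNP columns should now have means near 0 and standard deviations near 1.
round(range(colMeans(vcf_scaled[, -c(1:6)])), 10)   # column means ~0
summary(apply(vcf_scaled[, -c(1:6)], 2, sd))        # column sds ~1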
write.csv(vcf_scaled, file = "Kodavali_cleaned.csv",
          row.names = FALSE)