## Data consist of 150 samples of organic molecular mixtures analyzed by
## pyrolysis gas chromatography-mass spectrometry (py-GC-MS).
## Each data set has the scan numbers in the rows and the mass-to-charge
## ratio (m/z) values in the columns.
## The data values are the raw intensities as measured by py-GC-MS.
## Each sample has either an abiotic (A) or biotic (B) origin.
## We use the Benjamini-Hochberg (BH) procedure to control the false discovery rate.
## In this file we create various graphs of the significant features
## identified by the BH method.
## The data are preprocessed before we apply the BH procedure.
library(dplyr) # for dataframe computation
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(MALDIquant) # for chemometrics processing
##
## This is MALDIquant version 1.22.2
## Quantitative Analysis of Mass Spectrometry Data
## See '?MALDIquant' for more information about this package.
library(caret) # for machine learning
## Loading required package: ggplot2
## Loading required package: lattice
library("data.table") # for rbindlist
##
## Attaching package: 'data.table'
## The following objects are masked from 'package:dplyr':
##
## between, first, last
library("ggplot2") # for plots
library(rgl) # for 3D graphs
library(plotly) # for interactive graphs
##
## Attaching package: 'plotly'
## The following object is masked from 'package:ggplot2':
##
## last_plot
## The following object is masked from 'package:stats':
##
## filter
## The following object is masked from 'package:graphics':
##
## layout
#Data preparation
setwd("C:/Desktop/Templeton_grant_research/Second_paper_material/Data2022")
species=c(rep("A",5),"B", "A","A","B","B","A","C", rep("B",5),rep("C",4),rep("B",4),"A",
rep("B",11),"A","A","B","A","A","B","B","A",rep("B",3),"A","B",rep("A",6),
rep("B",3),"A","A","B","B",rep("A",4),"B",rep("A",5),"B",rep("A",10),
rep("B",4),rep("A",5),rep("B",3),rep("A",3),rep("B",4),rep("A",8),"B",
"A","A","B","C","C","A",rep("B",3),"A","B", "B","A","B",rep("A",3),"B","A",
rep("A",7),"B","A","B")
species2=as.numeric(as.factor(species)) #A -> 1, B -> 2, C -> 3
ind=which(species2==3) #indices of the samples labeled C
species_n=species2[-ind] #keep only the A and B samples
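#Side note (illustration only): as.factor() orders levels alphabetically, so
#as.numeric(as.factor()) codes "A" as 1, "B" as 2, and "C" as 3; for example,
#as.numeric(as.factor(c("B","A","C","A"))) returns 2 1 3 1.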
#Reading in the data is based on the code found in:
#"How to import multiple .csv files simultaneously in R and create a data frame"
#by Dahna, A., datascience+, March 9, 2019
#https://datascienceplus.com/how-to-import-multiple-csv-files-simultaneously-in-r-and-create-a-data-frame/
species_names <- list.files()
species_names=species_names[-ind] #drop the files for the excluded samples
z=lapply(species_names, read.delim) #read in all 134 data sets
setwd("C:/Desktop/Templeton_grant_research/Second_paper_material/Data2023")
species_names2 <- list.files()
z2=lapply(species_names2, read.delim) #read in all 16 data sets
NN=700 #maximum m/z value
mass=seq(50,NN,1) #m/z values
MM=3240 #number of scans
ndim=length(species_names)
ndim2=length(species_names2)
#Scan number : m/z : intensity values for the 134 samples
M=list()
for(i in 1:ndim){
  colnames(z[[i]])="mass"
  #remove commas
  z[[i]]=data.frame(do.call("rbind", strsplit(as.character(z[[i]]$mass), ",",
                                              fixed = TRUE)))
  z[[i]]=data.frame(lapply(z[[i]],as.numeric))
  colnames(z[[i]])=c("scan",as.character(seq(50,NN,1)))
  z[[i]]=z[[i]] %>% slice(1:MM) #select the first MM rows
  M[[i]]=z[[i]]
}
#Scan number : m/z : intensity values for the 16 samples
M2=list()
for(i in 1:ndim2){
  colnames(z2[[i]])="mass"
  #remove commas
  z2[[i]]=data.frame(do.call("rbind", strsplit(as.character(z2[[i]]$mass), ",",
                                               fixed = TRUE)))
  z2[[i]]=data.frame(lapply(z2[[i]],as.numeric))
  colnames(z2[[i]])=c("scan",as.character(seq(50,NN,1)))
  z2[[i]]=z2[[i]] %>% slice(1:MM) #select the first MM rows
  M2[[i]]=z2[[i]]
}
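#A minimal sketch (illustration only; the demo_ objects are not used later) of
#the comma-splitting step above, applied to two fake rows:
demo_raw = data.frame(mass = c("1,100,200", "2,150,250")) #two scans, two m/z values
demo_parsed = data.frame(do.call("rbind", strsplit(as.character(demo_raw$mass), ",",
                                                   fixed = TRUE)))
demo_parsed = data.frame(lapply(demo_parsed, as.numeric))
colnames(demo_parsed) = c("scan", "50", "51")
demo_parsed #scan 1 has intensities 100 and 200; scan 2 has 150 and 250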
M_new=c(M,M2)
N=length(M_new) #150 samples in total
species_n2=rep(2,16) #all 16 samples from the 2023 data are biotic
species_names_new=c(species_names,species_names2)
y=c(species_n,species_n2)
y=factor(y,labels=c("A","B"))
#####################################################################################
#Preprocessing
#Detect the significant peaks as local maxima exceeding four times the
#estimated noise level (SNR = 4)
MZ=151 #number of m/z values to use (m/z 50 to 200)
#Create chromatograms for each sample and each m/z value inside each sample
sample_list=list()
sample_location_list=list()
suppressWarnings({
  for (i in 1:N){
    S=list()
    for(j in 1:MZ){
      #log-transform the positive intensities
      log_int=ifelse(M_new[[i]][,j+1]>0, log10(M_new[[i]][,j+1]), 0)
      S[[j]] = createMassSpectrum(mass=seq(1,MM,1), log_int,
                                  metaData=list(name="Chrom"))
    }
    chrom = smoothIntensity(S, method="MovingAverage", halfWindowSize=5) #smooth the data
    chrom = removeBaseline(chrom, method="SNIP") #remove the baseline
    #Put the processed chromatograms back into a dataframe
    processed_chrom_list=list()
    for (k in 1:MZ){
      processed_chrom_list[[k]] = as.numeric(intensity(chrom[[k]]))
    }
    processed_mass_dataframe = as.data.frame(do.call(rbind, processed_chrom_list))
    Ma=max(processed_mass_dataframe)
    Mi=min(processed_mass_dataframe)
    #Min-max normalize across the whole sample
    processed_mass_dataframe = t((processed_mass_dataframe - Mi)/(Ma - Mi))
    processed_mass_dataframe = as.data.frame(processed_mass_dataframe)
    S2=list()
    for(t in 1:MZ){
      S2[[t]] = createMassSpectrum(mass=seq(1,MM,1),
                                   intensity=processed_mass_dataframe[, t],
                                   metaData=list(name="Chrom_normalized"))
    }
    #Detect peaks as local maxima with a signal-to-noise ratio above 4
    peaks = detectPeaks(S2, method="MAD", halfWindowSize=20, SNR=4)
    peak_list=list()
    for (tt in 1:MZ){
      v=numeric(MM)
      scan_number=mass(peaks[[tt]])
      v[scan_number] = intensity(peaks[[tt]])
      peak_list[[tt]] = v
    }
    processed_peaks = t(as.data.frame(do.call(rbind, peak_list)))
    row.names(processed_peaks)=c(paste0("R", 1:MM))
    colnames(processed_peaks)=c(paste0("M", 50:(MZ+50-1)))
    processed_peaks2=as.data.frame(processed_peaks) %>%
      mutate(bin = cut(seq(1,MM,1), breaks=180, dig.lab=6)) %>% #bin the peaks by scan number
      group_by(bin) %>%
      summarise_all(max)
    sample_list[[i]] = processed_peaks2
    sample_location_list[[i]] = processed_peaks
  }
})
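#For reference, the same MALDIquant pipeline applied to one synthetic
#chromatogram (illustration only; the demo_ objects are not used later):
demo_int = abs(sin(seq(0, 10, length.out = MM))) + runif(MM, 0, 0.1) #synthetic trace
demo_spec = createMassSpectrum(mass = seq(1, MM, 1), intensity = demo_int,
                               metaData = list(name = "Demo_chrom"))
demo_spec = smoothIntensity(demo_spec, method = "MovingAverage", halfWindowSize = 5)
demo_spec = removeBaseline(demo_spec, method = "SNIP")
demo_peaks = detectPeaks(demo_spec, method = "MAD", halfWindowSize = 20, SNR = 4)
#mass(demo_peaks) holds the scan numbers of the detected peaks and
#intensity(demo_peaks) their intensities, exactly as used above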
#Scan # and mass spectrum for each sample
mass_scan_list_loc=list()
for(i in 1:N){
  sm_loc=as.numeric(unlist(sample_location_list[[i]]))
  mass_scan_list_loc[[i]]=sm_loc
}
#Sample vs mass/scan numbers
#Put the samples into a dataframe
data_mass_scan_loc = do.call(rbind, mass_scan_list_loc)
bin2=as.character(seq(1,3240,1))
MS=as.character(seq(50,(MZ+50-1),1))
colnames(data_mass_scan_loc) = paste(outer(bin2, MS, paste, sep = ';'))
data_mass_scan_new=as.data.frame(ifelse(data_mass_scan_loc > 0,1,0))
MZ_name=c(paste0(";",50:(MZ+50-1)))
MZ_name2=c(paste0(50:(MZ+50-1)))
MZ_name3=c(paste0(".",50:(MZ+50-1)))
bin3 = unique(cut(seq(1,MM,1),breaks=180,dig.lab = 6))
#Finding the endpoints in bin4 is based on code found in
#"Obtain endpoints from interval that is factor variable", Stack Overflow:
#https://stackoverflow.com/questions/40665240/obtain-endpoints-from-interval-that-is-factor-variable
bin4=unique(as.numeric(unlist( strsplit( gsub( "[][(]" , "", levels(bin3)) , ","))))
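#A minimal illustration of the endpoint extraction above: cut() labels bins as
#"(a,b]", so stripping "[", "]", "(" and splitting on "," recovers the endpoints.
demo_bins = levels(cut(1:10, breaks = 2)) #e.g. "(0.991,5.5]" "(5.5,10]"
unique(as.numeric(unlist(strsplit(gsub("[][(]", "", demo_bins), ","))))
#returns 0.991 5.5 10, the bin endpoints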
#Determine the mean scan number for each bin and each
#m/z value across the training samples
mean_scan_name = list()
for (i in 1:MZ){
  data_mass_scan_new2 = data_mass_scan_new %>% select(ends_with(MZ_name[i]))
  scan_name=sub(basename(colnames(data_mass_scan_new2)), pattern = MZ_name[i],
                replacement = "", fixed = TRUE)
  colnames(data_mass_scan_new2) = scan_name #name the columns with the scan numbers
  count_elements=function(x) sum(x)
  #Count the number of elements for each scan number across the training samples
  n_elements=as.numeric(apply(as.matrix(data_mass_scan_new2), 2, count_elements))
  #Repeat the nonzero scan numbers according to their frequencies
  vec=as.numeric(rep(colnames(data_mass_scan_new2), times=n_elements))
  scan_dataframe=list() #scan numbers for each bin for the ith m/z value selected
  indd1=1:floor(bin4[2])
  ind_L1=which(vec %in% indd1)
  Lt1=vec[ind_L1]
  #Mean scan number in the first bin
  scan_mean1=if(length(Lt1)>0){round(mean(Lt1))} else {-1}
  DD1=data.frame(matrix(ncol = 1, nrow = 1))
  colnames(DD1)= paste(outer(as.character(scan_mean1), MZ_name2[i], paste, sep = ';'))
  scan_dataframe[[1]]=DD1
  for (j in 2:(length(bin4)-1)){
    indd=(floor(bin4[j])+1):floor(bin4[j+1])
    ind_L=which(vec %in% indd)
    Lt=vec[ind_L]
    #Mean scan number in each bin
    scan_mean=if(length(Lt)>0){round(mean(Lt))} else {-j} #give empty bins a unique name
    DD=data.frame(matrix(ncol = 1, nrow = 1))
    colnames(DD)= paste(outer(as.character(scan_mean), MZ_name2[i],
                              paste, sep = ';'))
    scan_dataframe[[j]]=DD
  }
  #The mean scan number for each bin of scans for the ith m/z value
  mean_scan_name[[i]]=do.call(cbind, scan_dataframe)
}
#Get the column names of the interleaved mean scan numbers and m/z values
data_mass_scan_interl=do.call(cbind, mean_scan_name)
names_new=colnames(data_mass_scan_interl)
#Scan# and mass spectrum for each sample
mass_scan_list=list()
for(i in 1:N){
  #drop the bin column (the first 180 values) and keep the binned peak intensities
  sm=as.numeric(unlist(sample_list[[i]]))[181:(180*(MZ+1))]
  mass_scan_list[[i]]=sm
}
#Sample vs mass/scan numbers
data_preprocessed = do.call(rbind, mass_scan_list) #put the samples into a dataframe
colnames(data_preprocessed) = names_new
data_preprocessed = as.data.frame(data_preprocessed)
count_nonzero=function(x){length(which(x > 0))/ncol(data_preprocessed)}
#Proportion of nonzero feature values for each observation
Perc_Nnonzero=apply(data_preprocessed, 1, count_nonzero)
data_preprocessed = data_preprocessed %>% mutate(Perc_Nnonzero)
colnames(data_preprocessed) = make.names(colnames(data_preprocessed))
#Removing variables with near zero variance, nearZeroVar, is based on the book
#"Applied Predictive Modeling" by Kuhn, M. and Johnson, K.,
#Springer, New York, 2013
#Detect features with near zero variance
near_zero_variance = nearZeroVar(data_preprocessed)
#Remove features with near zero variance
data_preprocessed = data_preprocessed[, -near_zero_variance]
dim(data_preprocessed) #new dimension
## [1] 150 8204
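#nearZeroVar() returns the indices of predictors that are almost constant
#(high frequency ratio, few unique values); a minimal sketch (illustration only):
demo_df = data.frame(a = c(rep(0, 99), 1), b = rnorm(100))
nearZeroVar(demo_df) #returns 1: column "a" has near zero variance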
#Contains all samples with the selected features and corresponding y-values (A or B)
training_transformed=data.frame(data_preprocessed,y=y)
####################################################################################
#Benjamini-Hochberg (BH) method to determine significantly different variables
#between biotic and abiotic samples.
#We use modified code for BH found in James et al.,
#"An Introduction to Statistical Learning", 2nd ed., Springer, 2023.
m = ncol(training_transformed)-1
x1 = training_transformed %>% filter(y=="A")
x2 = training_transformed %>% filter(y=="B")
n1=nrow(x1)
n2=nrow(x2)
B=1000
set.seed(99)
#Using the permutation test to create the null distribution for the t-statistic
TT = rep(NA, m)
TT.star = matrix(NA, ncol = m, nrow = B)
for(j in 1:m){
  TT[j] = t.test(x1[,j], x2[,j], var.equal = FALSE)$statistic
  for (b in 1:B){
    sam = sample(c(x1[,j], x2[,j])) #permute the group labels
    TT.star[b,j] = t.test(sam[1:n1], sam[(n1 + 1):(n1 + n2)],
                          var.equal = FALSE)$statistic
  }
}
cutoffs = sort(abs(TT)) #candidate rejection cutoffs
FDR <- RR <- VV <- rep(NA, m)
for (j in 1:m){
  R = sum(abs(TT) >= cutoffs[j]) #number of rejected features
  V = sum(abs(TT.star) >= cutoffs[j])/B #estimated number of false rejections
  RR[j] = R
  VV[j] = V
  FDR[j] = V/R
}
max(RR[FDR <= 0.001])
## [1] 84
index=abs(TT) >= min(cutoffs[FDR < 0.001])
head(training_transformed[,index]) #significantly different features
Av=colnames(training_transformed[,index]) #names of the significant features
###############################################################
K=(dim(training_transformed)[2]-1) #number of features (excluding y)
y2=c(1,1,1,1,1,3,1,1,3,3,1,rep(2,9),1,3,3,rep(2,9),1,1,3,1,1,2,2,1,3,2,3,1,2,rep(1,6),
2,3,2,1,1,2,3,rep(1,4),2,rep(1,5),2,rep(1,10),3,2,2,2,rep(1,5),2,2,2,1,1,1,3,3,3,3,
rep(1,8),3,1,1,3,1,2,3,2,1,2,2,1,3,1,1,1,2,rep(1,8),3,1,3,rep(2,13),3,2,3)
y2=factor(y2,labels=c("A","B","C"))
#A: Abiotic
#B: Contemporary biotic
#C: Altered biotic
indexA=which(y2=="A")
indexB=which(y2=="B")
indexC=which(y2=="C")
lA=length(indexA) #number of abiotic samples
lB=length(indexB) #number of contemporary biotic samples
lC=length(indexC) #number of altered biotic samples
abiotic=species_names_new[indexA] #abiotic sample names
bioticB=species_names_new[indexB] #contemporary biotic sample names
bioticC=species_names_new[indexC] #altered biotic sample names
####################################################################################
#######Calculate the proportion of samples that contain each significant variable,
#######split by abiotic, contemporary biotic, and altered biotic
calc_proportion=function(Av) {
  #Intensity values for the variables in Av for each sample type
  dA=list()
  dB=list()
  dC=list()
  for(i in 1:length(Av)){
    dA2=training_transformed %>% select(all_of(Av[i]), y) %>% filter(y2=="A")
    dA[[i]] = dA2[,1]
    dB2=training_transformed %>% select(all_of(Av[i]), y) %>% filter(y2=="B")
    dB[[i]] = dB2[,1]
    dC2=training_transformed %>% select(all_of(Av[i]), y) %>% filter(y2=="C")
    dC[[i]] = dC2[,1]
  }
  data_A = do.call(cbind, dA)
  colnames(data_A) = Av
  data_B = do.call(cbind, dB)
  colnames(data_B) = Av
  data_C = do.call(cbind, dC)
  colnames(data_C) = Av
  #Set nonzero values to 1
  data_AA=ifelse(data_A[,1:length(Av)]>0, 1, 0)
  data_BB=ifelse(data_B[,1:length(Av)]>0, 1, 0)
  data_CC=ifelse(data_C[,1:length(Av)]>0, 1, 0)
  data_ABC = rbind(data_AA, data_BB, data_CC)
  data_stat=data.frame(apply(data_AA,2,mean), apply(data_BB,2,mean),
                       apply(data_CC,2,mean), apply(data_ABC,2,mean))
  colnames(data_stat)=c("Pr A", "Pr B(contemporary)", "Pr B(altered)", "Prop.total")
  return(data_stat)
}
data_stat=calc_proportion(Av)
#########################################################################################
######Find the distribution of the proportion of samples containing each feature,
######for abiotic, biotic, contemporary biotic, and altered biotic samples
allA=training_transformed %>% filter(y=="A")
allB=training_transformed %>% filter(y=="B")
allB_Cont=training_transformed[,1:(K-1)]%>% slice(indexB) #omitting Perc_Nnonzero feature
allB_Alt=training_transformed[,1:(K-1)]%>% slice(indexC)
allA_bin=ifelse(allA[,1:(K-1)]>0,1,0)
allB_bin=ifelse(allB[,1:(K-1)]>0,1,0)
allB_Cont_bin=ifelse(allB_Cont>0,1,0)
allB_Alt_bin=ifelse(allB_Alt>0,1,0)
# Proportion of abiotic samples that contain each feature
prop_A= apply(allA_bin,2,mean)
# Proportion of biotic samples that contain each feature
prop_B= apply(allB_bin,2,mean)
# Proportion of contemporary biotic samples that contain each feature
prop_B_Cont= apply(allB_Cont_bin,2,mean)
# Proportion of altered biotic samples that contain each feature
prop_B_Alt = apply(allB_Alt_bin,2,mean)
#Median proportion of samples containing a feature, for abiotic, biotic,
#contemporary biotic, and altered biotic samples, respectively
median(prop_A)
## [1] 0.1466667
median(prop_B)
## [1] 0.1866667
median(prop_B_Cont)
## [1] 0.1730769
median(prop_B_Alt)
## [1] 0.173913
##########################################################################################
#Proportion of samples that contain the significant variables
D_A = data_stat[,1]
D_B_Cont = data_stat[,2]
D_B_Alt = data_stat[,3]
D_B = (data_stat[,2]*lB + data_stat[,3]*lC)/(lB+lC) #weighted mean over all biotic samples
DIM=length(Av)
D_A_array=array(c(D_A), dim=c(DIM,1))
D_B_array=array(c(D_B), dim=c(DIM,1))
D_B_Cont_array=array(c(D_B_Cont), dim=c(DIM,1))
D_B_Alt_array=array(c(D_B_Alt), dim=c(DIM,1))
#These significant variables are on average found in the following proportions
#of samples:
mean(D_A)
## [1] 0.1147619
mean(D_B)
## [1] 0.4412698
mean(D_B_Cont)
## [1] 0.444826
mean(D_B_Alt)
## [1] 0.4332298
#######################################################################################
#####Distribution of the significant variables
allA2=allA %>% select(all_of(Av))
allB2=allB %>% select(all_of(Av))
allB_Cont2 = allB_Cont %>% select(all_of(Av))
allB_Alt2 = allB_Alt %>% select(all_of(Av))
Nrow = lA+(lB+lC)*2
size_Dbar = Nrow*length(Av)
Dbar=matrix(numeric(size_Dbar),nrow=Nrow)
Dbar=as.data.frame(Dbar)
for(i in 1:DIM){
  DbarA=data.frame(allA2[,i])
  colnames(DbarA)=c(Av[i])
  DbarB=data.frame(allB2[,i])
  colnames(DbarB)=c(Av[i])
  DbarBCont=data.frame(allB_Cont2[,i])
  colnames(DbarBCont)=c(Av[i])
  DbarBAlt=data.frame(allB_Alt2[,i])
  colnames(DbarBAlt)=c(Av[i])
  Dbar[,i]=rbind(DbarA, DbarB, DbarBCont, DbarBAlt)
}
Type=c(rep("A",lA),rep("B",(lB+lC)),rep("B(Cont.)",lB),rep("B(Alt.)",lC))
Dbar=data.frame(Dbar,Type)
colnames(Dbar)=c(Av,"Type")
########################################################################################
#Split the scan number and m/z value of each significant variable into two
#separate components.
data_split=function(Av){
  datadf3=Av
  #Splitting a vector of strings based on code found in "Splitting Strings in R
  #programming - strsplit() method":
  #https://www.geeksforgeeks.org/splitting-strings-in-r-programming-strsplit-method/
  datadf_st3 = strsplit(datadf3, split = "[.]+")
  datadf_st23=list()
  for (i in 1:length(datadf_st3)){
    datadf_st23[[i]] = strsplit(datadf_st3[[i]], split = '""')
  }
  #Get the first element of a list based on code found in
  #"R list get first item of each element", Stack Overflow:
  #https://stackoverflow.com/questions/44176908/r-list-get-first-item-of-each-element
  datadf_st213= unlist(sapply(datadf_st23, function(x) x[1]))
  #gsub based on code found in "Remove Character From String in R"
  #by Nelamali, N., March 27, 2024:
  #https://sparkbyexamples.com/r-programming/remove-character-from-string-in-r/
  xx2 = as.numeric(gsub('[X]','',datadf_st213)) #scan numbers
  yy2 = as.numeric(unlist(sapply(datadf_st23, function(x) x[2]))) #m/z values
  return(list(xx2=xx2,yy2=yy2))
}
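#Example (illustration only): make.names() turned a column name such as
#"376;68" into "X376.68"; data_split() recovers the two coordinates:
demo_split = data_split(c("X376.68"))
demo_split$xx2 #376, the mean scan number
demo_split$yy2 #68, the m/z value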
xx2=as.numeric(unlist(data_split(Av)[1]))
yy2=as.numeric(unlist(data_split(Av)[2]))
#######################################################################################
split_name = strsplit(species_names_new, split = "[.]+")
split_name2=list()
for (i in 1:length(split_name)){
  split_name2[[i]] = strsplit(split_name[[i]], split = '""')
}
#Names of the samples
split_name_new= unlist(sapply(split_name2, function(x) x[1]))
#remove bracket characters and a trailing "3d" from the names
split_name_new = unlist(strsplit(gsub("[][(]", "", split_name_new), "3d"))
data_new=training_transformed %>% select(all_of(Av))
y3=factor(y2,labels=c("Abiotic","Biotic (contemporary)","Biotic (altered)"))
#PCA
pr.out3=prcomp(data_new, scale.=TRUE)
summary(pr.out3)
## Importance of components:
## PC1 PC2 PC3 PC4 PC5 PC6 PC7
## Standard deviation 5.144 2.83269 2.0839 1.76987 1.61838 1.53964 1.43387
## Proportion of Variance 0.315 0.09553 0.0517 0.03729 0.03118 0.02822 0.02448
## Cumulative Proportion 0.315 0.41051 0.4622 0.49950 0.53069 0.55891 0.58338
## PC8 PC9 PC10 PC11 PC12 PC13 PC14
## Standard deviation 1.34132 1.29071 1.28120 1.21751 1.15781 1.13862 1.1113
## Proportion of Variance 0.02142 0.01983 0.01954 0.01765 0.01596 0.01543 0.0147
## Cumulative Proportion 0.60480 0.62463 0.64417 0.66182 0.67778 0.69321 0.7079
## PC15 PC16 PC17 PC18 PC19 PC20 PC21
## Standard deviation 1.0768 1.05223 1.01621 1.00702 0.97824 0.94957 0.93984
## Proportion of Variance 0.0138 0.01318 0.01229 0.01207 0.01139 0.01073 0.01052
## Cumulative Proportion 0.7217 0.73490 0.74719 0.75927 0.77066 0.78139 0.79191
## PC22 PC23 PC24 PC25 PC26 PC27 PC28
## Standard deviation 0.91155 0.8887 0.87775 0.85840 0.85127 0.82085 0.80716
## Proportion of Variance 0.00989 0.0094 0.00917 0.00877 0.00863 0.00802 0.00776
## Cumulative Proportion 0.80180 0.8112 0.82037 0.82914 0.83777 0.84579 0.85355
## PC29 PC30 PC31 PC32 PC33 PC34 PC35
## Standard deviation 0.78239 0.75724 0.75388 0.73458 0.72447 0.71413 0.68786
## Proportion of Variance 0.00729 0.00683 0.00677 0.00642 0.00625 0.00607 0.00563
## Cumulative Proportion 0.86084 0.86766 0.87443 0.88085 0.88710 0.89317 0.89880
## PC36 PC37 PC38 PC39 PC40 PC41 PC42
## Standard deviation 0.68657 0.65946 0.64463 0.62720 0.61561 0.59712 0.58804
## Proportion of Variance 0.00561 0.00518 0.00495 0.00468 0.00451 0.00424 0.00412
## Cumulative Proportion 0.90442 0.90959 0.91454 0.91922 0.92374 0.92798 0.93210
## PC43 PC44 PC45 PC46 PC47 PC48 PC49
## Standard deviation 0.5871 0.57161 0.56183 0.5573 0.53776 0.52880 0.51504
## Proportion of Variance 0.0041 0.00389 0.00376 0.0037 0.00344 0.00333 0.00316
## Cumulative Proportion 0.9362 0.94009 0.94385 0.9475 0.95099 0.95432 0.95747
## PC50 PC51 PC52 PC53 PC54 PC55 PC56
## Standard deviation 0.50384 0.49177 0.47977 0.46315 0.43403 0.42149 0.41339
## Proportion of Variance 0.00302 0.00288 0.00274 0.00255 0.00224 0.00211 0.00203
## Cumulative Proportion 0.96050 0.96338 0.96612 0.96867 0.97091 0.97303 0.97506
## PC57 PC58 PC59 PC60 PC61 PC62 PC63
## Standard deviation 0.40241 0.39585 0.38606 0.37385 0.36052 0.35301 0.34168
## Proportion of Variance 0.00193 0.00187 0.00177 0.00166 0.00155 0.00148 0.00139
## Cumulative Proportion 0.97699 0.97885 0.98063 0.98229 0.98384 0.98532 0.98671
## PC64 PC65 PC66 PC67 PC68 PC69 PC70
## Standard deviation 0.32224 0.31666 0.30124 0.29606 0.2898 0.27362 0.26575
## Proportion of Variance 0.00124 0.00119 0.00108 0.00104 0.0010 0.00089 0.00084
## Cumulative Proportion 0.98795 0.98914 0.99022 0.99127 0.9923 0.99316 0.99400
## PC71 PC72 PC73 PC74 PC75 PC76 PC77
## Standard deviation 0.26247 0.24925 0.22967 0.22604 0.21562 0.20262 0.18153
## Proportion of Variance 0.00082 0.00074 0.00063 0.00061 0.00055 0.00049 0.00039
## Cumulative Proportion 0.99482 0.99556 0.99619 0.99679 0.99735 0.99784 0.99823
## PC78 PC79 PC80 PC81 PC82 PC83 PC84
## Standard deviation 0.17168 0.16364 0.1595 0.14609 0.13650 0.13176 0.09854
## Proportion of Variance 0.00035 0.00032 0.0003 0.00025 0.00022 0.00021 0.00012
## Cumulative Proportion 0.99858 0.99890 0.9992 0.99946 0.99968 0.99988 1.00000
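#The proportion of variance explained (PVE) can be recovered from the component
#standard deviations; a minimal sketch (illustration only):
demo_pve = pr.out3$sdev^2/sum(pr.out3$sdev^2)
round(demo_pve[1:3], 3) #approx. 0.315 0.096 0.052, matching the summary above
plot(cumsum(demo_pve), type = "b", xlab = "Principal component",
     ylab = "Cumulative PVE")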
#########################################################################################
######Create a dataframe with scan#, m/z, intensity coordinates, sample type, and name for
#the significant variables
Av3=Av
data_Av3=training_transformed %>% select(all_of(Av3))
sample_import=data.frame(species_names_new,y2,data_Av3)
#Coordinates for scan# and m/z for the significant variables
cord=data.frame(xx2,yy2)
colnames(cord)=c("scan #", "m/z")
#Intensity values for the significant variables for each sample
ss=list()
for(i in 1:N){
  ss[[i]] = sample_import[i,]
}
sample_frame = as.data.frame(t(do.call(rbind,ss)))
colnames(sample_frame)=species_names_new
#The significant variables with their scan# and m/z coordinates and
#intensity value for each sample
cord2=cbind(cord,sample_frame[3:(length(Av3)+2),])
#Code for converting dataframe to numeric based on code found in
#"Code for converting entire data frame to numeric":
#https://stackoverflow.com/questions/60288057/code-for-converting-entire-data-frame-to-numeric ,
#stack overflow
cord2=mutate_all(cord2, function(x) as.numeric(as.character(x)))
#Create a dataframe for abiotic samples with scan#, m/z value coordinates,
#intensity, sample type
data_mA=list()
for (i in 1:lA){
  data_mA[[i]]= data.frame(xx2, yy2, as.numeric(data_Av3[indexA[i],]))
  colnames(data_mA[[i]])=c("Scan","Mass_to_charge_ratio","Intensity")
}
typeA=factor(rep("Abiotic",lA*length(Av3)))
data3DA=do.call(rbind,data_mA)
data3DA=data.frame(data3DA,typeA)
colnames(data3DA)=c("Scan","Mass_to_charge_ratio","Intensity","Type")
NameA=factor(rep(split_name_new[indexA],each=length(Av3)))#abiotic sample names
#Create a dataframe for contemporary biotic samples with scan#, m/z value coordinates,
#intensity, sample type
data_mB=list()
for (i in 1:lB){
  data_mB[[i]]= data.frame(xx2, yy2, as.numeric(data_Av3[indexB[i],]))
  colnames(data_mB[[i]])=c("Scan","Mass_to_charge_ratio","Intensity")
}
typeB=factor(rep("Biotic (cont.)",lB*length(Av3)))
data3DB=do.call(rbind,data_mB)
data3DB=data.frame(data3DB,typeB)
colnames(data3DB)=c("Scan","Mass_to_charge_ratio","Intensity","Type")
NameB=factor(rep(split_name_new[indexB],each=length(Av3)))
#Create a dataframe for altered biotic samples with scan#, m/z value coordinates,
#intensity, sample type
data_mC=list()
for (i in 1:lC){
  data_mC[[i]]= data.frame(xx2, yy2, as.numeric(data_Av3[indexC[i],]))
  colnames(data_mC[[i]])=c("Scan","Mass_to_charge_ratio","Intensity")
}
typeC=factor(rep("Biotic (alt.)",lC*length(Av3)))
data3DC=do.call(rbind,data_mC)
data3DC=data.frame(data3DC,typeC)
colnames(data3DC)=c("Scan","Mass_to_charge_ratio","Intensity","Type")
NameC=factor(rep(split_name_new[indexC],each=length(Av3)))
NameS=c(NameA, NameB, NameC)
data3D=rbind(data3DA, data3DB, data3DC)
#dataframe with scan#, m/z, intensity coordinates, sample type, and name for
#the significant variables
data3D=cbind(data3D,NameS)
ind_all=which(data3D[,3]>0)
#####################################################################################
#Figure S3
##3D-PCA
cols=c("#B15928","#33A02C","#1F78B4")
Cols=function(vec){
  cols=c("#B15928","#33A02C","#1F78B4")
  return(cols[as.numeric(as.factor(vec))])
}
knitr::knit_hooks$set(webgl = hook_webgl)
windowsFonts(A = windowsFont("sans"))
plot3d(pr.out3$x[,1:3], col = Cols(y2), type = 's', radius = .2)
text3d(pr.out3$x[,1:3],
       texts=split_name_new,
       cex= 0.6, pos=3, family="A")
bbox3d(color = c("grey", "black"), emission = "grey",
       specular = "grey", shininess = 5, alpha = 0.8)
windowsFonts(A = windowsFont("sans"))
plot3d(pr.out3$x[,1:3], col = Cols(y2), type = 's', radius = .25)
text3d(pr.out3$x[,1:3],
       texts=split_name_new,
       cex= 0.7, pos=3, family="A")
bbox3d(color = c("grey", "black"), emission = "grey",
       specular = "grey", shininess = 5, alpha = 0.8)
#######################################################################################
#Barplot of the significant variables
#Figure S6
par(mfcol = c(4, 1), mar = numeric(4),oma = c(5, 4, .5, .5), mai = c(0.05, 0.2, 0.3, 0.2),
mgp = c(2, .6, 0),family="sans", ps=9)
barplot(D_A_array,beside=TRUE,ylim=c(0,1),col="#B15928" ,las=2,axes=FALSE,
main="Abiotic samples",width=5)
text(-2.2,1.12,label=substitute(paste(bold(('a')))),col="black",xpd=NA)
axis(2L)
box()
barplot(D_B_array,beside=TRUE,ylim=c(0,1),col="#6A3D9A",las=2,axes=FALSE,
main="Biotic samples")
text(-2.2,1.12,label=substitute(paste(bold(('b')))), col="black",xpd=NA)
axis(2L)
box()
barplot(D_B_Cont_array,beside=TRUE,ylim=c(0,1),col="#33A02C" ,las=2,axes=FALSE,
main="Contemporary biotic samples")
text(-2.2,1.12,label=substitute(paste(bold(('c')))), col="black",xpd=NA)
axis(2L)
box()
barplot(D_B_Alt_array,beside=TRUE,ylim=c(0,1),col="#1F78B4",las=2,axes=FALSE,
main="Altered biotic samples")
text(-2.2,1.12,label=substitute(paste(bold(('d')))), col="black",xpd=NA)
axis(2L)
box()
mtext("Proportion of samples", side = 2, outer = TRUE, line = 2.2, cex=0.9)
mtext("Significant features", side = 1, outer = TRUE, line = 2.2, cex=0.9)

#########################################################################
#Plot scan#:m/z:intensity for the significant variables with sample names
#for each point
#Figure S8
fig=plot_ly(data=data3D[ind_all,], x=~Scan, y=~Mass_to_charge_ratio,
            z=~Intensity, type="scatter3d", mode="markers",
            marker=list(size = 3), color=~Type,
            colors=c("#B15928","#33A02C","#1F78B4"),
            text = ~paste("", NameS))
fig=fig %>% layout(legend = list(x = 0.4, y = 0.95, orientation = 'h',
                                 itemsizing='constant', bgcolor="ghostwhite"))
fig