WT_3_hr PS19_3_hr
knitr::opts_knit$set(root.dir = ".")
# load libraries
library(cowplot) # plot_grid()
library(DoubletFinder) # paramSweep()
library(dplyr) # ungroup()
library(ggrepel) # geom_text_repel()
library(ggplot2) # ggplot()
library(grid) # grid.arrange()
library(gridExtra) # grid.arrange()
library(harmony) # RunHarmony()
library(reshape2) # melt()
library(Seurat) # Read10X_h5()
library(Seurat.utils) # RenameGenesSeurat()
library(stringr) # str_match()
pathToRef <- "/research/labs/neurology/fryer/projects/references/mouse/refdata-gex-mm10-2020-A"
pathToTestType <- "scRNA/cell_rep_1_vs_rep_2/"
tissue <- "Brain"
seqType <- "Cell"
treatment <- "PMI"
tool <- "cellbender"
# replicate groups
replicate <- "1 vs 2"
replicate1 <- "replicate1"
replicate2 <- "replicate2"
# genotype
WT <- "WT"
PS19 <- "PS19"
# timepoints
treatment_3hr <- "3_hr"
# colors
treatment_color <- "blue"
control_color <- "gray"
treatment_color_PS19 <- "#9933FF"
treatment_color_WT <- "blue"
WT_treatment_color_3hr_rep1 <- "#6699FF" # light blue
WT_treatment_color_3hr_rep2 <- "blue" # blue
PS19_treatment_color_3hr_rep1 <- "#CC99FF" # pink/purple
PS19_treatment_color_3hr_rep2 <- "purple" # purple
sample_colors <- c(WT_treatment_color_3hr_rep1, WT_treatment_color_3hr_rep2,
PS19_treatment_color_3hr_rep1, PS19_treatment_color_3hr_rep2)
treatment_colors <- c(treatment_color_WT, treatment_color_PS19)
genotype_colors <- c("thistle", "skyblue")
replicate_colors <- c("tan3", "turquoise")
# contrast
myContrasts <- c("replicate 1 - replicate 2")
nCount.min <- 250
nFeature.min <- 250
complexity.cutoff <- 0.85
mt.cutoff <- 25
hb.cutoff <- 5
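# nCount.min / nFeature.min: minimum UMIs and unique genes required per cell
# complexity.cutoff: minimum log10(nFeature_RNA)/log10(nCount_RNA) per cell
# mt.cutoff / hb.cutoff: maximum percent mitochondrial / hemoglobin reads per cell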
These functions will help simultaneously save plots as a PNG and a PDF.
saveToPDF <- function(...) {
d = dev.copy(pdf,...)
dev.off(d)
}
saveToPNG <- function(...) {
d = dev.copy(png,...)
dev.off(d)
}
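A minimal usage sketch (the file name and dimensions here are hypothetical): draw a plot on the current device, then copy it to disk.
plot(1:10) # any plot drawn on the current device
saveToPDF("example_plot.pdf", width = 6, height = 4)
saveToPNG("example_plot.png", width = 600, height = 400)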
Using CellBender filtered output.
prefix <- "../../cellbender/scRNA/"
suffix <- "/.h5_filtered.h5"
if (tissue == "Brain" && file.exists(paste0("../../rObjects/", pathToTestType, "PMI_merged_h5.rds"))) {
PMI <- readRDS(paste0("../../rObjects/", pathToTestType, "PMI_merged_h5.rds"))
} else if (tissue == "Brain") {
# individual sample objects
rep1_WT_3_hr <- CreateSeuratObject(Read10X_h5(paste0(prefix,"rep1_WT_3_hr",suffix)))
rep2_WT_3_hr <- CreateSeuratObject(Read10X_h5(paste0(prefix,"rep2_WT_3_hr",suffix)))
rep1_PS19_3_hr <- CreateSeuratObject(Read10X_h5(paste0(prefix,"rep1_PS19_3_hr",suffix)))
rep2_PS19_3_hr <- CreateSeuratObject(Read10X_h5(paste0(prefix,"rep2_PS19_3_hr",suffix)))
# merge objects
PMI <- merge(x = rep1_WT_3_hr,
y = c(rep2_WT_3_hr, rep1_PS19_3_hr, rep2_PS19_3_hr),
add.cell.ids = c("rep1_WT_3_hr", "rep2_WT_3_hr", "rep1_PS19_3_hr", "rep2_PS19_3_hr"),
project = paste0("PMI replicate group ", replicate, " - Single Cell"))
# cleanup and save
remove(rep1_WT_3_hr, rep2_WT_3_hr, rep1_PS19_3_hr, rep2_PS19_3_hr)
saveRDS(PMI, paste0("../../rObjects/", pathToTestType, "PMI_merged_h5.rds"))
}
# preview
PMI
## An object of class Seurat
## 32285 features across 73354 samples within 1 assay
## Active assay: RNA (32285 features, 0 variable features)
nCount_RNA = total number of transcripts (UMIs) detected in a single cell.
nFeature_RNA = number of unique genes (features) detected in a single cell.
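Both metrics come directly from the counts matrix. A minimal sketch, assuming counts is a genes x cells matrix of UMIs (e.g. GetAssayData(PMI, slot = "counts")):
nCount_manual <- Matrix::colSums(counts) # total UMIs per cell
nFeature_manual <- Matrix::colSums(counts > 0) # genes detected (>= 1 UMI) per cell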
# create sample column
barcodes <- colnames(PMI)
pattern <- "(.+)_[ACGT]+-(\\d+)"
sample <- str_match(barcodes, pattern)[,2]
table(sample)
## sample
## rep1_PS19_3_hr rep1_WT_3_hr rep2_PS19_3_hr rep2_WT_3_hr
## 14280 9083 25000 24991
PMI$sample <- factor(sample, levels = c("rep1_WT_3_hr", "rep2_WT_3_hr",
"rep1_PS19_3_hr", "rep2_PS19_3_hr"))
table(PMI$sample) # check
##
## rep1_WT_3_hr rep2_WT_3_hr rep1_PS19_3_hr rep2_PS19_3_hr
## 9083 24991 14280 25000
Idents(PMI) <- PMI$sample
Add treatment column to metadata
# create treatment column
treat <- gsub("rep1_WT_3_hr", treatment_3hr, PMI$sample)
treat <- gsub("rep2_WT_3_hr", treatment_3hr, treat)
treat <- gsub("rep1_PS19_3_hr", treatment_3hr, treat)
treat <- gsub("rep2_PS19_3_hr", treatment_3hr, treat)
PMI$treatment <- factor(treat, levels = c("3_hr"))
table(PMI$treatment)
##
## 3_hr
## 73354
Add genotype column to metadata
# create genotype column
geno <- gsub("rep1_WT_3_hr", WT, PMI$sample)
geno <- gsub("rep2_WT_3_hr", WT, geno)
geno <- gsub("rep1_PS19_3_hr", PS19, geno)
geno <- gsub("rep2_PS19_3_hr", PS19, geno)
PMI$genotype <- factor(geno, levels = c("WT","PS19"))
table(PMI$genotype)
##
## WT PS19
## 34074 39280
Add replicate column to metadata
# create replicate column
repl <- gsub("rep1_WT_3_hr", replicate1, PMI$sample)
repl <- gsub("rep2_WT_3_hr", replicate2, repl)
repl <- gsub("rep1_PS19_3_hr", replicate1, repl)
repl <- gsub("rep2_PS19_3_hr", replicate2, repl)
PMI$replicate <- factor(repl, levels = c("replicate1","replicate2"))
table(PMI$replicate )
##
## replicate1 replicate2
## 23363 49991
# cell.complexity
PMI$cell.complexity <- log10(PMI$nFeature_RNA) / log10(PMI$nCount_RNA)
# percent MT
grep("^mt-",rownames(PMI@assays$RNA@counts),value = TRUE)
## [1] "mt-Nd1" "mt-Nd2" "mt-Co1" "mt-Co2" "mt-Atp8" "mt-Atp6" "mt-Co3"
## [8] "mt-Nd3" "mt-Nd4l" "mt-Nd4" "mt-Nd5" "mt-Nd6" "mt-Cytb"
mt.genes <- c("mt-Nd1", "mt-Nd2", "mt-Co1", "mt-Co2", "mt-Atp8", "mt-Atp6", "mt-Co3", "mt-Nd3",
"mt-Nd4l", "mt-Nd4", "mt-Nd5", "mt-Nd6", "mt-Cytb")
PMI$percent.mt <- PercentageFeatureSet(PMI, pattern = "^mt-")
# ribosomal protein genes begin with 'Rpl' or 'Rps' in this annotation
PMI$percent.ribo <- PercentageFeatureSet(PMI, pattern = "^Rp[sl]")
# percent.hb - hemoglobin genes begin with 'Hb'; the pattern excludes the 'Hbp' genes
PMI$percent.hb <- PercentageFeatureSet(PMI, pattern = "^Hb[^(p)]")
# percent Xist in each cell
PMI$percent.Xist <- PercentageFeatureSet(PMI, features = "Xist")
# percent Ddx3x in each cell
PMI$percent.Ddx3x <- PercentageFeatureSet(PMI, features = "Ddx3x")
# percent Ttr in each cell
PMI$percent.Ttr <- PercentageFeatureSet(PMI, features = "Ttr")
# percent Apoe in each cell
PMI$percent.Apoe <- PercentageFeatureSet(PMI, features = "Apoe")
# Visualize the number of cell counts per sample
data <- as.data.frame(table(PMI$sample))
colnames(data) <- c("sample","frequency")
ncellsRaw <- ggplot(data, aes(x = sample, y = frequency, fill = sample)) +
geom_col() +
theme_classic() +
geom_text(aes(label = frequency),
position=position_dodge(width=0.9),
vjust=-0.25) +
scale_fill_manual(values = sample_colors) +
scale_y_continuous(breaks = seq(0,30000, by = 5000), limits = c(0,30000)) +
ggtitle("Raw: cells per sample") +
theme(legend.position = "none") +
theme(axis.text.x = element_text(angle = 45, hjust=1))
ncellsRaw
# set graphical parameter
par(mfrow = c(3,1))
# Visualize nCount_RNA
denCount <- ggplot(PMI@meta.data,
aes(color = sample,
x = nCount_RNA,
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_log10() +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("nCount_RNA") +
ylab("Density") +
geom_vline(xintercept = nCount.min)
# Visualize percent.mt
denMt <- ggplot(PMI@meta.data,
aes(color = sample,
x = percent.mt,
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
geom_vline(xintercept = mt.cutoff) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("% Mitochondrial Genes") +
ylab("Density")
# Visualize cell complexity
# Quality cells are usually above 0.85
denCellComplexity <- ggplot(PMI@meta.data,
aes(color = sample,
x = cell.complexity,
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("Cell Complexity (log10(nFeature/nCount))") +
ylab("Density") +
geom_vline(xintercept = complexity.cutoff)
# Arrange graphs in grid
plots <- list(denCount,denMt,denCellComplexity)
layout <- rbind(c(1),c(2),c(3))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
## Warning: Transformation introduced infinite values in continuous x-axis
## Warning: Removed 26 rows containing non-finite values (stat_density).
## Removed 26 rows containing non-finite values (stat_density).
## Removed 26 rows containing non-finite values (stat_density).
# Visualize percent.Ttr
denTtr <- ggplot(PMI@meta.data,
aes(color = sample,
x = log2(percent.Ttr),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
# geom_vline(xintercept = mt.cutoff) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Ttr") +
ylab("Density")
denTtr
## Warning: Removed 48566 rows containing non-finite values (stat_density).
# Visualize percent.Xist
denXist <- ggplot(PMI@meta.data,
aes(color = sample,
x = log2(percent.Xist),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
# geom_vline(xintercept = mt.cutoff) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Xist") +
ylab("Density")
denXist
## Warning: Removed 29918 rows containing non-finite values (stat_density).
# Visualize percent.Ddx3x
denDdx3x <- ggplot(PMI@meta.data,
aes(color = sample,
x = log2(percent.Ddx3x),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
# geom_vline(xintercept = mt.cutoff) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Ddx3x") +
ylab("Density")
denDdx3x
## Warning: Removed 54336 rows containing non-finite values (stat_density).
# Visualize percent.Apoe
denApoe <- ggplot(PMI@meta.data,
aes(color = sample,
x = log2(percent.Apoe),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
# geom_vline(xintercept = mt.cutoff) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Apoe") +
ylab("Density")
denApoe
## Warning: Removed 18531 rows containing non-finite values (stat_density).
# nFeature, nCount, and cell.complexity violins
vCellInfo <- VlnPlot(PMI,
features = c("nFeature_RNA", "nCount_RNA","cell.complexity"),
ncol = 3,
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vCellInfo
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
# percent violins
vMtRiboHb <- VlnPlot(PMI,
features = c("percent.mt","percent.ribo","percent.hb"),
ncol = 3,
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vMtRiboHb
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
## Removed 26 rows containing non-finite values (stat_ydensity).
## Removed 26 rows containing non-finite values (stat_ydensity).
# percent violins
vXist <- VlnPlot(PMI,
features = c("percent.Xist"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vXist
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
# percent violins
vDdx3x <- VlnPlot(PMI,
features = c("percent.Ddx3x"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vDdx3x
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
# percent violins
vTtr <- VlnPlot(PMI,
features = c("percent.Ttr"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vTtr
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
# percent violins
vApoe<- VlnPlot(PMI,
features = c("percent.Apoe"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vApoe
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
vGenes <- VlnPlot(PMI,
features = c("percent.Xist", "percent.Ddx3x",
"percent.Ttr", "percent.Apoe"),
ncol = 4,
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vGenes
## Warning: Removed 26 rows containing non-finite values (stat_ydensity).
## Removed 26 rows containing non-finite values (stat_ydensity).
## Removed 26 rows containing non-finite values (stat_ydensity).
## Removed 26 rows containing non-finite values (stat_ydensity).
sMt <- ggplot(
PMI@meta.data,
aes(x = nCount_RNA, y = nFeature_RNA, color = percent.mt)) +
geom_point() +
stat_smooth(method=lm) +
scale_x_log10() +
scale_y_log10() +
theme_classic() +
geom_vline(xintercept = nCount.min) +
geom_hline(yintercept = nFeature.min) +
facet_wrap(~sample) +
scale_colour_gradient(low = "gray90", high = "black", limits =c(0,100))
#geom_rect(aes(xmin=300, xmax=300, ymin=1000,
# ymax=3000), color="transparent", fill="orange", alpha=0.3)
sMt
## Warning: Transformation introduced infinite values in continuous x-axis
## Warning: Transformation introduced infinite values in continuous y-axis
## Warning: Transformation introduced infinite values in continuous x-axis
## Warning: Transformation introduced infinite values in continuous y-axis
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 26 rows containing non-finite values (stat_smooth).
sMt <- FeatureScatter(PMI,
feature1 = "nCount_RNA",
feature2 = "percent.mt",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sMt
## Warning: Removed 26 rows containing missing values (geom_point).
sTtr <- FeatureScatter(PMI,
feature1 = "percent.Ttr",
feature2 = "nCount_RNA",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sTtr
## Warning: Removed 26 rows containing missing values (geom_point).
sTtrMt <- FeatureScatter(PMI,
feature1 = "percent.Ttr",
feature2 = "percent.mt",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sTtrMt
## Warning: Removed 26 rows containing missing values (geom_point).
sXist <- FeatureScatter(PMI,
feature1 = "percent.Xist",
feature2 = "nCount_RNA",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sXist
## Warning: Removed 26 rows containing missing values (geom_point).
We want to be careful when filtering because removing cells can easily lead to misinterpretation. For example, cells with a high percent.mt may simply be highly active in respiratory processes.
We will filter on the following conditions:
– nCount_RNA > 250
– nFeature_RNA > 250
– cell.complexity > 0.85
– percent.mt < 25
(A percent.hb < 5 cutoff is also defined but is not applied in the subset below.)
We will also remove the MT genes afterwards so they do not alter downstream differential expression.
# filter
PMI.filtered <- subset(PMI,
subset = (nCount_RNA > nCount.min) &
(nFeature_RNA > nFeature.min) &
(cell.complexity > complexity.cutoff) &
(percent.mt < mt.cutoff))
# print cells removed
print(paste0(dim(PMI)[2] - dim(PMI.filtered)[2]," cells removed"))
## [1] "20030 cells removed"
data <- as.data.frame(table(PMI.filtered$sample))
colnames(data) <- c("sample","frequency")
ncellsFilter1 <- ggplot(data, aes(x = sample, y = frequency, fill = sample)) +
geom_col() +
theme_classic() +
geom_text(aes(label = frequency),
position=position_dodge(width=0.9),
vjust=-0.25) +
scale_fill_manual(values = sample_colors) +
scale_y_continuous(breaks = seq(0,30000, by = 5000), limits = c(0,30000)) +
ggtitle("Raw: cells per sample") +
theme(legend.position = "none") +
theme(axis.text.x = element_text(angle = 45, hjust=1))
ncellsFilter1
# Arrange graphs in grid
plots <- list(ncellsRaw, ncellsFilter1)
layout <- cbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# cleanup
remove(plots, layout, grid, ncellsFilter1)
Remove lowly expressed genes. We will keep genes that are detected (at least 1 count) in at least 10 cells.
# filter genes
counts <- GetAssayData(object = PMI.filtered, slot = "counts")
nonzero <- counts > 0 # produces logical
keep <- Matrix::rowSums(nonzero) >= 10 # sum the true/false
counts.filtered <- counts[keep,] # keep certain genes
# overwrite PMI.filtered
PMI.filtered <- CreateSeuratObject(counts.filtered,
meta.data = PMI.filtered@meta.data)
# print features removed
print(paste0(dim(counts)[1] - dim(counts.filtered)[1], " features removed"))
## [1] "12590 features removed"
https://github.com/chris-mcginnis-ucsf/DoubletFinder
Heterotypic - doublets derived from transcriptionally distinct cells. DoubletFinder works best on this type of doublet.
Homotypic - doublets derived from transcriptionally similar cells. DoubletFinder does not perform as well on this type of doublet.
pANN - the proportion of artificial nearest neighbors surrounding each cell.
BCmvn - mean-variance-normalized bimodality coefficient of the pANN distributions produced during the pN-pK parameter sweep. The maximum of the BCmvn curve can be used to identify the optimal pK parameter.
Overview of steps:
A. Prepare each sample
B. pK Identification (no ground-truth) - defines the PC neighborhood size used to compute pANN
C. Homotypic Doublet Proportion Estimate - homotypic doublets may not be a problem depending on the type of analysis you are performing. If you have some doublets of the same type and their counts are normalized, they will generally represent the profile of single cells of the same type.
D. DoubletFinder
E. Visualize where the doublets are located
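In miniature, steps B-D map onto the DoubletFinder calls sketched below. This is a minimal sketch assuming seu is a normalized, scaled, and clustered Seurat object; the 1:10 PC range and the ~7.5% doublet-formation rate are illustrative assumptions, not the exact parameters used in the loop that follows (which derives nExp from pK_value instead).
sweep.res <- paramSweep_v3(seu, PCs = 1:10, sct = FALSE)      # pN-pK parameter sweep
sweep.stats <- summarizeSweep(sweep.res, GT = FALSE)
bcmvn <- find.pK(sweep.stats)                                 # BCmvn per pK
pK.opt <- as.numeric(as.character(bcmvn$pK[which.max(bcmvn$BCmetric)]))
homotypic.prop <- modelHomotypic(seu$seurat_clusters)         # homotypic doublet estimate
nExp <- round(0.075 * ncol(seu))                              # assumed ~7.5% doublet rate
nExp.adj <- round(nExp * (1 - homotypic.prop))
seu <- doubletFinder_v3(seu, PCs = 1:10, pN = 0.25, pK = pK.opt,
                        nExp = nExp.adj, reuse.pANN = FALSE, sct = FALSE)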
# split object by sample
PMI.split <- SplitObject(PMI.filtered, split.by = "sample")
for (i in 1:length(PMI.split)) {
# normalize and find PCs
print(i)
PMI_sample <- NormalizeData(PMI.split[[i]])
sampleID <- levels(droplevels(PMI_sample@meta.data$sample))
PMI_sample <- FindVariableFeatures(PMI_sample, selection.method = "vst", nfeatures = 2000)
PMI_sample <- ScaleData(PMI_sample)
PMI_sample <- RunPCA(PMI_sample)
# get significant PCs
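# co1: first PC where cumulative variance exceeds 90% and the PC itself explains < 5%
# co2: the PC just after the last drop of > 0.1 in % variance between consecutive PCs
# min.pc: the smaller of the two, used as the dimensionality cutoff below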
stdv <- PMI_sample[["pca"]]@stdev
sum.stdv <- sum(PMI_sample[["pca"]]@stdev)
percent.stdv <- (stdv / sum.stdv) * 100
cumulative <- cumsum(percent.stdv)
co1 <- which(cumulative > 90 & percent.stdv < 5)[1]
co2 <- sort(which((percent.stdv[1:length(percent.stdv) - 1] -
percent.stdv[2:length(percent.stdv)]) > 0.1),
decreasing = T)[1] + 1
min.pc <- min(co1, co2)
min.pc
# run umap
PMI_sample <- RunUMAP(PMI_sample, dims = 1:min.pc, reduction = "pca")
# cluster
PMI_sample <- FindNeighbors(object = PMI_sample, dims = 1:min.pc)
PMI_sample <- FindClusters(object = PMI_sample, resolution = 0.2)
# Assign identity of clusters
Idents(object = PMI_sample) <- "RNA_snn_res.0.2"
d1 <- DimPlot(PMI_sample,
reduction = "umap",
label = TRUE,
label.size = 6)
path <- paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_UMAP_res0.02_",sampleID)
pdf(paste0(path, ".pdf"), width = 5, height = 4)
print(d1)
dev.off()
# number of cells in each cluster
n_cells <- FetchData(PMI_sample, vars = c("ident")) %>%
  dplyr::count(ident) %>%
  tidyr::spread(ident, n)
## pK Identification (no ground-truth)
sweep.res.list <- paramSweep_v3(PMI_sample, PCs = 1:min.pc, sct = FALSE)
sweep.stats <- summarizeSweep(sweep.res.list, GT = FALSE)
bcmvn <- find.pK(sweep.stats)
# Optimal pK for any scRNA-seq data can be manually discerned as maxima in BCmvn distributions
bcmvn_max <- bcmvn[which.max(bcmvn$BCmetric),]
pK_value <- bcmvn_max$pK
pK_value <- as.numeric(levels(pK_value))[pK_value]
# Homotypic Doublet Proportion Estimate
annotations <- PMI_sample@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(pK_value*nrow(PMI_sample@meta.data))
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
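# nExp_poi: expected number of doublets; nExp_poi.adj discounts the estimated
# homotypic fraction, since DoubletFinder mainly detects heterotypic doublets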
# Run DoubletFinder with varying classification
PMI_sample <- doubletFinder_v3(PMI_sample, PCs = 1:min.pc,
pN = 0.25, pK = pK_value, nExp = nExp_poi.adj,
reuse.pANN = FALSE, sct = FALSE)
# set DF class for calling doublets
DF_class <- PMI_sample@meta.data[, grep("DF.classifications", colnames(PMI_sample@meta.data))]
DF_class[which(DF_class == "Doublet")] <- "Doublet"
table(DF_class)
# table showing the number of doublets and singlets
write.table(table(DF_class), paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_table_",sampleID), sep = "\t",
row.names = FALSE, quote = FALSE)
PMI_sample@meta.data[,"CellTypes_DF"] <- DF_class
# plot
d2 <- DimPlot(PMI_sample, group.by="CellTypes_DF", reduction="umap",
order=c("Coll.Duct.TC","Doublet"),
cols=c("#66C2A5","black"))
path <- paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_UMAP_",sampleID)
pdf(paste0(path, ".pdf"), width = 5,height = 4)
print(d2)
dev.off()
# plot
f1 <- FeaturePlot(PMI_sample,
reduction = "umap",
features = c("nFeature_RNA", "nCount_RNA",
"cell.complexity", "percent.mt"),
pt.size = 0.4,
order = TRUE,
label = TRUE)
path <- paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_FeaturePlot_",sampleID)
pdf(paste0(path, ".pdf"), width = 7, height = 7)
print(f1)
dev.off()
#only keep singlets
PMI_sample_singlets <- subset(PMI_sample, subset = CellTypes_DF == "Singlet")
# inspect
d3 <- DimPlot(PMI_sample_singlets, group.by="CellTypes_DF", reduction="umap",
order=c("Coll.Duct.TC","Doublet"),
cols=c("#66C2A5","black"))
path <- paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_UMAP_singlets_",sampleID)
pdf(paste0(path, ".pdf"), width = 5, height = 4)
print(d3)
dev.off()
# number of cells in each cluster pre and post removing doublets
n_cells_singlets <- FetchData(PMI_sample_singlets, vars = c("ident")) %>%
dplyr::count(ident) %>% tidyr::spread(ident, n)
n_cells_singlets
ncells_per_cluster <- rbind(n_cells, n_cells_singlets)
row.names(ncells_per_cluster) <- c("Doublets and singlets", "Singlets only")
ncells_per_cluster
difference <- diff(as.matrix(ncells_per_cluster))
difference <- as.data.frame(difference)
row.names(difference) <- c("difference")
cbind(difference, ncells_per_cluster)
write.table(ncells_per_cluster, paste0(
"../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_table_ncells_per_cluster",sampleID, ".txt"), sep = "\t",
row.names = FALSE, quote = FALSE)
# plot the number of cells in each cluster pre and post doubletFinder
ncell_matrix <- as.matrix(ncells_per_cluster)
ncells_melt <- melt(ncell_matrix)
colnames(ncells_melt) <- c("doublet type","cluster","number of cells")
ncell_max <- ncells_melt[which.max(ncells_melt$`number of cells`),]
ncell_max_value <- ncell_max$`number of cells`
cellmax <- ncell_max_value + 800 # so that the figure doesn't cut off the text
b1 <- ggplot(ncells_melt, aes(x = factor(cluster), y = `number of cells`,
fill = `doublet type`)) +
geom_bar(stat="identity", colour="black", width=1, position = position_dodge(width=0.8)) +
geom_text(aes(label = `number of cells`),
position=position_dodge(width=0.9), vjust=-0.25, angle = 45, hjust=-.01) +
theme_classic() + scale_fill_manual(values = c("gray", "#66C2A5")) +
ggtitle("Number of cells per cluster") + xlab("cluster") +
theme(axis.text.x = element_text(angle = 45, hjust=1)) +
scale_y_continuous(limits = c(0,cellmax))
path <- paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_barplot_ncells_per_cluster",sampleID)
pdf(paste0(path, ".pdf"), width = 7,height = 5)
print(b1)
dev.off()
f2 <- FeaturePlot(PMI_sample_singlets,
reduction = "umap",
features = c("nFeature_RNA", "nCount_RNA",
"cell.complexity", "percent.mt"),
pt.size = 0.4,
order = TRUE,
label = TRUE)
path <- paste0("../../results/", pathToTestType, "doubletFinder/",
treatment,"_",tolower(tissue),
"_doubletFinder_FeaturePlot_singlets",sampleID)
pdf(paste0(path, ".pdf"), width = 7,height = 7)
print(f2)
dev.off()
# put the PMI together again
PMI.split[[i]] <- PMI_sample_singlets
}
## [1] 1
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 7305
## Number of edges: 253708
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9626
## Number of communities: 14
## Elapsed time: 0 seconds
## [1] "Creating artificial doublets for pN = 5%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 10%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 15%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 20%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 25%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 30%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## NULL
## [1] "Creating 2435 artificial doublets..."
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Computing pANN..."
## [1] "Classifying doublets.."
## [1] 2
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 13663
## Number of edges: 436744
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9248
## Number of communities: 7
## Elapsed time: 1 seconds
## [1] "Creating artificial doublets for pN = 5%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 10%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 15%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 20%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 25%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 30%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## NULL
## [1] "Creating 4554 artificial doublets..."
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Computing pANN..."
## [1] "Classifying doublets.."
## [1] 3
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 10410
## Number of edges: 372406
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9653
## Number of communities: 16
## Elapsed time: 0 seconds
## [1] "Creating artificial doublets for pN = 5%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 10%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 15%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 20%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 25%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 30%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## NULL
## [1] "Creating 3470 artificial doublets..."
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Computing pANN..."
## [1] "Classifying doublets.."
## [1] 4
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 21946
## Number of edges: 687162
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9121
## Number of communities: 7
## Elapsed time: 2 seconds
## [1] "Creating artificial doublets for pN = 5%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 5e-04..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 10%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 5e-04..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 15%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 5e-04..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 20%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 5e-04..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 25%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 5e-04..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## [1] "Creating artificial doublets for pN = 30%"
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Defining neighborhoods..."
## [1] "Computing pANN across all pK..."
## [1] "pK = 5e-04..."
## [1] "pK = 0.001..."
## [1] "pK = 0.005..."
## [1] "pK = 0.01..."
## [1] "pK = 0.02..."
## [1] "pK = 0.03..."
## [1] "pK = 0.04..."
## [1] "pK = 0.05..."
## [1] "pK = 0.06..."
## [1] "pK = 0.07..."
## [1] "pK = 0.08..."
## [1] "pK = 0.09..."
## [1] "pK = 0.1..."
## [1] "pK = 0.11..."
## [1] "pK = 0.12..."
## [1] "pK = 0.13..."
## [1] "pK = 0.14..."
## [1] "pK = 0.15..."
## [1] "pK = 0.16..."
## [1] "pK = 0.17..."
## [1] "pK = 0.18..."
## [1] "pK = 0.19..."
## [1] "pK = 0.2..."
## [1] "pK = 0.21..."
## [1] "pK = 0.22..."
## [1] "pK = 0.23..."
## [1] "pK = 0.24..."
## [1] "pK = 0.25..."
## [1] "pK = 0.26..."
## [1] "pK = 0.27..."
## [1] "pK = 0.28..."
## [1] "pK = 0.29..."
## [1] "pK = 0.3..."
## NULL
## [1] "Creating 7315 artificial doublets..."
## [1] "Creating Seurat object..."
## [1] "Normalizing Seurat object..."
## [1] "Finding variable genes..."
## [1] "Scaling data..."
## [1] "Running PCA..."
## [1] "Calculating PC distance matrix..."
## [1] "Computing pANN..."
## [1] "Classifying doublets.."
# merge PMI.split back into a single object
PMI.singlets <- merge(x = PMI.split[[1]],
y = c(PMI.split[[2]], PMI.split[[3]],PMI.split[[4]]),
project = paste0("LPS PMI ", tissue, " Single Cell"))
# print how many cells removed
print(paste0(dim(PMI.filtered)[2] - dim(PMI.singlets)[2]," cells removed"))
## [1] "7237 cells removed"
# how many removed if we had an upper nCount and nFeature
PMI.upper <- subset(PMI.singlets,
subset = (nCount_RNA < 10000) & (nFeature_RNA < 5000))
print(paste0(dim(PMI.filtered)[2] - dim(PMI.upper)[2],
" cells would have been removed if upper bound applied"))
## [1] "8254 cells would have been removed if upper bound applied"
# overwrite PMI.filtered
PMI.filtered <- PMI.singlets
# reset levels
PMI.filtered$treatment <- factor(PMI.filtered$treatment,
levels = c("3_hr"))
PMI.filtered$replicate <- factor(PMI.filtered$replicate,
levels = c("replicate1","replicate2"))
PMI.filtered$sample <- factor(PMI.filtered$sample,
levels = c("rep1_WT_3_hr", "rep2_WT_3_hr",
"rep1_PS19_3_hr", "rep2_PS19_3_hr"))
# cleanup
remove(PMI.singlets, PMI.upper, PMI.split, PMI_sample_singlets, PMI_sample)
remove(n_cells,n_cells_singlets,ncell_matrix,ncell_max,ncells_per_cluster,ncells_melt)
remove(sweep.res.list, sweep.stats,bcmvn,bcmvn_max,difference)
remove(d1,d2,d3,f1,f2)
remove(counts,counts.filtered, nonzero)
# remove mt.genes
counts <- GetAssayData(object = PMI.filtered, slot = "counts")
keep <- !rownames(counts) %in% mt.genes # false when mt.gene
counts.filtered <- counts[keep,]
# overwrite PMI.filtered
PMI.filtered <- CreateSeuratObject(counts.filtered,
meta.data = PMI.filtered@meta.data)
# print features removed
print(paste0(dim(counts)[1] - dim(counts.filtered)[1], " features removed"))
## [1] "13 features removed"
# User params
goi <- "Ttr"
sample_oi <- "ALL"
threshold <- 2
# Split seurat object by timepoint to perform SCT on all samples
#PMI.split <- SplitObject(PMI.filtered, split.by = "sample")
# Subset data
log2.threshold <- log2(threshold + 0.01)
counts.df <- FetchData(PMI.filtered, vars = goi)
colnames(counts.df) <- "counts"
log2.counts.df <- log2(counts.df + 0.01)
# Histogram
title <- paste0("PMI Brain Cell: ", goi, " - ", sample_oi, "\nnCount_RNA > ", threshold)
hist1 <- ggplot(counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0(goi, " nCount_RNA")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# Histogram log transformed
hist2 <- ggplot(log2.counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0("Log2(",goi, " nCount_RNA)")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = log2.threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = log2.threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = log2.threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# plot
plots <- list(hist1,hist2)
layout <- rbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# number removed
table(counts.df$counts > threshold)
##
## FALSE TRUE
## 36635 9452
Malat1 all samples
# User params
goi <- "Malat1"
threshold <- 0.1
# Subset data
log2.threshold <- log2(threshold + 0.01)
counts.df <- FetchData(PMI.filtered, vars = goi)
colnames(counts.df) <- "counts"
log2.counts.df <- log2(counts.df + 0.01)
# Histogram
title <- paste0("LPS Brain Cell: ", goi, "\nnCount_RNA > ", threshold)
hist1 <- ggplot(counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0(goi, " nCount_RNA")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4") +
annotate("rect",
xmin = threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue")
# Histogram log transformed
hist2 <- ggplot(log2.counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0("Log2(",goi, " nCount_RNA)")) + ylab("# of Samples") + theme_bw() +
geom_vline(xintercept = log2.threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = log2.threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4") +
annotate("rect",
xmin = log2.threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue")
# plot
plots <- list(hist1,hist2)
layout <- rbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# number removed
table(counts.df$counts > threshold)
##
## FALSE TRUE
## 3282 42805
Xist all samples
# User params
goi <- "Xist"
sample_oi <- "ALL"
threshold <- 2
# Split seurat object by timepoint to perform SCT on all samples
#PMI.split <- SplitObject(PMI.filtered, split.by = "sample")
# Subset data
log2.threshold <- log2(threshold + 0.01)
counts.df <- FetchData(PMI.filtered, vars = goi)
colnames(counts.df) <- "counts"
log2.counts.df <- log2(counts.df + 0.01)
# Histogram
title <- paste0("PMI Brain Cell: ", goi, " - ", sample_oi, "\nnCount_RNA > ", threshold)
hist1 <- ggplot(counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0(goi, " nCount_RNA")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# Histogram log transformed
hist2 <- ggplot(log2.counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0("Log2(",goi, " nCount_RNA)")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = log2.threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = log2.threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = log2.threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# plot
plots <- list(hist1,hist2)
layout <- rbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# number removed
table(counts.df$counts > threshold)
##
## FALSE TRUE
## 29474 16613
PMI.filtered.split <- SplitObject(PMI.filtered, split.by = "sample")
Ttr by sample
# histogram of Ttr for each sample
for (i in 1:length(PMI.filtered.split)) {
print(i)
PMI_sample <- PMI.filtered.split[[i]]
goi <- "Ttr"
sample_oi <- print(i)
threshold <- 2
# Subset data
log2.threshold <- log2(threshold + 0.01)
counts.df <- FetchData(PMI_sample, vars = goi)
colnames(counts.df) <- "counts"
log2.counts.df <- log2(counts.df + 0.01)
# Histogram
title <- paste0("PMI Brain Cell: ", goi, " - ", sample_oi, "\nnCount_RNA > ", threshold)
hist1 <- ggplot(counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0(goi, " nCount_RNA")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# Histogram log transformed
hist2 <- ggplot(log2.counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0("Log2(",goi, " nCount_RNA)")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = log2.threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = log2.threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = log2.threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# plot
plots <- list(hist1,hist2)
layout <- rbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# number removed
table(counts.df$counts > threshold)
}
## [1] 1
## [1] 1
## [1] 2
## [1] 2
## [1] 3
## [1] 3
## [1] 4
## [1] 4
Xist by sample
# histogram of Xist for each sample
for (i in 1:length(PMI.filtered.split)) {
print(i)
PMI_sample <- PMI.filtered.split[[i]]
goi <- "Xist"
sample_oi <- print(i)
threshold <- 2
# Subset data
log2.threshold <- log2(threshold + 0.01)
counts.df <- FetchData(PMI_sample, vars = goi)
colnames(counts.df) <- "counts"
log2.counts.df <- log2(counts.df + 0.01)
# Histogram
title <- paste0("PMI Brain Cell: ", goi, " - ", sample_oi, "\nnCount_RNA > ", threshold)
hist1 <- ggplot(counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0(goi, " nCount_RNA")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# Histogram log transformed
hist2 <- ggplot(log2.counts.df, aes(x = counts)) +
geom_histogram(bins = 100, fill = "gray", color = "black") +
labs(title = title, x=NULL, y=NULL) +
xlab(paste0("Log2(",goi, " nCount_RNA)")) + ylab("# of cells") + theme_bw() +
geom_vline(xintercept = log2.threshold, col = "blue") +
annotate("rect",
xmin = -Inf,
xmax = log2.threshold,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="deepskyblue") +
annotate("rect",
xmin = log2.threshold,
xmax = Inf,
ymin = 0,
ymax=Inf,
alpha=0.2,
fill="chocolate4")
# plot
plots <- list(hist1,hist2)
layout <- rbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# number removed
table(counts.df$counts > threshold)
}
## [1] 1
## [1] 1
## [1] 2
## [1] 2
## [1] 3
## [1] 3
## [1] 4
## [1] 4
# Visualize the number of cell counts per sample
data <- as.data.frame(table(PMI.filtered$sample))
colnames(data) <- c("sample","frequency")
ncellsFiltered <- ggplot(data, aes(x = sample, y = frequency, fill = sample)) +
geom_col() +
theme_classic() +
geom_text(aes(label = frequency),
position=position_dodge(width=0.9),
vjust=-0.25) +
scale_fill_manual(values = sample_colors) +
scale_y_continuous(breaks = seq(0,30000, by = 5000), limits = c(0,30000)) +
ggtitle("Filtered: cells per sample") +
theme(legend.position = "none") +
theme(axis.text.x = element_text(angle = 45, hjust=1))
# Arrange graphs in grid
plots <- list(ncellsRaw,ncellsFiltered)
layout <- cbind(c(1),c(2))
grid <- grid.arrange(grobs = plots, layout_matrix = layout)
# set graphical parameter
par(mfrow = c(3,1))
# Visualize the number of counts per cell
denCount_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = nCount_RNA,
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_log10() +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("nCount_RNA") +
ylab("Density") +
geom_vline(xintercept = nCount.min)
# Visualize percent.mt
denMt_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = percent.mt,
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_log10() +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("% Mitochondrial Genes") +
ylab("Density") +
geom_vline(xintercept = mt.cutoff)
# Visualize cell complexity
# Quality cells are usually above 0.80
denCellComplexity_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = cell.complexity,
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_log10() +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("Cell Complexity (log10(nFeature/nCount))") +
ylab("Density") +
geom_vline(xintercept = complexity.cutoff)
# Arrange graphs in grid
plots <- list(denCount,denMt,denCellComplexity,denCount_filter,
denMt_filter,denCellComplexity_filter)
layout <- rbind(c(1,4),c(2,5),c(3,6))
grid_den_raw_filter <- grid.arrange(grobs = plots, layout_matrix = layout)
## Warning: Transformation introduced infinite values in continuous x-axis
## Warning: Removed 26 rows containing non-finite values (stat_density).
## Removed 26 rows containing non-finite values (stat_density).
## Removed 26 rows containing non-finite values (stat_density).
## Warning: Transformation introduced infinite values in continuous x-axis
## Warning: Removed 839 rows containing non-finite values (stat_density).
Density plots for genes of interest
# set graphical parameter
par(mfrow = c(4,1))
# Visualize percent.Apoe
denApoe_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = log2(percent.Apoe),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Apoe") +
ylab("Density")
# Visualize percent.Xist
denXist_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = log2(percent.Xist),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Xist") +
ylab("Density")
# Visualize percent.Ddx3x
denDdx3x_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = log2(percent.Ddx3x),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Ddx3x") +
ylab("Density")
denTtr_filter <- ggplot(PMI.filtered@meta.data,
aes(color = sample,
x = log2(percent.Ttr),
fill = sample)) +
geom_density(alpha = 0.2) +
theme_classic() +
scale_x_continuous(n.breaks = 4) +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("percent Ttr") +
ylab("Density")
# Arrange graphs in grid
plots <- list(denApoe, denXist, denDdx3x, denTtr, denApoe_filter,
denXist_filter, denDdx3x_filter, denTtr_filter)
layout <- rbind(c(1,5),c(2,6),c(3,7), c(4,8))
grid_den_raw_filter_genes <- grid.arrange(grobs = plots, layout_matrix = layout)
## Warning: Removed 18531 rows containing non-finite values (stat_density).
## Warning: Removed 29918 rows containing non-finite values (stat_density).
## Warning: Removed 54336 rows containing non-finite values (stat_density).
## Warning: Removed 48566 rows containing non-finite values (stat_density).
## Warning: Removed 10435 rows containing non-finite values (stat_density).
## Warning: Removed 15042 rows containing non-finite values (stat_density).
## Warning: Removed 32360 rows containing non-finite values (stat_density).
## Warning: Removed 32212 rows containing non-finite values (stat_density).
# nFeature, nCount, and cell.complexity violins
vCellInfo <- VlnPlot(PMI.filtered,
features = c("nFeature_RNA", "nCount_RNA","cell.complexity"),
ncol = 3,
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vCellInfo
# percent violins
vMtRiboHb <- VlnPlot(PMI.filtered,
features = c("percent.mt","percent.ribo","percent.hb"),
ncol = 3,
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vMtRiboHb
# percent violins
vXist <- VlnPlot(PMI.filtered,
features = c("percent.Xist"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vXist
# percent violins
vDdx3x <- VlnPlot(PMI.filtered,
features = c("percent.Ddx3x"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vDdx3x
# percent violins
vTtr <- VlnPlot(PMI.filtered,
features = c("percent.Ttr"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vTtr
# percent violins
vApoe<- VlnPlot(PMI.filtered,
features = c("percent.Apoe"),
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vApoe
vGenes <- VlnPlot(PMI.filtered,
features = c("percent.Xist", "percent.Ddx3x",
"percent.Ttr", "percent.Apoe"),
ncol = 4,
group.by = 'sample',
cols = sample_colors,
pt.size = 0)
vGenes
sMt <- ggplot(
PMI.filtered@meta.data,
aes(x = nCount_RNA, y = nFeature_RNA, color = percent.mt)) +
geom_point() +
stat_smooth(method=lm) +
scale_x_log10() +
scale_y_log10() +
theme_classic() +
geom_vline(xintercept = nCount.min) +
geom_hline(yintercept = nFeature.min) +
facet_wrap(~sample) +
scale_colour_gradient(low = "gray90", high = "black", limits =c(0,100))
sMt
## `geom_smooth()` using formula 'y ~ x'
sMt <- FeatureScatter(PMI.filtered,
feature1 = "nCount_RNA",
feature2 = "percent.mt",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sMt
sTtr <- FeatureScatter(PMI.filtered,
feature1 = "percent.Ttr",
feature2 = "nCount_RNA",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sTtr
sTtrMt <- FeatureScatter(PMI.filtered,
feature1 = "percent.Ttr",
feature2 = "percent.mt",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sTtrMt
sXist <- FeatureScatter(PMI.filtered,
feature1 = "percent.Xist",
feature2 = "nCount_RNA",
group.by = 'sample',
cols = sample_colors,
shuffle = TRUE)
sXist
# Visualize the distribution of genes detected per cell via boxplot
b1 <- ggplot(PMI.filtered@meta.data,
aes(x = sample,
y = log10(nFeature_RNA),
fill=sample)) +
geom_boxplot() +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1)) +
theme(plot.title = element_text(hjust = 0.5, face="bold")) +
ggtitle("Unique Genes / Cell / Sample") +
scale_color_manual(values = sample_colors) +
scale_fill_manual(values = sample_colors) +
xlab("Sample")
b1
df <- data.frame(row.names = rownames(PMI.filtered))
df$rsum <- rowSums(x = PMI.filtered, slot = "counts")
df$gene_name <- rownames(df)
df <- df[order(df$rsum,decreasing = TRUE),]
head(df, 10)
## rsum gene_name
## Malat1 4523169 Malat1
## Gm42418 2708018 Gm42418
## Cst3 2253222 Cst3
## Ttr 1042117 Ttr
## Apoe 983998 Apoe
## Tmsb4x 823495 Tmsb4x
## Fth1 533080 Fth1
## Actb 395289 Actb
## C1qa 372274 C1qa
## C1qb 345687 C1qb
For something to be informative, it needs to exhibit variation, but not all variation is informative. The goal of our clustering analysis is to keep the major sources of variation in our dataset that should define our cell types, while restricting the variation due to uninteresting sources of variation (sequencing depth, cell cycle differences, mitochondrial expression, batch effects, etc.). Then, to determine the cell types present, we will perform a clustering analysis using the most variable genes to define the major sources of variation in the dataset.
The most common biological data correction is to remove the effects of the cell cycle on the transcriptome. This data correction can be performed by a simple linear regression against a cell cycle score.
Check cell cycle phase BEFORE running SCTransform. Counts need to be comparable between cells, and each cell has a different nCount_RNA.
# summary of counts per cell
summary(PMI.filtered@meta.data$nCount_RNA)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 283 515 727 2077 2156 34654
Use the NormalizeData() function with the LogNormalize method to account for sequencing depth. The count for each gene is divided by the total nCount_RNA of that cell; this is done for every cell. The ratio is then multiplied by scale.factor so we are not working with tiny numbers, and the result is natural-log transformed with log1p, i.e. the base-e log of (1 + value), where the +1 prevents taking the log of 0.
PMI.phase <- NormalizeData(PMI.filtered,
scale.factor = 10000, # default
normalization.method = "LogNormalize" # default
)
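As a quick sanity check (a minimal sketch, not part of the original pipeline), the log-normalized value stored in the data slot can be reproduced by hand for a single gene and cell:
# Sketch: reproduce LogNormalize manually for Malat1 in the first cell.
# Assumes the default scale.factor of 10000 used above.
cell <- colnames(PMI.phase)[1]
raw <- GetAssayData(PMI.phase, assay = "RNA", slot = "counts")[, cell]
manual <- log1p(raw["Malat1"] / sum(raw) * 10000)
stored <- GetAssayData(PMI.phase, assay = "RNA", slot = "data")["Malat1", cell]
all.equal(as.numeric(manual), as.numeric(stored)) # expected TRUE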
Give each cell a score based on expression of G1, G2/M, and S phase markers. A marker list is provided for human, so we will need to convert the human gene names to mouse gene names. We will use the CellCycleScoring() function in Seurat.
Below is a resource for acquiring cell cycle markers for other organisms: https://hbctraining.github.io/scRNA-seq_online/lessons/cell_cycle_scoring.html
Approximate phase durations: G1 ~10 hr, S ~5-6 hr, G2 ~3-4 hr, M ~2 hr; so G1 (10 hr) > G2/M (~5-6 hr) ≈ S (~5-6 hr).
If the score is negative for both S.Score and G2M.Score, the phase is G1. Otherwise, the greater positive value between S.Score and G2M.Score determines the phase.
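The assignment rule can be written out directly (a minimal sketch of the logic, not Seurat's internal code):
# Sketch of the phase call CellCycleScoring() makes from the two scores
assign_phase <- function(s.score, g2m.score) {
  if (s.score < 0 && g2m.score < 0) {
    "G1"
  } else if (s.score > g2m.score) {
    "S"
  } else {
    "G2M"
  }
}
assign_phase(-0.05, -0.12) # "G1"
assign_phase(0.30, 0.10)   # "S"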
Using a human list for mouse https://github.com/satijalab/seurat/issues/2493
# Basic function to convert human to mouse gene names
convertHumanGeneList <- function(x){
require("biomaRt")
human = useMart("ensembl", dataset = "hsapiens_gene_ensembl")
mouse = useMart("ensembl", dataset = "mmusculus_gene_ensembl")
genesV2 = getLDS(attributes = c("hgnc_symbol"), filters = "hgnc_symbol",
values = x , mart = human, attributesL = c("mgi_symbol"),
martL = mouse, uniqueRows=T)
humanx <- unique(genesV2[, 2])
# Print the first 6 genes found to the screen
print(head(humanx))
return(humanx)
}
m.s.genes <- convertHumanGeneList(cc.genes.updated.2019$s.genes)
m.g2m.genes <- convertHumanGeneList(cc.genes.updated.2019$g2m.genes)
saveRDS(m.s.genes, "../../results/geneLists/m.s.genes.rds")
saveRDS(m.g2m.genes, "../../results/geneLists/m.g2m.genes.rds")
m.s.genes <- readRDS("../../results/geneLists/m.s.genes.rds")
m.g2m.genes <- readRDS("../../results/geneLists/m.g2m.genes.rds")
# score cells for cell cycle
PMI.phase <- CellCycleScoring(PMI.phase,
g2m.features = m.g2m.genes,
s.features = m.s.genes,
set.ident = TRUE)
cellcyclecount_barplot <-
as_tibble(PMI.phase[[]]) %>%
ggplot(aes(Phase, fill = Phase)) + geom_bar()
cellcyclecount_barplot
# pie point
cellcyclecount_piepoint <-
as_tibble(PMI.phase[[]]) %>%
ggplot(aes(x=S.Score, y=G2M.Score, color=Phase)) +
geom_point()
cellcyclecount_piepoint
# Identify the most variable genes
PMI.phase <- FindVariableFeatures(PMI.phase,
selection.method = "vst", # default vst
nfeatures = 2000, # default 2000
verbose = FALSE)
# view top variable genes
top40 <- head(VariableFeatures(PMI.phase), 40)
top40
## [1] "S100a9" "S100a8" "Hist1h2ap" "Retnlg" "Hist1h1b" "Mgp"
## [7] "Hist1h2ae" "Ccl5" "Chil3" "Cxcl10" "Camp" "Ngp"
## [13] "Vpreb3" "Lcn2" "Igkc" "H2-Eb1" "Ptgds" "Ly6c2"
## [19] "Crym" "Pf4" "Vtn" "Cxcl2" "Tmem212" "Spp1"
## [25] "Acta2" "Ltf" "Iglc1" "Ccdc153" "H2-Aa" "Plac8"
## [31] "H2-Ab1" "S100a4" "Gzma" "Ccn3" "Ly6d" "Apod"
## [37] "Iglc2" "Myoc" "Gfap" "Mrc1"
# plot variable features with labels
VarFeatPlot <- VariableFeaturePlot(PMI.phase, cols = c("gray47","red"))
VarFeatPlotLabel <- LabelPoints(plot = VarFeatPlot,
points = top40, repel = TRUE, fontface="italic",
xnudge = 0, ynudge = 0, max.overlaps = 12)
VarFeatPlotLabel
# The variability information can be accessed using the HVFInfo method.
# The names of the variable features can be accessed with VariableFeatures().
variance.data <- as_tibble(HVFInfo(PMI.phase),rownames = "Gene")
variance.data <- variance.data %>% mutate(hypervariable=Gene %in% VariableFeatures(PMI.phase))
# We can plot out a graph of the variance vs mean and highlight the selected genes
# this way, we can see whether we think we’re likely to capture what we need.
subset_data <- subset(variance.data, hypervariable == TRUE)
varGeneslog <- variance.data %>%
ggplot(aes(log(mean),log(variance),color=hypervariable)) +
geom_point() +
scale_color_manual(values=c("black","red")) + geom_text_repel(
data = subset_data, max.overlaps = 20,
aes(
x = log(mean),
y = log(variance),
label = Gene,
fontface="italic",),segment.alpha = 1,size = 4) +
theme(legend.position="bottom")
varGeneslog
See if the cell cycle is a major source of variation using PCA. Choose the most variable gene features (which we have already done), then scale the data. We scale because highly expressed genes exhibit the largest absolute variation, and we do not want our "highly variable genes" to simply reflect high expression.
vst: first, a line is fit to the relationship between log(variance) and log(mean) using local polynomial regression (loess). Feature values are then standardized using the observed mean and the expected variance (given by the fitted line), and feature variance is calculated on the standardized values after clipping to a maximum (see the clip.max parameter).
The ScaleData() function in Seurat centers each gene so that its mean expression across cells is 0 and scales each gene so that its variance across cells is 1.
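Put differently, each gene's normalized values are z-scored across cells. A minimal sketch of that idea (Seurat's implementation additionally clips extreme values):
# Sketch: manually z-score one gene's log-normalized values across cells
x <- GetAssayData(PMI.phase, assay = "RNA", slot = "data")["Malat1", ]
z <- (x - mean(x)) / sd(x)
round(c(mean = mean(z), sd = sd(z)), 3) # approximately 0 and 1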
# Scale the counts
PMI.phase <- ScaleData(PMI.phase)
## Centering and scaling data matrix
PMI.phase@assays
## $RNA
## Assay data with 19682 features for 46087 cells
## Top 10 variable features:
## S100a9, S100a8, Hist1h2ap, Retnlg, Hist1h1b, Mgp, Hist1h2ae, Ccl5,
## Chil3, Cxcl10
PMI.phase.pca <- RunPCA(PMI.phase, features = c(m.s.genes, m.g2m.genes))
## Warning in PrepDR(object = object, features = features, verbose = verbose): The
## following 43 features requested have not been scaled (running reduction without
## them): Mcm4, Mrpl36, Chaf1b, Exo1, Msh2, Cdc45, Fen1, Slbp, Cdc6, Cenpu, Dscc1,
## Ubr7, Rad51ap1, Gins2, Tipin, Prim1, Wdr76, Pola1, Usp1, Ung, Blm, Casp8ap2,
## Rfc2, Polr1b, Rrm1, Mcm7, Ccne2, Cbx5, Gtse1, Ctcf, Ttk, Anp32e, G2e3, Ncapd2,
## Gas2l3, Rangap1, Lbr, Kif2c, Hjurp, Nek2, Ect2, Jpt1, Ckap5
## Warning in irlba(A = t(x = object), nv = npcs, ...): You're computing too large
## a percentage of total singular values, use a standard svd instead.
## Warning: Requested number is larger than the number of available items (51).
## Setting to 51.
## Warning: Requested number is larger than the number of available items (51).
## Setting to 51.
## Warning: Requested number is larger than the number of available items (51).
## Setting to 51.
## Warning: Requested number is larger than the number of available items (51).
## Setting to 51.
## Warning: Requested number is larger than the number of available items (51).
## Setting to 51.
## PC_ 1
## Positive: Anln, Tubb4b, Cdca7, Nasp, Psrc1, Cdc25c, Nuf2, Pcna, Hells, Tyms
## Cdca2, Mcm6, Dtl, Mcm5, Aurka, Pimreg, Rad51, Ckap2, E2f8, Aurkb
## Kif20b, Bub1, Clspn, Uhrf1, Tacc3, Dlgap5
## Negative: Top2a, Mki67, Birc5, Cks1b, Cdca8, Smc4, Cdca3, Nusap1, Ube2c, Ccnb2
## Tpx2, Cenpf, Cenpa, Cdk1, Cks2, Cenpe, Rrm2, Kif11, Kif23, Hmmr
## Cdc20, Ckap2l, Hmgb2, Gmnn, Ndc80
## PC_ 2
## Positive: Pcna, Nasp, Mcm6, Cdca7, Hmgb2, Hells, Gmnn, Tyms, Mcm5, Clspn
## Dtl, Anln, Uhrf1, Rrm2, Smc4, Rad51, E2f8, Tacc3, Aurkb, Cdc25c
## Ckap2l, Cks2, Bub1, Cks1b, Kif20b, Ndc80
## Negative: Cenpf, Cenpa, Ube2c, Cenpe, Tubb4b, Hmmr, Ccnb2, Cdc20, Tpx2, Kif23
## Cdca3, Nusap1, Cdca8, Ckap2, Kif11, Nuf2, Birc5, Dlgap5, Mki67, Aurka
## Pimreg, Cdca2, Cdk1, Top2a, Psrc1
## PC_ 3
## Positive: Anln, Tubb4b, Pcna, Smc4, Nasp, Gmnn, Nuf2, Tacc3, Kif11, Kif20b
## E2f8, Psrc1, Rad51, Ckap2, Cdca2, Hmmr, Clspn, Birc5, Cdc25c, Tpx2
## Bub1, Top2a, Nusap1, Dlgap5, Pimreg, Aurka
## Negative: Hmgb2, Cks2, Cdca7, Tyms, Cenpa, Mcm5, Ckap2l, Cks1b, Cdk1, Cdca3
## Mcm6, Ccnb2, Kif23, Dtl, Hells, Mki67, Cenpf, Cdca8, Rrm2, Cenpe
## Uhrf1, Cdc20, Ndc80, Ube2c, Aurkb
## PC_ 4
## Positive: Anln, Hmgb2, Cks2, Pcna, Cenpa, Smc4, Cenpf, Cenpe, Ube2c, Hmmr
## Ccnb2, Kif23, Tacc3, Cdc20, Mki67, Ckap2, Cdca8, Kif20b, Psrc1, Cdca2
## Aurka, Dlgap5, Top2a, Birc5, Cdc25c, Ndc80
## Negative: Tubb4b, Tyms, Nasp, Mcm6, Cdca7, Gmnn, Hells, Mcm5, Uhrf1, Rrm2
## Clspn, Dtl, Nuf2, Rad51, E2f8, Cks1b, Aurkb, Cdk1, Bub1, Nusap1
## Kif11, Ckap2l, Pimreg, Cdca3, Tpx2
## PC_ 5
## Positive: Nasp, Cenpf, Anln, Cenpe, Hmmr, Mki67, Ube2c, Cdca7, Top2a, Cenpa
## Kif11, Ckap2, Tpx2, Dlgap5, Cdca2, Pimreg, Cdca8, Nusap1, Psrc1, Cdc25c
## Kif23, Bub1, Aurka, Ccnb2, Kif20b, Ndc80
## Negative: Pcna, Hmgb2, Tubb4b, Cks2, Mcm6, Tyms, Gmnn, Smc4, Hells, Mcm5
## Uhrf1, Cdk1, Nuf2, Rad51, Rrm2, Clspn, E2f8, Dtl, Tacc3, Cks1b
## Cdca3, Birc5, Aurkb, Ckap2l, Cdc20
DimPlot(PMI.phase.pca)
If the plots for each phase look very similar to each other, do not regress out variation due to cell cycle. You can plot PC1 vs PC2 before and after regression to see how effective the regression was.
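If the phases had separated strongly, one option (a hedged sketch of an alternative, not what was done in this analysis) would be to regress the two scores out during scaling and re-check the PCA:
# Not run here: only needed if cell cycle drives unwanted structure
PMI.phase.cc <- ScaleData(PMI.phase, vars.to.regress = c("S.Score", "G2M.Score"))
PMI.phase.cc <- RunPCA(PMI.phase.cc, features = c(m.s.genes, m.g2m.genes))
DimPlot(PMI.phase.cc, group.by = "Phase") # compare with the plot below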
# Perform PCA
PMI.phase <- RunPCA(PMI.phase)
# Plot the PCA colored by cell cycle phase
cycle.pca <- DimPlot(PMI.phase,
reduction = "pca",
group.by= "Phase",
split.by = "Phase")
cycle.pca
Now we can use SCTransform as a more accurate way to normalize, estimate the variance of the raw filtered data, and identify the most variable genes. Variation in sequencing depth (total nCount_RNA per cell) is modeled with a regularized negative binomial regression, and variance estimates are stabilized by pooling information across genes with similar abundances.
SCTransform automatically accounts for cellular sequencing depth by regressing out nUMIs. However, if other sources of uninteresting variation were identified in the data during the exploration steps, we can also include these. We observed little to no effect due to cell cycle phase and so chose not to regress this out of our data. We observed some effect of mitochondrial expression and so chose to regress this out of the data.
Since we have four samples in our dataset (from two conditions), we want to keep them as separate objects and transform them separately, as that is what is required for integration. We will first split the cells in the PMI.phase object by sample.
# Split the Seurat object by sample to perform SCT on each sample
PMI.split <- SplitObject(PMI.phase, split.by = "sample")
Now we will use a ‘for loop’ to run the SCTransform() on each sample, and regress out mitochondrial expression by specifying in the vars.to.regress argument of the SCTransform() function.
Before we run this for loop, we know that the output can generate large R objects/variables in terms of memory. If we have a large dataset, then we might need to adjust the limit for allowable object sizes within R (Default is 500 * 1024 ^ 2 = 500 Mb) using the following code:
options(future.globals.maxSize = 4000 * 1024^2)
for (i in 1:length(PMI.split)) {
print(paste0("Sample ", i))
PMI.split[[i]] <- SCTransform(PMI.split[[i]]
# vars.to.regress = c("percent.mt")
)
}
## [1] "Sample 1"
##
|
| | 0%
|
|================== | 25%
|
|=================================== | 50%
|
|==================================================== | 75%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 5%
|
|====== | 8%
|
|======= | 11%
|
|========= | 13%
|
|=========== | 16%
|
|============= | 18%
|
|=============== | 21%
|
|================= | 24%
|
|================== | 26%
|
|==================== | 29%
|
|====================== | 32%
|
|======================== | 34%
|
|========================== | 37%
|
|============================ | 39%
|
|============================= | 42%
|
|=============================== | 45%
|
|================================= | 47%
|
|=================================== | 50%
|
|===================================== | 53%
|
|======================================= | 55%
|
|========================================= | 58%
|
|========================================== | 61%
|
|============================================ | 63%
|
|============================================== | 66%
|
|================================================ | 68%
|
|================================================== | 71%
|
|==================================================== | 74%
|
|===================================================== | 76%
|
|======================================================= | 79%
|
|========================================================= | 82%
|
|=========================================================== | 84%
|
|============================================================= | 87%
|
|=============================================================== | 89%
|
|================================================================ | 92%
|
|================================================================== | 95%
|
|==================================================================== | 97%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 5%
|
|====== | 8%
|
|======= | 11%
|
|========= | 13%
|
|=========== | 16%
|
|============= | 18%
|
|=============== | 21%
|
|================= | 24%
|
|================== | 26%
|
|==================== | 29%
|
|====================== | 32%
|
|======================== | 34%
|
|========================== | 37%
|
|============================ | 39%
|
|============================= | 42%
|
|=============================== | 45%
|
|================================= | 47%
|
|=================================== | 50%
|
|===================================== | 53%
|
|======================================= | 55%
|
|========================================= | 58%
|
|========================================== | 61%
|
|============================================ | 63%
|
|============================================== | 66%
|
|================================================ | 68%
|
|================================================== | 71%
|
|==================================================== | 74%
|
|===================================================== | 76%
|
|======================================================= | 79%
|
|========================================================= | 82%
|
|=========================================================== | 84%
|
|============================================================= | 87%
|
|=============================================================== | 89%
|
|================================================================ | 92%
|
|================================================================== | 95%
|
|==================================================================== | 97%
|
|======================================================================| 100%
## [1] "Sample 2"
##
|
| | 0%
|
|================== | 25%
|
|=================================== | 50%
|
|==================================================== | 75%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 6%
|
|====== | 9%
|
|======== | 12%
|
|=========== | 15%
|
|============= | 18%
|
|=============== | 21%
|
|================= | 24%
|
|=================== | 27%
|
|===================== | 30%
|
|======================= | 33%
|
|========================= | 36%
|
|============================ | 39%
|
|============================== | 42%
|
|================================ | 45%
|
|================================== | 48%
|
|==================================== | 52%
|
|====================================== | 55%
|
|======================================== | 58%
|
|========================================== | 61%
|
|============================================= | 64%
|
|=============================================== | 67%
|
|================================================= | 70%
|
|=================================================== | 73%
|
|===================================================== | 76%
|
|======================================================= | 79%
|
|========================================================= | 82%
|
|=========================================================== | 85%
|
|============================================================== | 88%
|
|================================================================ | 91%
|
|================================================================== | 94%
|
|==================================================================== | 97%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 6%
|
|====== | 9%
|
|======== | 12%
|
|=========== | 15%
|
|============= | 18%
|
|=============== | 21%
|
|================= | 24%
|
|=================== | 27%
|
|===================== | 30%
|
|======================= | 33%
|
|========================= | 36%
|
|============================ | 39%
|
|============================== | 42%
|
|================================ | 45%
|
|================================== | 48%
|
|==================================== | 52%
|
|====================================== | 55%
|
|======================================== | 58%
|
|========================================== | 61%
|
|============================================= | 64%
|
|=============================================== | 67%
|
|================================================= | 70%
|
|=================================================== | 73%
|
|===================================================== | 76%
|
|======================================================= | 79%
|
|========================================================= | 82%
|
|=========================================================== | 85%
|
|============================================================== | 88%
|
|================================================================ | 91%
|
|================================================================== | 94%
|
|==================================================================== | 97%
|
|======================================================================| 100%
## [1] "Sample 3"
##
|
| | 0%
|
|================== | 25%
|
|=================================== | 50%
|
|==================================================== | 75%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 5%
|
|====== | 8%
|
|======== | 11%
|
|========= | 14%
|
|=========== | 16%
|
|============= | 19%
|
|=============== | 22%
|
|================= | 24%
|
|=================== | 27%
|
|===================== | 30%
|
|======================= | 32%
|
|========================= | 35%
|
|========================== | 38%
|
|============================ | 41%
|
|============================== | 43%
|
|================================ | 46%
|
|================================== | 49%
|
|==================================== | 51%
|
|====================================== | 54%
|
|======================================== | 57%
|
|========================================== | 59%
|
|============================================ | 62%
|
|============================================= | 65%
|
|=============================================== | 68%
|
|================================================= | 70%
|
|=================================================== | 73%
|
|===================================================== | 76%
|
|======================================================= | 78%
|
|========================================================= | 81%
|
|=========================================================== | 84%
|
|============================================================= | 86%
|
|============================================================== | 89%
|
|================================================================ | 92%
|
|================================================================== | 95%
|
|==================================================================== | 97%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 5%
|
|====== | 8%
|
|======== | 11%
|
|========= | 14%
|
|=========== | 16%
|
|============= | 19%
|
|=============== | 22%
|
|================= | 24%
|
|=================== | 27%
|
|===================== | 30%
|
|======================= | 32%
|
|========================= | 35%
|
|========================== | 38%
|
|============================ | 41%
|
|============================== | 43%
|
|================================ | 46%
|
|================================== | 49%
|
|==================================== | 51%
|
|====================================== | 54%
|
|======================================== | 57%
|
|========================================== | 59%
|
|============================================ | 62%
|
|============================================= | 65%
|
|=============================================== | 68%
|
|================================================= | 70%
|
|=================================================== | 73%
|
|===================================================== | 76%
|
|======================================================= | 78%
|
|========================================================= | 81%
|
|=========================================================== | 84%
|
|============================================================= | 86%
|
|============================================================== | 89%
|
|================================================================ | 92%
|
|================================================================== | 95%
|
|==================================================================== | 97%
|
|======================================================================| 100%
## [1] "Sample 4"
##
|
| | 0%
|
|================== | 25%
|
|=================================== | 50%
|
|==================================================== | 75%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 6%
|
|====== | 9%
|
|======== | 12%
|
|=========== | 15%
|
|============= | 18%
|
|=============== | 21%
|
|================= | 24%
|
|=================== | 27%
|
|===================== | 30%
|
|======================= | 33%
|
|========================= | 36%
|
|============================ | 39%
|
|============================== | 42%
|
|================================ | 45%
|
|================================== | 48%
|
|==================================== | 52%
|
|====================================== | 55%
|
|======================================== | 58%
|
|========================================== | 61%
|
|============================================= | 64%
|
|=============================================== | 67%
|
|================================================= | 70%
|
|=================================================== | 73%
|
|===================================================== | 76%
|
|======================================================= | 79%
|
|========================================================= | 82%
|
|=========================================================== | 85%
|
|============================================================== | 88%
|
|================================================================ | 91%
|
|================================================================== | 94%
|
|==================================================================== | 97%
|
|======================================================================| 100%
##
|
| | 0%
|
|== | 3%
|
|==== | 6%
|
|====== | 9%
|
|======== | 12%
|
|=========== | 15%
|
|============= | 18%
|
|=============== | 21%
|
|================= | 24%
|
|=================== | 27%
|
|===================== | 30%
|
|======================= | 33%
|
|========================= | 36%
|
|============================ | 39%
|
|============================== | 42%
|
|================================ | 45%
|
|================================== | 48%
|
|==================================== | 52%
|
|====================================== | 55%
|
|======================================== | 58%
|
|========================================== | 61%
|
|============================================= | 64%
|
|=============================================== | 67%
|
|================================================= | 70%
|
|=================================================== | 73%
|
|===================================================== | 76%
|
|======================================================= | 79%
|
|========================================================= | 82%
|
|=========================================================== | 85%
|
|============================================================== | 88%
|
|================================================================ | 91%
|
|================================================================== | 94%
|
|==================================================================== | 97%
|
|======================================================================| 100%
NOTE: By default, after normalizing, adjusting the variance, and regressing out uninteresting sources of variation, SCTransform ranks the genes by residual variance and outputs the 3000 most variable genes. If the dataset has a larger number of cells, it may be beneficial to raise this value using the variable.features.n argument.
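For example (an illustrative call only, not what was run above), the number of reported variable genes could be raised for one of the split objects like this:
# Illustrative: keep 5000 variable genes instead of the default 3000
obj <- SCTransform(PMI.split[[1]], variable.features.n = 5000, verbose = FALSE)
length(VariableFeatures(obj)) # 5000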
Note that SCTransform also sets the default assay to SCT. We can view the different assays stored in each Seurat object.
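The stored assays and the current default can be checked directly (a small illustrative check):
# List assays and confirm SCT is now the default for the first sample
Assays(PMI.split[[1]])       # e.g. "RNA" "SCT"
DefaultAssay(PMI.split[[1]]) # "SCT"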
A thread about whether or not to regress out batch: https://github.com/satijalab/seurat/issues/3270. The suggestion there is not to regress out batch but to use a data integration method instead.
# Check
PMI.split
## $rep1_WT_3_hr
## An object of class Seurat
## 38435 features across 6856 samples within 2 assays
## Active assay: SCT (18753 features, 3000 variable features)
## 1 other assay present: RNA
## 1 dimensional reduction calculated: pca
##
## $rep2_WT_3_hr
## An object of class Seurat
## 35869 features across 13123 samples within 2 assays
## Active assay: SCT (16187 features, 3000 variable features)
## 1 other assay present: RNA
## 1 dimensional reduction calculated: pca
##
## $rep1_PS19_3_hr
## An object of class Seurat
## 37952 features across 8618 samples within 2 assays
## Active assay: SCT (18270 features, 3000 variable features)
## 1 other assay present: RNA
## 1 dimensional reduction calculated: pca
##
## $rep2_PS19_3_hr
## An object of class Seurat
## 35764 features across 17490 samples within 2 assays
## Active assay: SCT (16082 features, 3000 variable features)
## 1 other assay present: RNA
## 1 dimensional reduction calculated: pca
Condition-specific clustering of cells indicates that we need to integrate the cells across conditions to ensure that cells of the same cell type cluster together.
To integrate, use the shared highly variable genes from each condition identified using SCTransform. Then, integrate conditions to overlay cells that are similar or have a “common set of biological features” between groups.
Now, using our SCTransform object as input, let’s perform the integration across conditions.
First, we need to specify that we want to use all of the 3000 most variable genes identified by SCTransform for the integration. By default, this function selects the top 2000 genes.
# Choose the features to use when integrating multiple datasets.
# will use nfeatures as 3000 as defined by running SCTransform above
var.features <- SelectIntegrationFeatures(object.list = PMI.split,
nfeatures = 3000)
# merge the PMI
PMI.sct.merged <- merge(x = PMI.split[[1]],
y = c(PMI.split[[2]],PMI.split[[3]],PMI.split[[4]]),
project = paste0("LPS PMI ", tissue, " Single Cell"))
# define the variable features
VariableFeatures(PMI.sct.merged) <- var.features
save rObject
Inspect data
PMI.sct.merged <- RunPCA(PMI.sct.merged)
## PC_ 1
## Positive: C1qa, C1qb, Tmsb4x, Hexb, C1qc, Ctss, Jun, Atf3, Klf2, Fos
## Cx3cr1, Ctsd, Junb, Ccl4, Tyrobp, Egr1, Csf1r, Zfp36, Btg2, Trem2
## Rps29, P2ry12, Nfkbia, Dusp1, Hspa1a, Fcer1g, Mafb, Gpr34, B2m, Fau
## Negative: Slc1a2, Atp1a2, Sparcl1, Plpp3, Clu, Aldoc, Mt3, Slc4a4, Gja1, Apoe
## Gpr37l1, Slc1a3, Htra1, Cpe, Atp1b2, Pla2g7, Ptn, Ptprz1, Mfge8, Slc6a11
## Cspg5, Acsl3, Prnp, Mt1, Ndrg2, S1pr1, Ntsr2, Ttyh1, Cldn10, F3
## PC_ 2
## Positive: Slc1a2, Atp1a2, Plpp3, Sparcl1, Aldoc, Clu, Apoe, Slc4a4, Gja1, Slc1a3
## Mt3, Gpr37l1, Pla2g7, C1qa, Atp1b2, Slc6a11, S1pr1, C1qb, Mt1, Mfge8
## Cpe, Htra1, Hexb, Ntsr2, Cldn10, F3, Aqp4, C1qc, Ttyh1, Bcan
## Negative: Plp1, Mbp, Mal, Cryab, Mag, Cnp, Mobp, Apod, Ermn, Cldn11
## Ptgds, Sept4, Qdpr, Tubb4a, Car2, Aplp1, Stmn1, Stmn4, Ppp1r14a, Enpp2
## Mog, Tspan2, Dbndd2, Opalin, Fth1, Ttll7, App, Gatm, Trf, Ugt8a
## PC_ 3
## Positive: C1qa, Hexb, C1qb, Slc1a2, Cx3cr1, C1qc, Ctss, Atf3, Ctsd, Plp1
## Csf1r, P2ry12, Plpp3, Egr1, Ccl4, Mafb, Aldoc, Mbp, Jun, Gpr34
## Trem2, Atp1a2, Lgmn, Trf, Mag, Kctd12, Selplg, Aplp1, Zfp36, Scd2
## Negative: Bsg, Cldn5, Flt1, Pltp, Spock2, Ly6c1, Itm2a, Cxcl12, Ly6a, Slco1a4
## Igfbp7, Egfl7, Abcb1a, Adgrf5, Sptbn1, Hspb1, Ptprb, Adgrl4, Fn1, Vim
## Tmsb10, Pcp4l1, Tm4sf1, Ahnak, Pglyrp1, Jcad, Ttr, Esam, Tsc22d1, Slc9a3r2
## PC_ 4
## Positive: Ttr, Enpp2, Ecrg4, 2900040C04Rik, Chchd10, Calml4, Dbi, Rbp1, Atp5g1, Pcp4
## Kl, Folr1, Sostdc1, Kcnj13, Cox8a, Tmsb10, Clic6, Atp5md, Ndufa4, Mt3
## Prlr, Cox6c, Rpl41, Spint2, Cab39l, Lgals1, Rsph1, Krt18, Prdx5, Cox7b
## Negative: Cldn5, Flt1, Ly6c1, Cxcl12, Spock2, Itm2a, Slco1a4, Ly6a, Bsg, Egfl7
## Igfbp7, Abcb1a, Ptprb, Pltp, Sptbn1, Adgrf5, Jun, Adgrl4, Hspb1, Atp1a2
## Atf3, Slc6a6, Jcad, Egr1, Fos, Klf2, Esam, Hspa1a, Podxl, Car4
## PC_ 5
## Positive: Pdgfra, Tnr, Olig1, Lhfpl3, Vcan, C1ql1, Cspg4, Gpr17, Meg3, Matn4
## Cacng4, Opcml, Cntn1, Ptprz1, Pcdh15, Neu4, Rtn1, Vxn, Epn2, Nxph1
## Olig2, Sulf2, Cspg5, Ncald, Dpp6, Gria3, Gng3, Stmn3, Olfm2, Xylt1
## Negative: Ttr, Mt3, Chchd10, Clu, Enpp2, Fth1, Apoe, Mt1, Aldoc, Slc1a2
## Ecrg4, Ptgds, Car2, 2900040C04Rik, Rbp1, Igfbp2, Tmsb4x, Calml4, Fxyd1, Plpp3
## Bsg, Rps24, Atp1a2, Rps29, Tpt1, Fau, Lgals1, Gja1, Cryab, Rpl41
PMI.sct.merged <- RunUMAP(PMI.sct.merged, dims = 1:15, reduction = "pca")
## 14:43:47 UMAP embedding parameters a = 0.9922 b = 1.112
## 14:43:47 Read 46087 rows and found 15 numeric columns
## 14:43:47 Using Annoy for neighbor search, n_neighbors = 30
## 14:43:47 Building Annoy index with metric = cosine, n_trees = 50
## 0% 10 20 30 40 50 60 70 80 90 100%
## [----|----|----|----|----|----|----|----|----|----|
## **************************************************|
## 14:43:54 Writing NN index file to temp file /tmp/Rtmp5bLYos/filea1766e61ba37
## 14:43:54 Searching Annoy index using 1 thread, search_k = 3000
## 14:44:13 Annoy recall = 100%
## 14:44:14 Commencing smooth kNN distance calibration using 1 thread
## 14:44:17 Initializing from normalized Laplacian + noise
## 14:44:19 Commencing optimization for 200 epochs, with 2014746 positive edges
## 14:45:13 Optimization finished
pcatreatment <- DimPlot(PMI.sct.merged,
reduction = "umap",
split.by = "treatment",
group.by = "treatment",
cols = treatment_colors)
pcatreatment
pcareplicate <- DimPlot(PMI.sct.merged,
reduction = "umap",
group.by = "replicate",
cols = replicate_colors)
pcareplicate
pcareplicate_split <- DimPlot(PMI.sct.merged,
reduction = "umap",
split.by = "replicate",
group.by = "replicate",
cols = replicate_colors)
pcareplicate_split
pcagenotype <- DimPlot(PMI.sct.merged,
reduction = "umap",
group.by = "genotype",
cols = genotype_colors)
pcagenotype
pcagenotype_split <- DimPlot(PMI.sct.merged,
reduction = "umap",
split.by = "genotype",
group.by = "genotype",
cols = genotype_colors)
pcagenotype_split
pcasample <- DimPlot(PMI.sct.merged,
reduction = "umap",
split.by = "sample",
group.by = "sample",
cols = sample_colors)
pcasample
pcasample_split <- DimPlot(PMI.sct.merged,
reduction = "umap",
group.by = "sample",
shuffle = TRUE)
pcasample_split
# clean up
remove(pcatreatment, pcareplicate, pcareplicate_split, pcagenotype,
       pcagenotype_split, pcasample, pcasample_split)
run harmony
# run PCA on the merged object
# run harmony to harmonize over samples
PMI.integrated <- RunHarmony(object = PMI.sct.merged,
group.by.vars = "sample",
assay.use = "SCT",
reduction = "pca",
plot_convergence = TRUE)
## Harmony 1/10
## Harmony 2/10
## Harmony 3/10
## Harmony 4/10
## Harmony 5/10
## Harmony 6/10
## Harmony converged after 6 iterations
## Warning: Invalid name supplied, making object name syntactically valid. New
## object name is Seurat..ProjectDim.SCT.harmony; see ?make.names for more details
## on syntax validity
# restore sample identities and factor levels on the integrated object
Idents(PMI.integrated) <- PMI.integrated$sample
PMI.integrated$treatment <- factor(PMI.integrated$treatment,
levels = c("3_hr"))
PMI.integrated$replicate <- factor(PMI.integrated$replicate,
levels = c("replicate1", "replicate2"))
PMI.integrated$sample <- factor(PMI.integrated$sample,
levels = c("rep1_WT_3_hr", "rep2_WT_3_hr",
"rep1_PS19_3_hr", "rep2_PS19_3_hr"))
# check the embedding
harmony_embeddings <- Embeddings(PMI.integrated, 'harmony')
harmony_embeddings[1:5, 1:5]
## harmony_1 harmony_2 harmony_3 harmony_4
## rep1_WT_3_hr_ATGCATGCAATGCTCA-1 -15.965521 2.3385952 -30.931085 31.2723413
## rep1_WT_3_hr_CCAATTTTCCTAGAGT-1 -11.799546 -77.4583413 15.261404 -7.8950917
## rep1_WT_3_hr_CAACCAATCTGTCAGA-1 18.322419 0.2154381 -19.241808 10.1074343
## rep1_WT_3_hr_TACTTCACAGCTGTAT-1 -10.021870 -74.3522908 13.414466 -9.2692464
## rep1_WT_3_hr_TCATGTTAGCTAAACA-1 -2.003486 -49.1918764 4.166751 -0.7116928
## harmony_5
## rep1_WT_3_hr_ATGCATGCAATGCTCA-1 -15.582510
## rep1_WT_3_hr_CCAATTTTCCTAGAGT-1 -3.952214
## rep1_WT_3_hr_CAACCAATCTGTCAGA-1 -9.636293
## rep1_WT_3_hr_TACTTCACAGCTGTAT-1 -2.815923
## rep1_WT_3_hr_TCATGTTAGCTAAACA-1 -4.575501
# check the PCA plot
p1 <- DimPlot(object = PMI.integrated,
reduction = "harmony",
group.by = "replicate",
cols = replicate_colors) + NoLegend()
p2 <- VlnPlot(object = PMI.integrated,
features = "harmony_1",
group.by = "replicate",
pt.size = 0,
cols = replicate_colors) + NoLegend()
plot_grid(p1,p2)
# clean up
remove(p1, p2)
Top 20 variable features
top20 <- PMI.integrated@assays$SCT@var.features[1:20]
top20
## [1] "Ttr" "Plp1" "Ptgds" "Cd74" "Igkc" "S100a8" "S100a9" "Vtn"
## [9] "Enpp2" "Mbp" "Lyz2" "H2-Aa" "Retnlg" "H2-Eb1" "Mal" "Slc1a2"
## [17] "Cldn5" "Cxcl10" "Rgs5" "Apod"
After integration, to visualize the integrated data we can use dimensionality reduction techniques, such as PCA and Uniform Manifold Approximation and Projection (UMAP). While PCA will determine all PCs, we can only plot two at a time. In contrast, UMAP will take the information from any number of top PCs to arrange the cells in this multidimensional space. It will take those distances in multidimensional space, and try to plot them in two dimensions. In this way, the distances between cells represent similarity in expression.
To generate these visualizations with the harmony output, use reduction = “harmony”
# Plot PCA
pcaTreatment <- DimPlot(PMI.integrated,
reduction = "harmony",
split.by = "treatment",
group.by = "treatment",
cols = treatment_colors)
pcaTreatment
pcaTreatmentSplit <- DimPlot(PMI.integrated,
reduction = "harmony",
group.by = "treatment",
cols = treatment_colors)
pcaTreatmentSplit
pcaReplicate <- DimPlot(PMI.integrated,
reduction = "harmony",
group.by = "replicate",
cols = replicate_colors)
pcaReplicate
pcaReplicateSplit <- DimPlot(PMI.integrated,
reduction = "harmony",
split.by = "replicate",
group.by = "replicate",
cols = replicate_colors)
pcaReplicateSplit
pcaGenotype <- DimPlot(PMI.integrated,
reduction = "harmony",
group.by = "genotype",
cols = genotype_colors,
shuffle = TRUE)
pcaGenotype
pcaGenotypeSplit <- DimPlot(PMI.integrated,
reduction = "harmony",
split.by = "genotype",
group.by = "genotype",
cols = genotype_colors,
shuffle = TRUE)
pcaGenotypeSplit
pcaSample <- DimPlot(PMI.integrated,
reduction = "harmony",
group.by = "sample",
cols = sample_colors,
shuffle = TRUE)
pcaSample
pcaSampleSplit <- DimPlot(PMI.integrated,
reduction = "harmony",
split.by = "sample",
group.by = "sample",
cols = sample_colors)
pcaSampleSplit
To overcome the extensive technical noise in the expression of any single gene for scRNA-seq data, Seurat assigns cells to clusters based on their PCA scores derived from the expression of the integrated most variable genes, with each PC essentially representing a “metagene” that combines information across a correlated gene set. Determining how many PCs to include in the clustering step is therefore important to ensure that we are capturing the majority of the variation, or cell types, present in our dataset.
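A quick visual complement to the quantitative cutoffs below (assuming the default 50 PCs computed by RunPCA) is Seurat's elbow plot:
# Standard deviation captured by each PC; look for where the curve flattens
ElbowPlot(PMI.integrated, ndims = 50, reduction = "pca")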
# Printing out the most variable genes driving PCs
print(x = PMI.integrated[["pca"]],
dims = 1:10,
nfeatures = 10)
## PC_ 1
## Positive: C1qa, C1qb, Tmsb4x, Hexb, C1qc, Ctss, Jun, Atf3, Klf2, Fos
## Negative: Slc1a2, Atp1a2, Sparcl1, Plpp3, Clu, Aldoc, Mt3, Slc4a4, Gja1, Apoe
## PC_ 2
## Positive: Slc1a2, Atp1a2, Plpp3, Sparcl1, Aldoc, Clu, Apoe, Slc4a4, Gja1, Slc1a3
## Negative: Plp1, Mbp, Mal, Cryab, Mag, Cnp, Mobp, Apod, Ermn, Cldn11
## PC_ 3
## Positive: C1qa, Hexb, C1qb, Slc1a2, Cx3cr1, C1qc, Ctss, Atf3, Ctsd, Plp1
## Negative: Bsg, Cldn5, Flt1, Pltp, Spock2, Ly6c1, Itm2a, Cxcl12, Ly6a, Slco1a4
## PC_ 4
## Positive: Ttr, Enpp2, Ecrg4, 2900040C04Rik, Chchd10, Calml4, Dbi, Rbp1, Atp5g1, Pcp4
## Negative: Cldn5, Flt1, Ly6c1, Cxcl12, Spock2, Itm2a, Slco1a4, Ly6a, Bsg, Egfl7
## PC_ 5
## Positive: Pdgfra, Tnr, Olig1, Lhfpl3, Vcan, C1ql1, Cspg4, Gpr17, Meg3, Matn4
## Negative: Ttr, Mt3, Chchd10, Clu, Enpp2, Fth1, Apoe, Mt1, Aldoc, Slc1a2
## PC_ 6
## Positive: Ttr, Enpp2, Ecrg4, 2900040C04Rik, Calml4, Rbp1, Chchd10, Kl, Folr1, Sostdc1
## Negative: Tmsb10, Cd74, H2-Aa, Igkc, H2-Ab1, Cd79a, H2-Eb1, Cd52, Ifi27l2a, Rps24
## PC_ 7
## Positive: Cd24a, Hbb-bs, Nnat, Atf3, Rarres2, Hsp90aa1, Fos, Tmem212, Stmn1, Map1b
## Negative: C1qb, C1qa, Tmsb4x, Rps29, Bsg, Rplp1, Tyrobp, Pdgfra, Olig1, Cldn5
## PC_ 8
## Positive: Cd74, H2-Aa, H2-Ab1, S100a6, S100a11, H2-Eb1, Cybb, Crip1, Ifitm2, Klf2
## Negative: Hbb-bs, Hba-a1, Hba-a2, Hbb-bt, Stmn1, Stmn2, Sox11, Meis2, Dlx6os1, Igfbpl1
## PC_ 9
## Positive: Stmn1, Stmn2, Sox11, Dlx6os1, Meis2, Igfbpl1, Map1b, Tubb2b, Dcx, Tubb3
## Negative: Hbb-bs, Hba-a1, Hba-a2, Hbb-bt, Pdgfra, Tnr, Olig1, Cd74, S100a6, Cspg4
## PC_ 10
## Positive: Vtn, Rgs5, Cald1, Pdgfrb, Atp13a5, Ndufa4l2, Myl9, Rgs4, Atp1a2, Slc6a20a
## Negative: Cldn5, Atf3, Mt3, Klf2, Bsg, Pltp, Fos, Flt1, Mt1, Ly6c1
Quantitative approach to an elbow plot:
- The first PC at which the cumulative contribution exceeds 90% of the standard deviation while that PC itself contributes less than 5%.
- The point at which the percent change in variation between consecutive PCs drops below 0.1%.
First metric
# Determine percent of variation associated with each PC
stdv <- PMI.integrated[["pca"]]@stdev
sum.stdv <- sum(PMI.integrated[["pca"]]@stdev)
percent.stdv <- (stdv / sum.stdv) * 100
# Calculate cumulative percents for each PC
cumulative <- cumsum(percent.stdv)
# Determine which PC exhibits cumulative percent greater than 90% and
# and % variation associated with the PC as less than 5
co1 <- which(cumulative > 90 & percent.stdv < 5)[1]
co1
## [1] 40
Second metric
# Determine the difference between variation of PC and subsequent PC
co2 <- sort(which(
(percent.stdv[1:length(percent.stdv) - 1] -
percent.stdv[2:length(percent.stdv)]) > 0.1),
decreasing = T)[1] + 1
# last point where change of % of variation is more than 0.1%.
co2
## [1] 19
Choose the minimum of these two metrics as the PCs covering the majority of the variation in the data.
# Minimum of the two calculation
min.pc <- min(co1, co2)
min.pc
## [1] 19
Use the min.pc we just calculated to generate the clusters. We can plot the elbow plot again and overlay the information determined by our metrics:
# Create a dataframe with values
plot_df <- data.frame(pct = percent.stdv,
cumu = cumulative,
rank = 1:length(percent.stdv))
# Elbow plot to visualize
ggplot(plot_df, aes(cumu, pct, label = rank, color = rank > min.pc)) +
geom_text() +
geom_vline(xintercept = 90, color = "grey") +
geom_hline(yintercept = min(percent.stdv[percent.stdv > 5]), color = "grey") +
theme_bw()
# Run UMAP
PMI.integrated <- RunUMAP(PMI.integrated,
dims = 1:min.pc,
reduction = "harmony",
n.components = 3) # set to 3 to use with VR
DimPlot(PMI.integrated,
group.by = "treatment",
shuffle = TRUE,
cols = treatment_colors)
DimPlot(PMI.integrated,
group.by = "treatment",
split.by = "treatment",
shuffle = TRUE,
cols = treatment_colors)
DimPlot(PMI.integrated,
group.by = "replicate",
shuffle = TRUE,
cols = replicate_colors)
DimPlot(PMI.integrated,
group.by = "replicate",
split.by = "replicate",
shuffle = TRUE,
cols = replicate_colors)
DimPlot(PMI.integrated,
group.by = "genotype",
cols = genotype_colors,
shuffle = TRUE)
DimPlot(PMI.integrated,
group.by = "genotype",
split.by = "genotype",
cols = genotype_colors,
shuffle = TRUE)
DimPlot(PMI.integrated,
group.by = "sample",
shuffle = TRUE,
cols = sample_colors)
DimPlot(PMI.integrated,
group.by = "sample",
split.by = "sample",
shuffle = TRUE,
cols = sample_colors)
FeaturePlot(PMI.integrated,
features = "Apoe")
FeaturePlot(PMI.integrated,
features = "Ttr")
FeaturePlot(PMI.integrated,
features = "Xist")
Seurat uses a graph-based clustering approach, which embeds cells in a graph structure, using a K-nearest neighbor (KNN) graph (by default), with edges drawn between cells with similar gene expression patterns. Then, it attempts to partition this graph into highly interconnected ‘quasi-cliques’ or ‘communities’ [Seurat - Guided Clustering Tutorial].
We will use the FindClusters() function to perform the graph-based clustering. The resolution is an important argument that sets the “granularity” of the downstream clustering and will need to be optimized for every individual experiment. For datasets of 3,000 - 5,000 cells, the resolution set between 0.4-1.4 generally yields good clustering. Increased resolution values lead to a greater number of clusters, which is often required for larger datasets.
The FindClusters() function allows us to enter a series of resolutions and will calculate the “granularity” of the clustering. This is very helpful for testing which resolution works for moving forward without having to run the function for each resolution.
# Determine the K-nearest neighbor graph
PMI.unannotated <- FindNeighbors(object = PMI.integrated,
assay = "SCT", # set as default after SCTransform
reduction = "harmony",
dims = 1:min.pc)
# Determine the clusters for various resolutions
PMI.unannotated <- FindClusters(object = PMI.unannotated,
algorithm = 1, # 1= Louvain
resolution = seq(0.1,0.8,by=0.1))
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9785
## Number of communities: 14
## Elapsed time: 10 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9649
## Number of communities: 19
## Elapsed time: 10 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9548
## Number of communities: 21
## Elapsed time: 8 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9459
## Number of communities: 23
## Elapsed time: 9 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9378
## Number of communities: 24
## Elapsed time: 9 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9300
## Number of communities: 26
## Elapsed time: 7 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9233
## Number of communities: 27
## Elapsed time: 8 seconds
## Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
##
## Number of nodes: 46087
## Number of edges: 1655448
##
## Running Louvain algorithm...
## Maximum modularity in 10 random starts: 0.9175
## Number of communities: 32
## Elapsed time: 9 seconds
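To see at a glance how many clusters each resolution produced, the cluster assignments that FindClusters() stores in the object metadata can be tabulated directly. This is a minimal sketch; it assumes the default SCT_snn_res.* column naming that Seurat uses when clustering on the SCT assay.
# number of clusters found at each resolution (columns added by FindClusters())
res.columns <- grep("^SCT_snn_res\\.", colnames(PMI.unannotated@meta.data), value = TRUE)
sapply(PMI.unannotated@meta.data[res.columns], function(x) length(unique(x)))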
# 0.1
umap0.1 <- DimPlot(PMI.unannotated,
group.by = "SCT_snn_res.0.1",
label = TRUE)
umap0.1
# 0.2
umap0.2 <- DimPlot(PMI.unannotated,
group.by = "SCT_snn_res.0.2",
label = TRUE)
umap0.2
# 0.3
umap0.3 <- DimPlot(PMI.unannotated,
group.by = "SCT_snn_res.0.3",
label = TRUE)
umap0.3
# 0.4
umap0.4 <- DimPlot(PMI.unannotated,
group.by = "SCT_snn_res.0.4",
label = TRUE)
umap0.4
# treatment
utreatment <- DimPlot(PMI.unannotated,
label = FALSE,
group.by = "SCT_snn_res.0.1",
split.by = "treatment") +
NoLegend()
utreatment
# replicate
ureplicate <- DimPlot(PMI.unannotated,
label = FALSE,
group.by = "SCT_snn_res.0.1",
split.by = "replicate") +
NoLegend()
ureplicate
# sample
usample <- DimPlot(PMI.unannotated,
label = FALSE,
group.by = "SCT_snn_res.0.1",
split.by = "sample") +
NoLegend()
usample
# phase
uphase <- DimPlot(PMI.unannotated,
label = FALSE,
group.by = "SCT_snn_res.0.1",
split.by = "Phase") +
NoLegend()
uphase
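# Score each cell for broad cell-type marker gene sets; PercentageFeatureSet()
# returns the percentage of a cell's counts that come from the listed genes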
PMI.unannotated[["percent.neuron"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("Snap25", "Syt1", "Gad1", "Gad2"))
PMI.unannotated[["percent.astrocytes"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("Clu", "Gfap", "Aqp4"))
PMI.unannotated[["percent.microglia"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("Hexb", "C1qa"))
PMI.unannotated[["percent.DAM"]] <-
PercentageFeatureSet(PMI.unannotated,
features = c("Apoe", "Cst7", "Lyz2", "Lpl", "Cd9", "Trem2"))
PMI.unannotated[["percent.homeostatic"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("P2ry12", "Cx3cr1"))
PMI.unannotated[["percent.oligodendrocytes"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("Plp1", "Mbp", "Mag"))
PMI.unannotated[["percent.OPCs"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("Olig1", "Pdgfra", "Vcan"))
PMI.unannotated[["percent.macrophages"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("F13a1", "Mrc1"))
PMI.unannotated[["percent.endothelial"]] <-
PercentageFeatureSet(PMI.unannotated, features = c("Igfbp7", "Fn1", "Sox17"))
FeaturePlot(PMI.unannotated, features = "percent.neuron")
FeaturePlot(PMI.unannotated, features = "percent.astrocytes")
FeaturePlot(PMI.unannotated, features = "percent.microglia")
FeaturePlot(PMI.unannotated, features = "percent.DAM")
FeaturePlot(PMI.unannotated, features = "percent.homeostatic")
FeaturePlot(PMI.unannotated, features = "percent.oligodendrocytes")
FeaturePlot(PMI.unannotated, features = "percent.OPCs")
FeaturePlot(PMI.unannotated, features = "percent.macrophages")
FeaturePlot(PMI.unannotated, features = "percent.endothelial")
## Revisit QC metrics
# nCount
fnCount <- FeaturePlot(PMI.unannotated, features = "nCount_RNA", order = TRUE)
fnCount
# nFeature
fnFeature <- FeaturePlot(PMI.unannotated, features = "nFeature_RNA", order = TRUE)
fnFeature
# percent.mt
fpercent.mt <- FeaturePlot(PMI.unannotated, features = "percent.mt", order = TRUE)
fpercent.mt
# cell.complexity
fcell.complexity <- FeaturePlot(PMI.unannotated, features = "cell.complexity", order = TRUE)
fcell.complexity
fS.Score <- FeaturePlot(PMI.unannotated, features = "S.Score", order = TRUE)
fS.Score
fG2M.Score <- FeaturePlot(PMI.unannotated, features = "G2M.Score", order = TRUE)
fG2M.Score
If order is set to TRUE, cells are plotted in order of expression, so higher-expressing cells are drawn on top. This can be useful when cells expressing a given feature would otherwise be buried under non-expressing cells.
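As a minimal sketch of the difference, the two settings can be drawn side by side with plot_grid(); Ttr is used here only as an example of a sparsely expressed feature.
# with order = TRUE, expressing cells are plotted last so they sit on top
p.unordered <- FeaturePlot(PMI.unannotated, features = "Ttr", order = FALSE)
p.ordered <- FeaturePlot(PMI.unannotated, features = "Ttr", order = TRUE)
plot_grid(p.unordered, p.ordered, labels = c("order = FALSE", "order = TRUE"))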
# cell information
fnCount <-
FeaturePlot(
PMI.unannotated,
features = "nCount_RNA",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fnCount
fnFeature <-
FeaturePlot(
PMI.unannotated,
features = "nFeature_RNA",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fnFeature
fcell.complexity <-
FeaturePlot(
PMI.unannotated,
features = "cell.complexity",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fcell.complexity
fpercent.mt <-
FeaturePlot(
PMI.unannotated,
features = "percent.mt",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.mt
fG2M.Score <-
FeaturePlot(
PMI.unannotated,
features = "G2M.Score",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fG2M.Score
fS.Score <-
FeaturePlot(
PMI.unannotated,
features = "S.Score",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fS.Score
# Gene sets
fpercent.neuron <-
FeaturePlot(
PMI.unannotated,
features = "percent.neuron",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.neuron
fpercent.astrocytes <-
FeaturePlot(
PMI.unannotated,
features = "percent.astrocytes",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.astrocytes
fpercent.microglia <-
FeaturePlot(
PMI.unannotated,
features = "percent.microglia",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.microglia
fpercent.DAM <-
FeaturePlot(
PMI.unannotated,
features = "percent.DAM",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.DAM
fpercent.homeostatic <-
FeaturePlot(
PMI.unannotated,
features = "percent.homeostatic",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.homeostatic
fpercent.oligodendrocytes <-
FeaturePlot(
PMI.unannotated,
features = "percent.oligodendrocytes",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.oligodendrocytes
fpercent.OPCs <-
FeaturePlot(
PMI.unannotated,
features = "percent.OPCs",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.OPCs
fpercent.macrophages <-
FeaturePlot(
PMI.unannotated,
features = "percent.macrophages",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.macrophages
fpercent.endothelial <-
FeaturePlot(
PMI.unannotated,
features = "percent.endothelial",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fpercent.endothelial
# Genes
fMalat1 <-
FeaturePlot(
PMI.unannotated,
features = "Malat1",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fMalat1
fXist <-
FeaturePlot(
PMI.unannotated,
features = "Xist",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fXist
fTtr <-
FeaturePlot(
PMI.unannotated,
features = "Ttr",
split.by = "sample",
keep.scale = "all",
order = TRUE
)
fTtr
Save plots.
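As a minimal sketch, each plot can be written to disk with the saveToPDF() helper defined earlier; the UMAP/ subdirectory and file names used below are hypothetical and should match the project's results layout.
# hypothetical output folder under the results path
plotDir <- paste0("../../results/", pathToTestType, "UMAP/")
# draw the plot first, then copy the current graphics device to file
umap0.1
saveToPDF(paste0(plotDir, treatment, "_unannotated_umap_res0.1.pdf"), width = 7, height = 6)
fpercent.microglia
saveToPDF(paste0(plotDir, treatment, "_percent_microglia_by_sample.pdf"), width = 10, height = 4)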
PMI.unannotated@meta.data$seurat_clusters <-
PMI.unannotated@meta.data$SCT_snn_res.0.1
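# For each grouping variable, compute the percentage of cells each group
# contributes to every cluster and plot the composition as stacked bars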
# treatment
btreatment <- PMI.unannotated@meta.data %>%
group_by(seurat_clusters, treatment) %>%
dplyr::count() %>%
group_by(seurat_clusters) %>%
dplyr::mutate(percent = 100*n/sum(n)) %>%
ungroup() %>%
ggplot(aes(x=seurat_clusters,y=percent, fill=treatment)) +
geom_col() +
scale_fill_manual(values = treatment_colors) +
ggtitle("Percentage of treatment per cluster")
btreatment
# replicate
breplicate <- PMI.unannotated@meta.data %>%
group_by(seurat_clusters, replicate) %>%
dplyr::count() %>%
group_by(seurat_clusters) %>%
dplyr::mutate(percent = 100*n/sum(n)) %>%
ungroup() %>%
ggplot(aes(x=seurat_clusters,y=percent, fill=replicate)) +
geom_col() +
scale_fill_manual(values = replicate_colors) +
ggtitle("Percentage of replicate per cluster")
breplicate
# genotype
bgenotype <- PMI.unannotated@meta.data %>%
group_by(seurat_clusters, genotype) %>%
dplyr::count() %>%
group_by(seurat_clusters) %>%
dplyr::mutate(percent = 100*n/sum(n)) %>%
ungroup() %>%
ggplot(aes(x=seurat_clusters,y=percent, fill=genotype)) +
geom_col() +
scale_fill_manual(values = genotype_colors) +
ggtitle("Percentage of genotype per cluster")
bgenotype
# sample
bsample <- PMI.unannotated@meta.data %>%
group_by(seurat_clusters, sample) %>%
dplyr::count() %>%
group_by(seurat_clusters) %>%
dplyr::mutate(percent = 100*n/sum(n)) %>%
ungroup() %>%
ggplot(aes(x=seurat_clusters,y=percent, fill=sample)) +
geom_col() +
scale_fill_manual(values = sample_colors) +
ggtitle("Percentage of sample per cluster")
bsample
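# Export the number of cells per cluster, broken down by treatment and by sample,
# as tab-delimited tables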
treatment_ncells <- FetchData(PMI.unannotated,
vars = c("ident", "treatment")) %>%
dplyr::count(ident, treatment) %>%
tidyr::spread(ident, n)
write.table(treatment_ncells,
paste0("../../results/", pathToTestType, "nCells/",
treatment, "_",tolower(tissue),
"_cells_per_cluster_treatment.txt"),
quote = FALSE, sep = "\t")
sample_ncells <- FetchData(PMI.unannotated,
vars = c("ident", "sample")) %>%
dplyr::count(ident,sample) %>%
tidyr::spread(ident, n)
write.table(sample_ncells,
paste0("../../results/", pathToTestType, "nCells/",
treatment, "_",tolower(tissue),
"_cells_per_cluster_sample.txt"),
quote = FALSE, sep = "\t")