Loading required package: SeuratObject
Loading required package: sp
Attaching package: 'SeuratObject'
The following objects are masked from 'package:base':
intersect, t
── Installed datasets ──────────────────────────────── SeuratData v0.2.2.9001 ──
✔ pbmcref 1.0.0 ✔ pbmcsca 3.0.0
────────────────────────────────────── Key ─────────────────────────────────────
✔ Dataset loaded successfully
❯ Dataset built with a newer version of Seurat than installed
❓ Unknown version of Seurat installed
Attaching package: 'dplyr'
The following objects are masked from 'package:stats':
filter, lag
The following objects are masked from 'package:base':
intersect, setdiff, setequal, union
── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
✔ forcats 1.0.0 ✔ readr 2.1.5
✔ ggplot2 3.5.1 ✔ stringr 1.5.1
✔ lubridate 1.9.3 ✔ tibble 3.2.1
✔ purrr 1.0.2 ✔ tidyr 1.3.1
── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag() masks stats::lag()
ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
Attaching package: 'magrittr'
The following object is masked from 'package:purrr':
set_names
The following object is masked from 'package:tidyr':
extract
Attaching package: 'dbplyr'
The following objects are masked from 'package:dplyr':
ident, sql
Registered S3 method overwritten by 'SeuratDisk':
method from
as.sparse.H5Group Seurat
Attaching shinyBS
Loading required package: ggraph
Attaching package: 'ggraph'
The following object is masked from 'package:sp':
geometry
# Load the Seurat object merged from the cell lines and a control (PBMC), after filtering
load("0-imp_Robj/All_Samples_Merged_with_10x_Azitmuth_Annotated_SCT_HPC_without_harmony_integration_removed_nonCD4cells_from_control_and_Bcells_from_L4.robj")
All_samples_Merged
An object of class Seurat
62931 features across 49388 samples within 6 assays
Active assay: SCT (26179 features, 3000 variable features)
3 layers present: counts, data, scale.data
5 other assays present: RNA, ADT, prediction.score.celltype.l1, prediction.score.celltype.l2, prediction.score.celltype.l3
4 dimensional reductions calculated: integrated_dr, ref.umap, pca, umap
# Load necessary libraries
library(Seurat)
# Display basic metadata summary
head(All_samples_Merged@meta.data)
# Check that the required metadata columns (`orig.ident`, `nCount_RNA`, `nFeature_RNA`, `nUMI`, `ngene`) exist
required_columns <- c("orig.ident", "nCount_RNA", "nFeature_RNA", "nUMI", "ngene")
missing_columns <- setdiff(required_columns, colnames(All_samples_Merged@meta.data))
if (length(missing_columns) > 0) {
cat("Missing columns:", paste(missing_columns, collapse = ", "), "\n")
} else {
cat("All required columns are present.\n")
}
All required columns are present.
# Check cell counts and features
cat("Number of cells:", ncol(All_samples_Merged), "\n")
Number of cells: 49388
cat("Number of features:", nrow(All_samples_Merged), "\n")
Number of features: 26179
# Verify that each `orig.ident` label has the correct number of cells
cat("Cell counts per group:\n")
Cell counts per group:
print(table(All_samples_Merged$orig.ident))
L1 L2 L3 L4 L5 L6 L7 PBMC PBMC10x
5825 5935 6428 6023 6022 5148 5331 5171 3505
# Check that the cell IDs are unique (which ensures no issues from merging)
if (any(duplicated(colnames(All_samples_Merged)))) {
cat("Warning: There are duplicated cell IDs.\n")
} else {
cat("Cell IDs are unique.\n")
}
Cell IDs are unique.
# Check the assay consistency for RNA
DefaultAssay(All_samples_Merged) <- "RNA"
# Check dimensions of the RNA counts layer via the layer-based accessor (Seurat v5)
cat("Dimensions of the RNA counts layer:", dim(GetAssayData(All_samples_Merged, layer = "counts")), "\n")
Dimensions of the RNA counts layer: 36601 49388
cat("Dimensions of the RNA data layer:", dim(GetAssayData(All_samples_Merged, layer = "data")), "\n")
Dimensions of the RNA data layer: 36601 49388
# Check the ADT assay (optional)
if ("ADT" %in% names(All_samples_Merged@assays)) {
cat("ADT assay is present.\n")
cat("Dimensions of the ADT counts layer:", dim(GetAssayData(All_samples_Merged, assay = "ADT", layer = "counts")), "\n")
} else {
cat("ADT assay is not present.\n")
}
ADT assay is present.
Dimensions of the ADT counts layer: 56 49388
# InstallData("pbmcref")
#
# # The RunAzimuth function can take a Seurat object as input
# All_samples_Merged <- RunAzimuth(All_samples_Merged, reference = "pbmcref")
# Remove the percent.mito column if it exists (the warning below shows it was already absent)
All_samples_Merged$percent.mito <- NULL
Warning: Cannot find cell-level meta data named percent.mito
# Set identity classes to an existing column in meta data
Idents(object = All_samples_Merged) <- "cell_line"
All_samples_Merged[["percent.rb"]] <- PercentageFeatureSet(All_samples_Merged,
pattern = "^RP[SL]")
# Convert 'percent.rb' to numeric, replacing NaN values with 0
All_samples_Merged$percent.rb <- replace(as.numeric(All_samples_Merged$percent.rb), is.na(All_samples_Merged$percent.rb), 0)
# The [[ operator can add columns to object metadata. This is a great place to stash QC stats
All_samples_Merged[["percent.mt"]] <- PercentageFeatureSet(All_samples_Merged, pattern = "^MT-")
# Convert 'percent.mt' to numeric, replacing NaN values with 0
All_samples_Merged$percent.mt <- replace(as.numeric(All_samples_Merged$percent.mt), is.na(All_samples_Merged$percent.mt), 0)
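# Optional sanity check (sketch): numeric summary of the QC metrics added above
# summary(All_samples_Merged@meta.data[, c("nFeature_RNA", "nCount_RNA", "percent.mt", "percent.rb")])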
VlnPlot(All_samples_Merged, features = c("nFeature_RNA",
"nCount_RNA",
"percent.mt",
"percent.rb"),
ncol = 4, pt.size = 0.1) &
theme(plot.title = element_text(size=10))
FeatureScatter(All_samples_Merged, feature1 = "percent.mt",
feature2 = "percent.rb")
VlnPlot(All_samples_Merged, features = c("nFeature_RNA",
"nCount_RNA",
"percent.mt"),
ncol = 3)
FeatureScatter(All_samples_Merged,
feature1 = "percent.mt",
feature2 = "percent.rb") +
geom_smooth(method = 'lm')
`geom_smooth()` using formula = 'y ~ x'
FeatureScatter(All_samples_Merged,
feature1 = "nCount_RNA",
feature2 = "nFeature_RNA") +
geom_smooth(method = 'lm')
`geom_smooth()` using formula = 'y ~ x'
# FeatureScatter is typically used to visualize feature-feature relationships, but can be used
# for anything calculated by the object, i.e. columns in object metadata, PC scores, etc.
FeatureScatter(All_samples_Merged,
feature1 = "nCount_RNA",
feature2 = "percent.mt")+
geom_smooth(method = 'lm')
`geom_smooth()` using formula = 'y ~ x'
Running SCTransform on assay: RNA
Running SCTransform on layer: counts
vst.flavor='v2' set. Using model with fixed slope and excluding poisson genes.
Variance stabilizing transformation of count matrix of size 26179 by 49388
Model formula is y ~ log_umi
Get Negative Binomial regression parameters per gene
Using 2000 genes, 5000 cells
Found 487 outliers - those will be ignored in fitting/regularization step
Second step: Get residuals using fitted parameters for 26179 genes
Computing corrected count matrix for 26179 genes
Calculating gene attributes
Wall clock passed: Time difference of 8.072341 mins
Determine variable features
Getting residuals for block 1(of 10) for counts dataset
Getting residuals for block 2(of 10) for counts dataset
Getting residuals for block 3(of 10) for counts dataset
Getting residuals for block 4(of 10) for counts dataset
Getting residuals for block 5(of 10) for counts dataset
Getting residuals for block 6(of 10) for counts dataset
Getting residuals for block 7(of 10) for counts dataset
Getting residuals for block 8(of 10) for counts dataset
Getting residuals for block 9(of 10) for counts dataset
Getting residuals for block 10(of 10) for counts dataset
Finished calculating residuals for counts
Set default assay to SCT
Warning: The following features are not present in the object: MLF1IP, not
searching for symbol synonyms
Warning: The following features are not present in the object: FAM64A, HN1, not
searching for symbol synonyms
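# The log above appears to come from an initial SCTransform run plus cell-cycle scoring whose
# code chunk is not echoed here; a minimal sketch of that scoring step, assuming Seurat's
# built-in cc.genes lists were used to create the CC.Difference column regressed out below:
# s.genes <- cc.genes$s.genes
# g2m.genes <- cc.genes$g2m.genes
# All_samples_Merged <- CellCycleScoring(All_samples_Merged, s.features = s.genes,
#                                        g2m.features = g2m.genes, set.ident = FALSE)
# All_samples_Merged$CC.Difference <- All_samples_Merged$S.Score - All_samples_Merged$G2M.Score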
# Apply SCTransform
All_samples_Merged <- SCTransform(All_samples_Merged,
vars.to.regress = c("percent.rb","percent.mt", "CC.Difference", "nCount_RNA"),
do.scale=TRUE,
do.center=TRUE,
verbose = TRUE)
Running SCTransform on assay: RNA
Running SCTransform on layer: counts
vst.flavor='v2' set. Using model with fixed slope and excluding poisson genes.
Variance stabilizing transformation of count matrix of size 26179 by 49388
Model formula is y ~ log_umi
Get Negative Binomial regression parameters per gene
Using 2000 genes, 5000 cells
Found 487 outliers - those will be ignored in fitting/regularization step
Second step: Get residuals using fitted parameters for 26179 genes
Computing corrected count matrix for 26179 genes
Calculating gene attributes
Wall clock passed: Time difference of 6.563877 mins
Determine variable features
Regressing out percent.rb, percent.mt, CC.Difference, nCount_RNA
Centering and scaling data matrix
Getting residuals for block 1(of 10) for counts dataset
Getting residuals for block 2(of 10) for counts dataset
Getting residuals for block 3(of 10) for counts dataset
Getting residuals for block 4(of 10) for counts dataset
Getting residuals for block 5(of 10) for counts dataset
Getting residuals for block 6(of 10) for counts dataset
Getting residuals for block 7(of 10) for counts dataset
Getting residuals for block 8(of 10) for counts dataset
Getting residuals for block 9(of 10) for counts dataset
Getting residuals for block 10(of 10) for counts dataset
Regressing out percent.rb, percent.mt, CC.Difference, nCount_RNA
Centering and scaling data matrix
Finished calculating residuals for counts
Set default assay to SCT
Variables_genes <- All_samples_Merged@assays$SCT@var.features
# Exclude variable genes whose names start with HLA-, XIST, TRBV, or TRAV
Variables_genes_after_exclusion <- Variables_genes[!grepl("^HLA-|^XIST|^TRBV|^TRAV", Variables_genes)]
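# Quick check (sketch): how many variable features the exclusion removed
# length(Variables_genes) - length(Variables_genes_after_exclusion)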
# Set the seed for clustering steps
set.seed(123)
# These are now standard steps in the Seurat workflow for visualization and clustering
All_samples_Merged <- RunPCA(All_samples_Merged,
features = Variables_genes_after_exclusion,
do.print = TRUE,
pcs.print = 1:5,
genes.print = 15,
npcs = 50)
PC_ 1
Positive: NPM1, SEC11C, YBX3, MTHFD2, VDAC1, MTDH, HDGFL3, IL2RA, CCT8, C12orf75
RBM17, HINT2, PRELID1, KRT7, BATF3, RAN, CCND2, RAD21, SPATS2L, MIR155HG
PRDX1, HSP90AB1, MINDY3, HTATIP2, CANX, VIM, EGFL6, CTSH, SRM, SLC35F3
Negative: CD7, KIR3DL1, PRKCH, SEPTIN9, KIR2DL3, PTPRC, CST7, CLEC2B, EPCAM, XCL1
ESYT2, MATK, CD52, KIR3DL2, GZMM, CD3G, TRGV2, MYO1E, KIR2DL4, ARHGAP15
KLRC1, CXCR3, KLRK1, CD6, MALAT1, SH3BGRL3, TC2N, LEF1, RPS27, PTPN6
PC_ 2
Positive: C12orf75, CYBA, HACD1, LY6E, SCCPDH, EGFL6, ATP5MC1, TNFRSF4, ENO1, APRT
ARPC2, BACE2, TIGIT, GGH, PTP4A3, SYT4, SPINT2, CHCHD2, CCL17, CORO1B
RPL27A, NME2, COX6A1, PON2, CTSC, GYPC, NME1, NET1, PLPP1, RHOC
Negative: PAGE5, RPL35A, RBPMS, CD74, NDUFV2, TENM3, LMNA, RPL22L1, CDKN2A, RPS3A
KIF2A, RPL11, PSMB9, ANXA5, PLD1, PPP2R2B, FAM241A, B2M, SPOCK1, VAMP5
STAT1, FAM50B, ERAP2, ZC2HC1A, SH3KBP1, GPX4, IFI27L2, RPS14, MSC-AS1, CTAG2
PC_ 3
Positive: RPL30, RPL39, RPS27, RPS4Y1, ETS1, MT-ND3, MALAT1, BTG1, RPS29, TCF7
TPT1, FYB1, RPL34, ZBTB20, ANK3, SELL, SARAF, IL7R, LINC00861, TXNIP
RIPOR2, CSGALNACT1, PNRC1, PIK3IP1, THEMIS, FAM107B, CAMK4, EEF1A2, RPS27A, LINC01934
Negative: PFN1, NME2, RPS15, KIR3DL2, MIF, EIF4A1, ACTB, NDUFA4, C1QBP, RPL19
CHCHD2, ATP5MC3, KIR2DL3, HMGN2, CLIC1, EIF5A, KIR3DL1, MT-CO2, RPL27A, GAPDH
TUBA1B, CST7, TUBB4B, COX6A1, DAD1, RPS2, TRGV2, PSMB6, EPCAM, STMN1
PC_ 4
Positive: HSPE1, EIF5A, RPL34, ATP5MC3, RPS4Y1, MT-ND3, ODC1, CHCHD10, CYCS, CYC1
HSPD1, RPL39, GCSH, PPBP, RPS29, FKBP4, PPID, HSP90AA1, TCF7, GSTP1
FCER2, RPL30, CD7, TOMM40, DNAJC12, FAM162A, FKBP11, ATP5F1B, C1QBP, PRELID3B
Negative: RPS4X, GAS5, KRT1, EGLN3, LINC02752, WFDC1, TTC29, TBX4, RPLP1, RPL13
AC069410.1, IFNGR1, PLCB1, TNS4, SP5, IL32, FAM9C, S100A11, SEMA4A, IL4
NKG7, LINC00469, S100A4, HSPB1, S100A6, CEBPD, RPLP0, VIPR2, NPTX1, VIM
PC_ 5
Positive: TMSB4X, LGALS1, TMSB10, S100A11, S100A4, S100A6, COTL1, LSP1, TP73, IFITM2
TAGLN2, GPAT3, TMEM163, HOXC9, LIME1, DUSP4, LAPTM5, GAS2L1, GPAT2, CRIP1
TNFRSF18, EEF1A2, QPRT, EMP3, IFITM1, MIIP, PRDX5, CARHSP1, RBM38, MRPS6
Negative: CCL17, MIR155HG, MAP4K4, LRBA, RXFP1, MYO1D, PRKCA, RUNX1, CA10, CFI
CA2, FRMD4A, THY1, AL590550.1, IMMP2L, DOCK10, NFIB, EZH2, LTA, SNTB1
IGHE, HS3ST1, SLC35F3, RANBP17, MGST3, CCL5, AKAP12, AC100801.1, EPB41L2, ONECUT2
# determine dimensionality of the data
ElbowPlot(All_samples_Merged, ndims = 50)
library(ggplot2)
library(RColorBrewer)
# Generate a color palette with 10 colors for the cell-line groups (Set3 supports up to 12)
cell_line_colors <- brewer.pal(10, "Set3")
# Assuming All_samples_Merged$cell_line is a factor or character vector containing cell line names
data <- as.data.frame(table(All_samples_Merged$cell_line))
colnames(data) <- c("cell_line", "nUMI") # the "nUMI" column here actually holds the number of cells per cell line
ncells <- ggplot(data, aes(x = cell_line, y = nUMI, fill = cell_line)) +
geom_col() +
theme_classic() +
geom_text(aes(label = nUMI),
position = position_dodge(width = 0.9),
vjust = -0.25) +
scale_fill_manual(values = cell_line_colors) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),
plot.title = element_text(hjust = 0.5)) + # Adjust the title position
ggtitle("Filtered cells per sample") +
xlab("Cell lines") + # Adjust x-axis label
ylab("Frequency") # Adjust y-axis label
print(ncells)
# TEST-1
# Quantitative cutoff based on the "pca" reduction computed by RunPCA above
# pct = standard deviation of each PC as a percentage of the total
pct <- All_samples_Merged[["pca"]]@stdev / sum(All_samples_Merged[["pca"]]@stdev) * 100
cumu <- cumsum(pct) # cumulative percentage for each PC
# co2 = last PC where the percentage drop to the following PC is still greater than 0.1
co2 <- sort(which((pct[-length(pct)] - pct[-1]) > 0.1), decreasing = TRUE)[1] + 1
co2
[1] 16
# TEST-2
# Get significant PCs: co1 = first PC where cumulative percentage exceeds 90% while the PC
# itself contributes < 5%; co2 = the drop-based cutoff from TEST-1; keep the smaller of the two
stdv <- All_samples_Merged[["pca"]]@stdev
sum.stdv <- sum(All_samples_Merged[["pca"]]@stdev)
percent.stdv <- (stdv / sum.stdv) * 100
cumulative <- cumsum(percent.stdv)
co1 <- which(cumulative > 90 & percent.stdv < 5)[1]
# Note: 1:length(x) - 1 evaluates to 0:(length(x) - 1); R drops the 0 index, so this
# subtracts each PC's percentage from the previous one, as in TEST-1
co2 <- sort(which((percent.stdv[1:length(percent.stdv) - 1] -
                   percent.stdv[2:length(percent.stdv)]) > 0.1),
            decreasing = TRUE)[1] + 1
min.pc <- min(co1, co2)
min.pc
[1] 16
# Create a dataframe with values
plot_df <- data.frame(pct = percent.stdv,
cumu = cumulative,
rank = 1:length(percent.stdv))
# Elbow plot to visualize
ggplot(plot_df, aes(cumulative, percent.stdv, label = rank, color = rank > min.pc)) +
geom_text() +
geom_vline(xintercept = 90, color = "grey") +
geom_hline(yintercept = min(percent.stdv[percent.stdv > 5]), color = "grey") +
theme_bw()
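# Optional (sketch): tie the downstream steps to the computed cutoff (min.pc = 16 here,
# matching the hard-coded dims = 1:16 used below) instead of hard-coding it
# All_samples_Merged <- FindNeighbors(All_samples_Merged, dims = 1:min.pc, verbose = FALSE)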
# Set the seed for clustering steps
set.seed(123)
All_samples_Merged <- FindNeighbors(All_samples_Merged,
dims = 1:16,
verbose = FALSE)
# Cluster over a range of resolutions to see how resolution affects the result
All_samples_Merged <- FindClusters(All_samples_Merged,
resolution = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,0.8, 0.9, 1,1.2,1.5,2))
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9871
Number of communities: 11
Elapsed time: 17 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9766
Number of communities: 12
Elapsed time: 15 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9661
Number of communities: 14
Elapsed time: 16 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9560
Number of communities: 15
Elapsed time: 13 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9465
Number of communities: 17
Elapsed time: 14 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9379
Number of communities: 19
Elapsed time: 12 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9295
Number of communities: 19
Elapsed time: 13 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9210
Number of communities: 19
Elapsed time: 12 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9137
Number of communities: 23
Elapsed time: 13 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.9066
Number of communities: 24
Elapsed time: 13 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.8966
Number of communities: 29
Elapsed time: 13 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.8826
Number of communities: 35
Elapsed time: 13 seconds
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1623994
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.8634
Number of communities: 41
Elapsed time: 13 seconds
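# FindClusters() with a vector of resolutions stores one metadata column per resolution
# (SCT_snn_res.<r>); a quick check (sketch) of which columns were added:
# grep("^SCT_snn_res\\.", colnames(All_samples_Merged@meta.data), value = TRUE)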
# Set the seed for clustering steps
set.seed(123)
# non-linear dimensionality reduction --------------
All_samples_Merged <- RunUMAP(All_samples_Merged,
dims = 1:16,
verbose = FALSE)
Warning: The default method for RunUMAP has changed from calling Python UMAP via reticulate to the R-native UWOT using the cosine metric
To use Python UMAP via reticulate, set umap.method to 'umap-learn' and metric to 'correlation'
This message will be shown once per session
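# Per the warning above, the previous Python UMAP behavior could be reproduced if needed (sketch):
# All_samples_Merged <- RunUMAP(All_samples_Merged, dims = 1:16,
#                               umap.method = "umap-learn", metric = "correlation")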
# Note that you can set `label = TRUE` or use the LabelClusters() function to help label
# individual clusters
DimPlot(All_samples_Merged,group.by = "cell_line",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,group.by = "predicted.celltype.l2",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.1",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.2",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.3",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.4",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.5",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.6",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.7",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.8",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.0.9",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.1",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.1.2",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.1.5",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged,
group.by = "SCT_snn_res.2",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
# Set identity classes to an existing column in meta data
Idents(object = All_samples_Merged) <- "SCT_snn_res.0.9"
cluster_table <- table(Idents(All_samples_Merged))
barplot(cluster_table, main = "Number of Cells in Each Cluster",
xlab = "Cluster",
ylab = "Number of Cells",
col = rainbow(length(cluster_table)))
print(cluster_table)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
6287 5824 4874 4487 3413 3341 3305 3036 2796 2233 2027 2015 1753 1378 700 514
16 17 18 19 20 21 22
391 265 195 193 182 107 72
table(All_samples_Merged$predicted.celltype.l2, All_samples_Merged$SCT_snn_res.0.1)
0 1 2 3 4 5 6 7 8 9 10
B intermediate 0 3 0 0 0 3 0 1 0 0 0
B memory 8 6 1 0 116 85 0 32 0 4 0
CD14 Mono 0 0 2 0 7 0 0 4 0 0 13
CD4 CTL 0 0 0 0 0 0 12 0 0 0 1
CD4 Naive 0 8 0 0 0 0 521 0 1512 0 1
CD4 Proliferating 5449 2472 5388 2852 4161 4041 0 3247 6 1395 0
CD4 TCM 873 3412 522 268 487 584 4481 108 1996 45 54
CD4 TEM 0 1 0 0 0 0 62 0 21 0 0
CD8 Proliferating 0 0 0 0 1 1 0 0 0 0 0
CD8 TCM 0 1 0 16 0 0 0 0 0 0 0
CD8 TEM 0 1 0 8 1 3 0 2 0 0 0
cDC1 0 0 0 0 0 6 0 2 0 0 0
cDC2 0 0 2 0 35 3 0 10 0 1 2
dnT 0 3 1 2 3 4 0 2 0 0 0
HSPC 57 10 1 0 489 213 0 674 0 361 0
ILC 0 1 0 0 0 0 0 0 0 0 0
NK 0 0 0 0 0 0 0 0 0 0 1
NK Proliferating 4 40 23 2785 36 262 0 10 0 1 0
Treg 15 13 1 0 0 14 0 0 1 1 0
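# Sketch: the dominant Azimuth level-2 label per res-0.1 cluster, to summarize the table above
# apply(table(All_samples_Merged$predicted.celltype.l2, All_samples_Merged$SCT_snn_res.0.1),
#       2, function(x) names(which.max(x)))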
# Visualize how clusters split and merge across resolutions with clustree
library(clustree)
clustree(All_samples_Merged, prefix = "SCT_snn_res.")
# InstallData("pbmcref")
#
# # The RunAzimuth function can take a Seurat object as input
# All_samples_Merged <- RunAzimuth(All_samples_Merged, reference = "pbmcref")
DimPlot(All_samples_Merged, group.by = "predicted.celltype.l1",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged, group.by = "predicted.celltype.l1",
reduction = "umap",
label.size = 3,
repel = T,
label = F)
DimPlot(All_samples_Merged, group.by = "predicted.celltype.l2",
reduction = "umap",
label.size = 3,
repel = T,
label = T, label.box = T)
DimPlot(All_samples_Merged, group.by = "predicted.celltype.l2",
reduction = "umap",
label.size = 3,
repel = T,
label = F)
table(All_samples_Merged$predicted.celltype.l2, All_samples_Merged$SCT_snn_res.0.2)
0 1 2 3 4 5 6 7 8 9 10 11
B intermediate 0 3 0 0 0 3 0 1 0 0 0 0
B memory 8 6 1 0 116 85 0 32 0 4 0 0
CD14 Mono 0 0 2 0 7 0 0 4 0 0 0 13
CD4 CTL 0 0 0 0 0 0 12 0 0 0 0 1
CD4 Naive 0 8 0 0 0 0 521 0 1479 0 33 1
CD4 Proliferating 5449 2471 5388 2852 4162 4041 0 3247 6 1395 0 0
CD4 TCM 873 3412 522 268 487 584 4481 108 1835 45 161 54
CD4 TEM 0 1 0 0 0 0 62 0 21 0 0 0
CD8 Proliferating 0 0 0 0 1 1 0 0 0 0 0 0
CD8 TCM 0 1 0 16 0 0 0 0 0 0 0 0
CD8 TEM 0 1 0 8 1 3 0 2 0 0 0 0
cDC1 0 0 0 0 0 6 0 2 0 0 0 0
cDC2 0 0 2 0 35 3 0 10 0 1 0 2
dnT 0 3 1 2 3 4 0 2 0 0 0 0
HSPC 57 10 1 0 489 213 0 674 0 361 0 0
ILC 0 1 0 0 0 0 0 0 0 0 0 0
NK 0 0 0 0 0 0 0 0 0 0 0 1
NK Proliferating 4 40 23 2785 36 262 0 10 0 1 0 0
Treg 15 13 1 0 0 14 0 0 0 1 1 0
#save(All_samples_Merged, file = "0-imp_Robj/CD4Tcells_with_removed_nonCD4Tcells_and_B_cells_from_l4_SCT.robj")
# Load required libraries
library(Seurat)
library(harmony)
Loading required package: Rcpp
library(ggplot2)
# Run Harmony, correcting for batch effects using "cell_line" as the grouping variable
All_samples_Merged <- RunHarmony(
  All_samples_Merged,
  group.by.vars = "cell_line", # metadata column specifying the batch / cell line
  assay.use = "SCT")
Transposing data matrix
Initializing state using k-means centroids initialization
Harmony 1/10
Harmony 2/10
Harmony converged after 2 iterations
# Check results in harmony embeddings
harmony_embeddings <- Embeddings(All_samples_Merged, reduction = "harmony")
head(harmony_embeddings)
harmony_1 harmony_2 harmony_3 harmony_4 harmony_5
L1_AAACCTGAGGGCTTCC-1 2.372506 -0.4652384 -1.3864390 -6.101962 1.9662766
L1_AAACCTGGTGCAGGTA-1 -11.880105 1.4130639 1.7998812 -12.903461 -2.3062708
L1_AAACCTGGTTAAAGTG-1 -11.656453 1.9009972 -2.8572775 -7.844086 1.6502197
L1_AAACCTGTCAGGTAAA-1 1.010652 1.2426571 -6.4541501 -1.065624 3.2007142
L1_AAACCTGTCCCTGACT-1 1.966541 0.2846983 0.2567532 -2.670933 -0.4542568
L1_AAACCTGTCCTTCAAT-1 -15.313004 1.3893831 6.4963875 -5.672730 -7.0802410
harmony_6 harmony_7 harmony_8 harmony_9 harmony_10
L1_AAACCTGAGGGCTTCC-1 0.8706431 -2.4265880 2.4200283 3.4342559 5.7009236
L1_AAACCTGGTGCAGGTA-1 8.1602019 -0.6291847 -0.9178515 1.3373910 -0.7982891
L1_AAACCTGGTTAAAGTG-1 2.8815515 -1.9330178 4.8779608 -0.9739285 -4.2107025
L1_AAACCTGTCAGGTAAA-1 0.6805172 0.2769258 0.7634054 -1.1441131 -4.1797937
L1_AAACCTGTCCCTGACT-1 1.0147231 -1.6400817 1.2952512 0.7321718 3.2792296
L1_AAACCTGTCCTTCAAT-1 3.5085589 -7.4269497 4.1304803 -1.0812541 3.5872445
harmony_11 harmony_12 harmony_13 harmony_14 harmony_15
L1_AAACCTGAGGGCTTCC-1 -0.1649760 -0.5332741 -0.1558125 -2.644081 0.002342797
L1_AAACCTGGTGCAGGTA-1 2.2977767 -1.1447399 -0.6881687 1.255205 -1.291901028
L1_AAACCTGGTTAAAGTG-1 -3.3878640 0.8610558 2.5889053 4.187525 -3.955024871
L1_AAACCTGTCAGGTAAA-1 -0.3176378 1.7939973 1.9788248 1.327084 -0.102736887
L1_AAACCTGTCCCTGACT-1 -0.2898779 0.1152865 0.3996874 -1.889512 1.985317322
L1_AAACCTGTCCTTCAAT-1 -0.4167058 -1.7405263 -0.6149283 1.046365 0.417234070
harmony_16 harmony_17 harmony_18 harmony_19 harmony_20
L1_AAACCTGAGGGCTTCC-1 -2.1073083 0.07116993 -0.04525818 0.8535713 -3.8425677
L1_AAACCTGGTGCAGGTA-1 1.6612004 -0.44929869 0.90198271 -2.6848916 -5.1954291
L1_AAACCTGGTTAAAGTG-1 0.1497240 2.74358729 0.24408976 -1.1406271 -1.6747849
L1_AAACCTGTCAGGTAAA-1 0.5369931 -0.65889922 0.57042062 -0.1430303 2.9883632
L1_AAACCTGTCCCTGACT-1 -4.3523206 3.91986786 -0.39367004 2.6081013 -0.8222636
L1_AAACCTGTCCTTCAAT-1 -0.1853333 0.31358363 0.86841910 -0.9707719 -1.4389134
harmony_21 harmony_22 harmony_23 harmony_24 harmony_25
L1_AAACCTGAGGGCTTCC-1 -5.593843 -6.9012757 -3.9749638 -0.2139104 -2.4696243
L1_AAACCTGGTGCAGGTA-1 -3.233125 -3.6666164 0.5614491 -0.9917646 -1.1874664
L1_AAACCTGGTTAAAGTG-1 -3.185765 -2.1602869 -1.1014216 -1.8780882 -1.7204409
L1_AAACCTGTCAGGTAAA-1 2.082967 0.4522502 1.6680793 1.0416562 0.1427182
L1_AAACCTGTCCCTGACT-1 -5.119547 -3.5243394 -1.1678788 -0.3389346 0.4990246
L1_AAACCTGTCCTTCAAT-1 -2.357326 -4.2995177 -1.7158355 2.1806720 -0.8136284
harmony_26 harmony_27 harmony_28 harmony_29 harmony_30
L1_AAACCTGAGGGCTTCC-1 0.06629435 1.07790707 -0.02810575 -0.9961143 -0.8140662
L1_AAACCTGGTGCAGGTA-1 0.11163170 -0.08329564 -0.68440038 0.8176656 -0.4734832
L1_AAACCTGGTTAAAGTG-1 -1.23931373 1.62953698 0.61275435 -1.7238315 -1.4961591
L1_AAACCTGTCAGGTAAA-1 -1.17730272 0.78922912 -0.65264018 1.6380596 -0.9888842
L1_AAACCTGTCCCTGACT-1 1.16548772 -0.20465007 1.00915454 -1.0349853 -0.2539486
L1_AAACCTGTCCTTCAAT-1 -0.23879318 1.67333247 -0.68390955 1.9219087 -2.0685288
harmony_31 harmony_32 harmony_33 harmony_34 harmony_35
L1_AAACCTGAGGGCTTCC-1 -2.564188 0.1068911 1.7708540 -0.6124385 2.4014274
L1_AAACCTGGTGCAGGTA-1 2.024115 -1.8391988 -3.0740876 0.8055699 -2.0923606
L1_AAACCTGGTTAAAGTG-1 -4.133319 1.3588613 0.9110234 -0.2819785 0.9795468
L1_AAACCTGTCAGGTAAA-1 0.828385 -1.1689358 -1.0635701 -0.7054131 1.5992768
L1_AAACCTGTCCCTGACT-1 -2.023087 1.5968169 2.8227119 -0.9710984 1.4832003
L1_AAACCTGTCCTTCAAT-1 -1.261143 0.7025339 -1.8885806 -2.1237109 3.3705426
harmony_36 harmony_37 harmony_38 harmony_39 harmony_40
L1_AAACCTGAGGGCTTCC-1 -0.119383 0.9343716 -0.80342322 -0.4789469 -0.1378823
L1_AAACCTGGTGCAGGTA-1 -1.328527 -0.5962343 0.23457964 -0.1262493 -2.1876238
L1_AAACCTGGTTAAAGTG-1 1.649833 -0.2296077 0.00745321 0.6239973 0.5127185
L1_AAACCTGTCAGGTAAA-1 0.384464 0.4026464 1.14064041 1.0833285 0.3438146
L1_AAACCTGTCCCTGACT-1 -1.071330 1.6709080 -1.60091722 -0.8663288 1.5554283
L1_AAACCTGTCCTTCAAT-1 -2.431456 1.0180089 -1.23446212 0.4764892 -0.2561857
harmony_41 harmony_42 harmony_43 harmony_44 harmony_45
L1_AAACCTGAGGGCTTCC-1 -0.1155697 -0.04322491 -0.1000986 0.6626011 1.5337581
L1_AAACCTGGTGCAGGTA-1 -1.3447455 -0.29085193 1.4474435 0.2637812 -0.4246334
L1_AAACCTGGTTAAAGTG-1 1.8169554 2.06097277 1.1916254 -0.6011642 -1.8631830
L1_AAACCTGTCAGGTAAA-1 -1.1467332 1.38507993 -2.2342562 -0.0879406 -0.2128552
L1_AAACCTGTCCCTGACT-1 1.7450815 -0.55590967 -0.5864848 0.8599249 0.5276650
L1_AAACCTGTCCTTCAAT-1 3.2082039 3.25146017 -1.3766677 0.9836097 0.4746092
harmony_46 harmony_47 harmony_48 harmony_49 harmony_50
L1_AAACCTGAGGGCTTCC-1 -1.0245671 0.2216869 0.8437829 0.7876506 -0.34316919
L1_AAACCTGGTGCAGGTA-1 0.7766075 -1.1352876 -1.8048034 0.9084398 0.03425543
L1_AAACCTGGTTAAAGTG-1 -1.8451380 0.9181067 -2.0254307 -0.7391285 1.49266921
L1_AAACCTGTCAGGTAAA-1 -0.3869980 -0.7053866 -1.8087832 0.1142819 0.85874493
L1_AAACCTGTCCCTGACT-1 -0.4365963 -0.8258234 1.6246417 0.1162626 -0.37204850
L1_AAACCTGTCCTTCAAT-1 -1.5806012 -0.6615639 -0.8554796 -1.7950746 1.97106686
# Set the seed for clustering steps
set.seed(123)
# Run UMAP on Harmony embeddings
All_samples_Merged <- RunUMAP(All_samples_Merged, reduction = "harmony", dims = 1:16)
10:38:02 UMAP embedding parameters a = 0.9922 b = 1.112
10:38:02 Read 49388 rows and found 16 numeric columns
10:38:02 Using Annoy for neighbor search, n_neighbors = 30
10:38:02 Building Annoy index with metric = cosine, n_trees = 50
0% 10 20 30 40 50 60 70 80 90 100%
[----|----|----|----|----|----|----|----|----|----|
**************************************************|
10:38:07 Writing NN index file to temp file /tmp/RtmpPB0w8q/file2c48d1c6d96fa
10:38:07 Searching Annoy index using 1 thread, search_k = 3000
10:38:26 Annoy recall = 100%
10:38:27 Commencing smooth kNN distance calibration using 1 thread with target n_neighbors = 30
10:38:32 Initializing from normalized Laplacian + noise (using RSpectra)
10:38:35 Commencing optimization for 200 epochs, with 2085280 positive edges
10:39:38 Optimization finished
# Set the seed for clustering steps
set.seed(123)
# Optionally, find neighbors and clusters (if you plan to do clustering analysis)
All_samples_Merged <- FindNeighbors(All_samples_Merged, reduction = "harmony", dims = 1:16)
Computing nearest neighbor graph
Computing SNN
All_samples_Merged <- FindClusters(All_samples_Merged, resolution = 0.5) # Adjust resolution as needed
Modularity Optimizer version 1.3.0 by Ludo Waltman and Nees Jan van Eck
Number of nodes: 49388
Number of edges: 1487073
Running Louvain algorithm...
Maximum modularity in 10 random starts: 0.8815
Number of communities: 14
Elapsed time: 19 seconds
# Visualize UMAP
DimPlot(All_samples_Merged, reduction = "umap", group.by = "cell_line", label = TRUE, pt.size = 0.5) +
ggtitle("UMAP of Harmony-Integrated Data")
# Visualize UMAP with batch/cell line information
DimPlot(All_samples_Merged, reduction = "umap", group.by = "cell_line", label = TRUE, pt.size = 0.5) +
ggtitle("UMAP - Colored by Cell Line (After Harmony Integration)")
# Visualize UMAP with clusters
DimPlot(All_samples_Merged, reduction = "umap", group.by = "seurat_clusters", label = TRUE, pt.size = 0.5) +
ggtitle("UMAP - Clustered Data (After Harmony Integration)")
# Visualize specific cell types or other metadata
DimPlot(All_samples_Merged, reduction = "umap", group.by = "predicted.celltype.l2", label = TRUE, pt.size = 0.5) +
ggtitle("UMAP - Cell Types After Harmony Integration")
#save(All_samples_Merged, file = "../../../0-IMP-OBJECTS/All_Samples_Merged_with_10x_Azitmuth_Annotated_SCT_HPC_without_harmony_integration.robj")