BIOF501 Term Project: Pipeline for Pseudobulking and Function Prediction

Public · 1 yr ago · 0 bookmarks

By Alex Adrian-Hamazaki

Repository Contents

Directories

  • bin: contains scripts used for creating pseudobulk and function prediction

Files

  • environment.yml: Contains dependencies required to run the pipeline

Code Snippets

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
print("RUNNING EGAD")


# Install EGAD from Bioconductor on first use only.
packages <- installed.packages()[, "Package"]

if (!"EGAD" %in% packages) {
    BiocManager::install("EGAD")
    }

library(EGAD)
library(tidyverse)
library(stringr)

# Input expression matrix (samples x genes, csv with row names).
# When run by Snakemake (`script: "bin/EGAD.R"`) the path comes from the rule
# input; otherwise fall back to a hard-coded path for interactive runs.
# Fix: `data_file` was previously only defined in commented-out lines, so the
# read at the "Loading Expression Dataset" step failed with
# "object 'data_file' not found".
if (exists("snakemake")) {
  data_file <- snakemake@input[[1]]
} else {
  # NOTE(review): choose the file matching `is_bulk` below — TODO confirm.
  data_file <- "~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/bulk/bulk_pc.csv"
  # data_file <- "~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/pseudobulk/sum_pseudobulk.csv"
}

# Change per run: destination for the per-GO-term AUROC table.
save <- "~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/EAGD/EGAD_sum_pc_OPfiltered.csv"

# TRUE when `data_file` holds bulk samples, FALSE for pseudobulk samples.
is_bulk <- TRUE

# Never change: reference files shared by all runs.
pc_genes <- "~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/pc_genes/processed_uniprot.csv"
sample_names <- "~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/sample_names/bulk_pseudo.csv"
shared_genes <- "~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/pc_genes/scAndBulkOverlapGenes.txt"

#../../../../pipeline42/datasets/Gtex/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_tpm.gct
################ 2.1 : MAKING DATA SETS

########### MAKE COEXPRESSION NETWORK

# Read the expression matrix (samples x genes) and the table that maps bulk
# sample names to their pseudobulk counterparts. Both files are
# comma-separated with the first column holding row names.
# (read.csv is read.delim with sep = "," — identical parsing here.)

print("Loading Expression Dataset")

expression_data <- read.csv(data_file, header = TRUE, row.names = 1)
sample_names_df <- read.csv(sample_names, header = TRUE, row.names = 1)
### Remove Organism Parts that are NOT shared between the Tabula and GTEx datasets.
# Resolved an unresolved git merge conflict that was left in the file: the
# other branch's line (zeroing NAs in `coexpression_network`) already exists
# at its proper place after cor() is computed, so the function definition is
# the correct resolution.
#
# Keep only the rows of `expression_data` whose row name begins with the
# organism-part string `OP` (prefix match via regex `OP.*`).
filter_by_OP <- function(OP, expression_data) {
  expression2 <- expression_data %>%
    filter(str_detect(rownames(expression_data), paste0(OP, ".*")))
  return(expression2)
}
# Subset the expression matrix to the organism parts (tissues) listed in the
# sample-name table, using the column that matches the data type.
if (is_bulk) {
  print('Subsampling for Bulk name Organism Parts')
  # Bulk row names match the name table exactly, so a direct membership
  # filter on row names is sufficient.
  OP_names <- sample_names_df$bulk_names
  expression_data <- expression_data %>%
    filter(rownames(expression_data) %in% OP_names)
} else {
  print('Subsampling for Pseudobulk name Organism Parts')

  OP_names <- sample_names_df$pseudo_names

  # Pseudobulk rows are matched by prefix (presumably one row per cell type
  # within a tissue, suffixed after the tissue name — TODO confirm), so each
  # tissue is extracted with filter_by_OP() and the subsets stacked back
  # together row-wise.
  expression_data_2 <- lapply(OP_names, filter_by_OP, expression_data)

  expression_data <- do.call(rbind, expression_data_2)
  }


print("Filtering Expression Data for PC genes")

# Restrict columns to protein-coding genes listed in the processed UniProt
# table, then report how many gene columns were dropped.
uniprot <- read.delim(pc_genes, sep = ",", header = TRUE, row.names = 1)
coding_names <- uniprot$FirstUniprot
pc_expression <- expression_data[, colnames(expression_data) %in% coding_names]
n_dropped <- ncol(expression_data) - ncol(pc_expression)
print(paste("Removed", n_dropped, "non-protein coding genes"))




######### Build Coexpression Network

# Gene-gene correlation across samples; NA correlations (e.g. from
# zero-variance genes) are treated as "no co-expression".
coexpression_network <- cor(pc_expression)
coexpression_network[is.na(coexpression_network)] <- 0


############ BUILDING ANNOTATION SET
print("Building Annotation Set")

### With builtin GO
#annotations <- make_annotations(GO.human[,c('GO', 'evidence')], unique(GO.human$GO), unique(GO.human$evidence))

### With Custom GO with BP
# Custom gene-to-GO-term pairing table (biological process), one row per
# gene/GO association.
GO <- read.delim(file = '~/Masters/Pseudobulk_Function_Pipeline_HighRes/data/GO/pro_GO.csv', sep = ",", stringsAsFactors = TRUE)

# Filter the GO to Gene pairings for only Genes measured in our expression data because we only want GO terms with 20>= genes
expression_genes <- colnames(expression_data)
GO <- filter(GO, GO$DB_Object_Symbol %in% expression_genes) 
#in our sc data this removes ~3,000 genes
#in bulk this removes ~16000 genes sheesh

# Filter for only the genes that are measured in both data types (note this is redundant with the prev step now)
# `shared_genes` is a headerless one-column file; read_csv names it X1.
sharedGenes <- read_csv(file =shared_genes, col_names = FALSE)
GO <- filter(GO, GO$DB_Object_Symbol %in%sharedGenes$X1)

# Count how many genes remain associated with each GO term.
GO_unique <- data.frame(table(GO$GO.ID))
colnames(GO_unique) <- c('GO', 'count')


# Create a histogram looking at how many genes are affiliated with each GO term
ggplot(data = GO_unique)+ 
  geom_histogram(mapping = aes(count)) +
  scale_y_continuous(trans = 'log10') +
  labs(title = 'Distribution of Genes in GO Terms') + 
  xlab('Number of Genes')+
  ylab('GO Terms with number of genes')


################ Remove GO Terms with less than 20 Genes in the expression data.
# EGAD's neighbor voting needs reasonably sized gene sets, so small GO terms
# are dropped.

GO_unique_filtered <-  filter(GO_unique, count >=20)

# Fraction of GO terms removed by the size filter.
1-nrow(GO_unique_filtered)/nrow(GO_unique) #92.8 % of GO terms were removed in sc. 92% in bulk

# Histogram of the surviving GO-term sizes (custom bins).
ggplot(data = GO_unique_filtered)+ 
  geom_histogram(mapping = aes(count), breaks = c(0, 19, 30, 40, 50, 60, 70, 80, 90, 100, 150)) + 
  scale_y_continuous(trans = 'log10') +
  labs(title = 'Distribution of Genes Associated with GO Terms') + 
  xlab('Number of Genes')+
  ylab('Count of GO Terms')

# With GO_unique_filtered, we now have all of the GO Terms we want to use in our analysis

# Keep only the gene-to-GO associations whose GO term survived the filter.
GO_20_or_more <-dplyr::filter(GO, (GO$GO.ID %in% GO_unique_filtered$GO))

# 50694 GO To Gene associations were filtered out
1-nrow(GO_20_or_more)/nrow(GO) #44.9% of Gene to Go Term associations were for GO terms with less than 20 genes.  45% in bulk

#Note: We removed 86.2% of GO terms, this only removed 31.5% of GO to gene associations


#Make one hot encoding matrix (genes x GO terms)
# Contains only GO terms with 20 genes or more that were measured in both datasets
annotations <- make_annotations(GO_20_or_more[,c('DB_Object_Symbol', 'GO.ID')],  unique(GO_20_or_more$DB_Object_Symbol), unique(GO_20_or_more$GO.ID))

################ Neighbor Voting
print("Performing Neighbor Voting. This can take a while")

# EGAD guilt-by-association: 3-fold cross-validated neighbor voting, scoring
# how well the co-expression network predicts membership in each GO term.
# Returns one row per GO term with its AUROC.
auroc <- neighbor_voting(genes.labels = annotations,
                         network = coexpression_network,
                         nFold = 3,
                         output = "AUROC")


#auroc <- run_GBA(network = coexpression_network,
#                labels = annotations)

# Free the two largest objects before writing results.
rm(coexpression_network)
rm(annotations)

write.table(x = auroc, file = save, sep = ",")

# Report success only after write.table has actually run (previously this
# message — with "AURUC" misspelled — was printed before the file was
# written, which was misleading if the write failed).
print(paste0("Wrote AUROC to ", save))
41
42
43
44
shell:
    """
    python {params.script} {input.data}
    """
SnakeMake From line 41 of main/Snakefile
55
56
57
58
shell:
    """
    python {params.script} {input.data} {params.cell_type_column}
    """
SnakeMake From line 55 of main/Snakefile
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
shell:
    """
    echo {input.data}

    touch {output.pseudobulk}

    head -n 1 {input.data[0]} > {output.pseudobulk}

    array2=({input.data})
    echo ${{array2}}


    for file in ${{array2[@]}};
    do
        echo ${{file}}
        tail -n+2  ${{file}} >> {output.pseudobulk}
    done
    """
SnakeMake From line 67 of main/Snakefile
92
93
script:
    "bin/EGAD.R"
SnakeMake From line 92 of main/Snakefile
102
103
104
105
shell:
    """
    python {params.script} {input.data}
    """
SnakeMake From line 102 of main/Snakefile
ShowHide 5 more snippets with no or duplicated tags.

Login to post a comment if you would like to share your experience with this workflow.

Do you know this workflow well? If so, you can request seller status , and start supporting this workflow.

Free

Created: 1yr ago
Updated: 1yr ago
Maintainers: public
URL: https://github.com/AlexAdrian-Hamazaki/Pseudobulk_Function_Pipeline
Name: pseudobulk_function_pipeline
Version: 1
Badge:
workflow icon

Insert copied code into your website to add a link to this workflow.

Downloaded: 0
Copyright: Public Domain
License: None
  • Future updates

Related Workflows

cellranger-snakemake-gke
snakemake workflow to run cellranger on a given bucket using gke.
A Snakemake workflow for running cellranger on a given bucket using Google Kubernetes Engine. The usage of this workflow ...