Short Tandem Repeat (STR) Catalog Analysis Workflow


Active STRs Analysis

This repository contains a Snakemake workflow for analyzing the regulatory and functional roles of short tandem repeats (STRs) across several STR catalogs. The project was funded by a Stanford Bio-X undergraduate research fellowship, under the mentorship of Dr. Graham Erwin in the Snyder Lab.

Code Snippets

# Download the eSTR catalog, spoofing a browser user agent
shell:
    "wget --user-agent=\"Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0\" "
    "-O {output} {config[catalog_urls][estrs]}"
# Download the TRF (Tandem Repeats Finder) catalog
shell:
    "wget {config[catalog_urls][trf]} -O {output}"
# Download the GangSTR catalog
shell:
    "wget {config[catalog_urls][gangstr]} -O {output}"
# Convert an Excel workbook sheet to TSV (see xlsx_to_tsv.py below)
script:
    "../scripts/xlsx_to_tsv.py"
# Decompress a gzipped input to a plain-text output
shell:
    "gzip -dc {input} > {output}"
# Flatten a VCF into a wide TSV with VCF-kit
shell:
    "vk vcf2tsv wide --print-header {input} > {output}"
# Concatenate inputs into a single output file
shell:
    "cat {input} > {output}"
# Preprocess the novel STR catalog JSON (see preprocess_novel_json.py below)
script:
    "../scripts/preprocess_novel_json.py"

'''
Standardization rules
---
Rearrange the columns of each catalog so that the first four
columns are chr, start, stop, and motif, in addition to
applying the global filters specified in the config file.
'''
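Each standardization script below finishes by calling apply_filters from a shared common module that is not among the snippets shown here. A minimal sketch of what such a helper might look like, assuming (hypothetically) that the config exposes an allowed-chromosome list and motif-length bounds; the real filter keys may differ:

# Hypothetical sketch of the shared common.apply_filters helper.
# The keys 'chromosomes', 'min_motif_len', and 'max_motif_len' are
# assumptions for illustration, not the repository's actual config keys.
import pandas as pd

def apply_filters(df: pd.DataFrame, config: dict) -> pd.DataFrame:
    filters = config.get('filters', {})

    # Restrict to an allowed set of chromosomes, if configured
    chromosomes = filters.get('chromosomes')
    if chromosomes:
        df = df[df['chr'].isin(chromosomes)]

    # Bound the repeat-unit (motif) length, if configured
    min_len = filters.get('min_motif_len')
    if min_len is not None:
        df = df[df['motif'].str.len() >= min_len]
    max_len = filters.get('max_motif_len')
    if max_len is not None:
        df = df[df['motif'].str.len() <= max_len]

    return df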
# Standardize the eSTR catalog
script:
    "../scripts/standardize_catalog/estrs.py"
# Standardize the GangSTR catalog
script:
    "../scripts/standardize_catalog/gangstr.py"
# Standardize the TRF catalog
script:
    "../scripts/standardize_catalog/trf.py"
# Standardize the novel STR catalog
script:
    "../scripts/standardize_catalog/novel.py"
# Strip the header, keep the first four (BED-like) columns, and coordinate-sort
shell:
    "tail -n +2 {input} | cut -f1-4 | sort -k1,1 -k2,2n > {output}"
# Download, decompress, and coordinate-sort an annotation file
shell:
    "wget -O {output.gz} {params.url} && "
    "gzip -dc {output.gz} > {output.unsorted} && "
    "sort -k1,1 -k2,2n {output.unsorted} > {output.sorted}"
# Find the closest histone-mark peak to each catalog locus (signed distance)
shell:
    "bedtools closest -D ref -a {input.catalog} -b {input.histone} > {output}"
# Tally per-position flank mismatch profiles from realigned reads (see script below)
script:
    "../scripts/parse_relative_mismatches_optimized.py"
# Download the cCRE annotations
shell:
    "wget {config[annotation_urls][ccre]} -O {output}"
# Find the closest catalog locus to each annotation (signed distance)
shell:
    "bedtools closest -D ref -a {input.ann} -b {input.cat} > {output}"
# Report each A interval that overlaps a B interval
# ({a} and {b} are presumably local variables in an enclosing run block)
shell:
    "bedtools intersect -wa -a {a} -b {b} > {output}"
import re
import pandas as pd
import pysam
from collections import defaultdict

# Match graph-CIGAR nodes like '0[5M1X]' and the operations inside them
extract_nodes_p = re.compile(r'(\d+)\[((?:\d+[A-Z])+)\]', re.IGNORECASE)
extract_operations_p = re.compile(r'(\d+)([A-Z])', re.IGNORECASE)

class LocusFlankData(object):

    def __init__(self) -> None:
        self.flank_data = {
            'left': defaultdict(lambda: defaultdict(int)),
            'right': defaultdict(lambda: defaultdict(int)),
        } 

    def add_range(self, flank, position, operation, count) -> None:
        for i in range(count):
            self.flank_data[flank][position + i][operation] += 1

    def parse_graph_cigar(self, cigar_string):
        nodes = extract_nodes_p.findall(cigar_string)
        for node in nodes:
            node_id, cigar_string = node
            node_id = int(node_id)
            if node_id == 0:
                self.parse_cigar_string('left', cigar_string)
            elif node_id == 2:
                self.parse_cigar_string('right', cigar_string)

    def parse_cigar_string(self, flank, cigar_string) -> None:
        operations = extract_operations_p.findall(cigar_string)

        if flank == 'left':
            operations.reverse()

        current_position = 1
        for count, op in operations:
            count = int(count)
            if op != 'S':  # soft clips are not tallied
                self.add_range(flank, current_position, op, count)
            if op in 'MX':  # only M/X operations advance the position counter
                current_position += count

    def to_df(self) -> pd.DataFrame:
        dfs = []
        for flank in self.flank_data:
            data = self.flank_data[flank]
            df = pd.DataFrame({
                'position': data.keys(),
                'matches': [data[pos]['M'] for pos in data],
                'mismatches': [data[pos]['X'] for pos in data],
                'insertions': [data[pos]['I'] for pos in data],
                'deletions': [data[pos]['D'] for pos in data],
            })
            # df['flank'] = flank
            if flank == 'left':
                df['position'] *= -1
            dfs.append(df)
        return pd.concat(dfs, ignore_index=True)


bam_file_path = snakemake.input['bam']
out_file_path = snakemake.output[0]
sample = snakemake.wildcards['sample']

# bam_file_path = 'resources/realigned_bam/active/HG00118/HG00118_realigned.bam'
# out_file_path = 'mutations.h5'
# sample = 'HG00118'

# Get all cigar strings
loci_cigars = defaultdict(list)
bam = pysam.AlignmentFile(bam_file_path)
for read in bam.fetch(until_eof=True):
    locus_id, _, cigar_string = read.get_tag('XG').split(',')
    loci_cigars[locus_id].append(cigar_string)

print('Finished fetching {} reads. Parsing...'.format(sample))

# Parse and store, while continuously freeing memory
all_locus_ids = list(loci_cigars.keys())
for locus_id in all_locus_ids:
    locus_flank_data = LocusFlankData()
    for cigar_string in loci_cigars[locus_id]:
        locus_flank_data.parse_graph_cigar(cigar_string)
    df = locus_flank_data.to_df()
    df['locus_id'] = locus_id
    df = df.set_index(['locus_id', 'position'])
    df.to_hdf(out_file_path, key=sample, format='table', append=True, min_itemsize={'locus_id': 32})
    del loci_cigars[locus_id]
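To make the graph-CIGAR convention concrete (node 0 is treated as the left flank, node 2 as the right flank, and node 1, presumably the repeat itself, is skipped), here is an illustrative check of the parser on a made-up CIGAR string:

# Illustrative only: a fabricated graph CIGAR, not real data
lfd = LocusFlankData()
lfd.parse_graph_cigar('0[5M1X]1[12M]2[3M1D4M]')
print(lfd.to_df())
# Left-flank positions come out negative and right-flank positions positive,
# with per-position counts of matches, mismatches, insertions, and deletions.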
import json
import pandas as pd
import re
from utils import LooseDataFrame

with open(snakemake.input[0]) as f:
    catalog = json.load(f)
df = pd.DataFrame(catalog)

old_cols = list(df.columns)
new_cols = ['chr', 'start', 'stop']

# Expand locus IDs into separate chr, start, stop columns
df[new_cols] = df['LocusId'].str.split('_', expand=True)

df['motif'] = ''
new_cols.append('motif')

# Place chr, start, stop, motif columns at the beginning
df = df[new_cols + old_cols]

# Expand variants
begin = re.escape('(')
end = re.escape(')*')
p = re.compile('{}[GCAT]+{}'.format(begin, end))

new_df = LooseDataFrame(df.columns)
for _, row in df.iterrows():
    structure_motifs = p.findall(row['LocusStructure'])

    if len(structure_motifs) > 1:
        ref_regions = row['ReferenceRegion']
        variant_ids = row['VariantId']
        variant_types = row['VariantType']
    else:
        ref_regions = [row['ReferenceRegion']]
        variant_ids = [row['VariantId']]
        variant_types = [row['VariantType']]

    assert len(structure_motifs) == len(ref_regions) == len(variant_ids) == len(variant_types)

    for i in range(len(structure_motifs)):
        chr, pos = ref_regions[i].split(':')
        start, stop = pos.split('-')

        new_row = row.to_dict()
        new_row['chr'] = chr  # keep chr consistent with this variant's ReferenceRegion
        new_row['start'] = start
        new_row['stop'] = stop
        # Slice off the '(' prefix and ')*' suffix, keeping only the repeating unit
        new_row['motif'] = structure_motifs[i][1:-2]
        new_row['VariantId'] = variant_ids[i]
        new_row['VariantType'] = variant_types[i]
        new_df.append(new_row)

new_df = new_df.to_df()

new_df.to_csv(snakemake.output[0], sep='\t', index=False)
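The script imports LooseDataFrame from a local utils module that is not shown among these snippets. Judging from its usage (constructed with a column list, appended to with dict rows, materialized via to_df()), a minimal stand-in might look like this; the repository's real implementation may differ:

# Hypothetical stand-in for utils.LooseDataFrame, inferred from its usage
# above: rows are collected as dicts and materialized once at the end,
# avoiding the cost of growing a pandas DataFrame row by row.
import pandas as pd

class LooseDataFrame:
    def __init__(self, columns):
        self.columns = list(columns)
        self.rows = []

    def append(self, row: dict) -> None:
        self.rows.append(row)

    def to_df(self) -> pd.DataFrame:
        return pd.DataFrame(self.rows, columns=self.columns)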
import pandas as pd
from pyliftover import LiftOver
from common import apply_filters

estrs = pd.read_csv(snakemake.input[0], sep='\t')
estrs = estrs.rename({
    'chrom': 'chr',
    'str.start': 'start',
    'str.end': 'stop',
    'str.motif.forward': 'motif'
}, axis='columns')

former_cols = ['chr', 'start', 'stop', 'motif']
latter_cols = [col for col in estrs.columns if col not in former_cols]

estrs = estrs[former_cols + latter_cols]

# Keep only fine-mapped eSTRs (CAVIAR score > 0.3)
estrs = estrs[estrs['score'] > 0.3]

estrs_hg19 = estrs.copy()
estrs_hg38 = estrs.copy()

# Lift coordinates over from hg19 to hg38; drop loci that fail to convert
lo = LiftOver('hg19', 'hg38')
to_drop = []
for i, row in estrs_hg38.iterrows():
    start = lo.convert_coordinate(row['chr'], row['start'])
    stop = lo.convert_coordinate(row['chr'], row['stop'])
    if len(start) > 0 and len(stop) > 0:
        estrs_hg38.loc[i, 'start'] = start[0][1]
        estrs_hg38.loc[i, 'stop'] = stop[0][1]
    else:
        to_drop.append(i)

estrs_hg38 = estrs_hg38.drop(to_drop, axis='rows')

estrs_hg19 = apply_filters(estrs_hg19, snakemake.config)
estrs_hg38 = apply_filters(estrs_hg38, snakemake.config)

estrs_hg19.to_csv(snakemake.output['hg19'], sep='\t', index=False)
estrs_hg38.to_csv(snakemake.output['hg38'], sep='\t', index=False)
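The start[0][1] indexing above relies on pyliftover's return format: convert_coordinate returns a list of candidate conversions, each a (chromosome, position, strand, score) tuple, and an empty list when a coordinate fails to lift over, which is why such rows are dropped. A quick illustration (the converted position shown is only an example):

# pyliftover return format, illustrated; the numbers are examples
from pyliftover import LiftOver

lo = LiftOver('hg19', 'hg38')
print(lo.convert_coordinate('chr1', 1000000))
# e.g. [('chr1', 1064620, '+', ...)] -- take [0][1] for the new position;
# an empty list [] means the locus has no hg38 equivalent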
import pandas as pd
from common import apply_filters

gangstr = pd.read_csv(snakemake.input[0], sep='\t')
gangstr = gangstr.rename({
    'CHROM': 'chr',
    'POS': 'start',
    'END': 'stop',
    'RU': 'motif'
}, axis='columns')

former_cols = ['chr', 'start', 'stop', 'motif']
latter_cols = [col for col in gangstr.columns if col not in former_cols]

gangstr = gangstr[former_cols + latter_cols]

# Keep only loci that passed filtering in sample NA12892
gangstr = gangstr[gangstr['NA12892_FILTER'] == 'PASS']

# Convert 1-based VCF POS values to 0-based start coordinates
gangstr['start'] = gangstr['start'] - 1

gangstr = apply_filters(gangstr, snakemake.config)

gangstr.to_csv(snakemake.output[0], sep='\t', index=False)
import pandas as pd
from common import apply_filters

novel = pd.read_csv(snakemake.input[0], sep='\t')

former_cols = ['chr', 'start', 'stop', 'motif']
latter_cols = [col for col in novel.columns if col not in former_cols]

novel = novel[former_cols + latter_cols]

novel = apply_filters(novel, snakemake.config)

novel.to_csv(snakemake.output[0], sep='\t', index=False)
import pandas as pd
from common import apply_filters

trf = pd.read_csv(snakemake.input[0], sep='\t')
# Columns follow the UCSC simpleRepeat (TRF) track schema, with 'sequence' renamed to 'motif'
trf.columns = [
    'bin', 'chr', 'start', 'stop', 'name', 'period', 'copyNum',
    'consensusSize', 'perMatch', 'perIndel', 'score',
    'A', 'C', 'G', 'T', 'entropy', 'motif',
]

former_cols = ['chr', 'start', 'stop', 'motif']
latter_cols = [col for col in trf.columns if col not in former_cols]

trf = trf[former_cols + latter_cols]

trf = apply_filters(trf, snakemake.config)

trf.to_csv(snakemake.output[0], sep='\t', index=False)
import pandas as pd

# Read the sheet named in the rule's params and write it back out as TSV
xls = pd.ExcelFile(snakemake.input[0])
df = pd.read_excel(xls, snakemake.params['sheet'])
df.to_csv(snakemake.output[0], sep='\t', index=False)
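For context, the script above is driven from a Snakemake rule whose params supply the sheet name read via snakemake.params['sheet']. The repository's actual rule is not among these snippets; it presumably has roughly this shape (the rule name and file paths are placeholders):

# Hypothetical shape of the invoking rule; names and paths are placeholders
rule xlsx_to_tsv:
    input:
        'resources/catalog.xlsx'
    output:
        'results/catalog.tsv'
    params:
        sheet='Sheet1'  # consumed as snakemake.params['sheet'] in the script
    script:
        '../scripts/xlsx_to_tsv.py'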
URL: https://github.com/rashidalabri/active-strs-analysis
Name: active-strs-analysis
Version: 1
License: GNU General Public License v3.0