Mtb variant calling pipeline (bwa/GATK)

public public 1yr ago 0 bookmarks

Pipeline for M. tuberculosis variant identification from short-read data.

Usage

# Navigate to root directory.
# Identify files that have not been produced (no run)
snakemake -np
# Run snakemake
snakemake

Directory structure

├── .gitignore
├── README.md
├── LICENSE.md
├── workflow
 ├── rules
 ├── envs
|  ├── tool1.yaml
 ├── scripts
|  ├──process_stanford_tb.sh
|  ├──trim_reads.sh
|  ├──run_kraken.sh
|  ├──map_reads.sh
|  ├──cov_stats.sh
|  ├──mykrobe_predict.sh
|  ├──call_vars_gatk.sh
|  ├──vqsr.sh
|  ├──vcf2fasta.sh
 ├── notebooks
 ├── report
| └── Snakefile
├── config
 ├── config.yaml
├── results
 ├── trim
 ├── bams
 ├── vars
 ├── stats
 ├── fasta
└── resources

Code Snippets

 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# Call variants with GATK: BAM -> gVCF -> genotyped VCF.
# Usage: call_vars_gatk.sh <ref.fasta> <in.bam> <ploidy> <out.vcf.gz>
module load anaconda
source activate snakemake

# Read input arguments.
ref=$1    # reference genome fasta
bam=$2    # input (deduplicated) BAM
ploidy=$3 # sample ploidy passed through to GATK
vcf=$4    # output VCF path (.vcf.gz)

# Name the intermediate gVCF after the output VCF.
gvcf=${vcf/.vcf.gz/.g.vcf.gz}

# If no GATK sequence dictionary exists, create one for the reference.
if [ ! -f "${ref%.*}.dict" ]; then
  echo "GATK dictionary for $ref" >&2
  picard CreateSequenceDictionary \
    REFERENCE="${ref}" \
    OUTPUT="${ref%.*}.dict"
fi

# If the BAM index does not exist (or is empty), index the BAM.
if [ ! -s "${bam}.bai" ]; then
  echo 'indexing bam'
  samtools index "${bam}"
fi

# Call variants with GATK, emitting a per-sample GVCF.
gatk --java-options "-Xmx100g" HaplotypeCaller \
  -R "${ref}" \
  -ploidy "${ploidy}" \
  -I "${bam}" \
  -ERC GVCF \
  -O "${gvcf}"

# Index the gVCF so GenotypeGVCFs can read it.
gatk IndexFeatureFile \
  -I "${gvcf}"

# Genotype the gVCF into the final VCF, keeping non-variant sites.
# Min base quality score is 10 by default.
gatk --java-options '-Xmx100g' GenotypeGVCFs \
  -R "${ref}" \
  --variant "${gvcf}" \
  -ploidy "${ploidy}" \
  --include-non-variant-sites true \
  --output "${vcf}"

# Error handling: abort if genotyping failed.
if [ "$?" != "0" ]; then
	echo "[Error]" $LINENO "GATK failed!" 1>&2
	exit 1
fi

#### PRINT OUTPUT ####
echo "Done====" >&2
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
# Collect whole-genome coverage statistics for a BAM with Picard.
# Usage: cov_stats.sh <ref.fasta> <in.bam> <out.cov_stats>
module load anaconda
source activate snakemake

# Read from command line: reference genome, BAM, output stats file.
ref=${1}
bam=${2}
cov_stats=${3}

# Collect stats with Picard (quoted so paths with spaces survive).
picard CollectWgsMetrics \
R="${ref}" \
I="${bam}" \
O="${cov_stats}"
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
# Map paired- or single-end reads to a reference with bowtie2 or bwa, then
# add read groups, sort, index, and remove duplicates.
# Usage: map_reads.sh <ref.fasta> <bowtie2|bwa> <reads_1.fq[.gz]> [reads_2.fq.gz] <out.bam>
module load anaconda
source activate snakemake

# Read from command line: ref genome, mapper, fastq 1, fastq 2, output BAM.
ref=${1}  # 1st input is full path to reference genome
mapper=$2 # 2nd input is mapping algorithm (bowtie/bowtie2/bwa)
p1=$3     # 3rd input is full path to read 1
p2=$4     # 4th input is full path to read 2 (if paired-end)
bam=$5    # 5th input is final output BAM path

# Define output directory.
BAMS_DIR=$(dirname "${bam}")

# Define temp directory for intermediate files.
TMP_DIR=${BAMS_DIR}/tmp/
mkdir -p "${TMP_DIR}"

# Define sample prefix from read 1 (strip all extensions).
prefix=$(basename "${p1%%.*}")

# Reference index basename: reference path without its extension.
ref_index=${ref%.*}
ref_prefix=$(basename "$ref_index")

# Set names of intermediate files.
sam=${TMP_DIR}${prefix}_${mapper}_${ref_prefix}'.sam'
rawbam=${TMP_DIR}${prefix}_${mapper}_${ref_prefix}'.bam'
rgbam=${TMP_DIR}${prefix}_${mapper}_${ref_prefix}'_rg.bam'
sortbam=${TMP_DIR}${prefix}_${mapper}_${ref_prefix}'_rg_sorted.bam'
echo "$sam"

# Help message if no inputs are entered (-z checks if 1st argument string is null).
# NOTE: usage previously listed a <dataset> argument that the script never reads;
# the text below now matches the actual argument parsing above.
if [ -z "${1}" ]
then
  echo "This script aligns paired- or single-end fastq files with bowtie2 or bwa"
  echo "$(basename "$0") <refGenome> <mapper> <readFile1> <readFile2> <outputBam>"
  echo "First input is the Mtb ref genome"
  echo "Second input is the read mapper"
  echo "Third input is location of file containing read 1"
  echo "Fourth input is location of file containing read 2 (if paired-end)"
  echo "Fifth input is the output BAM path"
  exit
fi

# If no p2 given, run in single-end mode.
if [ -z "${p2}" ]
then
  echo "read pair not specified"
fi

# Get machine id_lane from SRA read identifier (old Illumina fastq format).
if [[ ${p1} == *.gz ]]; then
  # gzipped fastq
  seqid=$(zcat "${p1}" | head -n 1)
else
  # uncompressed fastq
  seqid=$(cat "${p1}" | head -n 1)
fi
seqid="$(echo "$seqid" | cut -d' ' -f1)"
seqid="$(echo "$seqid" | cut -d':' -f3)"

id_lane=${seqid:-readgroup1} # default is "readgroup1"

#### MAPPING ####
# bowtie2 mapping
if [ "$mapper" == 'bowtie' ] || [ "$mapper" == 'bowtie2' ] ; then

  # If no index exists, index the reference genome.
  if [ ! -f "${ref%.*}.1.bt2" ] ; then
    echo "bowtie2 indexing $ref" >&2
    ${BOWTIE2_BUILD} "${ref}" "${ref_index}"
  fi

  echo "mapping with bowtie2" >&2
  # If paired-end reads:
  if [ ! -z "${p2}" ]; then
    bowtie2 --threads 7 -X 1100 -x "${ref_index}" -1 "${p1}" -2 "${p2}" -S "${sam}"
    # -x basename of reference index
    # end-to-end mapping is the default mode
    # -X raises maximum fragment length from default (500) to allow longer
    #   fragments (paired-end only); -X 2000 was used for Roetzer data
    # ART simulations X = 1100 (mean fragment length = 650bp + 3 x 150-bp stdev)

  # If single-end reads:
  elif [ -z "${p2}" ]; then
    echo "single reads"
    bowtie2 --threads 7 -X 1100 -x "${ref_index}" -U "${p1}" -S "${sam}"
    # -U for unpaired reads
  fi
  # Error handling
  if [ "$?" != "0" ]; then
    echo "[Error]" $LINENO "bowtie mapping failed ${p1}!" 1>&2
    exit 1
  fi
fi

# bwa mapping
if [ "$mapper" == 'bwa' ]; then
  # If no index exists, index the reference genome.
  if [ ! -f "${ref}.sa" ] ; then
    echo "bwa indexing $ref" >&2
    bwa index "${ref}"
  fi

  echo "mapping with bwa" >&2
  # If paired-end reads:
  if [ ! -z "${p2}" ]; then
    bwa mem -t 7 "${ref}" "${p1}" "${p2}" > "${sam}"
    # -t no. of threads.
  # If single-end reads:
  elif [ -z "${p2}" ]; then
    echo "single reads"
    bwa mem -t 7 "${ref}" "${p1}" > "${sam}"
  fi
  # Error handling
  if [ "$?" != "0" ]; then
    echo "[Error]" $LINENO "bwa mapping failed ${p1}!" 1>&2
    exit 1
  fi
fi

### POST-PROCESSING ####

# Convert SAM to BAM.
sambamba view -t 7 -S -h "${sam}" -f bam -o "${rawbam}"
# -S auto-detects input format, -h includes header, -o directs output

# Add/replace read groups for post-processing with GATK.
picard AddOrReplaceReadGroups \
  INPUT="${rawbam}" \
  OUTPUT="${rgbam}" \
  RGID="${id_lane}" \
  RGLB=library1 \
  RGPU="${id_lane}" \
  RGPL="illumina" \
  RGSM="${prefix}"

# Sort the BAM by coordinate.
# BUGFIX: previously `sambamba sort ${rgbam}` ran once with default output,
# then `sambamba sort ${rgbam} index -t7 --out ${sortbam}` passed the stray
# token "index" to sort. Sort once to ${sortbam}, then index it explicitly.
sambamba sort -t 7 --out "${sortbam}" "${rgbam}"

# Index the sorted BAM.
sambamba index -t 7 "${sortbam}"

# Remove duplicates. (-r = remove, -p = show progress)
sambamba markdup -r -p -t 7 "${sortbam}" "${bam}" --tmpdir="${TMP_DIR}"

# Remove intermediate files.
#rm ${sam} ${rawbam} ${rgbam} #${sortbam} ${sortbam}.bai

# Error handling
if [ "$?" != "0" ]; then
    echo "[Error]" $LINENO "Remove dups failed ${p1}!" 1>&2
    exit 1
fi

### PRINT OUTPUT ####
echo "Done====" >&2
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
# Predict antimicrobial resistance from a deduplicated BAM with Mykrobe.
# Usage: mykrobe_predict.sh <sample.rmdup.bam> <output.csv>
module load anaconda
source activate snakemake

# Input: deduplicated BAM and output CSV path.
bam=$1
output=$2

# mykrobe predict; the sample name is the BAM path with ".rmdup.bam" stripped.
mykrobe predict "${bam/.rmdup.bam}" tb \
--output "${output}"  \
--format csv \
--ploidy haploid \
--seq "${bam}" \
--panel 201901	# tb 201901	AMR panel based on first line drugs from NEJM-2018 variants (DOI 10.1056/NEJMoa1800474), and second line drugs from Walker 2015 panel.	NC_000962.3
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# Taxonomically classify paired-end reads with Kraken2 and keep only reads
# assigned to Mycobacterium / M. tuberculosis for downstream mapping.
# Usage: run_kraken.sh <trim_1.fq.gz> <trim_2.fq.gz> <filt_1> <filt_2> <kraken_report>
module purge
module load anaconda
source activate snakemake

# Kraken database.
DBNAME=/labs/jandr/walter/varcal/data/refs/kraken/

# Read from command line: trimmed gzipped paired-end fastqs, filtered outputs, report.
p1=$1
p2=$2
filt1=$3
filt2=$4
report=$5

# Name outputs after read 1; write per-read output next to the report.
prefix=$(basename "${p1/_trim_1.*}")
LOGDIR=$(dirname "$report")/

# Run kraken2 to taxonomically classify paired-end reads and write output file.
kraken2 --db "${DBNAME}" --paired --gzip-compressed --threads 8 --report "${report}" --use-names "${p1}" "${p2}" --output "${LOGDIR}${prefix}.out"

# Select reads directly assigned to Mycobacterium genus (G) (taxid 1763), reads
# assigned directly to Mycobacterium tuberculosis complex (G1) (taxid 77643), and
# reads assigned to Mycobacterium tuberculosis (S) and children. This includes
# reads assigned to Mtb and to the genus, but not to a different species.
grep -E 'Mycobacterium \(taxid 1763\)|Mycobacterium tuberculosis' "${LOGDIR}${prefix}.out" | awk '{print $2}' > "${LOGDIR}${prefix}_reads.list"

# Use bbmap's filterbyname.sh to select reads corresponding to taxa of interest.
filterbyname.sh int=false in1="${p1}" in2="${p2}" out1="${filt1}" out2="${filt2}" names="${LOGDIR}${prefix}_reads.list" include=true overwrite=true

# Test if kraken-filtered fastq files are the same length.
# NOTE(review): uses cat, which assumes the filtered outputs are uncompressed — confirm.
len1=$(cat "${filt1}" | wc -l)
len2=$(cat "${filt2}" | wc -l)

echo "$len1" "$len2"

# Write error to log (non-fatal).
if [ "$len1" != "$len2" ]; then
  echo "${prefix}" error: paired-end files are of different lengths.
fi

# Summarize Kraken statistics.
/labs/jandr/walter/tb/scripts/kraken_stats.sh "${report}"

# Error handling.
# NOTE(review): this checks the kraken_stats.sh call directly above, not kraken2 itself.
if [ "$?" != "0" ]; then
	echo "[Error]" $LINENO "Kraken ${p1} failed!" 1>&2
	exit 1
fi
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
# Trim adapters and poly-G tails from paired-end fastq files
# (Trim Galore followed by cutadapt).
# Usage: trim_reads.sh <reads_1.fastq.gz> <reads_2.fastq.gz> <trim_1> <trim_2>
module load anaconda
source activate snakemake

# Read from command line: fastq 1, fastq 2, trimmed outputs.
p1=$1    # fastq 1
p2=$2    # fastq 2
trim1=$3 # trimmed/adapter-removed fastq 1
trim2=$4 # trimmed/adapter-removed fastq 2

# Print input and output filenames.
echo "$p1" "$p2"
echo 'output files' "$trim1" "$trim2"

# Make temp directory for intermediate files.
mkdir -p "$(dirname "$p1")"/tmp
TMP_DIR=$(dirname "$p1")/tmp/

# Name temporary files (Trim Galore's default _val_N output suffix).
tmp1=${TMP_DIR}$(basename "${p1/.fastq.gz/_val_1.fq.gz}")
tmp2=${TMP_DIR}$(basename "${p2/.fastq.gz/_val_2.fq.gz}")


#### Trim READS ####
echo "trimming reads"

# First, trim adapters with Trim Galore.
trim_galore --gzip --stringency 3 --paired "${p1}" "${p2}" --output_dir "${TMP_DIR}" 2>&1
# -q Trims low-quality (Phred scaled base quality < 20) in addition to adapter removal
# --length Sets min read length at 70; bwa mem is optimized for reads >=70 bp. (Abbott)

# Second, run cutadapt to remove NextSeq/NovaSeq poly-G tails.
cutadapt -f 'fastq' --nextseq-trim=20 --minimum-length=20 --pair-filter=any -o "${trim1}" -p "${trim2}" "${tmp1}" "${tmp2}" 2>&1
# On two-color instruments a 'dark cycle' (no detected color) encodes a G, but dark
# cycles also occur when sequencing "falls off" the end of the fragment; the read then
# contains a run of high-quality but incorrect G calls.

# run fastqc again
#/srv/gsfs0/software/fastqc/0.11.4/fastqc --noextract --threads 8 --outdir ${QC_DIR} ${cutadapt1}
#/srv/gsfs0/software/fastqc/0.11.4/fastqc --noextract --threads 8 --outdir ${QC_DIR}  ${cutadapt2}

# Test if both paired-end reads have same number of lines.
len1=$(zcat "$trim1" | wc -l)
len2=$(zcat "$trim2" | wc -l)

# Report error if the two files are different lengths (non-fatal).
# BUGFIX: the message was previously split across lines, so the bare word
# "error:" was executed as a command; $prefix was also never defined here.
if [ "$len1" != "$len2" ]; then
   echo "[Error] ${trim1}: paired-end files are of different lengths." >&2
fi

# remove extra files
rm "${tmp1}" "${tmp2}"

# Error handling
if [ "$?" != "0" ]; then
	echo "[Error]" $LINENO "Read trimming ${p1} failed!" 1>&2
	exit 1
fi
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
# Build a masked consensus fasta from a filtered VCF.
# Usage: vcf2fasta.sh <ref.fasta> <in.vcf.gz> <sample_name> <mask.bed> <out.fasta>
module load bcftools

# Read from command line.
ref=$1
vcf=$2
sample_name=$3 # name to place in the output fasta header
bed=$4         # regions to mask with N
fasta=$5

# Get the sample name present in the VCF for the correct genotype column.
samp=$(bcftools query -l "${vcf}")

# Make consensus masked sequence (indels excluded; masked/absent/missing
# sites become N) and rename the fasta header to the requested sample name.
bcftools consensus --include 'TYPE!="indel"' --mask "${bed}" --fasta-ref "${ref}" --sample "${samp}" --absent 'N' --missing 'N' "${vcf}" | \
    seqtk rename - "${sample_name}" > "${fasta}"
55
56
shell:
  'workflow/scripts/trim_reads.sh {input.p1} {input.p2} {output.trim1} {output.trim2} &>> {log}'
71
72
shell:
  'workflow/scripts/run_kraken.sh {input.trim1} {input.trim2} {output.kr1} {output.kr2} {output.kraken_report} &>> {log}'
87
88
shell:
  "workflow/scripts/map_reads.sh {input.ref_path} {params.mapper} {input.kr1} {input.kr2} {output.bam} &>> {log}"
101
102
103
104
105
shell:    
  '''
  workflow/scripts/cov_stats.sh {input.ref_path} {input.bam} {output.cov_stats} &>> {log}
  paste {input.kraken_stats} <(sed -n '7,8'p {output.cov_stats} ) > {output.combined_stats}
  '''
112
113
shell:
  "cat {input.combined_stats} > {output}"
127
128
shell:
  "sambamba markdup -r -t {threads} --tmpdir={params.tmp_dir} {input.bams} {output.combined_bam}"
141
142
143
144
145
146
147
shell:    
  '''
  # Coverage summary statistics
  workflow/scripts/cov_stats.sh {input.ref_path} {input.combined_bam} {output.cov_stats} &>> {log}
  # Mosdepth coverage along genome (for plotting)
  mosdepth --by 2000 -Q 30 {params.prefix} {input.combined_bam}
  '''
157
158
shell: 
  "workflow/scripts/mykrobe_predict.sh {input.combined_bam} {output.amr_out} &>> {log}"
171
172
shell: 
  "workflow/scripts/call_vars_gatk.sh {input.ref_path} {input.combined_bam} {params.ploidy} {output.vcf} &>> {log}"
187
188
189
190
191
192
193
194
195
196
197
198
199
shell:
  '''
  # Rename Chromosome to be consistent with snpEff/Ensembl genomes.
  zcat {input.vcf}| sed 's/NC_000962.3/Chromosome/g' | bgzip > {output.rename_vcf}
  tabix {output.rename_vcf}

  # Run snpEff
  java -jar -Xmx8g {config[snpeff]} eff {config[snpeff_db]} {output.rename_vcf} -dataDir {config[snpeff_datapath]} -noStats -no-downstream -no-upstream -canon > {output.tmp_vcf}

  # Also use bed file to annotate vcf
  bcftools annotate -a {params.bed} -h {params.vcf_header} -c CHROM,FROM,TO,FORMAT/PPE {output.tmp_vcf} > {output.ann_vcf}

  '''
209
210
211
212
213
214
215
216
217
218
219
220
221
shell:
  '''

 # Filter VQSR
  bcftools filter -e "QUAL < 40.0 | FORMAT/DP < 10" --set-GTs '.' {input.ann_vcf} -O z -o {output.filt_vcf}

  # Index filtered VCF.
  tabix -f -p vcf {output.filt_vcf}

  # Print out stats to log file. 
  bcftools stats {output.filt_vcf} &>> {log} 

 '''
235
236
237
238
  shell:
    '''
    workflow/scripts/vcf2fasta.sh {input.ref_path} {input.filt_vcf} {params.sample_name} {params.bed} {output.fasta}    
	'''    
252
253
254
255
256
257
shell: 
  """
  tb-profiler profile --no_delly --bam {input.combined_bam} --prefix {params.samp} --dir {params.outdir} \
  --external_db /labs/jandr/walter/repos/tbdb/tbdb --csv &>> {log}
  mv {params.tmp_file} {output.profile}
  """
ShowHide 13 more snippets with no or duplicated tags.

Login to post a comment if you would like to share your experience with this workflow.

Do you know this workflow well? If so, you can request seller status , and start supporting this workflow.

Free

Created: 1yr ago
Updated: 1yr ago
Maintainers: public
URL: https://github.com/ksw9/mtb_pipeline
Name: mtb_pipeline
Version: 1
Badge:
workflow icon

Insert copied code into your website to add a link to this workflow.

Downloaded: 0
Copyright: Public Domain
License: None
  • Future updates

Related Workflows

cellranger-snakemake-gke
snakemake workflow to run cellranger on a given bucket using gke.
A Snakemake workflow for running cellranger on a given bucket using Google Kubernetes Engine. The usage of this workflow ...