Workflow Steps and Code Snippets

165 tagged steps and code snippets that match the keyword pysam

A pipeline for simulating viral or vector integration into a host genome (v0.0.2)

from sys import argv
from os import path, SEEK_CUR
from scipy.stats import norm
from collections import defaultdict
import argparse
import csv
import pysam

import pdb

genome_length = int(3e9)

def main(argv):
	#get arguments
	parser = argparse.ArgumentParser(description='simulate viral integrations')
	parser.add_argument('--sim-info', help='information from simulation', required = True, type=str)
	parser.add_argument('--sim-bam', help='bam file from read simulation (must be sorted, indexed)', required = True, type=str)
	parser.add_argument('--soft-threshold', help='threshold for number of bases that must be mapped when finding integrations in soft-clipped reads', type=int, default=20)
	parser.add_argument('--discordant-threshold', help='threshold for number of bases that may be unmapped when finding integrations in discordant read pairs', type=int, default=20)
	parser.add_argument('--mean-frag-len', help='mean fragment length used when simulating reads', type=int, default=500)
	parser.add_argument('--sd-frag-len', help='standard deviation of fragment length used when simulating reads', type=int, default=30)
	parser.add_argument('--window-frac', help='fraction of the distribution of fragment sizes to use when searching for discordant read pairs', type=float, default=0.99)
	parser.add_argument('--output', help='output file', required=False, default='annotated.tsv')
	args = parser.parse_args()

	# read in bam file
	samfile = pysam.AlignmentFile(args.sim_bam)

	# iterate over integrations in info file and pull out reads crossing each one
	# the sam/bam file has the same coordinates as the info file
	# so just use the coordinates of the left and right junctions from this file
	with open(args.sim_info, newline='') as info_file, open(args.output, 'w', newline='') as output:

		# use csv to read and write files
		reader = csv.DictReader(info_file, delimiter = '\t')

		writer_fieldnames = list(reader.fieldnames) + ['left_chimeric', 'right_chimeric', 'left_discord', 'right_discord', 'multiple_discord', 'fake_discord', 'discordant_and_chimeric']

		writer = csv.DictWriter(output, delimiter = '\t', fieldnames = writer_fieldnames)
		writer.writeheader()


		# set window size for looking for discordant pairs and 
		# looking for multiple ints with the same discordant pair
		window_width = window_size(args.mean_frag_len, args.sd_frag_len, args.window_frac)

		# create a buffer of integrations that all fall within this window width 
		buffer = [next(reader)]
		while True:

			buffer, next_buffer = get_ints_in_window(buffer, window_width, reader, info_file)

			# find reads crossing each integration in the buffer
			buffer = find_reads_crossing_ints(buffer, samfile, args, window_width)

			# check for read pairs that cross multiple junctions
			buffer = find_multiple_discordant(buffer)

			# check for read pairs that are discordant at one integration and chimeric at another integration
			# assume that integrations are independent (even though they aren't in simulation), so these are not detectable
			buffer = find_multiple_chimeric_discordant(buffer)

			# write rows in buffer
			for row in buffer:
				writer.writerow(row)

			buffer = next_buffer

			# check if we've reached the end of the file
			if len(buffer) == 0:
				break

	samfile.close()

def find_multiple_chimeric_discordant(buffer):
	"""
	Assume that in real data integrations are independent, so two integrations do not
	occur close enough to each other in the same cell for one read pair to span both.
	This may or may not be the case, but the number of such pairs of integrations is
	assumed to be small enough that they're not worth worrying about.

	If a read pair has one read that is chimeric, but the pair is also discordant about
	the same or a different integration, only the chimeric read will be detected
	(and the pair won't be flagged as a discordant read pair). Assuming that these
	events are a tiny minority in real data, don't flag these discordant pairs as
	something that should be detected; instead add them to the 'discordant_and_chimeric'
	category.
	"""	
	to_delete = {}
	for i, row in enumerate(buffer):

		# for each row, find reads that are both chimeric
		# and discordant for any other integration
		left_discord = row['left_discord'].split(';')
		right_discord =  row['right_discord'].split(';')
		to_delete[i] = {'left' : [], 'right' : []}


		# check for these reads in other rows
		for row_2 in buffer:
			# skip if this is the same as the one we're looking at
			if row_2 == row:
				continue

			# get read names for chimeric reads for this row
			chimeric = row_2['left_chimeric'].split(";")
			chimeric += row_2['right_chimeric'].split(";")
			chimeric = [read[:-2] for read in chimeric]

			# check if chimeric reads in row_2 also occur in the list
			# of discordant reads in row
			for read in chimeric:
				if read in left_discord:
					to_delete[i]['left'].append(read)
				if read in right_discord:
					to_delete[i]['right'].append(read)

	# remove reads in to_delete
	for row_num in to_delete.keys():

		# remove reads from left_discord
		left_discord = buffer[row_num]['left_discord'].split(';')
		left_discord = [read for read in left_discord if read not in to_delete[row_num]['left']]
		buffer[row_num]['left_discord'] = ";".join(left_discord)

		# remove reads from right_discord
		right_discord = buffer[row_num]['right_discord'].split(';')
		right_discord = [read for read in right_discord if read not in to_delete[row_num]['right']]
		buffer[row_num]['right_discord'] = ";".join(right_discord)		

		# removed reads go to 'discordant_and_chimeric'
		buffer[row_num]['discordant_and_chimeric'] = [f"{read}_left" for read in to_delete[row_num]['left']]
		buffer[row_num]['discordant_and_chimeric'] += [f"{read}_right" for read in to_delete[row_num]['right']]
		buffer[row_num]['discordant_and_chimeric'] = ";".join(buffer[row_num]['discordant_and_chimeric'])

	return buffer

def find_multiple_discordant(buffer):
	"""
	a read pair might be discordant about two or more integration sites.
	For example, if a read pair crosses the right border of one integration and the left
	border of a nearby integration, both reads will be mapped to the virus
	and therefore this pair won't be detected as an integration.

	Deal with discordant pairs that cross multiple integrations in one of two ways:
		1. if a pair is discordant about the right side of one integration and the left side
		of a nearby integration, then both reads are mapped to the virus and this pair should
		be removed from both.  Keep track of these in a different category ('fake_discord')
		2. if a pair crosses the right side of one integration and both sides of a nearby integration,
		then one read maps to virus (in the first integration) and the other maps to host (after the second),
		and therefore this pair should be reported as an integration.  However, if we want to score based
		on integration position, we need to decide which integration this pair 'belongs' to.  Keep
		these reads in a separate category ('multiple_discord') for all the integrations that they cross,
		to reflect the fact that they 'belong' to multiple integrations
	"""
	# keep track of which reads we've already seen
	seen = dict()

	# for reads that we find at multiple junctions, 
	# keep track of which junctions so we can work out what to do with that read
	multiples = dict()

	for i, row in enumerate(buffer):
		# check reads found around left side
		for read in row['left_discord'].split(';'):
			if read == '':
				continue
			if read in seen.keys():
				# if this is the first double, add the entry from seen
				if read not in multiples:
					multiples[read] = []
					multiples[read].append(seen[read])

				# add entry to multiples for this time we found the read
				multiples[read].append({'int_id' : row['id'], 'side' : 'left' , 'buffer_row' : i})

			else:
				seen[read] = {'int_id' : row['id'], 'side' : 'left', 'buffer_row' : i}
		# check right side
		for read in row['right_discord'].split(';'):
			if read == '':
				continue
			if read in seen.keys():
				# if this is the first double, add the entry from seen
				if read not in multiples:
					multiples[read] = []
					multiples[read].append(seen[read])

				# add entry to multiples for this time we found the read
				multiples[read].append({'int_id' : row['id'], 'side' : 'right', 'buffer_row' : i})
			else:
				seen[read] = {'int_id' : row['id'], 'side' : 'right', 'buffer_row' : i}


		# add extra keys to dicts 
		row['multiple_discord'] = []
		row['fake_discord'] = []

	# deal with reads crossing multiple integrations
	for read in multiples:

		# if the side of the first junction matches the side of the last junction
		# this is a 'multiple_discord'
		sides = [find['side'] for find in multiples[read]]
		if sides[0] == sides[-1]:
			new_type = 'multiple_discord'
		else:
			new_type = 'fake_discord'

		for find in multiples[read]:

			# find row and side
			buffer_row = find['buffer_row']
			old_type = f"{find['side']}_discord"

			# remove from list of reads of old type
			reads = buffer[buffer_row][old_type].split(";")
			reads.remove(read)
			buffer[buffer_row][old_type] = ";".join(reads)

			# add to list of new type
			buffer[buffer_row][new_type].append(f"{read}_{find['side']}")

	# for each row, join lists of 'multiple_discord' and 'fake_discord'
	for row in buffer:
		row['multiple_discord'] = ";".join(row['multiple_discord'])
		row['fake_discord'] = ";".join(row['fake_discord'])

	return buffer
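# Worked example of the classification above (illustrative integration ids, not from the source):
# a pair found at the right side of int 1 and the left side of int 2 gives sides
# ['right', 'left'] -> first != last -> 'fake_discord' (both reads map to virus, so the pair
# is not detectable); a pair found at the right side of int 1 and then both sides of int 2
# gives sides ['right', 'left', 'right'] -> first == last -> 'multiple_discord'.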

def times_already_found(read, multiples):
	"""
	check how many times a read has already been found when checking for reads discordant about multiple integration sites
	"""	
	if read in multiples:
		return len(multiples[read])
	else:
		return 0

def find_reads_crossing_ints(buffer, samfile, args, window_width):
	"""
	find reads crossing the integration site, and add them to the row
	reads crossing the integration site can be chimeric about the left or right junction
	or discordant about the left or right junction
	if they're discordant about both junctions, they actually go from host sequence to host sequence
	and	therefore aren't actually discordant
	"""
	for row in buffer:

		# get information about integration site location
		chr = row['chr']
		left_start = int(row['leftStart']) 
		left_stop = int(row['leftStop'])
		right_start = int(row['rightStart']) 
		right_stop = int(row['rightStop'])

		assert left_start >= 0 and right_start >= 0

		# find chimeric reads
		left_chimeric = get_chimeric(chr, left_start, left_stop, samfile, args.soft_threshold, buffer)
		right_chimeric = get_chimeric(chr, right_start, right_stop, samfile, args.soft_threshold, buffer)

		row['left_chimeric'] = ";".join(left_chimeric)
		row['right_chimeric'] = ";".join(right_chimeric)

		# find discordant read pairs	
		left_discord = get_discordant(chr, left_start, left_stop, samfile, args.discordant_threshold, window_width, buffer)
		right_discord = get_discordant(chr, right_start, right_stop, samfile, args.discordant_threshold, window_width, buffer)

		# if a read is both chimeric and discordant, chimeric takes priority 
		# (but it shouldn't be both if the clipping threshold is the same for both read types)
		left_chimeric, left_discord = remove_chimeric_from_discord(left_chimeric, left_discord)
		right_chimeric, right_discord = remove_chimeric_from_discord(right_chimeric, right_discord)

		left_chimeric, right_discord = remove_chimeric_from_discord(left_chimeric, right_discord)
		right_chimeric, left_discord = remove_chimeric_from_discord(right_chimeric, left_discord)


		row['left_discord'] = ";".join(left_discord)
		row['right_discord'] = ";".join(right_discord)

	return buffer	

def get_ints_in_window(buffer, window_width, reader, reader_handle):
	"""
	get a list of integrations (dicts corresponding to one row from the int-info file)
	that are within window_width of each other
	"""	

	assert len(buffer) == 1

	# get position and chromosome from buffer
	start = int(buffer[0]['rightStop'])
	chr = buffer[0]['chr']

	# get more integrations until we're not in the window anymore or we're at the end of the file
	# to avoid having to go back a line, save the first row that isn't added in 'next_buffer'
	while True:	

		# get next row
		try:
			row = next(reader)
		except StopIteration:
			next_buffer = []
			break

		# compare previous integration with this integration
		prev_start = start
		prev_chr = chr
		start =  int(row['leftStart'])
		chr = row['chr']

		# check if next row is a window width away
		if (start - prev_start < window_width) and prev_chr == chr:
			# add to buffer
			buffer.append(row)
			start = int(row['rightStop'])
		else:
			# don't add the row but this will be the start of the next buffer
			next_buffer = [row]
			break

	return buffer, next_buffer	

def get_discordant(chr, start, stop, samfile, threshold, window_width, buffer):
	"""
	Get any discordant read pairs which cross an integration site
	In other words, get pairs where one mate is mapped on the host side, and the other on the virus side
	A read is considered mapped if it has at most threshold (default 20) unmapped bases

	This includes any pairs where the integration site falls within threshold (default 20) bases 
	of the end of the read (for a read mapped on the left of the integration), 
	or within threshold bases of the start of the read for a read on the mapped on the right
	of the integration

	Avoid an exhaustive search by extracting only the reads in a window around the integration site
	Set this window based on the mean length and standard deviation of fragment size used in simulation
	and the fraction of the fragment length distribution we want to cover.  Find discordant
	pairs by finding pairs for which an integration (start, stop) falls between the 20th
	base from the end of the left read and the 20th base of the right read

	The current criterion in discordant.pl is that a pair must have one read mapped and the other
	unmapped to be considered a discordant read pair.  To be considered 'mapped', a read must
	have 20 or fewer unmapped bases.  To be considered 'unmapped', a read must have
	fewer than 20 mapped bases.

	Note that a read pair might have one integration fall between read1 and read2, but read1 or read2
	might also cross a second integration.  This pair is therefore both discordant about one integration, and
	also one member of the pair is chimeric.  A consequence of this is that one read maps only to vector or host, 
	but the other maps to both.  This discordant pair cannot currently be detected, since
	the pipeline currently detects discordant read-pairs only if one read is mapped to vector and
	not host, and vice versa for the other read.  However, this pair really is evidence
	for integration at the first site (as a discordant read-pair), so include it in the
	output as such.
	"""

	reads = []

	# extract read pairs in the desired window
	# pysam numbering is 0-based, with the only exception being the region string in the fetch() and pileup() methods. 
	window_start = start - window_width
	if window_start < 1:
		window_start = 1

	window_stop = stop + window_width
	if window_stop > samfile.get_reference_length(chr):
		window_stop = samfile.get_reference_length(chr) 
	if window_stop == window_start:
		window_stop += 1

	for read1, read2 in read_pair_generator(samfile, f"{chr}:{window_start}-{window_stop}"):

		# check mate is mapped
		if read1.is_unmapped or read2.is_unmapped:
			continue
		# check reference for this read is the same as mate
		if read1.reference_name != read2.reference_name:
			continue
		# if this read is forward, mate must be reverse and vice versa
		if (read1.is_reverse == read2.is_reverse):
			continue

		# if the integration site falls between left_boundary and right_boundary
		# (which are coordinates within the reference)
		# this pair crosses the integration site
		if read1.is_reverse is False:
			left_boundary = get_boundary(read1, threshold, side = "right")
			right_boundary =  get_boundary(read2, threshold, side = "left")
		else:
			left_boundary = get_boundary(read2, threshold, side = "right")
			right_boundary =  get_boundary(read1, threshold, side = "left")

		# if left_boundary is greater than right_boundary, reads overlap
		if left_boundary >= right_boundary:
			continue

		assert left_boundary is not None
		assert right_boundary is not None
		assert left_boundary < right_boundary

		if within(start, stop, left_boundary, right_boundary):

			reads.append(read1.qname)
# 			
# 			# TODO - need to decide if integrations should be excluded on the basis
# 			# below (if it's not the case that one read is mapped to host and the other to virus)
# 			# these events can't be currently detected in the pipeline, but are real evidence
# 			# of integration, so for now include them in the output
# 
# 			r1_mapped = get_mapped_ref(read1, buffer, threshold)
# 			r2_mapped = get_mapped_ref(read2, buffer, threshold)
# 			
# 			assert r1_mapped['host'] or r1_mapped['virus']
# 			assert r2_mapped['host'] or r2_mapped['virus']
# 			
# 			if r1_mapped['host'] != r2_mapped['host'] and r1_mapped['virus'] != r2_mapped['virus']:
# 				reads.append(read1.qname)

	return reads

def get_mapped_ref(read, buffer, threshold):
	"""
	figure out whether this read will be mapped to the host, the vector/virus, or both
	returns a dict with the keys 'host' and 'virus' (True or False depending on whether
	the read is mapped to each) and 'int' (ids of integrations whose viral portion the read overlaps)
	"""

	assert read.is_unmapped is False

	read_mapped = {'host':False, 'virus':False, 'int' : []}

	# get first and last position to which read is mapped in reference
	first = read.get_reference_positions()[0]
	last =  read.get_reference_positions()[-1]

	prev_start = 0
	for row in buffer:
		# figure out if we need to include the ambiguous bases or not
		if row['juncTypes'].split(',')[0] == 'gap':
			left_host_junc = int(row['leftStart'])
			left_virus_junc = int(row['leftStop'])
		else:
			left_host_junc = int(row['leftStop'])
			left_virus_junc = int(row['leftStart'])
		if row['juncTypes'].split(',')[1] == 'gap':
			right_virus_junc = int(row['rightStart'])
			right_host_junc = int(row['rightStop'])
		else:
			right_virus_junc = int(row['rightStop'])
			right_host_junc = int(row['rightStart'])	

		# if read is between the start of the chromosome and the start of the integration
		if intersects(first, last, prev_start, left_host_junc):
			# check we have at least threshold bases mapped
			if check_threshold(read, prev_start, left_host_junc, threshold):
				read_mapped['host']  = True
		# if read is between the left and right junctions
		if intersects(first, last, left_virus_junc, right_virus_junc):
			if check_threshold(read, left_virus_junc, right_virus_junc, threshold):
				read_mapped['virus'] = True
				read_mapped['int'].append(row['id'])
		prev_start = right_host_junc

		# if the prev_start is to the right of the position of the last mapped base in the read
		# then we don't need to continue
		if prev_start > last:
			break


	# check if read is between the end of the last integration and the end of the chromosome
	# don't know what the end of the chromosome is, so just use a large number (genome_length)
	if intersects(first, last, prev_start, genome_length):
		if check_threshold(read, prev_start, genome_length, threshold):
			read_mapped['host'] = True

	return read_mapped	
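# Illustrative example (positions made up, not from the source): for a read whose aligned
# reference positions span an integration's left junction with at least `threshold` mapped
# bases on the host side and on the virus side, get_mapped_ref() returns
# {'host': True, 'virus': True, 'int': [<id of that integration>]}, which is exactly the
# condition get_chimeric() below uses to call a read chimeric.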

def get_chimeric(chr, start, stop, samfile, threshold, buffer):
	"""
	find reads that cross an interval defined as chr:start-stop in samfile
	the interval must be at least threshold bases from the start and end of the read
	"""
	reads = []

	# get reads that cross interval
	# pysam numbering is 0-based, with the only exception being the region string in the fetch() and pileup() methods. 
	# The same is true for any coordinates passed to the samtools command utilities directly, such as pysam.fetch().
	for read in samfile.fetch(chr, start, stop + 1):

		# check that interval is at least threshold bases from either end of the read
		mapped = get_mapped_ref(read, buffer, threshold)
		assert (mapped['host'] or mapped['virus'])

		if mapped['host'] is True and mapped['virus'] is True:

			reads.append(read.query_name + read_num(read))

	return reads

def read_num(read):
	"""
	return '/1' if read is R1, '/2' if read is R2, or empty string otherwise
	"""
	if read.is_read1 is True:
		return "/1"
	elif read.is_read2 is True:
		return "/2"
	else:
		return ""

def check_threshold(read, start, stop, threshold):
	""""
	check that there are least threshold bases that map to an interval (defined by start and stop)
	"""
	rstart = read.get_reference_positions()[0]
	# need to account for 0-based numbering (stop already has this accounted for)
	rstop = read.get_reference_positions()[-1] + 1
	assert intersects(rstart, rstop, start, stop)

	if (rstop - start) < threshold:
		return False
	if (stop - rstart) < threshold:
		return False

	return True

def window_size(mean_frag_len, sd_frag_len, window_frac):
	"""
	to avoid exhaustive search for discordant reads, 
	work out window size for extracting reads when finding discordant read pairs
	based on mean and standard deviation of fragment length distribution,
	and fraction of this distribution we want to cover

	For example, if the mean fragment length is 500 bp, the standard deviation is 30 bp, and we want 
	to cover 0.99 of this distribution, the window size would be 570 bp, which accounts for 99% of 
	the fragment sizes in this distribution (one-tailed)

	"""

	# get one-tailed value which contains window_frac of fragments
	upper = norm.ppf(window_frac, loc = mean_frag_len, scale = sd_frag_len)

	return int(round(upper))
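# Quick numeric check of the example in the docstring (a sketch, relying only on
# scipy.stats.norm, which is already imported above):
#   norm.ppf(0.99, loc=500, scale=30) ~= 569.8, so window_size(500, 30, 0.99) -> 570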

def get_boundary(read, threshold, side = "left"):
	"""
	get first position in reference that is at least threshold bases from the 
	start or end of the read and isn't None

	if side is 'left', return the 0-based position after threshold mapped bases from the 
	start of the read
	if side is 'right', return the 0-based position before threshold mapped bases from the
	end of the read
	"""
	assert isinstance(threshold, int)
	assert threshold >= 0
	assert threshold <= read.qlen
	assert side in ['left', 'right']

	aligned_pairs = read.get_aligned_pairs()

	if side == "left":
		# if we want to look on the right hand side, we need to look backwards 
		# through the aligned pairs
		aligned_pairs = list(reversed(aligned_pairs))
		# python numbering is zero-based and half-open
		threshold -= 1 


	# iterate over bases in aligned_pairs, starting from the threshold value
	for pair in aligned_pairs[threshold:]:
		# check the base is aligned
		if (pair[1] is not None):
			return pair[1]

def within(start1, stop1, start2, stop2):
	"""
	compare two intervals, each with a start and stop value
	return true if the first interval is completely within the second

	use half-open intervals, so [8, 8) is not within [5, 8) 
	within(8, 8, 5, 8) => False
	within(6, 6, 5, 8) => True
	within(5, 8, 5, 8) => True
	within(4, 6, 5, 8) => False
	within(5, 9, 5, 8) => False
	"""	
	assert start1 <= stop1
	assert start2 <= stop2
	assert isinstance(start1, int)
	assert isinstance(start2, int)
	assert isinstance(stop1, int)
	assert isinstance(stop2, int)

	if start1 >= start2 and start1 < stop2:
		if stop1 - 1 >= start2 and stop1 - 1 < stop2:
			return True
	return False

def intersects(start1, stop1, start2, stop2):
	"""
	compare two intervals, each with a start and stop value
	return true if the first interval intersects the second

	use half-open intervals, so [2, 3) and [3, 4) don't intersect
	"""	
	assert start1 <= stop1
	assert start2 <= stop2
	assert isinstance(start1, int)
	assert isinstance(start2, int)
	assert isinstance(stop1, int)
	assert isinstance(stop2, int)

	# if first interval is completely to the left of the second interval
	if stop1 <= start2:
		return False
	# if first interval is completely to the right of the second interval
	if start1 >= stop2:
		return False

	# otherwise, they intersect
	return True	
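# A few illustrative cases for the half-open convention used by intersects():
#   intersects(2, 3, 3, 4) -> False   (adjacent intervals do not intersect)
#   intersects(2, 4, 3, 5) -> True    (overlap on [3, 4))
#   intersects(5, 8, 5, 8) -> True    (identical intervals)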

def read_pair_generator(bam, region_string=None):
    """
    https://www.biostars.org/p/306041/
    Generate read pairs in a BAM file or within a region string.
    Reads are added to read_dict until a pair is found.
    """
    read_dict = defaultdict(lambda: [None, None])
    for read in bam.fetch(region=region_string):
        if read.is_secondary or read.is_supplementary:
            continue
        qname = read.query_name
        if qname not in read_dict:
            if read.is_read1:
                read_dict[qname][0] = read
            else:
                read_dict[qname][1] = read
        else:
            if read.is_read1:
                yield read, read_dict[qname][1]
            else:
                yield read_dict[qname][0], read
            del read_dict[qname]
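# Minimal usage sketch for the generator above (the BAM path and region are placeholders,
# not from the source; the file must be coordinate-sorted and indexed):
#   with pysam.AlignmentFile("example.bam") as bam:
#       for r1, r2 in read_pair_generator(bam, "chr1:10000-20000"):
#           print(r1.query_name)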

def remove_chimeric_from_discord(chimeric, discord):
	"""
	check for read ids that are in both chimeric and discord - remove them from discord if they're in both
	"""
	# chimeric reads have /1 or /2 added
	to_delete = []
	chimeric_tmp = [read[:-2] for read in chimeric]
	for read in discord:
		if read in chimeric_tmp:
			print(f"  removed a read that was in both chimeric and discord: {read}")
			to_delete.append(read)

	# remove reads flagged for deletion
	discord = [read for read in discord if read not in to_delete]

	return chimeric, discord

if __name__ == "__main__":
	main(argv[1:])

kGWASflow is a Snakemake workflow for performing k-mers-based GWAS. (v1.2.3)

import csv
import os, glob, shutil
import re
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from qmplot import manhattanplot
import natsort
from natsort import natsorted
import seaborn as sns
import pysam

if __name__ == "__main__":

    ## Logging
    with open(snakemake.log[0], "w") as f:
      sys.stderr = sys.stdout = f

      ## Read the alignment data 
      align_kmers_sam = pd.read_table(snakemake.input[0], 
                                      sep='\t', comment='@', header=None,
                                      usecols=[0,2,3], names=['kmer_id', 'chr', 'bp'])

      ## Preparing the data for plotting
      align_kmers_sam['kmer'] = align_kmers_sam['kmer_id'].str.split('_').str[0]
      align_kmers_sam['p_value'] = align_kmers_sam['kmer_id'].str.split('_').str[1]
      align_kmers_sam['p_value'] = align_kmers_sam['p_value'].astype(float)
      align_kmers_sam['bp'] = align_kmers_sam['bp'].astype(int)

      # Sort the data by chromosome and chromosome position
      align_kmers_sam_sorted = align_kmers_sam.sort_values(by=["chr", "bp"], key=natsort.natsort_keygen())

      # Get colors for manhattan plot
      colors = sns.color_palette("colorblind").as_hex()
      colors_2 = sns.color_palette("husl").as_hex()

      # Make a column of minus log10 p-values
      align_kmers_sam_sorted['minuslog10pvalue'] = -np.log10(align_kmers_sam_sorted.p_value)

      ## Get min & max minus log10 p-values for y axis limits
      y_max = align_kmers_sam_sorted['minuslog10pvalue'].max()
      y_min = align_kmers_sam_sorted['minuslog10pvalue'].min()
      print("y_max: " + str(y_max))
      print("y_min: " + str(y_min))
      ## Check if only one chromosome is provided for the manhattan plot
      num_of_chrs = len(pd.unique(align_kmers_sam_sorted['chr']))

      # Define a function to extract chromosome names and lengths from a SAM file header
      def extract_chromosome_info(sam_file):
        """
        Extract chromosome names and lengths from a SAM file header and return as a Pandas DataFrame with columns "chr" and "bp".
        """
        chromosome_info = {} # dictionary to store chromosome names and lengths
        pattern = re.compile(r'^([Cc][Hh][Rr])?\d*[XYM]?$') # chromosome names pattern

        with pysam.AlignmentFile(sam_file, "r") as sam: # open SAM file
            for header_line in sam.header["SQ"]: # iterate over SQ header lines
                chromosome_name = header_line["SN"] # get chromosome name
                length = header_line["LN"] # get chromosome length
                if chromosome_name.startswith("chr"): # if chromosome name starts with "chr"
                    name = chromosome_name # use name as is
                else: # if chromosome name does not start with "chr"
                    match = pattern.match(chromosome_name) # check name against the pattern
                    if match: # if chromosome name matches pattern
                        # Convert name to "chrX" format
                        name = "chr" + re.sub(r'^[Cc][Hh][Rr]', '', chromosome_name)
                    else: # if chromosome name does not match pattern
                        continue # skip chromosome
                chromosome_info[name] = length # add chromosome name and length to dictionary

        # Convert dictionary to DataFrame
        df = pd.DataFrame(chromosome_info.items(), columns=["chr", "bp"])

        return df # return the DataFrame
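      # Illustrative shape of the frame returned above (chromosome names and lengths are
      # examples only, not taken from this workflow's data):
      #       chr         bp
      #  0   chr1  248956422
      #  1   chr2  242193529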

      # Plotting the manhattan plot
      print("Plotting...")

      # Set font sizes
      tick_fontsize = snakemake.params["tick_fontsize"]
      label_fontsize = snakemake.params["label_fontsize"]
      title_fontsize = snakemake.params["title_fontsize"]

      # Set the figure dpi
      dpi = snakemake.params["dpi"]

      ## If only one chromosome is provided, plot the k-mer's position on
      ## that chromosome on the x axis
      if num_of_chrs == 1:
        f, ax = plt.subplots(figsize=(18, 9), facecolor='w', edgecolor='k')
        manhattanplot(data=align_kmers_sam_sorted,
                      snp="kmer_id",
                      chrom="chr",
                      CHR= pd.unique(align_kmers_sam_sorted['chr']),
                      color=colors_2,
                      pos="bp",
                      pv="p_value",
                      suggestiveline=None,  # Turn off suggestiveline
                      genomewideline=None,  # Turn off genomewideline
                      xticklabel_kws={"rotation": "vertical"},
                      ax=ax,
                      s = snakemake.params["point_size"],
                      clip_on=False)
        ax.set_ylim([y_min-0.5, y_max+1]) # Set y axis limits

        # Set x axis tick interval
        xtick_interval = snakemake.params["xtick_interval"]

        # Calculate the minimum and maximum of the data, rounded down/up to the nearest multiple of the x-tick interval
        min_val = align_kmers_sam_sorted['bp'].min() // xtick_interval * xtick_interval
        max_val = (align_kmers_sam_sorted['bp'].max() // xtick_interval + 1) * xtick_interval

        # Generate the tick locations
        xtick_locs = np.arange(min_val, max_val, xtick_interval)

        f.suptitle('k-mer Based GWAS Manhattan Plot for ' + snakemake.params["pheno"], fontsize=title_fontsize)
        plt.xlabel('Chromosome: ' + pd.unique(align_kmers_sam_sorted['chr'])[0], fontsize=label_fontsize)
        plt.ylabel(r"$-log_{10}{(P)}$", fontsize=label_fontsize) 
        plt.xticks(xtick_locs, fontsize = tick_fontsize)
        plt.yticks(fontsize = tick_fontsize)
        plt.tight_layout()

      ## If more than one chromosome is provided, use all chromosomes
      if num_of_chrs > 1:

        # Extract chromosome names and lengths from the SAM file header
        chrom_info_tab = extract_chromosome_info(snakemake.input[0])
        chrom_names = natsorted(chrom_info_tab['chr'].tolist())

        # Add extra chromosome names and lengths to the data frame
        align_kmers_sam_with_all_chrom = pd.concat([align_kmers_sam, chrom_info_tab], ignore_index=True)
        align_kmers_sam_with_all_chrom = align_kmers_sam_with_all_chrom[align_kmers_sam_with_all_chrom['chr'].isin(chrom_names)]

        # Sort the data by chromosome and chromosome position
        align_kmers_sam_with_all_chrom_sorted = align_kmers_sam_with_all_chrom.sort_values(by=["chr", "bp"], key=natsort.natsort_keygen()) 
        # Fill NaN values with 1
        align_kmers_sam_with_all_chrom_sorted['p_value'] = align_kmers_sam_with_all_chrom_sorted['p_value'].fillna(1) 

        # Keep the chromosome-length placeholder rows (p_value == 1) so they can be plotted below with zero opacity, extending the x axis to the full chromosome lengths
        extra_rows = align_kmers_sam_with_all_chrom_sorted[align_kmers_sam_with_all_chrom_sorted['p_value'] == 1]

        f, ax = plt.subplots(figsize=(18, 9), facecolor='w', edgecolor='k')
        manhattanplot(data=align_kmers_sam_with_all_chrom_sorted,
                      snp="kmer_id",
                      chrom="chr",
                      color=colors,
                      pos="bp",
                      pv="p_value",
                      suggestiveline=None,  # Turn off suggestiveline
                      genomewideline=None,  # Turn off genomewideline
                      xticklabel_kws={"rotation": "vertical"},
                      ax=ax,
                      s = snakemake.params["point_size"],
                      clip_on=True)
        ax.set_ylim([y_min-2, y_max+1]) # Set y axis limits
        plt.scatter(extra_rows['bp'], -np.log10(extra_rows['p_value']), alpha=0)

        # Calculate the cumulative distances from the start of each chromosome and store them in a list
        chrom_ends = chrom_info_tab['bp'].cumsum().tolist()

        # Plot the vertical lines for the end of each chromosome
        for end_position in chrom_ends:
          plt.axvline(x=end_position, color='grey', linestyle='--', alpha=0.2)

        # Add a caption to the right bottom corner of the outside of the plot
        caption = '*Vertical dashed lines indicate chromosome boundaries.'
        ax.text(1.0, -0.2, caption, transform=ax.transAxes, ha='right', va='bottom', fontsize=12)

        # Set the title of the plot
        f.suptitle('k-mer Based GWAS Manhattan Plot for ' + snakemake.params["pheno"], fontsize=title_fontsize) 

        # Set the x and y axis labels
        plt.xlabel('Chromosome', fontsize=label_fontsize) # Set the x axis label
        plt.ylabel(r"$-log_{10}{(P)}$", fontsize=label_fontsize) # Set the y axis label
        plt.xticks(fontsize = tick_fontsize)
        plt.yticks(fontsize = tick_fontsize)

        # Adjust the plot layout
        plt.tight_layout() 

      ## Saving the plot as pdf
      print("Plotting is done. Saving the plot...")
      plt.savefig(snakemake.output["manhattan_plot"], dpi=dpi)

Evaluating the robustness of polygenic adaptation to the choice of the GWAS cohort

import pysam
from optparse import OptionParser

parser = OptionParser("$prog [options]")
parser.add_option("-i", "--infile", dest="infile", help="Input GWAS+freq file", default=None, type="string")
parser.add_option("-p", "--minp", dest="minp", help="Minimum p-value allowed", default=1, type="float")
parser.add_option("-o", "--outfile", dest="outfile", help="Output file", default=None, type="string")
parser.add_option("-s", "--sep", dest="sep", help="Number of valid SNPs separating each printed SNP", default=None, type="int")
(options,args) = parser.parse_args()


infile = pysam.Tabixfile(options.infile, mode='r')
outfile = open(options.outfile,"w")

# the header iterator yields all header lines; keep only the last one (the column header)
readheader = infile.header
for x in readheader:
    header = x

header = header.split("\t")
header = list(filter(lambda a: a != "REF" and a != "ALT" and a != "ANC" and a != "DER" and a!= "SE" and a != "PVAL", header))
header = "\t".join(header)

outfile.write(header + '\n')

i=0
for line in infile.fetch():
    fields = line.strip().split("\t")
    Pval = float(fields[9])
    if Pval > options.minp:
        if i == options.sep:
            i = 0
            finalvec = fields[0:3]+[fields[7]]+fields[10:]
            outfile.write("\t".join(finalvec)+ '\n')
        i += 1
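
# Note on the input format (an assumption based on the pysam.Tabixfile call above, not
# stated elsewhere in this snippet): the GWAS+freq file must be bgzip-compressed and
# tabix-indexed, e.g. with something like `bgzip gwas_freqs.txt` followed by
# `tabix -s 1 -b 2 -e 2 gwas_freqs.txt.gz` (file name and column numbers are placeholders).
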
import pysam
import numpy as np
from optparse import OptionParser
import subprocess

parser = OptionParser("$prog [options]")
parser.add_option("-i", "--infile", dest="infile", help="Input GWAS+freq file", default=None, type="string")
parser.add_option("-b", "--bedfile", dest="bedfile", help="Bed file with LD partitions (def None)", default=None, type="string")
parser.add_option("-p", "--maxp", dest="maxp", help="Maximum p-value allowed", default=1, type="float")
parser.add_option("-o", "--outfile", dest="outfile", help="Output file", default=None, type="string")
(options,args) = parser.parse_args()


bedfile = open(options.bedfile,"r")
infile = pysam.Tabixfile(options.infile, mode='r')
outfile = open(options.outfile,"w")
logmaxp = np.log10(options.maxp)

# the header iterator yields all header lines; keep only the last one (the column header)
readheader = infile.header
for x in readheader:
    header = x

header = header.split("\t")
header = list(filter(lambda a: a != "REF" and a != "ALT" and a != "ANC" and a != "DER" and a!= "SE" and a != "PVAL", header))
header = "\t".join(header)
outfile.write(header + '\n')

for line in bedfile:
    regfields = line.strip("\n").split("\t")
    regchr = regfields[0].strip()
    regstart = int(regfields[1])
    regend = int(regfields[2])

    CurrLogPval = 0
    CurrPval = None
    CurrBest = None
    try:
        for elem in infile.fetch(regchr,regstart-1,regend):
            fields = elem.strip().split("\t")
            Pval = float(fields[9])
            if Pval == 0.0:
                Pval = 1e-20
            LogPval = np.log10(Pval)
            if LogPval < CurrLogPval:
                CurrBest = fields[0:3]+[fields[7]]+fields[10:]
                CurrLogPval = LogPval
                CurrPval = Pval
    except ValueError:
        # the region is not present in the tabix index; skip it
        continue

    if CurrLogPval < logmaxp and CurrBest is not None:
        outfile.write("\t".join(CurrBest) + '\n')

Multiplex Accurate Sensitive Quantitation (MASQ) analysis and primer design

import os,sys
import numpy as np
import gzip
from collections import Counter, defaultdict
import fileinput
import operator
import pickle
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import editdistance
import pysam
from masq_helper_functions import tabprint
from masq_helper_functions import reverseComplement
from masq_helper_functions import convert_cigar_string
from masq_helper_functions import setup_logger, load_snv_table, write_snv_table

########################################################################
# Start timer
t0 = time.time()
# Setup log file
log = setup_logger(snakemake.log,'check_loci')
log.info('Starting process')

########################################################################
# INPUT FILES AND PARAMETERS
log.info('Getting input files and parameters from snakemake object')
# WGS path to bam
# Now allows multiple BAMs
if isinstance(snakemake.input.bam,list) and isinstance(snakemake.params.wgs_name,list):
    WGS_BAM = snakemake.input.bam
    WGS_NAME = snakemake.params.wgs_name
elif isinstance(snakemake.input.bam,str) and isinstance(snakemake.params.wgs_name,str):
    WGS_BAM = [snakemake.input.bam]
    WGS_NAME = [snakemake.params.wgs_name]
else:
    log.error("WGS_BAM and WGS_NAME should both be lists or both be strings")

# # Input SNV table
SNV_table = open(snakemake.input.SNV_table,'r')

########################################################################
# OUTPUT FILES
log.info('Getting output files from snakemake object')
# Output plots
list_of_plot_files = snakemake.output.plots
# Updated SNP file
updated_SNV_table = snakemake.output.new_SNV_table

# Make plot folder if it doesn't exist
plotfolder = os.path.dirname(list_of_plot_files[0])
os.makedirs(plotfolder, exist_ok=True)

########################################################################
# Plotting colors
BASE_COLORS = ["#00ABBA",
               "#9ACA3C",
               "#F26421",
               "#672D8F"]

########################################################################
## load the genome sequence from a pickle file
log.info('Loading reference genome pickle')
seq_pickle = snakemake.params.wgs_ref
seq_dic = pickle.load(open(seq_pickle, 'rb'))
log.info('Done loading reference genome pickle')

########################################################################
# Load the input SNV table
log.info('Loading SNV table')
snv_info = load_snv_table(SNV_table)
for key,value in snv_info.items():
    log.debug(key)
    log.debug(tabprint(value))
log.info('Done loading SNV table')

########################################################################

# Process input file to extend it with strand info and get coordinates of seq
log.info('Parsing specific SNV info fields')

target_info = []
target_locs_array = [list(map(int, x.split(";") ) ) if len(x)>0 else [] for x in snv_info['target_locs']]

for i,loc in enumerate(snv_info['loc']):
    chrom=snv_info['chr'][i]
    pos = int(snv_info['posi'][i])
    target_locs = target_locs_array[i]
    aml_loc = target_locs[0]
    log.debug('Position: %d' % pos)
    log.debug('AML Loc: %d' % aml_loc)
    int_seq = snv_info['trimmed-target-seq'][i]
    length = len(int_seq)

    if chrom=="0":
        strand="+"
        start=1
        end=100
        targets=target_locs
    elif ('strand' in snv_info.keys()) and ('fragment-start' in snv_info.keys()) and ('fragment-end' in snv_info.keys()) and ('add-targets' in snv_info.keys()):
        strand=snv_info['strand'][i]
        start=int(snv_info['fragment-start'][i])
        end=int(snv_info['fragment-end'][i])
        add_targets=[int(x) for x in str(snv_info['add-targets'][i]).split(";") if len(x)>0]
        targets=target_locs+add_targets
    else:
        log.info('Inferring strand, start, and end from match to reference genome')
        # identifying start and end genomic positions of the targeted region
        # and getting target sequence from ref genome
        top_start = pos - 1 - aml_loc
        top_end = top_start + length
        top_match = seq_dic[chrom][top_start:top_end]
        # if the target sequence was on the bottom strand...
        bottom_start = pos - (length - aml_loc)
        bottom_end = bottom_start + length
        bottom_match = reverseComplement(seq_dic[chrom][bottom_start:bottom_end])

        log.debug('Target seq : %s' % int_seq)
        log.debug('Top match  : %s' % top_match)
        log.debug('Btm match  : %s' % bottom_match)

        # check which strand the sequence came from and set coordinates
        if top_match == int_seq:
            log.debug('Locus %s: positive strand' % (loc))
            strand = "+"
            start = top_start
            end = top_end
            targets = target_locs
        else:
            log.debug('Locus %s: negative strand' % (loc))
            strand = "-"
            start = bottom_start
            end = bottom_end
            targets = [length - x - 1 for x in target_locs]

    target_info.append([chrom, strand, start, end, targets])

########################################################################
# Now go through WGS data and add non-ref bases to the input file...
log.info('Finding additional variant positions from WGS BAM')
BASES = ["A", "C", "G", "T"]
#sample_names = [WGS_NAME] # EDIT TO ALLOW MULTIPLE BAMS...
BASE2INT = dict([x[::-1] for x in enumerate(BASES)])
MIN_QUAL = 20
MIN_MAP = 40
buff = 30
region_buffer = 30


all_targets = []
loc_ind = 0  # counter for which loci we're on
#
# load each region from the target info read in and processed
for chrom, strand, true_start, true_end, true_targets in target_info:

    if chrom=="0": # not human / mouse sequence (GFP seq for example)
        all_targets.append([])
        image_filename = list_of_plot_files[loc_ind]
        fig = plt.figure(figsize=(20, 18))
        plt.savefig(image_filename, dpi=200, facecolor='w', edgecolor='w',
                    papertype=None, format=None,
                    transparent=False)
        plt.close()
        loc_ind += 1
    else:

        log.debug('Locus %d' % (loc_ind+1))
        image_filename = list_of_plot_files[loc_ind]
        loc_ind += 1

        # add buffer to target sequence on either side and update values
        start = true_start - region_buffer
        end = true_end + region_buffer
        targets = [x + region_buffer for x in true_targets]
        # get local sequence
        local_seq = seq_dic[chrom][start:end]
        log.debug('Chrom: %s' % chrom)
        log.debug('start: %d' % start)
        log.debug('end: %d' % end)
        log.debug('Local_seq: %s' % local_seq)
        L = len(local_seq)
        # also in integer form
        local_int = np.array([BASE2INT[x] for x in local_seq])
        # initialize for keeping track of variants at this position
        has_something = np.zeros(L, dtype=bool)

        # for each sample
        fig = plt.figure(figsize=(20, 18))
        locus_string = "%s:%d-%d %s" % (chrom, start, end, strand)
        fig.suptitle(locus_string, fontsize=20)
        ind = 1
        for sample_name,sample_bam_fn in zip(WGS_NAME,WGS_BAM):
            log.info('Plotting locus %d' % loc_ind)
            # Set up the plot
            ax1 = fig.add_subplot(len(WGS_NAME), 1, ind)
            ax2 = ax1.twinx()
            ax1.tick_params(axis='both', labelsize=15)
            ax2.tick_params(axis='both', labelsize=15)
            ind += 1

            # open the bam file
            sample_bam = pysam.AlignmentFile(sample_bam_fn, "rb")

            # for each position of interest
            # get an iterator that walks over the reads that include our event
            read_iterator = sample_bam.fetch(
                chrom, start - buff, end + buff)  # ref has chr1,chr2,etc.

            # store as pairs
            read_names = []
            read_pair_dict = dict()
            for read in read_iterator:
                try:
                    read_pair_dict[read.qname].append(read)  # if pair is there
                except KeyError:  # first read seen with this query name
                    read_pair_dict[read.qname] = [read]  # if pair is absent
                    read_names.append(read.qname)  # keys to dictionary

            N = len(read_names)  # number of read pairs in this region
            # store an integer at every position for each read(pair) in interval
            # matrix (number of reads (N) by number of positions (end-start))
            read_stack = np.zeros(shape=(N, end - start), dtype=int)
            # get all read pairs
            for rcounter, read_name in enumerate(read_names):
                reads = read_pair_dict[read_name]
                for read in reads:
                    read_sequence = read.query_sequence
                    read_qualities = read.query_qualities
                    # get_aligned_pairs:
                    # returns paired list of ref seq position and read seq position
                    for rindex, pindex in read.get_aligned_pairs():
                        # rindex and pindex return ALL positions, including soft-clipped
                        # rindex: position in read (1-150 for example)
                        # pindex: position in reference genome
                        # if there's a base in the read
                        # and we are in range
                        if rindex is not None and pindex is not None:
                            # Separated these lines, added pindex None check
                            if pindex >= start and pindex < end:
                                # compute the base score
                                base_score = read_qualities[rindex]
                                # if it's good enough
                                if base_score >= MIN_QUAL:
                                    # check if there is already a value in place
                                    current = read_stack[rcounter, pindex - start]
                                    base = read_sequence[rindex]
                                    if base == "N":
                                        baseint = 0
                                    else:
                                        # A-1, C-2, G-3, T-4
                                        baseint = BASE2INT[base] + 1
                                    # if there is no value, store the value
                                    if current == 0:
                                        read_stack[rcounter, pindex - start] = baseint
                                    else:
                                        # if there is a mismatch between the two reads,
                                        # set value back to 0
                                        # this value is just for the 2 paired reads
                                        if current != baseint:
                                            read_stack[rcounter, pindex - start] = 0
            summary = []
            # iterating over numpy array - by rows
            # transpose first to iterate over positions
            for x in read_stack.transpose():
                # gets counts of N,A,C,G,T per position as array
                # append to summary list
                summary.append(np.bincount(x, minlength=5))
            # convert summary to array, fills by row
            # drop the N count, and transpose again
            # .T transposes, only if num dimensions>1
            summary = np.array(summary)[:, 1:].T
            # now we have base (4) by position array as summary
            # base_cover: coverage at each position (sum of A/C/G/T counts)
            base_cover = np.sum(summary, axis=0)
            # base_ratio: A/C/G/T counts over total coverage
            # aka frequency of each base
            base_ratio = summary.astype(float) / np.maximum(1, base_cover)
            # update has something
            # EDIT - 0 coverage is not has_something??
            has_something += ( (base_ratio[local_int, np.arange(L)] < 0.9) & (base_cover>3) ) # reference ratio is less than 0.9
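            # Worked example (made-up counts, for illustration only): at a position with
            # reference base G and counts A=3, C=0, G=17, T=0, base_cover is 20 and the
            # reference ratio is 17/20 = 0.85 < 0.9 with coverage > 3, so has_something is
            # set; with counts A=2, G=18 the ratio is 0.90, which is not < 0.9, so the
            # position is left unflagged.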

            ########################################################################
            # Plot variants in each region

            # plot the coverage first
            ax1.plot(base_cover, color='k', label='coverage', alpha=0.5)
            # draw lines for boundaries of event
            # and targets
            if strand == "+":
                ax2.axvline(region_buffer-0.5, color='g', lw=2)
                ax2.axvline(L-region_buffer-0.5, color='g', linestyle='--', lw=2)
            else:
                ax2.axvline(region_buffer-0.5, color='g', linestyle='--', lw=2)
                ax2.axvline(L-region_buffer-0.5, color='g', lw=2)
            for pos in targets:
                ax1.axvline(pos, color='r', lw=2)

            # Plot colored circle for each base at position vs. frequency
            for BASE_COLOR, BASE, yvals in zip(BASE_COLORS, BASES, base_ratio):
                ax2.plot(yvals, 'o', markersize=13, label=BASE, color=BASE_COLOR)
                base_filter = local_int == BASE2INT[BASE]  # same as ref base
                x = np.arange(L)
                # label ref bases with  black circle
                ax2.plot(x[base_filter], yvals[base_filter], 'o',
                         markersize=13, mfc="None", mec='black', mew=2)
            ax1.set_ylabel(sample_name, fontsize=25)

            # for non-ref sites...
            # label number of reads for non-ref base in red
            for hs_ind in np.where(has_something)[0]:
                base_counts, total = summary[:, hs_ind], base_cover[hs_ind]
                ref_base = local_int[hs_ind]
                for bind in range(4):
                    # loop through A/C/G/T, find the non-zero counts
                    if bind != ref_base and base_counts[bind] > 0:
                        # text_string = r"$\frac{%d}{%d}$" % (
                            # base_counts[bind],total)
                        # text_string = r"%d/%d" % (base_counts[bind], total)
                        text_string = r"%d" % (base_counts[bind])
                        # add the count to the plot
                        ax2.text(hs_ind + 1, base_ratio[bind, hs_ind], text_string,
                                 fontsize=20, color='red', weight='semibold')
            ax2.set_xlim(-10, 10+L)
            ax2.set_ylim(0, 1)
            ax2.legend(loc="upper center", numpoints=1, ncol=4,
                       fontsize=18, columnspacing=0.8, handletextpad=-0.2)
            ax1.legend(loc="lower center", numpoints=2,
                       fontsize=18, columnspacing=0.5)

        plt.tight_layout(rect=(0, 0, 1, 0.98))
        plt.savefig(image_filename, dpi=200, facecolor='w', edgecolor='w',
                    transparent=False)
        plt.close()

    ########################################################################
        # Add non-ref het/homo sites to "target" list
        rlen = true_end - true_start
        new_targets = np.where(has_something)[0] - region_buffer
        if strand == "-":
            new_targets = true_end - true_start - new_targets - 1

        # new_targets>=0 (in original target region)
        new_targets = new_targets[(new_targets >= 0) * (new_targets < rlen)]
        all_targets.append(new_targets)

    ########################################################################

# Add new het/homo non-ref sites to original input file
log.info('Writing updated SNV info table')
outfile = open(updated_SNV_table, 'w')

snv_info['add-targets']=[]
snv_info['strand']=[]
snv_info['fragment-start']=[]
snv_info['fragment-end']=[]
print("Start loop")
for i, (new_targets, more_info) in enumerate(zip(all_targets, target_info)):
    print(i)
    prev_targets = list(map(int, snv_info['target_locs'][i].split(";")))
    add_targets = [x for x in new_targets if x not in prev_targets]
    snv_info['add-targets'].append(";".join(list(map(str, add_targets))))
    snv_info['strand'].append(more_info[1])
    snv_info['fragment-start'].append(more_info[2])
    snv_info['fragment-end'].append(more_info[3])
print("End loop")
print(snv_info)

write_snv_table(snv_info,outfile)

########################################################################
# Close files
outfile.close()

########################################################################
# End timer
t1 = time.time()
td = (t1 - t0) / 60
log.info("Done in %0.2f minutes" % td)

Tweaking a ChIP-seq workflow for ATAC-seq

import os
import errno
import pysam
import argparse

############################################
############################################
## PARSE ARGUMENTS
############################################
############################################

Description = 'Remove singleton reads from a paired-end BAM file, i.e. if read1 is present in the BAM file without read2, and vice versa.'
Epilog = """Example usage: bampe_rm_orphan.py <BAM_INPUT_FILE> <BAM_OUTPUT_FILE>"""

argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)

## REQUIRED PARAMETERS
argParser.add_argument('BAM_INPUT_FILE', help="Input BAM file sorted by name.")
argParser.add_argument('BAM_OUTPUT_FILE', help="Output BAM file sorted by name.")

## OPTIONAL PARAMETERS
argParser.add_argument('-fr', '--only_fr_pairs', dest="ONLY_FR_PAIRS", help="Only keeps pairs that are in FR orientation on same chromosome.",action='store_true')
args = argParser.parse_args()

############################################
############################################
## HELPER FUNCTIONS
############################################
############################################

def makedir(path):

    if not len(path) == 0:
        try:
            #!# AVI: changed because of race conditions if directory exists, original code:  os.makedirs(path)
            os.makedirs(path, exist_ok=True)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

############################################
############################################
## MAIN FUNCTION
############################################
############################################

def bampe_rm_orphan(BAMIn,BAMOut,onlyFRPairs=False):

    ## SETUP DIRECTORY/FILE STRUCTURE
    OutDir = os.path.dirname(BAMOut)
    makedir(OutDir)

    ## COUNT VARIABLES
    totalReads = 0; totalOutputPairs = 0; totalSingletons = 0; totalImproperPairs = 0

    ## ITERATE THROUGH BAM FILE
    EOF = 0
    SAMFin = pysam.AlignmentFile(BAMIn,"rb")  #!# AVI: changed to new API from pysam.Samfile
    SAMFout = pysam.AlignmentFile(BAMOut, "wb",header=SAMFin.header)   #!# AVI: changed to new API from pysam.Samfile
    currRead = next(SAMFin)     #!# AVI: adapted for the use of the iterator, original code: currRead = SAMFin.next()

    for read in SAMFin.fetch(until_eof=True): #!# AVI: added .fetch() to explicitly use new API
        totalReads += 1
        if currRead.qname == read.qname:
            pair1 = currRead; pair2 = read

            ## FILTER FOR READS ON SAME CHROMOSOME IN FR ORIENTATION
            if onlyFRPairs:
                if pair1.tid == pair2.tid:

                    ## READ1 FORWARD AND READ2 REVERSE STRAND
                    if not pair1.is_reverse and pair2.is_reverse:
                        if pair1.reference_start <= pair2.reference_start:
                            totalOutputPairs += 1
                            SAMFout.write(pair1)
                            SAMFout.write(pair2)
                        else:
                            totalImproperPairs += 1

                    ## READ1 REVERSE AND READ2 FORWARD STRAND
                    elif pair1.is_reverse and not pair2.is_reverse:
                        if pair2.reference_start <= pair1.reference_start:
                            totalOutputPairs += 1
                            SAMFout.write(pair1)
                            SAMFout.write(pair2)
                        else:
                            totalImproperPairs += 1

                    else:
                        totalImproperPairs += 1
                else:
                    totalImproperPairs += 1
            else:
                totalOutputPairs += 1
                SAMFout.write(pair1)
                SAMFout.write(pair2)

            ## RESET COUNTER
            try:
                totalReads += 1
                currRead = next(SAMFin)   #!# AVI: adapted for the use of the iterator, original code: currRead = SAMFin.next()
            except StopIteration:
                EOF = 1

        ## READS WHERE ONLY ONE OF A PAIR IS IN FILE
        else:
            totalSingletons += 1
            pair1 = currRead
            currRead = read

    if not EOF:
        totalReads += 1
        totalSingletons += 1
        pair1 = currRead

    ## CLOSE ALL FILE HANDLES
    SAMFin.close()
    SAMFout.close()

    LogFile = os.path.join(OutDir,'%s_bampe_rm_orphan.log' % (os.path.basename(BAMOut[:-4])))
    SamLogFile = open(LogFile,'w')
    SamLogFile.write('\n##############################\n')
    SamLogFile.write('FILES/DIRECTORIES')
    SamLogFile.write('\n##############################\n\n')
    SamLogFile.write('Input File: ' + BAMIn + '\n')
    SamLogFile.write('Output File: ' + BAMOut + '\n')
    SamLogFile.write('\n##############################\n')
    SamLogFile.write('OVERALL COUNTS')
    SamLogFile.write('\n##############################\n\n')
    SamLogFile.write('Total Input Reads = ' + str(totalReads) + '\n')
    SamLogFile.write('Total Output Pairs = ' + str(totalOutputPairs) + '\n')
    SamLogFile.write('Total Singletons Excluded = ' + str(totalSingletons) + '\n')
    SamLogFile.write('Total Improper Pairs Excluded = ' + str(totalImproperPairs) + '\n')
    SamLogFile.write('\n##############################\n')
    SamLogFile.close()

############################################
############################################
## RUN FUNCTION
############################################
############################################

bampe_rm_orphan(BAMIn=args.BAM_INPUT_FILE,BAMOut=args.BAM_OUTPUT_FILE,onlyFRPairs=args.ONLY_FR_PAIRS)
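
The script expects a name-sorted BAM so that mates appear on consecutive lines. A minimal sketch of preparing the input with pysam's samtools wrapper and then invoking the script (file names are hypothetical):

import pysam

# name-sort a coordinate-sorted BAM so read pairs become adjacent
pysam.sort("-n", "-o", "sample.nsorted.bam", "sample.sorted.bam")

# then, from the shell:
#   python bampe_rm_orphan.py -fr sample.nsorted.bam sample.orphan_rm.bam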

Workflow for transcript expression analysis.

import pandas as pd
import seaborn as sns
#import matplotlib
#matplotlib.use("Agg")
import matplotlib.pyplot as plt
#from pysam import VariantFile

#daten = pd.read_csv('sleuth_matrix.csv', sep='\t')
sleuth_matrix = pd.read_csv(snakemake.input[0], sep='\t')
sns.boxenplot(data=sleuth_matrix, scale="linear")
plt.title("Boxenplots of the (normalized) counts for all samples")

#plt.savefig('boxenplot.svg')
plt.savefig(snakemake.output[0])
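
The commented-out lines hint at running this headlessly (e.g. on a cluster node without a display); in that case the backend must be selected before pyplot is imported. A minimal reordering sketch, with the rest of the snippet unchanged:

import matplotlib
matplotlib.use("Agg")          # must run before pyplot is imported
import matplotlib.pyplot as plt
# ... read the sleuth matrix, draw the boxenplot and save the figure as above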

RADSeq tool with Snakemake workflow integration for analysis of RAD sequencing data. (latest)

import sys
import gzip
import pysam
from graph_tool.all import *
import numpy as np
import graph_operations
import likelihood_operations

sys.stderr = open(snakemake.log[0], "w")

sample = snakemake.wildcards.get('sample')

# input
bam = snakemake.input.get("bam")
reads = snakemake.input.get("fastq")

# required output
vcf = open(snakemake.output.get("vcf", ""), "w")
vcf_header = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{}\n".format(sample)
vcf.write(vcf_header)

# optional output
graph_xml = snakemake.output.get("graph_xml", "")
output_figure = snakemake.output.get("graph_figure", "")
connected_components_xml = snakemake.output.get("connected_components_xml", "")
connected_components_figure = snakemake.output.get("connected_components_figure", "")
dir_subgraphs = snakemake.output.get("components_subgraphs", "")

# params for ploidy, threshold, noise and cluster-size
threshold = snakemake.params.get("threshold_max_edit_distance", "")
ploidy = snakemake.params.get("ploidy", "")  # a diploid chromosome set is determined for the prototype
noise_small = snakemake.params.get("treshold_seq_noise_small", "")
noise_large = snakemake.params.get("treshold_seq_noise_large", "")
cluster_size = snakemake.params.get("treshold_cluster_size", "")

# get reads format
format = ""  # format of reads
if reads.endswith((".fastq", ".fq", ".fastq.gz", ".fq.gz")):
    format = "fastq"
if reads.endswith((".fasta", ".fa", ".fasta.gz", ".fa.gz")):
    format = "fasta"

# default value of threshold for maximum distance value
if threshold == "":
    threshold = 23
else:
    threshold = int(threshold)

# default value of noise
if not noise_small:
    noise_small = 1
if not noise_large:
    noise_large = 1

# init graph
graph = Graph(directed=True)

# set graph properties
for (name, prop, prop_type) in graph_operations.set_properties():
    if name.startswith("g_"):
        vars()[name] = graph.new_graph_property(prop_type)
        graph.graph_properties[prop] = vars()[name]
    if name.startswith("v_"):
        vars()[name] = graph.new_vertex_property(prop_type)
        graph.vertex_properties[prop] = vars()[name]
    if name.startswith("e_"):
        vars()[name] = graph.new_edge_property(prop_type)
        graph.edge_properties[prop] = vars()[name]

# set ploidy, sequencing error and heterozygosity
graph.graph_properties["ploidy"] = ploidy

graph.graph_properties["ins-error-rates"] = snakemake.params.get("err_ins", "")
graph.graph_properties["del-error-rates"] = snakemake.params.get("err_del", "")

graph.graph_properties["subst-heterozygosity"] = snakemake.params.get("heterozyg_subst", "")
graph.graph_properties["ins-heterozygosity"] = snakemake.params.get("heterozyg_ins", "")
graph.graph_properties["del-heterozygosity"] = snakemake.params.get("heterozyg_del", "")

# create first empty node for graph
node = graph.add_vertex()
v_id[node] = "{idx}_{sample}".format(idx=0, sample=sample)
v_name[node] = ""
v_seq[node] = ""
v_q_qual[node] = ""

# add reads as vertices of the graph
if reads.endswith(".gz"):
    with gzip.open(reads, "rt") as _reads:
        graph = graph_operations.set_nodes(graph, _reads, format, sample)
else:
    with open(reads, "r") as _reads:  # "rU" mode was removed in Python 3.11
        graph = graph_operations.set_nodes(graph, _reads, format, sample)

# add edges from all-vs-all alignment of reads (please see rule minimap2)
verbose = pysam.set_verbosity(0)  # https://github.com/pysam-developers/pysam/issues/939
bam = pysam.AlignmentFile(bam, "rb")
pysam.set_verbosity(verbose)
for read in bam.fetch(until_eof=True):
    graph = graph_operations.set_edges(graph, read, threshold)
bam.close()
graph.remove_vertex(0)


# write log files
sys.stderr.write(
    "graph construction summary for sample {}:"
    "\n nodes:\t{}\n edges:\t{}\n".format(sample, graph.num_vertices(), graph.num_edges()))

graph_operations.save_and_draw_graph(graph, xml_out=graph_xml,
                                     figure_out=output_figure)

# In case the subgraph directory is expected as optional output but some samples do not produce
# subgraphs, create the directory anyway; the empty directories for these samples are preserved and not removed by snakemake.
graph_operations.set_dir(dir_subgraphs)

# step 1: extract connected components
message = "CONNECTED COMPONENTS based on the graph construction from the edit distances (minimap2)"
connected_components = graph_operations.get_components(graph, message, snakemake.wildcards.get('sample'),
                                                       dir_subgraphs, connected_components_xml,
                                                       connected_components_figure, v_color="component-label")

graph = None
loc_nr = 0
for (comp, comp_nr) in connected_components:
    # step 2: likelihood of allele fractions
    alleles = likelihood_operations.get_candidate_alleles(comp, comp.vertices(), noise_small, noise_large, cluster_size)
    n = len(alleles)
    vafs_candidates = list(likelihood_operations.get_candidate_vafs(n, ploidy))
    nodes = list([comp.vertex_index[node] for node in comp.vertices()])
    read_allele_likelihoods = {}

    # calculate the likelihood over ALL reads
    vafs_likelihoods = [likelihood_operations.calc_vafs_likelihood(comp, vafs, nodes, alleles, read_allele_likelihoods) for vafs in
                        vafs_candidates]
    if not vafs_likelihoods:  # case empty list, e.g. if the treshold-seq-noise value is set too large
        continue
    max_likelihood_idx = np.argmax(vafs_likelihoods)

    # obtain ML solution for step 2
    max_likelihood_vafs = vafs_candidates[max_likelihood_idx]

    # write to log file
    sys.stderr.write(
        "\n\nStats for component {} in sample {} with {} alleles and ploidy = {}:\n".format(comp_nr, sample,
                                                                                            n, ploidy))
    sys.stderr.write("\n\tMaximum vafs likelihood:\n")
    for vaf, allele in zip(max_likelihood_vafs, alleles):
        sys.stderr.write("\t\t{} for allele {}\n".format(vaf, allele))

    # step 3: likelihood of loci given alleles and fractions
    loci_candidates = list(likelihood_operations.get_candidate_loci(n, ploidy, max_likelihood_vafs))
    loci_likelihoods = {}

    loci_likelihoods = [
        likelihood_operations.calc_loci_likelihoods(comp, max_likelihood_vafs, alleles, loci, loci_likelihoods)
        for loci in loci_candidates
    ]
    max_likelihood_idx = np.argmax(loci_likelihoods)
    max_likelihood_loci = loci_candidates[max_likelihood_idx]

    # write to log file
    sys.stderr.write("\n\tMaximum loci likelihood calculations:\n")
    sys.stderr.write("\t\tloci_likelihoods:\n\t\t\t{}\n\t\tmax_likelihood_idx:\n\t\t\t{}"
                     "\n\t\tmax_likelihood_loci:\n\t\t\t{}\n".format(loci_likelihoods, max_likelihood_idx,
                                                                     max_likelihood_loci))

    # step 4: results output to VCF file
    loci_alleles = likelihood_operations.get_sorted_loci_alleles(alleles, max_likelihood_loci)
    gt_indices = likelihood_operations.get_gt_indices(alleles, max_likelihood_loci, loci_alleles)

    for gt_idx_locus in list(set(gt_indices)):
        vcf.write("{chrom}\t{pos}\t.\t{ref}\t{alt}\t.\t.\t.\tGT\t{gt}\n".format(chrom="LOC{}".format(loc_nr),
                                                                             pos="1", ref=loci_alleles[0],
                                                                             alt=', '.join(loci_alleles[1:]) if len(loci_alleles) > 1 else ".",
                                                                             gt=likelihood_operations.get_genotype(gt_idx_locus)))
        loc_nr += 1
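
The edges of the read graph come from the all-vs-all minimap2 alignment, filtered by threshold_max_edit_distance. graph_operations.set_edges is not shown here, so the following is only a hypothetical sketch of what such a filter could look like, using the standard NM tag as the edit distance:

def passes_edit_distance(read, threshold):
    """Keep an alignment only if its edit distance (NM tag) is within the threshold."""
    if read.is_unmapped or not read.has_tag("NM"):
        return False
    return read.get_tag("NM") <= threshold

# e.g. kept = [r for r in bam.fetch(until_eof=True) if passes_edit_distance(r, threshold)]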

Use an ensemble of variant callers to call variants from ATAC-seq data (v0.3.3)

import sys
import pysam
import argparse
import warnings
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score
from sklearn.metrics import roc_curve



def phred(vals):
    """ apply the phred scale to the vals provided """
    with np.errstate(divide='raise'):
        try:
            return -10*np.log10(1-vals)
        except FloatingPointError:
            # a probability of 1 would give an infinite phred score, so cap it at 30
            # (alternative: -10*np.ma.log10(1-vals).filled(-3) to fill infinite values)
            return np.float64(30)

def plot_line(lst, show_discards=False):
    plt.clf()
    roc = np.copy(lst.T)
    roc[1] = phred(roc[1])
    max_val = phred(max(roc[2])/(max(roc[2])+1))
    # discard inf or na cols
    inf_or_na_cols = np.isinf(roc).any(axis=0) | np.isnan(roc).any(axis=0)
    # warn the user if we're discarding the majority of points
    discarded = np.sum(inf_or_na_cols)/roc.shape[1]*100
    if (not show_discards and discarded != 0) or discarded >= 50:
        warnings.warn("Discarding NaN or Inf points ({}% of points)".format(discarded))
    roc = roc[:,~(inf_or_na_cols)]
    # perform a simple linear regression
    p = np.polyfit(roc[0], roc[1], 1)
    r_squared = 1 - (sum((roc[1] - (p[0] * roc[0] + p[1]))**2) / ((len(roc[1]) - 1) * np.var(roc[1], ddof=1)))
    p = np.poly1d(p)
    # plot the points and the line
    plt.scatter(roc[0], roc[1], color='r', label="_nolegend_")
    if max(roc[0]) <= 1:
        plt.xlabel("RF Probability")
    elif max(roc[0]) <= np.pi/2:
        plt.xlabel("Reverse Arcsin of RF Probability")
    else:
        plt.xlabel("Phred-Scaled RF Probability")
    plt.ylabel("Phred-Scaled Precision (QUAL)")
    plt.plot(
        roc[0],
        p(roc[0]),
        label=str(p)+"\nr-squared: "+str(round(r_squared, 2))+ \
            ("\ndiscarded: "+str(int(discarded))+"%" if show_discards else "")
    )
    plt.hlines(max_val, min(roc[0]), max(roc[0]), colors='g', linestyles='dashed')
    plt.legend(frameon=False, loc='lower right')
    plt.tight_layout()

def eqbin_mean(grp, log=True, pseudo=True, discards_ok=False, inverse=False):
    if inverse:
        return np.arcsin(grp.mean())
    else:
        if log:
            if discards_ok or not pseudo:
                return phred(grp).mean()
            else:
                return phred(grp.sum()/(len(grp) + pseudo))
        else:
            return grp.mean()

def tpr_probs(df, bins=15, eqbin=True, log=True, pseudo=True, discards_ok=False, inverse=False):
    """ bin the sites and calculate an accuracy (predicted positive value) for that bin """
    # retrieve only predicted positives
    df = df[df['varca~CLASS:']>=0.5]
    if eqbin:
        bin_size = int(len(df)/bins)
        # create bins (ie each grp) and add a single false positive to it so we don't get Inf
        lst = np.array([
            (
                eqbin_mean(grp['varca~prob.1'], log, pseudo, discards_ok, inverse),
                precision_score(
                    np.append(grp['varca~truth'].values, 0) if pseudo else grp['varca~truth'],
                    np.append(grp['varca~CLASS:'].values, 1) if pseudo else grp['varca~CLASS:']
                ),
                grp['varca~prob.1'].size
            )
            for grp in (df.iloc[i:i+bin_size] for i in range(0,len(df)-bin_size+1,bin_size))
        ])
    else:
        if log:
            df = df.copy()
            df['varca~prob.1'] = phred(df['varca~prob.1'])
        start = phred(0.5) if log else 0.5
        # get the end excluding inf values (in case log == True)
        end = df.loc[df['varca~prob.1'] != np.inf, 'varca~prob.1'].max()
        # create bins (ie each grp) and add a single false positive to it so we don't get Inf
        lst = np.array([
            (
                grp[1]['varca~prob.1'].mean(),
                precision_score(
                    np.append(grp[1]['varca~truth'].values, 0) if pseudo else grp[1]['varca~truth'],
                    np.append(grp[1]['varca~CLASS:'].values, 1) if pseudo else grp[1]['varca~CLASS:']
                ),
                grp[1]['varca~prob.1'].size
            )
            for grp in df.groupby(pd.cut(df['varca~prob.1'], pd.interval_range(start, end, bins)))
        ])
    return lst



def strip_type(caller):
    """
        strip the -indel or -snp from the end of a caller name
    """
    vartype = ''
    if caller.endswith('-snp'):
        caller = caller[:-len('-snp')]
        vartype = 'snp'
    elif caller.endswith('-indel'):
        caller = caller[:-len('-indel')]
        vartype = 'indel'
    # if there is still a dash, get everything after it
    i = caller.rfind('-')
    if i != -1:
        caller = caller[i+1:]
    return caller, vartype

def isnan(val):
    return type(val) is float and np.isnan(val)

def get_calls(prepared, callers=None, pretty=False):
    """
        get the alleles in each row of prepared at the location (CHROM/POS) of loc
        when choosing an alt allele, choose from the callers in the order given
    """
    # keep track of the contigs that we've seen
    contigs = set()
    # whether we've read the header yet
    read_header = False
    # iterate through each row in the df and check whether they match loc
    for chunk in prepared:
        # do some special stuff (parsing the header) on the very first iteration
        if not read_header:
            # if callers is None, retrieve the callers from the columns of the dataframe
            if callers is None:
                callers = [
                    caller[:-len('~ALT')] for caller in chunk.columns
                    if caller.endswith('~ALT') and not caller.startswith('pg-')
                ]
            # what types of variants are we dealing with? let's count how many
            # times they appear in the caller names
            vartypes = {'snp': 0, 'indel': 0}
            # also, let's retrieve the callers as a dictionary
            pretty_callers = {}
            for caller in callers:
                pretty_caller, vartype = strip_type(caller)
                # don't beautify the callers if the user didn't request it
                pretty_callers[caller] = pretty_caller if pretty else caller
                # keep track of how often each vartype appears
                if vartype in vartypes:
                    vartypes[vartype] += 1
            callers = pretty_callers
            # retrieve the first CHROM/POS location and yield whether we are reading indels or snps
            loc, predict = yield max(vartypes, key=vartypes.get)
            read_header = True
        # now iterate through each row (and also convert the POS column to an int)
        for idx, row in chunk.iterrows():
            # check if we already passed the row -- ie we either:
            # 1) moved onto a new contig or
            # 2) moved passed the position
            while (
                idx[0] != loc[0] and loc[0] in contigs
            ) or (
                idx[0] == loc[0] and idx[1] > loc[1]
            ):
                # return None if we couldn't find loc in the df
                loc, predict = yield None
            if idx == loc:
                # we found it!
                found = False
                # now, we must figure out which caller to get the alleles from
                for caller in callers:
                    ref, alt = row[caller+"~REF"], row[caller+"~ALT"]
                    # TODO: make this work for an arbitrary number of variant types for multilabel classification using the other CLASS values in classified
                    # right now, this only works if there's a single binary label
                    if not isnan(ref) and (
                        (isnan(alt) + predict) % 2
                    ):
                        found = True
                        break
                if found:
                    loc, predict = yield callers[caller], ref, alt
                else:
                    # if we know there is a variant here, but none of the other
                    # callers found it, just label it as a non-variant
                    # TODO: figure out the alt allele from inspecting the ref genome?
                    loc, predict = yield 'varca', row['REF'], float('nan')
            # save the current contig so that we know which ones we've seen
            contigs.add(idx[0])

def prob2qual(prob, vartype):
    # values are from linear model that we created from using the "-i" option
    if vartype == 'snp':
        return 0.6237*phred(prob)+8.075
    elif vartype == 'indel':
        return 0.8463*phred(prob)+2.724
    else:
        # we shouldn't ever encounter this situation, but just in case...
        return phred(prob)

def main(prepared, classified, callers=None, cs=1000, all_sites=False, pretty=False, vartype=None):
    """
        use the results of the prepare pipeline and the classify pipeline
        to create a VCF with all of the classified sites
    """
    # first, get a generator that can read through each call in the prepared df
    prepared = get_calls(
        pd.read_csv(
            prepared, sep="\t", header=0, index_col=["CHROM", "POS"],
            dtype=str, chunksize=cs, na_values=".",
            usecols=lambda col: col in ['CHROM', 'POS', 'REF'] or col.endswith('~REF') or col.endswith('~ALT')
        ), callers, pretty
    )
    # flush out the first item in the generator: the vartype
    if vartype is None:
        vartype = next(prepared)
    else:
        # if the user already gave us the vartype, then just discard this
        next(prepared)
    # also retrieve the classifications as a df
    classified = pd.read_csv(
        classified, sep="\t", header=0, index_col=["CHROM", "POS"],
        dtype={'CHROM':str, 'POS':int}, chunksize=cs, na_values="."
    )
    # keep track of how many sites in the classifications df we've had to skip
    skipped = 0
    # keep track of how many sites we skipped but were predicted to have a variant
    no_alts = 0
    # iterate through each site in the classifications df, get its alleles, and
    # then return them in a nice-looking dictionary
    for chunk in classified:
        for idx, row in chunk.iterrows():
            try:
                # get the alleles for this CHROM/POS location
                call = prepared.send((idx, row['varca~CLASS:']))
            except StopIteration:
                call = None
            # we found a variant but couldn't find an alternate allele!
            no_alts += not (call is None or (isnan(call[2]) + row['varca~CLASS:']) % 2)
            # check: does the site appear in the prepared pipeline?
            # and does this site have a variant?
            if call is None or (not all_sites and isnan(call[2])):
                skipped += 1
                continue
            caller, ref, alt = call
            qual = prob2qual(
                row['varca~prob.'+str(int(not isnan(alt)))], vartype
            )
            # construct a dictionary with all of the relevant details
            yield {
                'contig': str(idx[0]), 'start': idx[1], 'stop': idx[1]+len(ref),
                'qual': qual, 'alleles': (ref, "." if isnan(alt) else alt), 'info': {'CALLER':caller}
            }
    if skipped:
        warnings.warn(
            "Ignored {:n} classification sites that didn't have a variant.".format(skipped)
        )
    if no_alts:
        warnings.warn(
            "Ignored {:n} sites that we predicted to have variants but didn't appear in any of the callers.".format(no_alts)
        )

def write_vcf(out, records):
    """
        write the records to the output vcf
    """
    vcf = pysam.VariantFile(out, mode='w')
    # write the necessary VCF header info
    vcf.header.info.add("CALLER", 1, 'String', "The caller from which this site was taken")
    contigs = set()
    for rec in records:
        # handle pysam increasing the start and end sites by 1
        rec['start'] -= 1
        rec['stop'] -= 1
        # parse the record into a pysam.VariantRecord
        try:
            record = vcf.new_record(
                **rec, samples=None, id=None, filter=None
            )
        except ValueError:
            # add the contig if it hasn't already been added
            if rec['contig'] not in contigs:
                vcf.header.contigs.add(rec['contig'])
                contigs.add(rec['contig'])
            else:
                raise
            # now, try again
            record = vcf.new_record(
                **rec, samples=None, id=None, filter=None
            )
        # write the record to a file
        vcf.write(record)
    return vcf

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o", "--out", default=sys.stdout, help="the filename to save the vcf (or bcf) to"
    )
    parser.add_argument(
        "classified", type=Path, help="a sorted, results.tsv.gz file from the output of the classify pipeline"
    )
    parser.add_argument(
        "prepared", type=Path, nargs="?", default=sys.stdin, help="a sorted, merge.tsv.gz file from the prepare pipeline (if not supplied, this is read from stdin)"
    )
    parser.add_argument(
        '-c', "--callers", default=None, help="a comma separated list of the callers from which to choose alleles, supplied in order of priority (default: all of the callers in the file, in the order they appear)"
    )
    parser.add_argument(
        '-s', "--chunksize", type=np.uint32, default=100000, help="how many rows to read into memory at once (default: 100,000)"
    )
    parser.add_argument(
        '-a', '--all', action='store_true', help="whether to also write non-variant sites to create a gVCF (default: no)"
    )
    parser.add_argument(
        '-p', '--pretty', action='store_true', help="should caller names appear in the vcf by their pretty form (with all dashes intelligently removed) or their original caller ID form? (default: the original form)"
    )
    parser.add_argument(
        '-t', '--type', choices=['indel', 'snp'], default=None, help="whether to recalibrate QUAL values assuming your data are SNPs or indels (default: infer from callers)"
    )
    parser.add_argument(
        '-i', '--internal', action='store_true', help="For testing and internal use: recalibrate the QUAL scores (assumes varca~truth column exists in classified)"
    )
    args = parser.parse_args()

    callers = None
    if args.callers is not None:
        callers = args.callers.split(",")

    if not args.internal:
        import matplotlib
        matplotlib.use('Agg')
        vcf = write_vcf(args.out, main(args.prepared, args.classified, callers, args.chunksize, args.all, args.pretty, args.type))
    else:
        if not sys.flags.interactive:
            sys.exit("ERROR: You must run this script in python's interactive mode (using python's -i flag) when providing the -i flag to this script.")
        try:
            df = pd.read_csv(args.classified, sep="\t", header=0, index_col=["CHROM", "POS"], usecols=['CHROM', 'POS', 'varca~truth', 'varca~prob.1', 'varca~CLASS:'], low_memory=False).sort_values(by='varca~prob.1')
        except ValueError:
            df = pd.read_csv(args.classified, sep="\t", header=0, index_col=["CHROM", "POS"], usecols=['CHROM', 'POS', 'breakca~truth', 'breakca~prob.1', 'breakca~CLASS:'], low_memory=False).sort_values(by='breakca~prob.1')
            df.columns = ['varca~truth', 'varca~prob.1', 'varca~CLASS:']
        plt.ion()
        plot_line(tpr_probs(df))

A tool for generating bacterial genomes from metagenomes with nanopore long read sequencing

import sys
import os
import argparse
import pysam

fastaf = snakemake.input[0]
out = open(snakemake.output[0], 'w')

#knobs
smooth_gap_width = 150000
contig_edge_margin = 150000
min_smoothed_aln_len = 10000
min_aln_len = 5000

#align
print("Aligning...".format(t=str(snakemake.threads)))
os.system("nucmer -p {delta} -b 4000 -l 2000 --maxmatch {fa} {fa}".format( #-t {threads} back to mummer3, no more multithreading : (
	threads = str(snakemake.threads),
	delta = snakemake.params['delta'],
	fa = fastaf
))
os.system("show-coords -T {delta}.delta -L 2000 | sed '1,5d' > coords.tsv".format(
	delta = snakemake.params['delta']
)) #the 1,5d gets rid of the header and identity hit as well
print("Trimming genome...")

max_tiglen = 0

with open('coords.tsv') as coords:
	lines = coords.readlines()
	smoothed_lines = []
	if len(lines) > 0:
		aln_start_line = lines[0].strip().split("\t")
		prev_line = lines[0].strip().split("\t")

		#goal: identify corner-cutting parallel off-diagonals.
	#	print(prev_line)

		for l in lines[1:] + [lines[0]]: #just so we don't miss the last item
			s = l.strip().split("\t")
			#print(s)

			tigname = s[-1] #store the name of the tig.  this repeats nugatorily.

			if int(s[1]) > max_tiglen: #keep track of the largest alignment coordinate found, used as the tig length
				max_tiglen = int(s[1])

			if int(s[0]) > int(s[1]): #ignore inversions
	#				print('ignoring')
				continue

			if int(s[1]) - int(s[0]) < min_aln_len:
		#		print('ignoring')
				continue

			if abs(int(s[0]) - int(prev_line[1])) < smooth_gap_width and \
				abs(int(s[2]) - int(prev_line[3])) < smooth_gap_width:
			#	print("joining")
				pass
			else:
	#				print("terminating")
				newline = aln_start_line
				newline[1] = prev_line[1]
				newline[3] = prev_line[3]
				if int(newline[1]) - int(newline[0]) > min_smoothed_aln_len:
	#				print("storing")
					smoothed_lines.append(newline)
				aln_start_line = s
			prev_line = s


#print("output")
#for s in smoothed_lines:
#	print(s)

if len(smoothed_lines) > 0:
	#is it a corner cutting parallel off-diagonal?
	if int(smoothed_lines[0][0]) < contig_edge_margin and int(smoothed_lines[0][3]) > max_tiglen - contig_edge_margin:
		if int(smoothed_lines[-1][2]) < contig_edge_margin and int(smoothed_lines[-1][1]) > max_tiglen - contig_edge_margin:
			#out.write("It's overcircularized!")

			out.write(tigname + ":" + smoothed_lines[0][0] + '-' + smoothed_lines[-1][0] + "\n")

out.write("done\n")
tool / pypi

pysam

pysam is a Python module for reading, manipulating and writing genomic data sets. It is a lightweight wrapper of the htslib C-API and provides facilities to read and write SAM/BAM/VCF/BCF/BED/GFF/GTF/FASTA/FASTQ files, as well as access to the command-line functionality of the samtools and bcftools packages. The module supports compression and random access through indexing. It offers both a low-level wrapper around the htslib C-API, implemented in Cython, and a high-level API for convenient access to the data within standard genomic file formats. See: http://www.htslib.org https://github.com/pysam-developers/pysam http://pysam.readthedocs.org/en/stable
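
A minimal illustration of the two main entry points described above (file names are hypothetical):

import pysam

# SAM/BAM/CRAM access
with pysam.AlignmentFile("example.sorted.bam", "rb") as bam:
    mapped = sum(1 for read in bam.fetch(until_eof=True) if not read.is_unmapped)

# VCF/BCF access
with pysam.VariantFile("example.vcf.gz") as vcf:
    n_records = sum(1 for rec in vcf)

# the samtools/bcftools command-line functionality is exposed as well,
# e.g. pysam.index("example.sorted.bam")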