Workflow Steps and Code Snippets

457 tagged steps and code snippets that match keyword QIIME2.0

Automated pipeline for amplicon sequence analysis

import os
import subprocess
from sys import stdin
#import benchmark_utils
from benchmark_utils import countFasta

def complement(seq):
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'Y':'R', 'R':'Y','S':'S','W':'W','K':'M','M':'K','N':'N','B':'V','V':'B','D':'H','H':'D'} 
    bases = list(seq) 
    bases = [complement[base] for base in bases] 
    return ''.join(bases)


def reverse_complement(s):
    return complement(s[::-1])
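
#Quick sanity check (illustrative): reverse_complement("AAGC") -> "GCTT",
#and reverse_complement("ACGT") -> "ACGT" (it is its own reverse complement).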

#from Bio.Seq import Seq


primer_by_sample={}
uniq_primers={}
idx_fw_primer=-1   # default for qiime (col 3)
idx_rv_primer=-1  # new field 
idx_rv_revcomp_primer=-1
isRC = False
foundSample=False  
primer=""
if snakemake.config["primers"]["remove"].lower() == "metadata":
    with open(snakemake.input[1]) as mappingFile:
        l=0
        for line in mappingFile:
            l=l+1;
            columns = line.split('\t')
            #the header is always at row 1 and must contain these first 3 fields (qiime specs):
            #SampleID BarcodeSequence LinkerPrimerSequence Description
            if l==1 :
                c=0
                #Find target headers
                for col in columns:
                    if col == "ReversePrimer" or col == "LinkerPrimerSequenceReverse"  or col == "ReverseLinkerPrimerSequence"  or col == "RvLinkerPrimerSequence" or col == "ReversePrimerSequence" :
                        idx_rv_primer=c
                    elif col == "LinkerPrimerSequence":
                        idx_fw_primer=c
                    elif col == "ReverseLinkerPrimerSequenceRevCom"  or col == "ReversePrimerRevCom":
                        idx_rv_revcomp_primer=c
                        isRC=True
                    c=c+1
                if isRC:
                    idx_rv_primer=idx_rv_revcomp_primer 
            elif line.startswith(snakemake.params[4]):
                foundSample=True
                if idx_rv_primer != -1:
                    if isRC:
                        #fw_primer=columns[idx_fw_primer]
                        #rv_primer=columns[idx_rv_primer]
                        primer="-g "+columns[idx_fw_primer]+"..."+columns[idx_rv_primer]
                    else:
                        #fw_primer=columns[idx_fw_primer]
                        #rv_primer=reverse_complement(columns[idx_rv_primer])
                        primer="-g "+columns[idx_fw_primer]+"..."+reverse_complement(columns[idx_rv_primer])
                else:
                    #fw_primer=columns[idx_fw_primer]
                    primer="-g "+columns[idx_fw_primer]


    if not foundSample:
        print("\033[91m" +"No primers found for sample:"+ snakemake.params[4]+ " \033[0m")
        print("\033[91mPlease make sure to have the sample included in the mapping file: "+snakemake.input[1]+"  \033[0m")
        print("\033[91m Aborting the pipeline \033[0m")
        exit(1)

elif snakemake.config["primers"]["remove"].lower() == "cfg":
    primer="-g " + snakemake.config["primers"]["fw_primer"]
    if len(snakemake.config["primers"]["rv_primer"]) > 2:
        primer=primer+"..."+reverse_complement(snakemake.config["primers"]["rv_primer"]) 


discard = True
if "--discard-untrimmed" in snakemake.params[0]:
    extra=snakemake.params[0].replace("--discard-untrimmed","")
else: 
    extra=snakemake.params[0]
    discard = False

#This file will contain the untrimmed reads for the first pass
no_primer=" --untrimmed-output " + snakemake.params[2]+".tmp"

#The first cutadapt pass is identical whether the primers came from the metadata or the cfg,
#since the primer string was already assembled above.
subprocess.run( ["cutadapt  "+ primer +" "+ extra+" -o "+snakemake.output[0] + ".1 "+ no_primer +" " + snakemake.input[0]+ ">"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)


initialReads=countFasta(snakemake.input[0],False)
discardedReads=countFasta(snakemake.params[2]+".tmp",False)

#The "extra" var goes back to its original value: if the user wants to discard untrimmed reads,
#that option will be present in the final cutadapt command.
extra=snakemake.params[0]
#if we discarded reads
if discardedReads>0:
    #reverse complement the discarded reads
    subprocess.run( ["vsearch --fastx_revcomp "+ snakemake.params[2]+".tmp  --fastaout "+ snakemake.params[2]+".tmp2"],stdout=subprocess.PIPE, shell=True)
    if snakemake.config["primers"]["remove"].lower() == "metadata":
        if discard:
            #Run cutadapt on the discarded reads
            subprocess.run( ["cutadapt  "+ primer +" "+ extra+" -o "+snakemake.output[0] + ".2 " + snakemake.params[2]+".tmp2"+ ">>"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
        else:
            print("Running second cutadapt")
            print("cutadapt  "+ primer +" "+ extra+" -o "+snakemake.output[0] + ".2 --untrimmed-output "+ snakemake.output[0] + ".3 " + snakemake.params[2]+".tmp2"+ ">>"+ snakemake.params[5])
            subprocess.run( ["cutadapt  "+ primer +" "+ extra+" -o "+snakemake.output[0] + ".2 --untrimmed-output "+ snakemake.output[0] + ".3 " + snakemake.params[2]+".tmp2"+ ">>"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
            #reverse complement the untrimmed discarded reads
            subprocess.run( ["vsearch --fastx_revcomp "+ snakemake.output[0]+".3  --fastaout "+ snakemake.params[2]+".tmp3"],stdout=subprocess.PIPE, shell=True)
    else:
        if discard:
            subprocess.run( ["cutadapt "+ primer  +" "+extra+" -o "+snakemake.output[0] + ".2 " + snakemake.params[2]+".tmp2 >>"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
        else:
            subprocess.run( ["cutadapt "+ primer  +" "+extra+" -o "+snakemake.output[0] + ".2 --untrimmed-output "+ snakemake.output[0] + ".3 "  + snakemake.params[2]+".tmp2 >>"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
            subprocess.run( ["vsearch --fastx_revcomp "+ snakemake.output[0]+".3  --fastaout "+ snakemake.params[2]+".tmp3"],stdout=subprocess.PIPE, shell=True)

    if discard:        
        #Concatenate results
        subprocess.run( ["cat "+snakemake.output[0] + ".1 "+ snakemake.output[0] + ".2 > "+ snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
        #remove intermediate files: discarded reads first round, discarded reads RC, accepted reads first round, accepted reads second round
        subprocess.run( ["rm -f "+ snakemake.params[2]+".tmp "+ snakemake.params[2]+".tmp2 "+snakemake.output[0] + ".1 "+ snakemake.output[0] + ".2"],stdout=subprocess.PIPE, shell=True)
    else:
        #Concatenate results
        subprocess.run( ["cat "+snakemake.output[0] + ".1 "+ snakemake.output[0] + ".2 " + snakemake.params[2] + ".tmp3  > " + snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
        #remove intermediate files: discarded reads first round, discarded reads RC, accepted reads first round, accepted reads second round
        #subprocess.run( ["rm -f "+ snakemake.params[2]+".tmp "+ snakemake.params[2]+".tmp2 "+snakemake.output[0] + ".1 "+ snakemake.output[0] + ".2 "+ snakemake.params[2]+".tmp3"],stdout=subprocess.PIPE, shell=True)
else: #no untrimmed reads to evaluate, just rename the file
    print("No untrimmed output!!!!")
    subprocess.run( ["mv "+snakemake.output[0] + ".1 "+ snakemake.output[0]],stdout=subprocess.PIPE, shell=True)

survivingReads=countFasta(snakemake.output[0],False)
prc = float((survivingReads/initialReads)*100)
prc_str = "{:.2f}".format(prc)

with open(snakemake.params[1], "w") as primers:
        primers.write(primer)
        primers.close()

print("\033[91m This step removes primers \033[0m")
print("\033[93m Total number of initial reads: " + str(initialReads) + " \033[0m")
print("\033[93m Total number of surviving reads: " + str(survivingReads) + " = "+ prc_str + "% \033[0m")
print("\033[93m You can find cutadapt's log file at: " + snakemake.params[5] +"\n \033[0m")
if snakemake.config["interactive"] != "F" or prc < snakemake.config["primers"]["min_prc"]:
    print("\033[92m Do you want to continue?(y/n): \033[0m")
    user_input = stdin.readline() #READS A LINE
    user_input = user_input[:-1]
    if user_input.upper() == "N" or user_input.upper() == "NO":
        subprocess.run( ["rm -f "+ snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
        exit(1)
else:
    print("\033[93m" +" Interactive mode off \033[0m")
    print("\033[93m" +" Removing primers...\033[0m")


if not os.path.exists(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files"):
    os.makedirs(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files")
subprocess.run( ["cat "+ snakemake.output[0]+"| grep '^>' | cut -f1 -d' ' | sed 's/>// ; s/_[0-9]*$//' |  sort | uniq -c | awk '{print $2\"\\t\"$1}' > " + snakemake.params[3]+".tmp1"],stdout=subprocess.PIPE, shell=True)
subprocess.run( ["cat "+ snakemake.input[0]+"| grep '^>' | cut -f1 -d' ' | sed 's/>// ; s/_[0-9]*$//' |  sort | uniq -c | awk '{print $2\"\\t\"$1}'| awk -F'\t' 'NR==FNR{h[$1]=$2;next} BEGIN{print \"Sample\\tReads_before_cutadapt\\tSurviving_reads\\tPrc_surviving_reads\"}{if(h[$1]){print $1\"\\t\"h[$1]\"\\t\"$2\"\\t\"($2/h[$1])*100\"%\"}else{print $1\"\\t\"$2\"\\t0\\t0%\"}}' - "+snakemake.params[3]+".tmp1 > "+ snakemake.params[3]],stdout=subprocess.PIPE, shell=True)
os.remove(snakemake.params[3]+".tmp1")
exit(0)
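
For orientation, the first cutadapt pass assembled in the snippet above boils down to a shell command of roughly the following shape; the file names here are placeholders, the real paths come from snakemake.input, snakemake.output and snakemake.params:

#Illustrative only: placeholder paths, primer string as built above (forward primer linked to the
#reverse-complemented reverse primer).
import subprocess
cmd = ("cutadapt -g FWD_PRIMER...RV_PRIMER_REVCOMP "
       "-o trimmed.1.fasta --untrimmed-output untrimmed.tmp "
       "reads.fasta > cutadapt.log")
subprocess.run([cmd], stdout=subprocess.PIPE, shell=True)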
import os
import subprocess
from benchmark_utils import countFasta
from sys import stdin

def complement(seq):
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'Y':'R', 'R':'Y','S':'S','W':'W','K':'M','M':'K','N':'N','B':'V','V':'B','D':'H','H':'D'} 
    bases = list(seq) 
    bases = [complement[base] for base in bases] 
    return ''.join(bases)


def reverse_complement(s):
    return complement(s[::-1])

primer_by_sample={}
uniq_primers={}
idx_fw_primer=-1   # default for qiime (col 3)
idx_rv_primer=-1   # new field 
idx_rv_revcomp_primer=-1
isRC = False  
primer_set = ""
no_primer = ""
extra=snakemake.params[0]
log_by_sample="Sample\tInitial reads\tSurviving reads\n"
if "--discard-untrimmed" in snakemake.params[0]:
    no_primer=" --untrimmed-output " + snakemake.params[2]
    extra=snakemake.params[0].replace("--discard-untrimmed","")

if not os.path.exists(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files"):
    os.makedirs(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files")

if snakemake.config["primers"]["remove"].lower() == "metadata":
    with open(snakemake.input[1]) as mappingFile:
        l=0
        for line in mappingFile:
            l=l+1;
            columns = line.split('\t')
            #the header is always at row 1 and must contain these first 3 fields (qiime specs):
            #SampleID BarcodeSequence LinkerPrimerSequence Description
            if l==1 :
                c=0
                #Find target headers
                for col in columns:
                    if col == "ReversePrimer" or col == "LinkerPrimerSequenceReverse"  or col == "ReverseLinkerPrimerSequence"  or col == "RvLinkerPrimerSequence" or col == "ReversePrimerSequence" :
                        idx_rv_primer=c
                    elif col == "LinkerPrimerSequence":
                        idx_fw_primer=c
                    elif col == "ReverseLinkerPrimerSequenceRevCom"  or col == "ReversePrimerRevCom":
                        idx_rv_revcomp_primer=c
                        isRC=True
                    c=c+1
                if isRC:
                    idx_rv_primer=idx_rv_revcomp_primer 
            elif not line.startswith("#"):
                if idx_rv_primer != -1:
                    #here, we are creating a dic with sample:primer
                    if isRC:  
                        primer_by_sample[columns[0]]=[columns[idx_fw_primer],columns[idx_rv_primer]]
                    else:
                        primer_by_sample[columns[0]]=[columns[idx_fw_primer],reverse_complement(columns[idx_rv_primer])]
                    #for primer in uniq_primers:
                    if columns[idx_fw_primer]+columns[idx_rv_primer] not in uniq_primers:
                        if isRC:
                            uniq_primers[columns[idx_fw_primer]+columns[idx_rv_primer]]=[columns[idx_fw_primer],columns[idx_rv_primer]]
                        else:
                            uniq_primers[columns[idx_fw_primer]+columns[idx_rv_primer]]=[columns[idx_fw_primer],reverse_complement(columns[idx_rv_primer])]    
                else:
                    primer_by_sample[columns[0]]=[columns[idx_fw_primer]]
                    if columns[idx_fw_primer] not in uniq_primers:
                        uniq_primers[columns[idx_fw_primer]]=[columns[idx_fw_primer]]
        mappingFile.close()
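    #Illustrative shapes of the dictionaries built above (hypothetical sample names; primers shown
    #are the commonly used 515F/806R pair, not a pipeline default):
    #  primer_by_sample = {"sampleA": ["GTGYCAGCMGCCGCGGTAA", "ATTAGAWACCCBNGTAGTCC"], ...}
    #  uniq_primers     = {"<fw+rv key>": ["GTGYCAGCMGCCGCGGTAA", "ATTAGAWACCCBNGTAGTCC"]}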

    #If we have more than one different pair of primers, we run cutadapt by sample
    #otherwise we run only one instance
    if len(uniq_primers) >1:
        #create tmp dir
        if not os.path.exists(snakemake.params[4]):
            os.makedirs(snakemake.params[4])
        else: #it exists and most likely we want to delete all of its content.
            subprocess.run( ["rm -fr " + snakemake.params[4]+"*"],stdout=subprocess.PIPE, shell=True)
        #split the reads
        #If we are running this, it comes from our demultiplexing, and thus we have fasta headers like this:
        #><sample>_###  so we remove the _###
        subprocess.run(["cat "+ snakemake.input[0]+ " |  awk '{if($0 ~ \"^>\"){sample=$1; header=$0; gsub(\">\",\"\",sample);gsub(\"_[0-9].*\",\"\",sample);}else{print header\"\\n\"$0 >> \""+snakemake.params[4]+"\"sample\".fasta\"} }'"],stdout=subprocess.PIPE, shell=True)
        all_primers=""
        for file in os.listdir(snakemake.params[4]):
            #"file" holds only the file name; the path has already been stripped
            #os.path.splitext removes the extension
            sample=os.path.splitext(file)[0]
            no_primer=""
            extra=""
            if "--discard-untrimmed" in snakemake.params[0]:
                no_primer=" --untrimmed-output " + snakemake.params[4]+sample+"_untrimmed.fasta"
                extra=snakemake.params[0].replace("--discard-untrimmed","")
            tmp_out = snakemake.params[4]+sample+"_trimmed.fasta"
            tmp_log = snakemake.params[4]+sample+".log"
            if sample in primer_by_sample:
                if len(primer_by_sample[sample])>1:
                    primer_set=" -g "+primer_by_sample[sample][0]+"..."+primer_by_sample[sample][1]+" "
                else:
                    primer_set=" -g "+primer_by_sample[sample][0]
                #run cutadapt by sample
                subprocess.run(["echo \"Processing sample\" " + sample + "\n >> "+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
                subprocess.run( ["cutadapt  "+ primer_set +" "+extra+" -o "+tmp_out + " "+ no_primer +" " + snakemake.params[4]+file+ ">>"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
                #stats by sample
                initialReads=countFasta(snakemake.params[4]+file,False)
                survivingReads=countFasta(tmp_out,False)
                prc = "{:.2f}".format(float((survivingReads/initialReads)*100))
                log_by_sample=log_by_sample+sample+"\t"+str(initialReads)+"\t"+str(survivingReads)+" ("+prc+"%)\n"
                all_primers=all_primers+sample+"\t"+primer_set+"\n"
            else:
                print("\033[91mNo primers found for sample:"+ sample+ " \033[0m")
                print("\033[91mPlease make sure to have the sample included in the mapping file: "+snakemake.input[1]+"  \033[0m")
                print("\033[91mAborting the pipeline \033[0m")
                exit(1)

        #merge results
        subprocess.run( ["cat  "+ snakemake.params[4]+"*_trimmed.fasta > "+ snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
        with open(snakemake.params[1], "a") as primers:
            primers.write(all_primers)
            primers.close()
        #subprocess.run( ["cat  "+ snakemake.params[4]+"*_untrimmed.fasta > " snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
    else: #only run one cutadapt instance
        new_key = list(uniq_primers)
        if len(uniq_primers[new_key[0]])>1: #is PE?
            primer_set=" -g "+uniq_primers[new_key[0]][0]+"..."+uniq_primers[new_key[0]][1]+" "
        else: #is SE
            primer_set=" -g "+uniq_primers[new_key[0]][0]
        subprocess.run( ["cutadapt  "+ primer_set +" "+extra+" -o "+snakemake.output[0] + " "+ no_primer +" " + snakemake.input[0]+ ">"+  snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
        with open(snakemake.params[1], "a") as primers:
            primers.write(primer_set)
            primers.close()
else: #values come at the CFG, run only once
    primer_set="-g " + snakemake.config["primers"]["fw_primer"]
    if len(snakemake.config["primers"]["rv_primer"]) > 2:
        primer_set=primer_set+"..."+reverse_complement(snakemake.config["primers"]["rv_primer"])

    subprocess.run( ["cutadapt  "+ primer_set  +" "+extra+" -o "+snakemake.output[0] + " "+ no_primer +" " + snakemake.input[0]+ ">"+ snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
  #  primer_set = snakemake.config["cutadapt"]["adapters"]
    with open(snakemake.params[1], "w") as primers:
        primers.write(primer_set)
        primers.close()

initialReads=countFasta(snakemake.input[0],False)
survivingReads=countFasta(snakemake.output[0],False)
prc=float((survivingReads/initialReads)*100)
prc_str = "{:.2f}".format(float((survivingReads/initialReads)*100))

user_input="0"
while (user_input != "1" and user_input !=  "2"):
    print("\033[91m This step removes primers \033[0m")
    print("\033[93m Total number of initial reads: " + str(initialReads) + " \033[0m")
    print("\033[93m Total number of surviving reads: " + str(survivingReads) + " = "+ prc_str + "% \033[0m")
    print("\033[93m You can find cutadapt's log file at: " + snakemake.params[5] +"\n \033[0m")
    if snakemake.config["interactive"] != "F" or prc < snakemake.config["primers"]["min_prc"]:
        print("\033[92m What would you like to do? \033[0m")
        print("\033[92m 1. Continue with the workflow. \033[0m")
        print("\033[92m 2. Interrupt the workflow. \033[0m")
        if snakemake.config["primers"]["remove"].lower() == "metadata" and  len(uniq_primers)>1:
            print("\033[92m 3. Print results by sample. \033[0m")
        user_input = stdin.readline() #READS A LINE
        user_input = user_input[:-1]
        if user_input == "2":
            print("\033[91m Aborting workflow... \033[0m")
            #delete target output (snakemake also does it)
            subprocess.run( ["rm -f "+ snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
            #delete cutadapt tmp directory
            subprocess.run( ["rm -fr " + snakemake.params[4]],stdout=subprocess.PIPE, shell=True)
            #delete all the concatenated log files
            subprocess.run( ["rm -f " + snakemake.params[5]],stdout=subprocess.PIPE, shell=True)
            #delete primers file
            subprocess.run( ["rm -f " + snakemake.params[1]],stdout=subprocess.PIPE, shell=True)
            exit(1)
        if user_input == "3":
            print(log_by_sample)
    else:
        print("\033[93m" +" Interactive mode off \033[0m")
        print("\033[93m" +" Removing primers...\033[0m")
        user_input="1"
# if we ran multiple cutadapt tasks, now delete tmp files and logs.
if snakemake.config["primers"]["remove"].lower() == "metadata" and  len(uniq_primers)>1: 
    print("\033[96mCleaning intermediate files...\033[0m")
    subprocess.run( ["rm -fr " + snakemake.params[4]],stdout=subprocess.PIPE, shell=True)

#Summarize results    
subprocess.run( ["cat "+ snakemake.output[0]+"| grep '^>' | cut -f1 -d' ' | sed 's/>// ; s/_[0-9]*$//' |  sort | uniq -c | awk '{print $2\"\\t\"$1}' > " + snakemake.params[3]+".tmp1"],stdout=subprocess.PIPE, shell=True)
subprocess.run( ["cat "+ snakemake.input[0]+"| grep '^>' | cut -f1 -d' ' | sed 's/>// ; s/_[0-9]*$//' |  sort | uniq -c | awk '{print $2\"\\t\"$1}'| awk -F'\t' 'NR==FNR{h[$1]=$2;next} BEGIN{print \"Sample\\tReads_before_cutadapt\\tSurviving_reads\\tPrc_surviving_reads\"}{if(h[$1]){print $1\"\\t\"h[$1]\"\\t\"$2\"\\t\"($2/h[$1])*100\"%\"}else{print $1\"\\t\"$2\"\\t0\\t0%\"}}' - "+snakemake.params[3]+".tmp1 > "+ snakemake.params[3]],stdout=subprocess.PIPE, shell=True)
os.remove(snakemake.params[3]+".tmp1")
exit(0)
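
Both primer-removal snippets above end by writing a per-sample summary table (snakemake.params[3]) with the awk pipeline shown; with hypothetical sample names and read counts, the tab-separated layout looks like this:

Sample     Reads_before_cutadapt   Surviving_reads   Prc_surviving_reads
sampleA    12000                   11650             97.0833%
sampleB    8000                    0                 0%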
import os
from sys import stdin
import subprocess
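
#As used below: snakemake.input[0] is the representative-sequence FASTA and snakemake.input[1]
#a list of putative chimeric sequence IDs; filter_fasta.py with "-s <ids> -n" discards the
#sequences whose IDs appear in that list.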

try:
    treads = subprocess.run( ["grep '^>' " + snakemake.input[0] + " | wc -l"],stdout=subprocess.PIPE, shell=True)
    totalReads =  treads.stdout.decode('utf-8').strip()
    creads = subprocess.run( ["cat " + snakemake.input[1] + " | wc -l"],stdout=subprocess.PIPE, shell=True)
    chimericReads =  creads.stdout.decode('utf-8').strip()
    prc = (float(chimericReads)/float(totalReads))*100
    print("\033[91m This step can remove possible chimeric sequences \033[0m")
    print("\033[93m Total number of reads: " + totalReads + " \033[0m")
    print("\033[93m Total number of possible chimeras: " + chimericReads + " ({0:.2f}".format(prc) + "%) \033[0m")
    print("\033[92m Do you want to remove chimeric sequences?(y/n): \033[0m")
    if snakemake.config["interactive"] != "F":
        user_input = stdin.readline() #READS A LINE
        user_input = user_input[:-1]
        filter_log = "Total number of possible chimeras: " + chimericReads + " ({0:.2f}".format(prc) + ")%\n\n"
        if user_input.upper() == "Y" or user_input.upper() == "YES":
            subprocess.run( ["filter_fasta.py -f " + snakemake.input[0] + " -s "+ snakemake.input[1] + " -n -o " + snakemake.output[0]], stdout=subprocess.PIPE, shell=True)
            filter_log += "The chimeric sequences were removed with the following command:\n\n"
            filter_log += ":commd:`filter_fasta.py -f " + snakemake.input[0] + " -s "+ snakemake.input[1] + " -n -o " + snakemake.output[0]+"`\n\n"
        else:
            subprocess.run( ["mv " + snakemake.input[0] + " " + snakemake.output[0]], stdout=subprocess.PIPE, shell=True)
            filter_log += "The user didn't remove the chimeric sequences\n\n"
        with open(snakemake.output[1], "w") as out:
            out.write(filter_log)
            out.close()
    else:
        print("\033[93m" +" Interactive mode off \033[0m")
        print("\033[93m" +" Removing chimeras...\033[0m")
        subprocess.run( [snakemake.config["qiime"]["path"]+"filter_fasta.py -f " + snakemake.input[0] + " -s "+ snakemake.input[1] + " -n -o " + snakemake.output[0]], stdout=subprocess.PIPE, shell=True)
        filter_log = "Total number of possible chimeras: " + chimericReads + " ({0:.2f}".format(prc) + ")%\n\n"
        filter_log += "The chimeric sequences were removed with the following command:\n\n"
        filter_log += ":commd:`filter_fasta.py -f " + snakemake.input[0] + " -s "+ snakemake.input[1] + " -n -o " + snakemake.output[0]+"`\n\n"
        with open(snakemake.output[1], "w") as out:
            out.write("Interactive mode off. Automatic chimera removing...\n")
            out.write(str(filter_log))
            out.close()

except Exception as e:
    print("Problem executing script.\nMessage: " + str(e))
import os
import subprocess
from benchmark_utils import countFasta
from benchmark_utils import countFastaGZ
from sys import stdin
import shutil

def complement(seq):
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'Y':'R', 'R':'Y','S':'S','W':'W','K':'M','M':'K','N':'N','B':'V','V':'B','D':'H','H':'D'} 
    bases = list(seq) 
    bases = [complement[base] for base in bases] 
    return ''.join(bases)


def reverse_complement(s):
    return complement(s[::-1])

#from Bio.Seq import Seq


primer_by_sample={}
uniq_primers={}
idx_fw_primer=-1   # default for qiime (col 3)
idx_rv_primer=-1  # new field 
idx_rv_revcomp_primer=-1
isRC = False  
with open(snakemake.input[0]) as mappingFile:
    l=0
    for line in mappingFile:
        l=l+1;
        columns = line.split('\t')
        #the header is always at row 1 and must contain these first 3 fields (qiime specs):
        #SampleID BarcodeSequence LinkerPrimerSequence Description
        if l==1 :
            c=0
            #Find target headers
            for col in columns:
                if col == "ReverseLinkerPrimerSequence"  or col == "RvLinkerPrimerSequence" or col == "ReversePrimer" or col == "ReversePrimerSequence"  :
                    idx_rv_primer=c
                elif col == "LinkerPrimerSequence":
                    idx_fw_primer=c
                elif col == "ReverseLinkerPrimerSequenceRevCom"  or col == "RvLinkerPrimerSequenceRevCom" or col == "ReversePrimerRevCom":
                    idx_rv_revcomp_primer=c                   
                c=c+1
            #if there is no "ReverseLinkerPrimerSequence" we look for ReverseLinkerPrimerSequenceRevCom
            if idx_rv_primer == -1 and idx_rv_revcomp_primer != -1:
                idx_rv_primer=idx_rv_revcomp_primer
                isRC = True 
        elif not line.startswith("#"):
            if idx_rv_primer != -1:
                #if the value is already reverse complemented, we want the 5' to 3' orientation, so reverse complement it again.
                if isRC:
                    primer_by_sample[columns[0]]=[columns[idx_fw_primer],reverse_complement(columns[idx_rv_primer])]
                else:
                    primer_by_sample[columns[0]]=[columns[idx_fw_primer],columns[idx_rv_primer]]
            elif idx_fw_primer != -1:
                primer_by_sample[columns[0]]=[columns[idx_fw_primer]]
            else:
                print("\033[91m ERROR: LinkerPrimerSequence not found on mapping file: "+ snakemake.input[0] +" \033[0m")
                exit(1)


# List files
fq_files = [f for f in os.listdir(snakemake.params[0]) if f.endswith("_1."+snakemake.params[2])]
if not os.path.exists(snakemake.params[0]+"/reads_discarded_primer/"):
    os.makedirs(snakemake.params[0]+"/reads_discarded_primer/")
if not os.path.exists(snakemake.params[0]+"/primer_removed/"):
    os.makedirs(snakemake.params[0]+"/primer_removed/")
if not os.path.exists(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files"):
    os.makedirs(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files")
summ_file = open(snakemake.output[0],"w")
summ_file2 = open(snakemake.params[4],"w")
summ_file.write("Sample\tReads_before_cutadapt\tSurviving_reads\tPrc_surviving_reads\n")
summ_file2.write("Sample\tReads_before_cutadapt\tSurviving_reads\tPrc_surviving_reads\n")
log_str = "Sample\tReads_before_cutadapt\tSurviving_reads\tPrc_surviving_reads\n"
log_zero = "Sample\tReads_before_cutadapt\tSurviving_reads\tPrc_surviving_reads\n"
has_zero_length_reads = False
zero_samples = 0;
to_remove = []

for fw in fq_files:
    sample=fw.replace("_1."+snakemake.params[2],"")
    fw_fq= snakemake.params[0]+"/"+fw
    rv=fw.replace("_1."+snakemake.params[2],"_2."+snakemake.params[2])
    rv_fq= snakemake.params[0]+"/"+rv
    #Count reads before trimming
    if snakemake.params[2].endswith("gz"):
        reads_ori=countFastaGZ(fw_fq,True)
    else:
        reads_ori=countFasta(fw_fq,True)
    #no cutadapt if no reads
    #if reads_ori > 0:

    if sample in primer_by_sample:
        runCutAdapt=False
        discard_untrimmed=""
        extra_params=snakemake.params[1] 
        if len(primer_by_sample[sample])>1 and reads_ori < 1:
            reads_after = 0
            prcOK="{:.2f}".format(float((reads_after/1)*100))
            to_copy=snakemake.params[0]+"/primer_removed/"+sample+"_1."+snakemake.params[2]
            os.symlink(fw_fq,to_copy)
            if snakemake.params[3] == "PE":
                to_copy_rv=snakemake.params[0]+"/primer_removed/"+sample+"_2."+snakemake.params[2]
                os.symlink(rv_fq,to_copy_rv)
            runCutAdapt=True

        elif len(primer_by_sample[sample])>1 and snakemake.params[3] == "PE" :
            if "--discard-untrimmed" in snakemake.params[1]:
                discard_untrimmed=" --untrimmed-output "+snakemake.params[0]+"/reads_discarded_primer/"+sample+"_1.fastq.gz --untrimmed-paired-output  "+snakemake.params[0]+"/reads_discarded_primer/"+sample+"_2.fastq.gz"
                extra_params=snakemake.params[1].replace("--discard-untrimmed","")
            #print("cutadapt -g "+ primer_by_sample[sample][0] + " -G " + primer_by_sample[sample][1] + " " +extra_params+" -O "+ snakemake.config["primers"]["min_overlap"] +" -o "+snakemake.params[0]+"/primer_removed/"+sample+"_1.fastq.gz -p "+snakemake.params[0]+"/primer_removed/"+sample+"_2.fastq.gz "+discard_untrimmed +" "+ fw_fq + " " +  rv_fq + " >> "+snakemake.params[0]+"/primer_removed/"+sample+".cutadapt.log")
            subprocess.run(["cutadapt -g "+ primer_by_sample[sample][0] + " -G " + primer_by_sample[sample][1]+" -m "+ snakemake.config["primers"]["min_length"]  + " " +extra_params+" -O "+ snakemake.config["primers"]["min_overlap"] +" -o "+snakemake.params[0]+"/primer_removed/"+sample+"_1.fastq.gz -p "+snakemake.params[0]+"/primer_removed/"+sample+"_2.fastq.gz "+discard_untrimmed +" "+ fw_fq + " " +  rv_fq + " >> "+snakemake.params[0]+"/primer_removed/"+sample+".cutadapt.log"],stdout=subprocess.PIPE, shell=True)
            runCutAdapt=True
            #subprocess.run(["grep \"(passing filters)\" "+snakemake.params[0]+"/primer_removed/"+sample+".cutadapt.log | awk '{print \""+sample+"\t\"$5\"\t\"$6}' >> "+snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
            #subprocess.run( ["cutadapt "+ primer_set +" "+snakemake.params[0]+" -o "+snakemake.output[0] + " " + snakemake.input[0]+ ">"+ snakemake.output[1]],stdout=subprocess.PIPE, shell=True)
        elif len(primer_by_sample[sample])>=1 and snakemake.params[3] == "SE":
            if "--discard-untrimmed" in snakemake.params[0]:
                discard_untrimmed=" --untrimmed-output "+snakemake.params[0]+"/reads_discarded_primer/"+sample+"_1.fastq.gz"
                extra_params=snakemake.params[1].replace("--discard-untrimmed","") 
            subprocess.run(["cutadapt -g "+ primer_by_sample[sample][0] +" -m "+ snakemake.config["primers"]["min_length"] +" " +extra_params+" -O "+ snakemake.config["primers"]["min_overlap"] +" -o "+snakemake.params[0]+"/primer_removed/"+sample+"_1.fastq.gz "+ discard_untrimmed + " " + fw_fq + " >> "+ snakemake.params[0]+"/primer_removed/"+sample+".cutadapt.log"],stdout=subprocess.PIPE, shell=True)  
            #subprocess.run(["grep \"(passing filters)\" "+snakemake.params[0]+"/primer_removed/"+sample+".cutadapt.log | awk '{print \""+sample+"\t\"$5\"\t\"$6}' >> "+snakemake.output[0]],stdout=subprocess.PIPE, shell=True)
            runCutAdapt=True
        elif len(primer_by_sample[sample])==1 and snakemake.params[3] == "PE":
            print("\033[91m ERROR: Found forward and reverse reads, but only one primer was supplied \033[0m")
            print("sample: "+sample + " primer " + primer_by_sample[sample][0])
            summ_file.close()
            summ_file2.close()
            exit(1)

        if runCutAdapt and reads_ori > 0:
            if snakemake.params[2].endswith("gz"):
                reads_ori=countFastaGZ(fw_fq,True)
                reads_after=countFastaGZ(snakemake.params[0]+"/primer_removed/"+sample+"_1.fastq.gz",True)
            else:
                reads_ori=countFasta(fw_fq,True)
                reads_after=countFasta(snakemake.params[0]+"/primer_removed/"+sample+"_1.fastq",True)
            prcOK="{:.2f}".format(float((reads_after/reads_ori)*100))
            summ_file.write(sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n");
            summ_file2.write(sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n");
            log_str = log_str + sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n"
            if reads_after < 1:
                has_zero_length_reads = True
                log_zero = log_zero + sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n"
                to_remove.append(snakemake.params[0]+"/primer_removed/"+sample+"_1."+snakemake.params[2])
                if snakemake.params[3] == "PE":
                    to_remove.append(snakemake.params[0]+"/primer_removed/"+sample+"_2."+snakemake.params[2])
                zero_samples = zero_samples + 1


        elif runCutAdapt and reads_ori < 1:
            summ_file.write(sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n");
            summ_file2.write(sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n");
            log_str = log_str + sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n"
            log_zero = log_zero + sample+"\t"+str(reads_ori)+"\t"+str(reads_after)+"\t"+prcOK+"\n"
            zero_samples = zero_samples + 1
            has_zero_length_reads = True
            to_remove.append(snakemake.params[0]+"/primer_removed/"+sample+"_1."+snakemake.params[2])
            if snakemake.params[3] == "PE":
                to_remove.append(snakemake.params[0]+"/primer_removed/"+sample+"_2."+snakemake.params[2])


    else:
        print("\033[93m WARNING: No primers found for sample: "+sample +" \033[0m")
        summ_file.close()
        summ_file2.close()
        exit(1)
summ_file.close() 
summ_file2.close()

user_input="0"
show_menu = True
if zero_samples > 0:
    while show_menu:
        print("\033[91m\n###########  Primer removal validation    ###########\033[0m")
        print("\033[91m You have " + str(zero_samples) + " samples without reads surviving filters. \033[0m")
        print("\033[92m LIBRARY: "+snakemake.wildcards.sample+" \033[0m")
        print("\033[92m cutadapt_log: "+snakemake.params[0]+"/primer_removed/"+sample+".cutadapt.log \033[0m")
        print("\033[93m Please select one of the following options: \033[0m")
        print("\033[93m   1. Print samples with 0 reads \033[0m")
        print("\033[93m   2. Print summary (all the samples) \033[0m")
        print("\033[93m   3. Remove from this analysis samples with 0 reads\033[0m")
        print("\033[93m      and continue with the workflow. \033[0m")
        print("\033[93m   4. Interrupt the workflow and re-do primer removal step. \033[0m")
        print("\033[93m      Adjust primer values in your configuration and/or mapping file \033[0m")
        print("\033[93m      and restart the pipeline. \033[0m")
        print("\033[93m      This action will remove:"+snakemake.params[0]+"/primer_removed \033[0m")
        print("\033[93m   5. Interrupt the workflow \033[0m")
        print("\033[93m Select an option: \033[0m")
        user_input = stdin.readline() #READS A LINE
        user_input = user_input[:-1]
        if user_input == "1":
            print(log_zero)
        elif user_input == "2":
            print(log_str)
        elif user_input == "3":
            for file in to_remove:
                newn = file+"_NOK"
                os.rename(file, newn)
                show_menu = False
        elif user_input == "4":
            shutil.rmtree(snakemake.params[0]+"/primer_removed")
            exit(1)
        elif user_input == "5":
            exit(1)

exit(0)
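
countFasta and countFastaGZ are helpers imported from the pipeline's benchmark_utils module, which is not shown on this page. A minimal stand-in with the same call shape (a path plus a flag that, judging from how it is called here, switches between FASTA and FASTQ counting) might look like this:

import gzip

def countFastaGZ(path, is_fastq):
    #Hypothetical re-implementation for illustration only; the real helper lives in benchmark_utils.
    opener = gzip.open if path.endswith("gz") else open
    with opener(path, "rt") as fh:
        if is_fastq:
            #FASTQ: four lines per record
            return sum(1 for _ in fh) // 4
        #FASTA: one '>' header line per record
        return sum(1 for line in fh if line.startswith(">"))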
import subprocess
import functools
from snakemake.utils import report
from benchmark_utils import readBenchmark
from benchmark_utils import countTxt
from seqsChart import createChart
from seqsChart import createChartPrc
from benchmark_utils import countFasta
from benchmark_utils import make_table

################
#Function to retrieve the sample names and put them into the report title
#@param sampleFile file with the sample list; it is created during combine_filtered_samples
#snakemake.wildcards.project + "/runs/" + snakemake.wildcards.run + "/samples.log"
#@return the title with the samples
def getSampleList(sampleFile):
    with open(sampleFile) as sfile:
        samps ="Amplicon Analysis Report for Libraries: "
        for l in sfile:
            samps+= l
        samps+="\n"
        for i in range(0,len(samps)):
            samps+="="
    return samps;

#########################
#This function reads the file cat_samples.log, which holds the command executed to
#combine all the libraries after cleaning and demultiplexing and before taxonomy
#assignment
#@param catLogFile file with the command
#snakemake.wildcards.project + "/runs/" + snakemake.wildcards.run + "/cat_samples.log"
#@return the string ready to be concatenated into the report.
def getCombinedSamplesList(catLogFile):
    with open(catLogFile) as sfile:
        command =":commd:`"
        i=0
        for l in sfile:
            if i == 0:
                command+= l + "`\n\n"
            i+=1
    return command;


#title = getSampleList(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/samples.log")
#catCommand =  getCombinedSamplesList(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/cat_samples.log")
title = "Amplicon Analysis Report\n===========================\n\n"
################################################################################
#                         Benchmark Section                                    #
# This section is to generate a pre-formatted text with the benchmark info for #
# All the executed rules.                                                      #
################################################################################
#combineBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/combine_seqs_fw_rev.benchmark")
dada2Benchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/dada2.benchmark")
asvFilterBenchmark =  readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/filter.benchmark")

#pikRepBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/pick_reps.benchmark")
#assignTaxaBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/assign_taxa.benchmark")
otuTableBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/dada2.table.benchmark")
convertOtuBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/dada2.biom.benchmark")
#convertOtuBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable.txt.benchmark")
summTaxaBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/summary/summarize_taxa.benchmark")
asvNoSingletonsBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/asvTable_nosingletons.bio.benchmark")
filterASVTableBenchmark =  readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/asvTable_nosingletons.txt.benchmark")
filterBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/representative_seq_set_noSingletons.benchmark")
deRepBenchmark=""
#if  snakemake.config["derep"]["dereplicate"] == "T" and  snakemake.config["pickOTU"]["m"] != "swarm" and  snakemake.config["pickOTU"]["m"] != "usearch":
#    deRepBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/derep.benchmark")
if snakemake.config["alignRep"]["align"] == "T":
    #align_seqs.py -m {config[alignRep][m]} -i {input} -o {params.outdir} {config[alignRep][extra_params]}
    alignBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/align_rep_seqs.benchmark")
    #"filter_alignment.py -i {input} -o {params.outdir} {config[filterAlignment][extra_params]}"
    alignFilteredBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/filtered/align_rep_seqs.benchmark")
    #"make_phylogeny.py -i {input} -o {output} {config[makeTree][extra_params]}"
    makePhyloBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/filtered/representative_seq_set_noSingletons_aligned_pfiltered.benchmark")
kronaBenchmark=""
if snakemake.config["krona"]["report"].casefold() == "t" or snakemake.config["krona"]["report"].casefold() == "true":
    kronaBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/krona_report.benchmark")

#dada2FilterBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/filter.benchmark")
#dada2Benchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/dada2.benchmark")
#dada2BiomBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/dada2.biom.benchmark")

################################################################################
#                         TOOLS VERSION SECTION                          #
################################################################################
#clusterOtuV = subprocess.run([snakemake.config["qiime"]["path"]+'pick_otus.py', '--version'], stdout=subprocess.PIPE)
#clusterOtuVersion = "**" + clusterOtuV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

#pickRepV = subprocess.run([snakemake.config["qiime"]["path"]+'pick_rep_set.py', '--version'], stdout=subprocess.PIPE)
#pickRepVersion = "**" + pickRepV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

#assignTaxaV = subprocess.run([snakemake.config["qiime"]["path"]+'parallel_assign_taxonomy_'+snakemake.config["assignTaxonomy"]["qiime"]["method"]+'.py', '--version'], stdout=subprocess.PIPE)
#assignTaxaVersion = "**" + assignTaxaV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

#makeOTUV = subprocess.run([snakemake.config["qiime"]["path"]+'make_otu_table.py', '--version'], stdout=subprocess.PIPE)
#makeOTUVersion = "**" + makeOTUV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

convertBiomV = subprocess.run([snakemake.config["biom"]["command"], '--version'], stdout=subprocess.PIPE)
convertBiomVersion = "**" + convertBiomV.stdout.decode('utf-8').strip() + "**"

dada2V = subprocess.run([snakemake.config["Rscript"]["command"],'Scripts/dada2Version.R'], stdout=subprocess.PIPE)
dada2Version = "**" + dada2V.stdout.decode('utf-8').strip() + "**"


summTaxaSV = subprocess.run([snakemake.config["qiime"]["path"]+'summarize_taxa.py', '--version'], stdout=subprocess.PIPE)
summTaxaVersion = "**" + summTaxaSV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

filterOTUNoSV = subprocess.run([snakemake.config["qiime"]["path"]+'filter_otus_from_otu_table.py', '--version'], stdout=subprocess.PIPE)
filterOTUNoSVersion = "**" + filterOTUNoSV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

filterFastaV = subprocess.run([snakemake.config["qiime"]["path"]+'filter_fasta.py', '--version'], stdout=subprocess.PIPE)
filterFastaVersion = "**" + filterFastaV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

rscriptV = subprocess.run([snakemake.config["Rscript"]["command"], '--version'], stdout=subprocess.PIPE)
rscriptVersion = "**" + rscriptV.stdout.decode('utf-8').strip() + "**"


#blastnV = subprocess.run([snakemake.config["assignTaxonomy"]["blast"]["command"], '-version'], stdout=subprocess.PIPE)
#blastnVersion = "**" + blastnV.stdout.decode('utf-8').split('\n', 1)[0].replace('blastn:','').strip() + "**"

#vsearchV2 = subprocess.run([snakemake.config["assignTaxonomy"]["vsearch"]["command"], '--version'], stdout=subprocess.PIPE)
#vsearchVersion_tax = "**" + vsearchV2.stdout.decode('utf-8').split('\n', 1)[0].strip() + "**"

#if  snakemake.config["derep"]["dereplicate"] == "T" and  snakemake.config["pickOTU"]["m"] != "swarm" and  snakemake.config["pickOTU"]["m"] != "usearch":
#    vsearchV = subprocess.run([snakemake.config["derep"]["vsearch_cmd"], '--version'], stdout=subprocess.PIPE)
#    vsearchVersion = "**" + vsearchV.stdout.decode('utf-8').split('\n', 1)[0].strip() + "**"

if snakemake.config["alignRep"]["align"] == "T":
    alignFastaVersion="TBD"
    try:
        alignFastaV = subprocess.run([snakemake.config["qiime"]["path"]+'align_seqs.py', '--version'], stdout=subprocess.PIPE)
        if "Version" in alignFastaVersion:
            alignFastaVersion = "**" + alignFastaV.stdout.decode('utf-8').replace('Version: ','').strip() + "**"
    except Exception as e:
        alignFastaVersion = "**Problem retriving the version**"

    filterAlignmentV = subprocess.run([snakemake.config["qiime"]["path"]+'filter_alignment.py', '--version'], stdout=subprocess.PIPE)
    filterAlignmentVersion = "**" + filterAlignmentV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

    makePhyloV = subprocess.run([snakemake.config["qiime"]["path"]+'make_phylogeny.py', '--version'], stdout=subprocess.PIPE)
    makePhyloVersion = "**" + makePhyloV.stdout.decode('utf-8').replace('Version:','').strip() + "**"


################################################################################
#                        Compute counts section                                #
################################################################################
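#Each of the awk one-liners below sums a single column of a per-sample summary table
#(filter_summary.out or stats_dada2.txt). Judging by how the sums are used, the columns follow
#the usual dada2 read-tracking order: input, filtered, denoised F, denoised R, merged,
#length-filtered, non-chimeric.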
totalReads = "TBD"
intTotalReads = 1;
try:
     treads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/filter_summary.out | awk 'NR>1{sum=sum+$2} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
     intTotalReads = int(treads.stdout.decode('utf-8').strip())
     totalReads = "**" + str(intTotalReads) + "**"
except Exception as e:
     totalReads = "Problem reading outputfile"

filteredReads = "TBD"
intFilteredReads = 1;
prcFiltered=0.0
try:
     freads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/filter_summary.out | awk 'NR>1{sum=sum+$3} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
     intFilteredReads = int(freads.stdout.decode('utf-8').strip())
     filteredReads = "**" + str(intFilteredReads) + "**"
     prcFiltered = float(intFilteredReads/intTotalReads)*100
     prcFilteredStr = "**" + "{:.2f}".format(prcFiltered) + "%**"
except Exception as e:
     filteredReads = "Problem reading outputfile"

denoisedFWReads = "TBD"
intDenoisedFWReads = 1;
prcDenoisedFW=0
try:
     dfwreads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/stats_dada2.txt | awk 'NR>1{sum=sum+$2} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
     intDenoisedFWReads = int(dfwreads.stdout.decode('utf-8').strip())
     denoisedFWReads = "**" + str(intDenoisedFWReads) + "**"
     prcDenoisedFW = float(intDenoisedFWReads/intTotalReads)*100
     prcDenoisedFWStr = "**" + "{:.2f}".format(prcDenoisedFW) + "%**"
     prcDenoisedFWvsFiltered = (intDenoisedFWReads/intFilteredReads)*100
     prcDenoisedFWStrvsFiltered = "**" + "{:.2f}".format(prcDenoisedFWvsFiltered) + "%**"
except Exception as e:
     denoisedFWReads = "Problem reading outputfile"

denoisedRVReads = "TBD"
intDenoisedRVReads = 1;
prcDenoisedRV=0.0
try:
     drvreads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/stats_dada2.txt | awk 'NR>1{sum=sum+$3} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
     intDenoisedRVReads = int(drvreads.stdout.decode('utf-8').strip())
     denoisedRVReads = "**" + str(intDenoisedRVReads) + "**"
     prcDenoisedRV = float(intDenoisedRVReads/intTotalReads)*100
     prcDenoisedRVStr = "**" + "{:.2f}".format(prcDenoisedRV) + "%**"
     prcDenoisedRVvsFiltered = (intDenoisedRVReads/intFilteredReads)*100
     prcDenoisedRVStrvsFiltered = "**" + "{:.2f}".format(prcDenoisedRVvsFiltered) + "%**"
except Exception as e:
     denoisedRVReads = "Problem reading outputfile"

mergedReads = "TBD"
intmergedReads = 1;
prcmerged=0.0
try:
     mreads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/stats_dada2.txt | awk 'NR>1{sum=sum+$4} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
     intmergedReads = int(mreads.stdout.decode('utf-8').strip())
     mergedReads = "**" + str(intmergedReads) + "**"
     prcmerged = float(intmergedReads/intTotalReads)*100
     prcmergedStr = "**" + "{:.2f}".format(prcmerged) + "%**"
     prcmergedvsVariant = (intmergedReads/((intDenoisedFWReads+intDenoisedRVReads)/2))*100
     prcmergedStrvsVariant = "**" + "{:.2f}".format(prcmergedvsVariant) + "%**"
except Exception as e:
     mergedReads = "Problem reading outputfile"

lengthFReads = "TBD"
intlengthFReads = 1;
prclengthF=0.0
try:
     lreads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/stats_dada2.txt | awk 'NR>1{sum=sum+$5} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
     intlengthFReads = int(lreads.stdout.decode('utf-8').strip())
     lengthFReads = "**" + str(intlengthFReads) + "**"
     prclengthF = float(intlengthFReads/intTotalReads)*100
     prclengthFStr = "**" + "{:.2f}".format(prclengthF) + "%**"
     prclengthFvsMerged = (intlengthFReads/intmergedReads)*100
     prclengthFStrvsMerged = "**" + "{:.2f}".format(prclengthFvsMerged) + "%**"
except Exception as e:
     lengthFReads = "Problem reading outputfile"

chimeraReads = "TBD"
intchimeraReads = 1;
prcchimera=0.0
if snakemake.config["dada2_asv"]["chimeras"] == "T":
    try:
         chreads = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/stats_dada2.txt | awk 'NR>1{sum=sum+$6} END{print sum}'"], stdout=subprocess.PIPE, shell=True)    
         intchimeraReads = int(chreads.stdout.decode('utf-8').strip())
         chimeraReads = "**" + str(intchimeraReads) + "**"
         prcchimera = float(intchimeraReads/intTotalReads)*100
         prcchimeraStr = "**" + "{:.2f}".format(prcchimera) + "%**"
         prcchimeravsLength = (intchimeraReads/intlengthFReads)*100
         prcchimeraStrvsLength = "**" + "{:.2f}".format(prcchimeravsLength) + "%**"
    except Exception as e:
         chimeraReads = "Problem reading outputfile"
intASV = 1
totalAsvs=""
intAsvs=1
try:
    asv_file=snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+"/asv/taxonomy_dada2/representative_seq_set_tax_assignments.txt"
    tasvs = subprocess.run( ["cat " +  asv_file + " | wc -l"], stdout=subprocess.PIPE, shell=True)
    intAsvs = int(tasvs.stdout.decode('utf-8').strip())
    #print("Total OTUS" + str(intOtus))
    totalAsvs = "**" + str(intAsvs) + "**"
except Exception as e:
    totalAsvs = "**Problem reading outputfile**"

prcAssigned = 0.0
prcNotAssignedOtus="TBD"
assignedOtus=0
notAssignedOtus=0
try:
    aOtus = subprocess.run( ["cat " +  snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/taxonomy_dada2/representative_seq_set_tax_assignments.txt | cut -f2 | grep -w NA | wc -l"], stdout=subprocess.PIPE, shell=True)
    notAssignedOtus = int(aOtus.stdout.decode('utf-8').strip())
    #print("Not assigned OTUS" + str(notAssignedOtus))
    assignedOtus = (intAsvs - notAssignedOtus)
    prcAssigned = float(assignedOtus/intAsvs)*100

    prcAssignedAsvs = "**" + "{:.2f}".format(prcAssigned) + "%**"
except Exception as e:
    prcAssignedAsvs = "**Problem reading outputfile**"


intSingletons = 1;
totalSingletons =""
try:
    totS = subprocess.run( ["grep -v \"^#\" " +  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/asvTable_noSingletons.txt" + " | wc -l"], stdout=subprocess.PIPE, shell=True)
    intSingletons = int(totS.stdout.decode('utf-8').strip())
    #print("Total OTUS" + str(intOtus))
    totalSingletons = "**" + str(intSingletons) + "**"
except Exception as e:
    totalSingletons = "**Problem reading outputfile**"


notAssignedSingleOtus = 0
assignedSingleOtus = 0
totalAssignedSingletons =""
try:
    sOtus = subprocess.run( ["cat " +  snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/taxonomy_dada2/representative_seq_set_noSingletons.fasta  |  grep '^>' | sed 's/>//' | grep -F -w -f - " +snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/taxonomy_dada2/representative_seq_set_tax_assignments.txt | cut -f2 | grep -w NA | wc -l" ], stdout=subprocess.PIPE, shell=True)
    notAssignedSingleOtus = int(sOtus.stdout.decode('utf-8').strip())
#print("Not assigned OTUS" + str(notAssignedOtus))
    assignedSingleOtus = (intSingletons - notAssignedSingleOtus)
    totalAssignedSingletons = "**" + str(assignedSingleOtus) + "%**"
except Exception as e:
    totalAssignedSingletons = "**Problem reading outputfile**"

prcSingle = 0.0
prcSingleStr=""  
try:
    prcSingle=float(assignedSingleOtus/intSingletons)*100
    prcSingleStr = "**" + "{:.2f}".format(prcSingle) + "%**" 
except Exception as e:
    prcSingleStr="**Error parsing output**"


#include user description on the report
desc = snakemake.config["description"]
txtDescription = ""
if len(desc) > 0:
    txtDescription = "\n**User description:** "+desc+"\n"


################################################################################
#                       Sample distribution chart                              #
################################################################################
countTxt="Following the read counts: \n\n"
fileData = []
headers = []
data =[]
headers.append("File description")
headers.append("Location")
headers.append("#")
headers.append("(%)")
fileData.append(headers)
#combined
data.append("Demultiplexed reads")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/<SAMPLE>_data/demultiplexed/\*.fastq.gz")
data.append(str(intTotalReads))
data.append("100%")
fileData.append(data)
data=[]
#filtered
data.append("QA filtered & trimmed reads")
data.append(snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/<LIBRARY>_data/demultiplexed/filtered/\*.fastq.gz")
data.append(str(intFilteredReads))
data.append("{:.2f}".format(float(prcFiltered))+"%")
fileData.append(data)
data=[]

#fw denoised
data.append("Denoised FW reads")
data.append("*No intermediate file generated*")
data.append(str(intDenoisedFWReads))
data.append("{:.2f}".format(prcDenoisedFW)+"%")
fileData.append(data)
data=[]

#rv denoised
data.append("Denoised RV reads")
data.append("*NO intermediate file generated*")
data.append(str(intDenoisedRVReads))
data.append("{:.2f}".format(prcDenoisedRV)+"%")
fileData.append(data)
data=[]

#Merged
data.append("Merged and full denoised reads")
data.append("*No intermediate file generated*")
data.append(str(intmergedReads))
data.append("{:.2f}".format(prcmerged)+"%")
fileData.append(data)
data=[]

#LengthFiltered
data.append("Length filtered")
data.append("*No intermediate file generated*")
data.append(str(intlengthFReads))
data.append("{:.2f}".format(prclengthF)+"%")
fileData.append(data)
data=[]

if snakemake.config["dada2_asv"]["chimeras"] == "T":
    data.append("Chimera removed")
    data.append("*No intermediate file generated*")
    data.append(str(intchimeraReads))
    data.append("{:.2f}".format(prcchimera)+"%")
    fileData.append(data)
    data=[]

#asv
data.append("ASV table")
data.append(snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/asvTable.txt")
data.append(str(intAsvs))
#data.append("{:.2f}".format(float((intAsvs/intTotalReads)*100))+"%")
data.append("100%")
fileData.append(data)
data=[]
#Taxonomy
data.append("Taxonomy assignation")
data.append(snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/asv/taxonomy_dada2/representative_seq_set_tax_assignments.txt")
data.append(str(assignedOtus))
data.append("{:.2f}".format(float((assignedOtus/intAsvs)*100))+"%")
fileData.append(data)
data=[]
#otus no singletons
data.append("ASV table (no singletons: a > " + str(snakemake.config["filterOtu"]["n"])+")")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/asvTable_noSingletons.txt")
data.append(str(intSingletons))
data.append("{:.2f}".format(float((intSingletons/intAsvs)*100))+"%")
fileData.append(data)
data=[]
#Assigned singletons
data.append("Assigned no singletons")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/asvTable_noSingletons.txt")
data.append(str(assignedSingleOtus))
try:
    data.append("{:.2f}".format(prcSingle)+"%")
except Exception as e:
    data.append("Err")
    print("Error - Assigned no singletons - dividing: "+ str(assignedSingleOtus)+"/"+ str(intSingletons))
fileData.append(data)
countTxt += make_table(fileData)
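
###############################################################################
# Illustrative sketch: make_table() is imported from benchmark_utils and its
# exact output format is not shown in this snippet. Purely as an illustration
# of how a header row plus data rows (the fileData built above) can be turned
# into RST for the report, a hypothetical minimal list-table renderer:
def as_rst_list_table(rows):
    """Render rows (first row = header) as an RST list-table directive."""
    out = ".. list-table::\n   :header-rows: 1\n\n"
    for row in rows:
        out += "   * - " + "\n     - ".join(str(cell) for cell in row) + "\n"
    return out
###############################################################################
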
################################################################################
#                         Generate sequence amounts chart                      #
################################################################################
numbers=[intTotalReads];
labels=["Initial\nreads"];
prcs=[]

prcs.append("100%")
#if  snakemake.config["derep"]["dereplicate"] == "T" and  snakemake.config["pickOTU"]["m"] != "swarm" and  snakemake.config["pickOTU"]["m"] != "usearch":
#    numbers.append(intDerep)
#    labels.append("Derep.")
#    prcs.append("{:.2f}".format(float((intDerep/intTotalReads)*100))+"%")

numbers.append(intFilteredReads)
labels.append("Filtered\nreads")
prcs.append("{:.2f}".format(prcFiltered)+"%")

#numbers.append(intDenoisedFWReads)
#labels.append("Denoised\nFW reads")
#prcs.append("{:.2f}".format(prcDenoisedFW)+"%")

#numbers.append(intDenoisedRVReads)
#labels.append("Denoised\nRV reads")
#prcs.append("{:.2f}".format(prcDenoisedRV)+"%")


numbers.append(intmergedReads)
labels.append("Merged\nreads")
prcs.append("{:.2f}".format(prcmerged)+"%")

numbers.append(intlengthFReads)
labels.append("Length\nfiltered")
prcs.append("{:.2f}".format(prclengthF)+"%")
color_index=4
if snakemake.config["dada2_asv"]["chimeras"] == "T":
    numbers.append(intchimeraReads)
    labels.append("Chimera\nremoved")
    prcs.append("{:.2f}".format(prcchimera)+"%")
    color_index=5

numbers2=[intAsvs];
labels2=["ASVs"];
prcs2=["100%"]

#numbers.append(intAsvs)
#labels.append("ASVs")
#prcs.append("{:.2f}".format(float((intAsvs/intTotalReads)*100))+"%")

numbers2.append(assignedOtus)
labels2.append("Assigned\nASVs")
prcs2.append("{:.2f}".format(float((assignedOtus/intAsvs)*100))+"%")

numbers2.append(intSingletons)
labels2.append("No\nSingletons")
prcs2.append("{:.2f}".format(float((intSingletons/intAsvs)*100))+"%")

numbers2.append(assignedSingleOtus)
labels2.append("Assigned no\nsingletons")
prcs2.append("{:.2f}".format(prcSingle)+"%")

createChartPrc(numbers, tuple(labels),prcs,snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files/sequence_numbers_asv.png",0)
createChartPrc(numbers2, tuple(labels2),prcs2,snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files/sequence_numbers_asv_2.png",color_index)

###############################################################################
#                       Variable sections                                      #
################################################################################
variable_refs=""
assignTaxoStr = ""
if snakemake.config["ANALYSIS_TYPE"] == "ASV":
    assignTaxoStr =":red:`Tool:` RDP_\n\n"
    assignTaxoStr += ":green:`Function:` assignTaxonomy() *implementation of RDP Classifier within dada2*\n\n"
    assignTaxoStr += ":green:`Reference database:` " + str(snakemake.config["dada2_taxonomy"]["db"])+ "\n\n"
    if snakemake.config["dada2_taxonomy"]["add_sps"]["add"].casefold() == "T":
        assignTaxoStr += ":green:`Species information.` After assigning taxonomy, genus-species binomials were assigned with assignSpecies() function.\n\n" 
        assignTaxoStr += ":green:`Function:` addSpecies()* wraps the assignSpecies function to assign genus-species binomials to the input sequences by exact matching against a reference fasta.*\n\n"
        assignTaxoStr += ":green:`Taxonomy species file:` " + str(snakemake.config["dada2_taxonomy"]["add_sps"]["db_sps"])+ "\n\n"
    else:
        assignTaxoStr += ":green:`Species information:` The *'add species'* (add_sps) option from the configuration file is set to **false**. Set it to **true** and supply a *species database* if you want to add species-level annotation to the taxonomic table.\n\n"
    variable_refs+=".. [RDP]  Wang, Q, G. M. Garrity, J. M. Tiedje, and J. R. Cole. 2007. Naive Bayesian Classifier for Rapid Assignment of rRNA Sequences into the New Bacterial Taxonomy. Appl Environ Microbiol. 73(16):5261-7.\n\n"


#Alignment report
alignmentReport = ""
if snakemake.config["alignRep"]["align"] == "T":
    alignmentReport = "\nAlign representative sequences\n-------------------------------\n\n"
    alignmentReport+="Align the sequences in a FASTA file to each other or to a template sequence alignment.\n\n"
    alignmentReport+=":red:`Tool:` [QIIME]_ - align_seqs.py\n\n"
    alignmentReport+=":red:`Version:` "+alignFastaVersion +"\n\n"
    alignmentReport+=":green:`Method:` ["+ snakemake.config["alignRep"]["m"] + "]_\n\n"
    alignmentReport+="**Command:**\n\n"
    alignmentReport+=":commd:`align_seqs.py -m "+snakemake.config["alignRep"]["m"] +" -i "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/dada2/representative_seq_set_noSingletons.fasta "+ snakemake.config["alignRep"]["extra_params"] + " -o "
    alignmentReport+=snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_aligned.fasta`\n\n"
    alignmentReport+="**Output files:**\n\n"
    alignmentReport+=":green:`- Aligned fasta file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_aligned.fasta\n\n"
    alignmentReport+=":green:`- Log file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_log.txt\n\n"
    alignmentReport+=alignBenchmark+"\n\n"

    alignmentReport+="Filter alignment\n-----------------\n\n"
    alignmentReport+="Removes positions which are gaps in every sequence.\n\n"
    alignmentReport+=":red:`Tool:` [QIIME]_ - filter_alignment.py\n\n"
    alignmentReport+=":red:`Version:` "+filterAlignmentVersion +"\n\n"
    alignmentReport+="**Command:**\n\n"
    alignmentReport+=":commd:`filter_alignment.py -i  "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_aligned.fasta " +snakemake.config["filterAlignment"]["extra_params"]
    alignmentReport+=" -o  "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/filtered/`\n\n"
    alignmentReport+="**Output file:**\n\n"
    alignmentReport+=":green:`- Aligned fasta file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_aligned_pfiltered.fasta\n\n"
    alignmentReport+=alignFilteredBenchmark+"\n\n"

    alignmentReport+="Make tree\n-----------\n\n"
    alignmentReport+="Create phylogenetic tree (newick format).\n\n"
    alignmentReport+=":red:`Tool:` [QIIME]_ - make_phylogeny.py\n\n"
    alignmentReport+=":red:`Version:` "+makePhyloVersion +"\n\n"
    alignmentReport+=":green:`Method:` ["+ snakemake.config["makeTree"]["method"] + "]_\n\n"
    alignmentReport+="**Command:**\n\n"
    alignmentReport+=":commd:`make_phylogeny.py -i "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_aligned.fasta -o representative_seq_set_noSingletons_aligned_pfiltered.tre "+ snakemake.config["makeTree"]["extra_params"]+ " -t " + snakemake.config["makeTree"]["method"]+"`\n\n"
    alignmentReport+="**Output file:**\n\n"
    alignmentReport+=":green:`- Taxonomy tree:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/aligned/representative_seq_set_noSingletons_aligned.tre\n\n"
    alignmentReport+=makePhyloBenchmark+"\n\n"
#KRONA REPORT
kronaReport = ""
if  snakemake.config["krona"]["report"].casefold() == "t" or snakemake.config["krona"]["report"].casefold() == "true":
    kronaReport+="Krona report\n----------------\n\n"
    kronaReport+="Krona allows hierarchical data to be explored with zooming, multi-layered pie charts.\n\n"
    kronaReport+=":red:`Tool:` [Krona]_\n\n"
    if snakemake.config["krona"]["otu_table"].casefold() != "singletons":
        kronaReport+="These charts were created using the ASV table **without** singletons\n\n"
    else:
        kronaReport+="These charts were created using the ASV table **including** singletons\n\n"

    if snakemake.config["krona"]["samples"].strip() == "all":
        kronaReport+="The report was executed for all the samples.\n\n"
    else:
        kronaReport+="The report was executed for the following target samples: "+ snakemake.config["krona"]["samples"].strip() + "\n\n"
    if "-c" in snakemake.config["krona"]["extra_params"]:
        kronaReport+="The samples were combined on a single chart\n\n"
    else:
        kronaReport+="Each sample is represented on a separated chart (same html report).\n\n"
    kronaReport+="You can see the report at the following link:\n\n"
    kronaReport+=":green:`- Krona report:` kreport_\n\n"
    #kronaReport+=" .. _kreport: ../../runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/krona_report.html\n\n"
    kronaReport+=" .. _kreport: report_files/krona_report.dada2.html\n\n"

    kronaReport+="Or access the html file at:\n\n"
    kronaReport+=":green:`- Krona html file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/taxonomy_dada2/krona_report.html\n\n"
    kronaReport+=kronaBenchmark+"\n\n"

###############################################################################
#                         REFERENCES                                     #
################################################################################
#dada2
variable_refs+= ".. [dada2] Callahan BJ, McMurdie PJ, Rosen MJ, Han AW, Johnson AJA, Holmes SP (2016). DADA2: High-resolution sample inference from Illumina amplicon data. Nature Methods, 13, 581-583. doi: 10.1038/nmeth.3869.\n\n"

#ALIGNMENT
if snakemake.config["alignRep"]["align"] == "T":
    if snakemake.config["alignRep"]["m"] == "pynast":
        variable_refs+= ".. [pynast] Caporaso JG, Bittinger K, Bushman FD, DeSantis TZ, Andersen GL, Knight R. 2010. PyNAST: a flexible tool for aligning sequences to a template alignment. Bioinformatics 26:266-267.\n\n"
    elif snakemake.config["alignRep"]["m"] == "infernal":
        variable_refs+= ".. [infernal] Nawrocki EP, Kolbe DL, Eddy SR. 2009. Infernal 1.0: Inference of RNA alignments. Bioinformatics 25:1335-1337.\n\n"

    if snakemake.config["makeTree"]["method"] == "fasttree":
        variable_refs+= ".. [fasttree] Price MN, Dehal PS, Arkin AP. 2010. FastTree 2-Approximately Maximum-Likelihood Trees for Large Alignments. Plos One 5(3).\n\n"
    elif snakemake.config["makeTree"]["method"] == "raxml":
        variable_refs+= "..[raxml] Stamatakis A. 2006. RAxML-VI-HPC: Maximum Likelihood-based Phylogenetic Analyses with Thousands of Taxa and Mixed Models. Bioinformatics 22(21):2688-2690.\n\n"
    elif snakemake.config["makeTree"]["method"] == "clearcut":
        variable_refs+= "..[clearcut] Evans J, Sheneman L, Foster JA. 2006. Relaxed Neighbor-Joining: A Fast Distance-Based Phylogenetic Tree Construction Method. J Mol Evol 62:785-792.\n\n"
    elif snakemake.config["makeTree"]["method"] == "clustalw":
        variable_refs+= "..[clustalw] Larkin MA, Blackshields G, Brown NP, Chenna R, McGettigan PA, McWilliam H, Valentin F, Wallace IM, Wilm A, Lopez R, Thompson JD, Gibson TJ, Higgins DG. 2007. Clustal W and Clustal X version 2.0. Bioinformatics 23:2947-2948.\n\n"

########
# EXTRA
##############

errorPlots="" 
if snakemake.config["dada2_asv" ]["generateErrPlots"].casefold() == "t" or snakemake.config["dada2_asv" ]["generateErrPlots"].casefold() == "true":
    errorPlots+="**Error plots:** \n\n:green:`- FW reads error plot::`  " + snakemake.wildcards.PROJECT + "/runs/"+snakemake.wildcards.run+ "/asv/fw_err.pdf\n\n" 
    errorPlots+=":green:`- RV reads error plot::`  " + snakemake.wildcards.PROJECT + "/runs/"+snakemake.wildcards.run+ "/asv/rv_err.pdf\n\n"

#shorts and longs
shorts = str(snakemake.config["rm_reads"]["shorts"])
longs = str(snakemake.config["rm_reads"]["longs"])
with open(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/shorts_longs.log") as trimlog:
    i=0
    for line in trimlog:
        i=i+1
        #tokens = line.split("\t")
        if i== 1:
            shorts = line
        else:
            longs = line

trunc_fw = str(snakemake.config["dada2_filter"]["truncFW"])
trunc_rv = str(snakemake.config["dada2_filter"]["truncRV"])
with open(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/trunc_val.log") as trunclog:
    i=0
    for line in trunclog:
        i=i+1
        #tokens = line.split("\t")
        if i== 1:
            trunc_fw = line
        else:
            trunc_rv = line
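
###############################################################################
# Illustrative sketch (hypothetical refactor, not used above): the two blocks
# above read the same kind of two-line log file (first line = forward value,
# second line = reverse value) and could share a single helper:
def read_pair_log(log_path, default_first, default_second):
    """Return the (first, second) lines of a two-line log, falling back to defaults."""
    first, second = default_first, default_second
    with open(log_path) as log:
        lines = [l.strip() for l in log if l.strip()]
    if len(lines) > 0:
        first = lines[0]
    if len(lines) > 1:
        second = lines[1]
    return first, second
# e.g. shorts, longs = read_pair_log(".../asv/shorts_longs.log", shorts, longs)
###############################################################################
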

chimeras="" 
if snakemake.config["dada2_asv" ]["chimeras"].casefold() == "t" or snakemake.config["dada2_asv" ]["chimeras"].casefold() == "true":
    chimeras="Remove chimeras\n~~~~~~~~~~~~~~~~\n\n"
    chimeras+="Sequence variants identified as bimeric are removed, and a bimera-free collection of unique sequences is generated.\n\n"
    chimeras+=":green:`Function:` removeBimeraDenovo()\n\n"
    chimeras+=":green:`Method:` consensus\n\n" 

report("""
{title}
    .. role:: commd
    .. role:: red
    .. role:: green

**CASCABEL** is designed to run amplicon sequence analysis across single or multiple read libraries. This report covers the ASV table creation and taxonomic assignment for the combined accepted reads of the given samples or libraries.

{txtDescription}

Filter and Trim
---------------
Once all the individual libraries were demultiplexed, the fastq files from all the samples of all the libraries were processed together.

The filtering and trimming steps were both performed with the **filterAndTrim()** function from the R package dada2, according to the user-defined parameters.

:red:`Tool:` dada2_ 

:red:`Version:` {dada2Version}

:green:`Function:` filterAndTrim()

:green:`Max Expected Errors (maxEE) FW:` {snakemake.config[dada2_filter][maxEE_FW]}

:green:`Max Expected Errors (maxEE) RV:` {snakemake.config[dada2_filter][maxEE_RV]}

:green:`Forward read truncation:` {trunc_fw}

:green:`Reverse read truncation:` {trunc_rv}

**Command:**


:commd:`Scripts/asvFilter.R $PWD {snakemake.config[dada2_filter][generateQAplots]} {snakemake.config[dada2_filter][truncFW]} {snakemake.config[dada2_filter][truncRV]} {snakemake.config[dada2_filter][maxEE_FW]} {snakemake.config[dada2_filter][maxEE_RV]} {snakemake.config[dada2_filter][cpus]} {snakemake.config[dada2_filter][extra_params]} {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/filter_summary.out`


**Output file:**

:green:`- Filtered fastq files:`   {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/<Library>/demultiplexed/filtered/

:green:`- Summary:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/filter_summary.out


:red:`Note:` To speed up downstream computation, consider tightening maxEE. If too few reads are passing the filter, consider relaxing maxEE, perhaps especially on the reverse reads.

Make sure that your forward and reverse reads overlap after length truncation.

{asvFilterBenchmark}


Amplicon Sequence Variants
----------------------------
In order to identify ASVs, the dada2 workflow requires several steps. Below is a summary of these steps and their main parameters.

:red:`Tool:` dada2_ 

:red:`Version:` {dada2Version}

Learn errors
~~~~~~~~~~~~~~~~
The first step after filtering the reads is to learn the errors from the fastq files.

:green:`Function:` learnErrors(filteredFQ)

{errorPlots}

ASV inference
~~~~~~~~~~~~~~~
The amplicon sequence variant identification consists of a high resolution sample inference from the amplicon data using the learned errors. 

:green:`Function:` dada(filteredFQ, errors, pool='{snakemake.config[dada2_asv][pool]}')

Merge pairs
~~~~~~~~~~~~~~~
In this step, forward and reverse reads are merged in order to create full denoised sequences.

:green:`Function:` mergePairs(dadaF, dadaR)

:green:`Min overlap:` {snakemake.config[dada2_merge][minOverlap]}

:green:`Max mismatch:` {snakemake.config[dada2_merge][maxMismatch]}

Length filtering   
~~~~~~~~~~~~~~~~~~
Sequences that are much longer or shorter than expected may be the result of non-specific priming.

:green:`- Shortest length:` {shorts}

:green:`- Longest length:` {longs}

{chimeras}

**Output files:**

:green:`- Representative ASV sequences:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/representative_seq_set.fasta

The total number of different ASVs is: {totalAsvs}


Assign taxonomy
----------------
Given a set of sequences, assign the taxonomy of each sequence.

{assignTaxoStr}

The percentage of successfully assigned ASVs is: {prcAssignedAsvs}

**Output file:**

:green:`- ASV taxonomy assignation:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/representative_seq_set_tax_assignments.txt


The previous steps were performed within a Cascabel R script according to the following command:

**Command**

:commd:`Scripts/asvDada2.R $PWD  {snakemake.config[dada2_asv][pool]}   {snakemake.config[dada2_asv][cpus]}    {snakemake.config[dada2_asv][generateErrPlots]}   {snakemake.config[dada2_asv][extra_params]}  {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/    {snakemake.config[rm_reads][shorts]}    {snakemake.config[rm_reads][longs]}   {snakemake.config[rm_reads][offset]}    {snakemake.config[dada2_asv][chimeras]}    {snakemake.config[dada2_taxonomy][db]}   {snakemake.config[dada2_taxonomy][add_sps][db_sps]}    {snakemake.config[dada2_taxonomy][add_sps][add]}   {snakemake.config[dada2_taxonomy][extra_params]}  {snakemake.config[dada2_merge][minOverlap]}  {snakemake.config[dada2_merge][maxMismatch]}  {snakemake.config[dada2_taxonomy][add_sps][extra_params]}`  


{dada2Benchmark}

Make ASV table
---------------
Tabulates the number of times an ASV is found in each sample, and adds the taxonomic predictions for each ASV in the last column.

**Command:**

:commd:`cat {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/representative_seq_set_tax_assignments.txt | awk 'NR==FNR{{if(NR>1){{tax=$2;for(i=3;i<=NF;i++){{tax=tax";"$i}};h[$1]=tax;}}next;}} {{if(FNR==1){{print $0"\\ttaxonomy"}}else{{print $0"\\t"h[$1]}}}}' - {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/asv_table.txt  >  {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable.txt`

**Output file:**

:green:`- ASV table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable.txt

{otuTableBenchmark}

Convert ASV table
------------------
Convert from txt to the BIOM table format.

:red:`Tool:` [BIOM]_

:red:`Version:` {convertBiomVersion}

**Command:**

:commd:`biom convert -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable.txt -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable.biom {snakemake.config[biom][tableType]} --table-type "OTU table" --to-hdf5 --process-obs-metadata taxonomy`

**Output file:**

:green:`- Biom format table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable.biom

{convertOtuBenchmark}

Summarize Taxa
---------------
Summarize information of the representation of taxonomic groups within each sample.

:red:`Tool:` [QIIME]_ - summarize_taxa.py

:red:`Version:` {summTaxaVersion}

**Command:**

:commd:`summarize_taxa.py -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/otuTable.biom {snakemake.config[summTaxa][extra_params]} -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/summary/`

**Output file:**

:green:`- Taxonomy summarized counts at different taxonomy levels:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/summary/otuTable_L**N**.txt

Where **N** is the taxonomy level. Default configuration produces levels from 2 to 6.

{summTaxaBenchmark}

Filter ASV table
-----------------
Filter ASVs from an ASV table based on their observed counts or identifier.

:red:`Tool:` [QIIME]_ - filter_otus_from_otu_table.py

:red:`Version:` {filterOTUNoSVersion}

:green:`Minimum observation counts:` {snakemake.config[filterOtu][n]}

**Command:**

:commd:`filter_otus_from_otu_table.py -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable.biom -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable_noSingletons.biom {snakemake.config[filterOtu][extra_params]} -n {snakemake.config[filterOtu][n]}`

**Output file:**

:green:`- Biom table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/otuTable_noSingletons.biom

{asvNoSingletonsBenchmark}

Convert Filtered ASV table
---------------------------
Convert the filtered ASV table from the BIOM format to a human-readable format.

:red:`Tool:` [BIOM]_

:red:`Version:` {convertBiomVersion}

**Command:**

:commd:`biom convert -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_dada2/asvTable_noSingletons.biom -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable_noSingletons.txt {snakemake.config[biom][tableType]} {snakemake.config[biom][headerKey]} {snakemake.config[biom][extra_params]} {snakemake.config[biom][outFormat]}`

**Output file:**

:green:`- TSV format table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/asvTable_noSingletons.txt

{filterASVTableBenchmark}

Filter representative sequences
---------------------------------
Remove sequences according to the filtered OTU biom table.

:red:`Tool:` [QIIME]_ - filter_fasta.py

:red:`Version:` {filterFastaVersion}

**Command:**

:commd:`filter_fasta.py -f {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/representative_seq_set.fasta -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/representative_seq_set_noSingletons.fasta {snakemake.config[filterFasta][extra_params]} -b {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/otuTable_noSingletons.biom`

**Output file:**

:green:`- Filtered fasta file:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/asv/taxonomy_dada2/representative_seq_set_noSingletons.fasta


{alignmentReport}

{kronaReport}

Final counts
-------------

{countTxt}

.. image:: report_files/sequence_numbers_asv.png


.. image:: report_files/sequence_numbers_asv_2.png


:red:`Note:`

:green:`- Assigned ASVs percentage` is the percentage of ASVs that were successfully assigned a taxonomy.

:green:`- No singletons percentage` is the percentage of non-singleton ASVs relative to the complete ASV table.

:green:`- Assigned No singletons` is the number of non-singleton ASVs that were successfully assigned a taxonomy.

References
------------

.. [QIIME] QIIME. Caporaso JG, Kuczynski J, Stombaugh J, Bittinger K, Bushman FD, Costello EK, Fierer N, Gonzalez Pena A, Goodrich JK, Gordon JI, Huttley GA, Kelley ST, Knights D, Koenig JE, Ley RE, Lozupone CA, McDonald D, Muegge BD, Pirrung M, Reeder J, Sevinsky JR, Turnbaugh PJ, Walters WA, Widmann J, Yatsunenko T, Zaneveld J, Knight R. 2010. QIIME allows analysis of high-throughput community sequencing data. Nature Methods 7(5): 335-336.

.. [Cutadapt] Cutadapt v1.15. Marcel Martin. Cutadapt removes adapter sequences from high-throughput sequencing reads. EMBnet.Journal, 17(1):10-12, May 2011. http://dx.doi.org/10.14806/ej.17.1.200

.. [vsearch] Rognes T, Flouri T, Nichols B, Quince C, Mahé F. (2016) VSEARCH: a versatile open source tool for metagenomics. PeerJ 4:e2584. doi: 10.7717/peerj.2584

.. [Krona] Ondov BD, Bergman NH, and Phillippy AM. Interactive metagenomic visualization in a Web browser. BMC Bioinformatics. 2011 Sep 30; 12(1):385.

.. [BIOM] The Biological Observation Matrix (BIOM) format or: how I learned to stop worrying and love the ome-ome. Daniel McDonald, Jose C. Clemente, Justin Kuczynski, Jai Ram Rideout, Jesse Stombaugh, Doug Wendel, Andreas Wilke, Susan Huse, John Hufnagle, Folker Meyer, Rob Knight, and J. Gregory Caporaso.GigaScience 2012, 1:7. doi:10.1186/2047-217X-1-7

{variable_refs}


""", snakemake.output[0], metadata="Author: J. Engelmann & A. Abdala ")
import subprocess
import functools
from snakemake.utils import report
from benchmark_utils import readBenchmark
from benchmark_utils import countTxt
from seqsChart import createChart
from seqsChart import createChartPrc
from benchmark_utils import countFasta
from benchmark_utils import make_table

################
#Function to retrieve the sample names and put them in the report title
#@param file with the sample list, it is created during combine_filtered_samples
#snakemake.wildcards.project + "/runs/" + snakemake.wildcards.run + "/samples.log"
#@return the title with the samples
def getSampleList(sampleFile):
    with open(sampleFile) as sfile:
        samps ="Amplicon Analysis Report for Libraries: "
        for l in sfile:
            samps+= l
        samps+="\n"
        for i in range(0,len(samps)):
            samps+="="
    return samps;

#########################
#This function reads the file cat_samples.log, which contains the command executed to
#combine all the libraries after cleaning and demultiplexing and before taxonomy
#assignation
#@param catLogFile file with the command
#snakemake.wildcards.project + "/runs/" + snakemake.wildcards.run + "/cat_samples.log"
#@return the string ready to be concatenated into the report.
def getCombinedSamplesList(catLogFile):
    with open(catLogFile) as sfile:
        command =":commd:`"
        i=0
        for l in sfile:
            if i == 0:
                command+= l + "`\n\n"
            i+=1
    return command;


title = getSampleList(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/samples.log")
catCommand =  getCombinedSamplesList(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/cat_samples.log")

################################################################################
#                         Benchmark Section                                    #
# This section is to generate a pre-formatted text with the benchmark info for #
# All the executed rules.                                                      #
################################################################################
combineBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/combine_seqs_fw_rev.benchmark")
otuBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu.benchmark")
pikRepBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/pick_reps.benchmark")
assignTaxaBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/assign_taxa.benchmark")
otuTableBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable.biom.benchmark")
convertOtuBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable.txt.benchmark")
summTaxaBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/summary/summarize_taxa.benchmark")
otuNoSingletonsBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable_nosingletons.bio.benchmark")
filterBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/representative_seq_set_noSingletons.benchmark")
deRepBenchmark=""
if  (snakemake.config["derep"]["dereplicate"] == "T"  and  snakemake.config["pickOTU"]["m"] != "usearch") or  snakemake.config["pickOTU"]["m"] == "swarm":
    deRepBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/derep.benchmark")
if snakemake.config["alignRep"]["align"] == "T":
    #align_seqs.py -m {config[alignRep][m]} -i {input} -o {params.outdir} {config[alignRep][extra_params]}
    alignBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/align_rep_seqs.benchmark")
    #"filter_alignment.py -i {input} -o {params.outdir} {config[filterAlignment][extra_params]}"
    alignFilteredBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/filtered/align_rep_seqs.benchmark")
    #"make_phylogeny.py -i {input} -o {output} {config[makeTree][extra_params]}"
    makePhyloBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/filtered/representative_seq_set_noSingletons_aligned_pfiltered.benchmark")
kronaBenchmark=""
if snakemake.config["krona"]["report"].casefold() == "t" or snakemake.config["krona"]["report"].casefold() == "true":
    kronaBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/krona_report.benchmark")

#dada2FilterBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/filter.benchmark")
#dada2Benchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/dada2.benchmark")
#dada2BiomBenchmark = readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/asv/dada2.biom.benchmark")

################################################################################
#                         TOOLS VERSION SECTION                          #
################################################################################
clusterOtuV = subprocess.run([snakemake.config["qiime"]["path"]+'pick_otus.py', '--version'], stdout=subprocess.PIPE)
clusterOtuVersion = "**" + clusterOtuV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

pickRepV = subprocess.run([snakemake.config["qiime"]["path"]+'pick_rep_set.py', '--version'], stdout=subprocess.PIPE)
pickRepVersion = "**" + pickRepV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

assignTaxaV = subprocess.run([snakemake.config["qiime"]["path"]+'parallel_assign_taxonomy_'+snakemake.config["assignTaxonomy"]["qiime"]["method"]+'.py', '--version'], stdout=subprocess.PIPE)
assignTaxaVersion = "**" + assignTaxaV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

makeOTUV = subprocess.run([snakemake.config["qiime"]["path"]+'make_otu_table.py', '--version'], stdout=subprocess.PIPE)
makeOTUVersion = "**" + makeOTUV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

convertBiomV = subprocess.run([snakemake.config["biom"]["command"], '--version'], stdout=subprocess.PIPE)
convertBiomVersion = "**" + convertBiomV.stdout.decode('utf-8').strip() + "**"

summTaxaSV = subprocess.run([snakemake.config["qiime"]["path"]+'summarize_taxa.py', '--version'], stdout=subprocess.PIPE)
summTaxaVersion = "**" + summTaxaSV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

filterOTUNoSV = subprocess.run([snakemake.config["qiime"]["path"]+'filter_otus_from_otu_table.py', '--version'], stdout=subprocess.PIPE)
filterOTUNoSVersion = "**" + filterOTUNoSV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

filterFastaV = subprocess.run([snakemake.config["qiime"]["path"]+'filter_fasta.py', '--version'], stdout=subprocess.PIPE)
filterFastaVersion = "**" + filterFastaV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

blastnV = subprocess.run([snakemake.config["assignTaxonomy"]["blast"]["command"], '-version'], stdout=subprocess.PIPE)
blastnVersion = "**" + blastnV.stdout.decode('utf-8').split('\n', 1)[0].replace('blastn:','').strip() + "**"

vsearchV2 = subprocess.run([snakemake.config["assignTaxonomy"]["vsearch"]["command"], '--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
vsearchVersion_tax = "**" + vsearchV2.stdout.decode('utf-8').split('\n', 1)[0].strip() + "**"

if  (snakemake.config["derep"]["dereplicate"] == "T" and  snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm":
    vsearchV = subprocess.run([snakemake.config["derep"]["vsearch_cmd"], '--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    vsearchVersion = "**" + vsearchV.stdout.decode('utf-8').split('\n', 1)[0].strip() + "**"

if  snakemake.config["pickOTU"]["m"] == "swarm":
    swarmV = subprocess.run(['swarm', '--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    swarmVersion = "**" + vsearchV.stdout.decode('utf-8').split('\n', 1)[0].strip() + "**"


if snakemake.config["alignRep"]["align"] == "T":
    alignFastaVersion="TBD"
    try:
        alignFastaV = subprocess.run([snakemake.config["qiime"]["path"]+'align_seqs.py', '--version'], stdout=subprocess.PIPE)
        if "Version" in alignFastaVersion:
            alignFastaVersion = "**" + alignFastaV.stdout.decode('utf-8').replace('Version: ','').strip() + "**"
    except Exception as e:
        alignFastaVersion = "**Problem retriving the version**"

    filterAlignmentV = subprocess.run([snakemake.config["qiime"]["path"]+'filter_alignment.py', '--version'], stdout=subprocess.PIPE)
    filterAlignmentVersion = "**" + filterAlignmentV.stdout.decode('utf-8').replace('Version:','').strip() + "**"

    makePhyloV = subprocess.run([snakemake.config["qiime"]["path"]+'make_phylogeny.py', '--version'], stdout=subprocess.PIPE)
    makePhyloVersion = "**" + makePhyloV.stdout.decode('utf-8').replace('Version:','').strip() + "**"


################################################################################
#                        Compute counts section                                #
################################################################################
totalReads = "TBD"
intTotalReads = 1;
try:
    treads = subprocess.run( ["grep '^>' " + snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/seqs_fw_rev_combined.fasta | wc -l"], stdout=subprocess.PIPE, shell=True)
    intTotalReads = int(treads.stdout.decode('utf-8').strip())
    totalReads = "**" + str(intTotalReads) + "**"
except Exception as e:
    totalReads = "Problem reading outputfile"

derep_reads = "TBD"
intDerep=1
if  (snakemake.config["derep"]["dereplicate"] == "T"  and  snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm":
    try:
        totd = subprocess.run( ["grep \"^>\" " +  snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/derep/seqs_fw_rev_combined_derep.fasta" + " | wc -l"], stdout=subprocess.PIPE, shell=True)
        intDerep = int(totd.stdout.decode('utf-8').strip())
        derep_reads = "**" + str(intDerep) + "**"
    except Exception as e:
        derep_reads = "**Problem reading outputfile**"

intOtus = 1
try:
    otu_file=""
    if (snakemake.config["derep"]["dereplicate"] == "T" and snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm" :
        otu_file = snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/otu/seqs_fw_rev_combined_remapped_otus.txt"
    else:
        otu_file = snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/otu/seqs_fw_rev_combined_otus.txt"
    totus = subprocess.run( ["cat " +  otu_file + " | wc -l"], stdout=subprocess.PIPE, shell=True)
    intOtus = int(totus.stdout.decode('utf-8').strip())
    #print("Total OTUS" + str(intOtus))
    totalOtus = "**" + str(intOtus) + "**"
except Exception as e:
    totalOtus = "**Problem reading outputfile**"

prcAssigned = 0.0
prcNotAssignedOtus="TBD"
try:
    nohit = "'No blast hit|Unassigned'"
    #if snakemake.config["assignTaxonomy"]["tool"] != "blast":
    #    nohit = "'Unassigned'"
    aOtus = subprocess.run( ["grep -E "+ nohit + " " +  snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/representative_seq_set_tax_assignments.txt | wc -l"], stdout=subprocess.PIPE, shell=True)
    notAssignedOtus = int(aOtus.stdout.decode('utf-8').strip())
    #print("Not assigned OTUS" + str(notAssignedOtus))
    assignedOtus = (intOtus - notAssignedOtus)
    prcAssigned = (assignedOtus/intOtus)*100

    prcAssignedOtus = "**" + "{:.2f}".format(prcAssigned) + "%**"
except Exception as e:
    prcAssignedOtus = "**Problem reading outputfile**"


intSingletons = 1;
try:
    totS = subprocess.run( ["grep -v \"^#\" " +  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable_noSingletons.txt" + " | wc -l"], stdout=subprocess.PIPE, shell=True)
    intSingletons = int(totS.stdout.decode('utf-8').strip())
    #print("Total OTUS" + str(intOtus))
    totalSingletons = "**" + str(intSingletons) + "**"
except Exception as e:
    totalSingletons = "**Problem reading outputfile**"

nohit = "'No blast hit|Unassigned|None'"
#if snakemake.config["assignTaxonomy"]["tool"] != "blast":
#    nohit = "'Unassigned'"
notAssignedSingleOtus = 0
assignedSingleOtus = 0
try:
    sOtus = subprocess.run( ["grep -E "+ nohit + " " +  snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable_noSingletons.txt | wc -l"], stdout=subprocess.PIPE, shell=True)
    notAssignedSingleOtus = int(sOtus.stdout.decode('utf-8').strip())
#print("Not assigned OTUS" + str(notAssignedOtus))
    assignedSingleOtus = (intSingletons - notAssignedSingleOtus)
except Exception as e:
    totalAssignedSingletons = "**Problem reading outputfile**"


#include user description on the report
desc = snakemake.config["description"]
txtDescription = ""
if len(desc) > 0:
    txtDescription = "\n**User description:** "+desc+"\n"


################################################################################
#                       Sample distribution chart                              #
################################################################################
countTxt="Following the read counts: \n\n"
fileData = []
headers = []
data =[]
headers.append("File description")
headers.append("Location")
headers.append("#")
headers.append("(%)")
fileData.append(headers)
#combined
data.append("Combined clean reads")
data.append(snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/seqs_fw_rev_combined.fasta")
data.append(str(intTotalReads))
data.append("100%")
fileData.append(data)
data=[]
#derep
if  (snakemake.config["derep"]["dereplicate"] == "T" and  snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm":
	data.append("Dereplicated reads")
	data.append(snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/derep/seqs_fw_rev_combined_derep.fasta")
	data.append(str(intDerep))
	data.append("{:.2f}".format(float((intDerep/intTotalReads)*100))+"%")
	fileData.append(data)
	data=[]

#otus
data.append("OTU table")
data.append(otu_file)
data.append(str(intOtus))
data.append("{:.2f}".format(float((intOtus/intTotalReads)*100))+"%")
fileData.append(data)
data=[]
#Taxonomy
data.append("Taxonomy assignation")
data.append(snakemake.wildcards.PROJECT+ "/runs/" + snakemake.wildcards.run+ "/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/representative_seq_set_tax_assignments.txt")
data.append(str(assignedOtus))
data.append("{:.2f}".format(float((assignedOtus/intOtus)*100))+"%")
fileData.append(data)
data=[]
#otus no singletons
data.append("OTU table (no singletons: a > " + str(snakemake.config["filterOtu"]["n"])+")")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable_noSingletons.txt")
data.append(str(intSingletons))
data.append("{:.2f}".format(float((intSingletons/intOtus)*100))+"%")
fileData.append(data)
data=[]
#Assigned singletons
data.append("Assigned no singletons")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/otuTable_noSingletons.txt")
data.append(str(assignedSingleOtus))
try:
    data.append("{:.2f}".format(float((assignedSingleOtus/intSingletons)*100))+"%")
except Exception as e:
    data.append("Err")
    print("Error - Assigned no singletons - dividing: "+ str(assignedSingleOtus)+"/"+ str(intSingletons))
fileData.append(data)
countTxt += make_table(fileData)
################################################################################
#                         Generate sequence amounts chart                      #
################################################################################
#numbers=[intTotalReads];
#labels=["Combined\nreads"];
#prcs=[]

#prcs.append("100%")
#Now we only create the 1st chart if we dereplicate; otherwise it makes no sense to show a single bar
sequence_bars=""
color_index=0
if  (snakemake.config["derep"]["dereplicate"] == "T" and  snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm":
    numbers=[intTotalReads];
    labels=["Combined\nreads"];
    prcs=[]
    prcs.append("100%")

    numbers.append(intDerep)
    labels.append("Derep.")
    prcs.append("{:.2f}".format(float((intDerep/intTotalReads)*100))+"%")
    createChartPrc(numbers, tuple(labels),prcs,snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files/sequence_numbers_all.png",color_index)
    sequence_bars=".. image:: report_files/sequence_numbers_all.png\n\n"
    color_index=2

numbers2=[intOtus]
labels2=["OTUs"]
prcs2=["{:.2f}".format(float((intOtus/intTotalReads)*100))+"%"]

numbers2.append(assignedOtus)
labels2.append("Assigned\nOTUs")
prcs2.append("{:.2f}".format(float((assignedOtus/intOtus)*100))+"%")

numbers2.append(intSingletons)
labels2.append("No\nSingletons")
prcs2.append("{:.2f}".format(float((intSingletons/intOtus)*100))+"%")

numbers2.append(assignedSingleOtus)
labels2.append("Assigned NO\n singletons")
prcs2.append("{:.2f}".format(float((assignedSingleOtus/intSingletons)*100))+"%")

createChartPrc(numbers2, tuple(labels2),prcs2,snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files/sequence_numbers_all_2.png",color_index)

###############################################################################
#                       Variable sections                                      #
################################################################################
variable_refs=""
assignTaxoStr = ""
if snakemake.config["assignTaxonomy"]["tool"] == "blast":
    assignTaxoStr =":red:`Tool:` ["+str(snakemake.config["assignTaxonomy"]["tool"])+"]_\n\n"
    assignTaxoStr += ":red:`Version:` " + blastnVersion + "\n\n"
    variable_refs+= ".. [blast] Altschul SF, Gish W, Miller W, Myers EW, Lipman DJ. 1990. Basic local alignment search tool. J Mol Biol 215(3):403-410\n\n"
    ref = ""
    if len(str(snakemake.config["assignTaxonomy"]["blast"]["blast_db"])) > 1:
        assignTaxoStr +=  ":green:`Reference database:` "+ str(snakemake.config["assignTaxonomy"]["blast"]["blast_db"])+"\n\n"
        ref= "-db " + str(snakemake.config["assignTaxonomy"]["blast"]["blast_db"])
    else:
        assignTaxoStr +=  ":green:`Reference fasta file:` "+ str(snakemake.config["assignTaxonomy"]["blast"]["fasta_db"])+"\n\n"
        ref= "-subject "+ str(snakemake.config["assignTaxonomy"]["blast"]["fasta_db"])
    assignTaxoStr +=  ":green:`Taxonomy mapping file:` "+ str(snakemake.config["assignTaxonomy"]["blast"]["mapFile"])+"\n\n"
    assignTaxoStr += "**Command:**\n\n"
    assignTaxoStr += ":commd:`"+ str(snakemake.config["assignTaxonomy"]["blast"]["command"] )+" " +ref + "-evalue " + str(snakemake.config["assignTaxonomy"]["blast"]["evalue"]) + "-outfmt '6 qseqid sseqid pident qcovs evalue bitscore' -num_threads " + str(snakemake.config["assignTaxonomy"]["blast"]["jobs"]) + " -max_target_seqs "
    assignTaxoStr += str(snakemake.config["assignTaxonomy"]["blast"]["max_target_seqs"]) +" -perc_identity "+ str(snakemake.config["assignTaxonomy"]["blast"]["identity"]) + " -out representative_seq_set_tax_blastn.out`\n\n"
    if snakemake.config["assignTaxonomy"]["blast"]["max_target_seqs"] != 1:
        assignTaxoStr += "After blast assignation, **results were mapped to their LCA using stampa_merge.py** script\n\n"

elif snakemake.config["assignTaxonomy"]["tool"] == "qiime":
    assignTaxoStr =":red:`Tool:` [QIIME]_\n\n"
    assignTaxoStr += ":red:`Version:` "+assignTaxaVersion
    assignTaxoStr += ":green:`Method:` **" + str(snakemake.config["assignTaxonomy"]["qiime"]["method"])+ "**\n\n"
    assignTaxoStr += "Reference database: " + str(snakemake.config["assignTaxonomy"]["qiime"]["dbFile"])+ "\n\n"
    assignTaxoStr += "Taxonomy mapping file: " + str(snakemake.config["assignTaxonomy"]["qiime"]["mapFile"])+ "\n\n"
    assignTaxoStr += "**Command:**\n\n"
    assignTaxoStr += ":commd:`parallel_assign_taxonomy_" + str(snakemake.config["assignTaxonomy"]["qiime"]["method"])+ ".py -i " + str(snakemake.wildcards.PROJECT)+ "/runs/" + str(snakemake.wildcards.run)+ "/otu/representative_seq_set.fasta --id_to_taxonomy_fp " + str(snakemake.config["assignTaxonomy"]["qiime"]["mapFile"])+ " --reference_seqs_fp "
    assignTaxoStr += str(snakemake.config["assignTaxonomy"]["qiime"]["dbFile"])+ " --jobs_to_start " + str(snakemake.config["assignTaxonomy"]["qiime"]["jobs"])+ " " + str(snakemake.config["assignTaxonomy"]["qiime"]["extra_params"])+ " "
    assignTaxoStr += "--output_dir " + str(snakemake.wildcards.PROJECT)+ "/runs/" + str(snakemake.wildcards.run)+ "/otu/taxonomy_" + str(snakemake.config["assignTaxonomy"]["tool"])+ "/`\n\n"
elif snakemake.config["assignTaxonomy"]["tool"] == "vsearch":
    assignTaxoStr =":red:`Tool:` [vsearch]_\n\n"
    assignTaxoStr += ":red:`Version:` " + vsearchVersion_tax + "\n\n"
    assignTaxoStr +=  ":green:`Reference fasta file:` "+ str(snakemake.config["assignTaxonomy"]["vsearch"]["db_file"])+"\n\n"
    assignTaxoStr +=  ":green:`Taxonomy mapping file:` "+ str(snakemake.config["assignTaxonomy"]["vsearch"]["mapFile"])+"\n\n"
    assignTaxoStr += "**Command:**\n\n"
    assignTaxoStr += ":commd:`"+ str(snakemake.config["assignTaxonomy"]["vsearch"]["command"] )+ "--usearch_global "+ str(snakemake.wildcards.PROJECT)+ "/runs/" + str(snakemake.wildcards.run)+ "/otu/representative_seq_set.fasta --db "+ str(snakemake.config["assignTaxonomy"]["vsearch"]["db_file"])
    assignTaxoStr += " --dbmask none --qmask none --rowlen 0 --id "+ str(snakemake.config["assignTaxonomy"]["vsearch"]["identity"])+" --iddef " + str(snakemake.config["assignTaxonomy"]["vsearch"]["identity_definition"])+" --userfields query+id" + str(snakemake.config["assignTaxonomy"]["vsearch"]["identity_definition"])+"+target "
    assignTaxoStr += " --maxaccepts "+ str(snakemake.config["assignTaxonomy"]["vsearch"]["max_target_seqs"]) + " --threads " + str(snakemake.config["assignTaxonomy"]["vsearch"]["jobs"]) + " "+ str(snakemake.config["assignTaxonomy"]["vsearch"]["extra_params"]) + " --output_no_hits --userout  representative_seq_set_tax_vsearch.out`\n\n"
    if (snakemake.config["assignTaxonomy"]["vsearch"]["max_target_seqs"]) != 1:
        assignTaxoStr += "After taxonomy assignation with vsearch, top hits with the same sequence identity but different taxonomy were mapped to their last common ancestor (LCA) using the script **stampa_merge.py** from https://github.com/frederic-mahe/stampa.\n\n"

#Dereplication report
dereplicateReport=""
if  (snakemake.config["derep"]["dereplicate"] == "T"  and  snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm":
    dereplicateReport="Dereplicate reads\n"
    dereplicateReport+="---------------------\n\n"
    dereplicateReport+="Clusterize the reads with an identity threshold of 100%.\n\n"
    dereplicateReport+=":red:`Tool:` [vsearch]_\n\n"
    dereplicateReport+=":red:`Version:` " + vsearchVersion+"\n\n"
    dereplicateReport+="**Command:**\n\n"
    dereplicateReport+=":commd:`"+str(snakemake.config["derep"]["vsearch_cmd"]) +" --derep_fulllength  seqs_fw_rev_combined.fasta --output seqs_fw_rev_combined_derep.fasta --uc  seqs_fw_rev_combined_derep.uc --strand " + str(snakemake.config["derep"]["strand"]) + " --fasta_width 0 --minuniquesize "+ str(snakemake.config["derep"]["min_abundance"])+"`\n\n"
    dereplicateReport+="**Output files:**\n\n"
    dereplicateReport+=":green:`- Dereplicated fasta file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/seqs_fw_rev_combined_derep.fasta\n\n"
    dereplicateReport+=":green:`- Cluster file:` "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/seqs_fw_rev_combined_derep.uc\n\n"
    dereplicateReport+="Total number of dereplicated sequences is: "+str(derep_reads).strip()+"\n\n"+deRepBenchmark+"\n\n"

#Clustering report
otuClusteringReport="Cluster OTUs\n"
otuClusteringReport+="---------------------\n\n"
otuClusteringReport+="Assigns similar sequences to operational taxonomic units, or OTUs, by clustering sequences based on a user-defined similarity threshold.\n\n"
if (snakemake.config["pickOTU"]["m"]== "swarm"):
    otuClusteringReport+=":red:`Tool:` [swarm]_\n\n"
    otuClusteringReport+=":red:`Version:` " + swarmVersion+"\n\n"
    otuClusteringReport+=":green:`Distance:` " + snakemake.config["pickOTU"]["s"]+"\n\n"
    otuClusteringReport+="**Command:**\n\n"
    otuClusteringReport+=":commd:`swarm -i "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarm.struct -s "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarm.stats -d "+snakemake.config["pickOTU"]["s"]+" -z -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_combined_derep_otus.txt "
    otuClusteringReport+="-u "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarms.uc -t "+ snakemake.config["pickOTU"]["cpus"]+"  " + snakemake.config["pickOTU"]["extra_params"] + " < "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/seqs_fw_rev_combined_derep.fasta` \n\n"
    otuClusteringReport+="**Output files:**\n\n"
    otuClusteringReport+=":green:`- OTU List:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_combined_derep_otus.txt\n\n"
    otuClusteringReport+=":green:`- Cluster file (uc):` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarms.uc\n\n"
    otuClusteringReport+=":green:`- Swarm stats:` "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarm.stats\n\n"
    otuClusteringReport+=":green:`- Swarm structure:` "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarm.struct\n\n"
    otuClusteringReport+="The total number of different OTUS (swarms) is: " +totalOtus+"\n\n"
else:
    otuClusteringReport+=":red:`Tool:`  ["+snakemake.config["pickOTU"]["m"]+"]_\n\n"
    otuClusteringReport+=":red:`Version:` " + clusterOtuVersion +"\n\n"
    otuClusteringReport+=":green:`Method:` " + snakemake.config["pickOTU"]["m"]+"\n\n"
    otuClusteringReport+=":green:`Identity:` " + snakemake.config["pickOTU"]["s"]+"\n\n"
    otuClusteringReport+="**Command:**\n\n"
    otuClusteringReport+=":commd:`pick_otus.py -m "+snakemake.config["pickOTU"]["m"] + "-i "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/seqs_fw_rev_filtered.fasta -o "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/ "
    otuClusteringReport+="-s "+snakemake.config["pickOTU"]["s"]+" " + snakemake.config["pickOTU"]["extra_params"] + " --threads "+ snakemake.config["pickOTU"]["cpus"] + "` \n\n"  
    otuClusteringReport+="**Output files:**\n\n"
    otuClusteringReport+=":green:`- OTU List:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_filtered_otus.txt\n\n"
    otuClusteringReport+=":green:`- Log file:` "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_filtered_otus.log\n\n"
    otuClusteringReport+="The total number of different OTUS is: " +totalOtus+"\n\n"

#Remap report
remapClusters=""
if  (snakemake.config["derep"]["dereplicate"] == "T"  and  snakemake.config["pickOTU"]["m"] != "usearch") or snakemake.config["pickOTU"]["m"] == "swarm":
    variable_refs+= ".. [ClusterMapper] https://github.com/AlejandroAb/ClusterMapper\n\n"
    remapClusters="Re-map clusters\n"
    remapClusters+="---------------------\n\n"
    remapClusters+="Compute abundance values after dereplication and OTU clustering.\n\n"
    remapClusters+=":red:`Tool:`  Cascabel Java application: [ClusterMapper]_\n\n"
    remapClusters+="**Command:**\n\n"
    if(snakemake.config["pickOTU"]["m"] == "swarm"):
        remapClusters+=":commd:`java -cp Scripts/ClusterMapper/build/classes clustermapper.ClusterMapper uc2otu  -uc "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/seqs_fw_rev_combined_derep.uc -otu " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_combined_derep_otus.txt -o " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_combined_remapped_otus.txt`\n\n"
    else:
        remapClusters+=":commd:`java  -cp Scripts/ClusterMapper/build/classes clustermapper.ClusterMapper uc2uc   -uc "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/derep/seqs_fw_rev_combined_derep.uc -uc2 " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/swarms.uc --full-uc --relabel -l OTU -lidx 1 -o " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_combined_remapped_otus.txt`\n\n"
    remapClusters+="**Output files:**\n\n"
    remapClusters+=":green:`- Mapped abundances:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/seqs_fw_rev_combined_remapped_otus.txt\n\n"
    remapClusters+=":green:`- Log file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/remap.log\n\n"

#Alignment report
alignmentReport = ""
if snakemake.config["alignRep"]["align"] == "T":
    alignmentReport = "\nAlign representative sequences\n-------------------------------\n\n"
    alignmentReport+="Align the sequences in a FASTA file to each other or to a template sequence alignment.\n\n"
    alignmentReport+=":red:`Tool:` [QIIME]_ - align_seqs.py\n\n"
    alignmentReport+=":red:`Version:` "+alignFastaVersion +"\n\n"
    alignmentReport+=":green:`Method:` ["+ snakemake.config["alignRep"]["m"] + "]_\n\n"
    alignmentReport+="**Command:**\n\n"
    alignmentReport+=":commd:`align_seqs.py -m "+snakemake.config["alignRep"]["m"] +" -i "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/"+snakemake.config["assignTaxonomy"]["tool"]+"/representative_seq_set_noSingletons.fasta "+ snakemake.config["alignRep"]["extra_params"] + " -o "
    alignmentReport+=snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_aligned.fasta`\n\n"
    alignmentReport+="**Output files:**\n\n"
    alignmentReport+=":green:`- Aligned fasta file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_aligned.fasta\n\n"
    alignmentReport+=":green:`- Log file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_log.txt\n\n"
    alignmentReport+=alignBenchmark+"\n\n"

    alignmentReport+="Filter alignment\n-----------------\n\n"
    alignmentReport+="Removes positions which are gaps in every sequence.\n\n"
    alignmentReport+=":red:`Tool:` [QIIME]_ - filter_alignment.py\n\n"
    alignmentReport+=":red:`Version:` "+filterAlignmentVersion +"\n\n"
    alignmentReport+="**Command:**\n\n"
    alignmentReport+=":commd:`filter_alignment.py -i  "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_aligned.fasta " +snakemake.config["filterAlignment"]["extra_params"]
    alignmentReport+=" -o  "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/filtered/`\n\n"
    alignmentReport+="**Output file:**\n\n"
    alignmentReport+=":green:`- Aligned fasta file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_aligned_pfiltered.fasta\n\n"
    alignmentReport+=alignFilteredBenchmark+"\n\n"

    alignmentReport+="Make tree\n-----------\n\n"
    alignmentReport+="Create phylogenetic tree (newick format).\n\n"
    alignmentReport+=":red:`Tool:` [QIIME]_ - make_phylogeny.py\n\n"
    alignmentReport+=":red:`Version:` "+makePhyloVersion +"\n\n"
    alignmentReport+=":green:`Method:` ["+ snakemake.config["makeTree"]["method"] + "]_\n\n"
    alignmentReport+="**Command:**\n\n"
    alignmentReport+=":commd:`make_phylogeny.py -i "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_aligned.fasta -o representative_seq_set_noSingletons_aligned_pfiltered.tre "+ snakemake.config["makeTree"]["extra_params"]+ " -t " + snakemake.config["makeTree"]["method"]+"`\n\n"
    alignmentReport+="**Output file:**\n\n"
    alignmentReport+=":green:`- Taxonomy tree:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/aligned/representative_seq_set_noSingletons_aligned.tre\n\n"
    alignmentReport+=makePhyloBenchmark+"\n\n"
#KRONA REPORT
kronaReport = ""
if  snakemake.config["krona"]["report"].casefold() == "t" or snakemake.config["krona"]["report"].casefold() == "true":
    kronaReport+="Krona report\n----------------\n\n"
    kronaReport+="Krona allows hierarchical data to be explored with zooming, multi-layered pie charts.\n\n"
    kronaReport+=":red:`Tool:` [Krona]_\n\n"
    if snakemake.config["krona"]["otu_table"].casefold() != "singletons":
        kronaReport+="These charts were created using the OTU table **without** singletons\n\n"
    else:
        kronaReport+="These charts were created using the OTU table **including** singletons\n\n"

    if snakemake.config["krona"]["samples"].strip() == "all":
        kronaReport+="The report was executed for all the samples.\n\n"
    else:
        kronaReport+="The report was executed for the following target samples: "+ snakemake.config["krona"]["samples"].strip() + "\n\n"
    if "-c" in snakemake.config["krona"]["extra_params"]:
        kronaReport+="The samples were combined on a single chart\n\n"
    else:
        kronaReport+="Each sample is represented on a separated chart (same html report).\n\n"
    kronaReport+="You can see the report at the following link:\n\n"
    kronaReport+=":green:`- Krona report:` kreport_\n\n"
    #kronaReport+=" .. _kreport: ../../runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/krona_report.html\n\n"
    kronaReport+=" .. _kreport: report_files/krona_report."+snakemake.config["assignTaxonomy"]["tool"]+".html\n\n"

    kronaReport+="Or access the html file at:\n\n"
    kronaReport+=":green:`- Krona html file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/otu/taxonomy_"+snakemake.config["assignTaxonomy"]["tool"]+"/krona_report.html\n\n"
    kronaReport+=kronaBenchmark+"\n\n"

###############################################################################
#                         REFERENCES                                     #
################################################################################
#CLUSTER OTUS
if snakemake.config["pickOTU"]["m"] == "uclust":
    variable_refs+= ".. [uclust] Edgar RC. 2010. Search and clustering orders of magnitude faster than BLAST. Bioinformatics 26(19):2460-2461.\n\n"
elif snakemake.config["pickOTU"]["m"] == "usearch61":
    variable_refs+= ".. [usearch61] Edgar RC. 2010. Search and clustering orders of magnitude faster than BLAST. Bioinformatics 26(19):2460-2461.\n\n"
elif snakemake.config["pickOTU"]["m"] == "mothur":
    variable_refs+= ".. [mothur] Schloss PD, Wescott SL, Ryabin T, Hall JR, Hartmann M, Hollister EB, Lesniewski RA, Oakley BB, Parks DH, Robinson CJ, Sahl JW, Stres B, Thallinger GG, Van Horn DJ, Weber CF. 2009. Introducing mothur: Open-source, platform-independent, community-supported software for describing and comparing microbial communities. Appl Environ Microbiol 75(23):7537-7541.\n\n"
elif snakemake.config["pickOTU"]["m"] == "blast":
    variable_refs+= ".. [blast] Altschul SF, Gish W, Miller W, Myers EW, Lipman DJ. 1990. Basic local alignment search tool. J Mol Biol 215(3):403-410\n\n"
elif snakemake.config["pickOTU"]["m"] == "swarm":
    variable_refs+= ".. [swarm] Mahé F, Rognes T, Quince C, de Vargas C, Dunthorn M. (2014) Swarm: robust and fast clustering method for amplicon-based studies. PeerJ 2:e593 doi: 10.7717/peerj.593\n\n"
elif snakemake.config["pickOTU"]["m"] == "cdhit":
    variable_refs+= ".. [cdhit] Cd-hit: Limin Fu, Beifang Niu, Zhengwei Zhu, Sitao Wu and Weizhong Li, CD-HIT: accelerated for clustering the next generation sequencing data. Bioinformatics, (2012), 28 (23): 3150-3152. doi: 10.1093/bioinformatics/bts565.\n\n"
#ALIGNMENT
if snakemake.config["alignRep"]["m"] == "pynast":
    variable_refs+= ".. [pynast] Caporaso JG, Bittinger K, Bushman FD, DeSantis TZ, Andersen GL, Knight R. 2010. PyNAST: a flexible tool for aligning sequences to a template alignment. Bioinformatics 26:266-267.\n\n"
elif snakemake.config["alignRep"]["m"] == "infernal":
    variable_refs+= ".. [infernal] Nawrocki EP, Kolbe DL, Eddy SR. 2009. Infernal 1.0: Inference of RNA alignments. Bioinformatics 25:1335-1337.\n\n"

if snakemake.config["makeTree"]["method"] == "fasttree":
    variable_refs+= ".. [fasttree] Price MN, Dehal PS, Arkin AP. 2010. FastTree 2-Approximately Maximum-Likelihood Trees for Large Alignments. Plos One 5(3).\n\n"
elif snakemake.config["makeTree"]["method"] == "raxml":
    variable_refs+= "..[raxml] Stamatakis A. 2006. RAxML-VI-HPC: Maximum Likelihood-based Phylogenetic Analyses with Thousands of Taxa and Mixed Models. Bioinformatics 22(21):2688-2690.\n\n"
elif snakemake.config["makeTree"]["method"] == "clearcut":
    variable_refs+= "..[clearcut] Evans J, Sheneman L, Foster JA. 2006. Relaxed Neighbor-Joining: A Fast Distance-Based Phylogenetic Tree Construction Method. J Mol Evol 62:785-792.\n\n"
elif snakemake.config["makeTree"]["method"] == "clustalw":
    variable_refs+= "..[clustalw] Larkin MA, Blackshields G, Brown NP, Chenna R, McGettigan PA, McWilliam H, Valentin F, Wallace IM, Wilm A, Lopez R, Thompson JD, Gibson TJ, Higgins DG. 2007. Clustal W and Clustal X version 2.0. Bioinformatics 23:2947-2948.\n\n"

report("""
{title}
    .. role:: commd
    .. role:: red
    .. role:: green

**CASCABEL** is designed to run amplicon sequence analysis across single or multiple read libraries. This report consists of the OTU creation and taxonomic assignment for all the combined accepted reads of given samples or libraries, if multiple.

{txtDescription}

Combine Reads
---------------

Merge all the reads of the individual libraries into one single file.

**Command:**

{catCommand}

**Output file:**

:green:`- Merged reads:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/seqs_fw_rev_filtered.fasta

The total number of reads is: {totalReads}

{combineBenchmark}

{dereplicateReport}

{otuClusteringReport}

{remapClusters}

{otuBenchmark}

Pick representatives
-----------------------
Pick a single representative sequence for each OTU.

:red:`Tool:` [QIIME]_ - pick_rep_set.py

:red:`Version:` {pickRepVersion}

:green:`Method:` {snakemake.config[pickRep][m]}

**Command:**

:commd:`pick_rep_set.py -m {snakemake.config[pickRep][m]} -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/seqs_fw_rev_filtered_otus.txt -f {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/seqs_fw_rev_filtered.fasta -o {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/otu/representative_seq_set.fasta {snakemake.config[pickRep][extra_params]} --log_fp {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/otu/representative_seq_set.log`

**Output file:**

:green:`- Fasta file with representative sequences:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/representative_seq_set.fasta

{pikRepBenchmark}

Assign taxonomy
----------------
Given a set of sequences, assign the taxonomy of each sequence.

{assignTaxoStr}

The percentage of successfully assigned OTUs is: {prcAssignedOtus}

**Output file:**

:green:`- OTU taxonomy assignation:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/representative_seq_set_tax_assignments.txt

{assignTaxaBenchmark}

Make OTU table
---------------
Tabulates the number of times an OTU is found in each sample, and adds the taxonomic predictions for each OTU in the last column.

:red:`Tool:` [QIIME]_ - make_otu_table.py

:red:`Version:` {makeOTUVersion}

**Command:**

:commd:`make_otu_table.py -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/seqs_fw_rev_filtered_otus.txt -t {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/representative_seq_set_tax_assignments.txt {snakemake.config[makeOtu][extra_params]} -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.biom`

**Output file:**

:green:`- Biom format table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.biom

{otuTableBenchmark}

Convert OTU table
------------------
Convert from the BIOM table format to a human readable format.

:red:`Tool:` [BIOM]_

:red:`Version:` {convertBiomVersion}

**Command:**

:commd:`biom convert -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.biom -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.txt {snakemake.config[biom][tableType]} {snakemake.config[biom][headerKey]} {snakemake.config[biom][extra_params]} {snakemake.config[biom][outFormat]}`

**Output file:**

:green:`- TSV format table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.txt

{convertOtuBenchmark}

Summarize Taxa
---------------
Summarize information of the representation of taxonomic groups within each sample.

:red:`Tool:` [QIIME]_ - summarize_taxa.py

:red:`Version:` {summTaxaVersion}

**Command:**

:commd:`summarize_taxa.py -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.biom {snakemake.config[summTaxa][extra_params]} -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/summary/`

**Output file:**

:green:`- Taxonomy summarized counts at different taxonomy levels:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/summary/otuTable_L**N**.txt

Where **N** is the taxonomy level. Default configuration produces levels from 2 to 6.

{summTaxaBenchmark}

Filter OTU table
-----------------
Filter OTUs from an OTU table based on their observed counts or identifier.

:red:`Tool:` [QIIME]_ - filter_otus_from_otu_table.py

:red:`Version:` {filterOTUNoSVersion}

:green:`Minimum observation counts:` {snakemake.config[filterOtu][n]}

**Command:**

:commd:`filter_otus_from_otu_table.py -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable.biom -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable_noSingletons.biom {snakemake.config[filterOtu][extra_params]} -n {snakemake.config[filterOtu][n]}`

**Output file:**

:green:`- Biom table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable_noSingletons.biom

{otuNoSingletonsBenchmark}

Convert Filtered OTU table
---------------------------
Convert the filtered OTU table from the BIOM table format to a human readable format.

:red:`Tool:` [BIOM]_

:red:`Version:` {convertBiomVersion}

**Command:**

:commd:`biom convert -i {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable_noSingletons.biom -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable_noSingletons.txt {snakemake.config[biom][tableType]} {snakemake.config[biom][headerKey]} {snakemake.config[biom][extra_params]} {snakemake.config[biom][outFormat]}`

**Output file:**

:green:`- TSV format table:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/otuTable_noSingletons.txt

{otuNoSingletonsBenchmark}

Filter representative sequences
---------------------------------
Remove sequences according to the filtered OTU biom table.

:red:`Tool:` [QIIME]_ - filter_fasta.py

:red:`Version:` {filterFastaVersion}

**Command:**

:commd:`filter_fasta.py -f {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/otu/representative_seq_set.fasta -o {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/representative_seq_set_noSingletons.fasta {snakemake.config[filterFasta][extra_params]} -b {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/otu/otuTable_noSingletons.biom`

**Output file:**

:green:`- Filtered fasta file:` {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.run}/otu/taxonomy_{snakemake.config[assignTaxonomy][tool]}/representative_seq_set_noSingletons.fasta

{filterBenchmark}

{alignmentReport}

{kronaReport}

Final counts
-------------

{countTxt}

{sequence_bars}

.. image:: report_files/sequence_numbers_all_2.png

:red:`Note:`

:green:`- Assigned OTUs percentage` is the percentage of OTUs that were successfully assigned a taxonomy.

:green:`- No singletons percentage` is the percentage of non-singleton OTUs relative to the complete OTU table.

:green:`- Assigned No singletons` is the number of non-singleton OTUs that were successfully assigned.

References
------------

.. [QIIME] QIIME. Caporaso JG, Kuczynski J, Stombaugh J, Bittinger K, Bushman FD, Costello EK, Fierer N, Gonzalez Pena A, Goodrich JK, Gordon JI, Huttley GA, Kelley ST, Knights D, Koenig JE, Ley RE, Lozupone CA, McDonald D, Muegge BD, Pirrung M, Reeder J, Sevinsky JR, Turnbaugh PJ, Walters WA, Widmann J, Yatsunenko T, Zaneveld J, Knight R. 2010. QIIME allows analysis of high-throughput community sequencing data. Nature Methods 7(5): 335-336.

.. [Cutadapt] Cutadapt v1.15. Marcel Martin. Cutadapt removes adapter sequences from high-throughput sequencing reads. EMBnet.Journal, 17(1):10-12, May 2011. http://dx.doi.org/10.14806/ej.17.1.200

.. [vsearch] Rognes T, Flouri T, Nichols B, Quince C, Mahé F. (2016) VSEARCH: a versatile open source tool for metagenomics. PeerJ 4:e2584. doi: 10.7717/peerj.2584

.. [Krona] Ondov BD, Bergman NH, and Phillippy AM. Interactive metagenomic visualization in a Web browser. BMC Bioinformatics. 2011 Sep 30; 12(1):385.

.. [BIOM] The Biological Observation Matrix (BIOM) format or: how I learned to stop worrying and love the ome-ome. Daniel McDonald, Jose C. Clemente, Justin Kuczynski, Jai Ram Rideout, Jesse Stombaugh, Doug Wendel, Andreas Wilke, Susan Huse, John Hufnagle, Folker Meyer, Rob Knight, and J. Gregory Caporaso. GigaScience 2012, 1:7. doi:10.1186/2047-217X-1-7

{variable_refs}


""", snakemake.output[0], metadata="Author: J. Engelmann & A. Abdala ")
import subprocess
from snakemake.utils import report
import benchmark_utils 
from benchmark_utils import countTxt
from benchmark_utils import readBenchmark
from benchmark_utils import countFasta
from benchmark_utils import countFastaGZ
from benchmark_utils import readSampleDist
from benchmark_utils import make_table
from countData import parseCounts
from seqsChart import createChart

#Parse the total number of counts
#countTxt = parseCounts(snakemake.input.counts)

################################################################################
#                         TOOLS VERSION SECTION                          #
################################################################################
#--fastq
fqv = subprocess.run([snakemake.config["fastQC"]["command"], '--version'], stdout=subprocess.PIPE)
fqVersion = "**" + fqv.stdout.decode('utf-8').strip() + "**"

if snakemake.config["demultiplexing"]["demultiplex"] !=  "F":
   #--qiime extract_barcodes
   ebv = subprocess.run([snakemake.config["qiime"]["path"]+'extract_barcodes.py', '--version'], stdout=subprocess.PIPE)
   ebVersion = ebv.stdout.decode('utf-8')
   ebVersion = "**" + ebVersion[ebVersion.find(":")+1:].strip() + "**"
   #--qiime split_libraries
   spVersion = "**TBD**"
   spv = subprocess.run([snakemake.config["qiime"]["path"]+'split_libraries_fastq.py', '--version'], stdout=subprocess.PIPE)
   spVersion = spv.stdout.decode('utf-8')
   if "Version" in spVersion:
       spVersion = "**" + spVersion[spVersion.find(":")+1:].strip() + "**"
else:
   ebVersion = "**NA**"
   spVersion = "**NA**"

vsearchVersion = "**TBD**"
vsearchV = subprocess.run(['vsearch', '--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
vsearchVersion = "**" + vsearchV.stdout.decode('utf-8').split('\n', 1)[0].strip() + "**"


#--qiime identify_chimeric_seqs
icVersion = "**TBD**"
icv = subprocess.run([snakemake.config["qiime"]["path"]+'identify_chimeric_seqs.py', '--version'], stdout=subprocess.PIPE)
icVersion = icv.stdout.decode('utf-8')
if "Version" in icVersion:
    icVersion = "**" + icVersion[icVersion.find(":")+1:].strip() + "**"
#--pear
try:
    pearv = subprocess.run( [snakemake.config["pear"]["command"]+" -h | grep 'PEAR v'"], stdout=subprocess.PIPE, shell=True)
    pearversion = "**" + pearv.stdout.decode('utf-8').strip() + "**"
except Exception as e:
    pearversion = "Problem reading version"

#--cutadapt
cutVersion = "**TBD**"
if snakemake.config["primers"]["remove"].casefold() == "metadata" or snakemake.config["primers"]["remove"].casefold() == "cfg" or snakemake.config["primers"]["remove"].lower() != "f":
    cutv = subprocess.run(['cutadapt', '--version'], stdout=subprocess.PIPE)
    cutVersion = "**cutadapt v" + cutv.stdout.decode('utf-8').strip() + "**"
    #cutVersion = "cutadapt v TBD"

################################################################################
#                          Chimera check                                       #
################################################################################
removeChimeras = False
if snakemake.config["chimera"]["search"] == "T":
    ################################################################################
    #                       Read log file from remove_chimera.py                   #
    # After searching for chimeras, the user has the option to remove them or    #
    # not. If the user decides to remove them, the executed command is stored    #
    # in the log file; otherwise it stores a message indicating that decision.   #
    ################################################################################
    chimera_log = ""
    try:
        with open(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/chimera/chimera.log") as chimlog:
            for line in chimlog:
                chimera_log += line
    except FileNotFoundError:
        chimera_log = "No Log for identify_chimeric_seqs.py"
    if "The chimeric sequences were removed" in chimera_log:
        removeChimeras = True


################################################################################
#                         Benchmark Section                                    #
# This section is to generate a pre-formatted text with the benchmark info for #
# All the executed rules.                                                      #
################################################################################
fqBench = readBenchmark(snakemake.wildcards.PROJECT+"/samples/"+snakemake.wildcards.sample+"/qc/fq.benchmark")
pearBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/pear.benchmark")
if snakemake.config["demultiplexing"]["demultiplex"] != "F":
    barBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.benchmark")
    splitLibsBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/splitLibs.benchmark")
    #splitLibsRCBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/splitLibs.benchmark")
   # combineBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/combine_seqs_fw_rev.benchmark")
else:
    combineBench=pearBench #THIS IS ONLY FOR TESTING REMOVE!!! 
rmShorLongBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/filter.benchmark")
demultiplexFQBench=""
if snakemake.config["demultiplexing"]["demultiplex"] == "T" and snakemake.config["demultiplexing"]["create_fastq_files"] == "T":
    demultiplexFQBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/demultiplex_fq.benchmark")
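
# Illustrative only: readBenchmark is provided by the project's benchmark_utils
# module (not shown here). Snakemake benchmark files are tab-separated with a
# header row (e.g. "s", "h:m:s", "max_rss", ...); a minimal reader could look
# roughly like this hypothetical sketch, which is never called by the pipeline
# (the real helper also formats the values as reStructuredText for the report).
def _read_benchmark_tsv(path):
    """Return the rows of a Snakemake benchmark file as {column: value} dicts."""
    with open(path) as handle:
        header = handle.readline().rstrip("\n").split("\t")
        return [dict(zip(header, line.rstrip("\n").split("\t"))) for line in handle]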

################################################################################
#                           Compute Counts                                     #
################################################################################
if snakemake.config["gzip_input"] == "F":
    rawCounts = countFasta(snakemake.wildcards.PROJECT+"/samples/"+snakemake.wildcards.sample+"/rawdata/fw.fastq", True);
else:
    rawCounts = countFastaGZ(snakemake.wildcards.PROJECT+"/samples/"+snakemake.wildcards.sample+"/rawdata/fw.fastq.gz", True);
#rawCountsStr= '{0:g}'.format(float(rawCounts))
rawCountsStr= str(int(rawCounts))
#-peared
pearedCounts = 0
if snakemake.config["UNPAIRED_DATA_PIPELINE"] != "T":
    pearedCounts = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/seqs.assembled.fastq", True);
else:
    pearedCounts = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/seqs.assembled.UNPAIRED.fastq", True);

#pearedCountsStr='{0:g}'.format(float(pearedCounts))
pearedCountsStr=str(int(pearedCounts))
prcPeared = "{:.2f}".format(float((pearedCounts/rawCounts)*100))
#-dumultiplex
if snakemake.config["demultiplexing"]["demultiplex"] != "F": #starting to test this  and snakemake.config["demultiplexing"]["bc_mismatch"]>0:
    #in the past we had two files (fw and reverse); now everything is in one file
    #fwAssignedCounts = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.assigned.fna", False)
    #barcodes.fastq_corrected_toRC
    #rvAssignedCounts = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/seqs.assigned.fna", False)

    #prcFwAssigned = "{:.2f}".format(float((fwAssignedCounts/pearedCounts)*100))
    #prcRvAssigned = "{:.2f}".format(float((rvAssignedCounts/pearedCounts)*100))
    #totalAssigned = fwAssignedCounts + rvAssignedCounts
    #prcPearedAssigned = "{:.2f}".format(float((totalAssigned/pearedCounts)*100))
    #prcRawAssigned = "{:.2f}".format(float((totalAssigned/rawCounts)*100))
    #New implementation
    totalAssigned =  countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.assigned.fna", False)
    rvAssignedCounts = countTxt(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.fastq_corrected_toRC")
    fwAssignedCounts = totalAssigned - rvAssignedCounts 
    prcFwAssigned = "{:.2f}".format(float((fwAssignedCounts/pearedCounts)*100))
    prcRvAssigned = "{:.2f}".format(float((rvAssignedCounts/pearedCounts)*100))
    prcPearedAssigned = "{:.2f}".format(float((totalAssigned/pearedCounts)*100))
    prcRawAssigned = "{:.2f}".format(float((totalAssigned/rawCounts)*100))

else: 
    totalAssigned = pearedCounts
    prcPearedAssigned = "{:.2f}".format(float((totalAssigned/pearedCounts)*100))
    prcRawAssigned = "{:.2f}".format(float((totalAssigned/rawCounts)*100))

#--cutadapt
cutSequences = False
if snakemake.config["primers"]["remove"].casefold() == "metadata" or snakemake.config["primers"]["remove"].casefold() == "cfg":
    sequenceNoAdapters = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna", False)
    if (totalAssigned - sequenceNoAdapters) > 0:
        cutSequences = True
        prcCut = "{:.2f}".format(float((sequenceNoAdapters/totalAssigned)*100))
        prcCutRaw = "{:.2f}".format(float((sequenceNoAdapters/rawCounts)*100))

if removeChimeras:
    sequenceNoChimeras = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered_nc.fasta", False)
    prcChim = "{:.2f}".format(float((sequenceNoChimeras/totalAssigned)*100))
    prcChimRaw = "{:.2f}".format(float((sequenceNoChimeras/rawCounts)*100))
    if cutSequences:
        prcChimCut = "{:.2f}".format(float((sequenceNoChimeras/sequenceNoAdapters)*100))
#out="{PROJECT}/runs/{run}/{sample}_data/"
trimmedCounts = countFasta(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered.fasta", False)
prcTrimmedSplit ="{:.2f}".format(float((trimmedCounts/totalAssigned)*100))
prcTrimmedRaw= "{:.2f}".format(float((trimmedCounts/rawCounts)*100))
if cutSequences:
    prcTrimmedCut="{:.2f}".format(float((trimmedCounts/sequenceNoAdapters)*100))
#if removeChimeras:
#    prcTrimmedChimera="{:.2f}".format(float((trimmedCounts/sequenceNoChimeras)*100))
try:
    samplesLib = subprocess.run( ["cat " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered.dist.txt | wc -l"], stdout=subprocess.PIPE, shell=True)
    samplesLibInt = int(samplesLib.stdout.decode('utf-8').strip())
except Exception as e:
    totalReads = "Problem reading outputfile"
################################################################################
#                         Generate sequence amounts chart                      #
################################################################################
numbers=[rawCounts,pearedCounts];
labels=["Raw", "Assembled"];
if snakemake.config["demultiplexing"]["demultiplex"] == "T":
    numbers.append(totalAssigned)
    labels.append("Demultiplexed")
if snakemake.config["primers"]["remove"].casefold() == "metadata" or snakemake.config["primers"]["remove"].casefold() == "cfg":
    numbers.append(sequenceNoAdapters)
    labels.append("Cutadapt")
numbers.append(trimmedCounts)
labels.append("Length filtering")
if snakemake.config["chimera"]["search"] == "T" and removeChimeras:
    numbers.append(sequenceNoChimeras)
    labels.append("No Chimera")
createChart(numbers, tuple(labels),snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files/sequence_numbers."+snakemake.wildcards.sample+".png")
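
# Illustrative only: createChart is provided by the project's seqsChart module
# (not shown here). Conceptually it turns the parallel numbers/labels lists
# into a bar-chart image; a rough, hypothetical matplotlib sketch of that idea
# (assuming matplotlib is available) is given below and is never called here.
def _sketch_counts_chart(values, names, png_path):
    """Draw a simple bar chart of read counts per processing step."""
    import matplotlib
    matplotlib.use("Agg")  # render without a display
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.bar(range(len(values)), values)
    ax.set_xticks(range(len(values)))
    ax.set_xticklabels(names, rotation=45, ha="right")
    ax.set_ylabel("Reads")
    fig.tight_layout()
    fig.savefig(png_path)
    plt.close(fig)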
################################################################################
#                          Chimera check                                       #
################################################################################
variable_refs=""
if snakemake.config["chimera"]["search"] == "T" and snakemake.config["chimera"]["method"] == "usearch61":
    variable_refs+= ".. [usearch61] Edgar RC. 2010. Search and clustering orders of magnitude faster than BLAST. Bioinformatics 26(19):2460-2461.\n\n"
else: 
    variable_refs+= ".. [uchime] Edgar RC, Haas BJ, Clemente JC, Quince C, Knight R (2011) UCHIME improves sensitivity and speed of chimera detection. Bioinformatics, 27 (16): 2194-2200. doi:10.1093/bioinformatics/btr381. \n\n"
quimeraStr = ""
if snakemake.config["chimera"]["search"] == "T":
    quimeraStr="Identify Chimera\n-------------------\n\n"
    quimeraStr+="Identify possible chimeric sequences (sequences generated due to the PCR amplification of multiple templates or parent sequences).\n\n"
    if snakemake.config["chimera"]["method"] == "usearch61":
        quimeraStr += ":red:`Tool:` [QIIME]_ - identify_chimeric_seqs.py\n\n"
        quimeraStr += ":red:`Version:` "+ icVersion +"\n\n"
        quimeraStr += ":red:`Method:` [usearch61]_ \n\n"
    else:
        quimeraStr += ":red:`Tool:` [Vsearch]_ - vsearch\n\n"
        quimeraStr += ":red:`Version:` "+ vsearchVersion +"\n\n"        
        quimeraStr += ":red:`Method:` "+ str(snakemake.config["chimera"]["method"]) +" - uses [uchime]_ \n\n"    
    quimeraStr += "**Command:**\n\n"
    if snakemake.config["chimera"]["method"] == "usearch61":
         quimeraStr+=":commd:`identify_chimeric_seqs.py -m "+ str(snakemake.config["chimera"]["method"])+" -i "+ str(snakemake.wildcards.PROJECT)+"/runs/"+str(snakemake.wildcards.run)+"/"+str(snakemake.wildcards.sample)+"_data/seqs_fw_rev_accepted.fna "+str(snakemake.config["chimera"]["extra_params"])
         quimeraStr+=" -o "+ str(snakemake.wildcards.PROJECT)+"/runs/"+str(snakemake.wildcards.run)+"/"+str(snakemake.wildcards.sample)+"_data/chimera/` \n\n"
    else:
         quimeraStr+=":commd:`vsearch --"+ str(snakemake.config["chimera"]["method"])+" "+ str(snakemake.wildcards.PROJECT)+"/runs/"+str(snakemake.wildcards.run)+"/"+str(snakemake.wildcards.sample)+"_data/seqs_fw_rev_accepted.fna --threads "+ str(snakemake.config["chimera"]["threads"]) +" " +str(snakemake.config["chimera"]["extra_params"])   
         quimeraStr+=" --uchimeout "+ str(snakemake.wildcards.PROJECT)+"/runs/"+str(snakemake.wildcards.run)+"/"+str(snakemake.wildcards.sample)+"_data/chimera/chimeras.summary.txt` \n\n"
    quimeraStr+="**Output files:**\n\n"
    if snakemake.config["chimera"]["method"] == "usearch61":
        quimeraStr+=":green:`- File with the possible chimeric sequences:` "+str(snakemake.wildcards.PROJECT)+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/chimera/chimeras.txt\n\n"
    else:
        quimeraStr+=":green:`- File with the possible chimeric sequences:` "+str(snakemake.wildcards.PROJECT)+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/chimera/chimeras.summary.txt\n\n"
    identifyChimeraBench=readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/chimera/chimera.benchmark")
    quimeraStr+=identifyChimeraBench
    quimeraStr+=chimera_log
    if removeChimeras:
        quimeraStr+=":red:`Reads after remove chimeric sequences:` "+ str(sequenceNoChimeras)+"\n\n"
        quimeraStr+=":red:`Percentage of reads vs raw reads:` "+ str(prcChimRaw) + "%\n\n"
        quimeraStr+=":red:`Percentage of reads vs demultiplexed reads:` "+ str(prcChim) + "%\n\n"
        if cutSequences:
            quimeraStr+=":red:`Percentage of reads vs cutadapt:` "+ str(prcChimRaw) + "%\n\n"




################################################################################
#                           Peared FastQC                                     #
################################################################################
fastQCPearStr = ""
if snakemake.config["fastQCPear"] == "T":
    fastQCPearStr = "Peared FastQC Analysis\n------------------------\n\n" # title
    fastQCPearStr += "Check the quality of the reads after assembly.\n\n"
    fastQCPearStr += ":red:`Tool:` [FastQC]_\n\n"
    fastQCPearStr += ":red:`Version:` "+ fqVersion +"\n\n"
    fastQCPearStr += "**Command:**\n\n"
    fastQCPearStr += ":commd:`fastqc "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/seqs.assembled.fastq --extract -o  "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/qc`\n\n"
    fastQCPearStr += "**Output files:**\n\n:green:`- FastQC report:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/qc/seqs.assembled_fastqc.html FQ_Report_ \n\n"
    fastQCPearStr += ".. _FQ_Report: peared/qc/seqs.assembled_fastqc.html \n\n"
    fastQCPearStrBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/qc/fq.benchmark")
    fastQCPearStr += fastQCPearStrBench

################################################################################
#                           Extract Barcode                                    #
################################################################################
extractBCStr = ""
if snakemake.config["demultiplexing"]["demultiplex"] != "F":
    extractBCStr ="Extract barcodes\n-----------------\n\n"
    extractBCStr +="Extract the barcodes used to identify individual samples.\n\n"
    extractBCStr +=":red:`Tool:` [QIIME]_ - extract_barcodes.py\n\n"
    extractBCStr +=":red:`Version:` "+ebVersion+"\n\n"
    extractBCStr +="**Command:**\n\n"
    extractBCStr +=":commd:`extract_barcodes.py -f "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/seqs.assembled.fastq -c "+str(snakemake.config["ext_bc"]["c"])+ " " + str(snakemake.config["ext_bc"]["bc_length"])+ " " + snakemake.config["ext_bc"]["extra_params"] + " -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/`\n\n"
    extractBCStr +="**Output files:**\n\n"
    extractBCStr +=":green:`- Fastq file with barcodes:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.fastq\n\n"
    extractBCStr +=":green:`- Fastq file with the reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/reads.fastq\n\n"
    extractBCStr +=barBench
################################################################################
#                           CORRECT Barcodes                                   #
################################################################################
correctBCStr = ""
bcFile="barcodes.fastq"
if snakemake.config["demultiplexing"]["demultiplex"] != "F": # and snakemake.config["demultiplexing"]["bc_mismatch"]:
    correctBCStr = "Correct Barcodes\n--------------------\n"
    correctBCStr += "Try to correct the barcode from unassigned reads and place reads in correct orientetion.\n\n"
    correctBCStr += "Maximum number of mismatches **"  + str(snakemake.config["demultiplexing"]["bc_mismatch"]) + "**.\n\n"
    correctBCStr +=":red:`Tool:` Cascabel Java application\n\n"
    correctBCStr +="**Command:**\n\n"
    correctBCStr += ":commd:`java -jar Scripts/BarcodeCorrector.jar -b "+snakemake.wildcards.PROJECT+"/metadata/sampleList_mergedBarcodes_"+snakemake.wildcards.sample+".txt -fb "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.fastq -fr "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/reads.fastq  -m "  + str(snakemake.config["demultiplexing"]["bc_mismatch"]) + " -o  " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.fastq_corrected -or  " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/reads.fastq_corrected -rc -x " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/sample_matrix.txt  >  " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/demux.log`\n\n"
    correctBCStr += "**Output files:**\n\n:green:`- Barcode corrected file:` "+snakemake.wildcards.PROJECT+ "/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.fastq_corrected\n\n"
    correctBCStr += ":green:`- Reads corrected file:` "+snakemake.wildcards.PROJECT+ "/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/reads.fastq_corrected\n\n"
    correctBCStr += ":green:`- Error correction summary:` "+snakemake.wildcards.PROJECT+ "/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/demux.log\n\n"
    correctBarBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes_corrected.benchmark")
    correctBCStr += correctBarBench
    bcFile="barcodes.fastq_corrected"

splitStr = ""
if snakemake.config["demultiplexing"]["demultiplex"] != "F":
    splitStr+="Demultiplexing\n"
    splitStr+="----------------\n"
    splitStr+="For library splitting, also known as demultiplexing, Cascabel performs several steps to assign fragments in the original as well as reverse orientation to the correct sample.\n\n"
    splitStr+="Split samples from Fastq file\n"
    splitStr+="~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n"
    splitStr+=":red:`Tool:` [QIIME]_ - split_libraries_fastq.py\n\n"
    splitStr+=":red:`version:` "+ spVersion+"\n\n"
    splitStr+="**Command:**\n\n"
    splitStr+=":commd:`split_libraries_fastq.py -m "+snakemake.wildcards.PROJECT+"/metadata/sampleList_mergedBarcodes_"+snakemake.wildcards.sample+".txt -i "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/reads.fastq -o  "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs -b "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/"+bcFile+" -q "+str(snakemake.config["split"]["q"])+" -r "+str(snakemake.config["split"]["r"])+" --retain_unassigned_reads "+str(snakemake.config["split"]["extra_params"])+" --barcode_type "+str(snakemake.config["split"]["barcode_type"])+"`\n\n"
    splitStr+=splitLibsBench

    splitStr+="Retain assigned reads\n"
    splitStr+="~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n"
    splitStr+="**Command:**\n\n"
    splitStr+=":commd:`cat "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.fna | grep -P -A1 \"(?!>Unass)^>\" | sed '/^--$/d' > "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.assigned.fna`\n\n"

    splitStr+="Create file with only unassigned reads\n"
    splitStr+="~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n"
    splitStr+="**Command:**\n\n"
    splitStr+=":commd:`cat "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.fna | grep \"^>Unassigned\" |  sed 's/>Unassigned_[0-9]* /@/g' | sed 's/ .*//' | grep -F -w -A3  -f - "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/seqs.assembled.fastq |  sed '/^--$/d' >"+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/unassigned.fastq`\n\n"

#    splitStr+="Reverse complement unassigned reads\n"
#    splitStr+="~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n"
#    splitStr+=":red:`Tool:` [Vsearch]_\n\n"
#    splitStr+=":red:`version:`  "+vsearchVersion+"\n\n"
#    splitStr+="**Command:**\n\n"
#    splitStr+=":commd:`vsearch --fastx_revcomp "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/unassigned.fastq  --fastqout "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/unassigned.reversed.fastq`\n\n"


#    splitStr+="Barcode extraction for reverse complemented, unassigned reads\n"
#    splitStr+="~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n"
#    splitStr +=":red:`Tool:` [QIIME]_ - extract_barcodes.py\n\n"
#    splitStr +=":red:`Version:` "+ebVersion+"\n\n"
#    splitStr+="**Command:**\n\n"
#    splitStr +=":commd:`extract_barcodes.py -f "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/unassigned.reversed.fastq -c "+str(snakemake.config["ext_bc"]["c"])+" "+str(snakemake.config["ext_bc"]["bc_length"])+" "+snakemake.config["ext_bc"]["extra_params"]+" -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes_unassigned/`\n\n"

#    if snakemake.config["bc_mismatch"]:
#        splitStr += "Correct reverse complemented barcodes \n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
#        splitStr += "Maximum number of mismatches **"  + str(snakemake.config["bc_mismatch"]) + "**.\n\n"
#        splitStr +=":red:`Tool:` Cascabel Java application\n\n"
#        splitStr +="**Command:**\n\n"
#        splitStr += ":commd:`java -cp Scripts/BarcodeCorrector/build/classes/  barcodecorrector.BarcodeCorrector -b "+snakemake.wildcards.PROJECT+"/metadata/sampleList_mergedBarcodes_"+snakemake.wildcards.sample+".txt -f "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes_unassigned/barcodes.fastq_corrected -m "  + str(snakemake.config["bc_mismatch"]) + "`\n\n"
#        splitStr += "**Output file:**\n\n:green:`- Barcode corrected file:` "+snakemake.wildcards.PROJECT+ "/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes/barcodes.fastq_corrected\n\n"
#        splitStrBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes_unassigned/barcodes_corrected.benchmark")
#        splitStr += splitStrBench+"\n\n"

#    splitStr +="Split reverse complemented reads\n"
#    splitStr+="~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n"
#    splitStr +=":red:`Tool:` [QIIME]_ - extract_barcodes.py\n\n"
#    splitStr +=":red:`Version:` "+ebVersion+"\n\n"
#    splitStr+="**Command:**\n\n"
#    splitStr +=":commd:`split_libraries_fastq.py -m "+snakemake.wildcards.PROJECT+"/metadata/sampleList_mergedBarcodes_"+snakemake.wildcards.sample+".txt -i "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/barcodes_unassigned/reads.fastq -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC -b "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+str(snakemake.wildcards.sample)+"_data/barcodes_unassigned/"+bcFile+" -q "+str(snakemake.config["split"]["q"])+" -r "+str(snakemake.config["split"]["r"])+" "+str(snakemake.config["split"]["extra_params"])+" --barcode_type "+str(snakemake.config["split"]["barcode_type"])+"`\n\n"
#    splitStr +=splitLibsBench+"\n\n"

    splitStr +="**Output files:**\n\n"
#   # splitStr +=":green:`- FW reads fasta file with new header:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.assigned.fna\n\n"
    splitStr +=":green:`- Text histogram with the length of the fw reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/histograms.txt\n\n"
    splitStr +=":green:`- Log file for the fw reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/split_library_log.txt\n\n"
#   # splitStr +=":green:`- RV reads fasta file with new header:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/seqs.assigned.fna\n\n"
#    splitStr +=":green:`- Text histogram with the length of the rv reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/histograms.txt\n\n"
#    splitStr +=":green:`- Log file for the rv reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/split_library_log.txt\n\n"
#    splitStr +=":green:`- Fasta file with unassigned reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/seqs.unassigned.fna\n\n"
    splitStr +=":red:`Number of reads assigned on FW:` "+str(fwAssignedCounts)+" = "+str(prcFwAssigned)+"% of the peared reads\n\n"
    splitStr +=":red:`Number of reads assigned on RVC:` "+str(rvAssignedCounts)+" = "+str(prcRvAssigned)+"% of the peared reads\n\n"

################################################################################
#                           Single FastQ creation                              #
################################################################################
demultiplexFQ = ""
if snakemake.config["demultiplexing"]["demultiplex"] == "T" and snakemake.config["demultiplexing"]["create_fastq_files"] == "T":
    demultiplexFQ = "Generate single sample fastq files\n------------------------------------------\n\n" # title
    demultiplexFQ += "Create single fastq files per samples (based on the raw data without applying any filtering).\n\n"
    demultiplexFQ +=":red:`Tool:` Cascabel Java program\n\n"
    demultiplexFQ += "**Command:**\n\n"
    demultiplexFQ += ":commd:`"+snakemake.config["java"]["command"] + " -cp Scripts DemultiplexQiime --txt -a rv -b "+ str(snakemake.config["demultiplexing"]["bc_mismatch"]) + " -d "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.assigned.ori.txt -o "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/ "
    ext=".gz"
    if snakemake.config["gzip_input"].casefold() == "f":
        ext=""
    demultiplexFQ += "-r1 "+snakemake.wildcards.PROJECT+"/samples/"+snakemake.wildcards.sample+"/rawdata/fw.fastq"+ext+" -r2 "+snakemake.wildcards.PROJECT+"/samples/"+snakemake.wildcards.sample+"/rawdata/fw.fastq"+ext+"`\n\n"
    if snakemake.config["demultiplexing"]["remove_bc"]:
        demultiplexFQ +=":red:`Barcodes removed:` "+ str(snakemake.config["demultiplexing"]["remove_bc"]) + " first bases\n\n"
#Now only for ASV workflow
   # if snakemake.config["primers"]["remove"].lower() == "cfg":
   #     demultiplexFQ +=":red:`Primers removed:` **FW** " + snakemake.config["primers"]["fw_primer"] + " **RV** " +snakemake.config["primers"]["rv_primer"]+"\n\n"
   # elif snakemake.config["primers"]["remove"].lower() == "metadata":
   #     demultiplexFQ +=":red:`Removed primers` were obtained from the metadata file.\n\n" 
    demultiplexFQ += "**The demultiplexed fastq files are located at:**\n\n:green:`- Demultiplexed directory:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/\n\n"
    demultiplexFQ += ":green:`- Summary file:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/summary.txt\n\n"
    demultiplexFQ += demultiplexFQBench
   # also this only for the ASV workflow
   # if (snakemake.config["primers"]["remove"].lower() == "cfg" or snakemake.config["primers"]["remove"].lower() == "metadata"):
   #     demultiplexFQ += "**Remove primers:**\n\nFollowing, primers were removed from the fastq files\n\n"
   #     demultiplexFQ +=":red:`Tool:` [Cutadapt]_\n\n"
   #     demultiplexFQ += ":red:`Version:` "+cutVersion+"\n\n"
   #     demultiplexFQ += "**Command:**\n\n"
   #     if snakemake.config["primers"]["remove"].lower() == "cfg":
   #         if snakemake.config["LIBRARY_LAYOUT"].casefold()=="pe":
   #             demultiplexFQ += ":commd:`cutadapt -g "+ snakemake.config["primers"]["fw_primer"]  + " -G " + snakemake.config["primers"]["rv_primer"]  + " " +snakemake.config["primers"]["extra_params"]+" -O "+ snakemake.config["primers"]["min_overlap"]  +" -m " +snakemake.config["primers"]["min_length"]+ " -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/SAMPLE_1.fastq.gz -p "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/SAMPLE_2.fastq.gz "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/SAMPLE_1.fq.gz  "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/SAMPLE_2.fq.gz  >> "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/"+snakemake.wildcards.sample+".cutadapt.log`\n\n"
   #         else:
   #             demultiplexFQ += ":commd:`cutadapt -g "+ snakemake.config["primers"]["fw_primer"]  + " " +snakemake.config["primers"]["extra_params"]+" -O "+ snakemake.config["primers"]["min_overlap"]  +" -m " +snakemake.config["primers"]["min_length"]+ " -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/SAMPLE_1.fastq.gz "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/SAMPLE_1.fq.gz  >> "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/"+snakemake.wildcards.sample+".cutadapt.log`\n\n"
   #         demultiplexFQ += "The above command ran once for each single sample fastq file(s) using the mentioned primers\n\n"
   #     else: #is from metadata
   #         if snakemake.config["LIBRARY_LAYOUT"].casefold()=="pe":
   #             demultiplexFQ += ":commd:`cutadapt -g sample_FW_primer  -G sample_RV_primer " +snakemake.config["primers"]["extra_params"]+" -O "+ snakemake.config["primers"]["min_overlap"]  +" -m " +snakemake.config["primers"]["min_length"]+ " -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/SAMPLE_1.fastq.gz -p "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/SAMPLE_2.fastq.gz "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/SAMPLE_1.fq.gz "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/SAMPLE_2.fq.gz  >> "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/"+snakemake.wildcards.sample+".cutadapt.log`\n\n"
   #         elif snakemake.config["LIBRARY_LAYOUT"].casefold()=="se":
   #             demultiplexFQ += ":commd:`cutadapt -g sample_FW_primer "+ " " +snakemake.config["primers"]["extra_params"]+" -O "+ snakemake.config["primers"]["min_overlap"]  +" -m " +snakemake.config["primers"]["min_length"]+ " -o "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/SAMPLE_1.fastq.gz "+ snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/SAMPLE_1.fq.gz  >> "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed/"+snakemake.wildcards.sample+".cutadapt.log`\n\n"
   #         demultiplexFQ += "The above command ran once for each single sample fastq file(s) and primers were obtained from the mapping file accordingly to its sample\n\n"    
   #     demultiplexFQ += ":green:`- Reads without primers:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/primer_removed\n\n"
   #     if "--discard-untrimmed" in snakemake.config["primers"]["extra_params"]:
   #         demultiplexFQ += ":green:`- Discarded reads (no primer):` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/demultiplexed/reads_discarded_primer\n\n"
   #     else:
   #         demultiplexFQ += ":red:`- Given the options, reads without primers were not removed!`\n\n"
   #     demultiplexFQ += ":green:`- Primer removal results by sample:` primers_removal_\n\n"
   #     demultiplexFQ +=" .. _primers_removal: report_files/cutadapt."+snakemake.wildcards.sample+".fastq_summary.tsv\n\n"

################################################################################
#                           Combine FW and Reverse reads                       #
################################################################################

combineFR = ""
#if snakemake.config["demultiplexing"]["demultiplex"] != "F":
#    combineFR = "Combine reads\n---------------------------------\n\n" # title
#    combineFR += "Concatenate forward and reverse reads.\n\n"
#    combineFR += "**Command:**\n\n"
#    combineFR += ":commd:`cat "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibs/seqs.assigned.fna "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/splitLibsRC/seqs.assigned.fna > "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna`\n\n"
#    combineFR +="**Output files:**\n\n"
#    combineFR +=":green:`- Fasta file with combined reads:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna\n\n"
#    combineFR +=":red:`- Total number of acepted reads:` " +str(totalAssigned)+ " = "+ str(prcPearedAssigned)+ "% of the peared reads or "+str(prcRawAssigned)+"% of the raw reads.\n\n"
#    combineFR += combineBench

################################################################################
#                          Cut adapters                                        #
################################################################################
cutAdaptStr = ""
if snakemake.config["primers"]["remove"].casefold() == "metadata" or snakemake.config["primers"]["remove"].casefold() == "cfg":
    cutAdaptStr = "Remove sequence primers\n------------------------\n\n" # title
    cutAdaptStr +="Remove the adapters / primers from the reads.\n\n"
    cutAdaptStr +=":red:`Tool:` [Cutadapt]_\n\n"
    cutAdaptStr += ":red:`Version:` "+cutVersion+"\n\n"
    cutAdaptStr += "**Command:**\n\n"
    primer_lines=0
    if snakemake.config["primers"]["remove"].lower() == "cfg":
        #cutAdaptStr += ":commd:`cutadapt "+ str(snakemake.config["cutadapt"]["adapters"])+" " + str(snakemake.config["cutadapt"]["extra_params"]) + " -o " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna\n\n"
        #cutAdaptStr +=  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna > " +  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.log`\n\n"
        cutAdaptStr += ":commd:`cutadapt -g "+ str(snakemake.config["primers"]["fw_primer"])+"..."+str(snakemake.config["primers"]["rv_primer"])+" "+ str(snakemake.config["primers"]["extra_params"]) + " -o " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna > " +  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.log`\n\n"
    elif snakemake.config["primers"]["remove"].lower() == "metadata":
        primers=""
        try:
            #with open(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/primers.txt") as pfile:
            with open(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/report_files/primers."+snakemake.wildcards.sample+".txt") as pfile:
                primers=pfile.read()
                #primer_lines=len(pfile.readlines())
                primer_lines=len(primers.split("\n"))
                if primer_lines > 1:
                    if snakemake.config["LIBRARY_LAYOUT"].casefold()=="pe":
                        primers="-g sample_FW_primer...sampleRV_primer"
                    else:
                        primers="-g sample_FW_primer"

        except FileNotFoundError:
            primers="-ERROR reading primer file-"
        #cutAdaptStr += ":commd:`cutadapt "+primers +" " + str(snakemake.config["cutadapt"]["extra_params"]) + " -o " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna\n\n"
        #cutAdaptStr += snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna > " +  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.log`\n\n"
        cutAdaptStr += ":commd:`cutadapt "+primers +" " + str(snakemake.config["primers"]["extra_params"]) + " -o " + snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna "+  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna > " +  snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.log`\n\n"

#cutAdaptStr += "*PRIMERS: primer sequences were obtained from the metadata file\n\n"
    if primer_lines > 1:
        cutAdaptStr += ":green:`- Primers used by sample:` primers_sample_\n\n"
        cutAdaptStr +=  ".. _primers_sample: report_files/primers."+snakemake.wildcards.sample+".txt\n\n"
    cutAdaptStr += "**Output files:**\n\n:green:`- Reads without adapters:` "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna\n\n"
    if cutSequences:
        cutAdaptStr += ":red:`Total number of reads after cutadapt:` "+ str(sequenceNoAdapters) + " = " + str(prcCut) + "% of the assigned reads or "+ str(prcCutRaw)+"% of the total reads\n\n"
    #cutAdaptStr+=":\n\n"
    cutAdaptStr+=":green:`- Primer removal results by sample:` primers_OTU_\n\n"
    cutAdaptStr+=" .. _primers_OTU: report_files/cutadapt."+snakemake.wildcards.sample+".summary.tsv\n\n"

    cutAdaptBench =readBenchmark(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/cutadapt.benchmark")
    cutAdaptStr += cutAdaptBench+"\n\n"
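    # Illustrative sketch: cutadapt's linked-adapter syntax (-g FWD...REV) anchors the
    # forward primer at the 5' end and the reverse complement of the reverse primer at
    # the 3' end of each merged read, as built above. The primer sequences below are
    # hypothetical examples only, not values taken from the config or metadata.
    example_fw_primer = "GTGYCAGCMGCCGCGGTAA"    # hypothetical 16S forward primer
    example_rv_primer = "GGACTACNVGGGTWTCTAAT"   # hypothetical 16S reverse primer
    example_linked_arg = "-g " + example_fw_primer + "..." + reverse_complement(example_rv_primer)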
################################################################################
#                          Counts for too long too shorts                      #
################################################################################
#trimmedStr =  ":red:`Total number of reads after trimming:` "+str(trimmedCounts)+ "="+ str(prcTrimmedSplit)+"% of the demultiplexed reads or " + str(prcTrimmedRaw) + "% of the raw reads\n\n"
trimmedStr =  ":red:`Total number of reads after length filtering:` "+str(trimmedCounts)+ "\n\n"
trimmedStr += ":red:`Percentage of reads vs raw reads:` "+str(prcTrimmedRaw)+"%\n\n"
trimmedStr+=":red:`Percentage of reads vs demultiplexed reads:` " + str(prcTrimmedSplit) + "%\n\n"
if cutSequences:
    trimmedStr+=":red:`Percentage of reads after cutadapt:` "+ str(prcTrimmedCut) + "%\n"
#if removeChimeras:
#    trimmedStr+=":red:`Percentage of reads after remove chimeras vs trimmed reads:` "+ str(prcTrimmedChimera) + "%\n"


#bcValidationBench =readBenchmark(snakemake.wildcards.PROJECT+"/metadata/bc_validation/"+snakemake.wildcards.sample+"/validation.benchmark")
################################################################################
#                     Remove too short and too long reads                      #
#  This rule creates a temporary file with the short and long values chosen    #
#  by the user in order to remove the reads. The file filter.log contains the  #
#  minimum expected length for the reads followed by the maximum length, tab   #
#  separated (shorts <TAB> longs)                                              #
################################################################################
shorts = str(snakemake.config["rm_reads"]["shorts"])
longs = str(snakemake.config["rm_reads"]["longs"])
with open(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/filter.log") as trimlog:
    for line in trimlog:
        tokens = line.split("\t")
        if len(tokens)>2:
            shorts = tokens[1]
            longs = tokens[2]
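# Assumed shape of filter.log (hypothetical example; the file is written by an earlier
# rule): a tab-separated record whose second and third fields hold the length cut-offs,
# e.g. a line like "filter<TAB>220<TAB>470" would yield shorts="220" and longs="470".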
################################################################################
#                                Final Counts                                  #
################################################################################
countTxt="Following you can see the final read counts: \n\n"
fileData = []
headers = []
data =[]
headers.append("File description")
headers.append("Location")
headers.append("Number of reads")
headers.append("Prc(%) vs raw")
fileData.append(headers)
#raw
data.append("Raw reads")
data.append(snakemake.wildcards.PROJECT+"/samples/"+snakemake.wildcards.sample+"/rawdata/\\*.fq")
data.append(str(rawCounts))
data.append("{:.2f}".format(float((rawCounts/rawCounts)*100))+"%")
fileData.append(data)
data=[]
#pear
data.append("Assembled reads")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/peared/seqs.assembled.fastq")
data.append(str(pearedCounts))
data.append("{:.2f}".format(float((pearedCounts/rawCounts)*100))+"%")
fileData.append(data)
data=[]
#splitted
if snakemake.config["demultiplexing"]["demultiplex"] == "T":
    data.append("Demultiplexed reads")
    data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted.fna")
    data.append(str(totalAssigned))
    data.append("{:.2f}".format(float((totalAssigned/rawCounts)*100))+"%")
    fileData.append(data)
    data=[]
#adapters
if snakemake.config["primers"]["remove"].casefold() == "metadata" or snakemake.config["primers"]["remove"].casefold() == "cfg":
    data.append("Adapter removed")
    data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_accepted_no_adapters.fna")
    data.append(str(sequenceNoAdapters))
    data.append("{:.2f}".format(float((sequenceNoAdapters/rawCounts)*100))+"%")
    fileData.append(data)
    data=[]
#length filtered
data.append("Length filtered")
data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered.fasta")
data.append(str(trimmedCounts))
data.append("{:.2f}".format(float((trimmedCounts/rawCounts)*100))+"%")
fileData.append(data)
data=[]
#chimera
if snakemake.config["chimera"]["search"] == "T" and removeChimeras:
    data.append("Non chimeric reads")
    data.append(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered_nc.fasta")
    data.append(str(sequenceNoChimeras))
    data.append("{:.2f}".format(float((sequenceNoChimeras/rawCounts)*100))+"%")
    fileData.append(data)
    data=[]
countTxt += make_table(fileData)
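# Sketch of the assumed structure consumed by make_table (a helper defined elsewhere in
# this script): a list of rows, the first one being the header, for example
#   [["File description", "Location", "Number of reads", "Prc(%) vs raw"],
#    ["Raw reads", "<PROJECT>/samples/<sample>/rawdata/*.fq", "123456", "100.00%"]]
# make_table is assumed to render such rows as an RST table for the report below.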
################################################################################
#                       Sample distribution chart                              #
################################################################################

sampleDistChart = ""
if snakemake.config["demultiplexing"]["demultiplex"] == "T":
    dist_table = readSampleDist(snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered.dist.txt",trimmedCounts,samplesLibInt)
    sampleDistChart = "Sample distribution\n--------------------------------------\n\n" # title
    sampleDistChart += dist_table + "\n\n"
    sampleDistChart += ".. image:: report_files/seqs_fw_rev_filtered."+snakemake.wildcards.sample+".dist.png\n\n"
    sampleDistChart +="The previous chart shows the number of clean reads per sample. The bars are sorted from left to right, according to the metadata input file.\n\n"
    sampleDistChart +="**To see more details about the number of reads per sample in this library, please refer to the file:** "+snakemake.wildcards.PROJECT+"/runs/"+snakemake.wildcards.run+"/"+snakemake.wildcards.sample+"_data/seqs_fw_rev_filtered.dist.txt\n\n"


################################################################################
#                       User description section                               #
################################################################################
desc = snakemake.config["description"]
txtDescription = ""
if len(desc) > 0:
    txtDescription = "\n**User description:** "+desc+"\n"


################################################################################
#                       controls warning section                               #
################################################################################
"""
We want to include a small section to warn the user about the use of controls. This could be
the case if they are demultiplexing a complete library. 
"""
ctrlWarning =""
if snakemake.config["demultiplexing"]["demultiplex"] == "T":
    ctrlWarning="\n:warn:`Note: Library demultiplexing has been carried out, if you have controls among your samples, please be aware that Cascabel won't perform any special operation with them. They are treated as any other sample within this workflow. Please make sure to analyze your controls with other tools, and correct your sample counts for potential contamination.`\n"
################################################################################
#                                Report                                        #
################################################################################

report("""
Amplicon Analysis Report for Library: {snakemake.wildcards.sample}
=====================================================================
    .. role:: commd
    .. role:: red
    .. role:: green
    .. role:: warn

**CASCABEL** is designed to run amplicon sequence analysis across single or multiple read libraries.

The objective of this pipeline is to create output files which allow the user to explore the data in a simple and meaningful way, and which facilitate downstream analysis.

Another aim of **CASCABEL** is to encourage the documentation process by creating this report, in order to ensure data analysis reproducibility.

{txtDescription}

{ctrlWarning}

Below you can see all the steps that were taken in order to obtain the final results of the pipeline.

Raw Data
---------
The raw data for this library can be found at:

:green:`- FW raw reads:` {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/rawdata/fw.fastq

:green:`- RV raw reads:` {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/rawdata/rv.fastq

:red:`Number of total reads:` {rawCountsStr}

Quality Control
------------------
Evaluate quality on raw reads.

:red:`Tool:` [FastQC]_

:red:`Version:` {fqVersion}

**Command:**

:commd:`fastqc {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/rawdata/fw.fastq {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/rawdata/rv.fastq --extract -o {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/qc/`

You can follow the links below to see the complete FastQC reports:

:green:`- FastQC for sample {snakemake.wildcards.sample}_1:` FQ1_

    .. _FQ1: ../../../samples/{snakemake.wildcards.sample}/qc/fw_fastqc.html

:green:`- FastQC for sample {snakemake.wildcards.sample}_2:` FQ2_

    .. _FQ2: ../../../samples/{snakemake.wildcards.sample}/qc/rv_fastqc.html

{fqBench}


Read pairing
----------------
Align paired-end reads and merge them into a single sequence where they overlap.

:red:`Tool:` [PEAR]_

:red:`Version:` {pearversion}

**Command:**

:commd:`pear -f {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/rawdata/fw.fastq -r {snakemake.wildcards.PROJECT}/samples/{snakemake.wildcards.sample}/rawdata/rv.fastq -t {snakemake.config[pear][t]} -v {snakemake.config[pear][v]} -j {snakemake.config[pear][j]} -p {snakemake.config[pear][p]} -o {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/peared/seqs > {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/peared/pear.log`

**Output files:**

:green:`- Merged reads:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/peared/seqs.assembled.fastq

:green:`- Log file:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/peared/pear.log

:red:`Number of peared reads:` {pearedCountsStr} =  {prcPeared}%

{pearBench}

{fastQCPearStr}

{extractBCStr}

{correctBCStr}

{splitStr}

{demultiplexFQ}

{combineFR}

{cutAdaptStr}


Remove too long and too short reads
------------------------------------
Remove reads that are too short or too long: reads whose length falls more than a set number of standard deviations below or above the mean length are considered short or long, respectively.

:green:`- Minimum expected length (shorts):` {shorts}

:green:`- Maximum expected length (longs):` {longs}

**Command:**

:commd:`awk '!/^>/ {{ next }} {{ getline seq }} length(seq) > {shorts}  && length(seq) < {longs} {{ print $0 \"\\n\" seq }}'  {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/seqs_fw_rev_accepted.fna  >  {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/seqs_fw_rev_filtered.fasta`

**Sequence length distribution before removing reads**

.. image:: report_files/seqs_dist_hist.{snakemake.wildcards.sample}.png
    :height: 400px
    :width: 400px
    :align: center


**Output file:**

:green:`- Fasta file with correct sequence length:` {snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/{snakemake.wildcards.sample}_data/seqs_fw_rev_filtered.fasta

{trimmedStr}

{rmShorLongBench}


{quimeraStr}


{sampleDistChart}


Final counts
-------------

{countTxt}

.. image:: report_files/sequence_numbers.{snakemake.wildcards.sample}.png

OTU report
---------------------------

The Cascabel report on downstream analyses, in combination with multiple libraries (if supplied), can be found at the following link: otu_report_ ({snakemake.wildcards.PROJECT}/runs/{snakemake.wildcards.run}/otu_report_{snakemake.config[assignTaxonomy][tool]}.html)

    .. _otu_report: otu_report_{snakemake.config[assignTaxonomy][tool]}.html

References
------------------

.. [FastQC] FastQC v0.11.3. Andrews S. (2010). FastQC: a quality control tool for high throughput sequence data

.. [PEAR] PEAR: a fast and accurate Illumina Paired-End reAd mergeR. Zhang et al (2014) Bioinformatics 30(5): 614-620 | doi:10.1093/bioinformatics/btt593

.. [QIIME] QIIME. Caporaso JG, Kuczynski J, Stombaugh J, Bittinger K, Bushman FD, Costello EK, Fierer N, Gonzalez Pena A, Goodrich JK, Gordon JI, Huttley GA, Kelley ST, Knights D, Koenig JE, Ley RE, Lozupone CA, McDonald D, Muegge BD, Pirrung M, Reeder J, Sevinsky JR, Turnbaugh PJ, Walters WA, Widmann J, Yatsunenko T, Zaneveld J, Knight R. 2010. QIIME allows analysis of high-throughput community sequencing data. Nature Methods 7(5): 335-336.

.. [Cutadapt] Cutadapt v1.15. Marcel Martin. Cutadapt removes adapter sequences from high-throughput sequencing reads. EMBnet.Journal, 17(1):10-12, May 2011. http://dx.doi.org/10.14806/ej.17.1.200

.. [Vsearch] Rognes T, Flouri T, Nichols B, Quince C, Mahé F. (2016) VSEARCH: a versatile open source tool for metagenomics. PeerJ 4:e2584. doi: 10.7717/peerj.2584


{variable_refs}


""", snakemake.output[0], metadata="Author: J. Engelmann & A. Abdala ")
shell:
    "{config[qiime][path]}validate_mapping_file.py -o {params} -m {input.mapp}"
shell:
    "{config[qiime][path]}extract_barcodes.py -f {input.assembly} -c {config[ext_bc][c]} "
    "{config[ext_bc][bc_length]} {config[ext_bc][extra_params]} -o {params}"
shell:
    "{config[qiime][path]}extract_barcodes.py -f {input.assembly} -c {config[ext_bc][c]} "
    "{config[ext_bc][bc_length]} {config[ext_bc][extra_params]} -o {params}"