text
stringlengths
0
828
.. seealso:: https://docs.cachethq.io/reference#metrics
""""""
data = ApiParams()
data['name'] = name
data['suffix'] = suffix
data['description'] = description
data['default_value'] = default_value
data['display'] = display
return self._post('metrics', data=data)['data']"
def create(self, metric_id, value, timestamp=None):
    """Record a new point on an existing metric's graph.

    :param int metric_id: ID of the metric the point belongs to
    :param int value: Value to plot on the metric graph
    :param str timestamp: Unix timestamp at which the point was measured
    :return: Created metric point data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#post-metric-points
    """
    params = ApiParams()
    params['value'] = value
    params['timestamp'] = timestamp
    return self._post('metrics/%s/points' % metric_id, data=params)['data']
def create(self, email, verify=None, components=None):
    """Subscribe a new email address to status notifications.

    :param str email: Email address to subscribe
    :param bool verify: Whether to send a verification email
    :param list components: Component ID list; defaults to all components
    :return: Created subscriber data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#subscribers
    """
    payload = ApiParams()
    payload['email'] = email
    payload['verify'] = verify
    payload['components'] = components
    return self._post('subscribers', data=payload)['data']
83,"def parser(metadata, analysistype, fieldnames, cutoff, program):
""""""
Read in the BLAST outputs, and populate dictionaries with the parsed results
:param metadata: type LIST: List of metadata objects
:param analysistype: type STR: Current analysis type
:param fieldnames: type LIST: List of fields used in the BLAST analyses
:param cutoff: type INT: Percent identity cutoff to use to determine if a match is present
:param program: type STR: BLAST program used in the analyses
:return: metadata: List of updated metadata objects
""""""
for sample in metadata:
# Initialise a dictionary attribute to store results
sample[analysistype].blastresults = dict()
try:
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(sample[analysistype].report), fieldnames=fieldnames, dialect='excel-tab')
resultdict = dict()
resultset = dict()
# Initialise a dictionary to store all the target sequences
sample[analysistype].targetsequence = dict()
coregenomes = list()
# Create a list of all the names of the database files, replace - with _, remove path and extension
for fasta in sample[analysistype].targets:
fastaname = os.path.basename(os.path.splitext(fasta)[0]).replace('-', '_')
fastaname = fastaname.split('.')[0]
coregenomes.append(fastaname)
# Go through each BLAST result
for row in blastdict:
# Ignore the headers
if row['query_id'].startswith(fieldnames[0]):
pass
else:
# Create the subject length variable - if the sequences are DNA (e.g. blastn), use the subject
# length as usual; if the sequences are protein (e.g. tblastx), use the subject length / 3
if program == 'blastn' or program == 'blastp' or program == 'blastx':
subject_length = float(row['subject_length'])
else:
subject_length = float(row['subject_length']) / 3
# Calculate the percent identity and extract the bitscore from the row
# Percent identity is: (length of the alignment - number of mismatches) / total subject length
percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /
subject_length * 100))
# If the percent identity is greater than the cutoff
if percentidentity >= cutoff:
# Split off any | from the sample name
target = row['subject_id'].split('|')[0]
# As there are variable _ in the name, try to split off the last one only if there are
# multiple and only keep the first part of the split if there is one _ in the name
underscored = '_'.join(target.split('_')[:-1]) if len(target.split('_')) > 2 else \
target.split('_')[0]
try:
# Update the dictionary with the reference genome and the target
resultset[underscored].add(target)
except KeyError:
# Initialise the dictionary with the first hit
resultset[underscored] = set()
resultset[underscored].add(target)
# Get the number of unique genes per reference genome
for underscored, target_set in resultset.items():
resultdict[underscored] = len(target_set)
# Sort the dictionary on the number of hits - best at the top