text_prompt (string, 100 to 17.7k chars, ⌀ = null) | code_prompt (string, 7 to 9.86k chars, ⌀ = null)
---|---|
<SYSTEM_TASK:>
Update the `export` QAction when the Dataset menu is pulled down.
<END_TASK>
<USER_TASK:>
Description:
def updateDatasetMenu(self):
"""Update the `export` QAction when the Dataset menu is pulled down.
This method is a slot. See class ctor for details.
""" |
enabled = True
current = self.vtgui.dbs_tree_view.currentIndex()
if current.isValid():  # a QModelIndex is always truthy; test validity explicitly
leaf = self.vtgui.dbs_tree_model.nodeFromIndex(current)
if leaf.node_kind in (u'group', u'root group'):
enabled = False
self.plot_action.setEnabled(enabled) |
<SYSTEM_TASK:>
Plot a given dataset.
<END_TASK>
<USER_TASK:>
Description:
def plot(self):
"""Export a given dataset to a `CSV` file.
This method is a slot connected to the `export` QAction. See the
:meth:`addEntry` method for details.
""" |
# The PyTables node tied to the current leaf of the databases tree
current = self.vtgui.dbs_tree_view.currentIndex()
leaf = self.vtgui.dbs_tree_model.nodeFromIndex(current).node
data_name = leaf.name
hists_1d = ['HistRelBcid', 'HistErrorCounter', 'HistTriggerErrorCounter', 'HistServiceRecord', 'HistTot', 'HistTdc', 'HistClusterTot', 'HistClusterSize']
hists_2d = ['HistOcc', 'Enable', 'Imon', 'C_High', 'EnableDigInj', 'C_Low', 'FDAC', 'TDAC', 'HistTdcPixel', 'HistTotPixel', 'HistThreshold', 'HistNoise', 'HistThresholdFitted', 'HistNoiseFitted', 'HistThresholdFittedCalib', 'HistNoiseFittedCalib']
if data_name in hists_1d:
plot_1d_hist(hist=leaf[:], title=data_name)
elif data_name in hists_2d:
if data_name == 'HistOcc':
leaf = np.sum(leaf[:], axis=2)
plot_2d_hist(hist=leaf[:], title=data_name)
elif 'Table' in str(type(leaf)) and len(leaf[:].dtype.names) <= 3: # detect tables with less than 4 columns
plot_table(leaf[:], title=data_name)
elif data_name == 'HitOrCalibration':
    print('Coming soon')
else:
    print('Plotting %s (%s) is not supported!' % (data_name, type(leaf))) |
<SYSTEM_TASK:>
generate an iterable from item, but do not treat strings as iterables
<END_TASK>
<USER_TASK:>
Description:
def iterable(item):
"""generate iterable from item, but leaves out strings
""" |
if isinstance(item, collections.Iterable) and not isinstance(item, basestring):
return item
else:
return [item] |
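A quick illustration of the intended behaviour (Python 2 semantics, since `basestring` only exists there; on Python 3 the equivalent checks would be `collections.abc.Iterable` and `str`):
iterable([1, 2, 3])  # -> [1, 2, 3]: already iterable, returned unchanged
iterable(u"abc")     # -> [u"abc"]: strings are wrapped, not iterated
iterable(5)          # -> [5]: scalars are wrapped in a single-item list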
<SYSTEM_TASK:>
Set maximum ToT value that is considered to be a hit
<END_TASK>
<USER_TASK:>
Description:
def max_tot_value(self, value):
"""Set maximum ToT value that is considered to be a hit""" |
self._max_tot_value = value
self.interpreter.set_max_tot(self._max_tot_value)
self.histogram.set_max_tot(self._max_tot_value)
self.clusterizer.set_max_hit_charge(self._max_tot_value) |
<SYSTEM_TASK:>
If the schema is accessible, parse its fields and build the respective lists
<END_TASK>
<USER_TASK:>
Description:
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
""" |
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list |
<SYSTEM_TASK:>
Builds a list of valid fields
<END_TASK>
<USER_TASK:>
Description:
def _build_fields(self):
""" Builds a list of valid fields
""" |
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
    if wc_pattern[0] == "*":
        self._dynamic_field_regexes.append(
            re.compile(r".*%s\Z" % wc_pattern[1:]))  # raw strings avoid the invalid \Z escape warning
    elif wc_pattern[-1] == "*":
        self._dynamic_field_regexes.append(
            re.compile(r"\A%s.*" % wc_pattern[:-1])) |
<SYSTEM_TASK:>
Reformats the given document before insertion into Solr.
<END_TASK>
<USER_TASK:>
Description:
def _clean_doc(self, doc, namespace, timestamp):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
- inserts namespace and timestamp metadata into the document in order
to handle rollbacks
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
""" |
# Translate the _id field to whatever unique key we're using.
# _id may not exist in the doc, if we retrieved it from Solr
# as part of update.
if '_id' in doc:
doc[self.unique_key] = u(doc.pop("_id"))
# Update namespace and timestamp metadata
if 'ns' in doc or '_ts' in doc:
raise errors.OperationFailed(
'Need to set "ns" and "_ts" fields, but these fields already '
'exist in the document %r!' % doc)
doc['ns'] = namespace
doc['_ts'] = timestamp
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
flat_doc = self._formatter.format_document(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc |
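A standalone sketch of the dot-path flattening described in the docstring, assuming plain dicts and lists (in the record above this work is delegated to `self._formatter.format_document`):
def flatten(doc, prefix=""):
    # Recursively join nested keys and list indices with dots.
    flat = {}
    for key, value in doc.items():
        path = "%s.%s" % (prefix, key) if prefix else str(key)
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        elif isinstance(value, list):
            flat.update(flatten(dict(enumerate(value)), path))
        else:
            flat[path] = value
    return flat

flatten({"a": 2, "b": {"c": {"d": 5}}, "e": [6, 7, 8]})
# -> {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}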
<SYSTEM_TASK:>
Update or insert a document into Solr
<END_TASK>
<USER_TASK:>
Description:
def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
""" |
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc, namespace, timestamp)],
commit=(self.auto_commit_interval == 0),
commitWithin=u(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc, namespace, timestamp)],
commit=False) |
<SYSTEM_TASK:>
Update or insert multiple documents into Solr
<END_TASK>
<USER_TASK:>
Description:
def bulk_upsert(self, docs, namespace, timestamp):
"""Update or insert multiple documents into Solr
docs may be any iterable
""" |
if self.auto_commit_interval is not None:
add_kwargs = {
"commit": (self.auto_commit_interval == 0),
"commitWithin": str(self.auto_commit_interval)
}
else:
add_kwargs = {"commit": False}
cleaned = (self._clean_doc(d, namespace, timestamp) for d in docs)
if self.chunk_size > 0:
    # islice avoids calling next() inside a generator expression, which
    # raises RuntimeError on exhaustion under PEP 479 (Python 3.7+).
    # Requires `from itertools import islice` at module level.
    batch = list(islice(cleaned, self.chunk_size))
    while batch:
        self.solr.add(batch, **add_kwargs)
        batch = list(islice(cleaned, self.chunk_size))
else:
    self.solr.add(cleaned, **add_kwargs) |
<SYSTEM_TASK:>
Removes documents from Solr
<END_TASK>
<USER_TASK:>
Description:
def remove(self, document_id, namespace, timestamp):
"""Removes documents from Solr
The input is the id of a previously upserted document.
""" |
self.solr.delete(id=u(document_id),
commit=(self.auto_commit_interval == 0)) |
<SYSTEM_TASK:>
Helper method for iterating over Solr search results.
<END_TASK>
<USER_TASK:>
Description:
def _stream_search(self, query):
"""Helper method for iterating over Solr search results.""" |
for doc in self.solr.search(query, rows=100000000):
if self.unique_key != "_id":
doc["_id"] = doc.pop(self.unique_key)
yield doc |
<SYSTEM_TASK:>
Called to query Solr for documents in a time range.
<END_TASK>
<USER_TASK:>
Description:
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range.""" |
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self._stream_search(query) |
<SYSTEM_TASK:>
Returns the last document stored in the Solr engine.
<END_TASK>
<USER_TASK:>
Description:
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
""" |
# Search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
for r in result:
r['_id'] = r.pop(self.unique_key)
return r |
<SYSTEM_TASK:>
Returns the result of the scrypt password-based key derivation function.
<END_TASK>
<USER_TASK:>
Description:
def hash(password, salt, N, r, p, dkLen):
"""Returns the result of the scrypt password-based key derivation function.
Constraints:
r * p < (2 ** 30)
dkLen <= ((2 ** 32) - 1) * 32
N must be a power of 2 greater than 1 (eg. 2, 4, 8, 16, 32...)
N, r, p must be positive
""" |
# This only matters to Python 3
if not check_bytes(password):
raise ValueError('password must be a byte array')
if not check_bytes(salt):
raise ValueError('salt must be a byte array')
# Scrypt implementation. Significant thanks to https://github.com/wg/scrypt
if N < 2 or (N & (N - 1)): raise ValueError('Scrypt N must be a power of 2 greater than 1')
# A pseudorandom function
prf = lambda k, m: hmac.new(key = k, msg = m, digestmod = hashlib.sha256).digest()
# convert into integers
B = [ get_byte(c) for c in pbkdf2_single(password, salt, p * 128 * r, prf) ]
B = [ ((B[i + 3] << 24) | (B[i + 2] << 16) | (B[i + 1] << 8) | B[i + 0]) for i in xrange(0, len(B), 4)]
XY = [ 0 ] * (64 * r)
V = [ 0 ] * (32 * r * N)
for i in xrange(0, p):
smix(B, i * 32 * r, r, N, V, XY)
# Convert back into bytes
Bc = [ ]
for i in B:
Bc.append((i >> 0) & 0xff)
Bc.append((i >> 8) & 0xff)
Bc.append((i >> 16) & 0xff)
Bc.append((i >> 24) & 0xff)
return pbkdf2_single(password, chars_to_bytes(Bc), dkLen, prf) |
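A usage sketch with the interactive-login parameters suggested in the scrypt paper (N=16384, r=8, p=1); note that password and salt must be byte strings:
key = hash(password=b"correct horse", salt=b"battery staple",
           N=16384, r=8, p=1, dkLen=64)
# len(key) == 64; identical inputs always derive the identical key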
<SYSTEM_TASK:>
copy file or directory at a given location; can be pasted later
<END_TASK>
<USER_TASK:>
Description:
def copy(location):
"""copy file or directory at a given location; can be pasted later""" |
copyData = settings.getDataFile()
copyFileLocation = os.path.abspath(location)
copy = {"copyLocation": copyFileLocation}
with open(copyData, "wb") as dataFile:
    pickle.dump(copy, dataFile)
speech.speak(location + " copied successfully!")
speech.speak("Tip: use 'hallie paste' to paste this file.") |
<SYSTEM_TASK:>
paste a file or directory that has been previously copied
<END_TASK>
<USER_TASK:>
Description:
def paste(location):
"""paste a file or directory that has been previously copied""" |
copyData = settings.getDataFile()
if not location:
location = "."
try:
data = pickle.load(open(copyData, "rb"))
speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
except (IOError, OSError, EOFError, KeyError, pickle.UnpicklingError):
speech.fail("It doesn't look like you've copied anything yet.")
speech.fail("Type 'hallie copy <file>' to copy a file or folder.")
return
process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
if "denied" in process:
speech.fail("Unable to paste your file successfully. This is most likely due to a permission issue. You can try to run me as sudo!") |
<SYSTEM_TASK:>
trust a new PGP key related to a apt-repository
<END_TASK>
<USER_TASK:>
Description:
def apt_add_key(keyid, keyserver='keyserver.ubuntu.com', log=False):
""" trust a new PGP key related to a apt-repository """ |
if log:
log_green(
'trusting keyid %s from %s' % (keyid, keyserver)
)
with settings(hide('warnings', 'running', 'stdout')):
sudo('apt-key adv --keyserver %s --recv %s' % (keyserver, keyid))
return True |
<SYSTEM_TASK:>
adds an apt repository
<END_TASK>
<USER_TASK:>
Description:
def enable_apt_repositories(prefix, url, version, repositories):
""" adds an apt repository """ |
with settings(hide('warnings', 'running', 'stdout'),
warn_only=False, capture=True):
sudo('apt-add-repository "%s %s %s %s"' % (prefix,
url,
version,
repositories))
with hide('running', 'stdout'):
output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update")
if 'Some index files failed to download' in output:
raise SystemExit(1)
else:
# if we didn't abort above, we should return True
return True |
<SYSTEM_TASK:>
checks if a particular package is installed
<END_TASK>
<USER_TASK:>
Description:
def is_package_installed(distribution, pkg):
""" checks if a particular package is installed """ |
if ('centos' in distribution or
'el' in distribution or
'redhat' in distribution):
return(is_rpm_package_installed(pkg))
if ('ubuntu' in distribution or
'debian' in distribution):
return(is_deb_package_installed(pkg)) |
<SYSTEM_TASK:>
checks if a particular rpm package is installed
<END_TASK>
<USER_TASK:>
Description:
def is_rpm_package_installed(pkg):
""" checks if a particular rpm package is installed """ |
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo("rpm -q %s" % pkg)
if result.return_code == 0:
return True
elif result.return_code == 1:
return False
else: # print error to user
print(result)
raise SystemExit() |
<SYSTEM_TASK:>
installs a yum group
<END_TASK>
<USER_TASK:>
Description:
def yum_group_install(**kwargs):
""" instals a yum group """ |
for grp in list(kwargs['groups']):
log_green("installing %s ..." % grp)
if 'repo' in kwargs:
repo = kwargs['repo']
sudo("yum groupinstall -y --quiet "
"--enablerepo=%s '%s'" % (repo, grp))
else:
sudo("yum groups mark install -y --quiet '%s'" % grp)
sudo("yum groups mark convert -y --quiet '%s'" % grp)
sudo("yum groupinstall -y --quiet '%s'" % grp) |
<SYSTEM_TASK:>
abstractSearch in fields of collection and reset rendering.
<END_TASK>
<USER_TASK:>
Description:
def recherche(self, pattern, entete, in_all=False):
"""abstractSearch in fields of collection and reset rendering.
Returns number of results.
If in_all is True, call get_all before doing the search.""" |
if in_all:
self.collection = self.get_all()
self.collection.recherche(pattern, entete)
self._reset_render()
return len(self.collection) |
<SYSTEM_TASK:>
Launch the callable job in background thread.
<END_TASK>
<USER_TASK:>
Description:
def launch_background_job(self, job, on_error=None, on_success=None):
"""Launch the callable job in background thread.
Success or failure is handled by on_success and on_error
""" |
if not self.main.mode_online:
self.sortie_erreur_GUI(
"Local mode activated. Can't run background task !")
self.reset()
return
on_error = on_error or self.sortie_erreur_GUI
on_success = on_success or self.sortie_standard_GUI
def thread_end(r):
on_success(r)
self.update()
def thread_error(r):
on_error(r)
self.reset()
logging.info(
f"Launching background task from interface {self.__class__.__name__} ...")
th = threads.worker(job, thread_error, thread_end)
self._add_thread(th) |
<SYSTEM_TASK:>
Return a filtered list, based on criteria
<END_TASK>
<USER_TASK:>
Description:
def filtre(liste_base, criteres) -> groups.Collection:
"""
Return a filter list, bases on criteres
:param liste_base: Acces list
:param criteres: Criteria { `attribut`:[valeurs,...] }
""" |
def choisi(ac):
for cat, li in criteres.items():
v = ac[cat]
if not (v in li):
return False
return True
return groups.Collection(a for a in liste_base if choisi(a)) |
<SYSTEM_TASK:>
Default implementation fetches users from the DB.
<END_TASK>
<USER_TASK:>
Description:
def _load_users(self):
"""Default implentation requires users from DB.
Should setup `users` attribute""" |
r = sql.abstractRequetesSQL.get_users()()
self.users = {d["id"]: dict(d) for d in r} |
<SYSTEM_TASK:>
Should instantiate interfaces and attach them to `interfaces`, following `modules`
<END_TASK>
<USER_TASK:>
Description:
def load_modules(self):
"""Should instance interfaces and set them to interface, following `modules`""" |
if self.INTERFACES_MODULE is None:
raise NotImplementedError("A module containing interfaces modules "
"should be setup in INTERFACES_MODULE !")
else:
for module, permission in self.modules.items():
i = getattr(self.INTERFACES_MODULE,
module).Interface(self, permission)
self.interfaces[module] = i |
<SYSTEM_TASK:>
Read auto-connection parameters and returns local password or None
<END_TASK>
<USER_TASK:>
Description:
def has_autolog(self, user_id):
"""
Read auto-connection parameters and returns local password or None
""" |
try:
with open("local/init", "rb") as f:
s = f.read()
s = security.protege_data(s, False)
self.autolog = json.loads(s).get("autolog", {})
except FileNotFoundError:
return
mdp = self.autolog.get(user_id, None)
return mdp |
<SYSTEM_TASK:>
This function combines and returns the command-line call of the program.
<END_TASK>
<USER_TASK:>
Description:
def get_cmd(self):
""" This function combines and return the commanline call of the program.
""" |
cmd = []
if self.path is not None:
if '/' in self.path and not os.path.exists(self.path):
debug.log('Error: path contains / but does not exist: %s'%self.path)
else:
if self.ptype is not None:
if os.path.exists(self.ptype):
cmd.append(self.ptype)
elif '/' not in self.ptype:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
ppath = os.path.join(path, self.ptype)
if os.path.isfile(ppath):
cmd.append(ppath)
break
cmd.append(self.path)
if sys.version_info < (3, 0):
cmd.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in [quote(str(x)) for x in self.args]+self.unquoted_args])
else:
cmd.extend([str(x) for x in [quote(str(x)) for x in self.args]+self.unquoted_args])
else:
debug.log('Error: Program path not set!')
return ' '.join(cmd) |
<SYSTEM_TASK:>
This function appends the provided arguments to the program object.
<END_TASK>
<USER_TASK:>
Description:
def append_args(self, arg):
""" This function appends the provided arguments to the program object.
""" |
debug.log("Adding Arguments: %s"%(arg))
if isinstance(arg, (int,float)): self.args.append(str(arg))
if isinstance(arg, str): self.args.append(arg)
if isinstance(arg, list):
if sys.version_info < (3, 0):
self.args.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in arg])
else:
self.args.extend([str(x) for x in arg]) |
<SYSTEM_TASK:>
This function will read the standard out of the program and print it
<END_TASK>
<USER_TASK:>
Description:
def print_stdout(self):
""" This function will read the standard out of the program and print it
""" |
# First we check if the file we want to print does exists
if self.wdir != '':
stdout = "%s/%s"%(self.wdir, self.stdout)
else:
stdout = self.stdout
if os.path.exists(stdout):
with open_(stdout, 'r') as f:
debug.print_out("\n".join([line for line in f]))
else: # FILE DOESN'T EXIST
debug.log("Error: The stdout file %s does not exist!"%(stdout)) |
<SYSTEM_TASK:>
This function will read the standard out of the program, catch
<END_TASK>
<USER_TASK:>
Description:
def find_out_var(self, varnames=[]):
""" This function will read the standard out of the program, catch
variables and return the values
EG. #varname=value
""" |
if self.wdir != '':
stdout = "%s/%s"%(self.wdir, self.stdout)
else:
stdout = self.stdout
response = [None]*len(varnames)
# First we check if the file we want to print does exists
if os.path.exists(stdout):
with open_(stdout, 'r') as f:
for line in f:
if '=' in line:
var = line.strip('#').split('=')
value = var[1].strip()
var = var[0].strip()
if var in varnames: response[varnames.index(var)] = value
else: # FILE DOESN'T EXIST
debug.log("Error: The stdout file %s does not exist!"%(stdout))
return response |
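A small illustration: if the program's stdout file contains the line `#contigs=42`, then:
find_out_var(varnames=["contigs", "n50"])  # -> ["42", None]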
<SYSTEM_TASK:>
Runs the necessary methods to parse raw read outputs
<END_TASK>
<USER_TASK:>
Description:
def reporter(self):
"""
Runs the necessary methods to parse raw read outputs
""" |
logging.info('Preparing reports')
# Populate self.plusdict in order to reuse parsing code from an assembly-based method
for sample in self.runmetadata.samples:
self.plusdict[sample.name] = dict()
self.matchdict[sample.name] = dict()
if sample.general.bestassemblyfile != 'NA':
for gene in sample[self.analysistype].allelenames:
self.plusdict[sample.name][gene] = dict()
for allele, percentidentity in sample[self.analysistype].results.items():
if gene in allele:
# Split the allele number from the gene name using the appropriate delimiter
if '_' in allele:
splitter = '_'
elif '-' in allele:
splitter = '-'
else:
splitter = ''
self.matchdict[sample.name].update({gene: allele.split(splitter)[-1]})
# Create the plusdict dictionary as in the assembly-based (r)MLST method. Allows all the
# parsing and sequence typing code to be reused.
try:
self.plusdict[sample.name][gene][allele.split(splitter)[-1]][percentidentity] \
= sample[self.analysistype].avgdepth[allele]
except KeyError:
self.plusdict[sample.name][gene][allele.split(splitter)[-1]] = dict()
self.plusdict[sample.name][gene][allele.split(splitter)[-1]][percentidentity] \
= sample[self.analysistype].avgdepth[allele]
if gene not in self.matchdict[sample.name]:
self.matchdict[sample.name].update({gene: 'N'})
self.profiler()
self.sequencetyper()
self.mlstreporter() |
<SYSTEM_TASK:>
If the pipeline has previously been run on these data, instead of reading through the results, parse the
<END_TASK>
<USER_TASK:>
Description:
def report_parse(self):
"""
If the pipeline has previously been run on these data, instead of reading through the results, parse the
report instead
""" |
# Initialise lists
report_strains = list()
genus_list = list()
if self.analysistype == 'mlst':
for sample in self.runmetadata.samples:
try:
genus_list.append(sample.general.referencegenus)
except AttributeError:
sample.general.referencegenus = 'ND'
genus_list.append(sample.general.referencegenus)
# Read in the report
if self.analysistype == 'mlst':
for genus in genus_list:
try:
report_name = os.path.join(self.reportpath, '{at}_{genus}.csv'.format(at=self.analysistype,
genus=genus))
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
except FileNotFoundError:
report_name = self.report
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
else:
report_name = self.report
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
# Populate strains not in the report with 'empty' GenObject with appropriate attributes
for sample in self.runmetadata.samples:
if sample.name not in report_strains:
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].sequencetype = 'ND'
sample[self.analysistype].matches = 0
sample[self.analysistype].results = dict() |
<SYSTEM_TASK:>
Utility function to call classes based on filename extension.
<END_TASK>
<USER_TASK:>
Description:
def guess_type(filename, **kwargs):
""" Utility function to call classes based on filename extension.
Just useful if you are reading the file and don't know the file extension.
You can pass kwargs; these args are passed to the class only if they are
used in the class.
""" |
extension = os.path.splitext(filename)[1]
case = {'.xls': Xls,
'.xlsx': Xlsx,
'.csv': Csv}
if extension and case.get(extension.lower()):
low_extension = extension.lower()
new_kwargs = dict()
class_name = case.get(low_extension)
class_kwargs = inspect.getargspec(class_name.__init__).args[1:]
for kwarg in kwargs:
if kwarg in class_kwargs:
new_kwargs[kwarg] = kwargs[kwarg]
return case.get(low_extension)(filename, **new_kwargs)
else:
raise Exception('Unsupported or missing file extension') |
<SYSTEM_TASK:>
This function takes the database path and a gene name as inputs and
<END_TASK>
<USER_TASK:>
Description:
def get_gene_seqs(database_path, gene):
"""
This function takes the database path and a gene name as inputs and
returns the gene sequence contained in the file given by the gene name
""" |
gene_path = database_path + "/" + gene + ".fsa"
gene_seq = ""
# Open fasta file
with open(gene_path) as gene_file:
header = gene_file.readline()
for line in gene_file:
seq = line.strip()
gene_seq += seq
return gene_seq |
<SYSTEM_TASK:>
This function takes the list hits_found as argument. This contains all
<END_TASK>
<USER_TASK:>
Description:
def find_best_sequence(hits_found, specie_path, gene, silent_N_flag):
"""
This function takes the list hits_found as argument. This contains all
hits found for the blast search of one gene. A hit includes the subject
sequence, the query, and the start and stop position of the alignment
corresponding to the subject sequence. This function finds the best
hit by concatenating sequences of found hits. If different overlap
sequences occur, these are saved in the list alternative_overlaps. The
subject and query sequence of the concatenated sequence, together with
alternative overlaps and the corresponding start and stop
positions, are returned.
""" |
# Get information from the first hit found
all_start = hits_found[0][0]
current_end = hits_found[0][1]
final_sbjct = hits_found[0][2]
final_qry = hits_found[0][3]
sbjct_len = hits_found[0][4]
alternative_overlaps = []
# Check if more then one hit was found within the same gene
for i in range(len(hits_found)-1):
# Save information from previous hit
pre_block_start = hits_found[i][0]
pre_block_end = hits_found[i][1]
pre_sbjct = hits_found[i][2]
pre_qry = hits_found[i][3]
# Save information from next hit
next_block_start = hits_found[i+1][0]
next_block_end = hits_found[i+1][1]
next_sbjct = hits_found[i+1][2]
next_qry = hits_found[i+1][3]
# Check for overlapping sequences, collapse them and save alternative overlaps if any
if next_block_start <= current_end:
# Find overlap start and take gaps into account
pos_count = 0
overlap_pos = pre_block_start
for i in range(len(pre_sbjct)):
# Stop loop if overlap_start position is reached
if overlap_pos == next_block_start:
overlap_start = pos_count
break
if pre_sbjct[i] != "-":
overlap_pos += 1
pos_count += 1
# Find overlap length and add next sequence to final sequence
if len(pre_sbjct[overlap_start:]) > len(next_sbjct):
# <--------->
# <--->
overlap_len = len(next_sbjct)
overlap_end_pos = next_block_end
else:
# <--------->
# <--------->
overlap_len = len(pre_sbjct[overlap_start:])
overlap_end_pos = pre_block_end
# Update current end
current_end = next_block_end
# Use the entire pre sequence and add the last part of the next sequence
final_sbjct += next_sbjct[overlap_len:]
final_qry += next_qry[overlap_len:]
# Find query overlap sequences
pre_qry_overlap = pre_qry[overlap_start : (overlap_start + overlap_len)] # can work for both types of overlap
next_qry_overlap = next_qry[:overlap_len]
sbjct_overlap = next_sbjct[:overlap_len]
# If an alternative query overlap exists, save it
if pre_qry_overlap != next_qry_overlap:
print("OVERLAP WARNING:")
print(pre_qry_overlap, "\n", next_qry_overlap)
# Save alternative overlaps
alternative_overlaps += [(next_block_start, overlap_end_pos, sbjct_overlap, next_qry_overlap)]
elif next_block_start > current_end:
# <------->
# <------->
gap_size = next_block_start - current_end - 1
final_qry += "N"*gap_size
if silent_N_flag:
final_sbjct += "N"*gap_size
else:
ref_seq = get_gene_seqs(specie_path, gene)
final_sbjct += ref_seq[pre_block_end:pre_block_end+gap_size]
current_end = next_block_end
final_sbjct += next_sbjct
final_qry += next_qry
# Calculate coverage
no_call = final_qry.upper().count("N")
coverage = (current_end - all_start +1 - no_call) / float(sbjct_len)
# Calculate identity
equal = 0
not_equal = 0
for i in range(len(final_qry)):
if final_qry[i].upper() != "N":
if final_qry[i].upper() == final_sbjct[i].upper():
equal += 1
else:
not_equal += 1
identity = equal/float(equal + not_equal)
return final_sbjct, final_qry, all_start, current_end, alternative_overlaps, coverage, identity |
<SYSTEM_TASK:>
This function finds mismatches between two sequences. Depending on the
<END_TASK>
<USER_TASK:>
Description:
def find_mismatches(gene, sbjct_start, sbjct_seq, qry_seq, alternative_overlaps = []):
"""
This function finds mismatches between two sequences. Depending on
the sequence type, either find_codon_mismatches or
find_nucleotid_mismatches is called; if the sequence contains both
a promoter and a coding region, both functions are called. The function
can also call itself if alternative overlaps are given. All found
mismatches are returned
""" |
# Initiate the mis_matches list that will store all found mismatches
mis_matches = []
# Find mismatches in RNA genes
if gene in RNA_gene_list:
mis_matches += find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq)
else:
# Check if the gene sequence is with a promoter
regex = r"promoter_size_(\d+)(?:bp)"
promtr_gene_objt = re.search(regex, gene)
# Check for promoter sequences
if promtr_gene_objt:
# Get promoter length
promtr_len = int(promtr_gene_objt.group(1))
# Extract promoter sequence, while considering gaps
# --------agt-->----
# ---->?
if sbjct_start <= promtr_len:
#Find position in sbjct sequence where promoter ends
promtr_end = 0
nuc_count = sbjct_start - 1
for i in range(len(sbjct_seq)):
promtr_end += 1
if sbjct_seq[i] != "-":
nuc_count += 1
if nuc_count == promtr_len:
break
# Check if only a part of the promoter is found
#--------agt-->----
# ----
promtr_sbjct_start = -1
if nuc_count < promtr_len:
promtr_sbjct_start = nuc_count - promtr_len
# Get promoter part of subject and query
sbjct_promtr_seq = sbjct_seq[:promtr_end]
qry_promtr_seq = qry_seq[:promtr_end]
# For promoter part find nucleotide mis matches
mis_matches += find_nucleotid_mismatches(promtr_sbjct_start, sbjct_promtr_seq, qry_promtr_seq, promoter = True)
# Check if gene is also found
#--------agt-->----
# -----------
if (sbjct_start + len(sbjct_seq.replace("-", ""))) > promtr_len:
sbjct_gene_seq = sbjct_seq[promtr_end:]
qry_gene_seq = qry_seq[promtr_end:]
sbjct_gene_start = 1
# Find mismatches in gene part
mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_gene_seq, qry_gene_seq)
# No promoter, only gene is found
#--------agt-->----
# -----
else:
sbjct_gene_start = sbjct_start - promtr_len
# Find mismatches in gene part
mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_seq, qry_seq)
else:
# Find mismatches in gene
mis_matches += find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq)
# Find mismatches in alternative overlaps if any
for overlap in alternative_overlaps:
mis_matches += find_mismatches(gene, overlap[0], overlap[2], overlap[3])
return mis_matches |
<SYSTEM_TASK:>
This function finds the entire indel missing from a gapped sequence
<END_TASK>
<USER_TASK:>
Description:
def find_nuc_indel(gapped_seq, indel_seq):
"""
This function finds the entire indel missing from a gapped sequence
compared to the indel sequence. It is assumed that the sequences start
at the first position of the gap.
""" |
ref_indel = indel_seq[0]
for j in range(1,len(gapped_seq)):
if gapped_seq[j] == "-":
ref_indel += indel_seq[j]
else:
break
return ref_indel |
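A small illustration with a hypothetical two-nucleotide deletion:
find_nuc_indel("--ACT", "GGACT")  # -> "GG", the nucleotides missing from the gapped sequence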
<SYSTEM_TASK:>
This function converts a codon to an amino acid. If the codon is not
<END_TASK>
<USER_TASK:>
Description:
def aa(codon):
"""
This function converts a codon to an amino acid. If the codon is not
valid, "?" is returned; otherwise the amino acid is returned.
""" |
codon = codon.upper()
aa = {"ATT": "I", "ATC": "I", "ATA": "I",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L", "TTA": "L", "TTG": "L",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"TTT": "F", "TTC": "F",
"ATG": "M",
"TGT": "C", "TGC": "C",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"TAT": "Y", "TAC": "Y",
"TGG": "W",
"CAA": "Q", "CAG": "Q",
"AAT": "N", "AAC": "N",
"CAT": "H", "CAC": "H",
"GAA": "E", "GAG": "E",
"GAT": "D", "GAC": "D",
"AAA": "K", "AAG": "K",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"TAA": "*", "TAG": "*", "TGA": "*"}
# Translate valid codon
try:
amino_a = aa[codon]
except KeyError:
amino_a = "?"
return amino_a |
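For example:
aa("atg")  # -> "M" (the codon is upper-cased before lookup)
aa("TGA")  # -> "*" (stop codon)
aa("ANN")  # -> "?" (invalid codons fall back to "?")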
<SYSTEM_TASK:>
This function takes a sequence and a codon number and returns the codon
<END_TASK>
<USER_TASK:>
Description:
def get_codon(seq, codon_no, start_offset):
"""
This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
""" |
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
return codon |
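A quick illustration with no gaps and no offset:
get_codon("ATGGCCAAA", codon_no=2, start_offset=0)  # -> "GCC"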
<SYSTEM_TASK:>
This function is used to name an insertion mutation based on the HGVS
<END_TASK>
<USER_TASK:>
Description:
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset):
"""
This function is used to name an insertion mutation based on the HGVS
recommendation.
""" |
start_codon_no = codon_no - 1
if len(sbjct_nucs) == 3:
start_codon_no = codon_no
start_codon = get_codon(sbjct_seq, start_codon_no, start_offset)
end_codon = get_codon(sbjct_seq, codon_no, start_offset)
pos_name = "p.%s%d_%s%dins%s"%(aa(start_codon), start_codon_no, aa(end_codon), codon_no, aa_alt)
return pos_name |
<SYSTEM_TASK:>
This function serves to name the individual mutations depending on
<END_TASK>
<USER_TASK:>
Description:
def name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, start_offset):
"""
This function serves to name the individual mutations depending on
the type of the mutation.
""" |
# Get the subject and query sequences without gaps
sbjct_nucs = sbjct_rf_indel.replace("-", "")
qry_nucs = qry_rf_indel.replace("-", "")
# Translate nucleotides to amino acids
aa_ref = ""
aa_alt = ""
for i in range(0, len(sbjct_nucs), 3):
aa_ref += aa(sbjct_nucs[i:i+3])
for i in range(0, len(qry_nucs), 3):
aa_alt += aa(qry_nucs[i:i+3])
# Identify the gapped sequence
if mut == "ins":
gapped_seq = sbjct_rf_indel
else:
gapped_seq = qry_rf_indel
gap_size = gapped_seq.count("-")
# Write mutation names
if gap_size < 3 and len(sbjct_nucs) ==3 and len(qry_nucs) == 3:
# Write mutation name for substitution mutation
mut_name = "p.%s%d%s"%(aa(sbjct_nucs), codon_no, aa(qry_nucs))
elif len(gapped_seq) == gap_size:
if mut == "ins":
# Write mutation name for insertion mutation
mut_name = name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset)
aa_ref = mut
else:
# Write mutation name for deletion mutation
mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "del")
aa_alt = mut
# Check for delins - mix of insertion and deletion
else:
# Write mutation name for a mixed insertion and deletion mutation
mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "delins")
# Check for frameshift
if gapped_seq.count("-")%3 != 0:
# Add the frameshift tag to mutation name
mut_name += " - Frameshift"
return mut_name, aa_ref, aa_alt |
<SYSTEM_TASK:>
This function takes a sequence starting with a gap or the complementary
<END_TASK>
<USER_TASK:>
Description:
def get_inframe_gap(seq, nucs_needed = 3):
"""
This function takes a sequence starting with a gap, or the complementary
sequence to the gap, and the number of nucleotides that the sequence
should contain in order to maintain the correct reading frame. The
sequence is scanned and the number of non-gap characters is
counted. When the count reaches the number of needed nucleotides,
the indel is returned. If the indel is a 'clean' insert or deletion
that starts at the start of a codon and can be divided by 3, then only
the gap is returned.
""" |
nuc_count = 0
gap_indel = ""
nucs = ""
for i in range(len(seq)):
# Check if the character is not a gap
if seq[i] != "-":
# Check if the indel is a 'clean'
# i.e. if the insert or deletion starts at the first nucleotide in the codon and can be divided by 3
if gap_indel.count("-") == len(gap_indel) and gap_indel.count("-") >= 3 and len(gap_indel) != 0:
return gap_indel
nuc_count += 1
gap_indel += seq[i]
# If the number of nucleotides in the indel equals the amount needed for the indel, the indel is returned.
if nuc_count == nucs_needed:
return gap_indel
# This will only happen if the gap is in the very end of a sequence
return gap_indel |
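Two small illustrations on hypothetical alignment fragments:
get_inframe_gap("---ACT", 3)  # -> "---": a clean codon-sized gap, returned alone
get_inframe_gap("--ACTG", 3)  # -> "--ACT": the gap plus nucleotides to stay in frame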
<SYSTEM_TASK:>
Try merging all the bravado_core models across all loaded APIs. If
<END_TASK>
<USER_TASK:>
Description:
def merge(self):
"""Try merging all the bravado_core models across all loaded APIs. If
duplicates occur, use the same bravado-core model to represent each, so
bravado-core won't treat them as different models when passing them
from one PyMacaron client stub to an other or when returning them via the
PyMacaron server stub.
""" |
# The sole purpose of this method is to trick isinstance to return true
# on model_values of the same kind but different apis/specs at:
# https://github.com/Yelp/bravado-core/blob/4840a6e374611bb917226157b5948ee263913abc/bravado_core/marshal.py#L160
log.info("Merging models of apis " + ", ".join(apis.keys()))
# model_name => (api_name, model_json_def, bravado_core.model.MODELNAME)
models = {}
# First pass: find duplicates and keep only one model of each (fail if
# duplicates have same name but different definitions)
for api_name, api in apis.items():
for model_name, model_def in api.api_spec.swagger_dict['definitions'].items():
if model_name in models:
other_api_name, other_model_def, _ = models.get(model_name)
log.debug("Model %s in %s is a duplicate of one in %s" % (model_name, api_name, other_api_name))
if ApiPool._cmp_models(model_def, other_model_def) != 0:
raise MergeApisException("Cannot merge apis! Model %s exists in apis %s and %s but have different definitions:\n[%s]\n[%s]"
% (model_name, api_name, other_api_name, pprint.pformat(model_def), pprint.pformat(other_model_def)))
else:
models[model_name] = (api_name, model_def, api.api_spec.definitions[model_name])
# Second pass: patch every model and replace it with the one we decided
# to keep
log.debug("Patching api definitions to remove all duplicates")
for api_name, api in apis.items():
for model_name in api.api_spec.definitions.keys():
_, _, model_class = models.get(model_name)
api.api_spec.definitions[model_name] = model_class |
<SYSTEM_TASK:>
Create a task type.
<END_TASK>
<USER_TASK:>
Description:
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
values for the tasks required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
:class:`saltant.models.base_task_instance.BaseTaskType`:
A task type model instance representing the task type
just created.
""" |
# Coerce None defaults for the optional list and dict arguments into proper datatypes
if environment_variables is None:
environment_variables = []
if required_arguments is None:
required_arguments = []
if required_arguments_default_values is None:
required_arguments_default_values = {}
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_post is not None:
data_to_post.update(extra_data_to_post)
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task type
return self.response_data_to_model_instance(response.json()) |
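A usage sketch, assuming an authenticated `client` whose task-type manager is exposed as `client.task_types` (attribute name hypothetical):
task_type = client.task_types.create(
    name="say-hello",
    command_to_run="echo hello",
    required_arguments=["name"],
    required_arguments_default_values={"name": "world"},
)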
<SYSTEM_TASK:>
Convert response data to a task type model.
<END_TASK>
<USER_TASK:>
Description:
def response_data_to_model_instance(self, response_data):
"""Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
reponse data.
""" |
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
# Instantiate a model for the task instance
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data) |
<SYSTEM_TASK:>
Deploy to pypi as specified version.
<END_TASK>
<USER_TASK:>
Description:
def deploy(version):
"""
Deploy to pypi as specified version.
""" |
NAME = "pathquery"
git = Command("git").in_dir(DIR.project)
version_file = DIR.project.joinpath("VERSION")
old_version = version_file.bytes().decode('utf8')
if version_file.bytes().decode("utf8") != version:
DIR.project.joinpath("VERSION").write_text(version)
git("add", "VERSION").run()
git("commit", "-m", "RELEASE: Version {0} -> {1}".format(
old_version,
version
)).run()
git("push").run()
git("tag", "-a", version, "-m", "Version {0}".format(version)).run()
git("push", "origin", version).run()
else:
git("push").run()
# Set __version__ variable in __init__.py, build sdist and put it back
initpy = DIR.project.joinpath(NAME, "__init__.py")
original_initpy_contents = initpy.bytes().decode('utf8')
initpy.write_text(
original_initpy_contents.replace("DEVELOPMENT_VERSION", version)
)
python("setup.py", "sdist").in_dir(DIR.project).run()
initpy.write_text(original_initpy_contents)
# Upload to pypi
python(
"-m", "twine", "upload", "dist/{0}-{1}.tar.gz".format(NAME, version)
).in_dir(DIR.project).run() |
<SYSTEM_TASK:>
Install a new version of a package in the hitch venv.
<END_TASK>
<USER_TASK:>
Description:
def hvenvup(package, directory):
"""
Install a new version of a package in the hitch venv.
""" |
pip = Command(DIR.gen.joinpath("hvenv", "bin", "pip"))
pip("uninstall", package, "-y").run()
pip("install", DIR.project.joinpath(directory).abspath()).run() |
<SYSTEM_TASK:>
Validates a single instance. Returns a boolean value and stores errors in self.errors
<END_TASK>
<USER_TASK:>
Description:
def is_valid(self):
"""
Validates a single instance. Returns a boolean value and stores errors in self.errors
""" |
self.errors = []
for field in self.get_all_field_names_declared_by_user():
getattr(type(self), field).is_valid(self, type(self), field)
field_errors = getattr(type(self), field).errors(self)
self.errors.extend(field_errors)
return len(self.errors) == 0 |
<SYSTEM_TASK:>
Run the analyses using the inputted values for forward and reverse read length. However, if not all strains
<END_TASK>
<USER_TASK:>
Description:
def main(self):
"""
Run the analyses using the inputted values for forward and reverse read length. However, if not all strains
pass the quality thresholds, continue to periodically run the analyses on these incomplete strains until either
all strains are complete, or the sequencing run is finished
""" |
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
self.createobjects()
# Run the genesipping analyses
self.methods()
# Determine if the analyses are complete
self.complete()
self.additionalsipping()
# Update the report object
self.reports = Reports(self)
# Once all the analyses are complete, create reports for each sample
Reports.methodreporter(self.reports)
# Print the metadata
printer = MetadataPrinter(self)
printer.printmetadata() |
<SYSTEM_TASK:>
Run the typing methods
<END_TASK>
<USER_TASK:>
Description:
def methods(self):
"""
Run the typing methods
""" |
self.contamination_detection()
ReportImage(self, 'confindr')
self.run_genesippr()
ReportImage(self, 'genesippr')
self.run_sixteens()
self.run_mash()
self.run_gdcs()
ReportImage(self, 'gdcs') |
<SYSTEM_TASK:>
Calculate the levels of contamination in the reads
<END_TASK>
<USER_TASK:>
Description:
def contamination_detection(self):
"""
Calculate the levels of contamination in the reads
""" |
self.qualityobject = quality.Quality(self)
self.qualityobject.contamination_finder(input_path=self.sequencepath,
report_path=self.reportpath) |
<SYSTEM_TASK:>
Run the 16S analyses using the filtered database
<END_TASK>
<USER_TASK:>
Description:
def run_sixteens(self):
"""
Run the 16S analyses using the filtered database
""" |
SixteensFull(args=self,
pipelinecommit=self.commit,
startingtime=self.starttime,
scriptpath=self.homepath,
analysistype='sixteens_full',
cutoff=0.985) |
<SYSTEM_TASK:>
Run MASH to determine the closest refseq genomes
<END_TASK>
<USER_TASK:>
Description:
def run_mash(self):
"""
Run MASH to determine the closest refseq genomes
""" |
self.pipeline = True
mash.Mash(inputobject=self,
analysistype='mash') |
<SYSTEM_TASK:>
checks if a given package is installed on pip
<END_TASK>
<USER_TASK:>
Description:
def checkInstalledPip(package, speak=True, speakSimilar=True):
"""checks if a given package is installed on pip""" |
packages = sorted([i.key for i in pip.get_installed_distributions()])
installed = package in packages
similar = None
if not installed:
similar = [pkg for pkg in packages if package in pkg]
if speak:
speakInstalledPackages(package, "pip", installed, similar, speakSimilar)
return (installed, similar) |
<SYSTEM_TASK:>
checks if a given package is installed on homebrew
<END_TASK>
<USER_TASK:>
Description:
def checkInstalledBrew(package, similar=True, speak=True, speakSimilar=True):
"""checks if a given package is installed on homebrew""" |
packages = subprocess.check_output(['brew', 'list']).decode().split()  # decode so the membership test works on Python 3
installed = package in packages
similar = []
if not installed:
similar = [pkg for pkg in packages if package in pkg]
if speak:
speakInstalledPackages(package, "homebrew", installed, similar, speakSimilar)
return (installed, similar) |
<SYSTEM_TASK:>
Placeholders for field names and value binds
<END_TASK>
<USER_TASK:>
Description:
def placeholders(cls,dic):
"""Placeholders for fields names and value binds""" |
keys = [str(x) for x in dic]
entete = ",".join(keys)
placeholders = ",".join(cls.named_style.format(x) for x in keys)
entete = f"({entete})"
placeholders = f"({placeholders})"
return entete, placeholders |
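For example, assuming `named_style = "%({})s"` (psycopg2's named-parameter style) and insertion-ordered dicts (Python 3.7+):
abstractRequetesSQL.placeholders({"nom": "x", "prix": 3})
# -> ("(nom,prix)", "(%(nom)s,%(prix)s)")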
<SYSTEM_TASK:>
Insert row from datas
<END_TASK>
<USER_TASK:>
Description:
def insert(table, datas, avoid_conflict=False):
""" Insert row from datas
:param table: Safe table name
:param datas: List of dicts.
:param avoid_conflict: Allows ignoring error if already exists (do nothing then)
:return:
""" |
if avoid_conflict:
debut = """INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} ON CONFLICT DO NOTHING"""
else:
debut = """INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} RETURNING *"""
l = [abstractRequetesSQL.formate(debut, table=table, INSERT=d, args=d) for d in datas if d]
return Executant(l) |
<SYSTEM_TASK:>
Update row with Id from table. Set fields given by dic.
<END_TASK>
<USER_TASK:>
Description:
def update(cls,table, dic, Id):
""" Update row with Id from table. Set fields given by dic.""" |
if dic:
req = "UPDATE {table} SET {SET} WHERE id = " + cls.named_style.format('__id') + " RETURNING * "
r = abstractRequetesSQL.formate(req, SET=dic, table=table, args=dict(dic, __id=Id))
return MonoExecutant(r)
return MonoExecutant((f"SELECT * FROM {table} WHERE id = " + cls.named_style.format('__id'),
{"__id": Id})) |
<SYSTEM_TASK:>
Create ONE row from dic and returns the entry created
<END_TASK>
<USER_TASK:>
Description:
def cree(table, dic, avoid_conflict=False):
""" Create ONE row from dic and returns the entry created """ |
if avoid_conflict:
req = """ INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} ON CONFLICT DO NOTHING RETURNING *"""
else:
req = """ INSERT INTO {table} {ENTETE_INSERT} VALUES {BIND_INSERT} RETURNING *"""
r = abstractRequetesSQL.formate(req, table=table, INSERT=dic, args=dic)
return MonoExecutant(r) |
<SYSTEM_TASK:>
Enumerate the children of the given object, as would be visible and utilized by dispatch.
<END_TASK>
<USER_TASK:>
Description:
def trace(self, context, obj):
"""Enumerate the children of the given object, as would be visible and utilized by dispatch.""" |
root = obj
if isroutine(obj):
yield Crumb(self, root, endpoint=True, handler=obj, options=opts(obj))
return
for name, attr in getmembers(obj if isclass(obj) else obj.__class__):
if name == '__getattr__':
sig = signature(attr)
path = '{' + list(sig.parameters.keys())[1] + '}'
reta = sig.return_annotation
if reta is not sig.empty:
if callable(reta) and not isclass(reta):
yield Crumb(self, root, path, endpoint=True, handler=reta, options=opts(reta))
else:
yield Crumb(self, root, path, handler=reta)
else:
yield Crumb(self, root, path, handler=attr)
del sig, path, reta
continue
elif name == '__call__':
yield Crumb(self, root, None, endpoint=True, handler=obj)
continue
if self.protect and name[0] == '_':
continue
yield Crumb(self, root, name,
endpoint=callable(attr) and not isclass(attr), handler=attr, options=opts(attr)) |
<SYSTEM_TASK:>
For genus-specific targets, MLST and serotyping, determine if the closest refseq genus is known - i.e. if 16S
<END_TASK>
<USER_TASK:>
Description:
def genus_specific(self):
"""
For genus-specific targets, MLST and serotyping, determine if the closest refseq genus is known - i.e. if 16S
analyses have been performed. Perform the analyses if required
""" |
# Initialise a variable to store whether the necessary analyses have already been performed
closestrefseqgenus = False
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
try:
closestrefseqgenus = sample.general.closestrefseqgenus
except AttributeError:
pass
# Perform the 16S analyses as required
if not closestrefseqgenus:
logging.info('Must perform MASH analyses to determine genera of samples')
self.pipeline = True
# Run the analyses
mash.Mash(self, 'mash') |
<SYSTEM_TASK:>
This is a class decorator that declares a class to provide a set of services.
<END_TASK>
<USER_TASK:>
Description:
def service_provider(*services):
"""
This is a class decorator that declares a class to provide a set of services.
It is expected that the class has a no-arg constructor and will be instantiated
as a singleton.
""" |
def real_decorator(clazz):
instance = clazz()
for service in services:
global_lookup.add(service, instance)
return clazz
return real_decorator |
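A usage sketch with a hypothetical service key (the services may be whatever objects `global_lookup` expects):
@service_provider("mailer")
class SmtpMailer(object):
    def send(self, to, body):
        pass
# the singleton SmtpMailer instance is now registered under "mailer"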
<SYSTEM_TASK:>
Tries to import object from default namespace.
<END_TASK>
<USER_TASK:>
Description:
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
""" |
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs) |
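For example, with a hypothetical default namespace and driver class:
driver = import_object_ns("myapp.backends", "redis_driver.RedisDriver", host="localhost")
# tries myapp.backends.redis_driver.RedisDriver first,
# then falls back to redis_driver.RedisDriver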
<SYSTEM_TASK:>
Create widgets and store them in self.widgets
<END_TASK>
<USER_TASK:>
Description:
def cree_widgets(self):
"""Create widgets and store them in self.widgets""" |
for t in self.FIELDS:
if type(t) is str:
attr, kwargs = t, {}
else:
attr, kwargs = t[0], t[1].copy()
self.champs.append(attr)
is_editable = kwargs.pop("is_editable", self.is_editable)
args = [self.acces[attr], is_editable]
with_base = kwargs.pop("with_base", False)
if with_base:
args.append(self.acces.base)
if 'with_label' in kwargs:
label = kwargs.pop('with_label')
else:
label = ASSOCIATION[attr][0]
if kwargs:
w = ASSOCIATION[attr][3](*args, **kwargs)
else:
w = ASSOCIATION[attr][3](*args)
self.widgets[attr] = (w, label) |
<SYSTEM_TASK:>
Alternative for creating read-only widgets. Their values should be set afterwards.
<END_TASK>
<USER_TASK:>
Description:
def cree_ws_lecture(self, champs_ligne):
"""Alternative to create read only widgets. They should be set after.""" |
for c in champs_ligne:
label = ASSOCIATION[c][0]
w = ASSOCIATION[c][3](self.acces[c], False)
w.setObjectName("champ-lecture-seule-details")
self.widgets[c] = (w, label) |
<SYSTEM_TASK:>
Function for splitting a string into a list of characters, preserving surrogate pairs.
<END_TASK>
<USER_TASK:>
Description:
def preservesurrogates(s):
"""
Function for splitting a string into a list of characters, preserving surrogate pairs.
In python 2, unicode characters above 0x10000 are stored as surrogate pairs. For example, the Unicode character
u"\U0001e900" is stored as the surrogate pair u"\ud83a\udd00":
s = u"AB\U0001e900CD"
len(s) -> 6
list(s) -> [u'A', u'B', u'\ud83a', u'\udd00', u'C', 'D']
len(preservesurrogates(s)) -> 5
list(preservesurrogates(s)) -> [u'A', u'B', u'\U0001e900', u'C', u'D']
:param s: String to split
:return: List of characters
""" |
if not isinstance(s, six.text_type):
raise TypeError(u"String to split must be of type 'unicode'!")
surrogates_regex_str = u"[{0}-{1}][{2}-{3}]".format(HIGH_SURROGATE_START,
HIGH_SURROGATE_END,
LOW_SURROGATE_START,
LOW_SURROGATE_END)
surrogates_regex = re.compile(u"(?:{0})|.".format(surrogates_regex_str))
return surrogates_regex.findall(s) |
<SYSTEM_TASK:>
Helper function for taking a Unicode scalar value and returning a Unicode character.
<END_TASK>
<USER_TASK:>
Description:
def _unichr(i):
"""
Helper function for taking a Unicode scalar value and returning a Unicode character.
:param i: Unicode scalar value to convert.
:return: Unicode character
""" |
if not isinstance(i, int):
raise TypeError
try:
return six.unichr(i)
except ValueError:
# Workaround the error "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
return struct.pack("i", i).decode("utf-32") |
<SYSTEM_TASK:>
Helper function for looking up the derived name prefix associated with a Unicode scalar value.
<END_TASK>
<USER_TASK:>
Description:
def _get_nr_prefix(i):
"""
Helper function for looking up the derived name prefix associated with a Unicode scalar value.
:param i: Unicode scalar value.
:return: String with the derived name prefix.
""" |
for lookup_range, prefix_string in _nr_prefix_strings.items():
if i in lookup_range:
return prefix_string
raise ValueError("No prefix string associated with {0}!".format(i)) |
<SYSTEM_TASK:>
Function for performing case folding. This function will take the input
<END_TASK>
<USER_TASK:>
Description:
def casefold(s, fullcasefold=True, useturkicmapping=False):
"""
Function for performing case folding. This function will take the input
string s and return a copy of the string suitable for caseless comparisons.
The input string must be of type 'unicode', otherwise a TypeError will be
raised.
For more information on case folding, see section 3.13 of the Unicode Standard.
See also the following FAQ on the Unicode website:
https://unicode.org/faq/casemap_charprop.htm
By default, full case folding (where the string length may change) is done.
It is possible to use simple case folding (single character mappings only)
by setting the boolean parameter fullcasefold=False.
By default, case folding does not handle the Turkic case of dotted vs dotless 'i'.
To perform case folding using the special Turkic mappings, pass the boolean
parameter useturkicmapping=True. For more info on the dotted vs dotless 'i', see
the following web pages:
https://en.wikipedia.org/wiki/Dotted_and_dotless_I
http://www.i18nguy.com/unicode/turkish-i18n.html#problem
:param s: String to transform
:param fullcasefold: Boolean indicating if a full case fold (default is True) should be done. If False, a simple
case fold will be performed.
:param useturkicmapping: Boolean indicating if the special turkic mapping (default is False) for the dotted and
dotless 'i' should be used.
:return: Copy of string that has been transformed for caseless comparison.
""" |
if not isinstance(s, six.text_type):
raise TypeError(u"String to casefold must be of type 'unicode'!")
lookup_order = "CF"
if not fullcasefold:
lookup_order = "CS"
if useturkicmapping:
lookup_order = "T" + lookup_order
return u"".join([casefold_map.lookup(c, lookup_order=lookup_order) for c in preservesurrogates(s)]) |
<SYSTEM_TASK:>
Function to lookup a character in the casefold map.
<END_TASK>
<USER_TASK:>
Description:
def lookup(self, c, lookup_order="CF"):
"""
Function to lookup a character in the casefold map.
The casefold map has four sub-tables, the 'C' or common table, the 'F' or
full table, the 'S' or simple table and the 'T' or the Turkic special
case table. These tables correspond to the statuses defined in the
CaseFolding.txt file. We can specify the order of the tables to use for
performing the lookup by the lookup_order parameter.
Per the usage specified in the CaseFolding.txt file, we can use the 'C'
and 'S' tables for doing a simple case fold. To perform a full case
fold, we can use the 'C' and 'F' tables. The default behavior for this
function is a full case fold (lookup_order="CF").
:param c: character to lookup
:param lookup_order: string of sub-table keys to consult, in order (default "CF")
""" |
if not isinstance(c, six.text_type):
raise TypeError(u"Character to lookup must be of type 'unicode'!")
for d in lookup_order:
try:
return self._casefold_map[d][c]
except KeyError:
pass
return c |
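`self._casefold_map` is presumably a dict of the four status sub-tables, each mapping a single character to its folded string; a sketch of that shape with illustrative entries taken from CaseFolding.txt:

_casefold_map = {
    "C": {u"A": u"a"},            # common: used by both simple and full folding
    "F": {u"\u00DF": u"ss"},      # full: one-to-many mappings
    "S": {u"\u1E9E": u"\u00DF"},  # simple: single-character alternatives
    "T": {u"I": u"\u0131"},       # Turkic: dotted/dotless i special cases
}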
<SYSTEM_TASK:>
Purge a single fastly url
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
Purge a single fastly url
""" |
parser = OptionParser(description="Purge a single url from fastly.")
parser.add_option("-k", "--key", dest="apikey",
default="", help="fastly api key")
parser.add_option("-H", "--host", dest="host",
help="host to purge from")
parser.add_option("-p", "--path", dest="path",
help="path to purge")
(options, args) = parser.parse_args()
for val in options.__dict__.values():
if val is None:
print "Missing required options"
parser.print_help()
sys.exit(1)
client = fastly.connect(options.apikey)
purge = client.purge_url(options.host, options.path)
print purge |
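A hypothetical invocation of this script (the file name and option values are placeholders):

# python purge_url.py --key $FASTLY_API_KEY --host www.example.com --path /css/site.css
# prints the response returned by client.purge_url()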
<SYSTEM_TASK:>
Set attributes in ``obj`` with ``setattr`` from the all values in
<END_TASK>
<USER_TASK:>
Description:
def populate(self, obj=None, section=None, parse_types=True):
"""Set attributes in ``obj`` with ``setattr`` from the all values in
``section``.
""" |
section = self.default_section if section is None else section
obj = Settings() if obj is None else obj
is_dict = isinstance(obj, dict)
for k, v in self.get_options(section).items():
if parse_types:
if v == 'None':
v = None
elif self.FLOAT_REGEXP.match(v):
v = float(v)
elif self.INT_REGEXP.match(v):
v = int(v)
elif self.BOOL_REGEXP.match(v):
v = v == 'True'
else:
m = self.EVAL_REGEXP.match(v)
if m:
evalstr = m.group(1)
v = eval(evalstr)
logger.debug('setting {} => {} on {}'.format(k, v, obj))
if is_dict:
obj[k] = v
else:
setattr(obj, k, v)
return obj |
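A usage sketch, assuming a `Config`-style owning class and an INI file (both names and contents are hypothetical):

# settings.ini:
#   [default]
#   iterations = 10
#   rate = 0.5
#   verbose = True
conf = Config('settings.ini')  # hypothetical constructor for the owning class
settings = conf.populate()     # returns a Settings() with typed attributes
print(settings.iterations + 1, settings.rate / 2, settings.verbose)  # 11 0.25 True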
<SYSTEM_TASK:>
Get the last module in the call stack that is not this module or ``None`` if
<END_TASK>
<USER_TASK:>
Description:
def _get_calling_module(self):
"""Get the last module in the call stack that is not this module or ``None`` if
the call originated from this module.
""" |
for frame in inspect.stack():
mod = inspect.getmodule(frame[0])
logger.debug(f'calling module: {mod}')
if mod is not None:
mod_name = mod.__name__
if mod_name != __name__:
return mod |
<SYSTEM_TASK:>
Return a resource based on a file name. This uses the ``pkg_resources``
<END_TASK>
<USER_TASK:>
Description:
def resource_filename(self, resource_name, module_name=None):
"""Return a resource based on a file name. This uses the ``pkg_resources``
package first to find the resources. If it doesn't find it, it returns
a path on the file system.
:param resource_name: the file name of the resource to obtain (or name
if obtained from an installed module)
:param module_name: the name of the module to obtain the data, which
defaults to ``__name__``
:return: a path on the file system or resource of the installed module
""" |
if module_name is None:
    mod = self._get_calling_module()
    logger.debug(f'calling module: {mod}')
    if mod is not None:
        module_name = mod.__name__
if module_name is None:
    module_name = __name__
if pkg_resources.resource_exists(module_name, resource_name):
    res = pkg_resources.resource_filename(module_name, resource_name)
else:
res = resource_name
return Path(res) |
<SYSTEM_TASK:>
Get all options for a section. If ``opt_keys`` is given return
<END_TASK>
<USER_TASK:>
Description:
def get_options(self, section='default', opt_keys=None, vars=None):
"""
Get all options for a section. If ``opt_keys`` is given return
only options with those keys.
""" |
vars = vars if vars else self.default_vars
conf = self.parser
opts = {}
if opt_keys is None:
if conf is None:
opt_keys = {}
else:
if not self.robust or conf.has_section(section):
opt_keys = conf.options(section)
else:
opt_keys = {}
else:
logger.debug('conf: %s' % conf)
copts = conf.options(section) if conf else {}
opt_keys = set(opt_keys).intersection(set(copts))
for option in opt_keys:
logger.debug(f'option: {option}, vars: {vars}')
opts[option] = conf.get(section, option, vars=vars)
return opts |
<SYSTEM_TASK:>
Return an option from ``section`` with ``name``.
<END_TASK>
<USER_TASK:>
Description:
def get_option(self, name, section=None, vars=None, expect=None):
"""Return an option from ``section`` with ``name``.
:param section: section in the ini file to fetch the value; defaults to
constructor's ``default_section``
""" |
vars = vars if vars else self.default_vars
if section is None:
section = self.default_section
opts = self.get_options(section, opt_keys=[name], vars=vars)
if opts:
return opts[name]
else:
if self._narrow_expect(expect):
raise ValueError('no option \'{}\' found in section {}'.
format(name, section)) |
<SYSTEM_TASK:>
Just like ``get_option`` but parse as a list using ``split``.
<END_TASK>
<USER_TASK:>
Description:
def get_option_list(self, name, section=None, vars=None,
expect=None, separator=','):
"""Just like ``get_option`` but parse as a list using ``split``.
""" |
val = self.get_option(name, section, vars, expect)
return val.split(separator) if val else [] |
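For example, given `hosts = alpha,beta,gamma` in the target section (instance and section names illustrative):

hosts = config.get_option_list('hosts', section='cluster')
# -> ['alpha', 'beta', 'gamma']; a missing option yields []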
<SYSTEM_TASK:>
Just like ``get_option`` but parse as an integer.
<END_TASK>
<USER_TASK:>
Description:
def get_option_int(self, name, section=None, vars=None, expect=None):
"""Just like ``get_option`` but parse as an integer.""" |
val = self.get_option(name, section, vars, expect)
if val:
return int(val) |
<SYSTEM_TASK:>
Just like ``get_option`` but parse as a float.
<END_TASK>
<USER_TASK:>
Description:
def get_option_float(self, name, section=None, vars=None, expect=None):
"""Just like ``get_option`` but parse as a float.""" |
val = self.get_option(name, section, vars, expect)
if val:
return float(val) |
<SYSTEM_TASK:>
Wrapper for property reads which auto-dereferences Refs if required.
<END_TASK>
<USER_TASK:>
Description:
def property_get( prop, instance, **kwargs ):
"""Wrapper for property reads which auto-dereferences Refs if required.
prop
A Ref (which gets dereferenced and returned) or any other value (which gets returned).
instance
The context object used to dereference the Ref.
""" |
if isinstance( prop, Ref ):
return prop.get( instance, **kwargs )
return prop |
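A minimal sketch of both wrappers, assuming `Ref('x')` stores the attribute path `['x']` and allows writes (the `Ref` constructor signature is an assumption):

class Point(object):
    def __init__(self):
        self.x = 3

p = Point()
print(property_get(Ref('x'), p))   # dereferences the Ref -> 3
print(property_get(7, p))          # plain values pass straight through -> 7
property_set(Ref('x'), p, 9)       # sets p.x = 9 (raises if allow_write is False)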
<SYSTEM_TASK:>
Wrapper for property writes which auto-dereferences Refs.
<END_TASK>
<USER_TASK:>
Description:
def property_set( prop, instance, value, **kwargs ):
"""Wrapper for property writes which auto-dereferences Refs.
prop
A Ref (which gets dereferenced and the target value set).
instance
The context object used to dereference the Ref.
value
The value to set the property to.
Throws AttributeError if prop is not a Ref.
""" |
if isinstance( prop, Ref ):
return prop.set( instance, value, **kwargs )
raise AttributeError( "can't change value of constant {} (context: {})".format( prop, instance ) ) |
<SYSTEM_TASK:>
Return an attribute from an object using the Ref path.
<END_TASK>
<USER_TASK:>
Description:
def get( self, instance, **kwargs ):
"""Return an attribute from an object using the Ref path.
instance
The object instance to traverse.
""" |
target = instance
for attr in self._path:
target = getattr( target, attr )
return target |
<SYSTEM_TASK:>
Set an attribute on an object using the Ref path.
<END_TASK>
<USER_TASK:>
Description:
def set( self, instance, value, **kwargs ):
"""Set an attribute on an object using the Ref path.
instance
The object instance to traverse.
value
The value to set.
Throws AttributeError if allow_write is False.
""" |
if not self._allow_write:
raise AttributeError( "can't set Ref directly, allow_write is disabled" )
target = instance
for attr in self._path[:-1]:
target = getattr( target, attr )
setattr( target, self._path[-1], value )
return |
<SYSTEM_TASK:>
Traverses the tree repeatedly until it is no longer modified (a fixpoint).
<END_TASK>
<USER_TASK:>
Description:
def traverse_until_fixpoint(predicate, tree):
"""Traverses the tree repeatedly until it is no longer modified (a fixpoint).""" |
old_tree = None
tree = simplify(tree)
while tree and old_tree != tree:
old_tree = tree
tree = tree.traverse(predicate)
if not tree:
return None
tree = simplify(tree)
return tree |
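A self-contained analogue of the fixpoint loop, using strings in place of trees to show why repeated application is needed:

def rewrite_until_fixpoint(rule, text):
    old = None
    while old != text:
        old = text
        text = rule(text)
    return text

print(rewrite_until_fixpoint(lambda s: s.replace("aa", "a"), "aaaab"))  # -> 'ab'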
<SYSTEM_TASK:>
Create FASTA files of the PointFinder results to be fed into PointFinder
<END_TASK>
<USER_TASK:>
Description:
def fasta(self):
"""
Create FASTA files of the PointFinder results to be fed into PointFinder
""" |
logging.info('Extracting FASTA sequences matching PointFinder database')
for sample in self.runmetadata.samples:
# Ensure that there are sequence data to extract from the GenObject
if GenObject.isattr(sample[self.analysistype], 'sequences'):
# Set the name of the FASTA file
sample[self.analysistype].pointfinderfasta = \
os.path.join(sample[self.analysistype].outputdir,
'{seqid}_pointfinder.fasta'.format(seqid=sample.name))
# Create a list to store all the SeqRecords created
sequences = list()
with open(sample[self.analysistype].pointfinderfasta, 'w') as fasta:
for gene, sequence in sample[self.analysistype].sequences.items():
# Create a SeqRecord using a Seq() of the sequence - both SeqRecord and Seq are from BioPython
seq = SeqRecord(seq=Seq(sequence),
id=gene,
name=str(),
description=str())
sequences.append(seq)
# Write all the SeqRecords to file
SeqIO.write(sequences, fasta, 'fasta') |
<SYSTEM_TASK:>
Run PointFinder on the FASTA sequences extracted from the raw reads
<END_TASK>
<USER_TASK:>
Description:
def run_pointfinder(self):
"""
Run PointFinder on the FASTA sequences extracted from the raw reads
""" |
logging.info('Running PointFinder on FASTA files')
for i in range(len(self.runmetadata.samples)):
# Start threads
threads = Thread(target=self.pointfinder_threads, args=())
# Set the daemon to True so worker threads are terminated when the main thread exits
threads.setDaemon(True)
# Start the threading
threads.start()
# PointFinder requires the path to the blastn executable
blast_path = shutil.which('blastn')
for sample in self.runmetadata.samples:
# Ensure that the attribute storing the name of the FASTA file has been created
if GenObject.isattr(sample[self.analysistype], 'pointfinderfasta'):
sample[self.analysistype].pointfinder_outputs = os.path.join(sample[self.analysistype].outputdir,
'pointfinder_outputs')
# Don't run the analyses if the outputs have already been created
if not os.path.isfile(os.path.join(sample[self.analysistype].pointfinder_outputs,
'{samplename}_blastn_results.tsv'.format(samplename=sample.name))):
make_path(sample[self.analysistype].pointfinder_outputs)
# Create and run the PointFinder system call
pointfinder_cmd = \
'python -m pointfinder.PointFinder -i {input} -s {species} -p {db_path} -m blastn ' \
'-o {output_dir} -m_p {blast_path}'\
.format(input=sample[self.analysistype].pointfinderfasta,
species=sample[self.analysistype].pointfindergenus,
db_path=self.targetpath,
output_dir=sample[self.analysistype].pointfinder_outputs,
blast_path=blast_path)
self.queue.put(pointfinder_cmd)
self.queue.join() |
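The `pointfinder_threads` worker method isn't shown in this excerpt; presumably it drains the queue and executes each system call, roughly as follows (a sketch, assuming `import subprocess`):

def pointfinder_threads(self):
    while True:
        # Block until a PointFinder command is available, run it, then mark it done
        command = self.queue.get()
        subprocess.call(command, shell=True)
        self.queue.task_done()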
<SYSTEM_TASK:>
Create summary reports for the PointFinder outputs
<END_TASK>
<USER_TASK:>
Description:
def parse_pointfinder(self):
"""
Create summary reports for the PointFinder outputs
""" |
# Create the nested dictionary that stores the necessary values for creating summary reports
self.populate_summary_dict()
# Clear out any previous reports
for organism in self.summary_dict:
for report in self.summary_dict[organism]:
try:
os.remove(self.summary_dict[organism][report]['summary'])
except FileNotFoundError:
pass
for sample in self.runmetadata.samples:
# Find the PointFinder outputs. If the outputs don't exist, create the appropriate entries in the
# summary dictionary as required
try:
self.summary_dict[sample.general.referencegenus]['prediction']['output'] = \
glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*prediction.txt'
.format(seq=sample.name)))[0]
except IndexError:
try:
self.summary_dict[sample.general.referencegenus]['prediction']['output'] = str()
except KeyError:
self.populate_summary_dict(genus=sample.general.referencegenus,
key='prediction')
try:
self.summary_dict[sample.general.referencegenus]['table']['output'] = \
glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*table.txt'
.format(seq=sample.name)))[0]
except IndexError:
try:
self.summary_dict[sample.general.referencegenus]['table']['output'] = str()
except KeyError:
self.populate_summary_dict(genus=sample.general.referencegenus,
key='table')
try:
self.summary_dict[sample.general.referencegenus]['results']['output'] = \
glob(os.path.join(sample[self.analysistype].pointfinder_outputs, '{seq}*results.tsv'
.format(seq=sample.name)))[0]
except IndexError:
try:
self.summary_dict[sample.general.referencegenus]['results']['output'] = str()
except KeyError:
self.populate_summary_dict(genus=sample.general.referencegenus,
key='results')
# Process the predictions
self.write_report(summary_dict=self.summary_dict,
seqid=sample.name,
genus=sample.general.referencegenus,
key='prediction')
# Process the results summary
self.write_report(summary_dict=self.summary_dict,
seqid=sample.name,
genus=sample.general.referencegenus,
key='results')
# Process the table summary
self.write_table_report(summary_dict=self.summary_dict,
seqid=sample.name,
genus=sample.general.referencegenus) |
<SYSTEM_TASK:>
Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
<END_TASK>
<USER_TASK:>
Description:
def sequence_prep(self):
"""
Create metadata objects for all PacBio assembly FASTA files in the sequencepath.
Create individual subdirectories for each sample.
Create a relative symlink to the original FASTA file in the appropriate subdirectory
""" |
# Create a sorted list of all the FASTA files in the sequence path
strains = sorted(glob(os.path.join(self.fastapath, '*.fa*')))
for sample in strains:
# Create the object
metadata = MetadataObject()
# Set the sample name to be the file name of the sequence by removing the path and file extension
sample_name = os.path.splitext(os.path.basename(sample))[0]
if sample_name in self.strainset:
# Extract the OLNID from the dictionary using the SEQID
samplename = self.straindict[sample_name]
# samplename = sample_name
# Set and create the output directory
outputdir = os.path.join(self.path, samplename)
make_path(outputdir)
# Set the name of the JSON file
json_metadata = os.path.join(outputdir, '{name}.json'.format(name=samplename))
if not os.path.isfile(json_metadata):
# Create the name and output directory attributes
metadata.name = samplename
metadata.seqid = sample_name
metadata.outputdir = outputdir
metadata.jsonfile = json_metadata
# Set the name of the FASTA file to use in the analyses
metadata.bestassemblyfile = os.path.join(metadata.outputdir,
'{name}.fasta'.format(name=metadata.name))
# Symlink the original file to the output directory
relative_symlink(sample, outputdir, '{sn}.fasta'.format(sn=metadata.name))
# Associate the corresponding FASTQ files with the assembly
metadata.fastqfiles = sorted(glob(os.path.join(self.fastqpath,
'{name}*.gz'.format(name=metadata.name))))
metadata.forward_fastq, metadata.reverse_fastq = metadata.fastqfiles
# Write the object to file
self.write_json(metadata)
else:
metadata = self.read_json(json_metadata)
# Add the metadata object to the list of objects
self.metadata.append(metadata) |
<SYSTEM_TASK:>
Use SeqIO.parse to extract the total number of bases in each assembly file
<END_TASK>
<USER_TASK:>
Description:
def assembly_length(self):
"""
Use SeqIO.parse to extract the total number of bases in each assembly file
""" |
for sample in self.metadata:
# Only determine the assembly length if it has not been previously calculated
if not GenObject.isattr(sample, 'assembly_length'):
# Create the assembly_length attribute, and set it to 0
sample.assembly_length = 0
for record in SeqIO.parse(sample.bestassemblyfile, 'fasta'):
# Update the assembly_length attribute with the length of the current contig
sample.assembly_length += len(record.seq)
# Write the updated object to file
self.write_json(sample) |
<SYSTEM_TASK:>
For each PacBio assembly, sample reads from the corresponding FASTQ files at the appropriate forward and reverse
<END_TASK>
<USER_TASK:>
Description:
def sample_reads(self):
"""
For each PacBio assembly, sample reads from the corresponding FASTQ files at the appropriate forward and reverse
read lengths and sequencing depths using reformat.sh from the bbtools suite
""" |
logging.info('Read sampling')
for sample in self.metadata:
# Iterate through all the desired depths of coverage
for depth in self.read_depths:
for read_pair in self.read_lengths:
# Set the name of the output directory
sample.sampled_reads[depth][read_pair].sampled_outputdir \
= os.path.join(sample.sampled_reads[depth][read_pair].outputdir, 'sampled')
# Set the name of the forward reads - include the depth and read length information
sample.sampled_reads[depth][read_pair].forward_reads.fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
'{name}_{depth}_{read_pair}_R1.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
# Reverse reads
sample.sampled_reads[depth][read_pair].reverse_reads.fastq = \
os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
'{name}_{depth}_{read_pair}_R2.fastq.gz'
.format(name=sample.name,
depth=depth,
read_pair=read_pair))
logging.info(
'Sampling {num_reads} paired reads for sample {name} with the following parameters:\n'
'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
.format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
dp=depth,
name=sample.name,
fl=sample.sampled_reads[depth][read_pair].forward_reads.length,
rl=sample.sampled_reads[depth][read_pair].reverse_reads.length))
# Use the reformat method in the OLCTools bbtools wrapper
# Note that upsample=t is used to ensure that the target number of reads (samplereadstarget) is met
if not os.path.isfile(sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
out, \
err, \
sample.sampled_reads[depth][read_pair].sample_call = bbtools \
.reformat_reads(forward_in=sample.sampled_reads[depth][read_pair].trimmed_forwardfastq,
reverse_in=sample.sampled_reads[depth][read_pair].trimmed_reversefastq,
forward_out=sample.sampled_reads[depth][read_pair].forward_reads.fastq,
reverse_out=sample.sampled_reads[depth][read_pair].reverse_reads.fastq,
returncmd=True,
**{'samplereadstarget': sample.simulated_reads[depth][read_pair].num_reads,
'upsample': 't',
'minlength':
sample.sampled_reads[depth][read_pair].forward_reads.length,
'ziplevel': '9',
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
}
)
# # Remove the trimmed reads, as they are no longer necessary
# try:
# os.remove(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq)
# os.remove(sample.sampled_reads[depth][read_pair].trimmed_reversefastq)
# except FileNotFoundError:
# pass
# Update the JSON file
self.write_json(sample) |
<SYSTEM_TASK:>
Run GeneSippr on each of the samples
<END_TASK>
<USER_TASK:>
Description:
def run_genesippr(self):
"""
Run GeneSippr on each of the samples
""" |
from pathlib import Path
home = str(Path.home())
logging.info('GeneSippr')
# These unfortunate hard coded paths appear to be necessary
miniconda_path = os.path.join(home, 'miniconda3')
miniconda_path = miniconda_path if os.path.isdir(miniconda_path) else os.path.join(home, 'miniconda')
logging.debug(miniconda_path)
activate = 'source {mp}/bin/activate {mp}/envs/sipprverse'.format(mp=miniconda_path)
sippr_path = '{mp}/envs/sipprverse/bin/sippr.py'.format(mp=miniconda_path)
for sample in self.metadata:
logging.info(sample.name)
# Run the pipeline. Check that the genesippr report, which is created last, doesn't already exist
if not os.path.isfile(os.path.join(sample.genesippr_dir, 'reports', 'genesippr.csv')):
cmd = 'python {py_path} -o {outpath} -s {seqpath} -r {refpath} -F'\
.format(py_path=sippr_path,
outpath=sample.genesippr_dir,
seqpath=sample.genesippr_dir,
refpath=self.referencefilepath
)
logging.critical(cmd)
# Create another shell script to execute within the PlasmidExtractor conda environment
template = "#!/bin/bash\n{activate} && {cmd}".format(activate=activate,
cmd=cmd)
genesippr_script = os.path.join(sample.genesippr_dir, 'run_genesippr.sh')
with open(genesippr_script, 'w+') as file:
file.write(template)
# Modify the permissions of the script to allow it to be run on the node
self.make_executable(genesippr_script)
# Run shell script
os.system('/bin/bash {}'.format(genesippr_script)) |
<SYSTEM_TASK:>
Set the logging level of this logger.
<END_TASK>
<USER_TASK:>
Description:
def set_level(self, level):
"""
Set the logging level of this logger.
:param level: must be an int or a str.
""" |
for handler in self.__coloredlogs_handlers:
handler.setLevel(level=level)
self.logger.setLevel(level=level) |
<SYSTEM_TASK:>
Redirect sys.stdout to a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def redirect_stdout(self, enabled=True, log_level=logging.INFO):
"""
Redirect sys.stdout to a file-like object.
""" |
if enabled:
if self.__stdout_wrapper:
self.__stdout_wrapper.update_log_level(log_level=log_level)
else:
self.__stdout_wrapper = StdOutWrapper(logger=self, log_level=log_level)
self.__stdout_stream = self.__stdout_wrapper
else:
self.__stdout_stream = _original_stdout
# Assign the new stream to sys.stdout
sys.stdout = self.__stdout_stream |
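Typical usage, assuming a logger instance that exposes this method (instance name hypothetical):

logger.redirect_stdout(enabled=True, log_level=logging.INFO)
print('this line is captured and emitted as an INFO record')
logger.redirect_stdout(enabled=False)  # restore the original sys.stdout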
<SYSTEM_TASK:>
Handler for logging to a file, rotating the log file at certain timed intervals.
<END_TASK>
<USER_TASK:>
Description:
def use_file(self, enabled=True,
file_name=None,
level=logging.WARNING,
when='d',
interval=1,
backup_count=30,
delay=False,
utc=False,
at_time=None,
log_format=None,
date_format=None):
"""
Handler for logging to a file, rotating the log file at certain timed intervals.
""" |
if enabled:
if not self.__file_handler:
assert file_name, 'File name is missing!'
# Create new TimedRotatingFileHandler instance
kwargs = {
'filename': file_name,
'when': when,
'interval': interval,
'backupCount': backup_count,
'encoding': 'UTF-8',
'delay': delay,
'utc': utc,
}
if sys.version_info[0] >= 3:
kwargs['atTime'] = at_time
self.__file_handler = TimedRotatingFileHandler(**kwargs)
# Use this format for default case
if not log_format:
log_format = '%(asctime)s %(name)s[%(process)d] ' \
'%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' \
'%(levelname)s %(message)s'
# Set formatter
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
self.__file_handler.setFormatter(fmt=formatter)
# Set level for this handler
self.__file_handler.setLevel(level=level)
# Add this handler to logger
self.add_handler(hdlr=self.__file_handler)
elif self.__file_handler:
# Remove handler from logger
self.remove_handler(hdlr=self.__file_handler)
self.__file_handler = None |
<SYSTEM_TASK:>
Enable handler for sending the record to Loggly service.
<END_TASK>
<USER_TASK:>
Description:
def use_loggly(self, enabled=True,
loggly_token=None,
loggly_tag=None,
level=logging.WARNING,
log_format=None,
date_format=None):
"""
Enable handler for sending the record to Loggly service.
""" |
if enabled:
if not self.__loggly_handler:
assert loggly_token, 'Loggly token is missing!'
# Use logger name for default Loggly tag
if not loggly_tag:
loggly_tag = self.name
# Create new LogglyHandler instance
self.__loggly_handler = LogglyHandler(token=loggly_token, tag=loggly_tag)
# Use this format for default case
if not log_format:
log_format = '{"name":"%(name)s","process":"%(process)d",' \
'"levelname":"%(levelname)s","time":"%(asctime)s",' \
'"filename":"%(filename)s","programname":"%(programname)s",' \
'"module":"%(module)s","funcName":"%(funcName)s",' \
'"lineno":"%(lineno)d","message":"%(message)s"}'
# Set formatter
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
self.__loggly_handler.setFormatter(fmt=formatter)
# Set level for this handler
self.__loggly_handler.setLevel(level=level)
# Add this handler to logger
self.add_handler(hdlr=self.__loggly_handler)
elif self.__loggly_handler:
# Remove handler from logger
self.remove_handler(hdlr=self.__loggly_handler)
self.__loggly_handler = None |
<SYSTEM_TASK:>
Log 'msg % args' with the integer severity 'level'.
<END_TASK>
<USER_TASK:>
Description:
def _log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
""" |
if not isinstance(level, int):
if logging.raiseExceptions:
raise TypeError('Level must be an integer!')
else:
return
if self.logger.isEnabledFor(level=level):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
exc_info = kwargs.get('exc_info', None)
extra = kwargs.get('extra', None)
stack_info = kwargs.get('stack_info', False)
record_filter = kwargs.get('record_filter', None)
tb_info = None
if _logone_src:
# IronPython doesn't track Python frames, so findCaller raises an
# exception on some versions of IronPython. We trap it here so that
# IronPython can use logging.
try:
fn, lno, func, tb_info = self.__find_caller(stack_info=stack_info)
except ValueError: # pragma: no cover
fn, lno, func = '(unknown file)', 0, '(unknown function)'
else: # pragma: no cover
fn, lno, func = '(unknown file)', 0, '(unknown function)'
if exc_info:
if sys.version_info[0] >= 3:
if isinstance(exc_info, BaseException):
# noinspection PyUnresolvedReferences
exc_info = type(exc_info), exc_info, exc_info.__traceback__
elif not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
else:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if sys.version_info[0] >= 3:
# noinspection PyArgumentList
record = self.logger.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, tb_info)
else:
record = self.logger.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra)
if record_filter:
record = record_filter(record)
self.logger.handle(record=record) |
<SYSTEM_TASK:>
Flush the buffer, if applicable.
<END_TASK>
<USER_TASK:>
Description:
def flush(self):
"""
Flush the buffer, if applicable.
""" |
if self.__buffer.tell() > 0:
# Write the buffer to log
# noinspection PyProtectedMember
self.__logger._log(level=self.__log_level, msg=self.__buffer.getvalue().strip(),
record_filter=StdErrWrapper.__filter_record)
# Reset the buffer for reuse
self.__buffer.truncate(0)
self.__buffer.seek(0) |
<SYSTEM_TASK:>
Uses contextstring if request_id is set, otherwise default.
<END_TASK>
<USER_TASK:>
Description:
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default.""" |
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record) |
<SYSTEM_TASK:>
Update the display of buttons after querying data from the interface
<END_TASK>
<USER_TASK:>
Description:
def _update(self):
"""Update the display of buttons after querying data from the interface""" |
self.clear()
self._set_boutons_communs()
if self.interface:
self.addSeparator()
l_actions = self.interface.get_actions_toolbar()
self._set_boutons_interface(l_actions) |