sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (string, 1 class) |
---|---|---|
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
if wc_pattern[0] == "*":
self._dynamic_field_regexes.append(
re.compile(r".*%s\Z" % wc_pattern[1:]))
elif wc_pattern[-1] == "*":
self._dynamic_field_regexes.append(
re.compile(r"\A%s.*" % wc_pattern[:-1])) | Builds a list of valid fields | entailment |
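A minimal sketch of how one dynamic-field declaration becomes a regex; the pattern "*_txt" below is only an assumed example, not taken from any real schema:

import re

wc_pattern = "*_txt"  # assumed dynamic field with a leading wildcard
regex = re.compile(r".*%s\Z" % wc_pattern[1:])
print(bool(regex.match("title_txt")))  # True: any field name ending in "_txt"
print(bool(regex.match("title_int")))  # False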
def _clean_doc(self, doc, namespace, timestamp):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
- inserts namespace and timestamp metadata into the document in order
to handle rollbacks
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# Translate the _id field to whatever unique key we're using.
# _id may not exist in the doc, if we retrieved it from Solr
# as part of update.
if '_id' in doc:
doc[self.unique_key] = u(doc.pop("_id"))
# Update namespace and timestamp metadata
if 'ns' in doc or '_ts' in doc:
raise errors.OperationFailed(
'Need to set "ns" and "_ts" fields, but these fields already '
'exist in the document %r!' % doc)
doc['ns'] = namespace
doc['_ts'] = timestamp
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
flat_doc = self._formatter.format_document(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc | Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
- inserts namespace and timestamp metadata into the document in order
to handle rollbacks
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8} | entailment |
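A standalone sketch of the flattening behaviour described above; the real code delegates this to self._formatter.format_document, so the helper below is illustrative only:

def flatten(doc, prefix=""):
    """Recursively flatten nested dicts and lists into dot-separated keys."""
    flat = {}
    for key, value in doc.items():
        path = "%s.%s" % (prefix, key) if prefix else str(key)
        if isinstance(value, dict):
            flat.update(flatten(value, path))
        elif isinstance(value, list):
            flat.update(flatten(dict(enumerate(value)), path))
        else:
            flat[path] = value
    return flat

print(flatten({"a": 2, "b": {"c": {"d": 5}}, "e": [6, 7, 8]}))
# {'a': 2, 'b.c.d': 5, 'e.0': 6, 'e.1': 7, 'e.2': 8}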
def apply_update(self, doc, update_spec):
"""Override DocManagerBase.apply_update to have flat documents."""
# Replace a whole document
if '$set' not in update_spec and '$unset' not in update_spec:
# update_spec contains the new document.
# Update the key in Solr based on the unique_key mentioned as
# parameter.
update_spec['_id'] = doc[self.unique_key]
return update_spec
for to_set in update_spec.get("$set", []):
value = update_spec['$set'][to_set]
# Find dotted-path to the value, remove that key from doc, then
# put value at key:
keys_to_pop = []
for key in doc:
if key.startswith(to_set):
if key == to_set or key[len(to_set)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
doc[to_set] = value
for to_unset in update_spec.get("$unset", []):
# MongoDB < 2.5.2 reports $unset for fields that don't exist within
# the document being updated.
keys_to_pop = []
for key in doc:
if key.startswith(to_unset):
if key == to_unset or key[len(to_unset)] == '.':
keys_to_pop.append(key)
for key in keys_to_pop:
doc.pop(key)
return doc | Override DocManagerBase.apply_update to have flat documents. | entailment |
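As an illustration with made-up values, applying a $set on a dotted path to an already flattened document first pops every key whose path starts with that prefix, then stores the new value:

doc = {"id": 1, "a": 2, "b.c.d": 5}      # flattened Solr document
update_spec = {"$set": {"b.c": 9}}
# apply_update pops "b.c.d" (its key starts with "b.c" followed by a dot)
# and then sets doc["b.c"] = 9, leaving:
# {"id": 1, "a": 2, "b.c": 9}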
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
# Commit outstanding changes so that the document to be updated is the
# same version to which the changes apply.
self.commit()
# Need to escape special characters in the document_id.
document_id = ''.join(map(
lambda c: '\\' + c if c in ESCAPE_CHARACTERS else c,
u(document_id)
))
query = "%s:%s" % (self.unique_key, document_id)
results = self.solr.search(query)
if not len(results):
# Document may not be retrievable yet
self.commit()
results = self.solr.search(query)
# Results is an iterable containing only 1 result
for doc in results:
# Remove metadata previously stored by Mongo Connector.
doc.pop('ns')
doc.pop('_ts')
updated = self.apply_update(doc, update_spec)
# A _version_ of 0 will always apply the update
updated['_version_'] = 0
self.upsert(updated, namespace, timestamp)
return updated | Apply updates given in update_spec to the document whose id
matches that of doc. | entailment |
def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc, namespace, timestamp)],
commit=(self.auto_commit_interval == 0),
commitWithin=u(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc, namespace, timestamp)],
commit=False) | Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary. | entailment |
def bulk_upsert(self, docs, namespace, timestamp):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
if self.auto_commit_interval is not None:
add_kwargs = {
"commit": (self.auto_commit_interval == 0),
"commitWithin": str(self.auto_commit_interval)
}
else:
add_kwargs = {"commit": False}
cleaned = (self._clean_doc(d, namespace, timestamp) for d in docs)
if self.chunk_size > 0:
# Take up to chunk_size documents at a time. islice (requires
# "from itertools import islice" at module level) avoids a StopIteration
# escaping the generator expression, which raises RuntimeError on
# Python 3.7+ (PEP 479).
batch = list(islice(cleaned, self.chunk_size))
while batch:
self.solr.add(batch, **add_kwargs)
batch = list(islice(cleaned, self.chunk_size))
else:
self.solr.add(cleaned, **add_kwargs) | Update or insert multiple documents into Solr
docs may be any iterable | entailment |
def remove(self, document_id, namespace, timestamp):
"""Removes the document with the given document_id from Solr.
"""
self.solr.delete(id=u(document_id),
commit=(self.auto_commit_interval == 0)) | Removes the document with the given document_id from Solr. | entailment |
def _stream_search(self, query):
"""Helper method for iterating over Solr search results."""
for doc in self.solr.search(query, rows=100000000):
if self.unique_key != "_id":
doc["_id"] = doc.pop(self.unique_key)
yield doc | Helper method for iterating over Solr search results. | entailment |
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range."""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
return self._stream_search(query) | Called to query Solr for documents in a time range. | entailment |
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
#search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
for r in result:
r['_id'] = r.pop(self.unique_key)
return r | Returns the last document stored in the Solr engine. | entailment |
def pbkdf2_single(password, salt, key_length, prf):
'''Returns the result of the Password-Based Key Derivation Function 2 with
a single iteration (i.e. count = 1).
prf - a pseudorandom function
See http://en.wikipedia.org/wiki/PBKDF2
'''
block_number = 0
result = b''
# The iterations
while len(result) < key_length:
block_number += 1
result += prf(password, salt + struct.pack('>L', block_number))
return result[:key_length] | Returns the result of the Password-Based Key Derivation Function 2 with
a single iteration (i.e. count = 1).
prf - a pseudorandom function
See http://en.wikipedia.org/wiki/PBKDF2 | entailment |
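A small usage sketch, assuming the same HMAC-SHA256 pseudorandom function that hash() below constructs; with a single iteration the result should agree with the standard library's PBKDF2:

import hashlib
import hmac

prf = lambda k, m: hmac.new(key=k, msg=m, digestmod=hashlib.sha256).digest()
derived = pbkdf2_single(b'password', b'salt', 64, prf)
assert derived == hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 1, 64)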
def salsa20_8(B):
'''Salsa 20/8 stream cipher; Used by BlockMix. See http://en.wikipedia.org/wiki/Salsa20'''
# Create a working copy
x = B[:]
# The unrolled statements below are an expansion of the commented-out loop
# that follows; the expansion is significantly faster, but the compact form
# is much easier to understand.
# ROUNDS = (
# (4, 0, 12, 7), (8, 4, 0, 9), (12, 8, 4, 13), (0, 12, 8, 18),
# (9, 5, 1, 7), (13, 9, 5, 9), (1, 13, 9, 13), (5, 1, 13, 18),
# (14, 10, 6, 7), (2, 14, 10, 9), (6, 2, 14, 13), (10, 6, 2, 18),
# (3, 15, 11, 7), (7, 3, 15, 9), (11, 7, 3, 13), (15, 11, 7, 18),
# (1, 0, 3, 7), (2, 1, 0, 9), (3, 2, 1, 13), (0, 3, 2, 18),
# (6, 5, 4, 7), (7, 6, 5, 9), (4, 7, 6, 13), (5, 4, 7, 18),
# (11, 10, 9, 7), (8, 11, 10, 9), (9, 8, 11, 13), (10, 9, 8, 18),
# (12, 15, 14, 7), (13, 12, 15, 9), (14, 13, 12, 13), (15, 14, 13, 18),
# )
#
# for (destination, a1, a2, b) in ROUNDS:
# a = (x[a1] + x[a2]) & 0xffffffff
# x[destination] ^= ((a << b) | (a >> (32 - b))) & 0xffffffff
for i in (8, 6, 4, 2):
a = (x[0] + x[12]) & 0xffffffff
x[4] ^= ((a << 7) | (a >> 25))
a = (x[4] + x[0]) & 0xffffffff
x[8] ^= ((a << 9) | (a >> 23))
a = (x[8] + x[4]) & 0xffffffff
x[12] ^= ((a << 13) | (a >> 19))
a = (x[12] + x[8]) & 0xffffffff
x[0] ^= ((a << 18) | (a >> 14))
a = (x[5] + x[1]) & 0xffffffff
x[9] ^= ((a << 7) | (a >> 25))
a = (x[9] + x[5]) & 0xffffffff
x[13] ^= ((a << 9) | (a >> 23))
a = (x[13] + x[9]) & 0xffffffff
x[1] ^= ((a << 13) | (a >> 19))
a = (x[1] + x[13]) & 0xffffffff
x[5] ^= ((a << 18) | (a >> 14))
a = (x[10] + x[6]) & 0xffffffff
x[14] ^= ((a << 7) | (a >> 25))
a = (x[14] + x[10]) & 0xffffffff
x[2] ^= ((a << 9) | (a >> 23))
a = (x[2] + x[14]) & 0xffffffff
x[6] ^= ((a << 13) | (a >> 19))
a = (x[6] + x[2]) & 0xffffffff
x[10] ^= ((a << 18) | (a >> 14))
a = (x[15] + x[11]) & 0xffffffff
x[3] ^= ((a << 7) | (a >> 25))
a = (x[3] + x[15]) & 0xffffffff
x[7] ^= ((a << 9) | (a >> 23))
a = (x[7] + x[3]) & 0xffffffff
x[11] ^= ((a << 13) | (a >> 19))
a = (x[11] + x[7]) & 0xffffffff
x[15] ^= ((a << 18) | (a >> 14))
a = (x[0] + x[3]) & 0xffffffff
x[1] ^= ((a << 7) | (a >> 25))
a = (x[1] + x[0]) & 0xffffffff
x[2] ^= ((a << 9) | (a >> 23))
a = (x[2] + x[1]) & 0xffffffff
x[3] ^= ((a << 13) | (a >> 19))
a = (x[3] + x[2]) & 0xffffffff
x[0] ^= ((a << 18) | (a >> 14))
a = (x[5] + x[4]) & 0xffffffff
x[6] ^= ((a << 7) | (a >> 25))
a = (x[6] + x[5]) & 0xffffffff
x[7] ^= ((a << 9) | (a >> 23))
a = (x[7] + x[6]) & 0xffffffff
x[4] ^= ((a << 13) | (a >> 19))
a = (x[4] + x[7]) & 0xffffffff
x[5] ^= ((a << 18) | (a >> 14))
a = (x[10] + x[9]) & 0xffffffff
x[11] ^= ((a << 7) | (a >> 25))
a = (x[11] + x[10]) & 0xffffffff
x[8] ^= ((a << 9) | (a >> 23))
a = (x[8] + x[11]) & 0xffffffff
x[9] ^= ((a << 13) | (a >> 19))
a = (x[9] + x[8]) & 0xffffffff
x[10] ^= ((a << 18) | (a >> 14))
a = (x[15] + x[14]) & 0xffffffff
x[12] ^= ((a << 7) | (a >> 25))
a = (x[12] + x[15]) & 0xffffffff
x[13] ^= ((a << 9) | (a >> 23))
a = (x[13] + x[12]) & 0xffffffff
x[14] ^= ((a << 13) | (a >> 19))
a = (x[14] + x[13]) & 0xffffffff
x[15] ^= ((a << 18) | (a >> 14))
# Add the original values
for i in xrange(0, 16):
B[i] = (B[i] + x[i]) & 0xffffffff | Salsa 20/8 stream cipher; Used by BlockMix. See http://en.wikipedia.org/wiki/Salsa20 | entailment |
def blockmix_salsa8(BY, Yi, r):
'''Blockmix; Used by SMix.'''
start = (2 * r - 1) * 16
X = BY[start:start + 16] # BlockMix - 1
for i in xrange(0, 2 * r): # BlockMix - 2
for xi in xrange(0, 16): # BlockMix - 3(inner)
X[xi] ^= BY[i * 16 + xi]
salsa20_8(X) # BlockMix - 3(outer)
aod = Yi + i * 16 # BlockMix - 4
BY[aod:aod + 16] = X[:16]
for i in xrange(0, r): # BlockMix - 6 (and below)
aos = Yi + i * 32
aod = i * 16
BY[aod:aod + 16] = BY[aos:aos + 16]
for i in xrange(0, r):
aos = Yi + (i * 2 + 1) * 16
aod = (i + r) * 16
BY[aod:aod + 16] = BY[aos:aos + 16] | Blockmix; Used by SMix. | entailment |
def smix(B, Bi, r, N, V, X):
'''SMix; a specific case of ROMix. See scrypt.pdf in the links above.'''
X[:32 * r] = B[Bi:Bi + 32 * r] # ROMix - 1
for i in xrange(0, N): # ROMix - 2
aod = i * 32 * r # ROMix - 3
V[aod:aod + 32 * r] = X[:32 * r]
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(0, N): # ROMix - 6
j = X[(2 * r - 1) * 16] & (N - 1) # ROMix - 7
for xi in xrange(0, 32 * r): # ROMix - 8(inner)
X[xi] ^= V[j * 32 * r + xi]
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
B[Bi:Bi + 32 * r] = X[:32 * r] | SMix; a specific case of ROMix. See scrypt.pdf in the links above. | entailment |
def hash(password, salt, N, r, p, dkLen):
"""Returns the result of the scrypt password-based key derivation function.
Constraints:
r * p < (2 ** 30)
dkLen <= ((2 ** 32) - 1) * 32
N must be a power of 2 greater than 1 (e.g. 2, 4, 8, 16, 32...)
N, r, p must be positive
"""
# This only matters to Python 3
if not check_bytes(password):
raise ValueError('password must be a byte array')
if not check_bytes(salt):
raise ValueError('salt must be a byte array')
# Scrypt implementation. Significant thanks to https://github.com/wg/scrypt
if N < 2 or (N & (N - 1)): raise ValueError('Scrypt N must be a power of 2 greater than 1')
# A pseudorandom function
prf = lambda k, m: hmac.new(key = k, msg = m, digestmod = hashlib.sha256).digest()
# convert into integers
B = [ get_byte(c) for c in pbkdf2_single(password, salt, p * 128 * r, prf) ]
B = [ ((B[i + 3] << 24) | (B[i + 2] << 16) | (B[i + 1] << 8) | B[i + 0]) for i in xrange(0, len(B), 4)]
XY = [ 0 ] * (64 * r)
V = [ 0 ] * (32 * r * N)
for i in xrange(0, p):
smix(B, i * 32 * r, r, N, V, XY)
# Convert back into bytes
Bc = [ ]
for i in B:
Bc.append((i >> 0) & 0xff)
Bc.append((i >> 8) & 0xff)
Bc.append((i >> 16) & 0xff)
Bc.append((i >> 24) & 0xff)
return pbkdf2_single(password, chars_to_bytes(Bc), dkLen, prf) | Returns the result of the scrypt password-based key derivation function.
Constraints:
r * p < (2 ** 30)
dkLen <= ((2 ** 32) - 1) * 32
N must be a power of 2 greater than 1 (e.g. 2, 4, 8, 16, 32...)
N, r, p must be positive | entailment |
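A usage sketch with deliberately small parameters (real deployments use much larger N); the result can be cross-checked against hashlib.scrypt, available in Python 3.6+ when OpenSSL provides scrypt:

import hashlib

key = hash(b'password', b'NaCl', 1024, 1, 1, 64)
ref = hashlib.scrypt(b'password', salt=b'NaCl', n=1024, r=1, p=1, dklen=64)
assert key == ref
print(key.hex())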
def _load_get_attr(self, name):
'Return an internal attribute after ensuring the header is loaded if necessary.'
if self._mode in _allowed_read and self._N is None:
self._read_header()
return getattr(self, name) | Return an internal attribute after ensuring the header is loaded if necessary. | entailment |
def close(self):
'''Close the underlying file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.'''
if self._mode in _allowed_write and self._valid is None:
self._finalize_write()
result = self._fp.close()
self._closed = True
return result | Close the underlying file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing. | entailment |
def verify_file(fp, password):
'Returns whether a scrypt encrypted file is valid.'
sf = ScryptFile(fp = fp, password = password)
for line in sf: pass
sf.close()
return sf.valid | Returns whether a scrypt encrypted file is valid. | entailment |
def readline(self, size = None):
'''Next line from the decrypted file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.'''
if self.closed: raise ValueError('file closed')
if self._mode in _allowed_write:
raise Exception('file opened for write only')
if self._read_finished: return b''  # empty string at EOF, as documented
line = b''
while not line.endswith(b'\n') and not self._read_finished and (size is None or len(line) <= size):
line += self.read(1)
return line | Next line from the decrypted file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF. | entailment |
def _read_header(self):
'''Read and parse the header and calculate derived keys.'''
try:
# Read the entire header
header = self._fp.read(96)
if len(header) != 96:
raise InvalidScryptFileFormat("Incomplete header")
# Magic number
if header[0:6] != b'scrypt':
raise InvalidScryptFileFormat('Invalid magic number.')
# Version (we only support 0)
version = get_byte(header[6])
if version != 0:
raise InvalidScryptFileFormat('Unsupported version (%d)' % version)
# Scrypt parameters
self._N = 1 << get_byte(header[7])
(self._r, self._p) = struct.unpack('>II', header[8:16])
self._salt = header[16:48]
# Generate the key
self._key = hash(self._password, self._salt, self._N, self._r, self._p, 64)
# Header Checksum
checksum = header[48:64]
calculate_checksum = hashlib.sha256(header[0:48]).digest()[:16]
if checksum != calculate_checksum:
raise InvalidScryptFileFormat('Incorrect header checksum')
# Stream checksum
checksum = header[64:96]
self._checksumer = hmac.new(self.key[32:], msg = header[0:64], digestmod = hashlib.sha256)
if checksum != self._checksumer.digest():
raise InvalidScryptFileFormat('Incorrect header stream checksum')
self._checksumer.update(header[64:96])
# Prepare the AES engine
self._crypto = aesctr.AESCounterModeOfOperation(key = self.key[:32])
self._done_header = True
except InvalidScryptFileFormat as e:
self.close()
raise e
except Exception as e:
self.close()
raise InvalidScryptFileFormat('Header error (%s)' % e) | Read and parse the header and calculate derived keys. | entailment |
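For reference, the header layout the parser above assumes, derived from the reads in the code (offsets and sizes in bytes):

# offset  size  field
# 0       6     magic b'scrypt'
# 6       1     version (must be 0)
# 7       1     log2(N)
# 8       8     r and p, two big-endian uint32 values
# 16      32    salt
# 48      16    first 16 bytes of SHA-256 over header[0:48]
# 64      32    HMAC-SHA256, keyed with key[32:], over header[0:64]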
def read(self, size = None):
'''Read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was requested
may be returned, even if no size parameter was given.'''
if self.closed: raise ValueError('File closed')
if self._mode in _allowed_write:
raise Exception('File opened for write only')
if not self._done_header:
self._read_header()
# The encrypted file has been entirely read, so return as much as they want
# and remove the returned portion from the decrypted buffer
if self._read_finished:
if size is None:
decrypted = self._decrypted_buffer
else:
decrypted = self._decrypted_buffer[:size]
self._decrypted_buffer = self._decrypted_buffer[len(decrypted):]
return decrypted
# Read everything in one chunk
if size is None or size < 0:
self._encrypted_buffer = self._fp.read()
self._read_finished = True
else:
# We fill the encrypted buffer (keeping it with a minimum of 32 bytes in case of the
# end-of-file checksum) and decrypt into a decrypted buffer 1 block at a time
while not self._read_finished:
# We have enough decrypted bytes (or will after decrypting the encrypted buffer)
available = len(self._decrypted_buffer) + len(self._encrypted_buffer) - 32
if available >= size: break
# Read a little extra for the possible final checksum
data = self._fp.read(BLOCK_SIZE)
# No data left; we're done
if not data:
self._read_finished = True
break
self._encrypted_buffer += data
# Decrypt as much of the encrypted data as possible (leaving the final check sum)
safe = self._encrypted_buffer[:-32]
self._encrypted_buffer = self._encrypted_buffer[-32:]
self._decrypted_buffer += self._crypto.decrypt(safe)
self._checksumer.update(safe)
# We read all the bytes, only the checksum remains
if self._read_finished:
self._check_final_checksum(self._encrypted_buffer)
# Send back the number of bytes requests and remove them from the buffer
decrypted = self._decrypted_buffer[:size]
self._decrypted_buffer = self._decrypted_buffer[size:]
return decrypted | Read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was requested
may be returned, even if no size parameter was given. | entailment |
def _write_header(self):
'Writes the header to the underlying file object.'
header = b'scrypt' + CHR0 + struct.pack('>BII', int(math.log(self.N, 2)), self.r, self.p) + self.salt
# Add the header checksum to the header
checksum = hashlib.sha256(header).digest()[:16]
header += checksum
# Add the header stream checksum
self._checksumer = hmac.new(self.key[32:], msg = header, digestmod = hashlib.sha256)
checksum = self._checksumer.digest()
header += checksum
self._checksumer.update(checksum)
# Write the header
self._fp.write(header)
# Prepare the AES engine
self._crypto = aesctr.AESCounterModeOfOperation(key = self.key[:32])
#self._crypto = aes(self.key[:32])
self._done_header = True | Writes the header to the underlying file object. | entailment |
def _finalize_write(self):
'Finishes any unencrypted bytes and writes the final checksum.'
# Make sure we have written the header
if not self._done_header:
self._write_header()
# Write the remaining decrypted part to disk
block = self._crypto.encrypt(self._decrypted_buffer)
self._decrypted_buffer = b''
self._fp.write(block)
self._checksumer.update(block)
# Write the final checksum
self._fp.write(self._checksumer.digest())
self._valid = True | Finishes any unencrypted bytes and writes the final checksum. | entailment |
def write(self, str):
'''Write string str to the underlying file.
Note that due to buffering, flush() or close() may be needed before
the file on disk reflects the data written.'''
if self.closed: raise ValueError('File closed')
if self._mode in _allowed_read:
raise Exception('File opened for read only')
if self._valid is not None:
raise Exception('file already finalized')
if not self._done_header:
self._write_header()
# Encrypt and write the data
encrypted = self._crypto.encrypt(str)
self._checksumer.update(encrypted)
self._fp.write(encrypted) | Write string str to the underlying file.
Note that due to buffering, flush() or close() may be needed before
the file on disk reflects the data written. | entailment |
def get_logger(logger_name):
"""
Return a logger with the specified name, creating it if necessary.
"""
# Use default global logger
if logger_name is None:
return __instance
assert isinstance(logger_name, str), 'Logger name must be a string!'
with __lock:
if logger_name in __loggers:
return __loggers[logger_name]
logger_instance = LogOne(logger_name=logger_name)
__loggers[logger_name] = logger_instance
return logger_instance | Return a logger with the specified name, creating it if necessary. | entailment |
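A usage sketch based on the code above (LogOne and the module-level registry belong to the surrounding module):

log = get_logger('my_app')    # created and cached on first call
same = get_logger('my_app')   # later calls return the cached instance
assert log is same
default = get_logger(None)    # None returns the module's default global logger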
def removeFile(file):
"""remove a file"""
if "y" in speech.question("Are you sure you want to remove " + file + "? (Y/N): "):
speech.speak("Removing " + file + " with the 'rm' command.")
subprocess.call(["rm", "-r", file])
else:
speech.speak("Okay, I won't remove " + file + ".") | remove a file | entailment |
def copy(location):
"""copy file or directory at a given location; can be pasted later"""
copyData = settings.getDataFile()
copyFileLocation = os.path.abspath(location)
copy = {"copyLocation": copyFileLocation}
dataFile = open(copyData, "wb")
pickle.dump(copy, dataFile)
dataFile.close()
speech.speak(location + " copied successfully!")
speech.speak("Tip: use 'hallie paste' to paste this file.") | copy file or directory at a given location; can be pasted later | entailment |
def paste(location):
"""paste a file or directory that has been previously copied"""
copyData = settings.getDataFile()
if not location:
location = "."
try:
data = pickle.load(open(copyData, "rb"))
speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
except:
speech.fail("It doesn't look like you've copied anything yet.")
speech.fail("Type 'hallie copy <file>' to copy a file or folder.")
return
process, error = subprocess.Popen(["cp", "-r", data["copyLocation"], location], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()
if "denied" in process:
speech.fail("Unable to paste your file successfully. This is most likely due to a permission issue. You can try to run me as sudo!") | paste a file or directory that has been previously copied | entailment |
def add_zfs_apt_repository():
""" adds the ZFS repository """
with settings(hide('warnings', 'running', 'stdout'),
warn_only=False, capture=True):
sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update')
install_ubuntu_development_tools()
apt_install(packages=['software-properties-common',
'dkms',
'linux-headers-generic',
'build-essential'])
sudo('echo | add-apt-repository ppa:zfs-native/stable')
sudo('DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update')
return True | adds the ZFS repository | entailment |
def apt_install(**kwargs):
"""
installs an apt package
"""
for pkg in list(kwargs['packages']):
if is_package_installed(distribution='ubuntu', pkg=pkg) is False:
sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get install -y %s" % pkg)
# if we didn't abort above, we should return True
return True | installs an apt package | entailment |
def apt_install_from_url(pkg_name, url, log=False):
""" installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package
"""
if is_package_installed(distribution='ubuntu', pkg=pkg_name) is False:
if log:
log_green(
"installing %s from %s" % (pkg_name, url))
with settings(hide('warnings', 'running', 'stdout'),
capture=True):
sudo("wget -c -O %s.deb %s" % (pkg_name, url))
sudo("dpkg -i %s.deb" % pkg_name)
# if we didn't abort above, we should return True
return True | installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package | entailment |
def apt_add_key(keyid, keyserver='keyserver.ubuntu.com', log=False):
""" trust a new PGP key related to a apt-repository """
if log:
log_green(
'trusting keyid %s from %s' % (keyid, keyserver)
)
with settings(hide('warnings', 'running', 'stdout')):
sudo('apt-key adv --keyserver %s --recv %s' % (keyserver, keyid))
return True | trust a new PGP key related to a apt-repository | entailment |
def enable_apt_repositories(prefix, url, version, repositories):
""" adds an apt repository """
with settings(hide('warnings', 'running', 'stdout'),
warn_only=False, capture=True):
sudo('apt-add-repository "%s %s %s %s"' % (prefix,
url,
version,
repositories))
with hide('running', 'stdout'):
output = sudo("DEBIAN_FRONTEND=noninteractive /usr/bin/apt-get update")
if 'Some index files failed to download' in output:
raise SystemExit(1)
else:
# if we didn't abort above, we should return True
return True | adds an apt repository | entailment |
def install_gem(gem):
""" install a particular gem """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=False, capture=True):
# convert 0 into True, any errors will always raise an exception
return not bool(
run("gem install %s --no-rdoc --no-ri" % gem).return_code) | install a particular gem | entailment |
def install_python_module_locally(name):
""" installs a python module using pip """
with settings(hide('everything'),
warn_only=False, capture=True):
# convert 0 into True, any errors will always raise an exception
result = not bool(local('pip --quiet install %s' % name).return_code)
print(result)
return result | installs a python module using pip | entailment |
def is_package_installed(distribution, pkg):
""" checks if a particular package is installed """
if ('centos' in distribution or
'el' in distribution or
'redhat' in distribution):
return(is_rpm_package_installed(pkg))
if ('ubuntu' in distribution or
'debian' in distribution):
return(is_deb_package_installed(pkg)) | checks if a particular package is installed | entailment |
def is_rpm_package_installed(pkg):
""" checks if a particular rpm package is installed """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo("rpm -q %s" % pkg)
if result.return_code == 0:
return True
elif result.return_code == 1:
return False
else: # print error to user
print(result)
raise SystemExit() | checks if a particular rpm package is installed | entailment |
def yum_install(**kwargs):
"""
installs a yum package
"""
if 'repo' in kwargs:
repo = kwargs['repo']
for pkg in list(kwargs['packages']):
if is_package_installed(distribution='el', pkg=pkg) is False:
if 'repo' in locals():
log_green(
"installing %s from repo %s ..." % (pkg, repo))
sudo("yum install -y --quiet --enablerepo=%s %s" % (repo, pkg))
else:
log_green("installing %s ..." % pkg)
sudo("yum install -y --quiet %s" % pkg) | installs a yum package | entailment |
def yum_group_install(**kwargs):
""" installs a yum group """
for grp in list(kwargs['groups']):
log_green("installing %s ..." % grp)
if 'repo' in kwargs:
repo = kwargs['repo']
sudo("yum groupinstall -y --quiet "
"--enablerepo=%s '%s'" % (repo, grp))
else:
sudo("yum groups mark install -y --quiet '%s'" % grp)
sudo("yum groups mark convert -y --quiet '%s'" % grp)
sudo("yum groupinstall -y --quiet '%s'" % grp) | installs a yum group | entailment |
def yum_install_from_url(pkg_name, url):
""" installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package
"""
if is_package_installed(distribution='el', pkg=pkg_name) is False:
log_green(
"installing %s from %s" % (pkg_name, url))
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo("rpm -i %s" % url)
if result.return_code == 0:
return True
elif result.return_code == 1:
return False
else: # print error to user
print(result)
raise SystemExit() | installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package | entailment |
def recherche(self, pattern, entete, in_all=False):
"""abstractSearch in fields of collection and reset rendering.
Returns number of results.
If in_all is True, call get_all before doing the search."""
if in_all:
self.collection = self.get_all()
self.collection.recherche(pattern, entete)
self._reset_render()
return len(self.collection) | abstractSearch in fields of collection and reset rendering.
Returns number of results.
If in_all is True, call get_all before doing the search. | entailment |
def launch_background_job(self, job, on_error=None, on_success=None):
"""Launch the callable job in background thread.
Succes or failure are controlled by on_error and on_success
"""
if not self.main.mode_online:
self.sortie_erreur_GUI(
"Local mode activated. Can't run background task !")
self.reset()
return
on_error = on_error or self.sortie_erreur_GUI
on_success = on_success or self.sortie_standard_GUI
def thread_end(r):
on_success(r)
self.update()
def thread_error(r):
on_error(r)
self.reset()
logging.info(
f"Launching background task from interface {self.__class__.__name__} ...")
th = threads.worker(job, thread_error, thread_end)
self._add_thread(th) | Launch the callable job in background thread.
Success and failure are handled by on_error and on_success | entailment |
def filtre(liste_base, criteres) -> groups.Collection:
"""
Return a filtered list, based on criteres
:param liste_base: Acces list
:param criteres: Criteria { `attribute`: [values, ...] }
"""
def choisi(ac):
for cat, li in criteres.items():
v = ac[cat]
if not (v in li):
return False
return True
return groups.Collection(a for a in liste_base if choisi(a)) | Return a filtered list, based on criteres
:param liste_base: Acces list
:param criteres: Criteria { `attribute`: [values, ...] } | entailment |
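A hedged usage sketch; the field names and element shape below are assumptions, since the only requirement visible above is that each element supports item access (ac[cat]) and that groups.Collection wraps an iterable:

acces = [
    {"categorie": "stage", "annee": 2019},
    {"categorie": "camp", "annee": 2020},
]
criteres = {"categorie": ["camp"]}
# filtre(acces, criteres) keeps only the entries whose "categorie" value
# appears in the criteria list, i.e. the second element here, wrapped in a
# groups.Collection.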
def load_remote_data(self, callback_etat=print):
"""
Load remote data. On success, build the base.
On failure, raise :class:`~.Core.exceptions.StructureError`, :class:`~.Core.exceptions.ConnexionError`
:param callback_etat: State renderer str , int , int -> None
"""
callback_etat("Chargement des utilisateurs", 0, 1)
self._load_users()
self.base = self.BASE_CLASS.load_from_db(callback_etat=callback_etat) | Load remote data. On success, build the base.
On failure, raise :class:`~.Core.exceptions.StructureError`, :class:`~.Core.exceptions.ConnexionError`
:param callback_etat: State renderer str , int , int -> None | entailment |
def _load_users(self):
"""Default implementation requests users from the DB.
Should set up the `users` attribute."""
r = sql.abstractRequetesSQL.get_users()()
self.users = {d["id"]: dict(d) for d in r} | Default implementation requests users from the DB.
Should set up the `users` attribute. | entailment |
def load_modules(self):
"""Should instance interfaces and set them to interface, following `modules`"""
if self.INTERFACES_MODULE is None:
raise NotImplementedError("A module containing interfaces modules "
"should be setup in INTERFACES_MODULE !")
else:
for module, permission in self.modules.items():
i = getattr(self.INTERFACES_MODULE,
module).Interface(self, permission)
self.interfaces[module] = i | Should instance interfaces and set them to interface, following `modules` | entailment |
def has_autolog(self, user_id):
"""
Read auto-connection parameters and returns local password or None
"""
try:
with open("local/init", "rb") as f:
s = f.read()
s = security.protege_data(s, False)
self.autolog = json.loads(s).get("autolog", {})
except FileNotFoundError:
return
mdp = self.autolog.get(user_id, None)
return mdp | Read auto-connection parameters and returns local password or None | entailment |
def loggin(self, user_id, mdp, autolog):
"""Check mdp and return True if it's ok"""
r = sql.abstractRequetesSQL.check_mdp_user(user_id, mdp)
if r():
# update auto-log params
self.autolog[user_id] = autolog and mdp or False
self.modules = self.users[user_id]["modules"] # load modules list
dic = {"autolog": self.autolog, "modules": self.modules}
s = json.dumps(dic, indent=4, ensure_ascii=False)
b = security.protege_data(s, True)
with open("local/init", "wb") as f:
f.write(b)
self.mode_online = True # authorization to execute bakground tasks
return True
else:
logging.debug("Bad password !") | Check mdp and return True if it's ok | entailment |
def add_widget(self, w):
"""Convenience function"""
if self.layout():
self.layout().addWidget(w)
else:
layout = QVBoxLayout(self)
layout.addWidget(w) | Convenience function | entailment |
def add_layout(self, l):
"""Convenience function"""
if self.layout():
self.layout().addLayout(l)
else:
layout = QVBoxLayout(self)
layout.addLayout(l) | Convenience function | entailment |
def mkpad(items):
'''
Find the length of the longest element of a list. Return that value + two.
'''
pad = 0
stritems = [str(e) for e in items] # cast list to strings
for e in stritems:
if len(e) > pad:
pad = len(e)
pad += 2
return pad | Find the length of the longest element of a list. Return that value + two. | entailment |
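For example:

print(mkpad(['ls', 'grep', 'python']))  # longest element has 6 characters, so 6 + 2 = 8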
def mkcols(l, rows):
'''
Compute the size of our columns by first padding the list to a multiple of our
row height and then splitting our list into smaller lists the size of the row
height.
'''
cols = []
base = 0
while len(l) > rows and len(l) % rows != 0:
l.append("")
for i in range(rows, len(l) + rows, rows):
cols.append(l[base:i])
base = i
return cols | Compute the size of our columns by first padding the list to a multiple of our
row height and then splitting our list into smaller lists the size of the row
height. | entailment |
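For instance, splitting seven items into rows of height three first pads the list to a multiple of three, then slices it:

print(mkcols(['a', 'b', 'c', 'd', 'e', 'f', 'g'], 3))
# [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', '', '']]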
def mkrows(l, pad, width, height):
'''
Compute the optimal number of rows based on our lists' largest element and
our terminal size in columns and rows.
Work out our maximum column number by dividing the width of the terminal by
our largest element.
While the length of our list is greater than the total number of elements we
can fit on the screen increment the height by one.
'''
maxcols = int(width/pad)
while len(l) > height * maxcols:
height += 1
return height | Compute the optimal number of rows based on our lists' largest element and
our terminal size in columns and rows.
Work out our maximum column number by dividing the width of the terminal by
our largest element.
While the length of our list is greater than the total number of elements we
can fit on the screen increment the height by one. | entailment |
def prtcols(items, vpad=6):
'''
After computing the size of our rows and columns based on the terminal size
and length of the largest element, use zip to aggregate our column lists
into row lists and then iterate over the row lists and print them.
'''
from os import get_terminal_size
items = list(items) # copy list so we don't mutate it
width, height = get_terminal_size()
height -= vpad # customize vertical padding
pad = mkpad(items)
rows = mkrows(items, pad, width, height)
cols = mkcols(items, rows)
# * operator in conjunction with zip, unzips the list
for c in zip(*cols):
row_format = '{:<{pad}}' * len(cols)
print(row_format.format(*c, pad=pad)) | After computing the size of our rows and columns based on the terminal size
and length of the largest element, use zip to aggregate our column lists
into row lists and then iterate over the row lists and print them. | entailment |
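Typical usage; the layout depends on the current terminal size, so this only makes sense when run in a real terminal (os.get_terminal_size needs Python 3.3+):

prtcols(['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta'], vpad=4)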
def cmd2list(cmd):
''' Executes a command through the operating system and returns the output
as a list, or on error a string with the standard error.
EXAMPLE:
>>> from subprocess import Popen, PIPE
>>> cmd2list('ls -l')
'''
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
if p.returncode != 0 and stderr != '':
return "ERROR: %s\n"%(stderr)
else:
return stdout.split('\n') | Executes a command through the operating system and returns the output
as a list, or on error a string with the standard error.
EXAMPLE:
>>> from subprocess import Popen, PIPE
>>> cmd2list('ls -l') | entailment |
def return_timer(self, name, status, timer):
''' Return a text formatted timer '''
timer_template = '%s %s %s : %s : %9s'
t = str(timedelta(0, timer)).split(',')[-1].strip().split(':')
#t = str(timedelta(0, timer)).split(':')
if len(t) == 4:
h, m, s = int(t[0])*24 + int(t[1]), int(t[2]), float(t[3])
elif len(t) == 3: h, m, s = int(t[0]), int(t[1]), float(t[2])
else: h, m, s = 0, 0, str(t)
return timer_template%(
name[:20].ljust(20),
status[:7].ljust(7),
'%3d'%h if h != 0 else ' --',
'%2d'%m if m != 0 else '--',
'%.6f'%s if isinstance(s, float) else s
) | Return a text formatted timer | entailment |
def print_timers(self):
''' PRINT EXECUTION TIMES FOR THE LIST OF PROGRAMS '''
self.timer += time()
total_time = self.timer
tmp = '* %s *'
debug.log(
'',
'* '*29,
tmp%(' '*51),
tmp%('%s %s %s'%('Program Name'.ljust(20), 'Status'.ljust(7), 'Execute Time (H:M:S)')),
tmp%('='*51)
)
for name in self.list:
if self.exists(name):
timer = getattr(self, name).get_time()
status = getattr(self, name).get_status()
self.timer -= timer
debug.log(tmp%(self.return_timer(name, status, timer)))
else:
debug.log(tmp%("%s %s -- : -- : --"%(name[:20].ljust(20),' '*8)))
debug.log(
tmp%(self.return_timer('Wrapper', '', self.timer)),
tmp%('='*51),
tmp%(self.return_timer('Total', '', total_time)),
tmp%(' '*51),
'* '*29,
''
) | PRINT EXECUTION TIMES FOR THE LIST OF PROGRAMS | entailment |
def get_cmd(self):
""" This function combines and return the commanline call of the program.
"""
cmd = []
if self.path is not None:
if '/' in self.path and not os.path.exists(self.path):
debug.log('Error: path contains / but does not exist: %s'%self.path)
else:
if self.ptype is not None:
if os.path.exists(self.ptype):
cmd.append(self.ptype)
elif '/' not in self.ptype:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
ppath = os.path.join(path, self.ptype)
if os.path.isfile(ppath):
cmd.append(ppath)
break
cmd.append(self.path)
if sys.version_info < (3, 0):
cmd.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in [quote(str(x)) for x in self.args]+self.unquoted_args])
else:
cmd.extend([str(x) for x in [quote(str(x)) for x in self.args]+self.unquoted_args])
else:
debug.log('Error: Program path not set!')
return ' '.join(cmd) | This function combines and return the commanline call of the program. | entailment |
def append_args(self, arg):
""" This function appends the provided arguments to the program object.
"""
debug.log("Adding Arguments: %s"%(arg))
if isinstance(arg, (int,float)): self.args.append(str(arg))
if isinstance(arg, str): self.args.append(arg)
if isinstance(arg, list):
if sys.version_info < (3, 0):
self.args.extend([str(x) if not isinstance(x, (unicode)) else x.encode('utf-8') for x in arg])
else:
self.args.extend([str(x) for x in arg]) | This function appends the provided arguments to the program object. | entailment |
def execute(self):
""" This function Executes the program with set arguments. """
prog_cmd = self.get_cmd().strip()
if prog_cmd == '':
self.status = 'Failure'
debug.log("Error: No program to execute for %s!"%self.name)
debug.log(("Could not combine path and arguments into cmdline:"
"\n%s %s\n")%(self.path, ' '.join(self.args)))
else:
debug.log("\n\nExecute %s...\n%s" % (self.name, prog_cmd))
# Create shell script
script = '%s.sh'%self.name
if self.wdir != '':
script = '%s/%s'%(self.wdir, script)
else:
script = '%s/%s'%(os.getcwd(), script)
with open_(script, 'w') as f:
f.write('#!/bin/bash\n')
if self.wdir != '':
f.write('cd {workdir}\n'.format(workdir=self.wdir))
f.write(
('touch {stdout} {stderr}\n'
'chmod a+r {stdout} {stderr}\n'
'{cmd} 1> {stdout} 2> {stderr}\n'
'ec=$?\n').format(
stdout=self.stdout,
stderr=self.stderr,
cmd=prog_cmd
)
)
if not self.forcewait:
f.write(('if [ "$ec" -ne "0" ]; then echo "Error" >> {stderr}; '
'else echo "Done" >> {stderr}; fi\n').format(
stderr=self.stderr))
f.write('exit $ec\n')
os.chmod(script, 0o744)
if self.queue is not None:
# Setup execution of shell script through TORQUE
other_args = ''
if self.forcewait: other_args += "-K " # ADDING -K argument if wait() is forced
# QSUB INFO :: run_time_limit(walltime, dd:hh:mm:ss),
# memory(mem, up to 100GB *gigabyte),
# processors(ppn, up to 16) # USE AS LITTLE AS NEEDED!
cmd = ('/usr/bin/qsub '
'-l nodes=1:ppn={procs},walltime={hours}:00:00,mem={mem}g '
'-r y {workdir_arg} {other_args} {cmd}').format(
procs=self.procs,
hours=self.walltime,
mem=self.mem,
workdir_arg="-d %s"%(self.wdir) if self.wdir != '' else '',
other_args=other_args,
cmd=script)
debug.log("\n\nTORQUE SETUP %s...\n%s\n" % (self.name, cmd))
else:
cmd = script
if self.server is not None:
cmd = "ssh {server} {cmd}".format(
server=self.server,
cmd=quote(cmd)
)
self.status = 'Executing'
# EXECUTING PROGRAM
self.update_timer(-time()) # TIME START
if self.forcewait:
self.p = Popen(cmd)
ec = self.p.wait()
if ec == 0:
debug.log("Program finished successfully!")
self.status = 'Done'
else:
debug.log("Program failed on execution!")
self.status = 'Failure'
self.p = None
else: # WaitOn should be called to determine if the program has ended
debug.log("CMD: %s"%cmd)
self.p = Popen(cmd) # shell=True, executable="/bin/bash"
self.update_timer(time()) # TIME END
debug.log("timed: %s" % (self.get_time())) | This function Executes the program with set arguments. | entailment |
def wait(self, pattern='Done', interval=None,
epatterns=['error','Error','STACK','Traceback']):
""" This function will wait on a given pattern being shown on the last
line of a given outputfile.
OPTIONS
pattern - The string pattern to recognise when a program
finished properly.
interval - The amount of seconds to wait between checking the
log file.
epatterns - A list of string patterns to recognise when a program
has finished with an error.
"""
increasing_interval = False
if interval is None:
increasing_interval = True
interval = 10
if self.wdir != '':
stderr = "%s/%s"%(self.wdir, self.stderr)
else:
stderr = self.stderr
debug.log("\nWaiting for %s to finish..."%str(self.name))
if self.status == 'Executing':
self.update_timer(-time()) # TIME START
found = False
if self.queue is not None:
# Handling programs running on the compute servers
# Waiting for error log to be created.
# Prolonged waiting can be caused by the queue being full, or the
# server being unavailable.
debug.log(" Waiting for the error log to be created (%s)..."%(
stderr))
# Set maximum amount of seconds to wait on the errorlog creation,
# before assuming queue failure.
max_queued_time = 10800
while ( not os.path.exists(stderr)
and time()+self.timer < max_queued_time
and time()+self.timer > 0
):
debug.log(" Waiting... (max wait time left: %s seconds)"%(
str(max_queued_time-time()-self.timer)))
sleep(interval)
if increasing_interval:
interval *= 1.1
if os.path.exists(stderr):
if increasing_interval:
interval = 10
# File created looking for pattern
debug.log('\nError log created, waiting for program to finish...')
# calculate max loops left based on set walltime and check interval
max_time = time() + self.walltime * 60 * 60
while time() < max_time:
with open_(stderr) as f:
for l in f.readlines()[-5:]: # last five lines
if pattern in l:
found = True
max_time = 0
break
elif any([ep in l for ep in epatterns]):
found = False
max_time = 0
break
if max_time > 0:
debug.log(' Waiting... (max wait-time left: %s seconds)'%(
str(max_time-time())))
sleep(interval)
if found:
debug.log(" Program finished successfully!")
self.status = 'Done'
else:
debug.log("Error: Program took too long, or finished with error!")
if self.verbose:
debug.print_out(
"Technical error occurred!\n",
"The service was not able to produce a result.\n",
("Please check your settings are correct, and the file "
"type matches what you specified.\n"),
("Try again, and if the problem persists please notify the"
" technical support.\n")
)
self.status = 'Failure'
else:
debug.log(
("Error: %s still does not exist!\n")%(stderr),
("This error might be caused by the cgebase not being "
"available!")
)
if self.verbose:
debug.print_out(
"Technical error occurred!\n",
("This error might be caused by the server not being "
"available!\n"),
("Try again later, and if the problem persists please notify "
"the technical support.\n"),
"Sorry for any inconvenience.\n"
)
self.status = 'Failure'
if not self.p is None:
self.p.wait()
self.p = None
else:
# Handling wrappers running on the webserver
if self.p is None:
debug.log("Program not instanciated!")
self.status = 'Failure'
else:
ec = self.p.wait()
if ec != 0:
debug.log("Program failed on execution!")
self.status = 'Failure'
elif os.path.exists(stderr):
with open_(stderr) as f:
for l in f.readlines()[-5:]: # last five lines
if pattern in l:
found = True
break
elif any([ep in l for ep in epatterns]):
found = False
break
if found:
debug.log(" Program finished successfully!")
self.status = 'Done'
else:
debug.log("Error: Program failed to finish properly!")
if self.verbose:
debug.print_out("Technical error occurred!\n",
"The service was not able to produce a result.\n",
"Please check your settings are correct, and the file "+
"type matches what you specified.", "Try again, and if "+
"the problem persists please notify the technical "+
"support.\n")
self.status = 'Failure'
else:
debug.log(("Error: %s does not exist!\n")%(stderr),
"This error might be caused by the cgebase not being "+
"available!")
if self.verbose:
debug.print_out("Technical error occurred!\n",
"This error might be caused by the server not being "+
"available!\n", "Try again later, and if the problem "+
"persists please notify the technical support.\n",
"Sorry for any inconvenience.\n")
self.status = 'Failure'
self.p = None
self.update_timer(time()) # TIME END
debug.log(" timed: %s"%(self.get_time()))
else:
debug.log(" The check-out of the program has been sorted previously.") | This function will wait on a given pattern being shown on the last
line of a given outputfile.
OPTIONS
pattern - The string pattern to recognise when a program
finished properly.
interval - The amount of seconds to wait between checking the
log file.
epatterns - A list of string patterns to recognise when a program
has finished with an error. | entailment |
def print_stdout(self):
""" This function will read the standard out of the program and print it
"""
# First we check if the file we want to print does exists
if self.wdir != '':
stdout = "%s/%s"%(self.wdir, self.stdout)
else:
stdout = self.stdout
if os.path.exists(stdout):
with open_(stdout, 'r') as f:
debug.print_out("\n".join([line for line in f]))
else: # FILE DOESN'T EXIST
debug.log("Error: The stdout file %s does not exist!"%(stdout)) | This function will read the standard out of the program and print it | entailment |
def find_out_var(self, varnames=[]):
""" This function will read the standard out of the program, catch
variables and return the values
EG. #varname=value
"""
if self.wdir != '':
stdout = "%s/%s"%(self.wdir, self.stdout)
else:
stdout = self.stdout
response = [None]*len(varnames)
# First we check if the file we want to print does exists
if os.path.exists(stdout):
with open_(stdout, 'r') as f:
for line in f:
if '=' in line:
var = line.strip('#').split('=')
value = var[1].strip()
var = var[0].strip()
if var in varnames: response[varnames.index(var)] = value
else: # FILE DOESN'T EXIST
debug.log("Error: The stdout file %s does not exist!"%(stdout))
return response | This function will read the standard out of the program, catch
variables and return the values
EG. #varname=value | entailment |
def find_err_pattern(self, pattern):
""" This function will read the standard error of the program and return
a matching pattern if found.
EG. prog_obj.FindErrPattern("Update of mySQL failed")
"""
if self.wdir != '':
stderr = "%s/%s"%(self.wdir, self.stderr)
else:
stderr = self.stderr
response = []
# First we check if the file we want to print does exists
if os.path.exists(stderr):
with open_(stderr, 'r') as f:
for line in f:
if pattern in line:
response.append(line.strip())
else: # FILE DOESN'T EXIST
debug.log("Error: The stderr file %s does not exist!"%(stderr))
return response | This function will read the standard error of the program and return
a matching pattern if found.
EG. prog_obj.FindErrPattern("Update of mySQL failed") | entailment |
def find_out_pattern(self, pattern):
""" This function will read the standard out of the program and return
a matching pattern if found.
EG. prog_obj.find_out_pattern("Update of mySQL failed")
"""
if self.wdir != '':
stdout = "%s/%s"%(self.wdir, self.stdout)
else:
stdout = self.stdout
response = []
# First we check if the file we want to print does exists
if os.path.exists(stdout):
with open_(stdout, 'r') as f:
for line in f:
if pattern in line:
response.append(line.strip())
else: # FILE DOESN'T EXIST
debug.log("Error: The stdout file %s does not exist!"%(stdout))
return response | This function will read the standard out of the program and return
a matching pattern if found.
EG. prog_obj.find_out_pattern("Update of mySQL failed") | entailment |
def decode_nfo( buffer ):
"""Decodes a byte string in NFO format (beloved by PC scener groups) from DOS Code Page 437
to Unicode."""
assert utils.is_bytes( buffer )
return '\n'.join( [''.join( [CP437[y] for y in x] ) for x in buffer.split( b'\r\n' )] ) | Decodes a byte string in NFO format (beloved by PC scener groups) from DOS Code Page 437
to Unicode. | entailment |
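A usage sketch; CP437 and utils.is_bytes come from the surrounding module, and the byte values below are standard code page 437 box-drawing characters:

art = b'\xc9\xcd\xbb\r\n\xc8\xcd\xbc'
print(decode_nfo(art))
# ╔═╗
# ╚═╝
# For printable bytes this should agree with the standard codec, modulo newlines:
print(art.decode('cp437').replace('\r\n', '\n'))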
def runner(self):
"""
Run the necessary methods in the correct order
"""
if os.path.isfile(self.report):
self.report_parse()
else:
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Create the objects to be used in the analyses (if required)
general = None
for sample in self.runmetadata.samples:
general = getattr(sample, 'general')
if general is None:
# Create the objects to be used in the analyses
objects = Objectprep(self)
objects.objectprep()
self.runmetadata = objects.samples
# Run the analyses
MLSTmap(self, self.analysistype, self.cutoff)
# Create the reports
self.reporter()
# Print the metadata to a .json file
MetadataPrinter(self) | Run the necessary methods in the correct order | entailment |
def reporter(self):
"""
Runs the necessary methods to parse raw read outputs
"""
logging.info('Preparing reports')
# Populate self.plusdict in order to reuse parsing code from an assembly-based method
for sample in self.runmetadata.samples:
self.plusdict[sample.name] = dict()
self.matchdict[sample.name] = dict()
if sample.general.bestassemblyfile != 'NA':
for gene in sample[self.analysistype].allelenames:
self.plusdict[sample.name][gene] = dict()
for allele, percentidentity in sample[self.analysistype].results.items():
if gene in allele:
# Split the allele number from the gene name using the appropriate delimiter
if '_' in allele:
splitter = '_'
elif '-' in allele:
splitter = '-'
else:
splitter = ''
self.matchdict[sample.name].update({gene: allele.split(splitter)[-1]})
# Create the plusdict dictionary as in the assembly-based (r)MLST method. Allows all the
# parsing and sequence typing code to be reused.
try:
self.plusdict[sample.name][gene][allele.split(splitter)[-1]][percentidentity] \
= sample[self.analysistype].avgdepth[allele]
except KeyError:
self.plusdict[sample.name][gene][allele.split(splitter)[-1]] = dict()
self.plusdict[sample.name][gene][allele.split(splitter)[-1]][percentidentity] \
= sample[self.analysistype].avgdepth[allele]
if gene not in self.matchdict[sample.name]:
self.matchdict[sample.name].update({gene: 'N'})
self.profiler()
self.sequencetyper()
self.mlstreporter() | Runs the necessary methods to parse raw read outputs | entailment |
def profiler(self):
"""Creates a dictionary from the profile scheme(s)"""
logging.info('Loading profiles')
# Initialise variables
profiledata = defaultdict(make_dict)
reverse_profiledata = dict()
profileset = set()
# Find all the unique profiles to use with a set
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if sample[self.analysistype].profile != 'NA':
profileset.add(sample[self.analysistype].profile)
# Extract the profiles for each set
for sequenceprofile in profileset:
# Initialise the metadata dictionaries used for this profile scheme
if sequenceprofile not in self.meta_dict:
self.meta_dict[sequenceprofile] = dict()
reverse_profiledata[sequenceprofile] = dict()
self.meta_dict[sequenceprofile]['ND'] = dict()
# Clear the list of genes
geneset = set()
# Calculate the total number of genes in the typing scheme
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if sequenceprofile == sample[self.analysistype].profile:
geneset = {allele for allele in sample[self.analysistype].alleles}
try:
# Open the sequence profile file as a dictionary
profile = DictReader(open(sequenceprofile), dialect='excel-tab')
# Revert to standard comma separated values
except KeyError:
# Open the sequence profile file as a dictionary
profile = DictReader(open(sequenceprofile))
# Iterate through the rows
for row in profile:
# Populate the profile dictionary with profile number: {gene: allele}. Use the first field name,
# which will be either ST, or rST as the key to determine the profile number value
allele_comprehension = {gene: allele for gene, allele in row.items() if gene in geneset}
st = row[profile.fieldnames[0]]
for header, value in row.items():
value = value if value else 'ND'
if header not in geneset and header not in ['ST', 'rST']:
if st not in self.meta_dict[sequenceprofile]:
self.meta_dict[sequenceprofile][st] = dict()
if header == 'CC' or header == 'clonal_complex':
header = 'CC'
self.meta_dict[sequenceprofile][st][header] = value
self.meta_dict[sequenceprofile]['ND'][header] = 'ND'
self.meta_dict[sequenceprofile][st]['PredictedSerogroup'] = 'ND'
if header not in self.meta_headers:
self.meta_headers.append(header)
profiledata[sequenceprofile][st] = allele_comprehension
# Create a 'reverse' dictionary using the the allele comprehension as the key, and
# the sequence type as the value - can be used if exact matches are ever desired
reverse_profiledata[sequenceprofile].update({frozenset(allele_comprehension.items()): st})
# Add the profile data, and gene list to each sample
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if sequenceprofile == sample[self.analysistype].profile:
# Populate the metadata with the profile data
sample[self.analysistype].profiledata = profiledata[sample[self.analysistype].profile]
sample[self.analysistype].reverse_profiledata = reverse_profiledata[sequenceprofile]
sample[self.analysistype].meta_dict = self.meta_dict[sequenceprofile]
else:
sample[self.analysistype].profiledata = 'NA'
sample[self.analysistype].reverse_profiledata = 'NA'
sample[self.analysistype].meta_dict = 'NA' | Creates a dictionary from the profile scheme(s) | entailment |
def sequencetyper(self):
"""Determines the sequence type of each strain based on comparisons to sequence type profiles"""
logging.info('Performing sequence typing')
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if type(sample[self.analysistype].allelenames) == list:
# Initialise variables
header = 0
genome = sample.name
# Initialise self.bestmatch[genome] with an int that will eventually be replaced by the # of matches
self.bestmatch[genome] = defaultdict(int)
if sample[self.analysistype].profile != 'NA':
# Create the profiledata variable to avoid writing self.profiledata[self.analysistype]
profiledata = sample[self.analysistype].profiledata
# Calculate the number of allele matches between each sequence type and the results
best_seqtype = dict()
for sequencetype in sample[self.analysistype].profiledata:
# Initialise a counter
count = 0
# Iterate through each gene for the sequence type
for gene, refallele in sample[self.analysistype].profiledata[sequencetype].items():
# Use the gene to extract the calculated allele
allele = self.matchdict[genome][gene]
# Increment the count on a match
if refallele == allele:
count += 1
# Add the sequence type to the set of sequence types with the number of matches as the key
try:
best_seqtype[count].add(sequencetype)
except KeyError:
best_seqtype[count] = set()
best_seqtype[count].add(sequencetype)
# Find the highest number of matches from the dictionary
best = sorted(best_seqtype.items(), key=operator.itemgetter(0), reverse=True)[0][1]
# Deal with multiple allele matches
for gene in sample[self.analysistype].allelenames:
# Clear the appropriate count and lists
multiallele = list()
multipercent = list()
# Go through the alleles in plusdict
for allele in self.plusdict[genome][gene]:
percentid = list(self.plusdict[genome][gene][allele].keys())[0]
# "N" alleles screw up the allele splitter function
if allele not in ['N', 'NA']:
# Append as appropriate - alleleNumber is treated as an integer for proper sorting
multiallele.append(int(allele))
multipercent.append(percentid)
# If the allele is "N"
else:
# Append "N" and a percent identity of 0
multiallele.append("N")
multipercent.append(0)
# Populate self.bestdict with genome, gene, and the alleles joined with a space (the alleles are
# joined because multiallele is a list built from the dictionary iteration above)
try:
self.bestdict[genome][gene][" ".join(str(allele)
for allele in sorted(multiallele))] = \
multipercent[0]
except IndexError:
self.bestdict[genome][gene]['NA'] = 0
# Find the profile with the most alleles in common with the query genome
for sequencetype in best:
# The number of genes in the analysis
header = len(profiledata[sequencetype])
# refallele is the allele number of the sequence type
refallele = profiledata[sequencetype][gene]
# If there are multiple allele matches for a gene in the reference profile e.g. 10 692
if len(refallele.split(" ")) > 1:
# Map the split (on a space) alleles as integers - if they are treated as integers,
# the alleles will sort properly
intrefallele = map(int, refallele.split(" "))
# Create a string of the joined, sorted alleles
sortedrefallele = " ".join(str(allele) for allele in sorted(intrefallele))
else:
# Use the reference allele as the sortedRefAllele
sortedrefallele = refallele
for allele, percentid in self.bestdict[genome][gene].items():
# If the allele in the query genome matches the allele in the reference profile, add
# the result to the bestmatch dictionary. Genes with multiple alleles were sorted
# the same, strings with multiple alleles will match: 10 692 will never be 692 10
if allele == sortedrefallele and float(percentid) == 100.00:
# Increment the number of matches to each profile
self.bestmatch[genome][sequencetype] += 1
# Special handling of BACT000060 and BACT000065 genes for E. coli and BACT000014
# for Listeria. When the reference profile has an allele of 'N', and the query
# allele doesn't, set the allele to 'N', and count it as a match
elif sortedrefallele == 'N' and allele != 'N':
# Increment the number of matches to each profile
self.bestmatch[genome][sequencetype] += 1
# Consider cases with multiple allele matches
elif len(allele.split(' ')) > 1:
# Also increment the number of matches if one of the alleles matches the
# reference allele e.g. 41 16665 will match either 41 or 16665
if sortedrefallele != 'N' and allele != 'N':
match = False
for sub_allele in allele.split(' '):
if sub_allele == refallele:
match = True
if match:
# Increment the number of matches to each profile
self.bestmatch[genome][sequencetype] += 1
elif allele == sortedrefallele and sortedrefallele == 'N':
# Increment the number of matches to each profile
self.bestmatch[genome][sequencetype] += 1
# Get the best number of matches
# From: https://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
try:
sortedmatches = sorted(self.bestmatch[genome].items(), key=operator.itemgetter(1),
reverse=True)[0][1]
# If there are no matches, set :sortedmatches to zero
except IndexError:
sortedmatches = 0
# Otherwise, the query profile matches the reference profile
if int(sortedmatches) == header:
# Iterate through best match
for sequencetype, matches in self.bestmatch[genome].items():
if matches == sortedmatches:
for gene in profiledata[sequencetype]:
# Populate resultProfile with the genome, best match to profile, # of matches
# to the profile, gene, query allele(s), reference allele(s), and % identity
self.resultprofile[genome][sequencetype][sortedmatches][gene][
list(self.bestdict[genome][gene]
.keys())[0]] = str(list(self.bestdict[genome][gene].values())[0])
sample[self.analysistype].sequencetype = sequencetype
sample[self.analysistype].matchestosequencetype = matches
# If there are fewer matches than the total number of genes in the typing scheme
elif 0 < int(sortedmatches) < header:
mismatches = []
# Iterate through the sequence types and the number of matches in bestDict for each genome
for sequencetype, matches in self.bestmatch[genome].items():
# If the number of matches for a profile matches the best number of matches
if matches == sortedmatches:
# Iterate through the gene in the analysis
for gene in profiledata[sequencetype]:
# Get the reference allele as above
refallele = profiledata[sequencetype][gene]
# As above get the reference allele split and ordered as necessary
if len(refallele.split(" ")) > 1:
intrefallele = map(int, refallele.split(" "))
sortedrefallele = " ".join(str(allele) for allele in sorted(intrefallele))
else:
sortedrefallele = refallele
# Populate self.mlstseqtype with the genome, best match to profile, # of matches
# to the profile, gene, query allele(s), reference allele(s), and % identity
if self.analysistype == 'mlst':
self.resultprofile[genome][sequencetype][sortedmatches][gene][
list(self.bestdict[genome][gene]
.keys())[0]] = str(list(self.bestdict[genome][gene].values())[0])
else:
self.resultprofile[genome][sequencetype][sortedmatches][gene][
list(self.bestdict[genome][gene].keys())[0]] \
= str(list(self.bestdict[genome][gene].values())[0])
# Record the gene if the query allele does not match the reference allele
if sortedrefallele != list(self.bestdict[sample.name][gene].keys())[0]:
mismatches.append(
({gene: ('{} ({})'.format(list(self.bestdict[sample.name][gene]
.keys())[0], sortedrefallele))}))
sample[self.analysistype].mismatchestosequencetype = mismatches
sample[self.analysistype].sequencetype = sequencetype
sample[self.analysistype].matchestosequencetype = matches
elif sortedmatches == 0:
for gene in sample[self.analysistype].allelenames:
# Populate the results profile with negative values for sequence type and sorted matches
self.resultprofile[genome]['NA'][sortedmatches][gene]['NA'] = 0
# Add the new profile to the profile file (if the option is enabled)
sample[self.analysistype].sequencetype = 'NA'
sample[self.analysistype].matchestosequencetype = 'NA'
sample[self.analysistype].mismatchestosequencetype = 'NA'
else:
sample[self.analysistype].matchestosequencetype = 'NA'
sample[self.analysistype].mismatchestosequencetype = 'NA'
sample[self.analysistype].sequencetype = 'NA'
else:
sample[self.analysistype].matchestosequencetype = 'NA'
sample[self.analysistype].mismatchestosequencetype = 'NA'
sample[self.analysistype].sequencetype = 'NA'
else:
sample[self.analysistype].matchestosequencetype = 'NA'
sample[self.analysistype].mismatchestosequencetype = 'NA'
sample[self.analysistype].sequencetype = 'NA'
# Clear out the reverse_profiledata attribute - frozen sets can not be .json encoded
try:
delattr(sample[self.analysistype], 'reverse_profiledata')
except AttributeError:
pass | Determines the sequence type of each strain based on comparisons to sequence type profiles | entailment |
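The scoring logic above can be hard to follow inside the nested loops; the toy sketch below shows the core idea (count per-profile allele matches, bucket profiles by match count, keep the best bucket) with made-up profiles and observations.
observed = {'adk': '1', 'fumC': '4', 'gyrB': '12'}
profiles = {
    '10': {'adk': '1', 'fumC': '4', 'gyrB': '12'},
    '11': {'adk': '1', 'fumC': '4', 'gyrB': '3'},
}
best_seqtype = {}
for seqtype, alleles in profiles.items():
    count = sum(1 for gene, ref in alleles.items() if observed.get(gene) == ref)
    best_seqtype.setdefault(count, set()).add(seqtype)
best = best_seqtype[max(best_seqtype)]
print(best)  # {'10'} - the profile with the most matching alleles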
def mlstreporter(self):
""" Parse the results into a report"""
logging.info('Writing reports')
# Initialise variables
header_row = str()
combinedrow = str()
combined_header_row = str()
reportdirset = set()
mlst_dict = dict()
# Populate a set of all the report directories to use. A standard analysis will only have a single report
# directory, while pipeline analyses will have as many report directories as there are assembled samples
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
# Ignore samples that lack a populated reportdir attribute
if sample[self.analysistype].reportdir != 'NA':
make_path(sample[self.analysistype].reportdir)
# Add to the set - I probably could have used a counter here, but I decided against it
reportdirset.add(sample[self.analysistype].reportdir)
# Create a report for each sample from :self.resultprofile
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
if sample[self.analysistype].reportdir != 'NA':
if type(sample[self.analysistype].allelenames) == list:
# Initialise the string
row = str()
if self.analysistype == 'mlst':
header_row = str()
try:
if sample.general.referencegenus not in mlst_dict:
mlst_dict[sample.general.referencegenus] = dict()
except AttributeError:
sample.general.referencegenus = 'ND'
mlst_dict[sample.general.referencegenus] = dict()
# Additional fields such as clonal complex and lineage
additional_fields = list()
# Only keep the metadata headers that are populated for this sample's sequence type
if self.meta_headers:
for header in self.meta_headers:
try:
_ = sample[self.analysistype].meta_dict[
sample[self.analysistype].sequencetype][header]
additional_fields.append(header.rstrip())
except (AttributeError, KeyError):
pass
if self.analysistype == 'mlst':
additional_fields = sorted(additional_fields)
# Listeria samples also receive a predicted serogroup column
try:
if sample.general.referencegenus == 'Listeria':
additional_fields.append('PredictedSerogroup')
except AttributeError:
pass
header_fields = additional_fields
else:
additional_fields = [
'genus', 'species', 'subspecies', 'lineage', 'sublineage', 'other_designation', 'notes'
]
header_fields = [
'rMLST_genus', 'species', 'subspecies', 'lineage', 'sublineage', 'other_designation',
'notes'
]
# Populate the header with the appropriate data, including all the genes in the list of targets
if not header_row:
if additional_fields:
header_row = 'Strain,MASHGenus,{additional},SequenceType,Matches,{matches},\n' \
.format(additional=','.join(header_fields),
matches=','.join(sorted(sample[self.analysistype].allelenames)))
else:
header_row = 'Strain,MASHGenus,SequenceType,Matches,{matches},\n' \
.format(matches=','.join(sorted(sample[self.analysistype].allelenames)))
# Iterate through the best sequence types for the sample
for seqtype in self.resultprofile[sample.name]:
sample[self.analysistype].sequencetype = seqtype
try:
if sample.general.referencegenus == 'Listeria':
for serogroup, mlst_list in self.listeria_serogroup_dict.items():
if seqtype in [str(string) for string in mlst_list]:
sample[self.analysistype].meta_dict[seqtype]['PredictedSerogroup'] = \
serogroup
except AttributeError:
pass
# The number of matches to the profile
sample[self.analysistype].matches = list(self.resultprofile[sample.name][seqtype].keys())[0]
# Extract the closest reference genus
try:
genus = sample.general.referencegenus
except AttributeError:
try:
genus = sample.general.closestrefseqgenus
except AttributeError:
genus = 'ND'
# If this is the first of one or more sequence types, include the sample name
if additional_fields:
row += '{name},{mashgenus},{additional},{seqtype},{matches},'\
.format(name=sample.name,
mashgenus=genus,
additional=','.join(sample[self.analysistype].
meta_dict[sample[self.analysistype]
.sequencetype][header] for header in additional_fields),
seqtype=seqtype,
matches=sample[self.analysistype].matches)
else:
row += '{name},{mashgenus},{seqtype},{matches},' \
.format(name=sample.name,
mashgenus=genus,
seqtype=seqtype,
matches=sample[self.analysistype].matches)
# Iterate through all the genes present in the analyses for the sample
for gene in sorted(sample[self.analysistype].allelenames):
refallele = sample[self.analysistype].profiledata[seqtype][gene]
# Set the allele and percent id from the dictionary's keys and values, respectively
allele = \
list(self.resultprofile[sample.name][seqtype][sample[self.analysistype].matches]
[gene].keys())[0]
percentid = \
list(self.resultprofile[sample.name][seqtype][sample[self.analysistype].matches]
[gene].values())[0]
try:
if refallele and refallele != allele:
if 0 < float(percentid) < 100:
row += '{} ({:.2f}%),'.format(allele, float(percentid))
else:
row += '{} ({}),'.format(allele, refallele)
else:
# Add the allele and % id to the row (only add the % identity if it is not 100%)
if 0 < float(percentid) < 100:
row += '{} ({:.2f}%),'.format(allele, float(percentid))
else:
row += '{},'.format(allele)
self.referenceprofile[sample.name][gene] = allele
except ValueError:
pass
# Add a newline
row += '\n'
# Add this sample's row to the combined report string
combinedrow += row
# Add the header and the row to the combined header + row string
combined_header_row += header_row
combined_header_row += row
if self.analysistype == 'mlst':
mlst_dict[sample.general.referencegenus]['header'] = header_row
try:
mlst_dict[sample.general.referencegenus]['combined_row'] += row
except KeyError:
mlst_dict[sample.general.referencegenus]['combined_row'] = str()
mlst_dict[sample.general.referencegenus]['combined_row'] += row
# If the length of the # of report directories is greater than 1 (script is being run as part of
# the assembly pipeline) make a report for each sample
if self.pipeline:
# Open the report
with open(os.path.join(sample[self.analysistype].reportdir,
'{}_{}.csv'.format(sample.name, self.analysistype)), 'w') as report:
# Write the row to the report
report.write(header_row)
report.write(row)
# Create the report folder
make_path(self.reportpath)
# Create the report containing all the data from all samples
if self.analysistype == 'mlst':
for genus in mlst_dict:
if mlst_dict[genus]['combined_row']:
with open(os.path.join(self.reportpath, '{at}_{genus}.csv'.format(at=self.analysistype,
genus=genus)), 'w') \
as mlstreport:
# Add the header
mlstreport.write(mlst_dict[genus]['header'])
# Write the results to this report
mlstreport.write(mlst_dict[genus]['combined_row'])
with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') \
as combinedreport:
# Write the results to this report
combinedreport.write(combined_header_row)
else:
with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') \
as combinedreport:
# Add the header
combinedreport.write(header_row)
# Write the results to this report
combinedreport.write(combinedrow) | Parse the results into a report | entailment |
def report_parse(self):
"""
If the pipeline has previously been run on these data, instead of reading through the results, parse the
report instead
"""
# Initialise lists
report_strains = list()
genus_list = list()
if self.analysistype == 'mlst':
for sample in self.runmetadata.samples:
try:
genus_list.append(sample.general.referencegenus)
except AttributeError:
sample.general.referencegenus = 'ND'
genus_list.append(sample.general.referencegenus)
# Read in the report
if self.analysistype == 'mlst':
for genus in genus_list:
try:
report_name = os.path.join(self.reportpath, '{at}_{genus}.csv'.format(at=self.analysistype,
genus=genus))
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
except FileNotFoundError:
report_name = self.report
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
else:
report_name = self.report
report_strains = self.report_read(report_strains=report_strains,
report_name=report_name)
# Populate strains not in the report with 'empty' GenObject with appropriate attributes
for sample in self.runmetadata.samples:
if sample.name not in report_strains:
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].sequencetype = 'ND'
sample[self.analysistype].matches = 0
sample[self.analysistype].results = dict() | If the pipeline has previously been run on these data, instead of reading through the results, parse the
report instead | entailment |
def guess_type(filename, **kwargs):
""" Utility function to call classes based on filename extension.
Useful if you are reading a file and don't know its extension.
You can pass kwargs; they are forwarded to the class only if they appear
in that class's __init__ signature.
"""
extension = os.path.splitext(filename)[1]
case = {'.xls': Xls,
'.xlsx': Xlsx,
'.csv': Csv}
if extension and case.get(extension.lower()):
low_extension = extension.lower()
new_kwargs = dict()
class_name = case.get(low_extension)
class_kwargs = inspect.getargspec(class_name.__init__).args[1:]
for kwarg in kwargs:
if kwarg in class_kwargs:
new_kwargs[kwarg] = kwargs[kwarg]
return case.get(low_extension)(filename, **new_kwargs)
else:
raise Exception('No extension found') | Utility function to call classes based on filename extension.
Useful if you are reading a file and don't know its extension.
You can pass kwargs; they are forwarded to the class only if they appear
in that class's __init__ signature. | entailment
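A possible usage of guess_type is sketched below; the file name and keyword arguments are made up, and only kwargs that appear in the matching class's __init__ are forwarded. Note that inspect.getargspec was removed in Python 3.11; inspect.getfullargspec (or inspect.signature) is the usual replacement if this helper needs to run on newer interpreters.
# Hypothetical call: 'sheet_index' is forwarded only if Xlsx.__init__ accepts it
reader = guess_type('report.xlsx', sheet_index=0, some_unrelated_kwarg=True)
# Drop-in replacement for the deprecated introspection call used above:
# class_kwargs = inspect.getfullargspec(class_name.__init__).args[1:]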
def get_gene_seqs(database_path, gene):
"""
This function takes the database path and a gene name as inputs and
returns the gene sequence contained in the file given by the gene name
"""
gene_path = database_path + "/" + gene + ".fsa"
gene_seq = ""
# Open fasta file
with open(gene_path) as gene_file:
header = gene_file.readline()
for line in gene_file:
seq = line.strip()
gene_seq += seq
return gene_seq | This function takes the database path and a gene name as inputs and
returns the gene sequence contained in the file given by the gene name | entailment |
def get_db_mutations(mut_db_path, gene_list, res_stop_codons):
"""
This function opens the file resistens-overview.txt and reads the
content into a dict of dicts. The dict contains information about
all known mutations given in the database. This dict is returned.
"""
# Open resistens-overview.txt
try:
drugfile = open(mut_db_path, "r")
except OSError:
sys.exit("Wrong path: %s"%(mut_db_path))
# Initiate variables
known_mutations = dict()
drug_genes = dict()
known_stop_codon = dict()
indelflag = False
stopcodonflag = False
# Go through the mutation file line by line
for line in drugfile:
# Ignore headers and check where the indel section starts
if line.startswith("#"):
if "indel" in line.lower():
indelflag = True
elif "stop codon" in line.lower():
stopcodonflag = True
else:
stopcodonflag = False
continue
# Ignore empty lines
if line.strip() == "":
continue
# Assert that all lines have the correct set of columns
mutation = [data.strip() for data in line.strip().split("\t")]
assert len(mutation) == 9, "mutation overview file (%s) must have 9 columns, %s"%(mut_db_path, mutation)
# Extract all info on the line (even though it is not all used)
gene_ID = mutation[0]
# Only consider mutations in genes found in the gene list
if gene_ID in gene_list:
gene_name = mutation[1]
no_of_mut = int(mutation[2])
mut_pos = int(mutation[3])
ref_codon = mutation[4]
ref_aa = mutation[5]
alt_aa = mutation[6].split(",")
res_drug = mutation[7].replace("\t", " ")
pmid = mutation[8].split(",")
# Check if resistance is known to be caused by a stop codon in the gene
if ("*" in alt_aa and res_stop_codons != 'specified') or (res_stop_codons == 'specified' and stopcodonflag == True):
if gene_ID not in known_stop_codon:
known_stop_codon[gene_ID] = {"pos": [], "drug": res_drug}
known_stop_codon[gene_ID]["pos"].append(mut_pos)
# Add genes associated with drug resistance to drug_genes dict
drug_lst = res_drug.split(",")
for drug in drug_lst:
drug = drug.upper()
if drug not in drug_genes:
drug_genes[drug] = []
if gene_ID not in drug_genes[drug]:
drug_genes[drug].append(gene_ID)
# Initiate empty dict to store relevant mutation information
mut_info = dict()
# Save the needed mutation info with the PMID corresponding to each amino acid change
for i in range(len(alt_aa)):
try:
mut_info[alt_aa[i]] = {"gene_name": gene_name, "drug": res_drug, "pmid": pmid[i]}
except IndexError:
mut_info[alt_aa[i]] = {"gene_name": gene_name, "drug": res_drug, "pmid": "-"}
# Check if more than one mutations is needed for resistance
if no_of_mut != 1:
print("More than one mutation is needed, this is not implemented", mutation)
# Add all possible types of mutations to the dict
if gene_ID not in known_mutations:
known_mutations[gene_ID] = {"sub" : dict(), "ins" : dict(), "del" : dict()}
# Check for the type of mutation
if indelflag == False:
mutation_type = "sub"
else:
mutation_type = ref_aa
# Save mutations positions with required information given in mut_info
if mut_pos not in known_mutations[gene_ID][mutation_type]:
known_mutations[gene_ID][mutation_type][mut_pos] = dict()
for aa in alt_aa:
known_mutations[gene_ID][mutation_type][mut_pos][aa] = mut_info[aa]
drugfile.close()
# Check that all genes in the gene list have known mutations
for gene in gene_list:
if gene not in known_mutations:
known_mutations[gene] = {"sub" : dict(), "ins" : dict(), "del" : dict()}
return known_mutations, drug_genes, known_stop_codon | This function opens the file resistenss-overview.txt, and reads the
content into a dict of dicts. The dict will contain information about
all known mutations given in the database. This dict is returned. | entailment |
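The shapes of the three returned structures are easiest to see with a small, made-up example (the gene names, positions, drugs and PMIDs below are illustrative only).
known_mutations = {
    'gyrA': {
        'sub': {83: {'L': {'gene_name': 'gyrA',
                           'drug': 'Ciprofloxacin',
                           'pmid': '12345678'}}},
        'ins': {},
        'del': {},
    },
}
drug_genes = {'CIPROFLOXACIN': ['gyrA']}
known_stop_codon = {'ampC': {'pos': [42], 'drug': 'Ampicillin'}}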
def KMA(inputfile_1, gene_list, kma_db, out_path, sample_name, min_cov, mapping_path):
"""
This function is called when KMA is the method of choice. The
function calls kma externally and waits for it to finish.
The kma output files with the prefixes .res and .aln are parsed
through to obtain the required alignment information. The subject
and query sequences as well as the start and stop position,
coverage, and subject length are stored in a results dictionary
which is returned in the end.
"""
# Get full path to input of output files
inputfile_1 = os.path.abspath(inputfile_1)
kma_outfile = os.path.abspath(out_path + "/kma_out_" + sample_name)
kma_cmd = "%s -i %s -t_db %s -o %s -1t1 -gapopen -5 -gapextend -2 -penalty -3 -reward 1"%(mapping_path, inputfile_1, kma_db, kma_outfile) # -ID 90
# Call KMA
os.system(kma_cmd)
# Retry once if KMA did not produce an alignment file on the first attempt
if not os.path.isfile(kma_outfile + ".aln"):
os.system(kma_cmd)
# Fetch kma output files
align_filename = kma_outfile + ".aln"
res_filename = kma_outfile + ".res"
results = dict()
# Open KMA result file
with open(res_filename, "r") as res_file:
header = res_file.readline()
# Parse through each line
for line in res_file:
data = [data.strip() for data in line.split("\t")]
gene = data[0]
# Check if the gene is one of the user-specified genes
if gene not in gene_list:
continue
# Store subject length and coverage
sbjct_len = int(data[3])
identity = float(data[6])
coverage = float(data[7])
# Result dictionary assumes that more hits can occur
if gene not in results:
hit = '1'
results[gene] = dict()
# Gene will only be there once with KMA
else:
hit = str(len(results[gene]) + 1)
results[gene][hit] = dict()
results[gene][hit]['sbjct_length'] = sbjct_len
results[gene][hit]['coverage'] = coverage / 100
results[gene][hit]["sbjct_string"] = []
results[gene][hit]["query_string"] = []
results[gene][hit]["homology"] = []
results[gene][hit]['identity'] = identity
# Open KMA alignment file
with open(align_filename, "r") as align_file:
hit_no = dict()
gene = ""
# Parse through alignments
for line in align_file:
# Check when a new gene alignment start
if line.startswith("#"):
gene = line[1:].strip()
if gene not in hit_no:
hit_no[gene] = str(1)
else:
hit_no[gene] = str(int(hit_no[gene]) + 1)
else:
# Check if gene is one of the user specified genes
if gene in results:
if hit_no[gene] not in results[gene]:
sys.exit("Unexpected database redundancy")
line_data = line.split("\t")[-1].strip()
if line.startswith("template"):
results[gene][hit_no[gene]]["sbjct_string"] += [line_data]
elif line.startswith("query"):
results[gene][hit_no[gene]]["query_string"] += [line_data]
else:
results[gene][hit_no[gene]]["homology"] += [line_data]
# Concatenate all sequence lists and find the subject start and end
seq_start_search_str = re.compile(r"^-*(\w+)")
seq_end_search_str = re.compile(r"\w+(-*)$")
for gene in gene_list:
if gene in results:
for hit in results[gene]:
results[gene][hit]['sbjct_string'] = "".join(results[gene][hit]['sbjct_string'])
results[gene][hit]['query_string'] = "".join(results[gene][hit]['query_string'])
results[gene][hit]['homology'] = "".join(results[gene][hit]['homology'])
seq_start_object = seq_start_search_str.search(results[gene][hit]['query_string'])
sbjct_start = seq_start_object.start(1) + 1
seq_end_object = seq_end_search_str.search(results[gene][hit]['query_string'])
sbjct_end = seq_end_object.start(1) + 1
results[gene][hit]['query_string'] = results[gene][hit]['query_string'][sbjct_start-1:sbjct_end-1]
results[gene][hit]['sbjct_string'] = results[gene][hit]['sbjct_string'][sbjct_start-1:sbjct_end-1]
#if sbjct_start:
results[gene][hit]["sbjct_start"] = sbjct_start
results[gene][hit]["sbjct_end"] = sbjct_end
else:
results[gene] = ""
return results | This function is called when KMA is the method of choice. The
function calls kma externally and waits for it to finish.
The kma output files with the prefixes .res and .aln are parsed
through to obtain the required alignment information. The subject
and query sequences as well as the start and stop position,
coverage, and subject length are stored in a results dictionary
which is returned in the end. | entailment |
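The external call above uses os.system; a hedged alternative with subprocess is sketched below so the return code can be checked. The executable name and paths are placeholders, and the flags simply mirror the command string built above.
import subprocess

kma_cmd = [
    'kma', '-i', '/path/to/reads.fastq', '-t_db', '/path/to/kma_db',
    '-o', '/path/to/out/kma_out_sample', '-1t1',
    '-gapopen', '-5', '-gapextend', '-2', '-penalty', '-3', '-reward', '1',
]
result = subprocess.run(kma_cmd, capture_output=True, text=True)
if result.returncode != 0:
    raise RuntimeError('kma failed: ' + result.stderr)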
def find_best_sequence(hits_found, specie_path, gene, silent_N_flag):
"""
This function takes the list hits_found as argument. This contains all
hits found for the blast search of one gene. A hit includes the subject
sequence, the query, and the start and stop position of the alignment
corresponding to the subject sequence. This function finds the best
hit by concatenating sequences of found hits. If different overlap
sequences occur these are saved in the list alternative_overlaps. The
subject and query sequence of the concatenated sequence together with
alternative overlaps and the corresponding start and stop
positions are returned.
"""
# Get information from the first hit found
all_start = hits_found[0][0]
current_end = hits_found[0][1]
final_sbjct = hits_found[0][2]
final_qry = hits_found[0][3]
sbjct_len = hits_found[0][4]
alternative_overlaps = []
# Check if more than one hit was found within the same gene
for i in range(len(hits_found)-1):
# Save information from previous hit
pre_block_start = hits_found[i][0]
pre_block_end = hits_found[i][1]
pre_sbjct = hits_found[i][2]
pre_qry = hits_found[i][3]
# Save information from next hit
next_block_start = hits_found[i+1][0]
next_block_end = hits_found[i+1][1]
next_sbjct = hits_found[i+1][2]
next_qry = hits_found[i+1][3]
# Check for overlapping sequences, collapse them and save alternative overlaps if any
if next_block_start <= current_end:
# Find overlap start and take gaps into account
pos_count = 0
overlap_pos = pre_block_start
for i in range(len(pre_sbjct)):
# Stop loop if overlap_start position is reached
if overlap_pos == next_block_start:
overlap_start = pos_count
break
if pre_sbjct[i] != "-":
overlap_pos += 1
pos_count += 1
# Find overlap length and add next sequence to final sequence
if len(pre_sbjct[overlap_start:]) > len(next_sbjct):
# <--------->
# <--->
overlap_len = len(next_sbjct)
overlap_end_pos = next_block_end
else:
# <--------->
# <--------->
overlap_len = len(pre_sbjct[overlap_start:])
overlap_end_pos = pre_block_end
# Update current end
current_end = next_block_end
# Use the entire pre sequence and add the last part of the next sequence
final_sbjct += next_sbjct[overlap_len:]
final_qry += next_qry[overlap_len:]
# Find query overlap sequences
pre_qry_overlap = pre_qry[overlap_start : (overlap_start + overlap_len)] # can work for both types of overlap
next_qry_overlap = next_qry[:overlap_len]
sbjct_overlap = next_sbjct[:overlap_len]
# If an alternative query overlap exists, save it
if pre_qry_overlap != next_qry_overlap:
print("OVERLAP WARNING:")
print(pre_qry_overlap, "\n", next_qry_overlap)
# Save alternative overlaps
alternative_overlaps += [(next_block_start, overlap_end_pos, sbjct_overlap, next_qry_overlap)]
elif next_block_start > current_end:
# <------->
# <------->
gap_size = next_block_start - current_end - 1
final_qry += "N"*gap_size
if silent_N_flag:
final_sbjct += "N"*gap_size
else:
ref_seq = get_gene_seqs(specie_path, gene)
final_sbjct += ref_seq[pre_block_end:pre_block_end+gap_size]
current_end = next_block_end
final_sbjct += next_sbjct
final_qry += next_qry
# Calculate coverage
no_call = final_qry.upper().count("N")
coverage = (current_end - all_start +1 - no_call) / float(sbjct_len)
# Calculate identity
equal = 0
not_equal = 0
for i in range(len(final_qry)):
if final_qry[i].upper() != "N":
if final_qry[i].upper() == final_sbjct[i].upper():
equal += 1
else:
not_equal += 1
identity = equal/float(equal + not_equal)
return final_sbjct, final_qry, all_start, current_end, alternative_overlaps, coverage, identity | This function takes the list hits_found as argument. This contains all
hits found for the blast search of one gene. A hit includes the subject
sequence, the query, and the start and stop position of the alignment
corresponding to the subject sequence. This function finds the best
hit by concatenating sequences of found hits. If different overlap
sequences occur these are saved in the list alternative_overlaps. The
subject and query sequence of the concatenated sequence together with
alternative overlaps and the corresponding start and stop
positions are returned. | entailment |
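The coverage and identity arithmetic at the end of the function can be checked on a toy alignment; the strings and subject length below are made up.
final_sbjct = 'ATGGCTAAA'
final_qry = 'ATGGNTAAA'    # one uncalled base ('N')
all_start, current_end, sbjct_len = 1, 9, 10
no_call = final_qry.upper().count('N')
coverage = (current_end - all_start + 1 - no_call) / float(sbjct_len)   # 0.8
equal = sum(1 for s, q in zip(final_sbjct, final_qry)
            if q.upper() != 'N' and s.upper() == q.upper())
not_equal = sum(1 for s, q in zip(final_sbjct, final_qry)
                if q.upper() != 'N' and s.upper() != q.upper())
identity = equal / float(equal + not_equal)                             # 1.0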
def find_mismatches(gene, sbjct_start, sbjct_seq, qry_seq, alternative_overlaps = []):
"""
This function finds mismatches between two sequences. Depending on
the sequence type, either the function find_codon_mismatches or
find_nucleotid_mismatches is called; if the sequence contains both
a promoter and a coding region, both functions are called. The function
can also call itself if alternative overlaps are given. All found
mismatches are returned.
"""
# Initialise the mis_matches list that will store all found mismatches
mis_matches = []
# Find mismatches in RNA genes
if gene in RNA_gene_list:
mis_matches += find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq)
else:
# Check if the gene sequence is with a promoter
regex = r"promoter_size_(\d+)(?:bp)"
promtr_gene_objt = re.search(regex, gene)
# Check for promoter sequences
if promtr_gene_objt:
# Get promoter length
promtr_len = int(promtr_gene_objt.group(1))
# Extract promoter sequence, while considering gaps
# --------agt-->----
# ---->?
if sbjct_start <= promtr_len:
#Find position in sbjct sequence where promoter ends
promtr_end = 0
nuc_count = sbjct_start - 1
for i in range(len(sbjct_seq)):
promtr_end += 1
if sbjct_seq[i] != "-":
nuc_count += 1
if nuc_count == promtr_len:
break
# Check if only a part of the promoter is found
#--------agt-->----
# ----
promtr_sbjct_start = -1
if nuc_count < promtr_len:
promtr_sbjct_start = nuc_count - promtr_len
# Get promoter part of subject and query
sbjct_promtr_seq = sbjct_seq[:promtr_end]
qry_promtr_seq = qry_seq[:promtr_end]
# For promoter part find nucleotide mis matches
mis_matches += find_nucleotid_mismatches(promtr_sbjct_start, sbjct_promtr_seq, qry_promtr_seq, promoter = True)
# Check if gene is also found
#--------agt-->----
# -----------
if (sbjct_start + len(sbjct_seq.replace("-", ""))) > promtr_len:
sbjct_gene_seq = sbjct_seq[promtr_end:]
qry_gene_seq = qry_seq[promtr_end:]
sbjct_gene_start = 1
# Find mismatches in gene part
mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_gene_seq, qry_gene_seq)
# No promoter, only gene is found
#--------agt-->----
# -----
else:
sbjct_gene_start = sbjct_start - promtr_len
# Find mismatches in gene part
mis_matches += find_codon_mismatches(sbjct_gene_start, sbjct_seq, qry_seq)
else:
# Find mismatches in gene
mis_matches += find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq)
# Find mismatches in alternative overlaps if any
for overlap in alternative_overlaps:
mis_matches += find_mismatches(gene, overlap[0], overlap[2], overlap[3])
return mis_matches | This function finds mis matches between two sequeces. Depending on the
sequence type, either the function find_codon_mismatches or
find_nucleotid_mismatches is called; if the sequence contains both
a promoter and a coding region, both functions are called. The function
can also call itself if alternative overlaps are given. All found
mismatches are returned | entailment
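The promoter handling hinges on the regex shown above; a quick check with a made-up gene name shows how the promoter length is extracted.
import re

match = re.search(r"promoter_size_(\d+)(?:bp)", "ampC_promoter_size_53bp")
promoter_length = int(match.group(1))   # 53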
def find_nucleotid_mismatches(sbjct_start, sbjct_seq, qry_seq, promoter = False):
"""
This function takes two aligned sequences (subject and query), and the
position on the subject where the alignment starts. The sequences are
compared one nucleotide at a time. If mismatches are found they are
saved. If a gap is found, the function find_nuc_indel is called to find
the entire indel, which is also saved into the list mis_matches. If
promoter sequences are given as arguments, these are reversed and the
absolute value of the sequence position is used, but when mutations
are saved, the negative position and the reversed sequences are stored in
mis_matches.
"""
# Initialise the mis_matches list that will store all found mismatches
mis_matches = []
sbjct_start = abs(sbjct_start)
seq_pos = sbjct_start
# Set variables depending on promoter status
factor = 1
mut_prefix = "r."
if promoter == True:
factor = (-1)
mut_prefix = "n."
# Reverse promoter sequences
sbjct_seq = sbjct_seq[::-1]
qry_seq = qry_seq[::-1]
# Go through sequences one nucleotide at a time
shift = 0
for index in range(sbjct_start - 1, len(sbjct_seq)):
mut_name = mut_prefix
mut = ""
# Shift index according to gaps
i = index + shift
# If the end of the sequence is reached, stop
if i == len(sbjct_seq):
break
sbjct_nuc = sbjct_seq[i]
qry_nuc = qry_seq[i]
# Check for mis matches
if sbjct_nuc.upper() != qry_nuc.upper():
# check for insertions and deletions
if sbjct_nuc == "-" or qry_nuc == "-":
if sbjct_nuc == "-":
mut = "ins"
indel_start_pos = (seq_pos -1) *factor
indel_end_pos = seq_pos * factor
indel = find_nuc_indel(sbjct_seq[i:], qry_seq[i:])
else:
mut = "del"
indel_start_pos = seq_pos * factor
indel = find_nuc_indel(qry_seq[i:], sbjct_seq[i:])
indel_end_pos = (seq_pos + len(indel) - 1) * factor
seq_pos += len(indel) - 1
# Shift the index to the end of the indel
shift += len(indel) - 1
# Write the mutation name, depending on the sequence
if len(indel) == 1 and mut == "del":
mut_name += str(indel_start_pos) + mut + indel
else:
if promoter == True:
# Reverse the sequence and the start and end positions
indel = indel[::-1]
temp = indel_start_pos
indel_start_pos = indel_end_pos
indel_end_pos = temp
mut_name += str(indel_start_pos) + "_" +str(indel_end_pos) + mut + indel
mis_matches += [[mut, seq_pos * factor, seq_pos * factor, indel, mut_name, mut, indel]]
# Check for substitutions mutations
else:
mut = "sub"
mut_name += str(seq_pos * factor) + sbjct_nuc + ">" + qry_nuc
mis_matches += [[mut, seq_pos * factor, seq_pos * factor, qry_nuc, mut_name, sbjct_nuc, qry_nuc]]
# Increment sequence position
if mut != "ins":
seq_pos += 1
return mis_matches | This function takes two alligned sequence (subject and query), and the
position on the subject where the alignment starts. The sequences are
compared one nucleotide at a time. If mismatches are found they are
saved. If a gap is found, the function find_nuc_indel is called to find
the entire indel, which is also saved into the list mis_matches. If
promoter sequences are given as arguments, these are reversed and the
absolute value of the sequence position is used, but when mutations
are saved, the negative position and the reversed sequences are stored in
mis_matches. | entailment
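For a gap-free pair of sequences the records produced above reduce to simple substitution entries; the tiny self-contained walk below mirrors that branch (1-based positions, 'r.' prefix as used for RNA genes), with made-up sequences.
sbjct = 'ACGT'
query = 'ACTT'
mis_matches = []
for pos, (s, q) in enumerate(zip(sbjct, query), start=1):
    if s != q:
        mis_matches.append(['sub', pos, pos, q, 'r.%d%s>%s' % (pos, s, q), s, q])
print(mis_matches)   # [['sub', 3, 3, 'T', 'r.3G>T', 'G', 'T']]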
def find_nuc_indel(gapped_seq, indel_seq):
"""
This function finds the entire indel missing from a gapped sequence
compared to the indel sequence. It is assumed that both sequences start
at the first position of the gap.
"""
ref_indel = indel_seq[0]
for j in range(1,len(gapped_seq)):
if gapped_seq[j] == "-":
ref_indel += indel_seq[j]
else:
break
return ref_indel | This function finds the entire indel missing in from a gapped sequence
compared to the indel sequence. It is assumed that the sequences start
with the first position of the gap. | entailment |
def aa(codon):
"""
This function converts a codon to an amino acid. If the codon is not
a valid codon, '?' is returned; otherwise the amino acid is returned.
"""
codon = codon.upper()
aa = {"ATT": "I", "ATC": "I", "ATA": "I",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L", "TTA": "L", "TTG": "L",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"TTT": "F", "TTC": "F",
"ATG": "M",
"TGT": "C", "TGC": "C",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"TAT": "Y", "TAC": "Y",
"TGG": "W",
"CAA": "Q", "CAG": "Q",
"AAT": "N", "AAC": "N",
"CAT": "H", "CAC": "H",
"GAA": "E", "GAG": "E",
"GAT": "D", "GAC": "D",
"AAA": "K", "AAG": "K",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"TAA": "*", "TAG": "*", "TGA": "*"}
# Translate valid codon
try:
amino_a = aa[codon]
except KeyError:
amino_a = "?"
return amino_a | This function converts a codon to an amino acid. If the codon is not
a valid codon, '?' is returned; otherwise the amino acid is returned. | entailment
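A few quick checks of the codon table, assuming aa() as defined above is in scope:
assert aa('ATG') == 'M'
assert aa('taa') == '*'    # lower case is accepted because the codon is upper-cased first
assert aa('NNN') == '?'    # unknown codons fall through to '?'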
def get_codon(seq, codon_no, start_offset):
"""
This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
"""
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
return codon | This function takes a sequece and a codon number and returns the codon
found in the sequence at that position | entailment |
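A usage sketch, assuming get_codon() as defined above is in scope; gaps are stripped before the codon is sliced out.
seq = 'ATG-GCTAAA'
assert get_codon(seq, 1, 0) == 'ATG'
assert get_codon(seq, 2, 0) == 'GCT'   # second codon, no start offset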
def name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset):
"""
This function is used to name an insertion mutation based on the HGVS
recommendation.
"""
start_codon_no = codon_no - 1
if len(sbjct_nucs) == 3:
start_codon_no = codon_no
start_codon = get_codon(sbjct_seq, start_codon_no, start_offset)
end_codon = get_codon(sbjct_seq, codon_no, start_offset)
pos_name = "p.%s%d_%s%dins%s"%(aa(start_codon), start_codon_no, aa(end_codon), codon_no, aa_alt)
return pos_name | This function is used to name a insertion mutation based on the HGVS
recommendation. | entailment |
def name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, start_offset):
"""
This function serves to name the individual mutations depending on
the type of the mutation.
"""
# Get the subject and query sequences without gaps
sbjct_nucs = sbjct_rf_indel.replace("-", "")
qry_nucs = qry_rf_indel.replace("-", "")
# Translate nucleotides to amino acids
aa_ref = ""
aa_alt = ""
for i in range(0, len(sbjct_nucs), 3):
aa_ref += aa(sbjct_nucs[i:i+3])
for i in range(0, len(qry_nucs), 3):
aa_alt += aa(qry_nucs[i:i+3])
# Identify the gapped sequence
if mut == "ins":
gapped_seq = sbjct_rf_indel
else:
gapped_seq = qry_rf_indel
gap_size = gapped_seq.count("-")
# Write mutation names
if gap_size < 3 and len(sbjct_nucs) ==3 and len(qry_nucs) == 3:
# Write mutation name for substitution mutation
mut_name = "p.%s%d%s"%(aa(sbjct_nucs), codon_no, aa(qry_nucs))
elif len(gapped_seq) == gap_size:
if mut == "ins":
# Write mutation name for insertion mutation
mut_name = name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset)
aa_ref = mut
else:
# Write mutation name for deletion mutation
mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "del")
aa_alt = mut
# Check for delins - mix of insertion and deletion
else:
# Write mutation name for a mixed insertion and deletion mutation
mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "delins")
# Check for frameshift
if gapped_seq.count("-")%3 != 0:
# Add the frameshift tag to mutation name
mut_name += " - Frameshift"
return mut_name, aa_ref, aa_alt | This function serves to name the individual mutations dependently on
the type of the mutation. | entailment |
def get_inframe_gap(seq, nucs_needed = 3):
"""
This function takes a sequence starting with a gap or the complementary
sequence to the gap, and the number of nucleotides that the sequence
should contain in order to maintain the correct reading frame. The
sequence is walked through and the number of non-gap characters is
counted. When that number reaches the number of needed nucleotides
the indel is returned. If the indel is a 'clean' insertion or deletion
that starts at the first position of a codon and can be divided by 3, then only
the gap is returned.
"""
nuc_count = 0
gap_indel = ""
nucs = ""
for i in range(len(seq)):
# Check if the character is not a gap
if seq[i] != "-":
# Check if the indel is a 'clean' one,
# i.e. if the insert or deletion starts at the first nucleotide in the codon and can be divided by 3
if gap_indel.count("-") == len(gap_indel) and gap_indel.count("-") >= 3 and len(gap_indel) != 0:
return gap_indel
nuc_count += 1
gap_indel += seq[i]
# If the number of nucleotides in the indel equals the amount needed for the indel, the indel is returned.
if nuc_count == nucs_needed:
return gap_indel
# This will only happen if the gap is in the very end of a sequence
return gap_indel | This funtion takes a sequnece starting with a gap or the complementary
sequence to the gap, and the number of nucleotides that the sequence
should contain in order to maintain the correct reading frame. The
sequence is walked through and the number of non-gap characters is
counted. When that number reaches the number of needed nucleotides
the indel is returned. If the indel is a 'clean' insertion or deletion
that starts at the first position of a codon and can be divided by 3, then only
the gap is returned. | entailment |
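Two behaviour checks, assuming get_inframe_gap() as defined above is in scope:
assert get_inframe_gap('--ATGCC', 3) == '--ATG'   # keeps padding until 3 nucleotides are seen
assert get_inframe_gap('---ATG', 3) == '---'      # a 'clean' gap of length 3 is returned alone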
def get_indels(sbjct_seq, qry_seq, start_pos):
"""
This function uses a regex to find insertions and deletions in the sequences
given as arguments. A list of these indels is returned. Each entry
includes the type of mutation (ins/del), the subject codon number of the
mutation, the subject sequence position, the inserted/deleted nucleotide
sequence, and the affected query codon number.
"""
seqs = [sbjct_seq, qry_seq]
indels = []
gap_obj = re.compile(r"-+")
for i in range(len(seqs)):
for match in gap_obj.finditer(seqs[i]):
pos = int(match.start())
gap = match.group()
# Find position of the mutation corresponding to the subject sequence
sbj_pos = len(sbjct_seq[:pos].replace("-","")) + start_pos
# Get indel sequence and the affected sequences in sbjct and qry in the reading frame
indel = seqs[abs(i-1)][pos:pos+len(gap)]
# Find codon number for mutation
codon_no = int(math.ceil((sbj_pos)/3))
qry_pos = len(qry_seq[:pos].replace("-","")) + start_pos
qry_codon = int(math.ceil((qry_pos)/3))
if i == 0:
mut = "ins"
else:
mut = "del"
indels.append( [mut, codon_no, sbj_pos, indel, qry_codon])
# Sort indels based on codon position and sequence position
indels = sorted(indels, key = lambda x:(x[1],x[2]))
return indels | This function uses regex to find inserts and deletions in sequences
given as arguments. A list of these indels is returned. Each entry
includes the type of mutation (ins/del), the subject codon number of the
mutation, the subject sequence position, the inserted/deleted nucleotide
sequence, and the affected query codon number. | entailment
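A toy run, assuming get_indels() as defined above is in scope (with re and math imported); the single gap in the subject is reported as an insertion taken from the query.
sbjct = 'ATG---AAA'
qry = 'ATGCCCAAA'
print(get_indels(sbjct, qry, 1))
# [['ins', 2, 4, 'CCC', 2]]  -> type, sbjct codon no, sbjct pos, inserted bases, qry codon no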
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
"""
This function takes two aligned sequences (subject and query), and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mismatch is found it is saved in
'mis_matches'. If a gap is found, the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function name_indel_mutation is used to name indel
mutations and translate the indels to amino acids.
The function returns a list of lists containing all the information needed
to look the mutation up in the dict of known mutations and to report it
in the output files for the user.
"""
mis_matches = []
# Find start pos of first codon in frame, i_start
codon_offset = (sbjct_start-1) % 3
i_start = 0
if codon_offset != 0:
i_start = 3 - codon_offset
sbjct_start = sbjct_start + i_start
# Set sequences in frame
sbjct_seq = sbjct_seq[i_start:]
qry_seq = qry_seq[i_start:]
# Find codon number of the first codon in the sequence, start at 0
codon_no = int((sbjct_start-1) / 3) # 1,2,3 start on 0
# s_shift and q_shift are used when gaps appears
q_shift = 0
s_shift = 0
mut_no = 0
# Find inserts and deletions in sequence
indel_no = 0
indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
# Go through sequence and save mutations when found
for index in range(0, len(sbjct_seq), 3):
# Count codon number
codon_no += 1
# Shift index according to gaps
s_i = index + s_shift
q_i = index + q_shift
# Get codons
sbjct_codon = sbjct_seq[s_i:s_i+3]
qry_codon = qry_seq[q_i:q_i+3]
if len(sbjct_seq[s_i:].replace("-","")) + len(qry_seq[q_i:].replace("-","")) < 6:
break
# Check for mutations
if sbjct_codon.upper() != qry_codon.upper():
# Check for codon insertions and deletions and frameshift mutations
if "-" in sbjct_codon or "-" in qry_codon:
# Get indel info
try:
indel_data = indels[indel_no]
except IndexError:
print(sbjct_codon, qry_codon)
print(indels)
sys.exit("indel list is out of range at indel_no %d, bug!" % indel_no)
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
# Get the affected sequence in frame for both for sbjct and qry
if mut == "ins":
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel)/3) *3))
else:
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel)/3) *3))
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
# Set index to the correct reading frame after the indel gap
shift_diff_before = abs(s_shift - q_shift)
s_shift += len(sbjct_rf_indel) - 3
q_shift += len(qry_rf_indel) - 3
shift_diff = abs(s_shift - q_shift)
if shift_diff_before != 0 and shift_diff %3 == 0:
if s_shift > q_shift:
nucs_needed = int((len(sbjct_rf_indel)/3) *3) + shift_diff
pre_qry_indel = qry_rf_indel
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
q_shift += len(qry_rf_indel) - len(pre_qry_indel)
elif q_shift > s_shift:
nucs_needed = int((len(qry_rf_indel)/3)*3) + shift_diff
pre_sbjct_indel = sbjct_rf_indel
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
if "Frameshift" in mut_name:
mut_name = mut_name.split("-")[0] + "- Frame restored"
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Check if the next mutation in the indels list is in the current codon
# Find the number of individual gaps in the evaluated sequence
no_of_indels = len(re.findall(r"-\w", sbjct_rf_indel)) + len(re.findall(r"-\w", qry_rf_indel))
if no_of_indels > 1:
for j in range(indel_no, indel_no + no_of_indels - 1):
try:
indel_data = indels[j]
except IndexError:
sys.exit("indel_data list is out of range, bug!")
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Set codon number, and save nucleotides from out of frame mutations
if mut == "del":
codon_no += int((len(sbjct_rf_indel) - 3)/3)
# If evaluated insert is only gaps codon_no should not increment
elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
codon_no -= 1
# Check of point mutations
else:
mut = "sub"
aa_ref = aa(sbjct_codon)
aa_alt = aa(qry_codon)
if aa_ref != aa_alt:
# End search for mutation if a premature stop codon is found
mut_name = "p." + aa_ref + str(codon_no) + aa_alt
mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]]
# If a premature stop codon occurs, report it and stop the loop
try:
if mis_matches[-1][-1] == "*":
mut_name += " - Premature stop codon"
mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
break
except IndexError:
pass
# Sort mutations on position
mis_matches = sorted(mis_matches, key = lambda x:x[1])
return mis_matches | This function takes two alligned sequence (subject and query), and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mismatch is found it is saved in
'mis_matches'. If a gap is found, the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function name_indel_mutation is used to name indel
mutations and translate the indels to amino acids.
The function returns a list of lists containing all the information needed
to look the mutation up in the dict of known mutations and to report it
in the output files for the user. | entailment
def write_output(gene, gene_name, mis_matches, known_mutations, known_stop_codon, unknown_flag, GENES):
"""
This function takes a gene name, a list of mismatches found between the subject and query of
this gene, the dictionary of known mutations in the PointFinder database, and the flag telling
whether the user wants unknown mutations to be reported.
All mismatches are looked up in the known-mutation dict to see if the mutation is known,
and in that case which drug resistance it causes.
The function returns three strings that are used as output to the user.
One string is only tab separated and contains the mutations listed line by line.
If the unknown flag is set to true it will contain both known and unknown mutations.
The next string contains only known mutations and is given in a format that is easy to
convert to HTML. The last string is the HTML tab string for the unknown mutations.
"""
RNA = False
known_header = "Mutation\tNucleotide change\tAmino acid change\tResistance\tPMID\n"
unknown_header = "Mutation\tNucleotide change\tAmino acid change\n"
if gene in RNA_gene_list:
RNA = True
known_header = "Mutation\tNucleotide change\tResistance\tPMID\n"
unknown_header = "Mutation\tNucleotide change\n"
known_lst = []
unknown_lst = []
all_results_lst = []
output_mut = []
stop_codons = []
# Go through each mutation
for i in range(len(mis_matches)):
m_type = mis_matches[i][0]
pos = mis_matches[i][1] # sort on pos?
look_up_pos = mis_matches[i][2]
look_up_mut = mis_matches[i][3]
mut_name = mis_matches[i][4]
nuc_ref = mis_matches[i][5]
nuc_alt = mis_matches[i][6]
ref = mis_matches[i][-2]
alt = mis_matches[i][-1]
# First index in list indicates if mutation is known
output_mut += [[]]
#output_mut[i] = [0]
# Define output variables
codon_change = nuc_ref + " -> " + nuc_alt
aa_change = ref + " -> " + alt
if RNA == True:
aa_change = "RNA mutations"
elif pos < 0:
aa_change = "Promoter mutations"
# Check if mutation is known
gene_mut_name, resistence, pmid = look_up_known_muts(known_mutations, known_stop_codon, gene, look_up_pos, look_up_mut, m_type, gene_name, mut_name)
gene_mut_name = gene_mut_name + " " + mut_name
output_mut[i] = [gene_mut_name, codon_change, aa_change, resistence, pmid]
# Add mutation to output strings for known mutations
if resistence != "Unknown":
if RNA == True:
# don't include the amino acid change field for RNA mutations
known_lst += ["\t".join(output_mut[i][:2]) + "\t" + "\t".join(output_mut[i][3:])]
else:
known_lst += ["\t".join(output_mut[i])]
all_results_lst += ["\t".join(output_mut[i])]
# Add mutation to output strings for unknown mutations
else:
if RNA == True:
unknown_lst += ["\t".join(output_mut[i][:2])]
else:
unknown_lst += ["\t".join(output_mut[i][:3])]
if unknown_flag == True:
all_results_lst += ["\t".join(output_mut[i])]
# Check that you do not print two equal lines (can happen if two indels occur in the same codon)
if len(output_mut) > 1:
if output_mut[i] == output_mut[i-1]:
if resistence != "Unknown":
known_lst = known_lst[:-1]
all_results_lst = all_results_lst[:-1]
else:
unknown_lst = unknown_lst[:-1]
if unknown_flag == True:
all_results_lst = all_results_lst[:-1]
if "Premature stop codon" in mut_name:
sbjct_len = GENES[gene]['sbjct_len']
qry_len = pos * 3
perc_truncation = round(((float(sbjct_len) - qry_len) / float(sbjct_len)) * 100, 2)
perc = "%"
stop_codons.append("Premature stop codon in %s, %.2f%s lost" % (gene, perc_truncation, perc))
# Create the final strings
all_results = "\n".join(all_results_lst)
total_known_str = ""
total_unknown_str = ""
# Check if there are only unknown mutations
resistence_lst = [res for mut in output_mut for res in mut[3].split(",")]
# Save known mutations
unknown_no = resistence_lst.count("Unknown")
if unknown_no < len(resistence_lst):
total_known_str = known_header + "\n".join(known_lst)
else:
total_known_str = "No known mutations found in %s"%gene_name
# Save unknown mutations
if unknown_no > 0:
total_unknown_str = unknown_header + "\n".join(unknown_lst)
else:
total_unknown_str = "No unknown mutations found in %s"%gene_name
return all_results, total_known_str, total_unknown_str, resistence_lst + stop_codons | This function takes a gene name a list of mis matches found betreewn subject and query of
this gene, the dictionary of known mutations in the PointFinder database, and the flag telling
whether the user wants unknown mutations to be reported.
All mismatches are looked up in the known-mutation dict to see if the mutation is known,
and in that case which drug resistance it causes.
The function returns three strings that are used as output to the user.
One string is only tab separated and contains the mutations listed line by line.
If the unknown flag is set to true it will contain both known and unknown mutations.
The next string contains only known mutations and is given in a format that is easy to
convert to HTML. The last string is the HTML tab string for the unknown mutations. | entailment
def merge(self):
"""Try merging all the bravado_core models across all loaded APIs. If
duplicates occur, use the same bravado-core model to represent each, so
bravado-core won't treat them as different models when passing them
# from one PyMacaron client stub to another or when returning them via the
PyMacaron server stub.
"""
# The sole purpose of this method is to trick isinstance to return true
# on model_values of the same kind but different apis/specs at:
# https://github.com/Yelp/bravado-core/blob/4840a6e374611bb917226157b5948ee263913abc/bravado_core/marshal.py#L160
log.info("Merging models of apis " + ", ".join(apis.keys()))
# model_name => (api_name, model_json_def, bravado_core.model.MODELNAME)
models = {}
# First pass: find duplicate and keep only one model of each (fail if
# duplicates have same name but different definitions)
for api_name, api in apis.items():
for model_name, model_def in api.api_spec.swagger_dict['definitions'].items():
if model_name in models:
other_api_name, other_model_def, _ = models.get(model_name)
log.debug("Model %s in %s is a duplicate of one in %s" % (model_name, api_name, other_api_name))
if ApiPool._cmp_models(model_def, other_model_def) != 0:
raise MergeApisException("Cannot merge apis! Model %s exists in apis %s and %s but have different definitions:\n[%s]\n[%s]"
% (model_name, api_name, other_api_name, pprint.pformat(model_def), pprint.pformat(other_model_def)))
else:
models[model_name] = (api_name, model_def, api.api_spec.definitions[model_name])
# Second pass: patch every models and replace with the one we decided
# to keep
log.debug("Patching api definitions to remove all duplicates")
for api_name, api in apis.items():
for model_name in api.api_spec.definitions.keys():
_, _, model_class = models.get(model_name)
api.api_spec.definitions[model_name] = model_class | Try merging all the bravado_core models across all loaded APIs. If
duplicates occur, use the same bravado-core model to represent each, so
bravado-core won't treat them as different models when passing them
from one PyMacaron client stub to another or when returning them via the
PyMacaron server stub. | entailment |
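The two-pass pattern above can be illustrated on plain dictionaries; this is a sketch with toy definitions, not real bravado-core specs.

# Pass 1: pick one canonical definition per model name (fail on conflicts).
# Pass 2: rewrite every API so identical names share the same object, which
# is what makes isinstance checks succeed across client/server stubs.
apis = {
    "search": {"Pet": {"type": "object", "properties": {"name": {"type": "string"}}}},
    "store": {"Pet": {"type": "object", "properties": {"name": {"type": "string"}}}},
}
canonical = {}
for api_name, definitions in apis.items():
    for model_name, model_def in definitions.items():
        if model_name in canonical and canonical[model_name] != model_def:
            raise ValueError("Conflicting definitions for %s" % model_name)
        canonical.setdefault(model_name, model_def)
for definitions in apis.values():
    for model_name in definitions:
        definitions[model_name] = canonical[model_name]
assert apis["search"]["Pet"] is apis["store"]["Pet"]  # one shared object after merging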
def _cmp_models(self, m1, m2):
"""Compare two models from different swagger APIs and tell if they are
equal (return 0), or not (return != 0)"""
# Don't alter m1/m2 by mistake
m1 = copy.deepcopy(m1)
m2 = copy.deepcopy(m2)
# Remove keys added by bravado-core
def _cleanup(d):
"""Remove all keys in the blacklist"""
for k in ('x-model', 'x-persist', 'x-scope'):
if k in d:
del d[k]
for v in list(d.values()):
if isinstance(v, dict):
_cleanup(v)
_cleanup(m1)
_cleanup(m2)
# log.debug("model1:\n" + pprint.pformat(m1))
# log.debug("model2:\n" + pprint.pformat(m2))
return not m1 == m2 | Compare two models from different swagger APIs and tell if they are
equal (return 0), or not (return != 0) | entailment |
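A self-contained version of the compare-after-cleanup idea; the blacklist keys are taken from the code above, everything else is illustrative.

import copy

def models_differ(m1, m2, blacklist=("x-model", "x-persist", "x-scope")):
    """Return True if two definitions differ once bravado-core bookkeeping keys are stripped."""
    m1, m2 = copy.deepcopy(m1), copy.deepcopy(m2)
    def _cleanup(d):
        for k in blacklist:
            d.pop(k, None)
        for v in d.values():
            if isinstance(v, dict):
                _cleanup(v)
    _cleanup(m1)
    _cleanup(m2)
    return m1 != m2

a = {"type": "object", "x-model": "Pet",
     "properties": {"id": {"type": "integer", "x-scope": ["spec_a"]}}}
b = {"type": "object", "properties": {"id": {"type": "integer"}}}
print(models_differ(a, b))  # False: they only differ in bravado-core keys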
def sync(self):
"""Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
This task type instance after syncing.
"""
self = self.manager.get(id=self.id)
return self | Sync this model with latest data on the saltant server.
Note that in addition to returning the updated object, it also
updates the existing object.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
This task type instance after syncing. | entailment |
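A usage sketch for sync(); the import path, client construction, and manager attribute are assumptions about how saltant-py is typically used, not taken from this file.

from saltant.client import Client  # assumed import path

client = Client(base_api_url="https://example.com/api/", auth_token="...")  # placeholder credentials
task_type = client.executable_task_types.get(id=42)  # assumed manager attribute
# ... someone edits the task type through the web interface ...
task_type = task_type.sync()  # rebind to the freshly fetched server-side state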
def create(
self,
name,
command_to_run,
description="",
environment_variables=None,
required_arguments=None,
required_arguments_default_values=None,
extra_data_to_post=None,
):
"""Create a task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
            values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
        :class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
just created.
"""
# Set None for optional list and dicts to proper datatypes
if environment_variables is None:
environment_variables = []
if required_arguments is None:
required_arguments = []
if required_arguments_default_values is None:
required_arguments_default_values = {}
# Create the object
request_url = self._client.base_api_url + self.list_url
data_to_post = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_post is not None:
data_to_post.update(extra_data_to_post)
response = self._client.session.post(request_url, data=data_to_post)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_201_CREATED,
)
# Return a model instance representing the task type
return self.response_data_to_model_instance(response.json()) | Create a task type.
Args:
name (str): The name of the task.
command_to_run (str): The command to run to execute the task.
description (str, optional): The description of the task type.
environment_variables (list, optional): The environment
variables required on the host to execute the task.
required_arguments (list, optional): The argument names for
the task type.
required_arguments_default_values (dict, optional): Default
            values for the task's required arguments.
extra_data_to_post (dict, optional): Extra key-value pairs
to add to the request data. This is useful for
subclasses which require extra parameters.
Returns:
        :class:`saltant.models.base_task_type.BaseTaskType`:
A task type model instance representing the task type
just created. | entailment |
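A usage sketch for create(), continuing the assumptions above; the client and manager names are illustrative, only the keyword arguments mirror the signature shown here.

new_type = client.executable_task_types.create(  # assumed manager attribute
    name="fastqc",
    command_to_run="fastqc {input_path}",
    description="Run FastQC on a single input file",
    environment_variables=["AWS_SECRET_ACCESS_KEY"],
    required_arguments=["input_path"],
    required_arguments_default_values={"input_path": "/data/sample.fastq"},
)
print(new_type.id)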
def put(
self,
id,
name,
description,
command_to_run,
environment_variables,
required_arguments,
required_arguments_default_values,
extra_data_to_put=None,
):
"""Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
            the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A :class:`saltant.models.base_task_type.BaseTaskType`
subclass instance representing the task type just
updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"command_to_run": command_to_run,
"environment_variables": json.dumps(environment_variables),
"required_arguments": json.dumps(required_arguments),
"required_arguments_default_values": json.dumps(
required_arguments_default_values
),
}
# Add in extra data if any was passed in
if extra_data_to_put is not None:
data_to_put.update(extra_data_to_put)
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
        # Return a model instance representing the task type
return self.response_data_to_model_instance(response.json()) | Updates a task type on the saltant server.
Args:
id (int): The ID of the task type.
name (str): The name of the task type.
description (str): The description of the task type.
command_to_run (str): The command to run to execute the task.
environment_variables (list): The environment variables
required on the host to execute the task.
required_arguments (list): The argument names for the task type.
required_arguments_default_values (dict): Default values for
            the task's required arguments.
extra_data_to_put (dict, optional): Extra key-value pairs to
add to the request data. This is useful for subclasses
which require extra parameters.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A :class:`saltant.models.base_task_type.BaseTaskType`
subclass instance representing the task type just
updated. | entailment |
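A matching sketch for put(); unlike create(), every field must be supplied because the whole resource is replaced. Names continue the hypothetical example above.

updated = client.executable_task_types.put(  # assumed manager attribute
    id=new_type.id,
    name="fastqc",
    description="Run FastQC on a single input file (quiet mode)",
    command_to_run="fastqc --quiet {input_path}",
    environment_variables=["AWS_SECRET_ACCESS_KEY"],
    required_arguments=["input_path"],
    required_arguments_default_values={"input_path": "/data/sample.fastq"},
)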
def response_data_to_model_instance(self, response_data):
"""Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
            response data.
"""
# Coerce datetime strings into datetime objects
response_data["datetime_created"] = dateutil.parser.parse(
response_data["datetime_created"]
)
# Instantiate a model for the task instance
return super(
BaseTaskTypeManager, self
).response_data_to_model_instance(response_data) | Convert response data to a task type model.
Args:
response_data (dict): The data from the request's response.
Returns:
:class:`saltant.models.base_task_type.BaseTaskType`:
A model instance representing the task type from the
            response data.
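The only transformation this hook adds on top of the base class is the datetime coercion; a minimal illustration with made-up response data:

import dateutil.parser

response_data = {"id": 7, "name": "fastqc",
                 "datetime_created": "2018-06-02T21:43:32.538741Z"}
response_data["datetime_created"] = dateutil.parser.parse(
    response_data["datetime_created"])
print(type(response_data["datetime_created"]))  # <class 'datetime.datetime'>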
def regression():
"""
Run regression testing - lint and then run all tests.
"""
# HACK: Start using hitchbuildpy to get around this.
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook = _storybook({}).only_uninherited()
#storybook.with_params(**{"python version": "2.7.10"})\
#.ordered_by_name().play()
Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
lint() | Run regression testing - lint and then run all tests. | entailment |
def deploy(version):
"""
Deploy to pypi as specified version.
"""
NAME = "pathquery"
git = Command("git").in_dir(DIR.project)
version_file = DIR.project.joinpath("VERSION")
old_version = version_file.bytes().decode('utf8')
if version_file.bytes().decode("utf8") != version:
DIR.project.joinpath("VERSION").write_text(version)
git("add", "VERSION").run()
git("commit", "-m", "RELEASE: Version {0} -> {1}".format(
old_version,
version
)).run()
git("push").run()
git("tag", "-a", version, "-m", "Version {0}".format(version)).run()
git("push", "origin", version).run()
else:
git("push").run()
# Set __version__ variable in __init__.py, build sdist and put it back
initpy = DIR.project.joinpath(NAME, "__init__.py")
original_initpy_contents = initpy.bytes().decode('utf8')
initpy.write_text(
original_initpy_contents.replace("DEVELOPMENT_VERSION", version)
)
python("setup.py", "sdist").in_dir(DIR.project).run()
initpy.write_text(original_initpy_contents)
# Upload to pypi
python(
"-m", "twine", "upload", "dist/{0}-{1}.tar.gz".format(NAME, version)
).in_dir(DIR.project).run() | Deploy to pypi as specified version. | entailment |
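The temporary version-stamping trick in deploy() can be shown in isolation; the file below is a throwaway stand-in for the package's __init__.py, and pathlib replaces the path.py helpers used above.

import tempfile
from pathlib import Path

initpy = Path(tempfile.mkdtemp()) / "__init__.py"   # stand-in for mypackage/__init__.py
initpy.write_text('__version__ = "DEVELOPMENT_VERSION"\n')

original = initpy.read_text()
try:
    initpy.write_text(original.replace("DEVELOPMENT_VERSION", "1.2.3"))
    # deploy() runs "python setup.py sdist" here, against the stamped tree
    print(initpy.read_text().strip())  # __version__ = "1.2.3"
finally:
    initpy.write_text(original)        # the working tree keeps the placeholder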
def hvenvup(package, directory):
"""
Install a new version of a package in the hitch venv.
"""
pip = Command(DIR.gen.joinpath("hvenv", "bin", "pip"))
pip("uninstall", package, "-y").run()
pip("install", DIR.project.joinpath(directory).abspath()).run() | Install a new version of a package in the hitch venv. | entailment |
def set_up(self):
"""Set up your applications and the test environment."""
self.path.state = self.path.gen.joinpath("state")
if self.path.state.exists():
self.path.state.rmtree(ignore_errors=True)
self.path.state.mkdir()
if self.path.gen.joinpath("q").exists():
self.path.gen.joinpath("q").remove()
for filename, text in self.given.get("files", {}).items():
filepath = self.path.state.joinpath(filename)
if not filepath.dirname().exists():
filepath.dirname().makedirs()
filepath.write_text(text)
for filename, linkto in self.given.get("symlinks", {}).items():
filepath = self.path.state.joinpath(filename)
linktopath = self.path.state.joinpath(linkto)
linktopath.symlink(filepath)
for filename, permission in self.given.get("permissions", {}).items():
filepath = self.path.state.joinpath(filename)
filepath.chmod(int(permission, 8))
pylibrary = hitchbuildpy.PyLibrary(
name="py3.5.0",
base_python=hitchbuildpy.PyenvBuild("3.5.0").with_build_path(self.path.share),
module_name="pathquery",
library_src=self.path.project,
).with_build_path(self.path.gen)
pylibrary.ensure_built()
self.python = pylibrary.bin.python
self.example_py_code = ExamplePythonCode(self.python, self.path.state)\
.with_code(self.given.get('code', ''))\
.with_setup_code(self.given.get('setup', ''))\
.with_terminal_size(160, 100)\
.with_env(TMPDIR=self.path.gen)\
.with_long_strings(
yaml_snippet_1=self.given.get('yaml_snippet_1'),
yaml_snippet=self.given.get('yaml_snippet'),
yaml_snippet_2=self.given.get('yaml_snippet_2'),
modified_yaml_snippet=self.given.get('modified_yaml_snippet'),
) | Set up your applications and the test environment. | entailment |
def is_valid(self):
"""
        Validates a single instance. Returns a boolean value and stores errors in self.errors
"""
self.errors = []
for field in self.get_all_field_names_declared_by_user():
getattr(type(self), field).is_valid(self, type(self), field)
field_errors = getattr(type(self), field).errors(self)
self.errors.extend(field_errors)
        return len(self.errors) == 0 | Validates a single instance. Returns a boolean value and stores errors in self.errors
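is_valid() relies on class-level field objects that expose is_valid(instance, owner, field_name) and errors(instance); below is a toy sketch of that contract with illustrative names, not the source library's real field classes.

class RequiredField(object):
    """Toy field object matching the contract used by is_valid() above."""
    def __init__(self):
        self._errors = {}

    def is_valid(self, instance, owner, field_name):
        value = instance.__dict__.get(field_name)
        self._errors[id(instance)] = [] if value else ["%s is required" % field_name]

    def errors(self, instance):
        return self._errors.get(id(instance), [])


class Person(object):
    name = RequiredField()

    def get_all_field_names_declared_by_user(self):
        return ["name"]


p = Person()
errors = []
for field in p.get_all_field_names_declared_by_user():
    getattr(type(p), field).is_valid(p, type(p), field)
    errors.extend(getattr(type(p), field).errors(p))
print(errors)  # ['name is required'] until p.name is assigned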
def main(self):
"""
Run the analyses using the inputted values for forward and reverse read length. However, if not all strains
pass the quality thresholds, continue to periodically run the analyses on these incomplete strains until either
all strains are complete, or the sequencing run is finished
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
self.createobjects()
# Run the genesipping analyses
self.methods()
# Determine if the analyses are complete
self.complete()
self.additionalsipping()
# Update the report object
self.reports = Reports(self)
# Once all the analyses are complete, create reports for each sample
Reports.methodreporter(self.reports)
# Print the metadata
printer = MetadataPrinter(self)
printer.printmetadata() | Run the analyses using the inputted values for forward and reverse read length. However, if not all strains
pass the quality thresholds, continue to periodically run the analyses on these incomplete strains until either
all strains are complete, or the sequencing run is finished | entailment |