def _connectRelay(self, process, protocolFactory):
try:
wf = _WrappingFactory(protocolFactory)
connector = RelayConnector(process, wf, self.timeout,
self.inductor.reactor)
connector.connect()
except:
return defer.fail()
# Return a deferred that is called back when the protocol is connected.
return wf._onConnection | Set up and connect the protocol we want to relay to the process.
This method is automatically called when the process is started,
and we are ready to relay through it. |
def _startRelay(self, client):
process = client.transport.connector.process
# Relay any buffered data that was received from the process before
# we got connected and started relaying.
for _, data in process.data:
client.dataReceived(data)
process.protocol = client
@process._endedDeferred.addBoth
def stopRelay(reason):
"""Stop relaying data. Called when the process has ended.
"""
relay = client.transport
relay.loseConnection(reason)
connector = relay.connector
connector.connectionLost(reason)
# Pass through the client protocol.
return client | Start relaying data between the process and the protocol.
This method is called when the protocol is connected. |
def connectRelay(self):
self.protocol = self.connector.buildProtocol(None)
self.connected = True
self.protocol.makeConnection(self) | Builds the target protocol and connects it to the relay transport. |
def childDataReceived(self, childFD, data):
protocol = getattr(self, 'protocol', None)
if protocol:
protocol.dataReceived(data)
else:
self.data.append((childFD, data)) | Relay data received from the process on any file descriptor to the connected protocol, buffering it until the protocol connects. |
def publish(self, user, provider, obj, comment, **kwargs):
'''
user - django User or UserSocialAuth instance
provider - name of publisher provider
obj - sharing object
comment - string
'''
social_user = self._get_social_user(user, provider)
backend = self.get_backend(social_user, provider, context=kwargs)
return backend.publish(obj, comment) | user - django User or UserSocialAuth instance
provider - name of publisher provider
obj - sharing object
comment - string |
def check(self, user, provider, permission, **kwargs):
'''
user - django User or UserSocialAuth instance
provider - name of publisher provider
permission - if backend maintains check permissions
vk - binary mask in int format
facebook - scope string
'''
try:
social_user = self._get_social_user(user, provider)
if not social_user:
return False
except SocialUserDoesNotExist:
return False
backend = self.get_backend(social_user, provider, context=kwargs)
return backend.check(permission) | user - django User or UserSocialAuth instance
provider - name of publisher provider
permission - if backend maintains check permissions
vk - binary mask in int format
facebook - scope string |
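A hedged usage sketch of the publish() and check() methods above; ``publisher``, ``user`` and ``article`` are hypothetical stand-ins for a real instance and Django objects.

# Hypothetical usage: verify the permission first, then publish.
if publisher.check(user, 'facebook', 'publish_actions'):
    publisher.publish(user, 'facebook', article, 'Worth a read!')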
def recognize_byte(self, image, timeout=10):
result = []
alpr = subprocess.Popen(
self._cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL
)
# send image
try:
# pylint: disable=unused-variable
stdout, stderr = alpr.communicate(input=image, timeout=timeout)
stdout = io.StringIO(str(stdout, 'utf-8'))
except subprocess.TimeoutExpired:
_LOGGER.error("Alpr process timeout!")
alpr.kill()
return None
tmp_res = {}
while True:
line = stdout.readline()
if not line:
if len(tmp_res) > 0:
result.append(tmp_res)
break
new_plate = self.__re_plate.search(line)
new_result = self.__re_result.search(line)
# found a new plate
if new_plate and len(tmp_res) > 0:
result.append(tmp_res)
tmp_res = {}
continue
# found plate result
if new_result:
try:
tmp_res[new_result.group(1)] = float(new_result.group(2))
except ValueError:
continue
_LOGGER.debug("Process alpr with result: %s", result)
return result | Process a byte image buffer. |
def finished(finished_status,
update_interval,
table,
status_column,
edit_at_column):
sql = select([table]).where(
and_(*[
status_column >= finished_status,
edit_at_column >= x_seconds_before_now(update_interval)
])
)
return sql | Create a SQLAlchemy select statement that fetches all finished tasks.
:param finished_status: int, status codes greater than or equal to this
value are considered finished.
:param update_interval: int, the record is updated every x seconds.
:return: a sqlalchemy select statement.
**Chinese doc (translated)**: the status code is greater than or equal to a
given value, and the record was updated within the recent time window. |
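A minimal sketch of calling finished(), assuming the SQLAlchemy 1.x-style select([table]) construct used above; the table and column names are hypothetical.

from sqlalchemy import Column, DateTime, Integer, MetaData, Table

metadata = MetaData()
tasks = Table(
    'tasks', metadata,  # hypothetical table
    Column('status', Integer),
    Column('edit_at', DateTime),
)
sql = finished(finished_status=50, update_interval=3600, table=tasks,
               status_column=tasks.c.status, edit_at_column=tasks.c.edit_at)
print(sql)  # SELECT ... FROM tasks WHERE status >= :status_1 AND edit_at >= :edit_at_1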
def unfinished(finished_status,
update_interval,
table,
status_column,
edit_at_column):
sql = select([table]).where(
or_(*[
status_column < finished_status,
edit_at_column < x_seconds_before_now(update_interval)
])
)
return sql | Create a SQLAlchemy select statement that fetches all unfinished tasks.
:param finished_status: int, status codes less than this value are
considered unfinished.
:param update_interval: int, the record is updated every x seconds.
:return: a sqlalchemy select statement.
**Chinese doc (translated)**: the status code is less than a given value, or
the time since the last update exceeds the threshold. |
def find_nearest(x, x0) -> Tuple[int, Any]:
x = np.asanyarray(x) # for indexing upon return
x0 = np.atleast_1d(x0)
# %%
if x.size == 0 or x0.size == 0:
raise ValueError('empty input(s)')
if x0.ndim not in (0, 1):
raise ValueError('2-D x0 not handled yet')
# %%
ind = np.empty_like(x0, dtype=int)
# NOTE: not trapping IndexError (all-nan) because returning None can surprise with slice indexing
for i, xi in enumerate(x0):
if xi is not None and (isinstance(xi, (datetime.datetime, datetime.date, np.datetime64)) or np.isfinite(xi)):
ind[i] = np.nanargmin(abs(x-xi))
else:
raise ValueError('x0 must NOT be None or NaN to avoid surprising None return value')
return ind.squeeze()[()], x[ind].squeeze()[()] | This find_nearest function does NOT assume sorted input
inputs:
x: array (float, int, datetime, h5py.Dataset) within which to search for x0
x0: singleton or array of values to search for in x
outputs:
idx: index of flattened x nearest to x0 (i.e. works with higher than 1-D arrays also)
xidx: x[idx]
Observe how bisect.bisect() gives the incorrect result!
idea based on:
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array |
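A short example of the behaviour described above, using deliberately unsorted input:

import numpy as np

x = np.array([10.0, 2.0, 7.5, 3.3])      # unsorted on purpose
idx, val = find_nearest(x, 7.0)
print(idx, val)                           # -> 2 7.5
idxs, vals = find_nearest(x, [2.1, 9.0])  # an array of query points also works
print(idxs, vals)                         # -> indices [1 0], values [ 2. 10.]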
def ensure_context_attribute_exists(context, name, default_value=None):
if not hasattr(context, name):
setattr(context, name, default_value) | Ensure a behave resource exists as attribute in the behave context.
If this is not the case, the attribute is created by using the default_value. |
def ensure_workdir_exists(context):
ensure_context_attribute_exists(context, "workdir", None)
if not context.workdir:
context.workdir = os.path.abspath(WORKDIR)
pathutil.ensure_directory_exists(context.workdir) | Ensures that the work directory exists.
In addition, the location of the workdir is stored as attribute in
the context object. |
def sorted_feed_cols(df):
cols = df.columns
ind = [int(c.split("feed")[1]) for c in cols]
cols = [c[1] for c in sorted(zip(ind, cols))]
return cols | takes a dataframe's columns that would be of the form:
['feed003', 'failsafe_feed999', 'override_feed000', 'feed001', 'feed002']
and returns:
['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999'] |
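For instance, reproducing the docstring's example:

import pandas as pd

df = pd.DataFrame(columns=['feed003', 'failsafe_feed999', 'override_feed000',
                           'feed001', 'feed002'])
print(sorted_feed_cols(df))
# ['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999']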
def mean_fill(adf):
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].mean()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan | Looks at each row, and calculates the mean. Honours
the Trump override/failsafe logic. |
def median_fill(adf):
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].median()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan | Looks at each row, and chooses the median. Honours
the Trump override/failsafe logic. |
def most_populated(adf):
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df | Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. |
def most_recent(adf):
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the feeds with the most recent data...
feeds_with_data = feeds_only.dropna(how='all')
selected_feeds = feeds_with_data.T.dropna().index
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df | Looks at each column, and chooses the feed with the most recent data
point. Honours the Trump override/failsafe logic. |
def build_tri(adf):
# just look at the capital (price), in "feed one", and income (dividend), in "feed two"
cap, inc = adf.columns[1:3]
data = adf[[cap,inc]]
# find the feeds with the most recent data...
inc_pct = data[inc].div(data[cap].shift(1))
cap_pct = data[cap].pct_change(1)
pre_final = inc_pct + cap_pct
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df | Builds a total return index from the first two feeds, using
capital (price) in feed one and income (dividend) in feed two. Honours the
Trump override/failsafe logic. |
async def send_audio(self, url, user, options=None):
return await self.chat.send_audio(url, user, options) | send audio message
:param url: link to the audio file
:param user: target user
:param options:
:return: |
def use(self, middleware):
logger.debug('use')
logger.debug(middleware)
self.middlewares.append(middleware)
di.injector.register(instance=middleware)
di.bind(middleware, auto=True)
# TODO: should use DI somehow
if check_spec(['send_text_message'], middleware):
self.chat.add_interface(middleware)
return middleware | attach middleware
:param middleware:
:return: |
def del_by_idx(tree, idxs):
if len(idxs) == 0:
tree['item'] = None
tree['subtrees'] = []
else:
hidx, tidxs = idxs[0], idxs[1:]
del_by_idx(tree['subtrees'][hidx][1], tidxs)
if len(tree['subtrees'][hidx][1]['subtrees']) == 0:
del tree['subtrees'][hidx] | Delete a key entry based on numerical indexes into subtree lists. |
def find_in_tree(tree, key, perfect=False):
if len(key) == 0:
if tree['item'] is not None:
return tree['item'], ()
else:
for i in range(len(tree['subtrees'])):
if not perfect and tree['subtrees'][i][0] == '*':
item, trace = find_in_tree(tree['subtrees'][i][1],
(), perfect)
return item, (i,) + trace
raise KeyError(key)
else:
head, tail = key[0], key[1:]
for i in range(len(tree['subtrees'])):
if tree['subtrees'][i][0] == head or \
not perfect and tree['subtrees'][i][0] == '*':
try:
item, trace = find_in_tree(tree['subtrees'][i][1],
tail, perfect)
return item, (i,) + trace
except KeyError:
pass
raise KeyError(key) | Helper to perform find in dictionary tree. |
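A small sketch of the tree shape these helpers assume: each node is a dict {'item': value-or-None, 'subtrees': [(key_element, subtree), ...]} and '*' is a wildcard key element.

leaf = {'item': 'payload', 'subtrees': []}
tree = {'item': None,
        'subtrees': [('a', {'item': None, 'subtrees': [('*', leaf)]})]}
item, trace = find_in_tree(tree, ('a', 'b'))  # the wildcard matches 'b'
print(item, trace)                            # -> payload (0, 0)
# find_in_tree(tree, ('a', 'b'), perfect=True) would raise KeyError,
# because perfect matches never follow wildcards.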
def dominates(p, q):
return (len(p) == len(q) and
all(map(lambda es: es[0] == es[1] or es[0] == '*', zip(p, q)))) | Test for path domination. An individual path element *a*
dominates another path element *b*, written as *a* >= *b* if
either *a* == *b* or *a* is a wild card. A path *p* = *p1*, *p2*,
..., *pn* dominates another path *q* = *q1*, *q2*, ..., *qm* if
*n* == *m* and, for all *i*, *pi* >= *qi*. |
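For example:

print(dominates(('a', '*', 'c'), ('a', 'b', 'c')))  # True: '*' dominates 'b'
print(dominates(('a', 'b'), ('a', 'b', 'c')))       # False: lengths differ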
def find(self, key, perfect=False):
return find_in_tree(self.root, key, perfect) | Find a key path in the tree, matching wildcards. Return value for
key, along with index path through subtree lists to the result. Throw
``KeyError`` if the key path doesn't exist in the tree. |
def _purge_unreachable(self, key):
dels = []
for p in self:
if dominates(key, p):
dels.append(p)
for k in dels:
_, idxs = find_in_tree(self.root, k, perfect=True)
del_by_idx(self.root, idxs) | Purge unreachable dominated key paths before inserting a new key
path. |
def register(self, name, namespace):
if name in self._NAMESPACES:
raise ValueError("Namespace {0} already exists.".format(name))
if not isinstance(namespace, ns.Namespace):
raise TypeError("Namespaces must be of type Namespace.")
self._NAMESPACES[name] = namespace | Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered. |
def fromSearch(text):
terms = []
for term in nstr(text).split(','):
# assume if no *'s then the user wants to search anywhere as keyword
if '*' not in term:
term = '*%s*' % term
term = term.replace('*', '.*')
terms.append('^%s$' % term)
return '|'.join(terms) | Generates a regular expression from 'simple' search terms.
:param text | <str>
:usage |>>> import projex.regex
|>>> projex.regex.fromSearch('*cool*')
|'^.*cool.*$'
|>>> projex.regex.fromSearch('*cool*,*test*')
|'^.*cool.*$|^.*test.*$'
:return <str> |
def ast_smart(val):
if isinstance(val, Number):
return Num(n=val)
elif isinstance(val, basestring):
return Str(s=val)
else:
return ast_name(str(val)) | Return a suitable subclass of :class:`ast.AST` for storing numbers
or strings. For other type of objects, return a node class that will
indicate that the variable is contained in one of global or local
namespaces. |
def napi_compare(left, ops, comparators, **kwargs):
values = []
for op, right in zip(ops, comparators):
value = COMPARE[op](left, right)
values.append(value)
left = right
result = napi_and(values, **kwargs)
if isinstance(result, ndarray):
return result
else:
return bool(result) | Make pairwise comparisons of comparators. |
def napi_and(values, **kwargs):
arrays = []
result = None
shapes = set()
for value in values:
if isinstance(value, ndarray) and value.shape:
arrays.append(value)
shapes.add(value.shape)
elif not value:
result = value
if len(shapes) > 1 and kwargs.get('sq', kwargs.get('squeeze', False)):
shapes.clear()
for i, a in enumerate(arrays):
a = arrays[i] = a.squeeze()
shapes.add(a.shape)
if len(shapes) > 1:
raise ValueError('array shape mismatch, even after squeezing')
if len(shapes) > 1:
raise ValueError('array shape mismatch')
shape = shapes.pop() if shapes else None
if result is not None:
if shape:
return numpy.zeros(shape, bool)
else:
return result
elif arrays:
sc = kwargs.get('sc', kwargs.get('shortcircuit', 0))
if sc and numpy.prod(shape) >= sc:
return short_circuit_and(arrays, shape)
elif len(arrays) == 2:
return numpy.logical_and(*arrays)
else:
return numpy.all(arrays, 0)
else:
return value | Perform element-wise logical *and* operation on arrays.
If *values* contains a non-array object with truth_ value **False**, the
outcome will be an array of **False**\s with suitable shape without arrays
being evaluated. Non-array objects with truth value **True** are omitted.
If array shapes do not match (after squeezing when enabled by user),
:exc:`ValueError` is raised.
This function uses :obj:`numpy.logical_and` or :obj:`numpy.all`. |
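A brief sketch of the semantics described above:

import numpy as np

a = np.array([True, True, False])
b = np.array([True, False, False])
print(napi_and([a, b]))     # [ True False False]
print(napi_and([a, 0, b]))  # all False: a falsy scalar decides the outcome
print(napi_and([a, 1, b]))  # [ True False False]: truthy non-arrays are omitted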
def napi_or(values, **kwargs):
arrays = []
result = None
shapes = set()
for value in values:
if isinstance(value, ndarray) and value.shape:
arrays.append(value)
shapes.add(value.shape)
elif value:
result = value
if len(shapes) > 1 and kwargs.get('squeeze', kwargs.get('sq', False)):
shapes.clear()
for i, a in enumerate(arrays):
a = arrays[i] = a.squeeze()
shapes.add(a.shape)
if len(shapes) > 1:
raise ValueError('array shape mismatch, even after squeezing')
if len(shapes) > 1:
raise ValueError('array shape mismatch')
shape = shapes.pop() if shapes else None
if result is not None:
if shape:
return numpy.ones(shape, bool)
else:
return result
elif arrays:
sc = kwargs.get('sc', kwargs.get('shortcircuit', 0))
if sc and numpy.prod(shape) >= sc:
return short_circuit_or(arrays, shape)
elif len(arrays) == 2:
return numpy.logical_or(*arrays)
else:
return numpy.any(arrays, 0)
else:
return value | Perform element-wise logical *or* operation on arrays.
If *values* contains a non-array object with truth_ value **True**, the
outcome will be an array of **True**\s with suitable shape without arrays
being evaluated. Non-array objects with truth value **False** are omitted.
If array shapes do not match (after squeezing when enabled by user),
:exc:`ValueError` is raised.
This function uses :obj:`numpy.logical_or` or :obj:`numpy.any`. |
def visit_Compare(self, node):
if len(node.ops) > 1:
func = Name(id=self._prefix + 'napi_compare', ctx=Load())
args = [node.left,
List(elts=[Str(op.__class__.__name__)
for op in node.ops], ctx=Load()),
List(elts=node.comparators, ctx=Load())]
node = Call(func=func, args=args, keywords=self._kwargs)
fml(node)
self.generic_visit(node)
return node | Replace chained comparisons with calls to :func:`.napi_compare`. |
def visit_BoolOp(self, node):
if isinstance(node.op, And):
func = Name(id=self._prefix + 'napi_and', ctx=Load())
else:
func = Name(id=self._prefix + 'napi_or', ctx=Load())
args = [List(elts=node.values, ctx=Load())]
node = Call(func=func, args=args, keywords=self._kwargs)
fml(node)
self.generic_visit(node)
return node | Replace logical operations with calls to :func:`.napi_and` or
:func:`.napi_or`. |
def visit_UnaryOp(self, node):
if isinstance(node.op, Not):
self._debug('UnaryOp', node.op, incr=1)
operand = self[node.operand]
self._debug('|-', operand, incr=2)
tn = self._tn()
result = numpy.logical_not(operand)
self._debug('|_', result, incr=2)
self[tn] = result
return ast_name(tn)
else:
return self.generic_visit(node) | Interfere with ``not`` operation to :func:`numpy.logical_not`. |
def visit_BoolOp(self, node):
self._incr()
self._debug('BoolOp', node.op)
if isinstance(node.op, And):
result = self._and(node)
else:
result = self._or(node)
self._debug('|_', result, incr=1)
self._decr()
return self._return(result, node) | Interfere with boolean operations and use :func:`numpy.all` and
:func:`numpy.any` functions for ``and`` and ``or`` operations.
*axis* argument to these functions is ``0``. |
def rec_load_all(self, zone):
has_more = True
current_count = 0
while has_more:
records = self._request({
'a': 'rec_load_all',
'o': current_count,
'z': zone
})
try:
has_more = records['response']['recs']['has_more']
current_count += records['response']['recs']['count']
for record in records['response']['recs']['objs']:
yield record
except KeyError:
has_more = False | Lists all DNS records for the given domain
:param zone: the domain for which records are being retrieved
:type zone: str
:return:
:rtype: generator |
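Since rec_load_all is a generator, iterate over it directly; ``cf`` is a hypothetical client instance and the zone name is illustrative.

for record in cf.rec_load_all('example.com'):
    print(record['type'], record['name'])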
def zone_ips(self, zone, hours=24, ip_class=None, geo=False):
params = {
'a': 'zone_ips',
'z': zone,
'hours': hours,
'class': ip_class,
}
if geo:
params['geo'] = geo
return self._request(params) | Retrieve IP addresses of recent visitors
:param zone: the target domain
:type zone: str
:param hours: Past number of hours to query. Defaults to 24, maximum is 48.
:type hours: int
:param ip_class: Optional. Restrict the result set to a given class as given by:
"r" -- regular
"s" -- crawler
"t" -- threat
:type ip_class: str
:param geo: Optional. Set to True to add longitude and latitude information to response
:type geo: bool
:return:
:rtype: dict |
def rec_new(self, zone, record_type, name, content, ttl=1, priority=None, service=None, service_name=None,
protocol=None, weight=None, port=None, target=None):
params = {
'a': 'rec_new',
'z': zone,
'type': record_type,
'name': name,
'content': content,
'ttl': ttl
}
if priority is not None:
params['prio'] = priority
if service is not None:
params['service'] = service
if service_name is not None:
params['srvname'] = service_name
if protocol is not None:
params['protocol'] = protocol
if weight is not None:
params['weight'] = weight
if port is not None:
params['port'] = port
if target is not None:
params['target'] = target
return self._request(params) | Create a DNS record for the given zone
:param zone: domain name
:type zone: str
:param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC]
:type record_type: str
:param name: name of the DNS record
:type name: str
:param content: content of the DNS record
:type content: str
:param ttl: TTL of the DNS record in seconds. 1 = Automatic, otherwise, value must in between 120 and
4,294,967,295 seconds.
:type ttl: int
:param priority: [applies to MX/SRV] MX record priority.
:type priority: int
:param service: Service for SRV record
:type service: str
:param service_name: Service Name for SRV record
:type service_name: str
:param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls].
:type protocol: str
:param weight: Weight for SRV record.
:type weight: int
:param port: Port for SRV record
:type port: int
:param target: Target for SRV record
:type target: str
:return:
:rtype: dict |
def calc_transition_to_state(self, newstate):
cached_val = JTAGStateMachine._lookup_cache.\
get((self.state, newstate))
if cached_val:
return cached_val
if newstate not in self.states:
raise ValueError("%s is not a valid state for this state "
"machine"%newstate)
path = self._find_shortest_path(self._statestr, newstate)
if not path:
raise ValueError("No path to the requested state.")
res = self._get_steps_from_nodes_path(path)
res.reverse()
JTAGStateMachine._lookup_cache[(self.state, newstate)] = res
return res | Given a target state, generate the sequence of transitions that would move this state machine instance to that target state.
Args:
newstate: A str state name to calculate the path to.
Returns:
A bitarray containing the bits that would transition this
state machine to the target state. The bits read from right
to left. For efficiency, this resulting bitarray is cached.
Do not edit this bitarray, or it will cause undefined
behavior. |
def prompt(self, error=''):
if self.hidden:
return True
cmd = [self.label]
if self.default is not None:
cmd.append('(default: {0})'.format(self.default))
elif not self.required:
cmd.append('(default: )')
if self.type == 'bool':
cmd.append('(y/n)')
if self.choices:
print 'Choices:'
for choice in self.choices:
print choice
if error:
print error
value = raw_input(' '.join(cmd) + ':')
if value == '':
value = self.default
if self.type == 'bool':
if value == 'y':
value = True
elif value == 'n':
value = False
else:
value = self.default
if value is None and self.required:
return self.prompt('{0} is required.'.format(self.name))
if self.regex and not re.match(self.regex, value):
error = '{0} must match {1}'.format(self.name, self.regex)
return self.prompt(error)
self.value = value
return True | Prompts the user to set the value for this item.
:return <bool> | success |
def render(self, template, fail='## :todo: add {template}'):
try:
return self._templates[template].render(scaffold=self)
except KeyError:
return fail.format(template=template) | Returns the rendered value for the inputted template name.
:param template | <str> |
def run(self, path=None):
if path is None:
path = '.'
for prop in self._properties.values():
if not prop.prompt():
return False
return self.build(path) | Runs the scaffold option generation for this scaffold in the given
path. If no path is supplied, then the current path is used.
:param path | <str> || None |
def structure(self):
opts = {'scaffold': self}
# build from a zip file
if zipfile.is_zipfile(self.source()):
zfile = zipfile.ZipFile(self.source(), 'r')
try:
contents = zfile.read('structure.xml')
contents = makotext.render(contents, opts)
zfile.close()
return ElementTree.fromstring(contents)
except StandardError:
logger.exception('Failed to load structure.')
zfile.close()
return None
else:
try:
filename = os.path.join(os.path.dirname(self.source()),
'structure.xml')
xdata = open(filename, 'r').read()
xdata = makotext.render(xdata, opts)
return ElementTree.fromstring(xdata)
except StandardError:
logger.exception('Failed to load structure.')
return None | Returns the structure for this scaffold.
:return <xml.etree.ElementTree.Element> || None |
def template(self, key):
try:
return self._templates[key]
except KeyError:
return Template.Plugins[key] | Returns the template associated with this scaffold.
:param key | <str>
:return <projex.scaffold.Template> || None |
def uifile(self):
output = ''
# build from a zip file
if zipfile.is_zipfile(self.source()):
zfile = zipfile.ZipFile(self.source(), 'r')
if 'properties.ui' in zfile.namelist():
tempdir = tempfile.gettempdir()
output = os.path.join(tempdir,
'{0}_properties.ui'.format(self.name()))
f = open(output, 'w')
f.write(zfile.read('properties.ui'))
f.close()
zfile.close()
else:
uifile = os.path.join(os.path.dirname(self.source()),
'properties.ui')
if os.path.exists(uifile):
output = uifile
return output | Returns the uifile for this scaffold.
:return <str> |
def load(filename):
# parse a zipped file
if zipfile.is_zipfile(filename):
zfile = zipfile.ZipFile(filename, 'r')
try:
xml = ElementTree.fromstring(zfile.read('scaffold.xml'))
except StandardError:
logger.exception('Failed to load scaffold: {0}'.format(filename))
zfile.close()
return None
zfile.close()
# parse a standard xml file
else:
try:
xml = ElementTree.parse(filename).getroot()
except StandardError:
logger.exception('Failed to load scaffold: {0}'.format(filename))
return None
# generate a scaffold
scaffold = Scaffold()
scaffold.setSource(filename)
scaffold.setName(xml.get('name', 'Missing'))
scaffold.setGroup(xml.get('group', 'Default'))
scaffold.setLanguage(xml.get('lang', 'Python'))
scaffold.setIcon(xml.get('icon', ''))
# define properties
xprops = xml.find('properties')
if xprops is not None:
for xprop in xprops:
scaffold.addProperty(Property.fromXml(xprop))
return scaffold | Loads the scaffold from the given XML file.
:param filename | <str>
:return <Scaffold> || None |
def displayhook(value):
global _displayhooks
new_hooks = []
for hook_ref in _displayhooks:
hook = hook_ref()
if hook:
hook(value)
new_hooks.append(hook_ref)
_displayhooks = new_hooks
sys.__displayhook__(value) | Runs all of the registered display hook methods with the given value.
Look at the sys.displayhook documentation for more information.
:param value | <variant> |
def excepthook(cls, error, trace):
global _excepthooks
new_hooks = []
for hook_ref in _excepthooks:
hook = hook_ref()
if hook:
hook(cls, error, trace)
new_hooks.append(hook_ref)
_excepthooks = new_hooks
sys.__excepthook__(cls, error, trace) | Runs all of the registered exception hook methods with the given value.
Look at the sys.excepthook documentation for more information.
:param cls | <type>
error | <str>
trace | <traceback> |
def formatExcept(cls, error, trace):
clsname = cls.__name__ if cls else 'UnknownError'
tb = 'Traceback (most recent call last):\n'
tb += ''.join(traceback.format_tb(trace))
tb += '{0}: {1}'.format(clsname, error)
return tb | Formats the inputted class, error, and traceback information to the standard
output commonly found in Python interpreters.
:param cls | <type>
error | <str>
trace | <traceback>
:return <str> |
def registerDisplay(func):
setup()
ref = weakref.ref(func)
if ref not in _displayhooks:
_displayhooks.append(ref) | Registers a function to the display hook queue to be called on hook.
Look at the sys.displayhook documentation for more information.
:param func | <callable> |
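Because hooks are stored as weak references, the registered callable must be kept alive (for example, a module-level function), as in this sketch:

def echo_hook(value):
    print('displayed: {0!r}'.format(value))

registerDisplay(echo_hook)  # echo_hook now runs on each displayhook invocation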
def registerExcept(func):
setup()
ref = weakref.ref(func)
if ref not in _excepthooks:
_excepthooks.append(ref) | Registers a function to the except hook queue to be called on hook.
Look at the sys.displayhook documentation for more information.
:param func | <callable> |
def registerStdErr(func):
if not isinstance(sys.stderr, StreamHooks):
sys.stderr = StreamHooks(sys.stderr)
ref = weakref.ref(func)
if ref not in sys.stderr.hooks:
sys.stderr.hooks.append(ref) | Registers a function to the print hook queue to be called on hook.
This method will also override the current sys.stderr variable with a new
<StreamHooks> instance. This will preserve any current sys.stderr
overrides while providing a hookable class for linking multiple methods to.
:param func | <callable> |
def registerStdOut(func):
if not isinstance(sys.stdout, StreamHooks):
sys.stdout = StreamHooks(sys.stdout)
ref = weakref.ref(func)
if ref not in sys.stdout.hooks:
sys.stdout.hooks.append(ref) | Registers a function to the print hook queue to be called on hook.
This method will also override the current sys.stdout variable with a new
<StreamHooks> instance. This will preserve any current sys.stdout
overrides while providing a hookable class for linking multiple methods to.
:param func | <callable> |
def setup():
global _displayhooks, _excepthooks
if _displayhooks is not None:
return
_displayhooks = []
_excepthooks = []
# store any current hooks
if sys.displayhook != sys.__displayhook__:
_displayhooks.append(weakref.ref(sys.displayhook))
if sys.excepthook != sys.__excepthook__:
_excepthooks.append(weakref.ref(sys.excepthook))
# replace the current hooks
sys.displayhook = displayhook
sys.excepthook = excepthook | Initializes the hook queues for the sys module. This method will
automatically be called on the first registration for a hook to the system
by either the registerDisplay or registerExcept functions. |
def unregisterStdErr(func):
try:
sys.stderr.hooks.remove(weakref.ref(func))
except (AttributeError, ValueError):
pass | Un-registers a function from the print hook queue.
Look at the sys.displayhook documentation for more information.
:param func | <callable> |
def unregisterStdOut(func):
try:
sys.stdout.hooks.remove(weakref.ref(func))
except (AttributeError, ValueError):
pass | Un-registers a function from the print hook queue.
Look at the sys.displayhook documentation for more information.
:param func | <callable> |
def _parse_iso8601(text):
if isinstance(text, unicode):
try:
return parse_iso8601(text)
except ValueError:
raise CheckedValueTypeError(
None, (datetime,), unicode, text,
)
# Let pyrsistent reject it down the line.
return text | Maybe parse an ISO8601 datetime string into a datetime.
:param text: Either a ``unicode`` string to parse or any other object
(ideally a ``datetime`` instance) to pass through.
:return: A ``datetime.datetime`` representing ``text``. Or ``text`` if it
was anything but a ``unicode`` string. |
def from_path(cls, spec_path):
with spec_path.open() as spec_file:
return cls.from_document(load(spec_file)) | Load a specification from a path.
:param FilePath spec_path: The location of the specification to read. |
def add_behavior_for_pclass(self, definition, cls):
if definition in self._pclasses:
raise AlreadyCreatedClass(definition)
if definition not in self.definitions:
raise NoSuchDefinition(definition)
self._behaviors.setdefault(definition, []).append(cls) | Define an additional base class for the Python class created for a
particular definition.
:param unicode definition: The name of the definition whose Python class
will include the additional base class.
:param type cls: The additional base class.
:raise ValueError: If a Python class for the given definition has
already been created. Behavior cannot be retroactively added to a
Python class. All behaviors must be registered before the first
call to ``pclass_for_definition`` for a particular definition.
:return: ``None`` |
def to_document(self):
return dict(
info=thaw(self.info),
paths=thaw(self.paths),
definitions=thaw(self.definitions),
securityDefinitions=thaw(self.securityDefinitions),
security=thaw(self.security),
swagger=thaw(self.swagger),
) | Serialize this specification to a JSON-compatible object representing a
Swagger specification. |
def pclass_for_definition(self, name):
while True:
try:
cls = self._pclasses[name]
except KeyError:
try:
original_definition = self.definitions[name]
except KeyError:
raise NoSuchDefinition(name)
if "$ref" in original_definition:
# Identify definitions that are merely a reference to
# another and restart processing. There is some
# duplication of logic between this and the $ref handling
# in _ClassModel. It would be nice to eliminate this
# duplication.
name = original_definition[u"$ref"]
assert name.startswith(u"#/definitions/")
name = name[len(u"#/definitions/"):]
continue
definition = self.transform_definition(name, original_definition)
kind = self._identify_kind(definition)
if kind is None:
raise NotClassLike(name, definition)
generator = getattr(self, "_model_for_{}".format(kind))
model = generator(name, definition)
bases = tuple(self._behaviors.get(name, []))
cls = model.pclass(bases)
self._pclasses[name] = cls
return cls | Get a ``pyrsistent.PClass`` subclass representing the Swagger definition
in this specification which corresponds to the given name.
:param unicode name: The name of the definition to use.
:return: A Python class which can be used to represent the Swagger
definition of the given name. |
def _model_for_CLASS(self, name, definition):
return _ClassModel.from_swagger(
self.pclass_for_definition,
name,
definition,
) | Model a Swagger definition that is like a Python class.
:param unicode name: The name of the definition from the
specification.
:param pyrsistent.PMap definition: A Swagger definition to categorize.
This will be a value like the one found at
``spec["definitions"][name]``. |
def pclass_field_for_attribute(self):
return self.type_model.pclass_field_for_type(
required=self.required,
default=self.default,
) | :return: A pyrsistent field reflecting this attribute and its type model. |
def from_swagger(cls, pclass_for_definition, name, definition):
return cls(
name=name,
doc=definition.get(u"description", name),
attributes=cls._attributes_for_definition(
pclass_for_definition,
definition,
),
) | Create a new ``_ClassModel`` from a single Swagger definition.
:param pclass_for_definition: A callable like
``Swagger.pclass_for_definition`` which can be used to resolve
type references encountered in the definition.
:param unicode name: The name of the definition.
:param definition: The Swagger definition to model. This will be a
value like the one found at ``spec["definitions"][name]``.
:return: A new model for the given definition. |
def pclass(self, bases):
def discard_constant_fields(cls, **kwargs):
def ctor():
return super(huh, cls).__new__(cls, **kwargs)
try:
return ctor()
except AttributeError:
if u"kind" in kwargs or u"apiVersion" in kwargs:
kwargs.pop("kind", None)
kwargs.pop("apiVersion", None)
return ctor()
raise
def lt_pclass(self, other):
if isinstance(other, self.__class__):
return sorted(self.serialize().items()) < sorted(other.serialize().items())
return NotImplemented
def eq_pclass(self, other):
if isinstance(other, self.__class__):
return sorted(self.serialize().items()) == sorted(other.serialize().items())
return NotImplemented
content = {
attr.name: attr.pclass_field_for_attribute()
for attr
in self.attributes
}
content["__doc__"] = nativeString(self.doc)
content["serialize"] = _serialize_with_omit
content["__new__"] = discard_constant_fields
content["__lt__"] = lt_pclass
content["__eq__"] = eq_pclass
content["__hash__"] = PClass.__hash__
content = total_ordering(content)
huh = type(nativeString(self.name), bases + (PClass,), content)
return huh | Create a ``pyrsistent.PClass`` subclass representing this class.
:param tuple bases: Additional base classes to give the resulting
class. These will appear to the left of ``PClass``. |
def add_behavior_for_pclass(self, cls):
kind = cls.__name__
for version in sorted(self.versions):
try:
self.spec.add_behavior_for_pclass(self.full_name(version, kind), cls)
except NoSuchDefinition:
pass
else:
return None
raise NoSuchDefinition(kind) | Define an additional base class for the Python class created for a
particular definition.
:param type cls: The additional base class. Its name must exactly
match the name of a definition with a version matching this
object's version.
:return: ``None`` |
def dumps_bytes(obj):
b = dumps(obj)
if isinstance(b, unicode):
b = b.encode("ascii")
return b | Serialize ``obj`` to JSON formatted ``bytes``. |
def native_string_to_bytes(s, encoding="ascii", errors="strict"):
if not isinstance(s, str):
raise TypeError("{} must be type str, not {}".format(s, type(s)))
if str is bytes:
# Python 2
return s
else:
# Python 3
return s.encode(encoding=encoding, errors=errors) | Ensure that the native string ``s`` is converted to ``bytes``. |
def native_string_to_unicode(s, encoding="ascii", errors="strict"):
if not isinstance(s, str):
raise TypeError("{} must be type str, not {}".format(s, type(s)))
if str is unicode:
# Python 3
return s
else:
# Python 2
return s.decode(encoding=encoding, errors=errors) | Ensure that the native string ``s`` is converted to ``unicode``. |
def datetime_handler(x):
if isinstance(x, datetime.datetime) or isinstance(x, datetime.date):
return x.isoformat()
raise TypeError("Unknown type") | Allow serializing datetime objects to JSON |
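Typical usage is as the ``default`` fallback for json.dumps:

import datetime
import json

payload = {'created': datetime.datetime(2020, 1, 2, 3, 4, 5)}
print(json.dumps(payload, default=datetime_handler))
# {"created": "2020-01-02T03:04:05"}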
def save(self, *args, **kwargs):
self.uid = 'party:{}'.format(slugify(self.ap_code))
if not self.slug:
if self.organization:
self.slug = slugify(self.organization.name)
else:
self.slug = slugify(self.label)
super(Party, self).save(*args, **kwargs) | **uid**: :code:`party:{apcode}` |
def parsed(self):
if not self._parsed:
self._parsed = compile(self.content, self.path, 'exec')
return self._parsed | Get the code object which represents the compiled Python file.
This property is cached and only parses the content once. |
def get_version():
version_file = os.path.join(PKG, 'lib/version.py')
ver_str_line = open(version_file, "rt").read()
version_regex = r'^__version__ = [\'"]([^\'"]*)[\'"]'
mo = re.search(version_regex, ver_str_line, re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in %s.'
% (version_file,)) | parse the version file for the version number instead of importing it
see http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package |
def _chunk(iterable, size):
# We're going to use some star magic to chunk the iterable. We create a
# copy of the iterator size times, then pull a value from each to form a
# chunk. The last chunk may have some trailing Nones if the length of the
# iterable isn't a multiple of size, so we filter them out.
args = (iter(iterable),) * size
return (
# pylint: disable=star-args
itertools.takewhile(lambda x: x is not None, group)
for group in itertools.zip_longest(*args)
) | Split an iterable into chunks of a fixed size. |
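The chunks are lazy iterators, so materialize each one to inspect it:

for group in _chunk([1, 2, 3, 4, 5], 2):
    print(list(group))
# [1, 2]
# [3, 4]
# [5]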
def _matrix_add_column(matrix, column, default=0):
height_difference = len(column) - len(matrix)
# The width of the matrix is the length of its longest row.
width = max(len(row) for row in matrix) if matrix else 0
# For now our offset is 0. We may need to shift our column down later.
offset = 0
# If we need extra rows, add them to the top of the matrix.
if height_difference > 0:
for _ in range(height_difference):
matrix.insert(0, [default] * width)
# If the column is shorter, we'll need to shift it down.
if height_difference < 0:
offset = -height_difference
for index, value in enumerate(column):
# The row index is the index in the column plus our offset.
row_index = index + offset
row = matrix[row_index]
# If this row is short, pad it with default values.
width_difference = width - len(row)
row.extend([default] * width_difference)
row.append(value) | Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary. |
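A short example: adding a column taller than the matrix pads new rows in at the top with the default value:

matrix = [[1, 2],
          [3, 4]]
_matrix_add_column(matrix, [5, 6, 7])
print(matrix)  # [[0, 0, 5], [1, 2, 6], [3, 4, 7]]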
def isValid(self):
if self._callback_func_ref is not None and self._callback_func_ref():
if self._callback_self_ref is None or self._callback_self_ref():
return True
return False | Checks to see if the callback pointers are still valid or not.
:return <bool> |
def clear(self, signal=None):
if signal is not None:
self._callbacks.pop(signal, None)
else:
self._callbacks.clear() | Clears either all the callbacks or the callbacks for a particular
signal.
:param signal | <variant> || None |
def connect(self, signal, slot):
if self.isConnected(signal, slot):
return False
callback = Callback(slot)
self._callbacks.setdefault(signal, [])
self._callbacks[signal].append(callback)
return True | Creates a new connection between the inputted signal and slot.
:param signal | <variant>
slot | <callable>
:return <bool> | new connection created |
def disconnect(self, signal, slot):
sig_calls = self._callbacks.get(signal, [])
for callback in sig_calls:
if callback == slot:
sig_calls.remove(callback)
return True
return False | Breaks the connection between the inputted signal and the given slot.
:param signal | <variant>
slot | <callable>
:return <bool> | connection broken |
def isConnected(self, signal, slot):
sig_calls = self._callbacks.get(signal, [])
for callback in sig_calls:
if callback == slot:
return True
return False | Returns if the given signal is connected to the inputted slot.
:param signal | <variant>
slot | <callable>
:return <bool> | is connected |
def emit(self, signal, *args):
callbacks = self._callbacks.get(signal, [])
new_callbacks = []
for callback in callbacks:
# clear out deleted pointers
if not callback.isValid():
continue
new_callbacks.append(callback)
try:
callback(*args)
except StandardError:
logger.exception('Error occurred during callback.')
self._callbacks[signal] = new_callbacks | Emits the given signal with the inputted args. This will go through
its list of connected callback slots and call them.
:param signal | <variant>
*args | variables |
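A minimal sketch of the connect/emit/disconnect cycle above; ``Signals`` is a hypothetical name for the class holding these methods.

def on_saved(filename):
    print('saved: {0}'.format(filename))

signals = Signals()                  # hypothetical class name
signals.connect('saved', on_saved)
signals.emit('saved', 'report.txt')  # -> saved: report.txt
signals.disconnect('saved', on_saved)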
def generate_example(config, ext='json'):
template_name = 'example.{0}'.format(ext.lower())
template = ENV.get_template(template_name)
return template.render(config=config) | Generate an example file based on the given Configuration object.
Args:
config (confpy.core.configuration.Configuration): The configuration
object on which to base the example.
ext (str): The file extension to render. Choices: JSON and INI.
Returns:
str: The text of the example file. |
def run(self):
rtype = self.name
resource_content = '\n'.join(self.content)
resource_class = ResourceDirective.get_resource_class(rtype)
this_resource = resource_class(self.docname, rtype, resource_content)
# Add this resource to the site
self.resources[this_resource.docname] = this_resource
# Don't need to return a resource "node", the document is the node
return [] | Run at parse time.
When the documents are initially being scanned, this method runs
and does two things: (a) creates an instance that is added to
the site's widgets, and (b) leaves behind a placeholder docutils
node that can later be processed after the docs are resolved.
The latter needs enough information to retrieve the former. |
def fastaParserSpectraClusterPy(header):
isUniprot = lambda h: h[0:3] in ['sp|', 'tr|', 'up|']
if isUniprot(header):
start = 3
end = header.find('|', start)
else:
start = 0
breakPositions = [header.find(' '), header.find('|')]
breakPositions = [i if i > 0 else len(header) for i in breakPositions]
end = min(breakPositions)
return {'id': header[start:end]} | Custom parser for fasta headers adapted from
https://github.com/spectra-cluster/spectra-cluster-py
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header |
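Two examples of the header handling above:

print(fastaParserSpectraClusterPy('sp|P12345|GRAA_HUMAN Granzyme A'))
# -> {'id': 'P12345'}
print(fastaParserSpectraClusterPy('generic_protein_1 some description'))
# -> {'id': 'generic_protein_1'}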
def _removeHeaderTag(header, tag):
if header.startswith(tag):
tagPresent = True
header = header[len(tag):]
else:
tagPresent = False
return header, tagPresent | Removes a tag from the beginning of a header string.
:param header: str
:param tag: str
:returns: (str, bool), header without the tag and a bool that indicates
whether the tag was present. |
def _parseFastaHeader(fastaHeader, parser=None, forceId=False):
if parser is None:
try:
headerInfo = pyteomics.fasta.parse(fastaHeader)
except pyteomics.auxiliary.PyteomicsError as raisedPyteomicsError:
#If forceId is set True, the whole header is used as id
if forceId:
headerInfo = {'id': fastaHeader}
else:
raise raisedPyteomicsError
else:
headerInfo = parser(fastaHeader)
return headerInfo | Parses a fasta header and returns extracted information in a dictionary.
Unless a custom parser is specified, a ``Pyteomics`` function is used, which
provides parsers for the formats of UniProtKB, UniRef, UniParc and UniMES
(UniProt Metagenomic and Environmental Sequences), described at
`www.uniprot.org <http://www.uniprot.org/help/fasta-headers>_`.
:param fastaHeader: str, protein entry header from a fasta file
:param parser: is a function that takes a fastaHeader string and returns a
dictionary, containing at least the key "id". If None the parser
function from pyteomics ``pyteomics.fasta.parse()`` is used.
:param forceId: bool, if True and no id can be extracted from the fasta
header the whole header sequence is used as a protein id instead of
raising an exception.
:returns: dict, describing a fasta header. Minimally contains an 'id' key. |
def _idFromHeaderInfo(headerInfo, isDecoy, decoyTag):
proteinId = headerInfo['id']
if isDecoy:
proteinId = ''.join((decoyTag, proteinId))
return proteinId | Generates a protein id from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein id.
:param headerInfo: dict, must contain a key "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein id |
def _nameFromHeaderInfo(headerInfo, isDecoy, decoyTag):
if 'name' in headerInfo:
proteinName = headerInfo['name']
else:
proteinName = headerInfo['id']
if isDecoy:
proteinName = ''.join((decoyTag, proteinName))
return proteinName | Generates a protein name from headerInfo. If "isDecoy" is True, the
"decoyTag" is added to beginning of the generated protein name.
:param headerInfo: dict, must contain a key "name" or "id"
:param isDecoy: bool, determines if the "decoyTag" is added or not.
:param decoyTag: str, a tag that identifies decoy / reverse protein entries.
:returns: str, protein name |
def _addProtein(self, proteinId, proteinName, sequence, fastaHeader,
headerInfo, isDecoy=False, isContaminant=False):
proteinEntry = ProteinEntry(
proteinId, proteinName, sequence, fastaHeader, headerInfo,
isDecoy=isDecoy, isContaminant=isContaminant
)
self.proteins[proteinEntry.id] = proteinEntry | #TODO |
def _addPeptide(self, sequence, proteinId, digestInfo):
stdSequence = self.getStdSequence(sequence)
if stdSequence not in self.peptides:
self.peptides[stdSequence] = PeptideEntry(
stdSequence, mc=digestInfo['missedCleavage']
)
if sequence not in self.peptides:
self.peptides[sequence] = self.peptides[stdSequence]
if proteinId not in self.peptides[stdSequence].proteins:
#FUTURE: peptide can appear at multiple positions per protein.
#peptideEntry.addSource(proteinId, startPos, endPos)
self.peptides[stdSequence].proteins.add(proteinId)
self.peptides[stdSequence].proteinPositions[proteinId] = (
digestInfo['startPos'], digestInfo['endPos']
)
self.proteins[proteinId].peptides.add(sequence) | Add a peptide to the protein database.
:param sequence: str, amino acid sequence
:param proteinId: str, proteinId
:param digestInfo: dict, contains information about the in silico digest
must contain the keys 'missedCleavage', 'startPos' and 'endPos' |
def options(self, parser, env):
super(LeakDetectorPlugin, self).options(parser, env)
parser.add_option("--leak-detector-level", action="store",
default=env.get('NOSE_LEAK_DETECTOR_LEVEL'),
dest="leak_detector_level",
help="Level at which to detect leaks and report memory deltas "
"(0=None, 1=Dir, 2=Module, 3=TestCaseClass, 4=Test)")
parser.add_option("--leak-detector-report-delta", action="store_true",
default=env.get('NOSE_LEAK_DETECTOR_REPORT_DELTA'),
dest="leak_detector_report_delta",
help="")
parser.add_option("--leak-detector-patch-mock", action="store_true",
default=env.get('NOSE_LEAK_DETECTOR_PATCH_MOCK', True),
dest="leak_detector_patch_mock",
help="")
parser.add_option("--leak-detector-add-traceback", action="store_true",
default=env.get('NOSE_LEAK_DETECTOR_SAVE_TRACEBACK', False),
dest="leak_detector_save_traceback",
help="")
parser.add_option("--leak-detector-ignore-pattern", action="append",
default=(list(filter(operator.truth,
env.get('NOSE_LEAK_DETECTOR_IGNORE_PATTERNS',
'').split(','))) or
['NOSE_LEAK_DETECTOR_IGNORE']),
dest="leak_detector_ignore_patterns",
help="") | Add options to command line. |
def configure(self, options, conf):
super(LeakDetectorPlugin, self).configure(options, conf)
if options.leak_detector_level:
self.reporting_level = int(options.leak_detector_level)
self.report_delta = options.leak_detector_report_delta
self.patch_mock = options.leak_detector_patch_mock
self.ignore_patterns = options.leak_detector_ignore_patterns
self.save_traceback = options.leak_detector_save_traceback
self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False)) | Configure plugin. |
def bind(self, instance, auto=False):
methods = [
(m, cls.__dict__[m])
for cls in inspect.getmro(type(instance))
for m in cls.__dict__ if inspect.isfunction(cls.__dict__[m])
]
try:
deps_of_endpoints = [(method_ptr, self.entrypoint_deps(method_ptr))
for (method_name, method_ptr) in methods]
for (method_ptr, method_deps) in deps_of_endpoints:
if len(method_deps) > 0:
method_ptr(instance, **method_deps)
except KeyError:
pass
if auto and instance not in self.current_scope.get_auto_bind_list():
self.current_scope.auto_bind(instance)
return instance | Bind deps to instance
:param instance:
:param auto: follow DI updates and refresh bindings when something new is registered
:return: |
def map_dict(self, dict_entity):
self.dict_entity = dict_entity
Entity.map(self, self.dict_entity) | map dict_entity to current instance(self) |
def as_dict(self):
odict = OrderedDict()
for name in self._order:
attr_value = getattr(self, name)
if isinstance(attr_value, List):
_list = []
for item in attr_value:
_list.append((item.as_dict() if isinstance(item, Entity) else item))
odict[name] = _list
elif isinstance(attr_value, Entity):
odict[name] = attr_value.as_dict()
else:
odict[name] = getattr(self, name)
return odict | create a dict based on class attributes |
def map(cls, dict_entity):
for key, value in dict_entity.items():
if hasattr(cls, key):
if isinstance(value, list):
_list = getattr(cls, key)
if isinstance(_list.expected_type, list):
for _dict in value:
_list.append(cls.map(_list.typeof(), _dict))
elif isinstance(value, dict):
attr = getattr(cls, key)
instance = attr.expected_type()
Entity.map(instance, value)
setattr(cls, key, instance)
else:
setattr(cls, key, value)
else:
setattr(cls, key, value) | method used recursively to map a dict onto an Entity instance
def writeParams(rawfilepath, outputpath, isolationWindow, coElute=0):
paramText = generateParams(rawfilepath, outputpath, isolationWindow,
coElute)
filename, fileext = os.path.splitext(os.path.basename(rawfilepath))
paramPath = aux.joinpath(outputpath, filename+'.pparse.para')
with open(paramPath, 'wb') as openfile:
openfile.write(paramText)
return paramPath | Generate and write a pParse parameter file.
:param rawfilepath: location of the thermo ".raw" file
:param outputpath: path to the output directory of pParse
:param isolationWindow: MSn isolation window that was used for the
acquisition of the specified thermo raw file
:param coElute:
:returns: file path of the pParse parameter file |
def execute(paramPath, executable='pParse.exe'):
procArgs = [executable, paramPath]
## run it ##
proc = subprocess.Popen(procArgs, stderr=subprocess.PIPE)
## Do not wait until pParse finishes; start displaying its output immediately ##
while True:
out = proc.stderr.read(1)
if out == '' and proc.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
return proc.returncode | Execute pParse with the specified parameter file.
:param paramPath: location of the pParse parameter file
:param executable: must specify the complete file path of the pParse.exe
if its location is not in the ``PATH`` environment variable.
:returns: :func:`subprocess.Popen` return code, 0 if pParse was executed
successful |
def cleanUpPparse(outputpath, rawfilename, mgf=False):
extensions = ['csv', 'ms1', 'ms2', 'xtract']
filename, fileext = os.path.splitext(os.path.basename(rawfilename))
additionalFiles = [aux.joinpath(outputpath, 'pParsePlusLog.txt'),
aux.joinpath(outputpath, filename+'.pparse.para'),
]
for ext in extensions:
filepath = aux.joinpath(outputpath, '.'.join([filename, ext]))
if os.path.isfile(filepath):
print('Removing file: ', filepath)
os.remove(filepath)
for filepath in additionalFiles:
if os.path.isfile(filepath):
print('Removing file: ', filepath)
os.remove(filepath)
if mgf:
for _filename in os.listdir(outputpath):
_basename, _fileext = os.path.splitext(_filename)
if _fileext.lower() != '.mgf':
continue
if _basename.find(filename) != -1 and _basename != filename:
filepath = aux.joinpath(outputpath, _filename)
print('Removing file: ', filepath)
os.remove(filepath) | Delete temporary files generated by pparse, including the filetypes
".csv", ".ms1", ".ms2", ".xtract", the files "pParsePlusLog.txt" and
"pParse.para" and optionally also the ".mgf" file generated by pParse.
.. warning::
When the parameter "mgf" is set to "True" all files ending with ".mgf"
and containing the specified "filename" are deleted. This could
potentially also affect MGF files not generated by pParse.
:param outputpath: path to the output directory of pParse
:param rawfilename: filename of the thermo ".raw" file
:param mgf: bool, if True the ".mgf" file generated by pParse is also
removed |
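A hedged end-to-end sketch of the three pParse helpers above; the file paths and the executable location are hypothetical.

paramPath = writeParams('C:/data/run01.raw', 'C:/data/out', isolationWindow=2)
returncode = execute(paramPath, executable='C:/pParse/pParse.exe')
if returncode == 0:
    cleanUpPparse('C:/data/out', 'C:/data/run01.raw', mgf=False)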
def launch_server(message_handler, options):
logger = logging.getLogger(__name__)
if (options.debug):
logger.setLevel(logging.DEBUG)
if not options.monitor_port:
logger.warning(
"Monitoring not enabled. No monitor-port option defined.")
else:
threading.Thread(target=launch_monitor_server, args=(options.host, options.monitor_port, logger)).start()
# Create the server, binding to specified host on configured port
logger.info(
'Starting server on host %s port %d Python version %s.%s.%s' % ((options.host, options.port) + sys.version_info[:3]))
server = ThreadedTCPServer((options.host, options.port),
StreamHandler.create_handler(message_handler,
options.buffer_size,
logger))
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
try:
server.serve_forever()
except KeyboardInterrupt:
logger.info("Ctrl-C, exiting...")
os._exit(142) | Launch a message server
:param message_handler: The handler function to execute for each message
:param options: Application options for TCP, etc. |