def vcf2cytosure(store, institute_id, case_name, individual_id):
"""vcf2cytosure CGH file for inidividual."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
for individual in case_obj['individuals']:
if individual['individual_id'] == individual_id:
individual_obj = individual
return (individual_obj['display_name'], individual_obj['vcf2cytosure'])
def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs):
"""
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for images on a page that has two images,
each of which took 2 seconds to download, but the browser downloaded
them at the same time.
self.get_load_time(content_types=['image']) (returns 2)
self.get_load_time(content_types=['image'], asynchronous=False) (returns 4)
"""
entries = self.filter_entries(
request_type=request_type, content_type=content_type,
status_code=status_code
)
if "async" in kwargs:
asynchronous = kwargs['async']
if not asynchronous:
time = 0
for entry in entries:
time += entry['time']
return time
else:
return len(self.parser.create_asset_timeline(entries))
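A hypothetical usage sketch for the method above; har_page stands in for whatever parsed-HAR object exposes get_load_time and is not defined in this snippet:

# Parallel ("actual") load time vs. summed ("total") load time for images:
actual = har_page.get_load_time(content_type='image')                      # e.g. 2 seconds
total = har_page.get_load_time(content_type='image', asynchronous=False)   # e.g. 4 seconds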
def _invoke(self, arguments, autoescape):
"""This method is being swapped out by the async implementation."""
rv = self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
def _match_item(item, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Match items by metadata.
Note:
Metadata values are lowercased when ``normalized_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
any_all (callable): A callable to determine if any or all filters must match to match item.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Returns:
bool: True if matched, False if not.
"""
it = get_item_tags(item)
return any_all(
_match_field(
get_field(it, field), pattern, ignore_case=ignore_case, normalize_values=normalize_values
) for field, patterns in kwargs.items() for pattern in patterns
)
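A sketch of how the keyword filters are meant to be used; the file path, field names, and tag values below are made up, and the tag-reading helpers the function calls are assumed to be defined elsewhere in the module:

# Match if the 'artist' tag equals either value, ignoring case:
_match_item('song.mp3', any_all=any, ignore_case=True,
            artist=['Nina Simone', 'Billie Holiday'])
# Require every filter to match by passing any_all=all:
_match_item('song.mp3', any_all=all, artist=['Nina Simone'], genre=['Jazz'])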
def to_dict(self, prefix=None):
'''
Converts recursively the Config object into a valid dictionary.
:param prefix: A string to optionally prefix all key elements in the
returned dictionary.
'''
conf_obj = dict(self)
return self.__dictify__(conf_obj, prefix)
async def set_max_ch_setpoint(self, temperature,
timeout=OTGW_DEFAULT_TIMEOUT):
"""
Set the maximum central heating setpoint. This command is only
available with boilers that support this function.
Return the newly accepted setpoint, or None on failure.
This method is a coroutine
"""
cmd = OTGW_CMD_SET_MAX
status = {}
ret = await self._wait_for_cmd(cmd, temperature, timeout)
if ret is None:
return
ret = float(ret)
status[DATA_MAX_CH_SETPOINT] = ret
self._update_status(status)
return ret
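A minimal usage sketch; gateway is a placeholder for an already-connected object exposing the coroutine above:

async def apply_max_setpoint(gateway):
    # None means the command failed or the boiler rejected the value.
    new_max = await gateway.set_max_ch_setpoint(60.5)
    if new_max is None:
        print("Boiler rejected the new maximum CH setpoint")
    else:
        print("Accepted maximum CH setpoint:", new_max)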
def show_tip(self, point, tip, wrapped_tiplines):
""" Attempts to show the specified tip at the current cursor location.
"""
# Don't attempt to show it if it's already visible and the text
# to be displayed is the same as the one displayed before.
if self.isVisible():
if self.tip == tip:
return True
else:
self.hide()
# Attempt to find the cursor position at which to show the call tip.
text_edit = self._text_edit
cursor = text_edit.textCursor()
search_pos = cursor.position() - 1
self._start_position, _ = self._find_parenthesis(search_pos,
forward=False)
if self._start_position == -1:
return False
if self.hide_timer_on:
self._hide_timer.stop()
# Logic to decide how much time to show the calltip depending
# on the amount of text present
if len(wrapped_tiplines) == 1:
args = wrapped_tiplines[0].split('(')[1]
nargs = len(args.split(','))
if nargs == 1:
hide_time = 1400
elif nargs == 2:
hide_time = 1600
else:
hide_time = 1800
elif len(wrapped_tiplines) == 2:
args1 = wrapped_tiplines[1].strip()
nargs1 = len(args1.split(','))
if nargs1 == 1:
hide_time = 2500
else:
hide_time = 2800
else:
hide_time = 3500
self._hide_timer.start(hide_time, self)
# Set the text and resize the widget accordingly.
self.tip = tip
self.setText(tip)
self.resize(self.sizeHint())
# Locate and show the widget. Place the tip below the current line
# unless it would be off the screen. In that case, decide the best
# location based trying to minimize the area that goes off-screen.
padding = 3 # Distance in pixels between cursor bounds and tip box.
cursor_rect = text_edit.cursorRect(cursor)
screen_rect = self.app.desktop().screenGeometry(text_edit)
point.setY(point.y() + padding)
tip_height = self.size().height()
tip_width = self.size().width()
vertical = 'bottom'
horizontal = 'Right'
if point.y() + tip_height > screen_rect.height() + screen_rect.y():
point_ = text_edit.mapToGlobal(cursor_rect.topRight())
# If tip is still off screen, check if point is in top or bottom
# half of screen.
if point_.y() - tip_height < padding:
# If point is in upper half of screen, show tip below it.
# otherwise above it.
if 2*point.y() < screen_rect.height():
vertical = 'bottom'
else:
vertical = 'top'
else:
vertical = 'top'
if point.x() + tip_width > screen_rect.width() + screen_rect.x():
point_ = text_edit.mapToGlobal(cursor_rect.topRight())
# If tip is still off-screen, check if point is in the right or
# left half of the screen.
if point_.x() - tip_width < padding:
if 2*point.x() < screen_rect.width():
horizontal = 'Right'
else:
horizontal = 'Left'
else:
horizontal = 'Left'
pos = getattr(cursor_rect, '%s%s' %(vertical, horizontal))
adjusted_point = text_edit.mapToGlobal(pos())
if vertical == 'top':
point.setY(adjusted_point.y() - tip_height - padding)
if horizontal == 'Left':
point.setX(adjusted_point.x() - tip_width - padding)
self.move(point)
self.show()
return True
def absent(name, auth=None):
'''
Ensure a subnet does not exist
name
Name of the subnet
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
__salt__['neutronng.setup_clouds'](auth)
subnet = __salt__['neutronng.subnet_get'](name=name)
if subnet:
if __opts__['test'] is True:
ret['result'] = None
ret['changes'] = {'id': subnet.id}
ret['comment'] = 'Subnet will be deleted.'
return ret
__salt__['neutronng.subnet_delete'](name=subnet)
ret['changes']['id'] = name
ret['comment'] = 'Deleted subnet'
return ret
def as_select(self, _items=None, **kwargs):
"""Render the field as a `<select>` element.
:param **kwargs:
Named parameters used to generate the HTML attributes of each item.
It follows the same rules as `get_html_attrs`
"""
attrs = self.extra.copy()
attrs.update(kwargs)
attrs['name'] = self.name
if not self.optional:
attrs['required'] = True
html = [u'<select %s>' % get_html_attrs(attrs)]
values = [self.to_string(**attrs)] or []
items = _items or self.items
for item in items:
if isinstance(item, list):
html.extend(self._render_optgroup(item, values))
else:
html.append(self._render_option(item, values))
html.append(u'</select>')
return Markup('\n'.join(html))
def get_thunk_env(self, k):
"""Return the thunk AND environment for validating it in for the given key.
There might be different envs in case the thunk comes from a different (composed) tuple. If the thunk needs its
environment bound on retrieval, that will be done here.
"""
if k not in self.__items:
raise exceptions.EvaluationError('Unknown key: %r in tuple %r' % (k, self))
x = self.__items[k]
env = self.env(self)
# Bind this to the tuple's parent environment
if isinstance(x, framework.BindableThunk):
return x.bind(self.__parent_env), env
return x, env
def update_module(self, modname, underlined=None):
"""Update the cache for global names in `modname` module
`modname` is the name of a module.
"""
try:
pymodule = self.project.get_module(modname)
self._add_names(pymodule, modname, underlined)
except exceptions.ModuleNotFoundError:
pass
def check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV, xdirect,
verb):
r"""Check the model: depth and corresponding layer parameters.
This check-function is called from one of the modelling routines in
:mod:`model`. Consult these modelling routines for a detailed description
of the input parameters.
Parameters
----------
depth : list
Absolute layer interfaces z (m); #depth = #res - 1
(excluding +/- infinity).
res : array_like
Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.
aniso : array_like
Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
epermH, epermV : array_like
Relative horizontal/vertical electric permittivities
epsilon_h/epsilon_v (-);
#epermH = #epermV = #res.
mpermH, mpermV : array_like
Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-);
#mpermH = #mpermV = #res.
xdirect : bool, optional
If True and source and receiver are in the same layer, the direct field
is calculated analytically in the frequency domain, if False it is
calculated in the wavenumber domain.
verb : {0, 1, 2, 3, 4}
Level of verbosity.
Returns
-------
depth : array
Depths of layer interfaces, adds -infty at beginning if not present.
res : array
As input, checked for size.
aniso : array
As input, checked for size. If None, defaults to an array of ones.
epermH, epermV : array_like
As input, checked for size. If None, defaults to an array of ones.
mpermH, mpermV : array_like
As input, checked for size. If None, defaults to an array of ones.
isfullspace : bool
If True, the model is a fullspace (res, aniso, epermH, epermV, mpermM,
and mpermV are in all layers the same).
"""
global _min_res
# Check depth
if depth is None:
depth = []
depth = _check_var(depth, float, 1, 'depth')
# Add -infinity at the beginning
# => The top-layer (-infinity to first interface) is layer 0.
if depth.size == 0:
depth = np.array([-np.infty, ])
elif depth[0] != -np.infty:
depth = np.insert(depth, 0, -np.infty)
# Ensure depth is increasing
if np.any(depth[1:] - depth[:-1] < 0):
print('* ERROR :: <depth> must be increasing;' +
' <depth> provided: ' + _strvar(depth))
raise ValueError('depth')
# Check if the user provided a model for etaH/etaV/zetaH/zetaV
if isinstance(res, dict):
res_dict, res = res, res['res']
else:
res_dict = False
# Cast and check resistivity
res = _check_var(res, float, 1, 'res', depth.shape)
# => min_res can be set with utils.set_min
res = _check_min(res, _min_res, 'Resistivities', 'Ohm.m', verb)
# Check optional parameters anisotropy, electric permittivity, and magnetic
# permeability
def check_inp(var, name, min_val):
r"""Param-check function. Default to ones if not provided"""
if var is None:
return np.ones(depth.size)
else:
param = _check_var(var, float, 1, name, depth.shape)
if name == 'aniso': # Convert aniso into vertical resistivity
param = param**2*res
param = _check_min(param, min_val, 'Parameter ' + name, '', verb)
if name == 'aniso': # Convert vert. resistivity back to aniso
param = np.sqrt(param/res)
return param
# => min_res can be set with utils.set_min
aniso = check_inp(aniso, 'aniso', _min_res)
epermH = check_inp(epermH, 'epermH', 0.0)
epermV = check_inp(epermV, 'epermV', 0.0)
mpermH = check_inp(mpermH, 'mpermH', 0.0)
mpermV = check_inp(mpermV, 'mpermV', 0.0)
# Print model parameters
if verb > 2:
print(" depth [m] : ", _strvar(depth[1:]))
print(" res [Ohm.m] : ", _strvar(res))
print(" aniso [-] : ", _strvar(aniso))
print(" epermH [-] : ", _strvar(epermH))
print(" epermV [-] : ", _strvar(epermV))
print(" mpermH [-] : ", _strvar(mpermH))
print(" mpermV [-] : ", _strvar(mpermV))
# Check if medium is a homogeneous full-space. If that is the case, the
# EM-field is computed analytically directly in the frequency-domain.
# Note: Also a stack of layers with the same material parameters is treated
# as a homogeneous full-space.
isores = (res - res[0] == 0).all()*(aniso - aniso[0] == 0).all()
isoep = (epermH - epermH[0] == 0).all()*(epermV - epermV[0] == 0).all()
isomp = (mpermH - mpermH[0] == 0).all()*(mpermV - mpermV[0] == 0).all()
isfullspace = isores*isoep*isomp
# Check parameters of user-provided parameters
if res_dict:
# Switch off fullspace-option
isfullspace = False
# Loop over key, value pair and check
for key, value in res_dict.items():
if key not in ['res', 'func_eta', 'func_zeta']:
res_dict[key] = check_inp(value, key, None)
# Put res back
res_dict['res'] = res
# store res_dict back to res
res = res_dict
# Print fullspace info
if verb > 2 and isfullspace:
if xdirect:
print("\n> MODEL IS A FULLSPACE; returning analytical " +
"frequency-domain solution")
else:
print("\n> MODEL IS A FULLSPACE")
# Print xdirect info
if verb > 2:
if xdirect is None:
print(" direct field : Not calculated (secondary field)")
elif xdirect:
print(" direct field : Calc. in frequency domain")
else:
print(" direct field : Calc. in wavenumber domain")
return depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace
def option(self, section, option):
""" Returns the value of the option """
if self.config.has_section(section):
if self.config.has_option(section, option):
return (True, self.config.get(section, option))
return (False, 'Option: ' + option + ' does not exist')
return (False, 'Section: ' + section + ' does not exist')
def get_input_info_dict(self, signature=None):
"""Describes the inputs required by a signature.
Args:
signature: A string with the signature to get inputs information for.
If None, the default signature is used if defined.
Returns:
The result of ModuleSpec.get_input_info_dict() for the given signature,
and the graph variant selected by `tags` when this Module was initialized.
Raises:
KeyError: if there is no such signature.
"""
return self._spec.get_input_info_dict(signature=signature, tags=self._tags)
def parse_footnote(document, container, elem):
"Parse the footnote element."
_rid = elem.attrib[_name('{{{w}}}id')]
foot = doc.Footnote(_rid)
container.elements.append(foot)
def get_relationship(self, from_object, relation_type):
"""return a relation ship or None
"""
for rel in self.relationships.get(relation_type, ()):
if rel.from_object is from_object:
return rel
raise KeyError(relation_type)
def compact(paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
sep = os.path.sep
short_paths = set()
for path in sorted(paths, key=len):
should_skip = any(
path.startswith(shortpath.rstrip("*")) and
path[len(shortpath.rstrip("*").rstrip(sep))] == sep
for shortpath in short_paths
)
if not should_skip:
short_paths.add(path)
return short_paths
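A quick illustration of the pruning behaviour described in the docstring, assuming the compact function above is in scope:

paths = {'/a/path/', '/a/path/to/a/file.txt', '/b/other.txt'}
compact(paths)
# -> {'/a/path/', '/b/other.txt'}; the nested file is dropped because
#    '/a/path/' already covers it.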
def _prepare_find(cls, *args, **kw):
"""Execute a find and return the resulting queryset using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring.
"""
cls, collection, query, options = cls._prepare_query(
cls.FIND_MAPPING,
cls.FIND_OPTIONS,
*args,
**kw
)
if 'await' in options:
raise TypeError("Await is hard-deprecated as reserved keyword in Python 3.7, use wait instead.")
if 'cursor_type' in options and {'tail', 'wait'} & set(options):
raise TypeError("Can not combine cursor_type and tail/wait arguments.")
elif options.pop('tail', False):
options['cursor_type'] = CursorType.TAILABLE_AWAIT if options.pop('wait', True) else CursorType.TAILABLE
elif 'wait' in options:
raise TypeError("Wait option only applies to tailing cursors.")
modifiers = options.get('modifiers', dict())
if 'max_time_ms' in options:
modifiers['$maxTimeMS'] = options.pop('max_time_ms')
if modifiers:
options['modifiers'] = modifiers
return cls, collection, query, options
def _ensure_someone_took_responsability(self, state, _responses):
'''
Called as a callback for sending *died* notifications to all the
partners.
Check if someone has offered to restart the agent.
If yes, setup expiration call and wait for report.
If no, initiate doing it on our own.
'''
if not state.so_took_responsability:
self.debug('Noone took responsability, I will try to restart '
'%r agent myself', state.factory.descriptor_type)
return self._restart_yourself()
def put(self, id):
"""
Update a resource by bson ObjectId
:returns: json string representation
:rtype: JSON
"""
try:
#Async update flow
object_ = json_util.loads(self.request.body)
toa = self.request.headers.get("Caesium-TOA", None)
obj_check = yield self.client.find_one_by_id(id)
if not obj_check:
self.raise_error(404, "Resource not found: %s" % id)
self.finish()
return
if toa:
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings, master_id=id)
revision_id = yield stack.push(object_, int(toa), meta=self._get_meta_data())
if isinstance(revision_id, str):
self.set_header("Caesium-TOA", toa)
#We add the id of the original request, because we don't want to infer this
#On the client side, as the state of the client code could change easily
#We want this request to return with the originating ID as well.
object_["id"] = id
self.return_resource(object_)
else:
self.raise_error(404, "Revision not scheduled for object: %s" % id)
else:
if object_.get("_id"):
del object_["_id"]
response = yield self.client.update(id, object_)
if response.get("updatedExisting"):
object_ = yield self.client.find_one_by_id(id)
self.return_resource(object_)
else:
self.raise_error(404, "Resource not found: %s" % id)
except ValidationError as vex:
self.logger.error("%s validation error" % self.object_name, vex)
self.raise_error(400, "Your %s cannot be updated because it is missing required fields, see docs" % self.object_name)
except ValueError as ex:
self.raise_error(400, "Invalid JSON Body, check formatting. %s" % ex[0])
except InvalidId as ex:
self.raise_error(message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
self.raise_error()
def flavor_delete(self, flavor_id): # pylint: disable=C0103
'''
Delete a flavor
'''
nt_ks = self.compute_conn
nt_ks.flavors.delete(flavor_id)
return 'Flavor deleted: {0}'.format(flavor_id)
def show_network(kwargs=None, call=None):
'''
Show the details of an existing network.
CLI Example:
.. code-block:: bash
salt-cloud -f show_network gce name=mynet
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_network function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'Must specify name of network.'
)
return False
conn = get_conn()
return _expand_item(conn.ex_get_network(kwargs['name']))
def maintain_leases(self):
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
"""
while self._manager.is_active and not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
p99 = self._manager.ack_histogram.percentile(99)
_LOGGER.debug("The current p99 value is %d seconds.", p99)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are well beyond max lease time. This
# ensures that in the event of a badly behaving actor, we can
# drop messages and allow Pub/Sub to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size)
for ack_id, item in six.iteritems(leased_messages)
if item.added_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a streaming pull request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
self._manager.dispatcher.modify_ack_deadline(
[requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids]
)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
snooze = random.uniform(0.0, p99 * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
def p_IndexTypes(self, p):
"""IndexTypes : IndexTypes ',' IndexType
| IndexType"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]]
async def rcpt(
self,
recipient: str,
options: Iterable[str] = None,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
"""
Send an SMTP RCPT command, which specifies a single recipient for
the message. This command is sent once per recipient and must be
preceded by 'MAIL'.
:raises SMTPRecipientRefused: on unexpected server response code
"""
await self._ehlo_or_helo_if_needed()
if options is None:
options = []
options_bytes = [option.encode("ascii") for option in options]
to = b"TO:" + quote_address(recipient).encode("ascii")
async with self._command_lock:
response = await self.execute_command(
b"RCPT", to, *options_bytes, timeout=timeout
)
success_codes = (SMTPStatus.completed, SMTPStatus.will_forward)
if response.code not in success_codes:
raise SMTPRecipientRefused(response.code, response.message, recipient)
return response
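A hedged sketch of the ordering the docstring requires (MAIL before RCPT); smtp is assumed to be a connected client instance exposing these coroutines, and the addresses are placeholders:

async def add_recipient(smtp, address):
    # RCPT must follow a MAIL command, per the docstring above.
    await smtp.mail("sender@example.com")
    return await smtp.rcpt(address)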
def load(fileobj):
"""Load the submission from a file-like object
:param fileobj: File-like object
:return: the loaded submission
"""
with gzip.GzipFile(fileobj=fileobj, mode='r') as z:
submission = Submission(metadata=json.loads(z.readline()))
for line in z:
token_id, token = json.loads(line)
submission['tokens'][token_id] = token
return submission
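A round-trip sketch of the on-disk layout the loader above expects: a gzipped stream whose first line is the metadata JSON and whose remaining lines are [token_id, token] JSON pairs. The Submission class itself is assumed to come from the same package:

import gzip, io, json

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='w') as z:
    z.write((json.dumps({'name': 'demo-submission'}) + '\n').encode())
    z.write((json.dumps(['tok-1', {'text': 'hello'}]) + '\n').encode())
buf.seek(0)
submission = load(buf)
# submission['tokens']['tok-1'] should now hold {'text': 'hello'}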
def _gei8(ins):
""" Compares & pops top 2 operands out of the stack, and checks
if the 1st operand >= 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
8 bit signed version
"""
output = _8bit_oper(ins.quad[2], ins.quad[3], reversed_=True)
output.append('call __LEI8')
output.append('push af')
REQUIRES.add('lei8.asm')
return output
def update_subtask(client, subtask_id, revision, title=None, completed=None):
'''
Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : int(revision),
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json()
def _handle_func_decl(self, node, scope, ctxt, stream):
"""Handle FuncDecl nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling func decl")
if node.args is not None:
# could just call _handle_param_list directly...
for param in node.args.params:
# see the check in _handle_decl for how this is kept from
# being added to the local context/scope
param.is_func_param = True
params = self._handle_node(node.args, scope, ctxt, stream)
else:
params = functions.ParamListDef([], node.coord)
func_type = self._handle_node(node.type, scope, ctxt, stream)
func = functions.Function(func_type, params, scope)
return func
def dinfdistdown(np, ang, fel, slp, src, statsm, distm, edgecontamination, wg, dist,
workingdir=None, mpiexedir=None, exedir=None,
log_file=None, runtime_file=None, hostfile=None):
"""Run D-inf distance down to stream"""
in_params = {'-m': '%s %s' % (TauDEM.convertstatsmethod(statsm),
TauDEM.convertdistmethod(distm))}
if StringClass.string_match(edgecontamination, 'false') or edgecontamination is False:
in_params['-nc'] = None
fname = TauDEM.func_name('dinfdistdown')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-fel': fel, '-slp': slp, '-ang': ang, '-src': src, '-wg': wg},
workingdir,
in_params,
{'-dd': dist},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file})
def update_volumes(self):
"""Update list of EBS Volumes for the account / region
Returns:
`None`
"""
self.log.debug('Updating EBSVolumes for {}/{}'.format(self.account.account_name, self.region))
ec2 = self.session.resource('ec2', region_name=self.region)
try:
existing_volumes = EBSVolume.get_all(self.account, self.region)
volumes = {x.id: x for x in ec2.volumes.all()}
for data in list(volumes.values()):
if data.id in existing_volumes:
vol = existing_volumes[data.id]
if vol.update(data):
self.log.debug('Changed detected for EBSVolume {}/{}/{}'.format(
self.account.account_name,
self.region,
vol.resource.resource_id
))
else:
properties = {
'create_time': data.create_time,
'encrypted': data.encrypted,
'iops': data.iops or 0,
'kms_key_id': data.kms_key_id,
'size': data.size,
'state': data.state,
'snapshot_id': data.snapshot_id,
'volume_type': data.volume_type,
'attachments': sorted([x['InstanceId'] for x in data.attachments])
}
tags = {t['Key']: t['Value'] for t in data.tags or {}}
vol = EBSVolume.create(
data.id,
account_id=self.account.account_id,
location=self.region,
properties=properties,
tags=tags
)
self.log.debug('Added new EBSVolume {}/{}/{}'.format(
self.account.account_name,
self.region,
vol.resource.resource_id
))
db.session.commit()
vk = set(list(volumes.keys()))
evk = set(list(existing_volumes.keys()))
try:
for volumeID in evk - vk:
db.session.delete(existing_volumes[volumeID].resource)
self.log.debug('Deleted EBSVolume {}/{}/{}'.format(
volumeID,
self.account.account_name,
self.region
))
db.session.commit()
except:
self.log.exception('Failed removing deleted volumes')
db.session.rollback()
finally:
del ec2
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
def all_to_public(self):
"""Sets all members, types and executables in this module as public as
long as it doesn't already have the 'private' modifier.
"""
if "private" not in self.modifiers:
def public_collection(attribute):
for key in self.collection(attribute):
if key not in self.publics:
self.publics[key.lower()] = 1
public_collection("members")
public_collection("types")
public_collection("executables")
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ""
for c in self.contracts:
(name, _inheritance, _var, func_summaries, _modif_summaries) = c.get_summary()
txt += blue("\n+ Contract %s\n"%name)
# (c_name, f_name, visi, _, _, _, _, _) in func_summaries
public = [(elem[0], (elem[1], elem[2]) ) for elem in func_summaries]
collect = collections.defaultdict(list)
for a,b in public:
collect[a].append(b)
public = list(collect.items())
for contract, functions in public:
txt += blue(" - From {}\n".format(contract))
functions = sorted(functions)
for (function, visi) in functions:
if visi in ['external', 'public']:
txt += green(" - {} ({})\n".format(function, visi))
for (function, visi) in functions:
if visi in ['internal', 'private']:
txt += magenta(" - {} ({})\n".format(function, visi))
for (function, visi) in functions:
if visi not in ['external', 'public', 'internal', 'private']:
txt += " - {} ({})\n".format(function, visi)
self.info(txt)
def get_handler_classes(self):
"""Return the list of handlers to use when receiving RPC requests."""
handler_classes = [import_string(handler_cls) for handler_cls in settings.MODERNRPC_HANDLERS]
if self.protocol == ALL:
return handler_classes
else:
return [cls for cls in handler_classes if cls.protocol in ensure_sequence(self.protocol)]
def set_shared_config(cls, config):
""" This allows to set a config that will be used when calling
``shared_blockchain_instance`` and allows to define the configuration
without requiring to actually create an instance
"""
assert isinstance(config, dict)
cls._sharedInstance.config.update(config)
# if one is already set, delete
if cls._sharedInstance.instance:
cls._sharedInstance.instance = None
def _create_simulated_annealing_expander(schedule):
'''
Creates an expander that has a random chance to choose a node that is worse
than the current (first) node, but that chance decreases with time.
'''
def _expander(fringe, iteration, viewer):
T = schedule(iteration)
current = fringe[0]
neighbors = current.expand(local_search=True)
if viewer:
viewer.event('expanded', [current], [neighbors])
if neighbors:
succ = random.choice(neighbors)
delta_e = succ.value - current.value
if delta_e > 0 or random.random() < math.exp(delta_e / T):
fringe.pop()
fringe.append(succ)
if viewer:
viewer.event('chosen_node', succ)
return _expander
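The schedule argument is expected to map an iteration number to a temperature T; the decaying schedule below is only illustrative and is clamped away from zero to avoid dividing by zero:

import math

expander = _create_simulated_annealing_expander(
    lambda iteration: max(1e-3, math.exp(-0.005 * iteration))
)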
def generateRecords(self, records):
"""Generate multiple records. Refer to definition for generateRecord"""
if self.verbosity>0: print('Generating', len(records), 'records...')
for record in records:
self.generateRecord(record)
def calculate_checksum_on_stream(
f,
algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM,
chunk_size=DEFAULT_CHUNK_SIZE,
):
"""Calculate the checksum of a stream.
Args:
f: file-like object
Only requirement is a ``read()`` method that returns ``bytes``.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
chunk_size : int
Number of bytes to read from the file and add to the checksum at a time.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
"""
checksum_calc = get_checksum_calculator_by_dataone_designator(algorithm)
while True:
chunk = f.read(chunk_size)
if not chunk:
break
checksum_calc.update(chunk)
return checksum_calc.hexdigest()
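A usage sketch; the only thing the argument needs is a bytes-returning read(), so an in-memory buffer works (the DataONE helper modules used above are assumed to be importable):

import io

digest = calculate_checksum_on_stream(io.BytesIO(b"example data"), algorithm="MD5")
# digest is a 32-character hex string for MD5 (40 characters for SHA-1)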
def _issubclass_Union_rec(subclass, superclass, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check):
"""Helper for _issubclass_Union.
"""
# this function is partly based on code from typing module 3.5.2.2
super_args = get_Union_params(superclass)
if super_args is None:
return is_Union(subclass)
elif is_Union(subclass):
sub_args = get_Union_params(subclass)
if sub_args is None:
return False
return all(_issubclass(c, superclass, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check) \
for c in (sub_args))
elif isinstance(subclass, TypeVar):
if subclass in super_args:
return True
if subclass.__constraints__:
return _issubclass(Union[subclass.__constraints__],
superclass, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check)
return False
else:
return any(_issubclass(subclass, t, bound_Generic, bound_typevars,
bound_typevars_readonly, follow_fwd_refs, _recursion_check) \
for t in super_args)
def compare_version(value):
""" Determines if the provided version value compares with program version.
`value`
Version comparison string (e.g. ==1.0, <=1.0, >1.1)
Supported operators:
<, <=, ==, >, >=
"""
# extract parts from value
import re
res = re.match(r'(<|<=|==|>|>=)(\d{1,2}\.\d{1,2}(\.\d{1,2})?)$',
str(value).strip())
if not res:
return False
operator, value, _ = res.groups()
# break into pieces
value = tuple(int(x) for x in str(value).split('.'))
if len(value) < 3:
value += (0,)
version = __version_info__
if operator in ('<', '<='):
if version < value:
return True
if operator != '<=':
return False
elif operator in ('>=', '>'):
if version > value:
return True
if operator != '>=':
return False
return value == version
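For illustration, assuming the module-level __version_info__ is (1, 0, 0):

compare_version('>=0.9')   # True:  (1, 0, 0) > (0, 9, 0)
compare_version('==1.0')   # True:  '1.0' is padded to (1, 0, 0)
compare_version('<1.0')    # False: the version is not below itself
compare_version('oops')    # False: unparseable comparison strings are rejected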
def format_float(value): # not used
"""Modified form of the 'g' format specifier.
"""
string = "{:g}".format(value).replace("e+", "e")
string = re.sub("e(-?)0*(\d+)", r"e\1\2", string)
return string
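The effect of the tweak is easiest to see on a few sample values (assuming re is imported at module level, as the body requires):

format_float(0.0000454)   # '4.54e-5'  ('e-05' loses its zero padding)
format_float(1200000.0)   # '1.2e6'    ('e+06' collapses to 'e6')
format_float(3.5)         # '3.5'      (no exponent, unchanged)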
def get_assessment_offered_bank_session(self, proxy):
"""Gets the session for retrieving offered assessments to bank mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentOfferedBankSession) - an
``AssessmentOfferedBankSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_offered_bank()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_bank()`` is ``true``.*
"""
if not self.supports_assessment_offered_bank():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssessmentOfferedBankSession(proxy=proxy, runtime=self._runtime)
def all(cls, include_deactivated=False):
"""
Get all sub-resources
:param include_deactivated: Include deactivated resources in response
:returns: list of SubResource instances
:raises: SocketError, CouchException
"""
if include_deactivated:
resources = yield cls.view.get(include_docs=True)
else:
resources = yield cls.active_view.get(include_docs=True)
result = []
for resource in resources['rows']:
parent = cls.parent_resource(**resource['doc'])
result.append(cls(parent=parent, **resource['value']))
raise Return(result)
def config_logging(debug):
"""Config logging level output output"""
if debug:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.debug("Debug mode activated")
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
def update_from(
self,
obj=None,
yaml_env=None,
yaml_file=None,
json_env=None,
json_file=None,
env_namespace=None,
):
"""
Update dict from several sources at once.
This is simply a convenience method that can be used as an alternative
to making several calls to the various
:meth:`~ConfigLoader.update_from_*` methods.
Updates will be applied in the order that the parameters are listed
below, with each source taking precedence over those before it.
:arg obj: Object or name of object, e.g. 'myapp.settings'.
:arg yaml_env: Name of an environment variable containing the path to
a YAML config file.
:arg yaml_file: Path to a YAML config file, or a file-like object.
:arg json_env: Name of an environment variable containing the path to
a JSON config file.
:arg json_file: Path to a JSON config file, or a file-like object.
:arg env_namespace: Common prefix of the environment variables
containing the desired config.
"""
if obj:
self.update_from_object(obj)
if yaml_env:
self.update_from_yaml_env(yaml_env)
if yaml_file:
self.update_from_yaml_file(yaml_file)
if json_env:
self.update_from_json_env(json_env)
if json_file:
self.update_from_json_file(json_file)
if env_namespace:
self.update_from_env_namespace(env_namespace)
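A sketch of combining several sources in one call; the loader object, module path, file name, and environment variable names here are placeholders:

loader = ConfigLoader()
loader.update_from(
    obj='myapp.settings',        # applied first (lowest precedence)
    yaml_file='defaults.yaml',
    json_env='MYAPP_JSON_CONFIG',
    env_namespace='MYAPP',       # applied last (highest precedence)
)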
def parse_file_args(file_obj,
file_type,
resolver=None,
**kwargs):
"""
Given a file_obj and a file_type try to turn them into a file-like
object and a lowercase string of file type.
Parameters
-----------
file_obj: str: if string represents a file path, returns
-------------------------------------------
file_obj: an 'rb' opened file object of the path
file_type: the extension from the file path
str: if string is NOT a path, but has JSON-like special characters
-------------------------------------------
file_obj: the same string passed as file_obj
file_type: set to 'json'
str: string is a valid URL
-------------------------------------------
file_obj: an open 'rb' file object with retrieved data
file_type: from the extension
str: string is not an existing path or a JSON-like object
-------------------------------------------
ValueError will be raised as we can't do anything with input
file like object: we cannot grab information on file_type automatically
-------------------------------------------
ValueError will be raised if file_type is None
file_obj: same as input
file_type: same as input
other object: like a shapely.geometry.Polygon, etc:
-------------------------------------------
file_obj: same as input
file_type: if None initially, set to the class name
(in lower case), otherwise passed through
file_type: str, type of file and handled according to above
Returns
-----------
file_obj: loadable object
file_type: str, lower case of the type of file (eg 'stl', 'dae', etc)
metadata: dict, any metadata
opened: bool, did we open the file or not
"""
metadata = {}
opened = False
if ('metadata' in kwargs and
isinstance(kwargs['metadata'], dict)):
metadata.update(kwargs['metadata'])
if util.is_file(file_obj) and file_type is None:
raise ValueError('file_type must be set when passing file objects!')
if util.is_string(file_obj):
try:
# os.path.isfile will return False incorrectly
# if we don't give it an absolute path
file_path = os.path.expanduser(file_obj)
file_path = os.path.abspath(file_path)
exists = os.path.isfile(file_path)
except BaseException:
exists = False
# file obj is a string which exists on the filesystem
if exists:
# if not passed create a resolver to find other files
if resolver is None:
resolver = visual.resolvers.FilePathResolver(file_path)
# save the file name and path to metadata
metadata['file_path'] = file_path
metadata['file_name'] = os.path.basename(file_obj)
# if file_obj is a path that exists use extension as file_type
if file_type is None:
file_type = util.split_extension(
file_path,
special=['tar.gz', 'tar.bz2'])
# actually open the file
file_obj = open(file_path, 'rb')
opened = True
else:
if '{' in file_obj:
# if a dict bracket is in the string, it's probably a straight
# JSON
file_type = 'json'
elif 'https://' in file_obj or 'http://' in file_obj:
# we've been passed a URL, warn to use explicit function
# and don't do network calls via magical pipeline
raise ValueError(
'use load_remote to load URL: {}'.format(file_obj))
elif file_type is None:
raise ValueError('string is not a file: {}'.format(file_obj))
if file_type is None:
file_type = file_obj.__class__.__name__
if util.is_string(file_type) and '.' in file_type:
# if someone has passed the whole filename as the file_type
# use the file extension as the file_type
if 'file_path' not in metadata:
metadata['file_path'] = file_type
metadata['file_name'] = os.path.basename(file_type)
file_type = util.split_extension(file_type)
if resolver is None and os.path.exists(file_type):
resolver = visual.resolvers.FilePathResolver(file_type)
# all our stored extensions reference in lower case
file_type = file_type.lower()
# if we still have no resolver try using file_obj name
if (resolver is None and
hasattr(file_obj, 'name') and
len(file_obj.name) > 0):
resolver = visual.resolvers.FilePathResolver(file_obj.name)
return file_obj, file_type, metadata, opened, resolver
def sequence_content_plot (self):
""" Create the epic HTML for the FastQC sequence content heatmap """
# Prep the data
data = OrderedDict()
for s_name in sorted(self.fastqc_data.keys()):
try:
data[s_name] = {self.avg_bp_from_range(d['base']): d for d in self.fastqc_data[s_name]['per_base_sequence_content']}
except KeyError:
pass
# Old versions of FastQC give counts instead of percentages
for b in data[s_name]:
tot = sum([data[s_name][b][base] for base in ['a','c','t','g']])
if tot == 100.0:
break
else:
for base in ['a','c','t','g']:
data[s_name][b][base] = (float(data[s_name][b][base])/float(tot)) * 100.0
if len(data) == 0:
log.debug('sequence_content not found in FastQC reports')
return None
html = '''<div id="fastqc_per_base_sequence_content_plot_div">
<div class="alert alert-info">
<span class="glyphicon glyphicon-hand-up"></span>
Click a sample row to see a line plot for that dataset.
</div>
<h5><span class="s_name text-primary"><span class="glyphicon glyphicon-info-sign"></span> Rollover for sample name</span></h5>
<button id="fastqc_per_base_sequence_content_export_btn"><span class="glyphicon glyphicon-download-alt"></span> Export Plot</button>
<div class="fastqc_seq_heatmap_key">
Position: <span id="fastqc_seq_heatmap_key_pos">-</span>
<div><span id="fastqc_seq_heatmap_key_t"> %T: <span>-</span></span></div>
<div><span id="fastqc_seq_heatmap_key_c"> %C: <span>-</span></span></div>
<div><span id="fastqc_seq_heatmap_key_a"> %A: <span>-</span></span></div>
<div><span id="fastqc_seq_heatmap_key_g"> %G: <span>-</span></span></div>
</div>
<div id="fastqc_seq_heatmap_div" class="fastqc-overlay-plot">
<div id="fastqc_per_base_sequence_content_plot" class="hc-plot has-custom-export">
<canvas id="fastqc_seq_heatmap" height="100%" width="800px" style="width:100%;"></canvas>
</div>
</div>
<div class="clearfix"></div>
</div>
<script type="text/javascript">
fastqc_seq_content_data = {d};
$(function () {{ fastqc_seq_content_heatmap(); }});
</script>'''.format(d=json.dumps(data))
self.add_section (
name = 'Per Base Sequence Content',
anchor = 'fastqc_per_base_sequence_content',
description = 'The proportion of each base position for which each of the four normal DNA bases has been called.',
helptext = '''
To enable multiple samples to be shown in a single plot, the base composition data
is shown as a heatmap. The colours represent the balance between the four bases:
an even distribution should give an even muddy brown colour. Hover over the plot
to see the percentage of the four bases under the cursor.
**To see the data as a line plot, as in the original FastQC graph, click on a sample track.**
From the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/4%20Per%20Base%20Sequence%20Content.html):
_Per Base Sequence Content plots out the proportion of each base position in a
file for which each of the four normal DNA bases has been called._
_In a random library you would expect that there would be little to no difference
between the different bases of a sequence run, so the lines in this plot should
run parallel with each other. The relative amount of each base should reflect
the overall amount of these bases in your genome, but in any case they should
not be hugely imbalanced from each other._
_It's worth noting that some types of library will always produce biased sequence
composition, normally at the start of the read. Libraries produced by priming
using random hexamers (including nearly all RNA-Seq libraries) and those which
were fragmented using transposases inherit an intrinsic bias in the positions
at which reads start. This bias does not concern an absolute sequence, but instead
provides enrichement of a number of different K-mers at the 5' end of the reads.
Whilst this is a true technical bias, it isn't something which can be corrected
by trimming and in most cases doesn't seem to adversely affect the downstream
analysis._
''',
content = html
)
async def emitters(self, key, value):
"""
Single-channel emitter
"""
while True:
await asyncio.sleep(value['schedule'].total_seconds())
await self.channel_layer.send(key, {
"type": value['type'],
"message": value['message']
})
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def dump_simulation(simulation, directory):
"""
Write simulation data to directory, so that it can be restored later.
"""
parent_directory = os.path.abspath(os.path.join(directory, os.pardir))
if not os.path.isdir(parent_directory): # To deal with reforms
os.mkdir(parent_directory)
if not os.path.isdir(directory):
os.mkdir(directory)
if os.listdir(directory):
raise ValueError("Directory '{}' is not empty".format(directory))
entities_dump_dir = os.path.join(directory, "__entities__")
os.mkdir(entities_dump_dir)
for entity in simulation.populations.values():
# Dump entity structure
_dump_entity(entity, entities_dump_dir)
# Dump variable values
for holder in entity._holders.values():
_dump_holder(holder, directory)
def received_char_count(self, count):
'''Set received char count limit
Args:
count: the amount of received characters you want to stop at.
Returns:
None
Raises:
None
'''
n1 = count/100
n2 = (count-(n1*100))/10
n3 = (count-((n1*100)+(n2*10)))
self.send('^PC'+chr(n1)+chr(n2)+chr(n3))
def requestAvatar(self, avatarId, mind, *interfaces):
"""
Create Adder avatars for any IBoxReceiver request.
"""
if IBoxReceiver in interfaces:
return (IBoxReceiver, Adder(avatarId), lambda: None)
raise NotImplementedError() | Create Adder avatars for any IBoxReceiver request. |
def sign(self, pkey, digest):
"""
Sign the certificate request with this key and digest type.
:param pkey: The private key to sign with.
:type pkey: :py:class:`PKey`
:param digest: The message digest to use.
:type digest: :py:class:`bytes`
:return: ``None``
"""
if pkey._only_public:
raise ValueError("Key has only public part")
if not pkey._initialized:
raise ValueError("Key is uninitialized")
digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
if digest_obj == _ffi.NULL:
raise ValueError("No such digest method")
sign_result = _lib.NETSCAPE_SPKI_sign(
self._spki, pkey._pkey, digest_obj
)
_openssl_assert(sign_result > 0) | Sign the certificate request with this key and digest type.
:param pkey: The private key to sign with.
:type pkey: :py:class:`PKey`
:param digest: The message digest to use.
:type digest: :py:class:`bytes`
:return: ``None`` |
def __patch_write_method(tango_device_klass, attribute):
"""
    Checks if the method given by its name for the given DeviceImpl
class has the correct signature. If a read/write method doesn't
have a parameter (the traditional Attribute), then the method is
wrapped into another method which has correct parameter definition
to make it work.
:param tango_device_klass: a DeviceImpl class
:type tango_device_klass: class
:param attribute: the attribute data information
:type attribute: AttrData
"""
write_method = getattr(attribute, "fset", None)
if write_method:
method_name = "__write_{0}__".format(attribute.attr_name)
attribute.write_method_name = method_name
else:
method_name = attribute.write_method_name
write_method = getattr(tango_device_klass, method_name)
write_attr = _get_wrapped_write_method(attribute, write_method)
    setattr(tango_device_klass, method_name, write_attr) | Checks if the method given by its name for the given DeviceImpl
class has the correct signature. If a read/write method doesn't
have a parameter (the traditional Attribute), then the method is
wrapped into another method which has correct parameter definition
to make it work.
:param tango_device_klass: a DeviceImpl class
:type tango_device_klass: class
:param attribute: the attribute data information
:type attribute: AttrData |
def fix_text_segment(text,
*,
fix_entities='auto',
remove_terminal_escapes=True,
fix_encoding=True,
fix_latin_ligatures=True,
fix_character_width=True,
uncurl_quotes=True,
fix_line_breaks=True,
fix_surrogates=True,
remove_control_chars=True,
remove_bom=True,
normalization='NFC'):
"""
Apply fixes to text in a single chunk. This could be a line of text
within a larger run of `fix_text`, or it could be a larger amount
of text that you are certain is in a consistent encoding.
See `fix_text` for a description of the parameters.
"""
if isinstance(text, bytes):
raise UnicodeError(fixes.BYTES_ERROR_TEXT)
if fix_entities == 'auto' and '<' in text and '>' in text:
fix_entities = False
while True:
origtext = text
if remove_terminal_escapes:
text = fixes.remove_terminal_escapes(text)
if fix_encoding:
text = fixes.fix_encoding(text)
if fix_entities:
text = fixes.unescape_html(text)
if fix_latin_ligatures:
text = fixes.fix_latin_ligatures(text)
if fix_character_width:
text = fixes.fix_character_width(text)
if uncurl_quotes:
text = fixes.uncurl_quotes(text)
if fix_line_breaks:
text = fixes.fix_line_breaks(text)
if fix_surrogates:
text = fixes.fix_surrogates(text)
if remove_control_chars:
text = fixes.remove_control_chars(text)
if remove_bom and not remove_control_chars:
# Skip this step if we've already done `remove_control_chars`,
# because it would be redundant.
text = fixes.remove_bom(text)
if normalization is not None:
text = unicodedata.normalize(normalization, text)
if text == origtext:
return text | Apply fixes to text in a single chunk. This could be a line of text
within a larger run of `fix_text`, or it could be a larger amount
of text that you are certain is in a consistent encoding.
See `fix_text` for a description of the parameters. |
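A small illustrative call, assuming this is ftfy's fix_text_segment with its fixes module available; with the default options the Latin ligature is expanded and the curly quotes are straightened, so the expected result is the plain string shown in the comment.

sample = 'The ﬁle is called “notes.txt”'
print(fix_text_segment(sample))  # expected: The file is called "notes.txt"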
def _validate_certificate_url(self, cert_url):
# type: (str) -> None
"""Validate the URL containing the certificate chain.
This method validates if the URL provided adheres to the format
mentioned here :
https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-a-web-service.html#cert-verify-signature-certificate-url
:param cert_url: URL for retrieving certificate chain
:type cert_url: str
:raises: :py:class:`VerificationException` if the URL is invalid
"""
parsed_url = urlparse(cert_url)
protocol = parsed_url.scheme
if protocol.lower() != CERT_CHAIN_URL_PROTOCOL.lower():
raise VerificationException(
"Signature Certificate URL has invalid protocol: {}. "
"Expecting {}".format(protocol, CERT_CHAIN_URL_PROTOCOL))
hostname = parsed_url.hostname
if (hostname is None or
hostname.lower() != CERT_CHAIN_URL_HOSTNAME.lower()):
raise VerificationException(
"Signature Certificate URL has invalid hostname: {}. "
"Expecting {}".format(hostname, CERT_CHAIN_URL_HOSTNAME))
normalized_path = os.path.normpath(parsed_url.path)
if not normalized_path.startswith(CERT_CHAIN_URL_STARTPATH):
raise VerificationException(
"Signature Certificate URL has invalid path: {}. "
"Expecting the path to start with {}".format(
normalized_path, CERT_CHAIN_URL_STARTPATH))
port = parsed_url.port
if port is not None and port != CERT_CHAIN_URL_PORT:
raise VerificationException(
"Signature Certificate URL has invalid port: {}. "
"Expecting {}".format(str(port), str(CERT_CHAIN_URL_PORT))) | Validate the URL containing the certificate chain.
This method validates if the URL provided adheres to the format
mentioned here :
https://developer.amazon.com/docs/custom-skills/host-a-custom-skill-as-a-web-service.html#cert-verify-signature-certificate-url
:param cert_url: URL for retrieving certificate chain
:type cert_url: str
:raises: :py:class:`VerificationException` if the URL is invalid |
def get_jwt_value(self, request):
"""
This function has been overloaded and it returns the proper JWT
auth string.
Parameters
----------
request: HttpRequest
        This is the request that is received by Django in the view.
Returns
-------
str
This returns the extracted JWT auth token string.
"""
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from rest_framework import exceptions
auth = self.get_authorization(request).split()
auth_header_prefix = self.prefix.lower() or ''
if not auth:
if self.cookie:
return request.COOKIES.get(self.cookie)
return None
if auth_header_prefix is None or len(auth_header_prefix) < 1:
auth.append('')
auth.reverse()
if smart_text(auth[0].lower()) != auth_header_prefix:
return None
if len(auth) == 1:
msg = _('Invalid Authorization header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid Authorization header. Credentials string '
'should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
return auth[1] | This function has been overloaded and it returns the proper JWT
auth string.
Parameters
----------
request: HttpRequest
        This is the request that is received by Django in the view.
Returns
-------
str
This returns the extracted JWT auth token string. |
def delete(filething):
""" delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file.
"""
t = OggSpeex(filething)
filething.fileobj.seek(0)
t.delete(filething) | delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file. |
def from_github(user_repo_pair, file='plashfile'):
"build and use a file (default 'plashfile') from github repo"
from urllib.request import urlopen
url = 'https://raw.githubusercontent.com/{}/master/{}'.format(
user_repo_pair, file)
with utils.catch_and_die([Exception], debug=url):
resp = urlopen(url)
plashstr = resp.read()
return utils.run_write_read(['plash', 'build', '--eval-stdin'],
plashstr).decode().rstrip('\n') | build and use a file (default 'plashfile') from github repo |
def get_trend(timeseries):
"""
Using the values returned by get_timeseries(), compare the current
    Metric value with its previous period's value
:param timeseries: data returned from the get_timeseries() method
:returns: the last period value and relative change
"""
last = timeseries['value'][len(timeseries['value']) - 1]
prev = timeseries['value'][len(timeseries['value']) - 2]
trend = last - prev
trend_percentage = None
if last == 0:
if prev > 0:
trend_percentage = -100
else:
trend_percentage = 0
else:
trend_percentage = int((trend / last) * 100)
return (last, trend_percentage) | Using the values returned by get_timeseries(), compare the current
    Metric value with its previous period's value
:param timeseries: data returned from the get_timeseries() method
:returns: the last period value and relative change |
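A toy call with a dict shaped like the described get_timeseries() output (the numbers are made up for illustration):

timeseries = {'value': [10, 25, 40, 30]}
last, trend_percentage = get_trend(timeseries)
print(last)              # 30
print(trend_percentage)  # -33, i.e. int((30 - 40) / 30 * 100)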
def dt_month_name(x):
"""Returns the month names of a datetime sample in English.
:returns: an expression containing the month names extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.month_name
Expression = dt_month_name(date)
Length: 3 dtype: str (expression)
---------------------------------
0 October
1 February
2 November
"""
import pandas as pd
return pd.Series(_pandas_dt_fix(x)).dt.month_name().values.astype(str) | Returns the month names of a datetime sample in English.
:returns: an expression containing the month names extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.month_name
Expression = dt_month_name(date)
Length: 3 dtype: str (expression)
---------------------------------
0 October
1 February
2 November |
def badge(left_text: str, right_text: str, left_link: Optional[str] = None,
right_link: Optional[str] = None,
whole_link: Optional[str] = None, logo: Optional[str] = None,
left_color: str = '#555', right_color: str = '#007ec6',
measurer: Optional[text_measurer.TextMeasurer] = None,
embed_logo: bool = False) -> str:
"""Creates a github-style badge as an SVG image.
>>> badge(left_text='coverage', right_text='23%', right_color='red')
'<svg...</svg>'
>>> badge(left_text='build', right_text='green', right_color='green',
... whole_link="http://www.example.com/")
'<svg...</svg>'
Args:
left_text: The text that should appear on the left-hand-side of the
badge e.g. "coverage".
right_text: The text that should appear on the right-hand-side of the
badge e.g. "23%".
left_link: The URL that should be redirected to when the left-hand text
is selected.
right_link: The URL that should be redirected to when the right-hand
text is selected.
whole_link: The link that should be redirected to when the badge is
            selected. If set then left_link and right_link may not be set.
logo: A url representing a logo that will be displayed inside the
badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..."
left_color: The color of the part of the badge containing the left-hand
            text. Can be a valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
right_color: The color of the part of the badge containing the
            right-hand text. Can be a valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
measurer: A text_measurer.TextMeasurer that can be used to measure the
width of left_text and right_text.
embed_logo: If True then embed the logo image directly in the badge.
            This can prevent an HTTP request, and some browsers will not render
            externally referenced images. When True, `logo` must be an HTTP/HTTPS
URI or a filesystem path. Also, the `badge` call may raise an
exception if the logo cannot be loaded, is not an image, etc.
"""
if measurer is None:
measurer = (
precalculated_text_measurer.PrecalculatedTextMeasurer
.default())
if (left_link or right_link) and whole_link:
raise ValueError(
            'whole_link may not be set with left_link or right_link')
template = _JINJA2_ENVIRONMENT.get_template('badge-template-full.svg')
if logo and embed_logo:
logo = _embed_image(logo)
svg = template.render(
left_text=left_text,
right_text=right_text,
left_text_width=measurer.text_width(left_text) / 10.0,
right_text_width=measurer.text_width(right_text) / 10.0,
left_link=left_link,
right_link=right_link,
whole_link=whole_link,
logo=logo,
left_color=_NAME_TO_COLOR.get(left_color, left_color),
right_color=_NAME_TO_COLOR.get(right_color, right_color),
)
xml = minidom.parseString(svg)
_remove_blanks(xml)
xml.normalize()
return xml.documentElement.toxml() | Creates a github-style badge as an SVG image.
>>> badge(left_text='coverage', right_text='23%', right_color='red')
'<svg...</svg>'
>>> badge(left_text='build', right_text='green', right_color='green',
... whole_link="http://www.example.com/")
'<svg...</svg>'
Args:
left_text: The text that should appear on the left-hand-side of the
badge e.g. "coverage".
right_text: The text that should appear on the right-hand-side of the
badge e.g. "23%".
left_link: The URL that should be redirected to when the left-hand text
is selected.
right_link: The URL that should be redirected to when the right-hand
text is selected.
whole_link: The link that should be redirected to when the badge is
            selected. If set then left_link and right_link may not be set.
logo: A url representing a logo that will be displayed inside the
badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..."
left_color: The color of the part of the badge containing the left-hand
            text. Can be a valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
right_color: The color of the part of the badge containing the
            right-hand text. Can be a valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
measurer: A text_measurer.TextMeasurer that can be used to measure the
width of left_text and right_text.
embed_logo: If True then embed the logo image directly in the badge.
            This can prevent an HTTP request, and some browsers will not render
            externally referenced images. When True, `logo` must be an HTTP/HTTPS
URI or a filesystem path. Also, the `badge` call may raise an
exception if the logo cannot be loaded, is not an image, etc. |
def is_module_reloadable(self, module, modname):
"""Decide if a module is reloadable or not."""
if self.has_cython:
# Don't return cached inline compiled .PYX files
return False
else:
if (self.is_module_in_pathlist(module) or
self.is_module_in_namelist(modname)):
return False
else:
return True | Decide if a module is reloadable or not. |
def get_args(stream_spec, overwrite_output=False):
"""Build command-line arguments to be passed to ffmpeg."""
nodes = get_stream_spec_nodes(stream_spec)
args = []
# TODO: group nodes together, e.g. `-i somefile -r somerate`.
sorted_nodes, outgoing_edge_maps = topo_sort(nodes)
input_nodes = [node for node in sorted_nodes if isinstance(node, InputNode)]
output_nodes = [node for node in sorted_nodes if isinstance(node, OutputNode)]
global_nodes = [node for node in sorted_nodes if isinstance(node, GlobalNode)]
filter_nodes = [node for node in sorted_nodes if isinstance(node, FilterNode)]
stream_name_map = {(node, None): str(i) for i, node in enumerate(input_nodes)}
filter_arg = _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map)
args += reduce(operator.add, [_get_input_args(node) for node in input_nodes])
if filter_arg:
args += ['-filter_complex', filter_arg]
args += reduce(operator.add, [_get_output_args(node, stream_name_map) for node in output_nodes])
args += reduce(operator.add, [_get_global_args(node) for node in global_nodes], [])
if overwrite_output:
args += ['-y']
return args | Build command-line arguments to be passed to ffmpeg. |
def ids(self):
""" Returns set with all todo IDs. """
if config().identifiers() == 'text':
ids = self._id_todo_map.keys()
else:
ids = [str(i + 1) for i in range(self.count())]
return set(ids) | Returns set with all todo IDs. |
def prt_gos_flat(self, prt):
"""Print flat GO list."""
prtfmt = self.datobj.kws['fmtgo']
_go2nt = self.sortobj.grprobj.go2nt
go2nt = {go:_go2nt[go] for go in self.go2nt}
prt.write("\n{N} GO IDs:\n".format(N=len(go2nt)))
_sortby = self._get_sortgo()
for ntgo in sorted(go2nt.values(), key=_sortby):
prt.write(prtfmt.format(**ntgo._asdict())) | Print flat GO list. |
def leave_module(self, node):
"""leave module: check globals
"""
assert len(self._to_consume) == 1
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed) | leave module: check globals |
def insert_image(filename, extnum_filename, auximage, extnum_auximage):
"""Replace image in filename by another image (same size) in newimage.
Parameters
----------
filename : str
File name where the new image will be inserted.
extnum_filename : int
Extension number in filename where the new image will be
inserted. Note that the first extension is 1 (and not zero).
auximage : str
File name of the new image.
extnum_auximage : int
Extension number where the new image is located in auximage.
Note that the first extension is 1 (and not zero).
"""
# read the new image
with fits.open(auximage) as hdulist:
newimage = hdulist[extnum_auximage].data
# open the destination image
hdulist = fits.open(filename, mode='update')
oldimage_shape = hdulist[extnum_filename].data.shape
if oldimage_shape == newimage.shape:
hdulist[extnum_filename].data = newimage
hdulist.flush()
else:
print('filename shape:', oldimage_shape)
print('newimage shape:', newimage.shape)
print("ERROR: new image doesn't have the same shape")
hdulist.close() | Replace image in filename by another image (same size) in newimage.
Parameters
----------
filename : str
File name where the new image will be inserted.
extnum_filename : int
Extension number in filename where the new image will be
inserted. Note that the first extension is 1 (and not zero).
auximage : str
File name of the new image.
extnum_auximage : int
Extension number where the new image is located in auximage.
Note that the first extension is 1 (and not zero). |
def cli(obj, role, scopes, delete):
"""Add or delete role-to-permission lookup entry."""
client = obj['client']
if delete:
client.delete_perm(delete)
else:
if not role:
raise click.UsageError('Missing option "--role".')
if not scopes:
raise click.UsageError('Missing option "--scope".')
try:
perm = client.create_perm(role, scopes)
except Exception as e:
click.echo('ERROR: {}'.format(e))
sys.exit(1)
click.echo(perm.id) | Add or delete role-to-permission lookup entry. |
def tags(self):
'Return a thread local :class:`dossier.web.Tags` client.'
if self._tags is None:
config = global_config('dossier.tags')
self._tags = self.create(Tags, config=config)
return self._tags | Return a thread local :class:`dossier.web.Tags` client. |
def _get_content_type(self, content_type, filename=None):
"""Determine the content type from the current object.
The return value will be determined in order of precedence:
- The value passed in to this method (if not :data:`None`)
- The value stored on the current blob
- The default value ('application/octet-stream')
:type content_type: str
:param content_type: (Optional) type of content.
:type filename: str
:param filename: (Optional) The name of the file where the content
is stored.
:rtype: str
:returns: Type of content gathered from the object.
"""
if content_type is None:
content_type = self.content_type
if content_type is None and filename is not None:
content_type, _ = mimetypes.guess_type(filename)
if content_type is None:
content_type = _DEFAULT_CONTENT_TYPE
return content_type | Determine the content type from the current object.
The return value will be determined in order of precedence:
- The value passed in to this method (if not :data:`None`)
- The value stored on the current blob
- The default value ('application/octet-stream')
:type content_type: str
:param content_type: (Optional) type of content.
:type filename: str
:param filename: (Optional) The name of the file where the content
is stored.
:rtype: str
:returns: Type of content gathered from the object. |
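The same precedence order, sketched as a free function for clarity; guess_content_type and its parameters are hypothetical names used only for this illustration, not part of the library API.

import mimetypes

_DEFAULT_CONTENT_TYPE = 'application/octet-stream'

def guess_content_type(explicit=None, stored=None, filename=None):
    # Precedence: explicit argument, then the value stored on the blob,
    # then a guess from the filename, then the default.
    content_type = explicit if explicit is not None else stored
    if content_type is None and filename is not None:
        content_type, _ = mimetypes.guess_type(filename)
    return content_type if content_type is not None else _DEFAULT_CONTENT_TYPE

print(guess_content_type(filename='report.pdf'))  # application/pdf
print(guess_content_type())                       # application/octet-stream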
def copy_memory(self, address, size):
"""
Copy the bytes from address to address+size into Unicorn
Used primarily for copying memory maps
:param address: start of buffer to copy
:param size: How many bytes to copy
"""
start_time = time.time()
map_bytes = self._cpu._raw_read(address, size)
self._emu.mem_write(address, map_bytes)
if time.time() - start_time > 3:
logger.info(f"Copying {hr_size(size)} map at {hex(address)} took {time.time() - start_time} seconds") | Copy the bytes from address to address+size into Unicorn
Used primarily for copying memory maps
:param address: start of buffer to copy
:param size: How many bytes to copy |
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
if self.marker_table is None:
self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')
engine = self.engine
with engine.begin() as con:
metadata = sqlalchemy.MetaData()
if not con.dialect.has_table(con, self.marker_table):
self.marker_table_bound = sqlalchemy.Table(
self.marker_table, metadata,
sqlalchemy.Column("update_id", sqlalchemy.String(128), primary_key=True),
sqlalchemy.Column("target_table", sqlalchemy.String(128)),
sqlalchemy.Column("inserted", sqlalchemy.DateTime, default=datetime.datetime.now()))
metadata.create_all(engine)
else:
metadata.reflect(only=[self.marker_table], bind=engine)
self.marker_table_bound = metadata.tables[self.marker_table] | Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset. |
def list2key(self, keyList):
"""
Convert a list of (``QtModifier``, ``QtCore.Qt.Key_*``) tuples
into a key sequence.
If no error is raised, then the list was accepted.
|Args|
* ``keyList`` (**list**): eg. (QtCore.Qt.ControlModifier,
QtCore.Qt.Key_F).
|Returns|
**None**
|Raises|
* **QtmacsKeysequenceError** if the provided ``keysequence``
could not be parsed.
"""
for keyCombo in keyList:
if not (isinstance(keyCombo, list) or isinstance(keyCombo, tuple)):
msg = ('Format of native key list is invalid.'
' Must be a list/tuple of list/tuples.')
raise QtmacsKeysequenceError(msg)
if len(keyCombo) != 2:
msg = 'Format of native key list is invalid.'
msg += 'Each element must have exactly 2 entries.'
raise QtmacsKeysequenceError(msg)
# Construct a new QKeyEvent. Note that the general
# modifier (ie. <ctrl> and <alt>) still need to be
# combined with shift modifier (which is never a general
# modifier) if the key demands it. This combination is a
# simple "or" on the QFlags structure. Also note that the
# "text" argument is omitted because Qt is smart enough to
# fill it internally. Furthermore, the QKeyEvent method
# will raise an error if the provided key sequence makes
# no sense, but to avoid raising an exception inside an
# exception the QtmacsKeysequenceError is not raised
# inside the exception block.
try:
key_event = QtGui.QKeyEvent(QtCore.QEvent.KeyPress,
keyCombo[1], keyCombo[0])
err = False
except TypeError:
err = True
if err:
msg = ('Format of native key list is invalid. '
'Must be a list/tuple of list/tuples.')
raise QtmacsKeysequenceError(msg)
else:
self.appendQKeyEvent(key_event) | Convert a list of (``QtModifier``, ``QtCore.Qt.Key_*``) tuples
into a key sequence.
If no error is raised, then the list was accepted.
|Args|
* ``keyList`` (**list**): eg. (QtCore.Qt.ControlModifier,
QtCore.Qt.Key_F).
|Returns|
**None**
|Raises|
* **QtmacsKeysequenceError** if the provided ``keysequence``
could not be parsed. |
def rev_reg_id2cred_def_id__tag(rr_id: str) -> (str, str):
"""
Given a revocation registry identifier, return its corresponding credential definition identifier and
(stringified int) tag.
:param rr_id: revocation registry identifier
:return: credential definition identifier and tag
"""
return (
':'.join(rr_id.split(':')[2:-2]), # rev reg id comprises (prefixes):<cred_def_id>:(suffixes)
str(rr_id.split(':')[-1]) # tag is last token
) | Given a revocation registry identifier, return its corresponding credential definition identifier and
(stringified int) tag.
:param rr_id: revocation registry identifier
:return: credential definition identifier and tag |
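A worked example with a made-up Hyperledger Indy style identifier; the DID, schema sequence number and tag values are hypothetical, only the colon-separated layout matters:

rr_id = 'LjgpST2rjsoxYegQDRm7EL:4:LjgpST2rjsoxYegQDRm7EL:3:CL:20:tag1:CL_ACCUM:0'
cred_def_id, tag = rev_reg_id2cred_def_id__tag(rr_id)
print(cred_def_id)  # LjgpST2rjsoxYegQDRm7EL:3:CL:20:tag1
print(tag)          # 0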
def export_widgets(self_or_cls, obj, filename, fmt=None, template=None,
json=False, json_path='', **kwargs):
"""
Render and export object as a widget to a static HTML
file. Allows supplying a custom template formatting string
with fields to interpolate 'js', 'css' and the main 'html'
containing the widget. Also provides options to export widget
data to a json file in the supplied json_path (defaults to
current path).
"""
if fmt not in list(self_or_cls.widgets.keys())+['auto', None]:
raise ValueError("Renderer.export_widget may only export "
"registered widget types.")
if not isinstance(obj, NdWidget):
if not isinstance(filename, (BytesIO, StringIO)):
filedir = os.path.dirname(filename)
current_path = os.getcwd()
html_path = os.path.abspath(filedir)
rel_path = os.path.relpath(html_path, current_path)
save_path = os.path.join(rel_path, json_path)
else:
save_path = json_path
kwargs['json_save_path'] = save_path
kwargs['json_load_path'] = json_path
widget = self_or_cls.get_widget(obj, fmt, **kwargs)
else:
widget = obj
html = self_or_cls.static_html(widget, fmt, template)
encoded = self_or_cls.encode((html, {'mime_type': 'text/html'}))
if isinstance(filename, (BytesIO, StringIO)):
filename.write(encoded)
filename.seek(0)
else:
with open(filename, 'wb') as f:
f.write(encoded) | Render and export object as a widget to a static HTML
file. Allows supplying a custom template formatting string
with fields to interpolate 'js', 'css' and the main 'html'
containing the widget. Also provides options to export widget
data to a json file in the supplied json_path (defaults to
current path). |
def node_transmit(node_id):
"""Transmit to another node.
The sender's node id must be specified in the url.
As with node.transmit() the key parameters are what and to_whom. However,
the values these accept are more limited than for the back end due to the
necessity of serialization.
If what and to_whom are not specified they will default to None.
Alternatively you can pass an int (e.g. '5') or a class name (e.g. 'Info' or
'Agent'). Passing an int will get that info/node, passing a class name will
pass the class. Note that if the class you are specifying is a custom class
it will need to be added to the dictionary of known_classes in your
experiment code.
You may also pass the values property1, property2, property3, property4,
property5 and details. If passed this will fill in the relevant values of
the transmissions created with the values you specified.
For example, to transmit all infos of type Meme to the node with id 10:
dallinger.post(
"/node/" + my_node_id + "/transmit",
{what: "Meme",
to_whom: 10}
);
"""
exp = Experiment(session)
what = request_parameter(parameter="what", optional=True)
to_whom = request_parameter(parameter="to_whom", optional=True)
# check the node exists
node = models.Node.query.get(node_id)
if node is None:
return error_response(error_type="/node/transmit, node does not exist")
# create what
if what is not None:
try:
what = int(what)
what = models.Info.query.get(what)
if what is None:
return error_response(
error_type="/node/transmit POST, info does not exist",
participant=node.participant,
)
except Exception:
try:
what = exp.known_classes[what]
except KeyError:
msg = "/node/transmit POST, {} not in experiment.known_classes"
return error_response(
error_type=msg.format(what), participant=node.participant
)
# create to_whom
if to_whom is not None:
try:
to_whom = int(to_whom)
to_whom = models.Node.query.get(to_whom)
if to_whom is None:
return error_response(
error_type="/node/transmit POST, recipient Node does not exist",
participant=node.participant,
)
except Exception:
try:
to_whom = exp.known_classes[to_whom]
except KeyError:
msg = "/node/transmit POST, {} not in experiment.known_classes"
return error_response(
error_type=msg.format(to_whom), participant=node.participant
)
# execute the request
try:
transmissions = node.transmit(what=what, to_whom=to_whom)
for t in transmissions:
assign_properties(t)
session.commit()
# ping the experiment
exp.transmission_post_request(node=node, transmissions=transmissions)
session.commit()
except Exception:
return error_response(
error_type="/node/transmit POST, server error", participant=node.participant
)
# return the data
return success_response(transmissions=[t.__json__() for t in transmissions]) | Transmit to another node.
The sender's node id must be specified in the url.
As with node.transmit() the key parameters are what and to_whom. However,
the values these accept are more limited than for the back end due to the
necessity of serialization.
If what and to_whom are not specified they will default to None.
Alternatively you can pass an int (e.g. '5') or a class name (e.g. 'Info' or
'Agent'). Passing an int will get that info/node, passing a class name will
pass the class. Note that if the class you are specifying is a custom class
it will need to be added to the dictionary of known_classes in your
experiment code.
You may also pass the values property1, property2, property3, property4,
property5 and details. If passed this will fill in the relevant values of
the transmissions created with the values you specified.
For example, to transmit all infos of type Meme to the node with id 10:
dallinger.post(
"/node/" + my_node_id + "/transmit",
{what: "Meme",
to_whom: 10}
); |
def save_files(self, nodes):
"""
Saves user defined files using give nodes.
:param nodes: Nodes.
:type nodes: list
:return: Method success.
:rtype: bool
"""
metrics = {"Opened": 0, "Cached": 0}
for node in nodes:
file = node.file
if self.__container.get_editor(file):
if self.__container.save_file(file):
metrics["Opened"] += 1
self.__uncache(file)
else:
cache_data = self.__files_cache.get_content(file)
if cache_data is None:
                LOGGER.warning(
                    "!> {0} | '{1}' file doesn't exist in files cache!".format(self.__class__.__name__, file))
continue
if cache_data.document:
file_handle = File(file)
file_handle.content = [cache_data.document.toPlainText().toUtf8()]
if file_handle.write():
metrics["Cached"] += 1
self.__uncache(file)
else:
                LOGGER.warning(
                    "!> {0} | '{1}' file document doesn't exist in files cache!".format(self.__class__.__name__,
                                                                                        file))
self.__container.engine.notifications_manager.notify(
"{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!".format(self.__class__.__name__,
metrics["Opened"],
                                                                                metrics["Cached"])) | Saves user defined files using given nodes.
:param nodes: Nodes.
:type nodes: list
:return: Method success.
:rtype: bool |
def add_extensions(self, extensions):
"""
Add extensions to the certificate signing request.
:param extensions: The X.509 extensions to add.
:type extensions: iterable of :py:class:`X509Extension`
:return: ``None``
"""
stack = _lib.sk_X509_EXTENSION_new_null()
_openssl_assert(stack != _ffi.NULL)
stack = _ffi.gc(stack, _lib.sk_X509_EXTENSION_free)
for ext in extensions:
if not isinstance(ext, X509Extension):
raise ValueError("One of the elements is not an X509Extension")
# TODO push can fail (here and elsewhere)
_lib.sk_X509_EXTENSION_push(stack, ext._extension)
add_result = _lib.X509_REQ_add_extensions(self._req, stack)
_openssl_assert(add_result == 1) | Add extensions to the certificate signing request.
:param extensions: The X.509 extensions to add.
:type extensions: iterable of :py:class:`X509Extension`
:return: ``None`` |
def build_response(content, code=200):
"""Build response, add headers"""
response = make_response( jsonify(content), content['code'] )
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = \
'Origin, X-Requested-With, Content-Type, Accept, Authorization'
return response | Build response, add headers |
def save_conf(fn=None):
"""Save current configuration to file as YAML
If not given, uses current config directory, ``confdir``, which can be
set by INTAKE_CONF_DIR.
"""
if fn is None:
fn = cfile()
try:
os.makedirs(os.path.dirname(fn))
except (OSError, IOError):
pass
with open(fn, 'w') as f:
yaml.dump(conf, f) | Save current configuration to file as YAML
If not given, uses current config directory, ``confdir``, which can be
set by INTAKE_CONF_DIR. |
def pause(self, duration_seconds=0, force=False, force_regen_rospec=False):
"""Pause an inventory operation for a set amount of time."""
logger.debug('pause(%s)', duration_seconds)
if self.state != LLRPClient.STATE_INVENTORYING:
if not force:
logger.info('ignoring pause(); not inventorying (state==%s)',
self.getStateName(self.state))
return None
else:
logger.info('forcing pause()')
if duration_seconds:
logger.info('pausing for %s seconds', duration_seconds)
rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec']
self.sendMessage({
'DISABLE_ROSPEC': {
'Ver': 1,
'Type': 25,
'ID': 0,
'ROSpecID': rospec['ROSpecID']
}})
self.setState(LLRPClient.STATE_PAUSING)
d = defer.Deferred()
d.addCallback(self._setState_wrapper, LLRPClient.STATE_PAUSED)
d.addErrback(self.complain, 'pause() failed')
self._deferreds['DISABLE_ROSPEC_RESPONSE'].append(d)
if duration_seconds > 0:
startAgain = task.deferLater(reactor, duration_seconds,
lambda: None)
startAgain.addCallback(lambda _: self.resume())
return d | Pause an inventory operation for a set amount of time. |
def center(self):
'''
Center point of the ellipse, equidistant from foci, Point class.\n
Defaults to the origin.
'''
try:
return self._center
except AttributeError:
pass
self._center = Point()
return self._center | Center point of the ellipse, equidistant from foci, Point class.\n
Defaults to the origin. |
def write(self, notifications):
"Connect to the APNS service and send notifications"
if not self.factory:
log.msg('APNSService write (connecting)')
server, port = ((APNS_SERVER_SANDBOX_HOSTNAME
if self.environment == 'sandbox'
else APNS_SERVER_HOSTNAME), APNS_SERVER_PORT)
self.factory = self.clientProtocolFactory()
context = self.getContextFactory()
reactor.connectSSL(server, port, self.factory, context)
client = self.factory.clientProtocol
if client:
return client.sendMessage(notifications)
else:
d = self.factory.deferred
timeout = reactor.callLater(self.timeout,
lambda: d.called or d.errback(
Exception('Notification timed out after %i seconds' % self.timeout)))
def cancel_timeout(r):
try: timeout.cancel()
except: pass
return r
d.addCallback(lambda p: p.sendMessage(notifications))
d.addErrback(log_errback('apns-service-write'))
d.addBoth(cancel_timeout)
return d | Connect to the APNS service and send notifications |
def redraw_canvas(self):
""" Parses the Xdot attributes of all graph components and adds
the components to a new canvas.
"""
from xdot_parser import XdotAttrParser
xdot_parser = XdotAttrParser()
canvas = self._component_default()
for node in self.nodes:
components = xdot_parser.parse_xdot_data( node._draw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( node._ldraw_ )
canvas.add( *components )
for edge in self.edges:
components = xdot_parser.parse_xdot_data( edge._draw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._ldraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._hdraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._tdraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._hldraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._tldraw_ )
canvas.add( *components )
self.component = canvas
self.vp.request_redraw() | Parses the Xdot attributes of all graph components and adds
the components to a new canvas. |
def _load_candidate_wrapper(self, source_file=None, source_config=None, dest_file=None,
file_system=None):
"""
Transfer file to remote device for either merge or replace operations
Returns (return_status, msg)
"""
return_status = False
msg = ''
if source_file and source_config:
raise ValueError("Cannot simultaneously set source_file and source_config")
if source_config:
if self.inline_transfer:
(return_status, msg) = self._inline_tcl_xfer(source_config=source_config,
dest_file=dest_file,
file_system=file_system)
else:
# Use SCP
tmp_file = self._create_tmp_file(source_config)
(return_status, msg) = self._scp_file(source_file=tmp_file, dest_file=dest_file,
file_system=file_system)
if tmp_file and os.path.isfile(tmp_file):
os.remove(tmp_file)
if source_file:
if self.inline_transfer:
(return_status, msg) = self._inline_tcl_xfer(source_file=source_file,
dest_file=dest_file,
file_system=file_system)
else:
(return_status, msg) = self._scp_file(source_file=source_file, dest_file=dest_file,
file_system=file_system)
if not return_status:
if msg == '':
msg = "Transfer to remote device failed"
return (return_status, msg) | Transfer file to remote device for either merge or replace operations
Returns (return_status, msg) |
def add_info(self, entry):
"""Parse and store the info field"""
entry = entry[8:-1]
info = entry.split(',')
if len(info) < 4:
return False
for v in info:
key, value = v.split('=', 1)
if key == 'ID':
self.info[value] = {}
id_ = value
elif key == 'Number':
if value == 'A' or value == 'G':
value = -1
self.info[id_]['num_entries'] = value
elif key == 'Type':
self.info[id_]['type'] = self.type_map[value]
elif key == 'Description':
self.info[id_]['description'] = value
if len(info) > 4:
self.info[id_]['description'] += '; '.join(info[4:])
break
return True | Parse and store the info field |
def parse_gtf(
filepath_or_buffer,
chunksize=1024 * 1024,
features=None,
intern_columns=["seqname", "source", "strand", "frame"],
fix_quotes_columns=["attribute"]):
"""
Parameters
----------
filepath_or_buffer : str or buffer object
chunksize : int
features : set or None
Drop entries which aren't one of these features
intern_columns : list
These columns are short strings which should be interned
fix_quotes_columns : list
Most commonly the 'attribute' column which had broken quotes on
some Ensembl release GTF files.
"""
if features is not None:
features = set(features)
dataframes = []
def parse_frame(s):
if s == ".":
return 0
else:
return int(s)
# GTF columns:
# 1) seqname: str ("1", "X", "chrX", etc...)
# 2) source : str
# Different versions of GTF use second column as of:
# (a) gene biotype
# (b) transcript biotype
# (c) the annotation source
# See: https://www.biostars.org/p/120306/#120321
# 3) feature : str ("gene", "transcript", &c)
# 4) start : int
# 5) end : int
# 6) score : float or "."
# 7) strand : "+", "-", or "."
# 8) frame : 0, 1, 2 or "."
# 9) attribute : key-value pairs separated by semicolons
# (see more complete description in docstring at top of file)
chunk_iterator = pd.read_csv(
filepath_or_buffer,
sep="\t",
comment="#",
names=REQUIRED_COLUMNS,
skipinitialspace=True,
skip_blank_lines=True,
error_bad_lines=True,
warn_bad_lines=True,
chunksize=chunksize,
engine="c",
dtype={
"start": np.int64,
"end": np.int64,
"score": np.float32,
"seqname": str,
},
na_values=".",
converters={"frame": parse_frame})
dataframes = []
try:
for df in chunk_iterator:
for intern_column in intern_columns:
df[intern_column] = [intern(str(s)) for s in df[intern_column]]
# compare feature strings after interning
if features is not None:
df = df[df["feature"].isin(features)]
for fix_quotes_column in fix_quotes_columns:
# Catch mistaken semicolons by replacing "xyz;" with "xyz"
# Required to do this since the Ensembl GTF for Ensembl
# release 78 has mistakes such as:
# gene_name = "PRAMEF6;" transcript_name = "PRAMEF6;-201"
df[fix_quotes_column] = [
s.replace(';\"', '\"').replace(";-", "-")
for s in df[fix_quotes_column]
]
dataframes.append(df)
except Exception as e:
raise ParsingError(str(e))
df = pd.concat(dataframes)
return df | Parameters
----------
filepath_or_buffer : str or buffer object
chunksize : int
features : set or None
Drop entries which aren't one of these features
intern_columns : list
These columns are short strings which should be interned
fix_quotes_columns : list
Most commonly the 'attribute' column which had broken quotes on
some Ensembl release GTF files. |
def remove_not_allowed_chars(savepath):
"""
Removes invalid filepath characters from the savepath.
:param str savepath: the savepath to work on
:return str: the savepath without invalid filepath characters
"""
split_savepath = os.path.splitdrive(savepath)
# https://msdn.microsoft.com/en-us/library/aa365247.aspx
savepath_without_invalid_chars = re.sub(r'<|>|:|\"|\||\?|\*', '_',
split_savepath[1])
return split_savepath[0] + savepath_without_invalid_chars | Removes invalid filepath characters from the savepath.
:param str savepath: the savepath to work on
:return str: the savepath without invalid filepath characters |
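A quick worked example; the results shown assume a Windows interpreter, where os.path.splitdrive separates the drive prefix so its colon is preserved:

print(remove_not_allowed_chars(r'C:\downloads\what?.mp4'))   # C:\downloads\what_.mp4
print(remove_not_allowed_chars(r'C:\downloads\a<b>|c.txt'))  # C:\downloads\a_b__c.txt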
def _get_scsi_controller_key(bus_number, scsi_ctrls):
'''
Returns key number of the SCSI controller keys
bus_number
Controller bus number from the adapter
scsi_ctrls
List of SCSI Controller objects (old+newly created)
'''
# list of new/old VirtualSCSIController objects, both new and old objects
# should contain a key attribute key should be a negative integer in case
# of a new object
keys = [ctrl.key for ctrl in scsi_ctrls if
scsi_ctrls and ctrl.busNumber == bus_number]
if not keys:
raise salt.exceptions.VMwareVmCreationError(
'SCSI controller number {0} doesn\'t exist'.format(bus_number))
return keys[0] | Returns key number of the SCSI controller keys
bus_number
Controller bus number from the adapter
scsi_ctrls
List of SCSI Controller objects (old+newly created) |
def quantize(image, bits_per_channel=None):
'''Reduces the number of bits per channel in the given image.'''
if bits_per_channel is None:
bits_per_channel = 6
assert image.dtype == np.uint8
shift = 8-bits_per_channel
halfbin = (1 << shift) >> 1
return ((image.astype(int) >> shift) << shift) + halfbin | Reduces the number of bits per channel in the given image. |
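A small check of the behaviour on a uint8 array; with the default 6 bits per channel the quantization step is 4 and each value is mapped to the centre of its bin:

import numpy as np

image = np.array([[0, 3, 4, 255]], dtype=np.uint8)
print(quantize(image))  # [[  2   2   6 254]]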
def peek(init, exposes, debug=False):
"""
Default deserializer factory.
Arguments:
init (callable): type constructor.
exposes (iterable): attributes to be peeked and passed to `init`.
Returns:
callable: deserializer (`peek` routine).
"""
def _peek(store, container, _stack=None):
args = [ store.peek(objname, container, _stack=_stack) \
for objname in exposes ]
if debug:
print(args)
return init(*args)
return _peek | Default deserializer factory.
Arguments:
init (callable): type constructor.
exposes (iterable): attributes to be peeked and passed to `init`.
Returns:
callable: deserializer (`peek` routine). |
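A toy example of the factory in use; ToyStore is a stand-in for whatever store object the real framework passes in, and only needs a peek(objname, container, _stack=None) method:

from collections import namedtuple

class ToyStore:
    # Minimal store: attribute values are looked up directly in the container dict.
    def peek(self, objname, container, _stack=None):
        return container[objname]

Point = namedtuple('Point', ['x', 'y'])
peek_point = peek(Point, exposes=('x', 'y'))
print(peek_point(ToyStore(), {'x': 1.0, 'y': 2.5}))  # Point(x=1.0, y=2.5)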
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy):
"""Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
objective bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - a
``ActivityAdminSession``
raise: NotFound - ``objective_bank_id`` not found
raise: NullArgument - ``objective_bank_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_activity_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_activity_admin():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.ActivityAdminSession(objective_bank_id, proxy, self._runtime) | Gets the ``OsidSession`` associated with the activity admin service for the given objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
objective bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - a
``ActivityAdminSession``
raise: NotFound - ``objective_bank_id`` not found
raise: NullArgument - ``objective_bank_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_activity_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` and
``supports_visible_federation()`` are ``true``.* |
def pickle_dumps(inbox):
"""
    Serializes the first element of the input with pickle, using
    the fastest binary protocol.
"""
# http://bugs.python.org/issue4074
gc.disable()
str_ = cPickle.dumps(inbox[0], cPickle.HIGHEST_PROTOCOL)
gc.enable()
    return str_ | Serializes the first element of the input with pickle, using
    the fastest binary protocol.
def validate_seal(cls, header: BlockHeader) -> None:
"""
Validate the seal on the given header.
"""
check_pow(
header.block_number, header.mining_hash,
header.mix_hash, header.nonce, header.difficulty) | Validate the seal on the given header. |
def extract_images(bs4, lazy_image_attribute=None):
"""If lazy attribute is supplied, find image url on that attribute
:param bs4:
:param lazy_image_attribute:
:return:
"""
# get images form 'img' tags
if lazy_image_attribute:
images = [image[lazy_image_attribute] for image in bs4.select('img') if image.has_attr(lazy_image_attribute)]
else:
images = [image['src'] for image in bs4.select('img') if image.has_attr('src')]
# get images from detected links
image_links = [link for link in extract_links(bs4) if link.endswith(('.jpg', '.JPG', '.png', '.PNG', '.gif', '.GIF'))]
# get images from meta content
image_metas = [meta['content'] for meta in extract_metas(bs4)
if 'content' in meta
if meta['content'].endswith(('.jpg', '.JPG', '.png', '.PNG', '.gif', '.GIF'))]
return list(set(images + image_links + image_metas)) | If lazy attribute is supplied, find image url on that attribute
:param bs4:
:param lazy_image_attribute:
:return: |
def assert_equal(first, second, msg_fmt="{msg}"):
"""Fail unless first equals second, as determined by the '==' operator.
>>> assert_equal(5, 5.0)
>>> assert_equal("Hello World!", "Goodbye!")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' != 'Goodbye!'
The following msg_fmt arguments are supported:
* msg - the default error message
* first - the first argument
* second - the second argument
"""
if isinstance(first, dict) and isinstance(second, dict):
assert_dict_equal(first, second, msg_fmt)
elif not first == second:
msg = "{!r} != {!r}".format(first, second)
fail(msg_fmt.format(msg=msg, first=first, second=second)) | Fail unless first equals second, as determined by the '==' operator.
>>> assert_equal(5, 5.0)
>>> assert_equal("Hello World!", "Goodbye!")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' != 'Goodbye!'
The following msg_fmt arguments are supported:
* msg - the default error message
* first - the first argument
* second - the second argument |
def cdssequencethreads(self):
"""
Extracts the sequence of each gene for each strain
"""
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.cdssequence, args=())
# Set the daemon to true - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.metadata.samples:
# Initialise a dictionary to store the sequence of each core gene
sample[self.analysistype].coresequence = dict()
self.sequencequeue.put(sample)
self.sequencequeue.join() | Extracts the sequence of each gene for each strain |
def get_stats_summary(start=None, end=None, **kwargs):
"""
Stats Historical Summary
Reference: https://iexcloud.io/docs/api/#stats-historical-summary
Data Weighting: ``Free``
Parameters
----------
start: datetime.datetime, default None, optional
Start of data retrieval period
end: datetime.datetime, default None, optional
End of data retrieval period
kwargs:
Additional Request Parameters (see base class)
"""
return MonthlySummaryReader(start=start, end=end, **kwargs).fetch() | Stats Historical Summary
Reference: https://iexcloud.io/docs/api/#stats-historical-summary
Data Weighting: ``Free``
Parameters
----------
start: datetime.datetime, default None, optional
Start of data retrieval period
end: datetime.datetime, default None, optional
End of data retrieval period
kwargs:
Additional Request Parameters (see base class) |
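A hedged usage sketch; it assumes an IEX Cloud API token is configured (for example in the environment) as these readers require, and note that the call makes a network request:

from datetime import datetime

# Historical stats summary for the first half of 2019; fetch() typically returns a pandas DataFrame.
summary = get_stats_summary(start=datetime(2019, 1, 1), end=datetime(2019, 6, 30))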