def killCells(i, options, tm):
"""
Kill cells as appropriate
"""
# Kill cells if called for
if options.simulation == "killer":
if i == options.switchover:
print "i=",i,"Killing cells for the first time!"
tm.killCells(percent = options.noise)
if i == options.secondKill:
print "i=",i,"Killing cells again up to",options.secondNoise
tm.killCells(percent = options.secondNoise)
elif options.simulation == "killingMeSoftly" and (i%100 == 0):
steps = (options.secondKill - options.switchover)/100
nsteps = (options.secondNoise - options.noise)/steps
noise = options.noise + nsteps*(i-options.switchover)/100
if i in xrange(options.switchover, options.secondKill+1):
print "i=",i,"Killing cells!"
            tm.killCells(percent = noise)
def query_by_account(self, account_id, end_time=None, start_time=None):
"""
Query by account.
List authentication events for a given account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - start_time
"""The beginning of the time range from which you want events."""
if start_time is not None:
params["start_time"] = start_time
# OPTIONAL - end_time
"""The end of the time range from which you want events."""
if end_time is not None:
params["end_time"] = end_time
self.logger.debug("GET /api/v1/audit/authentication/accounts/{account_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/audit/authentication/accounts/{account_id}".format(**path), data=data, params=params, no_data=True) | Query by account.
List authentication events for a given account. |
def psetex(self, key, milliseconds, value):
""":meth:`~tredis.RedisClient.psetex` works exactly like
    :meth:`~tredis.RedisClient.setex` with the sole difference that the
expire time is specified in milliseconds instead of seconds.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(1)``
:param key: The key to set
:type key: :class:`str`, :class:`bytes`
:param int milliseconds: Number of milliseconds for TTL
:param value: The value to set
:type value: :class:`str`, :class:`bytes`
:rtype: bool
:raises: :exc:`~tredis.exceptions.RedisError`
"""
return self._execute(
        [b'PSETEX', key, ascii(milliseconds), value], b'OK')
def bdecode(text):
"""Decodes a bencoded bytearray and returns it as a python object"""
text = text.decode('utf-8')
def bdecode_next(start):
"""bdecode helper function"""
if text[start] == 'i':
end = text.find('e', start)
return int(text[start+1:end], 10), end + 1
if text[start] == 'l':
res = []
start += 1
while text[start] != 'e':
elem, start = bdecode_next(start)
res.append(elem)
return res, start + 1
if text[start] == 'd':
res = {}
start += 1
while text[start] != 'e':
key, start = bdecode_next(start)
value, start = bdecode_next(start)
res[key] = value
return res, start + 1
lenend = text.find(':', start)
length = int(text[start:lenend], 10)
end = lenend + length + 1
return text[lenend+1:end], end
    return bdecode_next(0)[0]
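
# Illustrative usage sketch for bdecode (added for clarity, not from the original
# source): decodes a small bencoded dictionary holding a list of strings.
print(bdecode(b'd4:spaml1:a1:bee'))  # expected: {'spam': ['a', 'b']}
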
def published_tracks(self):
"""
Access the published_tracks
:returns: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackList
:rtype: twilio.rest.video.v1.room.room_participant.room_participant_published_track.PublishedTrackList
"""
if self._published_tracks is None:
self._published_tracks = PublishedTrackList(
self._version,
room_sid=self._solution['room_sid'],
participant_sid=self._solution['sid'],
)
    return self._published_tracks
def values(self, *fields):
"""
    Ask the collection to return a list of dicts of the given fields for each
instance found in the collection.
If no fields are given, all "simple value" fields are used.
"""
if not fields:
fields = self._get_simple_fields()
fields = self._coerce_fields_parameters(fields)
self._instances = False
self._values = {'fields': fields, 'mode': 'dicts'}
    return self
def track(self, event_key, user_id, attributes=None, event_tags=None):
""" Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event.
"""
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
return
if not validator.is_non_empty_string(event_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))
return
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return
if not self._validate_user_inputs(attributes, event_tags):
return
event = self.config.get_event(event_key)
if not event:
self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key))
return
conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)
self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id))
self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (
conversion_event.url,
conversion_event.params
))
try:
self.event_dispatcher.dispatch_event(conversion_event)
except:
self.logger.exception('Unable to dispatch conversion event!')
self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id,
                                                attributes, event_tags, conversion_event)
def constraint_from_choices(cls, value_type: type, choices: collections.Sequence):
"""
Returns a constraint callable based on choices of a given type
"""
choices_str = ', '.join(map(str, choices))
def constraint(value):
value = value_type(value)
if value not in choices:
raise ParameterError('Argument must be one of %s' % choices_str)
return value
constraint.__name__ = 'choices_%s' % value_type.__name__
constraint.__doc__ = 'choice of %s' % choices_str
    return constraint
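
# Illustrative usage sketch for constraint_from_choices (not from the original
# source). `cls` is unused in the body, so None is passed purely to satisfy the
# signature; ParameterError is assumed to be defined alongside the original code.
size = constraint_from_choices(None, int, [1, 2, 3])
print(size('2'))      # -> 2, coerced to int and accepted
print(size.__name__)  # -> 'choices_int'
# size('7') would raise ParameterError('Argument must be one of 1, 2, 3')
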
def validate(self):
"""
Apply the `is_valid` methods to self and possibly raise a ValueError.
"""
# it is important to have the validator applied in a fixed order
valids = [getattr(self, valid)
for valid in sorted(dir(self.__class__))
if valid.startswith('is_valid_')]
for is_valid in valids:
if not is_valid():
docstring = '\n'.join(
line.strip() for line in is_valid.__doc__.splitlines())
doc = docstring.format(**vars(self))
            raise ValueError(doc)
def _set_random_detect(self, v, load=False):
"""
Setter method for random_detect, mapped from YANG variable /interface/ethernet/qos/random_detect (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_random_detect is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_random_detect() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=random_detect.random_detect, is_container='container', presence=False, yang_name="random-detect", rest_name="random-detect", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Random Early Detect (RED) Profile', u'callpoint': u'cos_profile_te', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """random_detect must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=random_detect.random_detect, is_container='container', presence=False, yang_name="random-detect", rest_name="random-detect", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Random Early Detect (RED) Profile', u'callpoint': u'cos_profile_te', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)""",
})
self.__random_detect = t
if hasattr(self, '_set'):
        self._set()
def _IncrementNestLevel():
"""Increments the per thread nest level of imports."""
# This is the top call to import (no nesting), init the per-thread nest level
# and names set.
if getattr(_import_local, 'nest_level', None) is None:
_import_local.nest_level = 0
if _import_local.nest_level == 0:
# Re-initialize names set at each top-level import to prevent any
# accidental unforeseen memory leak.
_import_local.names = set()
    _import_local.nest_level += 1
def get_points(self, measurement=None, tags=None):
"""Return a generator for all the points that match the given filters.
:param measurement: The measurement name
:type measurement: str
:param tags: Tags to look for
:type tags: dict
:return: Points generator
"""
# Raise error if measurement is not str or bytes
if not isinstance(measurement,
(bytes, type(b''.decode()), type(None))):
raise TypeError('measurement must be an str or None')
for series in self._get_series():
series_name = series.get('measurement',
series.get('name', 'results'))
if series_name is None:
# this is a "system" query or a query which
# doesn't return a name attribute.
# like 'show retention policies' ..
if tags is None:
for item in self._get_points_for_series(series):
yield item
elif measurement in (None, series_name):
            # by default, if no tags were provided then
            # we match every returned series
series_tags = series.get('tags', {})
for item in self._get_points_for_series(series):
if tags is None or \
self._tag_matches(item, tags) or \
self._tag_matches(series_tags, tags):
                    yield item
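
# Illustrative usage sketch for get_points, assuming an InfluxDB-style result set
# object; the query, measurement and tag names below are examples, not from the
# original source.
# rs = client.query('SELECT * FROM "cpu_load"')
# for point in rs.get_points(measurement='cpu_load', tags={'host': 'server01'}):
#     print(point['time'], point['value'])
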
def check_partition_column(partition_column, cols):
""" Check partition_column existence and type
Args:
partition_column: partition_column name
cols: dict with columns names and python types
Returns:
None
"""
for k, v in cols.items():
if k == partition_column:
if v == "int":
return
else:
raise InvalidPartitionColumn(
"partition_column must be int, and not {0}".format(v)
)
raise InvalidPartitionColumn(
"partition_column {0} not found in the query".format(partition_column)
    )
def add_domain_name(list_name, item_name):
'''
Adds a domain name to a domain name list.
list_name(str): The name of the specific policy domain name list to append to.
item_name(str): The domain name to append.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_domain_name MyDomainName foo.bar.com
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_domain_names",
"params": [list_name, {"item_name": item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
    return _validate_change_result(response)
def list_sebool():
'''
Return a structure listing all of the selinux booleans on the system and
what state they are in
CLI Example:
.. code-block:: bash
salt '*' selinux.list_sebool
'''
bdata = __salt__['cmd.run']('semanage boolean -l').splitlines()
ret = {}
for line in bdata[1:]:
if not line.strip():
continue
comps = line.split()
ret[comps[0]] = {'State': comps[1][1:],
'Default': comps[3][:-1],
'Description': ' '.join(comps[4:])}
    return ret
def set_ortho_choice(self, small_asset_data, large_asset_data, name='Choice'):
"""stub"""
o3d_asset_id = self.create_o3d_asset(manip=None,
small_ov_set=small_asset_data,
large_ov_set=large_asset_data,
display_name=name)
    self.add_choice(o3d_asset_id, name=name)
def load(self, label):
""" Load obj with give label from hidden state directory """
objloc = '{0}/{1}'.format(self.statedir, label)
try:
obj = pickle.load(open(objloc, 'r'))
except (KeyError, IndexError, EOFError):
obj = open(objloc, 'r').read()
try:
obj = float(obj)
except ValueError:
pass
except IOError:
obj = None
    return obj
def get_unit_hostnames(self, units):
"""Return a dict of juju unit names to hostnames."""
host_names = {}
for unit in units:
host_names[unit.info['unit_name']] = \
str(unit.file_contents('/etc/hostname').strip())
self.log.debug('Unit host names: {}'.format(host_names))
    return host_names
def direction(theta, phi):
'''Return the direction vector of a cylinder defined
by the spherical coordinates theta and phi.
'''
return np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
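
# Quick illustrative check for direction (not from the original source): theta = 0
# points along the z-axis regardless of phi, and the result is a unit vector.
import numpy as np
print(direction(0.0, 0.0))                  # -> [0. 0. 1.]
print(np.linalg.norm(direction(1.2, 0.7)))  # -> 1.0
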
def iterDiffs(self):
""" Return all diffs used in optimal network. """
nodes = self.nodes.values()
nodes.sort(key=lambda node: self._height(node))
for node in nodes:
        yield node.diff
def list_group_categories_for_context_courses(self, course_id):
"""
List group categories for a context.
Returns a list of group categories in a context
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
self.logger.debug("GET /api/v1/courses/{course_id}/group_categories with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/group_categories".format(**path), data=data, params=params, all_pages=True) | List group categories for a context.
Returns a list of group categories in a context |
def handle_request(self):
"""simply collect requests and put them on the queue for the workers."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
        self.requests.put((request, client_address))
def parent(self, parent):
"""
Sets the parent of the actor.
:param parent: the parent
:type parent: Actor
"""
self._name = self.unique_name(self._name)
self._full_name = None
self._logger = None
    self._parent = parent
def euclidean_distance_square(point1, point2):
"""!
@brief Calculate square Euclidean distance between two vectors.
\f[
dist(a, b) = \sum_{i=0}^{N}(a_{i} - b_{i})^{2};
\f]
@param[in] point1 (array_like): The first vector.
@param[in] point2 (array_like): The second vector.
@return (double) Square Euclidean distance between two vectors.
@see euclidean_distance, manhattan_distance, chebyshev_distance
"""
distance = 0.0
for i in range(len(point1)):
distance += (point1[i] - point2[i]) ** 2.0
    return distance
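
# Quick illustrative check for euclidean_distance_square (not from the original
# source): the squared distance between (0, 0) and (3, 4) is 9 + 16 = 25.
print(euclidean_distance_square([0.0, 0.0], [3.0, 4.0]))  # -> 25.0
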
def writeRaw8(self, value):
"""Write an 8-bit value on the bus (without register)."""
value = value & 0xFF
self._bus.write_byte(self._address, value)
self._logger.debug("Wrote 0x%02X",
                       value)
def movies_directed_by(self, director):
"""Return list of movies that were directed by certain person.
:param director: Director's name
:type director: str
:rtype: list[movies.models.Movie]
:return: List of movie instances.
"""
return [movie for movie in self._movie_finder.find_all()
            if movie.director == director]
def cert_info(cert, digest='sha256'):
'''
Return information for a particular certificate
cert
    path to the certificate PEM file or string
.. versionchanged:: 2018.3.4
digest
what digest to use for fingerprinting
CLI Example:
.. code-block:: bash
salt '*' tls.cert_info /dir/for/certs/cert.pem
'''
# format that OpenSSL returns dates in
date_fmt = '%Y%m%d%H%M%SZ'
if '-----BEGIN' not in cert:
with salt.utils.files.fopen(cert) as cert_file:
cert = cert_file.read()
cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
cert
)
issuer = {}
for key, value in cert.get_issuer().get_components():
if isinstance(key, bytes):
key = salt.utils.stringutils.to_unicode(key)
if isinstance(value, bytes):
value = salt.utils.stringutils.to_unicode(value)
issuer[key] = value
subject = {}
for key, value in cert.get_subject().get_components():
if isinstance(key, bytes):
key = salt.utils.stringutils.to_unicode(key)
if isinstance(value, bytes):
value = salt.utils.stringutils.to_unicode(value)
subject[key] = value
ret = {
'fingerprint': salt.utils.stringutils.to_unicode(
cert.digest(salt.utils.stringutils.to_str(digest))
),
'subject': subject,
'issuer': issuer,
'serial_number': cert.get_serial_number(),
'not_before': calendar.timegm(time.strptime(
str(cert.get_notBefore().decode(__salt_system_encoding__)),
date_fmt)),
'not_after': calendar.timegm(time.strptime(
cert.get_notAfter().decode(__salt_system_encoding__),
date_fmt)),
}
# add additional info if your version of pyOpenSSL supports it
if hasattr(cert, 'get_extension_count'):
ret['extensions'] = {}
for i in _range(cert.get_extension_count()):
try:
ext = cert.get_extension(i)
key = salt.utils.stringutils.to_unicode(ext.get_short_name())
ret['extensions'][key] = str(ext).strip()
except AttributeError:
continue
if 'subjectAltName' in ret.get('extensions', {}):
valid_entries = ('DNS', 'IP Address')
valid_names = set()
for name in str(ret['extensions']['subjectAltName']).split(', '):
entry, name = name.split(':', 1)
if entry not in valid_entries:
log.error('Cert %s has an entry (%s) which does not start '
'with %s', ret['subject'], name, '/'.join(valid_entries))
else:
valid_names.add(name)
ret['subject_alt_names'] = list(valid_names)
if hasattr(cert, 'get_signature_algorithm'):
try:
value = cert.get_signature_algorithm()
if isinstance(value, bytes):
value = salt.utils.stringutils.to_unicode(value)
ret['signature_algorithm'] = value
except AttributeError:
# On py3 at least
# AttributeError: cdata 'X509 *' points to an opaque type: cannot read fields
pass
    return ret
def _move_here(self):
"""Move the cursor to this item."""
cu = self.scraper.current_item
# Already here?
if self is cu:
return
# A child?
if cu.items and self in cu.items:
self.scraper.move_to(self)
return
# A parent?
if self is cu.parent:
self.scraper.move_up()
# A sibling?
if self.parent and self in self.parent.items:
self.scraper.move_up()
self.scraper.move_to(self)
return
# Last resort: Move to top and all the way down again
self.scraper.move_to_top()
for step in self.path:
        self.scraper.move_to(step)
def get_ip_info(ip: str, exceptions: bool=False, timeout: int=10) -> tuple:
"""
Returns (ip, country_code, host) tuple of the IP address.
:param ip: IP address
:param exceptions: Raise Exception or not
:param timeout: Timeout in seconds. Note that timeout only affects geo IP part, not getting host name.
:return: (ip, country_code, host)
"""
import traceback
import socket
if not ip: # localhost
return None, '', ''
host = ''
country_code = get_geo_ip(ip, exceptions=exceptions, timeout=timeout).get('country_code', '')
try:
res = socket.gethostbyaddr(ip)
host = res[0][:255] if ip else ''
except Exception as e:
msg = 'socket.gethostbyaddr({}) failed: {}'.format(ip, traceback.format_exc())
logger.error(msg)
if exceptions:
raise e
    return ip, country_code, host
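
# Illustrative usage sketch for get_ip_info (not from the original source); the
# address is an example and the result depends on network access and the
# get_geo_ip helper defined elsewhere in the module.
# ip, country_code, host = get_ip_info('8.8.8.8', exceptions=False, timeout=5)
# print(ip, country_code, host)
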
def moist_lapse(pressure, temperature, ref_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `ref_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
ref_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The temperature corresponding to the starting temperature and
pressure levels.
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
"""
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
/ (mpconsts.Rd * t * t)))).to('kelvin')
return frac / p
if ref_pressure is None:
ref_pressure = pressure[0]
pressure = pressure.to('mbar')
ref_pressure = ref_pressure.to('mbar')
temperature = atleast_1d(temperature)
side = 'left'
pres_decreasing = (pressure[0] > pressure[-1])
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = 'right'
ref_pres_idx = np.searchsorted(pressure.m, ref_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if ref_pressure > pressure.min():
# Integrate downward in pressure
pres_down = np.append(ref_pressure, pressure[(ref_pres_idx - 1)::-1])
trace_down = si.odeint(dt, temperature.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if ref_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(ref_pressure, pressure[ref_pres_idx:])
trace_up = si.odeint(dt, temperature.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
    return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
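
# Illustrative usage sketch for moist_lapse, assuming MetPy-style pint units are
# available in the enclosing module; values below are examples, not from the source.
# pressure = np.array([1000., 900., 800., 700.]) * units.hPa
# starting_temperature = 293. * units.kelvin
# profile = moist_lapse(pressure, starting_temperature)
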
def text_search(self, text, sort=None, offset=100, page=1):
"""
Search in aquarius using text query.
Given the string aquarius will do a full-text query to search in all documents.
Currently implemented are the MongoDB and Elastic Search drivers.
    For a detailed guide on how to search, see the MongoDB driver documentation.
    Currently implemented in:
https://docs.mongodb.com/manual/reference/operator/query/text/
And the Elastic Search documentation:
https://www.elastic.co/guide/en/elasticsearch/guide/current/full-text-search.html
Other drivers are possible according to each implementation.
    :param text: String to be searched.
:param sort: 1/-1 to sort ascending or descending.
:param offset: Integer with the number of elements displayed per page.
:param page: Integer with the number of page.
:return: List of DDO instance
"""
assert page >= 1, f'Invalid page value {page}. Required page >= 1.'
payload = {"text": text, "sort": sort, "offset": offset, "page": page}
response = self.requests_session.get(
f'{self.url}/query',
params=payload,
headers=self._headers
)
if response.status_code == 200:
return self._parse_search_response(response.content)
else:
        raise Exception(f'Unable to search for DDO: {response.content}')
def on_line(client, line):
"""Default handling for incoming lines.
This handler will automatically manage the following IRC messages:
PING:
Responds with a PONG.
PRIVMSG:
Dispatches the PRIVMSG event.
NOTICE:
Dispatches the NOTICE event.
MOTDSTART:
Initializes MOTD receive buffer.
MOTD:
Appends a line to the MOTD receive buffer.
ENDOFMOTD:
Joins the contents of the MOTD receive buffer, assigns the result
to the .motd of the server, and dispatches the MOTD event.
"""
if line.startswith("PING"):
client.send("PONG" + line[4:])
return True
if line.startswith(":"):
actor, _, line = line[1:].partition(" ")
else:
actor = None
command, _, args = line.partition(" ")
command = NUMERIC_EVENTS.get(command, command)
parser = PARSERS.get(command, False)
if parser:
parser(client, command, actor, args)
return True
elif parser is False:
# Explicitly ignored message
        return True
def disconnect_channel(self, destination_id):
""" Disconnect a channel with destination_id. """
if destination_id in self._open_channels:
try:
self.send_message(
destination_id, NS_CONNECTION,
{MESSAGE_TYPE: TYPE_CLOSE, 'origin': {}},
no_add_request_id=True, force=True)
except NotConnected:
pass
except Exception: # pylint: disable=broad-except
self.logger.exception("[%s:%s] Exception",
self.fn or self.host, self.port)
self._open_channels.remove(destination_id)
        self.handle_channel_disconnected()
def GetRealPath(filename):
"""Given an executable filename, find in the PATH or find absolute path.
Args:
filename An executable filename (string)
Returns:
Absolute version of filename.
None if filename could not be found locally, absolutely, or in PATH
"""
if os.path.isabs(filename): # already absolute
return filename
if filename.startswith('./') or filename.startswith('../'): # relative
return os.path.abspath(filename)
path = os.getenv('PATH', '')
for directory in path.split(':'):
tryname = os.path.join(directory, filename)
if os.path.exists(tryname):
if not os.path.isabs(directory): # relative directory
return os.path.abspath(tryname)
return tryname
if os.path.exists(filename):
return os.path.abspath(filename)
    return None
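
# Illustrative usage of GetRealPath (not from the original source): resolve an
# executable via PATH; returns None when nothing matches.
print(GetRealPath('ls'))            # e.g. '/bin/ls' on many Linux systems
print(GetRealPath('no-such-tool'))  # -> None
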
def forget_canvas(canvas):
""" Forget about the given canvas. Used by the canvas when closed.
"""
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
    canvasses[:] = [weakref.ref(c) for c in cc]
def save(self, filename, fformat=None, fill_value=None, compute=True,
keep_palette=False, cmap=None, **format_kwargs):
"""Save the image to the given *filename*.
Args:
filename (str): Output filename
fformat (str): File format of output file (optional). Can be
one of many image formats supported by the
`rasterio` or `PIL` libraries ('jpg', 'png',
'tif'). By default this is determined by the
extension of the provided filename.
If the format allows, geographical information will
                       be saved to the output file, in the form of grid
mapping or ground control points.
fill_value (float): Replace invalid data values with this value
and do not produce an Alpha band. Default
behavior is to create an alpha band.
compute (bool): If True (default) write the data to the file
immediately. If False the return value is either
a `dask.Delayed` object or a tuple of
``(source, target)`` to be passed to
`dask.array.store`.
keep_palette (bool): Saves the palettized version of the image if
set to True. False by default.
cmap (Colormap or dict): Colormap to be applied to the image when
saving with rasterio, used with
keep_palette=True. Should be uint8.
format_kwargs: Additional format options to pass to `rasterio`
or `PIL` saving methods.
Returns:
Either `None` if `compute` is True or a `dask.Delayed` object or
``(source, target)`` pair to be passed to `dask.array.store`.
If compute is False the return value depends on format and how
the image backend is used. If ``(source, target)`` is provided
then target is an open file-like object that must be closed by
the caller.
"""
fformat = fformat or os.path.splitext(filename)[1][1:4]
if fformat in ('tif', 'jp2') and rasterio:
return self.rio_save(filename, fformat=fformat,
fill_value=fill_value, compute=compute,
keep_palette=keep_palette, cmap=cmap,
**format_kwargs)
else:
return self.pil_save(filename, fformat, fill_value,
                             compute=compute, **format_kwargs)
def _get_satisfiability_smt_script(self, constraints=(), variables=()):
"""
        Returns an SMT script that declares all the symbols and constraints and checks
        their satisfiability (check-sat)
:param extra-constraints: list of extra constraints that we want to evaluate only
in the scope of this call
:return string: smt-lib representation of the script that checks the satisfiability
"""
smt_script = '(set-logic ALL)\n'
smt_script += self._smtlib_exprs(variables)
smt_script += self._smtlib_exprs(constraints)
smt_script += '(check-sat)\n'
        return smt_script
def expand_path(path):
"""Returns ``path`` as an absolute path with ~user and env var expansion applied.
:API: public
"""
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
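
# Illustrative usage of expand_path (not from the original source): tilde and
# environment variables are expanded before the path is made absolute.
print(expand_path('~/projects'))      # e.g. '/home/alice/projects'
print(expand_path('$HOME/projects'))  # same result when $HOME is set
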
def get_file(self, secure_data_path, version=None):
"""
Return a requests.structures.CaseInsensitiveDict object containing a file and the
metadata/header information around it.
The binary data of the file is under the key 'data'
"""
query = self._get_file(secure_data_path, version)
resp = query.headers.copy()
resp = self._parse_metadata_filename(resp)
resp['data'] = query.content
    return resp
def force_delete(self):
"""
Force a hard delete on a soft deleted model.
"""
self.__force_deleting__ = True
self.delete()
    self.__force_deleting__ = False
def add_caveats(self, cavs, key, loc):
'''Add an array of caveats to the macaroon.
This method does not mutate the current object.
    @param cavs array of caveats.
@param key the PublicKey to encrypt third party caveat.
@param loc locator to find the location object that has a method
third_party_info.
'''
if cavs is None:
return
for cav in cavs:
        self.add_caveat(cav, key, loc)
def run(self, inputs, **kwargs):
"""Run model inference and return the result
Parameters
----------
inputs : numpy array
input to run a layer on
Returns
-------
params : numpy array
result obtained after running the inference on mxnet
"""
input_data = np.asarray(inputs[0], dtype='f')
# create module, passing cpu context
if self.device == 'CPU':
ctx = mx.cpu()
else:
raise NotImplementedError("Only CPU context is supported for now")
mod = mx.mod.Module(symbol=self.symbol, data_names=['input_0'], context=ctx,
label_names=None)
mod.bind(for_training=False, data_shapes=[('input_0', input_data.shape)],
label_shapes=None)
mod.set_params(arg_params=self.params, aux_params=None)
# run inference
batch = namedtuple('Batch', ['data'])
mod.forward(batch([mx.nd.array(input_data)]))
result = mod.get_outputs()[0].asnumpy()
    return [result]
def get_method_name(method):
"""
Returns given method name.
:param method: Method to retrieve the name.
:type method: object
:return: Method name.
:rtype: unicode
"""
name = get_object_name(method)
if name.startswith("__") and not name.endswith("__"):
name = "_{0}{1}".format(get_object_name(method.im_class), name)
    return name
def setup_resource(self):
""" Setting Up Resource """
template = self.template
variables = self.get_variables()
tclass = variables['Class']
tprops = variables['Properties']
output = variables['Output']
klass = load_object_from_string('troposphere.' + tclass)
instance = klass.from_dict('ResourceRefName', tprops)
template.add_resource(instance)
template.add_output(Output(
output,
Description="A reference to the object created in this blueprint",
Value=Ref(instance)
    ))
def is_readable(path):
"""
Returns if given path is readable.
:param path: Path to check access.
:type path: unicode
    :return: Is path readable.
:rtype: bool
"""
if os.access(path, os.R_OK):
LOGGER.debug("> '{0}' path is readable.".format(path))
return True
else:
LOGGER.debug("> '{0}' path is not readable.".format(path))
        return False
def list(self):
"""List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
# "self" is iterable (see __iter__() method).
data = [{'name': version['name'].split()[-1],
'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']}
for version in self.get_iterator()]
IPython.display.display(
        datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime']))
def stat(self, follow_symlinks=True):
"""
Return a stat_result object for this entry.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
os.stat_result: Stat result object
"""
return self._system.stat(
path=self._path, client_kwargs=self._client_kwargs,
        header=self._header)
def xrefs_from(self):
"""Xrefs from the function.
This includes the xrefs from every line in the function, as `Xref` objects.
Xrefs are filtered to exclude code references that are internal to the function. This
    means that every xref to the function's code will NOT be returned (yet, references
    to the function's data will be returned). To get those extra xrefs, you need to iterate
the function's lines yourself.
"""
for line in self.lines:
for xref in line.xrefs_from:
if xref.type.is_flow:
continue
if xref.to in self and xref.iscode:
continue
            yield xref
def _computeStatus(self, dfile, service):
"""Computes status for file, basically this means if
more than one service handles the file, it will place
a 'C' (for complicated) otherwise if status matches
between all services, will place that status"""
# If only one service requested
if service:
if not dfile['services'].has_key(service):
return self.ST_UNTRACKED
else:
return dfile['services'][service]['status']
# Otherwise go through all services and compute
# a sensible status
first_service_key=dfile['services'].keys()[0]
# Save off one of the statuses so we can compute
# if they are all the same between services.
first_status=dfile['services'][first_service_key]['status']
all_status_match=True
# Return ST_COMPLICATED "C" if status
# differs
for service in dfile['services']:
if dfile['services'][service]['status']!=first_status:
return self.ST_COMPLICATED
        return first_status
def on_finished(self):
"""Finished signal handler"""
self.controller.is_running = False
error = self.controller.current_error
if error is not None:
self.info(self.tr("Stopped due to error(s), see Terminal."))
else:
self.info(self.tr("Finished successfully!")) | Finished signal handler |
def info(name):
'''
Return information for the specified user
    This just returns dummy data so that salt states can work.
:param str name: The name of the user account to show.
CLI Example:
.. code-block:: bash
salt '*' shadow.info root
'''
info = __salt__['user.info'](name=name)
ret = {'name': name,
'passwd': '',
'lstchg': '',
'min': '',
'max': '',
'warn': '',
'inact': '',
'expire': ''}
if info:
ret = {'name': info['name'],
'passwd': 'Unavailable',
'lstchg': info['password_changed'],
'min': '',
'max': '',
'warn': '',
'inact': '',
'expire': info['expiration_date']}
    return ret
def _modify(self, **patch):
'''Modify only draft or legacy policies
Published policies cannot be modified
:raises: OperationNotSupportedOnPublishedPolicy
'''
legacy = patch.pop('legacy', False)
tmos_ver = self._meta_data['bigip']._meta_data['tmos_version']
self._filter_version_specific_options(tmos_ver, **patch)
if 'Drafts' not in self._meta_data['uri'] and \
LooseVersion(tmos_ver) >= LooseVersion('12.1.0') and \
not legacy:
msg = 'Modify operation not allowed on a published policy.'
raise OperationNotSupportedOnPublishedPolicy(msg)
    super(Policy, self)._modify(**patch)
def rejoin_lines(nb):
"""rejoin multiline text into strings
For reversing effects of ``split_lines(nb)``.
This only rejoins lines that have been split, so if text objects were not split
they will pass through unchanged.
Used when reading JSON files that may have been passed through split_lines.
"""
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
if 'input' in cell and isinstance(cell.input, list):
cell.input = _join_lines(cell.input)
for output in cell.outputs:
for key in _multiline_outputs:
item = output.get(key, None)
if isinstance(item, list):
output[key] = _join_lines(item)
else: # text, heading cell
for key in ['source', 'rendered']:
item = cell.get(key, None)
if isinstance(item, list):
cell[key] = _join_lines(item)
    return nb
def _prepare_reserved_tokens(reserved_tokens):
"""Prepare reserved tokens and a regex for splitting them out of strings."""
reserved_tokens = [tf.compat.as_text(tok) for tok in reserved_tokens or []]
dups = _find_duplicates(reserved_tokens)
if dups:
raise ValueError("Duplicates found in tokens: %s" % dups)
reserved_tokens_re = _make_reserved_tokens_re(reserved_tokens)
    return reserved_tokens, reserved_tokens_re
def update(self, iterable):
"""
Return a new PSet with elements in iterable added
>>> s1 = s(1, 2)
>>> s1.update([3, 4, 4])
pset([1, 2, 3, 4])
"""
e = self.evolver()
for element in iterable:
e.add(element)
    return e.persistent()
def curve_reduce_approx(curve, reduced):
"""Image for :meth:`.curve.Curve.reduce` docstring."""
if NO_IMAGES:
return
ax = curve.plot(256)
color = ax.lines[-1].get_color()
add_patch(ax, curve._nodes, color, alpha=0.25, node_color=color)
reduced.plot(256, ax=ax)
color = ax.lines[-1].get_color()
add_patch(ax, reduced._nodes, color, alpha=0.25, node_color=color)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
    save_image(ax.figure, "curve_reduce_approx.png")
def create_summary_tear_sheet(factor_data,
long_short=True,
group_neutral=False):
"""
Creates a small summary tear sheet with returns, information, and turnover
analysis.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
long_short : bool
Should this computation happen on a long short portfolio? if so, then
mean quantile returns will be demeaned across the factor universe.
group_neutral : bool
Should this computation happen on a group neutral portfolio? if so,
returns demeaning will occur on the group level.
"""
# Returns Analysis
mean_quant_ret, std_quantile = \
perf.mean_return_by_quantile(factor_data,
by_group=False,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret = \
mean_quant_ret.apply(utils.rate_of_return, axis=0,
base_period=mean_quant_ret.columns[0])
mean_quant_ret_bydate, std_quant_daily = \
perf.mean_return_by_quantile(factor_data,
by_date=True,
by_group=False,
demeaned=long_short,
group_adjust=group_neutral)
mean_quant_rateret_bydate = mean_quant_ret_bydate.apply(
utils.rate_of_return,
axis=0,
base_period=mean_quant_ret_bydate.columns[0]
)
compstd_quant_daily = std_quant_daily.apply(
utils.std_conversion, axis=0,
base_period=std_quant_daily.columns[0]
)
alpha_beta = perf.factor_alpha_beta(factor_data,
demeaned=long_short,
group_adjust=group_neutral)
mean_ret_spread_quant, std_spread_quant = perf.compute_mean_returns_spread(
mean_quant_rateret_bydate,
factor_data['factor_quantile'].max(),
factor_data['factor_quantile'].min(),
std_err=compstd_quant_daily)
periods = utils.get_forward_returns_columns(factor_data.columns)
fr_cols = len(periods)
vertical_sections = 2 + fr_cols * 3
gf = GridFigure(rows=vertical_sections, cols=1)
plotting.plot_quantile_statistics_table(factor_data)
plotting.plot_returns_table(alpha_beta,
mean_quant_rateret,
mean_ret_spread_quant)
plotting.plot_quantile_returns_bar(mean_quant_rateret,
by_group=False,
ylim_percentiles=None,
ax=gf.next_row())
# Information Analysis
ic = perf.factor_information_coefficient(factor_data)
plotting.plot_information_table(ic)
# Turnover Analysis
quantile_factor = factor_data['factor_quantile']
quantile_turnover = \
{p: pd.concat([perf.quantile_turnover(quantile_factor, q, p)
for q in range(1, int(quantile_factor.max()) + 1)],
axis=1)
for p in periods}
autocorrelation = pd.concat(
[perf.factor_rank_autocorrelation(factor_data, period) for period in
periods], axis=1)
plotting.plot_turnover_table(autocorrelation, quantile_turnover)
plt.show()
    gf.close()
def add(self, name: str, pattern: str) -> None:
""" add url pattern for name
"""
self.patterns[name] = URITemplate(
        pattern, converters=self.converters)
def process_iter():
"""Return a generator yielding a Process class instance for all
running processes on the local machine.
Every new Process instance is only created once and then cached
into an internal table which is updated every time this is used.
The sorting order in which processes are yielded is based on
their PIDs.
"""
def add(pid):
proc = Process(pid)
_pmap[proc.pid] = proc
return proc
def remove(pid):
_pmap.pop(pid, None)
a = set(get_pid_list())
b = set(_pmap.keys())
new_pids = a - b
gone_pids = b - a
for pid in gone_pids:
remove(pid)
for pid, proc in sorted(list(_pmap.items()) + \
list(dict.fromkeys(new_pids).items())):
try:
if proc is None: # new process
yield add(pid)
else:
# use is_running() to check whether PID has been reused by
# another process in which case yield a new Process instance
if proc.is_running():
yield proc
else:
yield add(pid)
except NoSuchProcess:
remove(pid)
except AccessDenied:
# Process creation time can't be determined hence there's
# no way to tell whether the pid of the cached process
# has been reused. Just return the cached version.
        yield proc
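
# Illustrative usage sketch for process_iter, assuming the psutil-style module this
# function belongs to; attribute access below is an example, not from the source.
# for proc in process_iter():
#     print(proc.pid)
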
def render_template(self):
"""Render and save API doc in openapi.yml."""
self._parse_paths()
context = dict(napp=self._napp.__dict__, paths=self._paths)
    self._save(context)
def rpc_get_name_record(self, name, **con_info):
"""
    Get the current state of a name or subdomain, excluding its history.
Return {'status': True, 'record': rec} on success
Return {'error': ...} on error
"""
res = None
if check_name(name):
res = self.get_name_record(name, include_expired=True, include_history=False)
elif check_subdomain(name):
res = self.get_subdomain_record(name, include_history=False)
else:
return {'error': 'Invalid name or subdomain', 'http_status': 400}
if 'error' in res:
return {'error': res['error'], 'http_status': 404}
# also get a DID
did_info = None
did = None
if check_name(name):
did_info = self.get_name_DID_info(name)
elif check_subdomain(name):
did_info = self.get_subdomain_DID_info(name)
else:
return {'error': 'Invalid name or subdomain', 'http_status': 400}
if did_info is not None:
did = make_DID(did_info['name_type'], did_info['address'], did_info['index'])
res['record']['did'] = did
    return self.success_response({'record': res['record']})
def dispatch(self, *args, **kwargs):
"""
Check that user signup is allowed before even bothering to
dispatch or do other processing.
"""
if not self.registration_allowed():
return HttpResponseRedirect(force_text(self.disallowed_url))
    return super(RegistrationView, self).dispatch(*args, **kwargs)
def supports_currency_type(self, currency_type):
"""Tests if the given currency type is supported.
arg: currency_type (osid.type.Type): a currency Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``CURRENCY``
raise: NullArgument - ``currency_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
if self._kwargs['syntax'] not in ['``CURRENCY``']:
raise errors.IllegalState()
    return currency_type in self.get_currency_types
def _apply(self, f, grouper=None, *args, **kwargs):
"""
Dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object.
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, str):
return getattr(x, f)(**kwargs)
return x.apply(f, *args, **kwargs)
result = self._groupby.apply(func)
    return self._wrap_result(result)
def create_element(tag: str, name: str = None, base: type = None,
attr: dict = None) -> Node:
"""Create element with a tag of ``name``.
:arg str name: html tag.
:arg type base: Base class of the created element
        (default: ``WdomElement``)
:arg dict attr: Attributes (key-value pairs dict) of the new element.
"""
from wdom.web_node import WdomElement
from wdom.tag import Tag
from wdom.window import customElements
if attr is None:
attr = {}
if name:
base_class = customElements.get((name, tag))
else:
base_class = customElements.get((tag, None))
if base_class is None:
attr['_registered'] = False
base_class = base or WdomElement
if issubclass(base_class, Tag):
return base_class(**attr)
    return base_class(tag, **attr)
def create(cls, name, certificate):
"""
Create a TLS CA. The certificate must be compatible with OpenSSL
and be in PEM format. The certificate can be either a file with
the Root CA, or a raw string starting with BEGIN CERTIFICATE, etc.
When creating a TLS CA, you must also import the CA certificate. Once
the CA is created, it is possible to import a different certificate to
map to the CA if necessary.
:param str name: name of root CA
:param str,file certificate: The root CA contents
:raises CreateElementFailed: failed to create the root CA
:raises ValueError: if loading from file and no certificates present
:raises IOError: cannot find specified file for certificate
:rtype: TLSCertificateAuthority
"""
json = {'name': name,
'certificate': certificate if pem_as_string(certificate) else \
load_cert_chain(certificate)[0][1].decode('utf-8')}
return ElementCreator(cls, json) | Create a TLS CA. The certificate must be compatible with OpenSSL
and be in PEM format. The certificate can be either a file with
the Root CA, or a raw string starting with BEGIN CERTIFICATE, etc.
When creating a TLS CA, you must also import the CA certificate. Once
the CA is created, it is possible to import a different certificate to
map to the CA if necessary.
:param str name: name of root CA
:param str,file certificate: The root CA contents
:raises CreateElementFailed: failed to create the root CA
:raises ValueError: if loading from file and no certificates present
:raises IOError: cannot find specified file for certificate
:rtype: TLSCertificateAuthority |
def add_files_to_git_repository(base_dir, files, description):
"""
Add and commit all files given in a list into a git repository in the
base_dir directory. Nothing is done if the git repository has
local changes.
@param files: the files to commit
@param description: the commit message
"""
if not os.path.isdir(base_dir):
printOut('Output path is not a directory, cannot add files to git repository.')
return
# find out root directory of repository
gitRoot = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
cwd=base_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = gitRoot.communicate()[0]
if gitRoot.returncode != 0:
printOut('Cannot commit results to repository: git rev-parse failed, perhaps output path is not a git directory?')
return
gitRootDir = decode_to_string(stdout).splitlines()[0]
# check whether repository is clean
gitStatus = subprocess.Popen(['git','status','--porcelain', '--untracked-files=no'],
cwd=gitRootDir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = gitStatus.communicate()
if gitStatus.returncode != 0:
printOut('Git status failed! Output was:\n' + decode_to_string(stderr))
return
if stdout:
        printOut('Git repository has local changes, not committing results.')
return
# add files to staging area
files = [os.path.realpath(file) for file in files]
# Use --force to add all files in result-files directory even if .gitignore excludes them
gitAdd = subprocess.Popen(['git', 'add', '--force', '--'] + files,
cwd=gitRootDir)
if gitAdd.wait() != 0:
printOut('Git add failed, will not commit results!')
return
# commit files
printOut('Committing results files to git repository in ' + gitRootDir)
gitCommit = subprocess.Popen(['git', 'commit', '--file=-', '--quiet'],
cwd=gitRootDir,
stdin=subprocess.PIPE)
gitCommit.communicate(description.encode('UTF-8'))
if gitCommit.returncode != 0:
printOut('Git commit failed!')
return | Add and commit all files given in a list into a git repository in the
base_dir directory. Nothing is done if the git repository has
local changes.
@param files: the files to commit
@param description: the commit message |
def load_configuration(configuration):
"""Returns a dictionary, accepts a dictionary or a path to a JSON file."""
if isinstance(configuration, dict):
return configuration
else:
with open(configuration) as configfile:
return json.load(configfile) | Returns a dictionary, accepts a dictionary or a path to a JSON file. |
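A minimal usage sketch (the file name below is hypothetical); the helper accepts either an in-memory dict or a path to a JSON file:

settings = load_configuration({'debug': True})     # dict is returned unchanged
settings = load_configuration('config.json')       # hypothetical path, parsed with json.load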
def getElementsWithAttrValues(self, attrName, attrValues):
'''
getElementsWithAttrValues - Search children of this tag for tags with an attribute name and one of several values
@param attrName <lowercase str> - Attribute name (lowercase)
@param attrValues set<str> - set of acceptable attribute values
@return - TagCollection of matching elements
'''
elements = []
for child in self.children:
if child.getAttribute(attrName) in attrValues:
elements.append(child)
elements += child.getElementsWithAttrValues(attrName, attrValues)
return TagCollection(elements) | getElementsWithAttrValues - Search children of this tag for tags with an attribute name and one of several values
@param attrName <lowercase str> - Attribute name (lowercase)
@param attrValues set<str> - set of acceptable attribute values
@return - TagCollection of matching elements |
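A hypothetical usage sketch, assuming an AdvancedHTMLParser-style tag object named body:

fields = body.getElementsWithAttrValues('type', {'text', 'password'})   # TagCollection of matching descendants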
def save_data(self, trigger_id, **data):
"""
let's save the data
        we don't want to handle an empty title or content,
        otherwise this will produce an Exception from
        the Evernote API
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
# set the title and content of the data
title, content = super(ServiceEvernote, self).save_data(trigger_id, **data)
# get the evernote data of this trigger
trigger = Evernote.objects.get(trigger_id=trigger_id)
# initialize notestore process
note_store = self._notestore(trigger_id, data)
if isinstance(note_store, evernote.api.client.Store):
# note object
note = self._notebook(trigger, note_store)
# its attributes
note = self._attributes(note, data)
# its footer
content = self._footer(trigger, data, content)
# its title
note.title = limit_content(title, 255)
# its content
note = self._content(note, content)
# create a note
return EvernoteMgr.create_note(note_store, note, trigger_id, data)
else:
            # so it's not an evernote object, so something went wrong
return note_store | let's save the data
we don't want to handle an empty title or content,
otherwise this will produce an Exception from
the Evernote API
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean |
def is_instance_of(self, some_class):
"""Asserts that val is an instance of the given class."""
try:
if not isinstance(self.val, some_class):
if hasattr(self.val, '__name__'):
t = self.val.__name__
elif hasattr(self.val, '__class__'):
t = self.val.__class__.__name__
else:
t = 'unknown'
self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, t, some_class.__name__))
except TypeError:
raise TypeError('given arg must be a class')
return self | Asserts that val is an instance of the given class. |
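A usage sketch in the assertpy fluent style; the assert_that entry point is assumed to wrap the value being checked:

assert_that('hello').is_instance_of(str)    # passes and returns self for chaining
assert_that(123).is_instance_of(str)        # fails with the value's type named in the error message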
def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""
Sleep an incremental amount of time after each attempt, starting at
wait_incrementing_start and incrementing by wait_incrementing_increment
"""
result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1))
if result > self._wait_incrementing_max:
result = self._wait_incrementing_max
if result < 0:
result = 0
return result | Sleep an incremental amount of time after each attempt, starting at
wait_incrementing_start and incrementing by wait_incrementing_increment |
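A worked example of the wait formula; the start, increment, and maximum values (in milliseconds) are assumptions:

start, increment, maximum = 1000, 2000, 10000
attempt = 4
wait = min(max(start + increment * (attempt - 1), 0), maximum)   # 1000 + 2000 * 3 = 7000 ms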
def create_script_staging_table(self, output_table, col_list):
"""
appends the CREATE TABLE, index etc to another table
"""
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += '-- CREATE Staging Table - ' + output_table + '\n'
self.ddl_text += '---------------------------------------------\n'
self.ddl_text += 'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n'
self.ddl_text += 'CREATE TABLE ' + output_table + ' (\n '
self.ddl_text += ' '.join([col + ' VARCHAR2(200), \n' for col in col_list])
self.ddl_text += ' ' + self.date_updated_col + ' DATE \n' # + src_table + '; \n'
self.ddl_text += ');\n' | appends the CREATE TABLE, index etc to another table |
def fix_paths(self, d, root=None, project=None):
"""
Fix the paths in the given dictionary to get absolute paths
Parameters
----------
%(ExperimentsConfig.fix_paths.parameters)s
Returns
-------
%(ExperimentsConfig.fix_paths.returns)s
Notes
-----
d is modified in place!"""
if root is None and project is None:
project = d.get('project')
if project is not None:
root = self[project]['root']
else:
root = d['root']
elif root is None:
root = self[project]['root']
elif project is None:
pass
paths = self.paths
for key, val in d.items():
if isinstance(val, dict):
d[key] = self.fix_paths(val, root, project)
elif key in paths:
val = d[key]
if isinstance(val, six.string_types) and not osp.isabs(val):
d[key] = osp.join(root, val)
elif (isinstance(utils.safe_list(val)[0], six.string_types) and
not osp.isabs(val[0])):
for i in range(len(val)):
val[i] = osp.join(root, val[i])
return d | Fix the paths in the given dictionary to get absolute paths
Parameters
----------
%(ExperimentsConfig.fix_paths.parameters)s
Returns
-------
%(ExperimentsConfig.fix_paths.returns)s
Notes
-----
d is modified in place! |
def create(self, name, network):
"""Create a new Account object and add it to this Accounts collection.
Args:
name (str): Account name
            network (str): Type of cryptocurrency. Can be one of 'bitcoin',
              'bitcoin_testnet', 'litecoin', 'dogecoin'.
Returns: The new round.Account
"""
if not network in SUPPORTED_NETWORKS:
raise ValueError('Network not valid!')
account = self.wrap(self.resource.create(dict(name=name,
network=network)))
self.add(account)
return account | Create a new Account object and add it to this Accounts collection.
Args:
name (str): Account name
network (str): Type of cryptocurrency. Can be one of 'bitcoin',
'bitcoin_testnet', 'litecoin', 'dogecoin'.
Returns: The new round.Account |
def iter_admin_log(
self, entity, limit=None, *, max_id=0, min_id=0, search=None,
admins=None, join=None, leave=None, invite=None, restrict=None,
unrestrict=None, ban=None, unban=None, promote=None, demote=None,
info=None, settings=None, pinned=None, edit=None, delete=None):
"""
Iterator over the admin log for the specified channel.
Note that you must be an administrator of it to use this method.
If none of the filters are present (i.e. they all are ``None``),
*all* event types will be returned. If at least one of them is
``True``, only those that are true will be returned.
Args:
entity (`entity`):
The channel entity from which to get its admin log.
limit (`int` | `None`, optional):
Number of events to be retrieved.
The limit may also be ``None``, which would eventually return
the whole history.
max_id (`int`):
All the events with a higher (newer) ID or equal to this will
be excluded.
min_id (`int`):
All the events with a lower (older) ID or equal to this will
be excluded.
search (`str`):
The string to be used as a search query.
admins (`entity` | `list`):
If present, the events will be filtered by these admins
(or single admin) and only those caused by them will be
returned.
join (`bool`):
If ``True``, events for when a user joined will be returned.
leave (`bool`):
If ``True``, events for when a user leaves will be returned.
invite (`bool`):
If ``True``, events for when a user joins through an invite
link will be returned.
restrict (`bool`):
If ``True``, events with partial restrictions will be
returned. This is what the API calls "ban".
unrestrict (`bool`):
If ``True``, events removing restrictions will be returned.
This is what the API calls "unban".
ban (`bool`):
If ``True``, events applying or removing all restrictions will
be returned. This is what the API calls "kick" (restricting
all permissions removed is a ban, which kicks the user).
unban (`bool`):
If ``True``, events removing all restrictions will be
returned. This is what the API calls "unkick".
promote (`bool`):
If ``True``, events with admin promotions will be returned.
demote (`bool`):
If ``True``, events with admin demotions will be returned.
info (`bool`):
If ``True``, events changing the group info will be returned.
settings (`bool`):
If ``True``, events changing the group settings will be
returned.
pinned (`bool`):
If ``True``, events of new pinned messages will be returned.
edit (`bool`):
If ``True``, events of message edits will be returned.
delete (`bool`):
If ``True``, events of message deletions will be returned.
Yields:
Instances of `telethon.tl.custom.adminlogevent.AdminLogEvent`.
"""
return _AdminLogIter(
self,
limit,
entity=entity,
admins=admins,
search=search,
min_id=min_id,
max_id=max_id,
join=join,
leave=leave,
invite=invite,
restrict=restrict,
unrestrict=unrestrict,
ban=ban,
unban=unban,
promote=promote,
demote=demote,
info=info,
settings=settings,
pinned=pinned,
edit=edit,
delete=delete
) | Iterator over the admin log for the specified channel.
Note that you must be an administrator of it to use this method.
If none of the filters are present (i.e. they all are ``None``),
*all* event types will be returned. If at least one of them is
``True``, only those that are true will be returned.
Args:
entity (`entity`):
The channel entity from which to get its admin log.
limit (`int` | `None`, optional):
Number of events to be retrieved.
The limit may also be ``None``, which would eventually return
the whole history.
max_id (`int`):
All the events with a higher (newer) ID or equal to this will
be excluded.
min_id (`int`):
All the events with a lower (older) ID or equal to this will
be excluded.
search (`str`):
The string to be used as a search query.
admins (`entity` | `list`):
If present, the events will be filtered by these admins
(or single admin) and only those caused by them will be
returned.
join (`bool`):
If ``True``, events for when a user joined will be returned.
leave (`bool`):
If ``True``, events for when a user leaves will be returned.
invite (`bool`):
If ``True``, events for when a user joins through an invite
link will be returned.
restrict (`bool`):
If ``True``, events with partial restrictions will be
returned. This is what the API calls "ban".
unrestrict (`bool`):
If ``True``, events removing restrictions will be returned.
This is what the API calls "unban".
ban (`bool`):
If ``True``, events applying or removing all restrictions will
be returned. This is what the API calls "kick" (restricting
all permissions removed is a ban, which kicks the user).
unban (`bool`):
If ``True``, events removing all restrictions will be
returned. This is what the API calls "unkick".
promote (`bool`):
If ``True``, events with admin promotions will be returned.
demote (`bool`):
If ``True``, events with admin demotions will be returned.
info (`bool`):
If ``True``, events changing the group info will be returned.
settings (`bool`):
If ``True``, events changing the group settings will be
returned.
pinned (`bool`):
If ``True``, events of new pinned messages will be returned.
edit (`bool`):
If ``True``, events of message edits will be returned.
delete (`bool`):
If ``True``, events of message deletions will be returned.
Yields:
Instances of `telethon.tl.custom.adminlogevent.AdminLogEvent`. |
def phenotypes_actions(institute_id, case_name):
"""Perform actions on multiple phenotypes."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
case_url = url_for('.case', institute_id=institute_id, case_name=case_name)
action = request.form['action']
hpo_ids = request.form.getlist('hpo_id')
user_obj = store.user(current_user.email)
if action == 'DELETE':
for hpo_id in hpo_ids:
# DELETE a phenotype from the list
store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id)
elif action == 'PHENOMIZER':
if len(hpo_ids) == 0:
hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
username = current_app.config['PHENOMIZER_USERNAME']
password = current_app.config['PHENOMIZER_PASSWORD']
diseases = controllers.hpo_diseases(username, password, hpo_ids)
return render_template('cases/diseases.html', diseases=diseases,
institute=institute_obj, case=case_obj)
elif action == 'GENES':
hgnc_symbols = set()
for raw_symbols in request.form.getlist('genes'):
# avoid empty lists
if raw_symbols:
hgnc_symbols.update(raw_symbol.split(' ', 1)[0] for raw_symbol in
raw_symbols.split('|'))
store.update_dynamic_gene_list(case_obj, hgnc_symbols=hgnc_symbols)
elif action == 'GENERATE':
if len(hpo_ids) == 0:
hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
results = store.generate_hpo_gene_list(*hpo_ids)
# determine how many HPO terms each gene must match
hpo_count = int(request.form.get('min_match') or 1)
hgnc_ids = [result[0] for result in results if result[1] >= hpo_count]
store.update_dynamic_gene_list(case_obj, hgnc_ids=hgnc_ids, phenotype_ids=hpo_ids)
return redirect(case_url) | Perform actions on multiple phenotypes. |
def decide(self, accepts, context_aware=False):
""" Returns what (mimetype,format) the client wants to receive
Parses the given Accept header and picks the best one that
we know how to output
Returns (mimetype, format)
An empty Accept will default to rdf+xml
        An Accept with */* uses rdf+xml unless a better match is found
An Accept that doesn't match anything will return (None,None)
context_aware=True will allow nquad serialization
"""
mimetype = self.decide_mimetype(accepts, context_aware)
# return what format to serialize as
if mimetype is not None:
return (mimetype, self.get_serialize_format(mimetype))
else:
# couldn't find a matching mimetype for the Accepts header
return (None, None) | Returns what (mimetype,format) the client wants to receive
Parses the given Accept header and picks the best one that
we know how to output
Returns (mimetype, format)
An empty Accept will default to rdf+xml
An Accept with */* uses rdf+xml unless a better match is found
An Accept that doesn't match anything will return (None,None)
context_aware=True will allow nquad serialization |
def sample(polygon, count, factor=1.5, max_iter=10):
"""
Use rejection sampling to generate random points inside a
polygon.
Parameters
-----------
polygon : shapely.geometry.Polygon
Polygon that will contain points
count : int
Number of points to return
factor : float
How many points to test per loop
IE, count * factor
max_iter : int,
Maximum number of intersection loops
to run, total points sampled is
count * factor * max_iter
Returns
-----------
hit : (n, 2) float
Random points inside polygon
where n <= count
"""
bounds = np.reshape(polygon.bounds, (2, 2))
extents = bounds.ptp(axis=0)
hit = []
hit_count = 0
per_loop = int(count * factor)
for i in range(max_iter):
# generate points inside polygons AABB
points = np.random.random((per_loop, 2))
points = (points * extents) + bounds[0]
# do the point in polygon test and append resulting hits
mask = vectorized.contains(polygon, *points.T)
hit.append(points[mask])
# keep track of how many points we've collected
hit_count += len(hit[-1])
# if we have enough points exit the loop
if hit_count > count:
break
# stack the hits into an (n,2) array and truncate
hit = np.vstack(hit)[:count]
return hit | Use rejection sampling to generate random points inside a
polygon.
Parameters
-----------
polygon : shapely.geometry.Polygon
Polygon that will contain points
count : int
Number of points to return
factor : float
How many points to test per loop
IE, count * factor
max_iter : int,
Maximum number of intersection loops
to run, total points sampled is
count * factor * max_iter
Returns
-----------
hit : (n, 2) float
Random points inside polygon
where n <= count |
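A usage sketch, assuming shapely (including shapely.vectorized) is available and sample is importable:

from shapely.geometry import Polygon

square = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
points = sample(square, count=100)   # (n, 2) array of points inside the square, n <= 100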
def load_experiment(folder, return_path=False):
'''load_experiment:
reads in the config.json for a folder, returns None if not found.
:param folder: full path to experiment folder
:param return_path: if True, don't load the config.json, but return it
'''
fullpath = os.path.abspath(folder)
config = "%s/config.json" %(fullpath)
if not os.path.exists(config):
bot.error("config.json could not be found in %s" %(folder))
config = None
if return_path is False and config is not None:
config = read_json(config)
return config | load_experiment:
reads in the config.json for a folder, returns None if not found.
:param folder: full path to experiment folder
:param return_path: if True, don't load the config.json, but return it |
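A usage sketch; the folder path is hypothetical:

config = load_experiment('/data/experiment1')                         # dict from config.json, or None if missing
config_path = load_experiment('/data/experiment1', return_path=True)  # path to config.json instead of its contents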
def addDataset(self, dataset):
"""
Creates a new dataset instance for this scene.
:param dataset | <XChartDataset>
:return <XChartDatasetItem>
"""
item = XChartDatasetItem()
self.addItem(item)
item.setDataset(dataset)
return item | Creates a new dataset instance for this scene.
:param dataset | <XChartDataset>
:return <XChartDatasetItem> |
def resize(self, container, height, width):
"""
Resize the tty session.
Args:
container (str or dict): The container to resize
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res) | Resize the tty session.
Args:
container (str or dict): The container to resize
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. |
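A usage sketch, assuming client is a low-level Docker API client and the container name is hypothetical:

client.resize('my-container', height=24, width=80)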
def cli(ctx, comment, metadata=""):
"""Add a canned comment
Output:
        A dictionary containing the canned comment description
"""
return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata) | Add a canned comment
Output:
A dictionary containing the canned comment description
def p_null_assignment(self, t):
'''null_assignment : IDENT EQ NULL'''
self.accu.add(Term('obs_vlabel', [self.name,"gen(\""+t[1]+"\")","0"])) | null_assignment : IDENT EQ NULL |
def hex_color(self, safe: bool = False) -> str:
"""Generate a random hex color.
:param safe: Get safe Flat UI hex color.
:return: Hex color code.
:Example:
#d8346b
"""
if safe:
return self.random.choice(SAFE_COLORS)
return '#{:06x}'.format(
self.random.randint(0x000000, 0xffffff)) | Generate a random hex color.
:param safe: Get safe Flat UI hex color.
:return: Hex color code.
:Example:
#d8346b |
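A hypothetical usage sketch, assuming text is the provider instance exposing hex_color():

text.hex_color()            # e.g. '#d8346b'
text.hex_color(safe=True)   # one of the predefined safe colors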
def _SetupBotoConfig(self):
"""Set the boto config so GSUtil works with provisioned service accounts."""
project_id = self._GetNumericProjectId()
try:
boto_config.BotoConfig(project_id, debug=self.debug)
except (IOError, OSError) as e:
self.logger.warning(str(e)) | Set the boto config so GSUtil works with provisioned service accounts. |
def _jaccard_similarity(f1, f2, weight_func):
"""Calculate generalized Jaccard similarity of formulas.
Returns the weighted similarity value or None if there is no overlap
at all. If the union of the formulas has a weight of zero (i.e. the
denominator in the Jaccard similarity is zero), a value of zero is
returned.
"""
elements = set(f1)
elements.update(f2)
count, w_count, w_total = 0, 0, 0
for element in elements:
mi = min(f1.get(element, 0), f2.get(element, 0))
mx = max(f1.get(element, 0), f2.get(element, 0))
count += mi
w = weight_func(element)
w_count += w * mi
w_total += w * mx
if count == 0:
return None
return 0.0 if w_total == 0.0 else w_count / w_total | Calculate generalized Jaccard similarity of formulas.
Returns the weighted similarity value or None if there is no overlap
at all. If the union of the formulas has a weight of zero (i.e. the
denominator in the Jaccard similarity is zero), a value of zero is
returned. |
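A worked example with unit weights; the formulas and weight function are assumptions:

f1 = {'H': 2, 'O': 1}               # H2O
f2 = {'H': 2, 'O': 2}               # H2O2
similarity = _jaccard_similarity(f1, f2, lambda element: 1.0)
# intersection (min counts) weighs 2 + 1 = 3, union (max counts) weighs 2 + 2 = 4, so similarity == 0.75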
def choice(self, obj):
"""
Overloads the choice method to add the position
of the object in the tree for future sorting.
"""
tree_id = getattr(obj, self.queryset.model._mptt_meta.tree_id_attr, 0)
left = getattr(obj, self.queryset.model._mptt_meta.left_attr, 0)
return super(MPTTModelChoiceIterator,
self).choice(obj) + ((tree_id, left),) | Overloads the choice method to add the position
of the object in the tree for future sorting. |
def pop_group(self):
"""Terminates the redirection begun by a call to :meth:`push_group`
or :meth:`push_group_with_content`
and returns a new pattern containing the results
of all drawing operations performed to the group.
The :meth:`pop_group` method calls :meth:`restore`,
(balancing a call to :meth:`save` by the push_group method),
so that any changes to the graphics state
will not be visible outside the group.
:returns:
A newly created :class:`SurfacePattern`
containing the results of all drawing operations
performed to the group.
"""
return Pattern._from_pointer(
cairo.cairo_pop_group(self._pointer), incref=False) | Terminates the redirection begun by a call to :meth:`push_group`
or :meth:`push_group_with_content`
and returns a new pattern containing the results
of all drawing operations performed to the group.
The :meth:`pop_group` method calls :meth:`restore`,
(balancing a call to :meth:`save` by the push_group method),
so that any changes to the graphics state
will not be visible outside the group.
:returns:
A newly created :class:`SurfacePattern`
containing the results of all drawing operations
performed to the group. |
def _visual_bounds_at(self, pos, node=None):
"""Find a visual whose bounding rect encompasses *pos*.
"""
if node is None:
node = self.scene
for ch in node.children:
hit = self._visual_bounds_at(pos, ch)
if hit is not None:
return hit
if (not isinstance(node, VisualNode) or not node.visible or
not node.interactive):
return None
bounds = [node.bounds(axis=i) for i in range(2)]
if None in bounds:
return None
tr = self.scene.node_transform(node).inverse
corners = np.array([
[bounds[0][0], bounds[1][0]],
[bounds[0][0], bounds[1][1]],
[bounds[0][1], bounds[1][0]],
[bounds[0][1], bounds[1][1]]])
bounds = tr.map(corners)
xhit = bounds[:, 0].min() < pos[0] < bounds[:, 0].max()
yhit = bounds[:, 1].min() < pos[1] < bounds[:, 1].max()
if xhit and yhit:
return node | Find a visual whose bounding rect encompasses *pos*. |
def compile_into_spirv(raw, stage, filepath, language="glsl",
optimization='size', suppress_warnings=False,
warnings_as_errors=False):
"""Compile shader code into Spir-V binary.
This function uses shaderc to compile your glsl or hlsl code into Spir-V
code. You can refer to the shaderc documentation.
Args:
raw (bytes): glsl or hlsl code (bytes format, not str)
stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',
'frag', 'comp']
filepath (str): Absolute path of the file (needed for #include)
language (str): 'glsl' or 'hlsl'
optimization (str): 'zero' (no optimization) or 'size' (reduce size)
suppress_warnings (bool): True to suppress warnings
warnings_as_errors (bool): Turn warnings into errors
Returns:
bytes: Compiled Spir-V binary.
Raises:
CompilationError: If compilation fails.
"""
# extract parameters
stage = stages_mapping[stage]
lang = languages_mapping[language]
opt = opt_mapping[optimization]
# initialize options
options = lib.shaderc_compile_options_initialize()
lib.shaderc_compile_options_set_source_language(options, lang)
lib.shaderc_compile_options_set_optimization_level(options, opt)
lib.shaderc_compile_options_set_target_env(
options, lib.shaderc_target_env_vulkan, 0)
lib.shaderc_compile_options_set_auto_bind_uniforms(options, False)
lib.shaderc_compile_options_set_include_callbacks(
options, lib.resolve_callback, lib.release_callback, ffi.NULL)
if suppress_warnings:
lib.shaderc_compile_options_set_suppress_warnings(options)
if warnings_as_errors:
lib.shaderc_compile_options_set_warnings_as_errors(options)
# initialize compiler
compiler = lib.shaderc_compiler_initialize()
# compile
result = lib.shaderc_compile_into_spv(compiler, raw, len(raw), stage,
str.encode(filepath), b"main",
options)
# extract result
status = lib.shaderc_result_get_compilation_status(result)
if status != lib.shaderc_compilation_status_success:
msg = _get_log(result)
lib.shaderc_compile_options_release(options)
lib.shaderc_result_release(result)
lib.shaderc_compiler_release(compiler)
raise CompilationError(msg)
length = lib.shaderc_result_get_length(result)
output_pointer = lib.shaderc_result_get_bytes(result)
tmp = bytearray(length)
ffi.memmove(tmp, output_pointer, length)
spirv = bytes(tmp)
# release resources
lib.shaderc_compile_options_release(options)
lib.shaderc_result_release(result)
lib.shaderc_compiler_release(compiler)
return spirv | Compile shader code into Spir-V binary.
This function uses shaderc to compile your glsl or hlsl code into Spir-V
code. You can refer to the shaderc documentation.
Args:
raw (bytes): glsl or hlsl code (bytes format, not str)
stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',
'frag', 'comp']
filepath (str): Absolute path of the file (needed for #include)
language (str): 'glsl' or 'hlsl'
optimization (str): 'zero' (no optimization) or 'size' (reduce size)
suppress_warnings (bool): True to suppress warnings
warnings_as_errors (bool): Turn warnings into errors
Returns:
bytes: Compiled Spir-V binary.
Raises:
CompilationError: If compilation fails. |
def fetch(self):
'''
Gives all the data it has stored, and remembers what it has given.
Later we need to call commit() to actually remove the data from the
cache.
'''
if self._fetched is not None:
raise RuntimeError('fetch() was called but the previous one has '
'not yet been applied. Not supported')
if self._cache:
self._fetched = len(self._cache)
return self._cache[0:self._fetched] | Gives all the data it has stored, and remembers what it has given.
Later we need to call commit() to actually remove the data from the
cache. |
def build_launcher(self, clsname, kind=None):
"""import and instantiate a Launcher based on importstring"""
try:
klass = find_launcher_class(clsname, kind)
except (ImportError, KeyError):
self.log.fatal("Could not import launcher class: %r"%clsname)
self.exit(1)
launcher = klass(
work_dir=u'.', config=self.config, log=self.log,
profile_dir=self.profile_dir.location, cluster_id=self.cluster_id,
)
return launcher | import and instantiate a Launcher based on importstring |
def _process_macro_default_arg(self):
"""Handle the bit after an '=' in a macro default argument. This is
probably the trickiest thing. The goal here is to accept all strings
jinja would accept and always handle block start/end correctly: It's
fine to have false positives, jinja can fail later.
Return True if there are more arguments expected.
"""
while self._parenthesis_stack:
match = self._expect_match(
'macro argument',
# you could have a string
STRING_PATTERN,
# a quote, a comma, or a open/close parenthesis
NON_STRING_MACRO_ARGS_PATTERN,
# we want to "match", not "search"
method='match'
)
matchgroups = match.groupdict()
self.advance(match.end())
if matchgroups.get('string') is not None:
# we got a string value. There could be more data.
continue
elif matchgroups.get('quote') is not None:
# we got a bunch of data and then a string opening value.
# put the quote back on the menu
self.rewind()
# now look for a string
match = self._expect_match('any string', STRING_PATTERN)
self.advance(match.end())
elif matchgroups.get('comma') is not None:
# small hack: if we hit a comma and there is one parenthesis
# left, return to look for a new name. otherwise we're still
# looking for the parameter close.
if len(self._parenthesis_stack) == 1:
return
elif matchgroups.get('open'):
self._parenthesis_stack.append(True)
elif matchgroups.get('close'):
self._parenthesis_stack.pop()
else:
raise dbt.exceptions.InternalException(
'unhandled regex in _process_macro_default_arg(), no match'
': {}'.format(matchgroups)
) | Handle the bit after an '=' in a macro default argument. This is
probably the trickiest thing. The goal here is to accept all strings
jinja would accept and always handle block start/end correctly: It's
fine to have false positives, jinja can fail later.
Return True if there are more arguments expected. |
def get_order_history(self, market=None):
"""
Used to retrieve order trade history of account
Endpoint:
1.1 /account/getorderhistory
2.0 /key/orders/getorderhistory or /key/market/GetOrderHistory
:param market: optional a string literal for the market (ie. BTC-LTC).
If omitted, will return for all markets
:type market: str
:return: order history in JSON
:rtype : dict
"""
if market:
return self._api_query(path_dict={
API_V1_1: '/account/getorderhistory',
API_V2_0: '/key/market/GetOrderHistory'
}, options={'market': market, 'marketname': market}, protection=PROTECTION_PRV)
else:
return self._api_query(path_dict={
API_V1_1: '/account/getorderhistory',
API_V2_0: '/key/orders/getorderhistory'
}, protection=PROTECTION_PRV) | Used to retrieve order trade history of account
Endpoint:
1.1 /account/getorderhistory
2.0 /key/orders/getorderhistory or /key/market/GetOrderHistory
:param market: optional a string literal for the market (ie. BTC-LTC).
If omitted, will return for all markets
:type market: str
:return: order history in JSON
:rtype : dict |
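A usage sketch, assuming client is an authenticated Bittrex-style API client:

all_history = client.get_order_history()                  # every market
ltc_history = client.get_order_history(market='BTC-LTC')  # a single market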
def update_instance(
self,
instance,
field_mask,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates an instance, and begins allocating or releasing resources as
requested. The returned ``long-running operation`` can be used to track
the progress of updating the instance. If the named instance does not
exist, returns ``NOT_FOUND``.
Immediately upon completion of this request:
- For resource types for which a decrease in the instance's allocation
has been requested, billing is based on the newly-requested level.
Until completion of the returned operation:
- Cancelling the operation sets its metadata's ``cancel_time``, and
begins restoring resources to their pre-request values. The operation
is guaranteed to succeed at undoing all resource changes, after which
point it terminates with a ``CANCELLED`` status.
- All other attempts to modify the instance are rejected.
- Reading the instance via the API continues to give the pre-request
resource levels.
Upon completion of the returned operation:
- Billing begins for all successfully-allocated resources (some types
may have lower than the requested levels).
- All newly-reserved resources are available for serving the instance's
tables.
- The instance's new resource levels are readable via the API.
The returned ``long-running operation`` will have a name of the format
``<instance_name>/operations/<operation_id>`` and can be used to track
the instance modification. The ``metadata`` field type is
``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``,
if successful.
Authorization requires ``spanner.instances.update`` permission on
resource ``name``.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> # TODO: Initialize `instance`:
>>> instance = {}
>>>
>>> # TODO: Initialize `field_mask`:
>>> field_mask = {}
>>>
>>> response = client.update_instance(instance, field_mask)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to update, which must always include the instance
name. Otherwise, only fields mentioned in
[][google.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask]
need be included.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in
[][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance]
should be updated. The field mask must always be specified; this
prevents any future fields in
[][google.spanner.admin.instance.v1.Instance] from being erased
accidentally by clients that do not know about them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_instance" not in self._inner_api_calls:
self._inner_api_calls[
"update_instance"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_instance,
default_retry=self._method_configs["UpdateInstance"].retry,
default_timeout=self._method_configs["UpdateInstance"].timeout,
client_info=self._client_info,
)
request = spanner_instance_admin_pb2.UpdateInstanceRequest(
instance=instance, field_mask=field_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("instance.name", instance.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["update_instance"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
spanner_instance_admin_pb2.Instance,
metadata_type=spanner_instance_admin_pb2.UpdateInstanceMetadata,
) | Updates an instance, and begins allocating or releasing resources as
requested. The returned ``long-running operation`` can be used to track
the progress of updating the instance. If the named instance does not
exist, returns ``NOT_FOUND``.
Immediately upon completion of this request:
- For resource types for which a decrease in the instance's allocation
has been requested, billing is based on the newly-requested level.
Until completion of the returned operation:
- Cancelling the operation sets its metadata's ``cancel_time``, and
begins restoring resources to their pre-request values. The operation
is guaranteed to succeed at undoing all resource changes, after which
point it terminates with a ``CANCELLED`` status.
- All other attempts to modify the instance are rejected.
- Reading the instance via the API continues to give the pre-request
resource levels.
Upon completion of the returned operation:
- Billing begins for all successfully-allocated resources (some types
may have lower than the requested levels).
- All newly-reserved resources are available for serving the instance's
tables.
- The instance's new resource levels are readable via the API.
The returned ``long-running operation`` will have a name of the format
``<instance_name>/operations/<operation_id>`` and can be used to track
the instance modification. The ``metadata`` field type is
``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``,
if successful.
Authorization requires ``spanner.instances.update`` permission on
resource ``name``.
Example:
>>> from google.cloud import spanner_admin_instance_v1
>>>
>>> client = spanner_admin_instance_v1.InstanceAdminClient()
>>>
>>> # TODO: Initialize `instance`:
>>> instance = {}
>>>
>>> # TODO: Initialize `field_mask`:
>>> field_mask = {}
>>>
>>> response = client.update_instance(instance, field_mask)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to update, which must always include the instance
name. Otherwise, only fields mentioned in
[][google.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask]
need be included.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in
[][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance]
should be updated. The field mask must always be specified; this
prevents any future fields in
[][google.spanner.admin.instance.v1.Instance] from being erased
accidentally by clients that do not know about them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_admin_instance_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def complete(self, text: str) -> Iterable[str]:
"""Return an iterable of possible completions for the given text in
this namespace."""
assert not text.startswith(":")
if "/" in text:
prefix, suffix = text.split("/", maxsplit=1)
results = itertools.chain(
self.__complete_alias(prefix, name_in_ns=suffix),
self.__complete_imports_and_aliases(prefix, name_in_module=suffix),
)
else:
results = itertools.chain(
self.__complete_alias(text),
self.__complete_imports_and_aliases(text),
self.__complete_interns(text),
self.__complete_refers(text),
)
return results | Return an iterable of possible completions for the given text in
this namespace. |
def bulk_create(self, objs, *args, **kwargs):
"""Insert many object at once."""
if hasattr(self.model, 'save_prep'):
# Method from AbstractBaseModel. If the model class doesn't
# subclass AbstractBaseModel, then don't call this.
self.model.save_prep(instance_or_instances=objs)
return super(CommonManager, self).bulk_create(objs=objs,
*args,
                                                      **kwargs) | Insert many objects at once.
def configure_client(
cls, address: Union[str, Tuple[str, int], Path] = 'localhost', port: int = 6379,
db: int = 0, password: str = None, ssl: Union[bool, str, SSLContext] = False,
**client_args) -> Dict[str, Any]:
"""
Configure a Redis client.
:param address: IP address, host name or path to a UNIX socket
:param port: port number to connect to (ignored for UNIX sockets)
:param db: database number to connect to
:param password: password used if the server requires authentication
:param ssl: one of the following:
* ``False`` to disable SSL
* ``True`` to enable SSL using the default context
* an :class:`~ssl.SSLContext` instance
* a ``module:varname`` reference to an :class:`~ssl.SSLContext` instance
* name of an :class:`~ssl.SSLContext` resource
:param client_args: extra keyword arguments passed to :func:`~aioredis.create_redis_pool`
"""
assert check_argument_types()
if isinstance(address, str) and not address.startswith('/'):
address = (address, port)
elif isinstance(address, Path):
address = str(address)
client_args.update({
'address': address,
'db': db,
'password': password,
'ssl': resolve_reference(ssl)
})
return client_args | Configure a Redis client.
:param address: IP address, host name or path to a UNIX socket
:param port: port number to connect to (ignored for UNIX sockets)
:param db: database number to connect to
:param password: password used if the server requires authentication
:param ssl: one of the following:
* ``False`` to disable SSL
* ``True`` to enable SSL using the default context
* an :class:`~ssl.SSLContext` instance
* a ``module:varname`` reference to an :class:`~ssl.SSLContext` instance
* name of an :class:`~ssl.SSLContext` resource
:param client_args: extra keyword arguments passed to :func:`~aioredis.create_redis_pool` |
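A hypothetical call showing the keyword arguments assembled for aioredis.create_redis_pool; the class name RedisComponent is an assumption:

client_args = RedisComponent.configure_client(address='localhost', port=6379, db=1, password='secret')
# client_args['address'] == ('localhost', 6379); db, password and the resolved ssl value are passed through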
def import_source(self, sheet, source, delimiter=","):
"""
Function:
        Save original data into a specific sheet, and try to convert the data to float type
        Input:
        sheet: Must be a sheet that does not already exist
source: File path of source
"""
# check input parameters
if ' ' in sheet:
raise RuntimeError("Error sheet name: %s" % sheet)
if not source.endswith("txt") and not source.endswith("csv"):
raise RuntimeError("Error source name: %s" % source)
self.source_sheet = sheet
source_data = np.loadtxt(source, dtype=str, delimiter=delimiter)
self.source_data = {"title": source_data[0].tolist(),
"data": source_data[1:]}
cell_format_title = self.workbook.add_format({'bold': True,
'font_name': u'等线',
'bg_color': '#c5d9f1',
'rotation': 45})
cell_format = self.workbook.add_format({'bold': False,
'font_name': u'等线',
'num_format': 0})
worksheet = self.workbook.add_worksheet(sheet)
worksheet.write_row('A1', self.source_data['title'], cell_format_title)
_, col_num = self.source_data['data'].shape
for i in range(col_num):
try:
data_array = self.source_data['data'][:, i].astype(float)
except ValueError:
data_array = self.source_data['data'][:, i]
worksheet.write_column(1, i, data_array.tolist(), cell_format) | Function:
Save original data into a specific sheet, and try to convert the data to float type
Input:
sheet: Must be a sheet that does not already exist
source: File path of source |