code | docstring
def add_results(self, *rvs, **kwargs):
if not rvs:
raise MissingTokenError.pyexc(message='No results passed')
for rv in rvs:
mi = rv._mutinfo
if not mi:
if kwargs.get('quiet'):
return False
raise MissingTokenError.pyexc(
message='Result does not contain token')
self._add_scanvec(mi)
return True
|
Changes the state to reflect the mutation which yielded the given
result.
In order to use the result, the `fetch_mutation_tokens` option must
have been specified in the connection string, _and_ the result
must have been successful.
:param rvs: One or more :class:`~.OperationResult` which have been
returned from mutations
:param quiet: Suppress errors if one of the results does not
contain a convertible state.
:return: `True` if the result was valid and added, `False` if not
added (and `quiet` was specified)
:raise: :exc:`~.MissingTokenError` if `result` does not contain
a valid token
|
def assure_relation(cls, cms_page):
try:
cms_page.cascadepage
except cls.DoesNotExist:
cls.objects.create(extended_object=cms_page)
|
Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage.
|
def airspeed_energy_error(NAV_CONTROLLER_OUTPUT, VFR_HUD):
aspeed_cm = VFR_HUD.airspeed*100
target_airspeed = NAV_CONTROLLER_OUTPUT.aspd_error + aspeed_cm
airspeed_energy_error = ((target_airspeed*target_airspeed) - (aspeed_cm*aspeed_cm))*0.00005
return airspeed_energy_error
|
Return the airspeed energy error matching APM internals.
This is positive when we are going too slow.
|
def full_clean(self, *args, **kwargs):
name = getattr(self, 'name', self.slugName.title())
self.title = "{} for {}".format(name, dateFormat(self.except_date))
self.slug = "{}-{}".format(self.except_date, self.slugName)
super().full_clean(*args, **kwargs)
|
Apply fixups that need to happen before per-field validation occurs.
Sets the page's title.
|
def save(self, *args, **kwargs):
self._create_slug()
self._create_date_slug()
self._render_content()
send_published_signal = False
if self.published and self.published_on is None:
send_published_signal = self._set_published()
super(Entry, self).save(*args, **kwargs)
if send_published_signal:
entry_published.send(sender=self, entry=self)
|
Auto-generate the slugs, render the content and, when the entry is
first published, send the ``entry_published`` signal.
|
def less_than_pi_constraints(self):
pi = self.prior_information
lt_pi = pi.loc[pi.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0, axis=1), "pilbl"]
return lt_pi
|
Get the names of the prior information equations that are listed as
less-than inequality constraints. Zero-weighted prior information is skipped.
Returns
-------
pandas.Series : pilbl of prior information that are non-zero weighted
less-than constraints
|
def elapsed_time(seconds: float) -> str:
environ.abort_thread()
parts = (
'{}'.format(timedelta(seconds=seconds))
.rsplit('.', 1)
)
hours, minutes, seconds = parts[0].split(':')
return templating.render_template(
'elapsed_time.html',
hours=hours.zfill(2),
minutes=minutes.zfill(2),
seconds=seconds.zfill(2),
microseconds=parts[-1] if len(parts) > 1 else ''
)
|
Displays the elapsed time since the current step started running.
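The values handed to the template come from parsing ``str(timedelta(...))``; a minimal standalone sketch of that parsing (no templating involved) is:

from datetime import timedelta

# str(timedelta(seconds=3723.5)) == '1:02:03.500000'
parts = str(timedelta(seconds=3723.5)).rsplit('.', 1)
hours, minutes, seconds = parts[0].split(':')
micro = parts[-1] if len(parts) > 1 else ''
print(hours.zfill(2), minutes.zfill(2), seconds.zfill(2), micro)  # 01 02 03 500000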
|
def _cron_id(cron):
cid = None
if cron['identifier']:
cid = cron['identifier']
else:
cid = SALT_CRON_NO_IDENTIFIER
if cid:
return _ensure_string(cid)
|
Safety belt: only return an identifier if we really have one.
|
def client(self):
if not hasattr(self, "_client"):
self._client = connections.get_connection("default")
return self._client
|
Get an elasticsearch client
|
def get_highest_build_tool(sdk_version=None):
if sdk_version is None:
sdk_version = config.sdk_version
android_home = os.environ.get('AG_MOBILE_SDK', os.environ.get('ANDROID_HOME'))
build_tool_folder = '%s/build-tools' % android_home
folder_list = os.listdir(build_tool_folder)
versions = [folder for folder in folder_list if folder.startswith('%s.' % sdk_version)]
if len(versions) == 0:
return config.build_tool_version
return versions[::-1][0]
|
Gets the highest build tool version based on the major sdk version.
:param sdk_version(int) - sdk version to be used as the major build tool version context.
Returns:
A string containing the build tool version (default is 23.0.2 if none is found)
|
def build_vcf_deletion(x, genome_2bit):
base1 = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper()
id1 = "hydra{0}".format(x.name)
return VcfLine(x.chrom1, x.start1, id1, base1, "<DEL>",
_vcf_single_end_info(x, "DEL", True))
|
Provide representation of deletion from BedPE breakpoints.
|
def create_from_mesh_and_lines(cls, mesh, lines):
mesh_with_lines = mesh.copy()
mesh_with_lines.add_lines(lines)
return mesh_with_lines
|
Return a copy of mesh with line vertices and edges added.
mesh: A Mesh
lines: A list of Polyline or Lines objects.
|
def selinux_fs_path():
try:
for directory in ('/sys/fs/selinux', '/selinux'):
if os.path.isdir(directory):
if os.path.isfile(os.path.join(directory, 'enforce')):
return directory
return None
except AttributeError:
return None
|
Return the location of the SELinux VFS directory
CLI Example:
.. code-block:: bash
salt '*' selinux.selinux_fs_path
|
def _del_controller(self, uid):
try:
self.controllers.pop(uid)
e = Event(uid, E_DISCONNECT)
self.queue.put_nowait(e)
except KeyError:
pass
|
Remove controller from internal list and tell the game.
:param uid: Unique id of the controller
:type uid: str
|
def exists_orm(session: Session,
ormclass: DeclarativeMeta,
*criteria: Any) -> bool:
q = session.query(ormclass)
for criterion in criteria:
q = q.filter(criterion)
exists_clause = q.exists()
return bool_from_exists_clause(session=session,
exists_clause=exists_clause)
|
Detects whether a database record exists for the specified ``ormclass``
and ``criteria``.
Example usage:
.. code-block:: python
bool_exists = exists_orm(session, MyClass, MyClass.myfield == value)
|
def saveScreenCapture(self, path=None, name=None):
bitmap = self.getBitmap()
target_file = None
if path is None and name is None:
_, target_file = tempfile.mkstemp(".png")
elif name is None:
_, tpath = tempfile.mkstemp(".png")
target_file = os.path.join(path, os.path.basename(tpath))
else:
target_file = os.path.join(path, name+".png")
cv2.imwrite(target_file, bitmap)
return target_file
|
Saves the region's bitmap
|
def default_if_empty(self, default):
if self.closed():
raise ValueError("Attempt to call default_if_empty() on a "
"closed Queryable.")
return self._create(self._generate_default_if_empty_result(default))
|
If the source sequence is empty return a single element sequence
containing the supplied default value, otherwise return the source
sequence unchanged.
Note: This method uses deferred execution.
Args:
default: The element to be returned if the source sequence is empty.
Returns:
The source sequence, or if the source sequence is empty a sequence
containing a single element with the supplied default value.
Raises:
ValueError: If the Queryable has been closed.
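A standalone sketch of the same deferred behaviour with a plain generator (an illustration, not asq's internal `_generate_default_if_empty_result`):

def default_if_empty(iterable, default):
    # Lazily yield the source items, or a single default if the source is empty.
    it = iter(iterable)
    try:
        first = next(it)
    except StopIteration:
        yield default
        return
    yield first
    yield from it

print(list(default_if_empty([], 0)))      # [0]
print(list(default_if_empty([1, 2], 0)))  # [1, 2]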
|
def handle_response(self, content, target=None, single_result=True, raw=False):
response = content['response']
self.check_errors(response)
data = response.get('data')
if is_empty(data):
return data
elif is_paginated(data):
if 'count' in data and not data['count']:
return data['data']
data = data['data']
if raw:
return data
return self.init_all_objects(data, target=target, single_result=single_result)
|
Parse the response, check it for errors, and return the extracted data.
|
def lmpool(cvals):
lenvals = ctypes.c_int(len(max(cvals, key=len)) + 1)
n = ctypes.c_int(len(cvals))
cvals = stypes.listToCharArrayPtr(cvals, xLen=lenvals, yLen=n)
libspice.lmpool_c(cvals, lenvals, n)
|
Load the variables contained in an internal buffer into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lmpool_c.html
:param cvals: list of strings.
:type cvals: list of str
|
def startup_gce_instance(instance_name, project, zone, username, machine_type,
image, public_key, disk_name=None):
log_green("Started...")
log_yellow("...Creating GCE Jenkins Slave Instance...")
instance_config = get_gce_instance_config(
instance_name, project, zone, machine_type, image,
username, public_key, disk_name
)
operation = _get_gce_compute().instances().insert(
project=project,
zone=zone,
body=instance_config
).execute()
result = gce_wait_until_done(operation)
if not result:
raise RuntimeError("Creation of VM timed out or returned no result")
log_green("Instance has booted")
|
For now, jclouds is broken for GCE and we will have static slaves
in Jenkins. Use this to boot them.
|
def _add_word(completer):
def inner(word: str):
completer.words.add(word)
return inner
|
Used to add words to the completer.
|
def replace_headers(source_pdb_content, target_pdb_content):
s = PDB(source_pdb_content)
t = PDB(target_pdb_content)
source_headers = []
for l in s.lines:
if l[:6].strip() in non_header_records:
break
else:
source_headers.append(l)
target_body = []
in_header = True
for l in t.lines:
if l[:6].strip() in non_header_records:
in_header = False
if not in_header:
target_body.append(l)
return '\n'.join(source_headers + target_body)
|
Takes the headers from source_pdb_content and adds them to target_pdb_content, removing any headers that
target_pdb_content had.
Only the content up to the first structural line is taken from source_pdb_content and only the content from
the first structural line onwards is taken from target_pdb_content.
|
def add_to(self, email):
if email.substitutions:
if isinstance(email.substitutions, list):
for substitution in email.substitutions:
self.add_substitution(substitution)
else:
self.add_substitution(email.substitutions)
if email.subject:
if isinstance(email.subject, str):
self.subject = email.subject
else:
self.subject = email.subject.get()
self._tos.append(email.get())
|
Add a single recipient to this Personalization.
:type email: Email
|
def away_save_percentage(self):
try:
save_pct = float(self.away_saves) / float(self.home_shots_on_goal)
return round(save_pct, 3)
except ZeroDivisionError:
return 0.0
|
Returns a ``float`` of the percentage of shots the away team saved.
Percentage ranges from 0-1.
|
def add_significance_indicator(plot, col_a=0, col_b=1, significant=False):
plot_bottom, plot_top = plot.get_ylim()
line_height = vertical_percent(plot, 0.1)
plot_top = plot_top + line_height
plot.set_ylim(top=plot_top + line_height * 2)
color = "black"
line_top = plot_top + line_height
plot.plot([col_a, col_a, col_b, col_b], [plot_top, line_top, line_top, plot_top], lw=1.5, color=color)
indicator = "*" if significant else "ns"
plot.text((col_a + col_b) * 0.5, line_top, indicator, ha="center", va="bottom", color=color)
|
Add a p-value significance indicator.
|
def start(self):
old_start_count = self.__start_count
self.__start_count += 1
if old_start_count == 0:
self.data_channel_start_event.fire()
|
Called from hardware source when data starts streaming.
|
def dump(self):
return {
'target': str(self.target),
'data': base64.b64encode(self.data).decode('utf-8'),
'var_id': self.var_id,
'valid': self.valid
}
|
Serialize this object.
|
def humanize_bytes(size):
if size == 0: return "0"
if size is None: return ""
assert size >= 0, "`size` cannot be negative, got %d" % size
suffixes = "TGMK"
maxl = len(suffixes)
for i in range(maxl + 1):
shift = (maxl - i) * 10
if size >> shift == 0: continue
ndigits = 0
for nd in [3, 2, 1]:
if size >> (shift + 12 - nd * 3) == 0:
ndigits = nd
break
if ndigits == 0 or size == (size >> shift) << shift:
rounded_val = str(size >> shift)
else:
rounded_val = "%.*f" % (ndigits, size / (1 << shift))
return "%s%sB" % (rounded_val, suffixes[i] if i < maxl else "")
|
Convert given number of bytes into a human readable representation, i.e. add
prefix such as KB, MB, GB, etc. The `size` argument must be a non-negative
integer.
:param size: integer representing byte size of something
:return: string representation of the size, in human-readable form
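The rounding above works in powers of 1024 via bit shifts; a tiny illustration of that arithmetic (not the library code itself):

size = 1536
shift = 10                              # one suffix step == 2**10 == 1024
print(size >> shift)                    # 1  (whole KiB)
print("%.3f" % (size / (1 << shift)))   # 1.500 -> formatted as "1.500KB" with the "K" suffix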
|
def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs):
if scopes is None:
scopes = ['public_repo']
AUTH_URL = "https://api.github.com/authorizations"
data = {
"scopes": scopes,
"note": note,
"note_url": "https://github.com/drdoctr/doctr",
"fingerprint": str(uuid.uuid4()),
}
return GitHub_post(data, AUTH_URL, **login_kwargs)
|
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
|
def copy_with(self, geometry=None, properties=None, assets=None):
def copy_assets_object(asset):
obj = asset.get("__object")
if hasattr("copy", obj):
new_obj = obj.copy()
if obj:
asset["__object"] = new_obj
geometry = geometry or self.geometry.copy()
new_properties = copy.deepcopy(self.properties)
if properties:
new_properties.update(properties)
if not assets:
assets = copy.deepcopy(self.assets)
list(map(copy_assets_object, assets.values()))
else:
assets = {}
return self.__class__(geometry, new_properties, assets)
|
Generate a new GeoFeature with different geometry or properties.
|
def calcSMA(self):
try:
return eq.KeplersThirdLaw(None, self.star.M, self.P).a
except HierarchyError:
return np.nan
|
Calculates the semi-major axis from Kepler's Third Law.
|
def trusted_permission(f):
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
trusted(request)
return f(request, *args, **kwargs)
return wrapper
|
Access only by D1 infrastructure.
|
def cumsum(x, axis=0, exclusive=False):
if not is_xla_compiled():
return tf.cumsum(x, axis=axis, exclusive=exclusive)
x_shape = shape_list(x)
rank = len(x_shape)
length = x_shape[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
ret = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != rank - 1:
ret = tf.transpose(
ret,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return ret
|
TPU hack for tf.cumsum.
This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
the axis dimension is very large.
Args:
x: a Tensor
axis: an integer
exclusive: a boolean
Returns:
Tensor of the same shape as x.
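The core trick is that a cumulative sum equals a matrix product with a triangular mask; a small NumPy sketch of that equivalence (independent of TF/TPU):

import numpy as np

x = np.array([1., 2., 3., 4.])
n = len(x)
# mask[i, j] = 1 where i <= j, so (x @ mask)[j] sums x[0..j]
mask = (np.arange(n)[:, None] <= np.arange(n)[None, :]).astype(x.dtype)
print(x @ mask)        # [ 1.  3.  6. 10.]
print(np.cumsum(x))    # [ 1.  3.  6. 10.]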
|
def get(self, instance, acl):
base_url = self._url.format(instance=instance)
url = '{base}{aclid}/'.format(base=base_url, aclid=acl)
response = requests.get(url, **self._default_request_kwargs)
data = self._get_response_data(response)
return self._concrete_acl(data)
|
Get an ACL by ID belonging to the instance specified by name.
:param str instance: The name of the instance from which to fetch the ACL.
:param str acl: The ID of the ACL to fetch.
:returns: An :py:class:`Acl` object, or None if ACL does not exist.
:rtype: :py:class:`Acl`
|
def _make_pmap_field_type(key_type, value_type):
type_ = _pmap_field_types.get((key_type, value_type))
if type_ is not None:
return type_
class TheMap(CheckedPMap):
__key_type__ = key_type
__value_type__ = value_type
def __reduce__(self):
return (_restore_pmap_field_pickle,
(self.__key_type__, self.__value_type__, dict(self)))
TheMap.__name__ = "{0}To{1}PMap".format(
_types_to_names(TheMap._checked_key_types),
_types_to_names(TheMap._checked_value_types))
_pmap_field_types[key_type, value_type] = TheMap
return TheMap
|
Create a subclass of CheckedPMap with the given key and value types.
|
def get_ipv4(hostname):
addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
socket.SOCK_STREAM)
return [addrinfo[x][4][0] for x in range(len(addrinfo))]
|
Get list of ipv4 addresses for hostname
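A self-contained sketch of the same lookup against the standard library (with "localhost" as a stand-in hostname):

import socket

infos = socket.getaddrinfo("localhost", None, socket.AF_INET, socket.SOCK_STREAM)
print([info[4][0] for info in infos])   # e.g. ['127.0.0.1']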
|
def succ(cmd, check_stderr=True, stdout=None, stderr=None):
code, out, err = run(cmd)
if stdout is not None:
stdout[:] = out
if stderr is not None:
stderr[:] = err
if code != 0:
for l in out:
print(l)
assert code == 0, 'Return: {} {}\nStderr: {}'.format(code, cmd, err)
if check_stderr:
assert err == [], 'Error: {} {}'.format(err, code)
return code, out, err
|
Wrapper around ``run`` that asserts a zero return code and, optionally, an empty stderr.
|
def get_all_incomings(chebi_ids):
all_incomings = [get_incomings(chebi_id) for chebi_id in chebi_ids]
return [x for sublist in all_incomings for x in sublist]
|
Returns all incomings
|
def command_getkeys(self, command, *args, encoding='utf-8'):
return self.execute(b'COMMAND', b'GETKEYS', command, *args,
encoding=encoding)
|
Extract keys given a full Redis command.
|
def __json_strnum_to_bignum(json_object):
for key in ('id', 'week', 'in_reply_to_id', 'in_reply_to_account_id', 'logins', 'registrations', 'statuses'):
if (key in json_object and isinstance(json_object[key], six.text_type)):
try:
json_object[key] = int(json_object[key])
except ValueError:
pass
return json_object
|
Converts json string numerals to native python bignums.
|
def ConsultarCTGActivosPorPatente(self, patente="ZZZ999"):
"Consulta de CTGs activos por patente"
ret = self.client.consultarCTGActivosPorPatente(request=dict(
auth={
'token': self.Token, 'sign': self.Sign,
'cuitRepresentado': self.Cuit, },
patente=patente,
))['response']
self.__analizar_errores(ret)
datos = ret.get('arrayConsultarCTGActivosPorPatenteResponse')
if datos:
self.DatosCTG = datos
self.LeerDatosCTG(pop=False)
return True
else:
self.DatosCTG = []
return False
|
Query active CTGs by license plate (patente).
|
def get_all_instances(sql, class_type, *args, **kwargs):
records = CoyoteDb.get_all_records(sql, *args, **kwargs)
instances = [CoyoteDb.get_object_from_dictionary_representation(
dictionary=record, class_type=class_type) for record in records]
for instance in instances:
instance._query = sql
return instances
|
Returns a list of instances of class_type populated with attributes from the DB record
@param sql: Sql statement to execute
@param class_type: The type of class to instantiate and populate with DB record
@return: Return a list of instances with attributes set to values from DB
|
def parse(cls, addr):
if addr.endswith('/'):
raise ValueError("Uris must not end in '/'")
parts = addr.split('/')
if ':' in parts[0]:
node, parts[0] = parts[0], ''
else:
node = None
ret = None
for step in parts:
ret = Uri(name=step, parent=ret, node=node)
node = None
return ret
|
Parses a new `Uri` instance from a string representation of a URI.
>>> u1 = Uri.parse('/foo/bar')
>>> u1.node, u1.steps, u1.path, u1.name
(None, ['', 'foo', 'bar'], '/foo/bar', 'bar')
>>> u2 = Uri.parse('somenode:123/foo/bar')
>>> u2.node, u1.steps, u2.path, u2.name
('somenode:123', ['', 'foo', 'bar'], '/foo/bar', 'bar')
>>> u1 = Uri.parse('foo/bar')
>>> u1.node, u1.steps, u1.path, u1.name
(None, ['foo', 'bar'], 'foo/bar', 'bar')
|
def do_quit(self, _):
if (self.server.is_server_running() == 'yes' or
self.server.is_server_running() == 'maybe'):
user_input = raw_input("Quitting shell will shut down experiment "
"server. Really quit? y or n: ")
if user_input == 'y':
self.server_off()
else:
return False
return True
|
Override do_quit for network clean up.
|
def add_translation(sender):
signals.post_save.connect(_save_translations, sender=sender)
sender.add_to_class("get_fieldtranslations", _get_fieldtranslations)
sender.add_to_class("load_translations", _load_translations)
sender.add_to_class("set_translation_fields", _set_dict_translations)
sender.add_to_class("_", _get_translated_field)
sender.add_to_class("get_trans_attr", _get_translated_field)
sender.add_to_class("_t", _get_translated_field)
|
Adds the actions to a class.
|
def selectnotnone(table, field, complement=False):
return select(table, field, lambda v: v is not None,
complement=complement)
|
Select rows where the given field is not `None`.
|
def iterweekdays(self, d1, d2):
for dt in self.iterdays(d1, d2):
if not self.isweekend(dt):
yield dt
|
Date iterator returning dates in d1 <= x < d2, excluding weekends
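A standalone sketch of the weekday filter using only ``datetime`` (the class above delegates to its own ``iterdays``/``isweekend`` helpers):

from datetime import date, timedelta

def iterweekdays(d1, d2):
    # Yield dates in d1 <= x < d2, skipping Saturday (5) and Sunday (6).
    d = d1
    while d < d2:
        if d.weekday() < 5:
            yield d
        d += timedelta(days=1)

print(list(iterweekdays(date(2024, 1, 5), date(2024, 1, 9))))
# [datetime.date(2024, 1, 5), datetime.date(2024, 1, 8)]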
|
def script_deployment(path, script, submap=None):
if submap is None:
submap = {}
script = substitute(script, submap)
return libcloud.compute.deployment.ScriptDeployment(script, path)
|
Return a ScriptDeployment from script with possible template
substitutions.
|
def crashlog_clean(name, timestamp, size, **kwargs):
ctx = Context(**kwargs)
ctx.execute_action('crashlog:clean', **{
'storage': ctx.repo.create_secure_service('storage'),
'name': name,
'size': size,
'timestamp': timestamp,
})
|
For application NAME leave SIZE crashlogs or remove all crashlogs with timestamp > TIMESTAMP.
|
def right_model_factory(*, validator=validators.is_right_model,
ld_type='Right', **kwargs):
return _model_factory(validator=validator, ld_type=ld_type, **kwargs)
|
Generate a Right model.
Expects ``data``, ``validator``, ``model_cls``, ``ld_type``, and
``ld_context`` as keyword arguments.
|
def rebin(a, newshape):
slices = [slice(0, old, float(old)/new)
for old, new in zip(a.shape, newshape)]
coordinates = numpy.mgrid[slices]
indices = coordinates.astype('i')
return a[tuple(indices)]
|
Rebin an array to a new shape.
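The rebinning above picks nearest source indices via fancy indexing; a tiny NumPy illustration of the same idea on a 4x4 -> 2x2 downsample:

import numpy as np

a = np.arange(16).reshape(4, 4)
rows = np.arange(0, 4, 2)          # source rows [0, 2]
cols = np.arange(0, 4, 2)          # source cols [0, 2]
print(a[np.ix_(rows, cols)])       # [[ 0  2]
                                   #  [ 8 10]]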
|
def render(self, text, add_header=False):
html = mark_text(text, self.aesthetics, self.rules)
html = html.replace('\n', '<br/>')
if add_header:
html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
return html
|
Render the HTML.
Parameters
----------
add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML.
|
def get_schema_spec(self, key):
member_node = self._ast_node.member.get(key, None)
if not member_node:
return schema.AnySchema()
s = framework.eval(member_node.member_schema, self.env(self))
if not isinstance(s, schema.Schema):
raise ValueError('Node %r with schema node %r should evaluate to Schema, got %r' % (member_node, member_node.member_schema, s))
return s
|
Return the evaluated schema expression from a subkey.
|
def items(self):
return [(section, dict(self.conf.items(section, raw=True)))
for section in self.conf.sections()]
|
Settings as key-value pairs.
|
def unlink_intermediate(self, sourceId, targetId):
source = self.database['items'][(self.database.get('name'), sourceId)]
target = self.database['items'][(self.database.get('name'), targetId)]
production_exchange = [x['input'] for x in source['exchanges'] if x['type'] == 'production'][0]
new_exchanges = [x for x in target['exchanges'] if x['input'] != production_exchange]
target['exchanges'] = new_exchanges
self.parameter_scan()
return True
|
Remove a link between two processes
|
def get_es_requirements(es_version):
es_version = es_version.replace('x', '0')
es_version = list(map(int, es_version.split('.')))
if es_version >= [6]:
return ">=6.0.0, <7.0.0"
elif es_version >= [5]:
return ">=5.0.0, <6.0.0"
elif es_version >= [2]:
return ">=2.0.0, <3.0.0"
elif es_version >= [1]:
return ">=1.0.0, <2.0.0"
else:
return "<1.0.0"
|
Get the requirements string for the elasticsearch-py library
Returns a suitable requirements string for the elasticsearch-py library
according to the elasticsearch version to be supported (es_version)
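The branching above relies on Python's lexicographic list comparison, for example:

# Lexicographic list comparison drives the version branching above.
print([6, 2] >= [6])   # True  -> ">=6.0.0, <7.0.0"
print([5, 9] >= [6])   # False
print([2, 3] >= [2])   # True  -> ">=2.0.0, <3.0.0"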
|
def mainswitch_state(frames):
reader = MessageReader(frames)
res = reader.string("command").bool("state").assert_end().get()
if res.command != "mainswitch.state":
raise MessageParserError("Command is not 'mainswitch.state'")
return (res.state,)
|
parse a mainswitch.state message
|
def parse_channel_name(cls, name, strict=True):
match = cls.MATCH.search(name)
if match is None or (strict and (
match.start() != 0 or match.end() != len(name))):
raise ValueError("Cannot parse channel name according to LIGO "
"channel-naming convention T990033")
return match.groupdict()
|
Decompose a channel name string into its components
Parameters
----------
name : `str`
name to parse
strict : `bool`, optional
require exact matching of format, with no surrounding text,
default `True`
Returns
-------
match : `dict`
`dict` of channel name components with the following keys:
- `'ifo'`: the letter-number interferometer prefix
- `'system'`: the top-level system name
- `'subsystem'`: the second-level sub-system name
- `'signal'`: the remaining underscore-delimited signal name
- `'trend'`: the trend type
- `'ndstype'`: the NDS2 channel suffix
Any optional keys that aren't found will return a value of `None`
Raises
------
ValueError
if the name cannot be parsed with at least an IFO and SYSTEM
Examples
--------
>>> Channel.parse_channel_name('L1:LSC-DARM_IN1_DQ')
{'ifo': 'L1',
'ndstype': None,
'signal': 'IN1_DQ',
'subsystem': 'DARM',
'system': 'LSC',
'trend': None}
>>> Channel.parse_channel_name(
'H1:ISI-BS_ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M.rms,m-trend')
{'ifo': 'H1',
'ndstype': 'm-trend',
'signal': 'ST1_SENSCOR_GND_STS_X_BLRMS_100M_300M',
'subsystem': 'BS',
'system': 'ISI',
'trend': 'rms'}
|
def ssh_cmd(self, name, ssh_command):
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
|
SSH into the given container and execute the command if given.
|
def floor(x, context=None):
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_rint_floor,
(BigFloat._implicit_convert(x),),
context,
)
|
Return the next lower or equal integer to x.
If the result is not exactly representable, it will be rounded according to
the current context.
Note that it's possible for the result to be larger than ``x``. See the
documentation of the :func:`ceil` function for more information.
.. note::
This function corresponds to the MPFR function ``mpfr_rint_floor``,
not to ``mpfr_floor``.
|
def set_input(self):
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
|
Set the rendered form input field for this Field.
|
def get_color_scheme(name):
name = name.lower()
scheme = {}
for key in COLOR_SCHEME_KEYS:
try:
scheme[key] = CONF.get('appearance', name+'/'+key)
except:
scheme[key] = CONF.get('appearance', 'spyder/'+key)
return scheme
|
Get a color scheme from config using its name
|
def _pfp__set_watch(self, watch_fields, update_func, *func_call_info):
self._pfp__watch_fields = watch_fields
for watch_field in watch_fields:
watch_field._pfp__watch(self)
self._pfp__update_func = update_func
self._pfp__update_func_call_info = func_call_info
|
Subscribe to update events on each field in ``watch_fields``, using
``update_func`` to update self's value when ``watch_field``
changes
|
def BuildLegacySubject(subject_id, approval_type):
at = rdf_objects.ApprovalRequest.ApprovalType
if approval_type == at.APPROVAL_TYPE_CLIENT:
return "aff4:/%s" % subject_id
elif approval_type == at.APPROVAL_TYPE_HUNT:
return "aff4:/hunts/%s" % subject_id
elif approval_type == at.APPROVAL_TYPE_CRON_JOB:
return "aff4:/cron/%s" % subject_id
raise ValueError("Invalid approval type.")
|
Builds a legacy AFF4 urn string for a given subject and approval type.
|
def paste(self):
text = to_text_string(QApplication.clipboard().text())
if len(text.splitlines()) > 1:
if self.new_input_line:
self.on_new_line()
self.remove_selected_text()
end = self.get_current_line_from_cursor()
lines = self.get_current_line_to_cursor() + text + end
self.clear_line()
self.execute_lines(lines)
self.move_cursor(-len(end))
else:
ShellBaseWidget.paste(self)
|
Reimplemented slot to handle multiline paste action
|
def connectExec(connection, protocol, commandLine):
deferred = connectSession(connection, protocol)
@deferred.addCallback
def requestSubsystem(session):
return session.requestExec(commandLine)
return deferred
|
Connect a Protocol to an SSH exec session.
|
def run_config(self, project, run=None, entity=None):
query = gql(
)
response = self.gql(query, variable_values={
'name': project, 'run': run, 'entity': entity
})
if response['model'] == None:
raise ValueError("Run {}/{}/{} not found".format(entity, project, run) )
run = response['model']['bucket']
commit = run['commit']
patch = run['patch']
config = json.loads(run['config'] or '{}')
if len(run['files']['edges']) > 0:
url = run['files']['edges'][0]['node']['url']
res = requests.get(url)
res.raise_for_status()
metadata = res.json()
else:
metadata = {}
return (commit, config, patch, metadata)
|
Get the relevant configs for a run
Args:
project (str): The project to download, (can include bucket)
run (str, optional): The run to download
entity (str, optional): The entity to scope this project to.
|
def update_beliefs(self, corpus_id):
corpus = self.get_corpus(corpus_id)
be = BeliefEngine(self.scorer)
stmts = list(corpus.statements.values())
be.set_prior_probs(stmts)
for uuid, correct in corpus.curations.items():
stmt = corpus.statements.get(uuid)
if stmt is None:
logger.warning('%s is not in the corpus.' % uuid)
continue
stmt.belief = correct
belief_dict = {st.uuid: st.belief for st in stmts}
return belief_dict
|
Return updated belief scores for a given corpus.
Parameters
----------
corpus_id : str
The ID of the corpus for which beliefs are to be updated.
Returns
-------
dict
A dictionary of belief scores with keys corresponding to Statement
UUIDs and values to new belief scores.
|
def wordrelationships(relationshiplist):
relationships = etree.fromstring(
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
'/relationships"></Relationships>')
count = 0
for relationship in relationshiplist:
rel_elm = makeelement('Relationship', nsprefix=None,
attributes={'Id': 'rId'+str(count+1),
'Type': relationship[0],
'Target': relationship[1]}
)
relationships.append(rel_elm)
count += 1
return relationships
|
Generate a Word relationships file
|
def get(self, buffer_type, offset):
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset]
|
Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to read from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get
|
def _load_config_section(self, section_name):
if self._config.has_section(section_name):
section = dict(self._config.items(section_name))
elif self._config.has_section("Default"):
section = dict(self._config.items("Default"))
else:
raise KeyError((
"'{}' was not found in the configuration file and no default " +
"configuration was provided."
).format(section_name))
if "protocol" in section and "host" in section and "token" in section:
return section
else:
raise KeyError(
"Missing values in configuration data. " +
"Must contain: protocol, host, token"
)
|
Method to load the specific Service section from the config file if it
exists, or fall back to the default
Args:
section_name (str): The desired service section name
Returns:
(dict): the section parameters
|
def _has_fulltext(cls, uri):
coll = cls._get_collection(uri)
with ExceptionTrap(storage.pymongo.errors.OperationFailure) as trap:
coll.create_index([('message', 'text')], background=True)
return not trap
|
Enable full text search on the messages if possible and return True.
If the full text search cannot be enabled, then return False.
|
def _is_device_active(device):
cmd = ['dmsetup', 'info', device]
dmsetup_info = util.subp(cmd)
for dm_line in dmsetup_info.stdout.split("\n"):
line = dm_line.split(':')
if ('State' in line[0].strip()) and ('ACTIVE' in line[1].strip()):
return True
return False
|
Checks dmsetup to see if a device is already active
|
def get_gaf_format(self):
sep = '\t'
return sep.join(
[self.gene, self.db_ref, self.term.id, self.evidence,
'|'.join(self.db_ref), '|'.join(self.with_)])
|
Return a GAF 2.0-compatible string representation of the annotation.
Returns
-------
str
The formatted string.
|
def patch_script_directory(graph):
temporary_dir = mkdtemp()
from_config_original = getattr(ScriptDirectory, "from_config")
run_env_original = getattr(ScriptDirectory, "run_env")
with open(join(temporary_dir, "script.py.mako"), "w") as file_:
file_.write(make_script_py_mako())
file_.flush()
setattr(ScriptDirectory, "from_config", classmethod(make_script_directory))
setattr(ScriptDirectory, "run_env", run_online_migration)
setattr(ScriptDirectory, "graph", graph)
try:
yield temporary_dir
finally:
delattr(ScriptDirectory, "graph")
setattr(ScriptDirectory, "run_env", run_env_original)
setattr(ScriptDirectory, "from_config", from_config_original)
rmtree(temporary_dir)
|
Monkey patch the `ScriptDirectory` class, working around configuration assumptions.
Changes include:
- Using a generated, temporary directory (with a generated, temporary `script.py.mako`)
instead of the assumed script directory.
- Using our `make_script_directory` function instead of the default `ScriptDirectory.from_config`.
- Using our `run_online_migration` function instead of the default `ScriptDirectory.run_env`.
- Injecting the current object graph.
|
def _weight_init(self, m, n, name):
x = np.sqrt(6.0/(m+n))
with tf.name_scope(name) as scope:
return tf.Variable(
tf.random_uniform(
[m, n], minval=-x, maxval=x), name=name)
|
Uses the Xavier Glorot method for initializing weights. This is
built in to TensorFlow as `tf.contrib.layers.xavier_initializer`,
but it's nice to see all the details.
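A NumPy-only sketch of the same Glorot/Xavier uniform rule, for reference outside TensorFlow:

import numpy as np

m, n = 128, 64
limit = np.sqrt(6.0 / (m + n))                       # Glorot/Xavier uniform bound
w = np.random.uniform(-limit, limit, size=(m, n))
print(w.shape, w.min() >= -limit, w.max() <= limit)  # (128, 64) True True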
|
def _consume_blanklines(self):
empty_size = 0
first_line = True
while True:
line = self.reader.readline()
if len(line) == 0:
return None, empty_size
stripped = line.rstrip()
if len(stripped) == 0 or first_line:
empty_size += len(line)
if len(stripped) != 0:
err_offset = self.fh.tell() - self.reader.rem_length() - empty_size
sys.stderr.write(self.INC_RECORD.format(err_offset, line))
self.err_count += 1
first_line = False
continue
return line, empty_size
|
Consume blank lines that are between records
- For warcs, there are usually 2
- For arcs, may be 1 or 0
- For block gzipped files, these are at end of each gzip envelope
and are included in record length which is the full gzip envelope
- For uncompressed, they are between records and so are NOT part of
the record length
count empty_size so that it can be subtracted from
the record length for uncompressed
if first line read is not blank, likely error in WARC/ARC,
display a warning
|
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
) -> 'QubitOrder':
if isinstance(val, collections.Iterable):
return QubitOrder.explicit(val)
if isinstance(val, QubitOrder):
return val
raise ValueError(
"Don't know how to interpret <{}> as a Basis.".format(val))
|
Converts a value into a basis.
Args:
val: An iterable or a basis.
Returns:
The basis implied by the value.
|
def get_intercepted(target):
function = _get_function(target)
intercepted = getattr(function, _INTERCEPTED, None)
ctx = getattr(function, _INTERCEPTED_CTX, None)
return intercepted, ctx
|
Get intercepted function and ctx from input target.
:param target: target from where getting the intercepted function and ctx.
:return: target intercepted function and ctx.
(None, None) if no intercepted function exist.
(fn, None) if no ctx exists.
:rtype: tuple
|
def send_request(self, request):
self.aggregate.wait_for_host(self.urlparts[1])
kwargs = self.get_request_kwargs()
kwargs["allow_redirects"] = False
self._send_request(request, **kwargs)
|
Send request and store response in self.url_connection.
|
def get_page_dpi(pageinfo, options):
"Get the DPI when nonsquare DPI is tolerable"
xres = max(
pageinfo.xres or VECTOR_PAGE_DPI,
options.oversample or 0,
VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
)
yres = max(
pageinfo.yres or VECTOR_PAGE_DPI,
options.oversample or 0,
VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
)
return (float(xres), float(yres))
|
Get the DPI when nonsquare DPI is tolerable
|
def get_annotated_list(cls, parent=None, max_depth=None):
result, info = [], {}
start_depth, prev_depth = (None, None)
qs = cls.get_tree(parent)
if max_depth:
qs = qs.filter(depth__lte=max_depth)
return cls.get_annotated_list_qs(qs)
|
Gets an annotated list from a tree branch.
:param parent:
The node whose descendants will be annotated. The node itself
will be included in the list. If not given, the entire tree
will be annotated.
:param max_depth:
Optionally limit to specified depth
|
def _raise_or_append_exception(self):
message = (
'Connection dead, no heartbeat or data received in >= '
'%ds' % (
self._interval * 2
)
)
why = AMQPConnectionError(message)
if self._exceptions is None:
raise why
self._exceptions.append(why)
|
The connection is presumably dead and we need to raise or
append an exception.
If we have a list for exceptions, append the exception and let
the connection handle it, if not raise the exception here.
:return:
|
def inspect(logdir='', event_file='', tag=''):
print(PRINT_SEPARATOR +
'Processing event files... (this can take a few minutes)\n' +
PRINT_SEPARATOR)
inspection_units = get_inspection_units(logdir, event_file, tag)
for unit in inspection_units:
if tag:
print('Event statistics for tag {} in {}:'.format(tag, unit.name))
else:
print('These tags are in {}:'.format(unit.name))
print_dict(get_unique_tags(unit.field_to_obs))
print(PRINT_SEPARATOR)
print('Event statistics for {}:'.format(unit.name))
print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
print(PRINT_SEPARATOR)
|
Main function for inspector that prints out a digest of event files.
Args:
logdir: A log directory that contains event files.
event_file: Or, a particular event file path.
tag: An optional tag name to query for.
Raises:
ValueError: If neither logdir and event_file are given, or both are given.
|
def add_view(self, request, **kwargs):
if self.model is Page:
return HttpResponseRedirect(self.get_content_models()[0].add_url)
return super(PageAdmin, self).add_view(request, **kwargs)
|
For the ``Page`` model, redirect to the add view for the
first page model, based on the ``ADD_PAGE_ORDER`` setting.
|
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
|
Exponential weighted moving average.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
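This appears to be the exponentially weighted mean used by pandas; from user code it is normally reached through the public API, e.g. this minimal usage sketch:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
print(s.ewm(span=3).mean())   # exponentially weighted moving average of s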
|
def reduce_opacity(img, opacity):
assert opacity >= 0 and opacity <= 1
if img.mode != 'RGBA':
img = img.convert('RGBA')
else:
img = img.copy()
alpha = img.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
img.putalpha(alpha)
return img
|
Returns an image with reduced opacity.
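A self-contained sketch of the same alpha-scaling with Pillow (a solid red RGBA image is used as a stand-in input):

from PIL import Image, ImageEnhance

img = Image.new('RGBA', (4, 4), (255, 0, 0, 255))
alpha = img.split()[3]                                # extract the alpha band
alpha = ImageEnhance.Brightness(alpha).enhance(0.5)   # scale alpha toward 0
img.putalpha(alpha)
print(img.getpixel((0, 0)))                           # roughly (255, 0, 0, 127)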
|
def voice(self):
if self._voice is None:
from twilio.rest.voice import Voice
self._voice = Voice(self)
return self._voice
|
Access the Voice Twilio Domain
:returns: Voice Twilio Domain
:rtype: twilio.rest.voice.Voice
|
def setup(app):
sphinx_compatibility._app = app
app.add_config_value('sphinx_gallery_conf', DEFAULT_GALLERY_CONF, 'html')
for key in ['plot_gallery', 'abort_on_example_error']:
app.add_config_value(key, get_default_config_value(key), 'html')
try:
app.add_css_file('gallery.css')
except AttributeError:
app.add_stylesheet('gallery.css')
extensions_attr = '_extensions' if hasattr(
app, '_extensions') else 'extensions'
if 'sphinx.ext.autodoc' in getattr(app, extensions_attr):
app.connect('autodoc-process-docstring', touch_empty_backreferences)
app.connect('builder-inited', generate_gallery_rst)
app.connect('build-finished', copy_binder_files)
app.connect('build-finished', summarize_failing_examples)
app.connect('build-finished', embed_code_links)
metadata = {'parallel_read_safe': True,
'parallel_write_safe': False,
'version': _sg_version}
return metadata
|
Set up the sphinx-gallery Sphinx extension.
|
def _on_mouse_released(self, event):
if event.button() == 1 and self._deco:
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if cursor and cursor.selectedText():
self._timer.request_job(
self.word_clicked.emit, cursor)
|
Mouse released callback.
|
def register_arguments(self, parser):
parser.add_argument('x', type=int, help='the first value')
parser.add_argument('y', type=int, help='the second value')
|
Guacamole method used by the argparse ingredient.
:param parser:
Argument parser (from :mod:`argparse`) specific to this command.
|
def add_output_list_opt(self, opt, outputs):
self.add_opt(opt)
for out in outputs:
self.add_opt(out)
self._add_output(out)
|
Add an option that determines a list of outputs
|
def is_valid_article_slug(article, language, slug):
from ..models import Title
qs = Title.objects.filter(slug=slug, language=language)
if article.pk:
qs = qs.exclude(Q(language=language) & Q(article=article))
qs = qs.exclude(article__publisher_public=article)
if qs.count():
return False
return True
|
Validates given slug depending on settings.
|
def add_row(self):
tbl = self._tbl
tr = tbl.add_tr()
for gridCol in tbl.tblGrid.gridCol_lst:
tc = tr.add_tc()
tc.width = gridCol.w
return _Row(tr, self)
|
Return a |_Row| instance, newly added bottom-most to the table.
|
def run(self):
config = self.state.document.settings.env.config
processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url)
process_nodes = []
for process in sorted(processes, key=itemgetter('name')):
process_nodes.extend(self.make_process_node(process))
return process_nodes
|
Create a list of process definitions.
|
def create_subscriptions(config, profile_name):
if 'kinesis' in config.subscription.keys():
data = config.subscription['kinesis']
function_name = config.name
stream_name = data['stream']
batch_size = data['batch_size']
starting_position = data['starting_position']
starting_position_ts = None
if starting_position == 'AT_TIMESTAMP':
ts = data.get('starting_position_timestamp')
starting_position_ts = datetime.strptime(ts, '%Y-%m-%dT%H:%M:%SZ')
s = KinesisSubscriber(config, profile_name,
function_name, stream_name, batch_size,
starting_position,
starting_position_ts=starting_position_ts)
s.subscribe()
|
Adds supported subscriptions
|
def convert_frame(frame, body_encoding=None):
lines = []
body = None
if frame.body:
if body_encoding:
body = encode(frame.body, body_encoding)
else:
body = encode(frame.body)
if HDR_CONTENT_LENGTH in frame.headers:
frame.headers[HDR_CONTENT_LENGTH] = len(body)
if frame.cmd:
lines.append(encode(frame.cmd))
lines.append(ENC_NEWLINE)
for key, vals in sorted(frame.headers.items()):
if vals is None:
continue
if type(vals) != tuple:
vals = (vals,)
for val in vals:
lines.append(encode("%s:%s\n" % (key, val)))
lines.append(ENC_NEWLINE)
if body:
lines.append(body)
if frame.cmd:
lines.append(ENC_NULL)
return lines
|
Convert a frame to a list of lines separated by newlines.
:param Frame frame: the Frame object to convert
:rtype: list(str)
|
def reset(self):
self.stepid = 0
for task, agent in zip(self.tasks, self.agents):
task.reset()
agent.module.reset()
agent.history.reset()
|
Sets initial conditions for the experiment.
|
def _parse_device_path(self, device_path, char_path_override=None):
try:
device_type = device_path.rsplit('-', 1)[1]
except IndexError:
warn("The following device path was skipped as it could "
"not be parsed: %s" % device_path, RuntimeWarning)
return
realpath = os.path.realpath(device_path)
if realpath in self._raw:
return
self._raw.append(realpath)
if device_type == 'kbd':
self.keyboards.append(Keyboard(self, device_path,
char_path_override))
elif device_type == 'mouse':
self.mice.append(Mouse(self, device_path,
char_path_override))
elif device_type == 'joystick':
self.gamepads.append(GamePad(self,
device_path,
char_path_override))
else:
self.other_devices.append(OtherDevice(self,
device_path,
char_path_override))
|
Parse each device and add it to the appropriate list.
|
def _initialize(self, show_bounds, reset_camera, outline):
self.plotter.subplot(*self.loc)
if outline is None:
self.plotter.add_mesh(self.input_dataset.outline_corners(),
reset_camera=False, color=vtki.rcParams['outline_color'],
loc=self.loc)
elif outline:
self.plotter.add_mesh(self.input_dataset.outline(),
reset_camera=False, color=vtki.rcParams['outline_color'],
loc=self.loc)
if show_bounds:
self.plotter.show_bounds(reset_camera=False, loc=self.loc)
if reset_camera:
cpos = self.plotter.get_default_cam_pos()
self.plotter.camera_position = cpos
self.plotter.reset_camera()
self.plotter.camera_set = False
|
Outlines the input dataset and sets up the scene
|