code | docstring
---|---
def autozoom(self, points):
'''Fit the current view to the correct zoom level to display
all *points*.
The camera viewing direction and rotation pivot match the
geometric center of the points and the distance from that
point is calculated in order for all points to be in the field
of view. This is currently used to provide optimal
visualization for molecules and systems.
**Parameters**
points: np.ndarray((N, 3))
Array of points.
'''
points = np.asarray(points)
extraoff = 0.01
# Project points on the plane defined by camera up and right
# vector. This is achieved by using dot product on camera a
# and b vectors
abc = np.array([self.a, self.b, self.c])
old_geom_center = points.sum(axis=0)/len(points)
# Translate points
points = points.copy() + self.position
# Translate position to geometric_center along directions
# a and b
geom_center = points.sum(axis=0)/len(points)
self.position += self.a * np.dot(geom_center, self.a)
self.position += self.b * np.dot(geom_center, self.b)
# Translate pivot to the geometric center
self.pivot = old_geom_center
# Get the bounding sphere radius by searching for the most
# distant point
bound_radius = np.sqrt(((points-geom_center) * (points-geom_center)).sum(axis=1).max())
# Calculate the distance in order to have the most distant
# point in our field of view (top/bottom)
fov_topbottom = self.fov*np.pi/180.0
dist = (bound_radius + self.z_near)/np.tan(fov_topbottom * 0.5)
# Set the c-component of the position at the calculated distance
# 1) translate the position on the pivot
self.position = self.pivot.copy()
# 2) add the distance plus a little extra room
self.position -= self.c * (dist*(1 + extraoff)) | Fit the current view to the correct zoom level to display
all *points*.
The camera viewing direction and rotation pivot match the
geometric center of the points and the distance from that
point is calculated in order for all points to be in the field
of view. This is currently used to provide optimal
visualization for molecules and systems.
**Parameters**
points: np.ndarray((N, 3))
Array of points. |
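As a worked illustration of the field-of-view distance formula used in autozoom above (a minimal standalone sketch; the FOV, bounding radius, and near-plane values are hypothetical examples, not taken from the camera class):
import numpy as np

# Hypothetical example: 60-degree vertical FOV, bounding sphere of radius 5.0
# around the points, near clipping plane at 0.1.
fov_degrees = 60.0
bound_radius = 5.0
z_near = 0.1

# Same formula as in autozoom: back the camera off along -c until the whole
# bounding sphere fits within the vertical field of view.
fov_radians = fov_degrees * np.pi / 180.0
dist = (bound_radius + z_near) / np.tan(fov_radians * 0.5)
print(round(dist, 3))  # 8.833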
def prebinned_hist(counts, binlims, ax=None, *args, **kwargs):
"""Plot a histogram with counts, binlims already given.
Example
=======
>>> gaus = np.random.normal(size=100)
>>> counts, binlims = np.histogram(gaus, bins='auto')
>>> prebinned_hist(counts, binlims)
"""
ax = get_ax(ax)
x = bincenters(binlims)
weights = counts
return ax.hist(x, bins=binlims, weights=weights, *args, **kwargs) | Plot a histogram with counts, binlims already given.
Example
=======
>>> gaus = np.random.normal(size=100)
>>> counts, binlims = np.histogram(gaus, bins='auto')
>>> prebinned_hist(counts, binlims) |
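The helpers `get_ax` and `bincenters` used by prebinned_hist are not shown above. A minimal sketch of the same weighted-histogram trick done directly with matplotlib, together with an assumed `bincenters` (midpoints of consecutive bin edges):
import numpy as np
import matplotlib.pyplot as plt

def bincenters(binlims):
    # Assumed behaviour of the helper: midpoints of consecutive bin edges.
    binlims = np.asarray(binlims)
    return 0.5 * (binlims[:-1] + binlims[1:])

gaus = np.random.normal(size=100)
counts, binlims = np.histogram(gaus, bins='auto')
# One weighted entry per bin center reproduces the pre-binned histogram.
plt.hist(bincenters(binlims), bins=binlims, weights=counts)
plt.show()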
def entails(self, other):
""" Inverse is_entailed_by """
other = BoolCell.coerce(other)
return other.is_entailed_by(self) | Inverse is_entailed_by |
def irregular_sampling(T, N, rseed=None):
"""
Generates an irregularly sampled time vector by perturbing a
linearly spaced vector and later deleting a certain number of
points
Parameters
----------
T: float
Time span of the vector, i.e. how long it is in time
N: positive integer
Number of samples of the resulting time vector
rseed:
Random seed to feed the random number generator
Returns
-------
t_irr: ndarray
An irregularly sampled time vector
"""
sampling_period = (T/float(N))
N = int(N)
np.random.seed(rseed)
t = np.linspace(0, T, num=5*N)
# First we add jitter
t[1:-1] += sampling_period*0.5*np.random.randn(5*N-2)
# Then we do a random permutation and keep only N points
P = np.random.permutation(5*N)
t_irr = np.sort(t[P[:N]])
return t_irr | Generates an irregularly sampled time vector by perturbing a
linearly spaced vector and later deleting a certain number of
points
Parameters
----------
T: float
Time span of the vector, i.e. how long it is in time
N: positive integer
Number of samples of the resulting time vector
rseed:
Random seed to feed the random number generator
Returns
-------
t_irr: ndarray
An irregularly sampled time vector |
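A minimal usage sketch for irregular_sampling (assuming numpy is imported as np and the function above is in scope; the time span and sample count are arbitrary example values):
import numpy as np

t_irr = irregular_sampling(T=10.0, N=100, rseed=42)
assert len(t_irr) == 100
assert np.all(np.diff(t_irr) >= 0)  # samples come back sorted in time
print(t_irr[:5])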
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self | Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features) |
def _fingerprint_target_specs(self, specs):
"""Returns a fingerprint of the targets resolved from given target specs."""
assert self._build_graph is not None, (
'cannot fingerprint specs `{}` without a `BuildGraph`'.format(specs)
)
hasher = sha1()
for spec in sorted(specs):
for target in sorted(self._build_graph.resolve(spec)):
# Not all targets have hashes; in particular, `Dependencies` targets don't.
h = target.compute_invalidation_hash()
if h:
hasher.update(h.encode('utf-8'))
return hasher.hexdigest() | Returns a fingerprint of the targets resolved from given target specs. |
def parse_option(self, option, block_name, *values):
""" Parse status, end_status, timer_status and status_msg options.
"""
if option.endswith('status'):
status = values[0]
if status not in self.VALID_STATUSES:
raise ValueError(u'Invalid IM status "{0}"'.format(status))
if len(values) > 2:
raise TypeError
if option == 'status':
option = 'start_' + option
key = option.split('_', 1)[0]
self.statuses[key] = values[:2]
elif option == 'status_msg':
if len(values) != 2:
raise TypeError
name, msg = values
self.messages[name] = msg | Parse status, end_status, timer_status and status_msg options. |
def from_config(cls, cp, model, nprocesses=1, use_mpi=False):
"""Loads the sampler from the given config file."""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of walkers to use
nwalkers = int(cp.get(section, "nwalkers"))
# get the checkpoint interval, if it's specified
checkpoint_interval = cls.checkpoint_from_config(cp, section)
checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
# get the logpost function
lnpost = get_optional_arg_from_config(cp, section, 'logpost-function')
obj = cls(model, nwalkers,
checkpoint_interval=checkpoint_interval,
checkpoint_signal=checkpoint_signal,
logpost_function=lnpost, nprocesses=nprocesses,
use_mpi=use_mpi)
# set target
obj.set_target_from_config(cp, section)
# add burn-in if it's specified
obj.set_burn_in_from_config(cp)
# set prethin options
obj.set_thin_interval_from_config(cp, section)
return obj | Loads the sampler from the given config file. |
def get_cgi_parameter_str_or_none(form: cgi.FieldStorage,
key: str) -> Optional[str]:
"""
Extracts a string parameter from a CGI form, or ``None`` if the key doesn't
exist or the string is zero-length.
"""
s = get_cgi_parameter_str(form, key)
if s is None or len(s) == 0:
return None
return s | Extracts a string parameter from a CGI form, or ``None`` if the key doesn't
exist or the string is zero-length. |
def sort(self, column, order=Qt.AscendingOrder):
"""Overriding sort method."""
ascending = order == Qt.AscendingOrder
self.model.sort(self.COLUMN_INDEX, order=ascending)
return True | Overriding sort method. |
def push(self, line):
"""Transform and push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
if transforms.FROM_EXPERIMENTAL.match(line):
transforms.add_transformers(line)
self.buffer.append("\n")
else:
self.buffer.append(line)
add_pass = False
if line.rstrip(' ').endswith(":"):
add_pass = True
source = "\n".join(self.buffer)
if add_pass:
source += "pass"
source = transforms.transform(source)
if add_pass:
source = source.rstrip(' ')
if source.endswith("pass"):
source = source[:-4]
# some transformations may strip an empty line meant to end a block
if not self.buffer[-1]:
source += "\n"
try:
more = self.runsource(source, self.filename)
except SystemExit:
os._exit(1)
if not more:
self.resetbuffer()
return more | Transform and push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()). |
def _FormatMessage(self, event):
"""Formats the message.
Args:
event (EventObject): event.
Returns:
str: message field.
Raises:
NoFormatterFound: if no event formatter can be found to match the data
type in the event.
"""
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
return message | Formats the message.
Args:
event (EventObject): event.
Returns:
str: message field.
Raises:
NoFormatterFound: if no event formatter can be found to match the data
type in the event. |
def getSingle(self, type_uri, default=None):
"""Get a single value for an attribute. If no value was sent
for this attribute, use the supplied default. If there is more
than one value for this attribute, this method will fail.
@type type_uri: str
@param type_uri: The URI for the attribute
@param default: The value to return if the attribute was not
sent in the fetch_response.
@returns: The value of the attribute in the fetch_response
message, or the default supplied
@rtype: unicode or NoneType
@raises ValueError: If there is more than one value for this
parameter in the fetch_response message.
@raises KeyError: If the attribute was not sent in this response
"""
values = self.data.get(type_uri)
if not values:
return default
elif len(values) == 1:
return values[0]
else:
raise AXError(
'More than one value present for %r' % (type_uri,)) | Get a single value for an attribute. If no value was sent
for this attribute, use the supplied default. If there is more
than one value for this attribute, this method will fail.
@type type_uri: str
@param type_uri: The URI for the attribute
@param default: The value to return if the attribute was not
sent in the fetch_response.
@returns: The value of the attribute in the fetch_response
message, or the default supplied
@rtype: unicode or NoneType
@raises ValueError: If there is more than one value for this
parameter in the fetch_response message.
@raises KeyError: If the attribute was not sent in this response |
async def eat(self, philosopher):
'''The ``philosopher`` performs one of these two actions:
* eat, if he has both forks and then :meth:`release_forks`.
* try to :meth:`pickup_fork`, if he has fewer than 2 forks.
'''
loop = philosopher._loop
while True:
forks = self.forks
if forks:
#
# Two forks. Eat!
if len(forks) == 2:
self.thinking = 0
self.eaten += 1
philosopher.logger.info("eating... So far %s times",
self.eaten)
eat_time = 2*self.cfg.eating_period*random.random()
await sleep(eat_time)
await self.release_forks(philosopher)
#
# One fork only! release fork or try to pick one up
elif len(forks) == 1:
waiting_period = 2*self.cfg.waiting_period*random.random()
if self.started_waiting == 0:
self.started_waiting = loop.time()
elif loop.time() - self.started_waiting > waiting_period:
philosopher.logger.debug("tired of waiting")
await self.release_forks(philosopher)
#
# this should never happen
elif len(forks) > 2: # pragma nocover
philosopher.logger.critical('more than 2 forks!!!')
await self.release_forks(philosopher)
else:
if not self.thinking:
philosopher.logger.warning('thinking...')
self.thinking += 1
await self.pickup_fork(philosopher) | The ``philosopher`` performs one of these two actions:
* eat, if he has both forks and then :meth:`release_forks`.
* try to :meth:`pickup_fork`, if he has fewer than 2 forks. |
def _hm_read_address(self):
"""Reads from the DCB and maps to yaml config file."""
response = self._hm_send_address(self.address, 0, 0, 0)
lookup = self.config['keys']
offset = self.config['offset']
keydata = {}
for i in lookup:
try:
kdata = lookup[i]
ddata = response[i + offset]
keydata[i] = {
'label': kdata,
'value': ddata
}
except IndexError:
logging.info("Finished processing at %d", i)
return keydata | Reads from the DCB and maps to yaml config file. |
def internal_name(self):
"""
Return the unique internal name
"""
unq = 'f_' + super().internal_name()
if self.tparams is not None:
unq += "_" + "_".join(self.tparams)
if self.tret is not None:
unq += "_" + self.tret
return unq | Return the unique internal name |
def stream(self, date_created_from=values.unset, date_created_to=values.unset,
limit=None, page_size=None):
"""
Streams ExecutionInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time.
:param datetime date_created_to: Only show Executions that started before this ISO8601 date-time.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
date_created_from=date_created_from,
date_created_to=date_created_to,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | Streams ExecutionInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param datetime date_created_from: Only show Executions that started on or after this ISO8601 date-time.
:param datetime date_created_to: Only show Executions that started before this ISO8601 date-time.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.studio.v1.flow.execution.ExecutionInstance] |
async def _send_loop(self):
"""
This loop is responsible for popping items off the send
queue, encrypting them, and sending them over the network.
Besides `connect`, only this method ever sends data.
"""
while self._user_connected and not self._reconnecting:
if self._pending_ack:
ack = RequestState(MsgsAck(list(self._pending_ack)), self._loop)
self._send_queue.append(ack)
self._last_acks.append(ack)
self._pending_ack.clear()
self._log.debug('Waiting for messages to send...')
# TODO Wait for the connection send queue to be empty?
# This means that while it's not empty we can wait for
# more messages to be added to the send queue.
batch, data = await self._send_queue.get()
if not data:
continue
self._log.debug('Encrypting %d message(s) in %d bytes for sending',
len(batch), len(data))
data = self._state.encrypt_message_data(data)
try:
await self._connection.send(data)
except IOError as e:
self._log.info('Connection closed while sending data')
self._start_reconnect(e)
return
for state in batch:
if not isinstance(state, list):
if isinstance(state.request, TLRequest):
self._pending_state[state.msg_id] = state
else:
for s in state:
if isinstance(s.request, TLRequest):
self._pending_state[s.msg_id] = s
self._log.debug('Encrypted messages put in a queue to be sent') | This loop is responsible for popping items off the send
queue, encrypting them, and sending them over the network.
Besides `connect`, only this method ever sends data. |
def fix_lines(source_lines, options, filename=''):
"""Return fixed source code."""
# Transform everything to line feed. Then change them back to original
# before returning fixed source code.
original_newline = find_newline(source_lines)
tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
# Keep a history to break out of cycles.
previous_hashes = set()
if options.line_range:
# Disable "apply_local_fixes()" for now due to issue #175.
fixed_source = tmp_source
else:
# Apply global fixes only once (for efficiency).
fixed_source = apply_global_fixes(tmp_source,
options,
filename=filename)
passes = 0
long_line_ignore_cache = set()
while hash(fixed_source) not in previous_hashes:
if options.pep8_passes >= 0 and passes > options.pep8_passes:
break
passes += 1
previous_hashes.add(hash(fixed_source))
tmp_source = copy.copy(fixed_source)
fix = FixPEP8(
filename,
options,
contents=tmp_source,
long_line_ignore_cache=long_line_ignore_cache)
fixed_source = fix.fix()
sio = io.StringIO(fixed_source)
return ''.join(normalize_line_endings(sio.readlines(), original_newline)) | Return fixed source code. |
def __gen_token_anno_file(self, top_level_layer):
"""
creates an etree representation of a <multiFeat> file that describes
all the annotations that only span one token (e.g. POS, lemma etc.).
Note: discoursegraphs will create one token annotation file for each
top level layer (e.g. conano, tiger etc.).
"""
base_paula_id = '{0}.{1}.tok'.format(self.corpus_name, self.name)
paula_id = '{0}.{1}.{2}.tok_multiFeat'.format(top_level_layer,
self.corpus_name,
self.name)
E, tree = gen_paula_etree(paula_id)
mflist = E('multiFeatList',
{XMLBASE: base_paula_id+'.xml'})
for token_id in self.dg.tokens:
mfeat = E('multiFeat',
{XLINKHREF: '#{0}'.format(token_id)})
token_dict = self.dg.node[token_id]
for feature in token_dict:
# TODO: highly inefficient! refactor!1!!
if feature not in IGNORED_TOKEN_ATTRIBS \
and feature.startswith(top_level_layer):
mfeat.append(E('feat',
{'name': feature,
'value': token_dict[feature]}))
if self.human_readable: # adds token string as a <!-- comment -->
mfeat.append(Comment(token_dict[self.dg.ns+':token']))
mflist.append(mfeat)
tree.append(mflist)
self.files[paula_id] = tree
self.file2dtd[paula_id] = PaulaDTDs.multifeat
return paula_id | creates an etree representation of a <multiFeat> file that describes
all the annotations that only span one token (e.g. POS, lemma etc.).
Note: discoursegraphs will create one token annotation file for each
top level layer (e.g. conano, tiger etc.). |
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False, validating=True):
"""Return UIDs of the selected services
"""
service_uids = form.get("uids", [])
return service_uids, {} | Return UIDs of the selected services |
def _extra_trust_root_validation(self):
"""
Manually invoked windows certificate chain builder and verification
step when there are extra trust roots to include in the search process
"""
store = None
cert_chain_context_pointer = None
try:
# We set up an in-memory store to pass as an extra store to grab
# certificates from when performing the verification
store = crypt32.CertOpenStore(
Crypt32Const.CERT_STORE_PROV_MEMORY,
Crypt32Const.X509_ASN_ENCODING,
null(),
0,
null()
)
if is_null(store):
handle_crypt32_error(0)
cert_hashes = set()
for cert in self._session._extra_trust_roots:
cert_data = cert.dump()
result = crypt32.CertAddEncodedCertificateToStore(
store,
Crypt32Const.X509_ASN_ENCODING,
cert_data,
len(cert_data),
Crypt32Const.CERT_STORE_ADD_USE_EXISTING,
null()
)
if not result:
handle_crypt32_error(0)
cert_hashes.add(cert.sha256)
cert_context_pointer_pointer = new(crypt32, 'PCERT_CONTEXT *')
result = secur32.QueryContextAttributesW(
self._context_handle_pointer,
Secur32Const.SECPKG_ATTR_REMOTE_CERT_CONTEXT,
cert_context_pointer_pointer
)
handle_error(result)
cert_context_pointer = unwrap(cert_context_pointer_pointer)
cert_context_pointer = cast(crypt32, 'PCERT_CONTEXT', cert_context_pointer)
# We have to do a funky shuffle here because FILETIME from kernel32
# is different than FILETIME from crypt32 when using cffi. If we
# overwrite the "now_pointer" variable, cffi releases the backing
# memory and we end up getting a validation error about certificate
# expiration time.
orig_now_pointer = new(kernel32, 'FILETIME *')
kernel32.GetSystemTimeAsFileTime(orig_now_pointer)
now_pointer = cast(crypt32, 'FILETIME *', orig_now_pointer)
usage_identifiers = new(crypt32, 'char *[3]')
usage_identifiers[0] = cast(crypt32, 'char *', Crypt32Const.PKIX_KP_SERVER_AUTH)
usage_identifiers[1] = cast(crypt32, 'char *', Crypt32Const.SERVER_GATED_CRYPTO)
usage_identifiers[2] = cast(crypt32, 'char *', Crypt32Const.SGC_NETSCAPE)
cert_enhkey_usage_pointer = struct(crypt32, 'CERT_ENHKEY_USAGE')
cert_enhkey_usage = unwrap(cert_enhkey_usage_pointer)
cert_enhkey_usage.cUsageIdentifier = 3
cert_enhkey_usage.rgpszUsageIdentifier = cast(crypt32, 'char **', usage_identifiers)
cert_usage_match_pointer = struct(crypt32, 'CERT_USAGE_MATCH')
cert_usage_match = unwrap(cert_usage_match_pointer)
cert_usage_match.dwType = Crypt32Const.USAGE_MATCH_TYPE_OR
cert_usage_match.Usage = cert_enhkey_usage
cert_chain_para_pointer = struct(crypt32, 'CERT_CHAIN_PARA')
cert_chain_para = unwrap(cert_chain_para_pointer)
cert_chain_para.RequestedUsage = cert_usage_match
cert_chain_para_size = sizeof(crypt32, cert_chain_para)
cert_chain_para.cbSize = cert_chain_para_size
cert_chain_context_pointer_pointer = new(crypt32, 'PCERT_CHAIN_CONTEXT *')
result = crypt32.CertGetCertificateChain(
null(),
cert_context_pointer,
now_pointer,
store,
cert_chain_para_pointer,
Crypt32Const.CERT_CHAIN_CACHE_END_CERT | Crypt32Const.CERT_CHAIN_REVOCATION_CHECK_CACHE_ONLY,
null(),
cert_chain_context_pointer_pointer
)
handle_crypt32_error(result)
cert_chain_policy_para_flags = Crypt32Const.CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS
cert_chain_context_pointer = unwrap(cert_chain_context_pointer_pointer)
# Unwrap the chain and if the final element in the chain is one of
# extra trust roots, set flags so that we trust the certificate even
# though it is not in the Trusted Roots store
cert_chain_context = unwrap(cert_chain_context_pointer)
num_chains = native(int, cert_chain_context.cChain)
if num_chains == 1:
first_simple_chain_pointer = unwrap(cert_chain_context.rgpChain)
first_simple_chain = unwrap(first_simple_chain_pointer)
num_elements = native(int, first_simple_chain.cElement)
last_element_pointer = first_simple_chain.rgpElement[num_elements - 1]
last_element = unwrap(last_element_pointer)
last_element_cert = unwrap(last_element.pCertContext)
last_element_cert_data = bytes_from_buffer(
last_element_cert.pbCertEncoded,
native(int, last_element_cert.cbCertEncoded)
)
last_cert = x509.Certificate.load(last_element_cert_data)
if last_cert.sha256 in cert_hashes:
cert_chain_policy_para_flags |= Crypt32Const.CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG
ssl_extra_cert_chain_policy_para_pointer = struct(crypt32, 'SSL_EXTRA_CERT_CHAIN_POLICY_PARA')
ssl_extra_cert_chain_policy_para = unwrap(ssl_extra_cert_chain_policy_para_pointer)
ssl_extra_cert_chain_policy_para.cbSize = sizeof(crypt32, ssl_extra_cert_chain_policy_para)
ssl_extra_cert_chain_policy_para.dwAuthType = Crypt32Const.AUTHTYPE_SERVER
ssl_extra_cert_chain_policy_para.fdwChecks = 0
ssl_extra_cert_chain_policy_para.pwszServerName = cast(
crypt32,
'wchar_t *',
buffer_from_unicode(self._hostname)
)
cert_chain_policy_para_pointer = struct(crypt32, 'CERT_CHAIN_POLICY_PARA')
cert_chain_policy_para = unwrap(cert_chain_policy_para_pointer)
cert_chain_policy_para.cbSize = sizeof(crypt32, cert_chain_policy_para)
cert_chain_policy_para.dwFlags = cert_chain_policy_para_flags
cert_chain_policy_para.pvExtraPolicyPara = cast(crypt32, 'void *', ssl_extra_cert_chain_policy_para_pointer)
cert_chain_policy_status_pointer = struct(crypt32, 'CERT_CHAIN_POLICY_STATUS')
cert_chain_policy_status = unwrap(cert_chain_policy_status_pointer)
cert_chain_policy_status.cbSize = sizeof(crypt32, cert_chain_policy_status)
result = crypt32.CertVerifyCertificateChainPolicy(
Crypt32Const.CERT_CHAIN_POLICY_SSL,
cert_chain_context_pointer,
cert_chain_policy_para_pointer,
cert_chain_policy_status_pointer
)
handle_crypt32_error(result)
cert_context = unwrap(cert_context_pointer)
cert_data = bytes_from_buffer(cert_context.pbCertEncoded, native(int, cert_context.cbCertEncoded))
cert = x509.Certificate.load(cert_data)
error = cert_chain_policy_status.dwError
if error:
if error == Crypt32Const.CERT_E_EXPIRED:
raise_expired_not_yet_valid(cert)
if error == Crypt32Const.CERT_E_UNTRUSTEDROOT:
oscrypto_cert = load_certificate(cert)
if oscrypto_cert.self_signed:
raise_self_signed(cert)
else:
raise_no_issuer(cert)
if error == Crypt32Const.CERT_E_CN_NO_MATCH:
raise_hostname(cert, self._hostname)
if error == Crypt32Const.TRUST_E_CERT_SIGNATURE:
raise_weak_signature(cert)
if error == Crypt32Const.CRYPT_E_REVOKED:
raise_revoked(cert)
raise_verification(cert)
if cert.hash_algo in set(['md5', 'md2']):
raise_weak_signature(cert)
finally:
if store:
crypt32.CertCloseStore(store, 0)
if cert_chain_context_pointer:
crypt32.CertFreeCertificateChain(cert_chain_context_pointer) | Manually invoked windows certificate chain builder and verification
step when there are extra trust roots to include in the search process |
def _get_init_args(self):
"""Creates dict with properties marked as readonly"""
args = {}
for rop in self.ro_properties:
if rop in self.properties:
args[rop] = self.properties[rop]
return args | Creates dict with properties marked as readonly |
def process_flagged_blocks(self, content: str) -> str:
'''Replace flagged blocks either with their contents or nothing, depending on the value
of ``FOLIANT_FLAGS`` environment variable and ``flags`` config value.
:param content: Markdown content
:returns: Markdown content without flagged blocks
'''
def _sub(flagged_block):
options = self.get_options(flagged_block.group('options'))
required_flags = {
flag.lower()
for flag in re.split(self._flag_delimiters, options.get('flags', ''))
if flag
} | {
f'target:{target.lower()}'
for target in re.split(self._flag_delimiters, options.get('targets', ''))
if target
} | {
f'backend:{backend.lower()}'
for backend in re.split(self._flag_delimiters, options.get('backends', ''))
if backend
}
env_flags = {
flag.lower()
for flag in re.split(self._flag_delimiters, getenv(self._flags_envvar, ''))
if flag
}
config_flags = {flag.lower() for flag in self.options['flags']}
set_flags = env_flags \
| config_flags \
| {f'target:{self.context["target"]}', f'backend:{self.context["backend"]}'}
kind = options.get('kind', 'all')
if (kind == 'all' and required_flags <= set_flags) \
or (kind == 'any' and required_flags & set_flags) \
or (kind == 'none' and not required_flags & set_flags):
return flagged_block.group('body').strip()
else:
return ''
return self.pattern.sub(_sub, content) | Replace flagged blocks either with their contents or nothing, depending on the value
of ``FOLIANT_FLAGS`` environment variable and ``flags`` config value.
:param content: Markdown content
:returns: Markdown content without flagged blocks |
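A small standalone illustration of the kind=all/any/none set logic used in _sub above (the flag names are invented for the example):
required_flags = {"draft", "target:pdf"}
set_flags = {"draft", "target:pdf", "backend:pandoc"}

print(required_flags <= set_flags)       # True  -> kind == 'all' keeps the block
print(bool(required_flags & set_flags))  # True  -> kind == 'any' keeps the block
print(not required_flags & set_flags)    # False -> kind == 'none' drops the block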
def getProjectAreas(self, archived=False, returned_properties=None):
"""Get all :class:`rtcclient.project_area.ProjectArea` objects
If no :class:`rtcclient.project_area.ProjectArea` objects are
retrieved, `None` is returned.
:param archived: (default is False) whether the project area
is archived
:param returned_properties: the returned properties that you want.
Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: A :class:`list` that contains all the
:class:`rtcclient.project_area.ProjectArea` objects
:rtype: list
"""
return self._getProjectAreas(archived=archived,
returned_properties=returned_properties) | Get all :class:`rtcclient.project_area.ProjectArea` objects
If no :class:`rtcclient.project_area.ProjectArea` objects are
retrieved, `None` is returned.
:param archived: (default is False) whether the project area
is archived
:param returned_properties: the returned properties that you want.
Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: A :class:`list` that contains all the
:class:`rtcclient.project_area.ProjectArea` objects
:rtype: list |
def names(self):
"""Get the names in an expression"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms)) | Get the names in an expression |
def path_wrapper(func):
"""return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion
"""
@functools.wraps(func)
def wrapped(node, context=None, _func=func, **kwargs):
"""wrapper function handling context"""
if context is None:
context = contextmod.InferenceContext()
if context.push(node):
return None
yielded = set()
generator = _func(node, context, **kwargs)
try:
while True:
res = next(generator)
# unproxy only true instance, not const, tuple, dict...
if res.__class__.__name__ == "Instance":
ares = res._proxied
else:
ares = res
if ares not in yielded:
yield res
yielded.add(ares)
except StopIteration as error:
if error.args:
return error.args[0]
return None
return wrapped | return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion |
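A simplified, standalone sketch of the same wrap-a-generator-and-deduplicate pattern (this is not astroid's actual API; fake_infer and the values it yields are invented for illustration):
import functools

def dedup_wrapper(func):
    """Wrap a generator function so repeated results are yielded only once."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        yielded = set()
        for res in func(*args, **kwargs):
            if res not in yielded:
                yielded.add(res)
                yield res
    return wrapped

@dedup_wrapper
def fake_infer(node):
    # A made-up inference source that repeats a value.
    yield from ("int", "str", "int")

print(list(fake_infer(None)))  # ['int', 'str']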
def days(self):
"""Return the 7 days of the week as a list (of datetime.date objects)"""
monday = self.day(0)
return [monday + timedelta(days=i) for i in range(7)] | Return the 7 days of the week as a list (of datetime.date objects) |
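A standalone illustration of the list comprehension above, using a fixed Monday in place of self.day(0):
from datetime import date, timedelta

monday = date(2024, 1, 1)  # 2024-01-01 is a Monday
week = [monday + timedelta(days=i) for i in range(7)]
print(week[0], week[-1])   # 2024-01-01 2024-01-07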
def _press_special_key(self, key, down):
""" Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
"""
key_code = special_key_translate_table[key]
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
NSSystemDefined, # type
(0,0), # location
0xa00 if down else 0xb00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
(key_code << 16) | ((0xa if down else 0xb) << 8), # data1
-1 # data2
)
Quartz.CGEventPost(0, ev.Quartz.CGEvent()) | Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac |
def _validate_alias_command(alias_command):
"""
Check if the alias command is valid.
Args:
alias_command: The command to validate.
"""
if not alias_command:
raise CLIError(EMPTY_ALIAS_ERROR)
split_command = shlex.split(alias_command)
boundary_index = len(split_command)
for i, subcommand in enumerate(split_command):
if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:
boundary_index = i
break
# Extract possible CLI commands and validate
command_to_validate = ' '.join(split_command[:boundary_index]).lower()
for command in azext_alias.cached_reserved_commands:
if re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate), command):
return
_validate_positional_arguments(shlex.split(alias_command)) | Check if the alias command is valid.
Args:
alias_command: The command to validate. |
def insert(self):
"""Insert the object into the database"""
if not self.curs:
raise LIGOLwDBError, "Database connection not initalized"
if len(self.table) == 0:
raise LIGOLwDBError, 'attempt to insert empty table'
for tab in self.table.keys():
# find and add any missing unique ids
generate = []
missingcols = [k for k in self.ldb.uniqueids[tab]
if k not in self.table[tab]['column']]
for m in missingcols:
generate.append(',BLOB(GENERATE_UNIQUE())')
self.table[tab]['orderedcol'].append(m)
# and construct the sql query
self.table[tab]['query'] = ' '.join(
['INSERT INTO', tab, '(', ','.join(self.table[tab]['orderedcol']),
') VALUES (', ','.join(['?' for x in self.table[tab]['column']]) ,
''.join(generate), ')'])
for tabtup in self.ldb.tables:
tab = tabtup[0].lower()
try:
try:
self.curs.executemany(self.table[tab]['query'],
self.table[tab]['stream'])
rowcount = self.curs.rowcount
except DB2.Error, e:
self.curs.execute('rollback')
msg = e[2]
msg += self.xml() + '\n'
msg += str(self.table[tab]['query']) + '\n'
msg += str(self.table[tab]['stream']) + '\n'
raise LIGOLwDBError, msg
except DB2.Warning, e:
self.curs.execute('rollback')
raise LIGOLwDBError, e[2]
#except Exception, e:
# self.curs.execute('rollback')
# raise LIGOLwDBError, e[2]
except KeyError:
pass
self.curs.execute('commit')
return rowcount | Insert the object into the database |
def fade_to_rgb_uncorrected(self, fade_milliseconds, red, green, blue, led_number=0):
"""
Command blink(1) to fade to RGB color, no color correction applied.
"""
action = ord('c')
fade_time = int(fade_milliseconds / 10)
th = (fade_time & 0xff00) >> 8
tl = fade_time & 0x00ff
buf = [REPORT_ID, action, int(red), int(green), int(blue), th, tl, led_number, 0]
self.write( buf ) | Command blink(1) to fade to RGB color, no color correction applied. |
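A small worked example of the fade-time encoding used above (pure arithmetic, independent of any blink(1) hardware):
# 1500 ms fade -> 150 ten-millisecond units -> high byte 0, low byte 150.
fade_time = int(1500 / 10)
th = (fade_time & 0xff00) >> 8
tl = fade_time & 0x00ff
assert (th, tl) == (0, 150)

# A longer fade spills into the high byte: 30000 ms -> 3000 units.
fade_time = int(30000 / 10)
th, tl = (fade_time & 0xff00) >> 8, fade_time & 0x00ff
assert (th, tl) == (11, 184)  # 11 * 256 + 184 == 3000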
def predict(self):
"""
Returns
-------
proba : ndarray, shape=(n_clusters, )
The probability of given cluster being label 1.
"""
if self.w_ is not None:
sigmoid = lambda t: 1. / (1. + np.exp(-t))
return sigmoid(np.dot(self.centers, self.w_[:-1]) + self.w_[-1])
else:
# TODO the model is not trained
pass | Returns
-------
proba : ndarray, shape=(n_clusters, )
The probability of given cluster being label 1. |
def temp_output_file(prefix="tmp", suffix="", dir=None, make_parents=False, always_clean=False):
"""
A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
...
"""
return _temp_output(False, prefix=prefix, suffix=suffix, dir=dir, make_parents=make_parents,
always_clean=always_clean) | A context manager for convenience in creating a temporary file,
which is deleted when exiting the context.
Usage:
with temp_output_file() as (fd, path):
... |
def get_families_by_ids(self, *args, **kwargs):
"""Pass through to provider FamilyLookupSession.get_families_by_ids"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_ids
catalogs = self._get_provider_session('family_lookup_session').get_families_by_ids(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Family(self._provider_manager, cat, self._runtime, self._proxy))
return FamilyList(cat_list) | Pass through to provider FamilyLookupSession.get_families_by_ids |
def set_proxy(self, proxy, update=True):
"""
Set proxy for requests session
"""
update_web_driver = False
if self.current_proxy != proxy:
# Did we change proxies?
update_web_driver = True
self.current_proxy = proxy
if proxy is None:
self.driver_args['service_args'] = self.default_service_args
else:
proxy_parts = cutil.get_proxy_parts(proxy)
self.driver_args['service_args'].extend(['--proxy={host}:{port}'.format(**proxy_parts),
'--proxy-type={schema}'.format(**proxy_parts),
])
if proxy_parts.get('user') is not None:
self.driver_args['service_args'].append('--proxy-auth={user}:{password}'.format(**proxy_parts))
# Recreate webdriver with new proxy settings
if update is True and update_web_driver is True:
self._update() | Set proxy for requests session |
def debug(self):
"""Return debug setting"""
debug = False
if os.path.isfile(os.path.join(self.tcex.args.tc_temp_path, 'DEBUG')):
debug = True
return debug | Return debug setting |
def _apply_over_vars_with_dim(func, self, dim=None, **kwargs):
'''wrapper for datasets'''
ds = type(self)(coords=self.coords, attrs=self.attrs)
for name, var in self.data_vars.items():
if dim in var.dims:
ds[name] = func(var, dim=dim, **kwargs)
else:
ds[name] = var
return ds | wrapper for datasets |
def mode_yubikey_otp(self, private_uid, aes_key):
"""
Set the YubiKey up for standard OTP validation.
"""
if not self.capabilities.have_yubico_OTP():
raise yubikey_base.YubiKeyVersionError('Yubico OTP not available in %s version %d.%d' \
% (self.capabilities.model, self.ykver[0], self.ykver[1]))
if private_uid.startswith(b'h:'):
private_uid = binascii.unhexlify(private_uid[2:])
if len(private_uid) != yubikey_defs.UID_SIZE:
raise yubico_exception.InputError('Private UID must be %i bytes' % (yubikey_defs.UID_SIZE))
self._change_mode('YUBIKEY_OTP', major=0, minor=9)
self.uid = private_uid
self.aes_key(aes_key) | Set the YubiKey up for standard OTP validation. |
def commit(self):
"""Commit this change"""
if not self.connection:
import boto
self.connection = boto.connect_route53()
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml()) | Commit this change |
def check_content(content, **kwargs):
'''check content for "active" urls'''
# valid html root tag
try:
# render elements tree from content
tree = fragment_fromstring(content)
# flag to prevent re-rendering the content when no "active" urls are found
processed = False
# django > 1.5 template boolean\None variables feature
if isinstance(kwargs['parent_tag'], bool):
if not kwargs['parent_tag']:
kwargs['parent_tag'] = 'self'
else:
raise ImproperlyConfigured('''
parent_tag=True is not allowed
''')
elif kwargs['parent_tag'] is None:
kwargs['parent_tag'] = 'self'
# if parent_tag is False\None\''\a\self
# "active" status will be applied directly to "<a>"
if kwargs['parent_tag'].lower() in ('a', 'self', ''):
# xpath query to get all "<a>"
urls = tree.xpath('.//a')
# check "active" status for all urls
for url in urls:
if check_active(url, url, **kwargs):
# mark flag for rerendering content
processed = True
# otherwise css_class must be applied to parent_tag
else:
# xpath query to get all parent tags
elements = tree.xpath('.//{parent_tag}'.format(
parent_tag=kwargs['parent_tag'],
))
# check all elements for "active" "<a>"
for element in elements:
# xpath query to get all "<a>"
urls = element.xpath('.//a')
# check "active" status for all urls
for url in urls:
if check_active(url, element, **kwargs):
# flag for rerendering content tree
processed = True
# stop checking other "<a>"
break
# do not rerender content if no "active" urls found
if processed:
# render content from tree
return tostring(tree, encoding='unicode')
# not valid html root tag
except ParserError:
# raise an exception with configuration example
raise ImproperlyConfigured('''
content of {% activeurl %} must have valid html root tag
for example
{% activeurl %}
<ul>
<li>
<a href="/page/">page</a>
</li>
<li>
<a href="/other_page/">other_page</a>
</li>
</ul>
{% endactiveurl %}
in this case <ul> is valid content root tag
''')
return content | check content for "active" urls |
def _parse_function_return_types_from_doc(cls, doc):
"""
This will extract the return type for list of lists so that the
repr can display the header.
:param doc: str of the function doc
:return dict of {func.__name__:{'api_type':'type','col_name':[],
'col_type':[],'repr_type':None}}
"""
data = dict(name='', col_types=[], col_names=[], _type=None)
if doc:
return_doc = doc.split(':return')[-1].strip()
data['name'] = return_doc.split(':')[0]
if data['name'].startswith('list of'):
if data['name'].endswith('LIST'):
data['_type'] = 'list_list'
for row in return_doc.split('\n')[3:]:
index, col_type, col_name = row.split(None, 2)
assert (index == str(index))
data['col_types'].append(col_type)
data['col_names'].append(col_name.split()[0])
return data | This will extract the return type for list of lists so that the
repr can display the header.
:param doc: str of the function doc
:return dict of {func.__name__:{'api_type':'type','col_name':[],
'col_type':[],'repr_type':None}} |
def get_http_authentication(private_key: RsaKey, private_key_id: str) -> HTTPSignatureHeaderAuth:
"""
Get HTTP signature authentication for a request.
"""
key = private_key.exportKey()
return HTTPSignatureHeaderAuth(
headers=["(request-target)", "user-agent", "host", "date"],
algorithm="rsa-sha256",
key=key,
key_id=private_key_id,
) | Get HTTP signature authentication for a request. |
def nii_ones_like(in_file, value, dtype, newpath=None):
"""Create a NIfTI file filled with ``value``, matching properties of ``in_file``"""
import os
import numpy as np
import nibabel as nb
nii = nb.load(in_file)
data = np.ones(nii.shape, dtype=float) * value
out_file = os.path.join(newpath or os.getcwd(), "filled.nii.gz")
nii = nb.Nifti1Image(data, nii.affine, nii.header)
nii.set_data_dtype(dtype)
nii.to_filename(out_file)
return out_file | Create a NIfTI file filled with ``value``, matching properties of ``in_file`` |
def enum_check(*args, func=None):
"""Check if arguments are of protocol type."""
func = func or inspect.stack()[2][3]
for var in args:
if not isinstance(var, (enum.EnumMeta, aenum.EnumMeta)):
name = type(var).__name__
raise EnumError(
f'Function {func} expected enumeration, {name} got instead.') | Check if arguments are of enumeration type. |
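A minimal usage sketch for enum_check (assuming the function above is in scope together with the aenum package and the module's EnumError exception; func is passed explicitly so the example also works at module level):
import enum

class Color(enum.Enum):
    RED = 1

enum_check(Color, func='example')      # passes: Color is an enumeration class
try:
    enum_check('RED', func='example')  # a plain string is not an enumeration
except EnumError as exc:
    print(exc)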
def _query(self, path, args=None, skip_cache=False, skip_sleep=False):
"""return results for a NCBI query, possibly from the cache
:param: path: relative query path (e.g., 'einfo.fcgi')
:param: args: dictionary of query args
:param: skip_cache: whether to bypass the cache on reading
:param: skip_sleep: whether to bypass query throttling
:rtype: xml string
The args are joined with args required by NCBI (tool and email
address) and with the default args declared when instantiating
the client.
"""
if args is None:
args = {}
def _cacheable(r):
"""return False if r shouldn't be cached (contains a no-cache meta
line); True otherwise"""
return not ("no-cache" in r # obviate parsing, maybe
and lxml.etree.XML(r).xpath("//meta/@content='no-cache'"))
# cache key: the key associated with this endpoint and args The
# key intentionally excludes the identifying args (tool and email)
# and is independent of the request method (GET/POST) args are
# sorted for canonicalization
url = url_base + path
# next 3 lines converted by 2to3 -nm
defining_args = dict(list(self.default_args.items()) + list(args.items()))
full_args = dict(list(self._ident_args.items()) + list(defining_args.items()))
cache_key = hashlib.md5(pickle.dumps((url, sorted(defining_args.items())))).hexdigest()
sqas = ';'.join([k + '=' + str(v) for k, v in sorted(args.items())])
full_args_str = ';'.join([k + '=' + str(v) for k, v in sorted(full_args.items())])
logging.debug("CACHE:" + str(skip_cache) + "//" + str(self._cache))
if not skip_cache and self._cache:
try:
v = self._cache[cache_key]
_logger.debug('cache hit for key {cache_key} ({url}, {sqas}) '.format(
cache_key=cache_key,
url=url,
sqas=sqas))
return v
except KeyError:
_logger.debug('cache miss for key {cache_key} ({url}, {sqas}) '.format(
cache_key=cache_key,
url=url,
sqas=sqas))
pass
if self.api_key:
url += '?api_key={self.api_key}'.format(self=self)
# --
if not skip_sleep:
req_int = self.request_interval
sleep_time = req_int - (time.clock() - self._last_request_clock)
if sleep_time > 0:
_logger.debug('sleeping {sleep_time:.3f}'.format(sleep_time=sleep_time))
time.sleep(sleep_time)
r = requests.post(url, full_args)
self._last_request_clock = time.clock()
_logger.debug('post({url}, {fas}): {r.status_code} {r.reason}, {len})'.format(
url=url,
fas=full_args_str,
r=r,
len=len(r.text)))
if not r.ok:
# TODO: discriminate between types of errors
if r.headers["Content-Type"] == "application/json":
json = r.json()
raise EutilsRequestError('{r.reason} ({r.status_code}): {error}'.format(r=r, error=json["error"]))
try:
xml = lxml.etree.fromstring(r.text.encode('utf-8'))
raise EutilsRequestError('{r.reason} ({r.status_code}): {error}'.format(r=r, error=xml.find('ERROR').text))
except Exception as ex:
raise EutilsNCBIError('Error parsing response object from NCBI: {}'.format(ex))
if any(bad_word in r.text for bad_word in ['<error>', '<ERROR>']):
if r.text is not None:
try:
xml = lxml.etree.fromstring(r.text.encode('utf-8'))
raise EutilsRequestError('{r.reason} ({r.status_code}): {error}'.format(r=r, error=xml.find('ERROR').text))
except Exception as ex:
raise EutilsNCBIError('Error parsing response object from NCBI: {}'.format(ex))
if '<h1 class="error">Access Denied</h1>' in r.text:
raise EutilsRequestError('Access Denied: {url}'.format(url=url))
if self._cache and _cacheable(r.text):
# N.B. we cache results even when skip_cache (read) is true
self._cache[cache_key] = r.content
_logger.info('cached results for key {cache_key} ({url}, {sqas}) '.format(
cache_key=cache_key,
url=url,
sqas=sqas))
return r.content | return results for a NCBI query, possibly from the cache
:param: path: relative query path (e.g., 'einfo.fcgi')
:param: args: dictionary of query args
:param: skip_cache: whether to bypass the cache on reading
:param: skip_sleep: whether to bypass query throttling
:rtype: xml string
The args are joined with args required by NCBI (tool and email
address) and with the default args declared when instantiating
the client. |
def exit_standby(name, instance_ids, should_decrement_desired_capacity=False,
region=None, key=None, keyid=None, profile=None):
'''
Exit desired instances from StandBy mode
.. versionadded:: 2016.11.0
CLI example::
salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxxx"]'
'''
conn = _get_conn_autoscaling_boto3(
region=region, key=key, keyid=keyid, profile=profile)
try:
response = conn.exit_standby(
InstanceIds=instance_ids,
AutoScalingGroupName=name)
except ClientError as e:
err = __utils__['boto3.get_error'](e)
if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
return {'exists': False}
return {'error': err}
return all(activity['StatusCode'] != 'Failed' for activity in response['Activities']) | Exit desired instances from StandBy mode
.. versionadded:: 2016.11.0
CLI example::
salt-call boto_asg.exit_standby my_autoscale_group_name '["i-xxxxxx"]' |
def get_all_handleable_leaves(self):
"""
Get list of all handleable devices, return only those that represent
leaf nodes within the filtered device tree.
"""
nodes = self.get_device_tree()
return [node.device
for node in sorted(nodes.values(), key=DevNode._sort_key)
if not node.ignored and node.device
and all(child.ignored for child in node.children)] | Get list of all handleable devices, return only those that represent
leaf nodes within the filtered device tree. |
def multidict(D):
'''creates a multidictionary'''
keys = list(D.keys())
if len(keys) == 0:
return [[]]
try:
N = len(D[keys[0]])
islist = True
except:
N = 1
islist = False
dlist = [dict() for d in range(N)]
for k in keys:
if islist:
for i in range(N):
dlist[i][k] = D[k][i]
else:
dlist[0][k] = D[k]
return [keys]+dlist | creates a multidictionary |
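A small worked example of the dict-of-lists splitting performed by multidict above:
D = {"cost": [10, 20], "weight": [1.5, 2.0]}
keys, d0, d1 = multidict(D)
print(keys)  # ['cost', 'weight']
print(d0)    # {'cost': 10, 'weight': 1.5}
print(d1)    # {'cost': 20, 'weight': 2.0}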
def create_from_hdu(cls, hdu, ebins):
""" Creates and returns an HpxMap object from a FITS HDU.
hdu : The FITS HDU
ebins : Energy bin edges [optional]
"""
hpx = HPX.create_from_hdu(hdu, ebins)
colnames = hdu.columns.names
cnames = []
if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':
pixs = hdu.data.field('PIX')
chans = hdu.data.field('CHANNEL')
keys = chans * hpx.npix + pixs
vals = hdu.data.field('VALUE')
nebin = len(ebins)
data = np.zeros((nebin, hpx.npix))
data.flat[keys] = vals
else:
for c in colnames:
if c.find(hpx.conv.colstring) == 0:
cnames.append(c)
nebin = len(cnames)
data = np.ndarray((nebin, hpx.npix))
for i, cname in enumerate(cnames):
data[i, 0:] = hdu.data.field(cname)
return cls(data, hpx) | Creates and returns an HpxMap object from a FITS HDU.
hdu : The FITS HDU
ebins : Energy bin edges [optional] |
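A minimal numpy sketch of the sparse-to-dense scatter used in the FGST_SRCMAP_SPARSE branch above (toy sizes; the pixel/channel indices and values are made up):
import numpy as np

npix, nebin = 6, 3               # toy map: 6 pixels, 3 energy bins
pixs = np.array([0, 4, 2])       # pixel index of each non-zero entry
chans = np.array([0, 1, 2])      # energy bin (channel) of each entry
vals = np.array([1.0, 2.5, 0.7])

keys = chans * npix + pixs       # flattened (channel, pixel) index
data = np.zeros((nebin, npix))
data.flat[keys] = vals           # scatter the sparse values into the dense map
print(data[1, 4], data[2, 2])    # 2.5 0.7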
def print_item_callback(item):
"""Print an item callback, used by &listen."""
print('&listen [{}, {}={}]'.format(
item.get('cmd', ''),
item.get('id', ''),
item.get('data', ''))) | Print an item callback, used by &listen. |
def update_stats(self, stats, value, _type, sample_rate=1):
"""
Pipeline function that formats data, samples it and passes to send()
>>> client = StatsdClient()
>>> client.update_stats('example.update_stats', 73, "c", 0.9)
"""
stats = self.format(stats, value, _type, self.prefix)
self.send(self.sample(stats, sample_rate), self.addr) | Pipeline function that formats data, samples it and passes to send()
>>> client = StatsdClient()
>>> client.update_stats('example.update_stats', 73, "c", 0.9) |
def _macs2_cmd(method="chip"):
"""Main command for macs2 tool."""
if method.lower() == "chip":
cmd = ("{macs2} callpeak -t {chip_bam} -c {input_bam} {paired} "
" {genome_size} -n {name} -B {options}")
elif method.lower() == "atac":
cmd = ("{macs2} callpeak -t {chip_bam} --nomodel "
" {paired} {genome_size} -n {name} -B {options}"
" --nolambda --keep-dup all")
else:
raise ValueError("chip_method should be chip or atac.")
return cmd | Main command for macs2 tool. |
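The returned string is only a template; a hedged sketch of filling it in with str.format (the tool path, file names, and genome size below are placeholders, not values produced by the pipeline):
cmd = _macs2_cmd(method="chip")
print(cmd.format(
    macs2="macs2",
    chip_bam="chip.bam",
    input_bam="input.bam",
    paired="",            # e.g. "-f BAMPE" for paired-end data
    genome_size="-g hs",
    name="sample1",
    options="",
))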
def RgbToHsl(r, g, b):
'''Convert the color from RGB coordinates to HSL.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (h, s, l) tuple in the range:
h[0...360],
s[0...1],
l[0...1]
>>> Color.RgbToHsl(1, 0.5, 0)
(30.0, 1.0, 0.5)
'''
minVal = min(r, g, b) # min RGB value
maxVal = max(r, g, b) # max RGB value
l = (maxVal + minVal) / 2.0
if minVal==maxVal:
return (0.0, 0.0, l) # achromatic (gray)
d = maxVal - minVal # delta RGB value
if l < 0.5: s = d / (maxVal + minVal)
else: s = d / (2.0 - maxVal - minVal)
dr, dg, db = [(maxVal-val) / d for val in (r, g, b)]
if r==maxVal:
h = db - dg
elif g==maxVal:
h = 2.0 + dr - db
else:
h = 4.0 + dg - dr
h = (h*60.0) % 360.0
return (h, s, l) | Convert the color from RGB coordinates to HSL.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (h, s, l) tuple in the range:
h[0...360],
s[0...1],
l[0...1]
>>> Color.RgbToHsl(1, 0.5, 0)
(30.0, 1.0, 0.5) |
def strip_rts_retries(self, idx):
"""strip(1 byte) rts_retries
:idx: int
:return: int
idx
:return: int
rts_retries
"""
rts_retries, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, rts_retries | strip(1 byte) rts_retries
:idx: int
:return: int
idx
:return: int
rts_retries |
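A tiny standalone illustration of the struct.unpack_from call above (the buffer is a made-up three-byte payload, not a real radiotap header):
import struct

buf = b'\x01\x02\x07'
idx = 2
rts_retries, = struct.unpack_from('<B', buf, idx)  # one unsigned byte at offset 2
print(idx + 1, rts_retries)                        # 3 7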
def fetch_items(self, category, **kwargs):
"""Fetch the commits
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
to_date = kwargs['to_date']
branches = kwargs['branches']
latest_items = kwargs['latest_items']
no_update = kwargs['no_update']
ncommits = 0
try:
if os.path.isfile(self.gitpath):
commits = self.__fetch_from_log()
else:
commits = self.__fetch_from_repo(from_date, to_date, branches,
latest_items, no_update)
for commit in commits:
yield commit
ncommits += 1
except EmptyRepositoryError:
pass
logger.info("Fetch process completed: %s commits fetched",
ncommits) | Fetch the commits
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items |
def generate_dummy_graph(network):
"""Generate a dummy graph to feed to the FIAS libraries.
It adds the "pos" attribute and removes the 380 kV duplicate
buses when the buses have been split, so that all load and generation
is attached to the 220kV bus."""
graph = pypsa.descriptors.OrderedGraph()
graph.add_nodes_from([bus for bus in network.buses.index if bus not in buses_to_split])
#add positions to graph for voronoi cell computation
for node in graph.nodes():
graph.node[node]["pos"] = np.array(network.buses.loc[node,["x","y"]],dtype=float)
return graph | Generate a dummy graph to feed to the FIAS libraries.
It adds the "pos" attribute and removes the 380 kV duplicate
buses when the buses have been split, so that all load and generation
is attached to the 220kV bus. |
def reset(self):
"""Reset emulator. All registers and memory are reset.
"""
self.__mem.reset()
self.__cpu.reset()
self.__tainter.reset()
# Instructions pre and post handlers.
self.__instr_handler_pre = None, None
self.__instr_handler_post = None, None
self.__set_default_handlers() | Reset emulator. All registers and memory are reset. |
def diff_lines(self):
"""A diff between the original BUILD file and the resulting BUILD file."""
start_lines = self._build_file_source_lines[:]
end_lines = self.build_file_lines()
diff_generator = unified_diff(start_lines,
end_lines,
fromfile=self.build_file.relpath,
tofile=self.build_file.relpath,
lineterm='')
return list(diff_generator) | A diff between the original BUILD file and the resulting BUILD file. |
def parse(self, sentence):
"""Parse raw sentence into ConllSentence
Parameters
----------
sentence : list
a list of (word, tag) tuples
Returns
-------
ConllSentence
ConllSentence object
"""
words = np.zeros((len(sentence) + 1, 1), np.int32)
tags = np.zeros((len(sentence) + 1, 1), np.int32)
words[0, 0] = ParserVocabulary.ROOT
tags[0, 0] = ParserVocabulary.ROOT
vocab = self._vocab
for i, (word, tag) in enumerate(sentence):
words[i + 1, 0], tags[i + 1, 0] = vocab.word2id(word.lower()), vocab.tag2id(tag)
with mx.Context(mxnet_prefer_gpu()):
outputs = self._parser.forward(words, tags)
words = []
for arc, rel, (word, tag) in zip(outputs[0][0], outputs[0][1], sentence):
words.append(ConllWord(id=len(words) + 1, form=word, pos=tag, head=arc, relation=vocab.id2rel(rel)))
return ConllSentence(words) | Parse raw sentence into ConllSentence
Parameters
----------
sentence : list
a list of (word, tag) tuples
Returns
-------
ConllSentence
ConllSentence object |
def get_members(cls, member_class=None, is_member=None, sort_key=None, _parameter=None):
"""
Collect all class level attributes matching the given criteria.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object
"""
if member_class is None and is_member is None:
raise TypeError("get_members either needs a member_class parameter or an is_member check function (or both)")
members = OrderedDict()
for base in cls.__bases__:
if _parameter is None:
inherited_members = get_members(base, member_class=member_class, is_member=is_member, sort_key=sort_key)
else:
# When user by @declarative, only traverse up the class inheritance to the decorated class.
inherited_members = get_declared(base, _parameter)
members.update(inherited_members)
def generate_member_bindings():
for name in cls.__dict__:
if name.startswith('__'):
continue
obj = getattr(cls, name)
if member_class is not None and isinstance(obj, member_class):
yield name, obj
elif is_member is not None and is_member(obj):
yield name, obj
elif type(obj) is tuple and len(obj) == 1 and isinstance(obj[0], member_class):
raise TypeError("'%s' is a one-tuple containing what we are looking for. Trailing comma much? Don't... just don't." % name) # pragma: no mutate
bindings = generate_member_bindings()
if sort_key is not None:
try:
sorted_bindings = sorted(bindings, key=lambda x: sort_key(x[1]))
except AttributeError:
if sort_key is default_sort_key:
raise TypeError('Missing member ordering definition. Use @creation_ordered or specify sort_key')
else: # pragma: no covererage
raise
members.update(sorted_bindings)
else:
members.update(bindings)
return members | Collect all class level attributes matching the given criteria.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object |
def set_permissions(filename, uid=None, gid=None, mode=0775):
"""
Set permissions for the given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode
"""
if uid is None:
uid = get_ftp_uid()
if gid is None:
gid = -1
os.chown(filename, uid, gid)
os.chmod(filename, mode) | Set permissions for the given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode |
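A hypothetical call, assuming the helper above is importable and the proftpd user exists on the host; the path is a placeholder.

# Give the proftpd user ownership of an upload directory and make it
# group-writable; the group is left unchanged because gid is not passed.
set_permissions("/var/proftpd/incoming", mode=0o775)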
def dump(data, abspath,
indent_format=False,
float_precision=None,
ensure_ascii=True,
overwrite=False,
enable_verbose=True):
"""Dump Json serializable object to file.
Provides multiple choice to customize the behavior.
:param data: Serializable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed Json)
:type abspath: string
:param indent_format: default ``False``, If ``True``, then dump to human
readable format, but it's slower and the file is larger
:type indent_format: boolean
:param float_precision: default ``None``, limit floats to N decimal points.
:type float_precision: integer
:param overwrite: default ``False``, if ``True``, when you dump to an existing
file, it silently overwrites it. If ``False``, an alert message is shown.
Default setting ``False`` is to prevent overwriting the file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import js
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.json", overwrite=True)
Dumping to 'test.json'...
Complete! Elapse 0.002432 sec
**Chinese documentation**
Write any JSON-serializable Python object ("dict", "list", or a combination of
them) to a file using JSON encoding.
Parameter list
:param js: a JSON-serializable Python object
:type js: ``dict`` or ``list``
:param abspath: absolute path of the JSON file; the extension must be ``.json`` or ``.gz``,
where ``.gz`` denotes a compressed JSON file
:type abspath: ``str``
:param indent_format: default ``False``; when ``True``, keys are sorted and the output is
indented during JSON encoding, but writing is slower and the file is larger.
:type indent_format: ``bool``
:param overwrite: default ``False``; when ``True``, an existing file at the target path is
overwritten automatically. When ``False``, a warning is printed instead, to prevent
overwriting the original file by mistake.
:type overwrite: ``bool``
:param float_precision: default ``None``; when set to an integer, floats are kept to N decimal places
:type float_precision: ``int``
:param enable_verbose: default ``True``, toggle for progress messages; recommended off for batch processing
:type enable_verbose: ``bool``
"""
prt("\nDump to '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_json = is_json_file(abspath)
if os.path.exists(abspath):
if not overwrite: # 存在, 并且overwrite=False
prt(" Stop! File exists and overwrite is not allowed",
enable_verbose)
return
if float_precision is not None:
encoder.FLOAT_REPR = lambda x: format(x, ".%sf" % float_precision)
indent_format = True
else:
encoder.FLOAT_REPR = repr
if indent_format:
sort_keys = True
indent = 4
else:
sort_keys = False
indent = None
st = time.clock()
js = json.dumps(data, sort_keys=sort_keys, indent=indent,
ensure_ascii=ensure_ascii)
content = js.encode("utf-8")
if is_json:
textfile.writebytes(content, abspath)
else:
compress.write_gzip(content, abspath)
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) | Dump Json serializable object to file.
Provides multiple choice to customize the behavior.
:param data: Serializable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed Json)
:type abspath: string
:param indent_format: default ``False``, If ``True``, then dump to human
readable format, but it's slower and the file is larger
:type indent_format: boolean
:param float_precision: default ``None``, limit floats to N decimal points.
:type float_precision: integer
:param overwrite: default ``False``, if ``True``, when you dump to an existing
file, it silently overwrites it. If ``False``, an alert message is shown.
Default setting ``False`` is to prevent overwriting the file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import js
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.json", overwrite=True)
Dumping to 'test.json'...
Complete! Elapse 0.002432 sec
**Chinese documentation**
Write any JSON-serializable Python object ("dict", "list", or a combination of
them) to a file using JSON encoding.
Parameter list
:param js: a JSON-serializable Python object
:type js: ``dict`` or ``list``
:param abspath: absolute path of the JSON file; the extension must be ``.json`` or ``.gz``,
where ``.gz`` denotes a compressed JSON file
:type abspath: ``str``
:param indent_format: default ``False``; when ``True``, keys are sorted and the output is
indented during JSON encoding, but writing is slower and the file is larger.
:type indent_format: ``bool``
:param overwrite: default ``False``; when ``True``, an existing file at the target path is
overwritten automatically. When ``False``, a warning is printed instead, to prevent
overwriting the original file by mistake.
:type overwrite: ``bool``
:param float_precision: default ``None``; when set to an integer, floats are kept to N decimal places
:type float_precision: ``int``
:param enable_verbose: default ``True``, toggle for progress messages; recommended off for batch processing
:type enable_verbose: ``bool``
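Two hedged usage sketches for the dump helper above, assuming it is exposed as dataIO.js.dump as the docstring suggests; the file names are placeholders.

from dataIO import js

data = {"pi": 3.14159265, "items": [1, 2, 3]}
# Pretty-printed, human-readable JSON (sorted keys, 4-space indent).
js.dump(data, "report.json", indent_format=True, overwrite=True)
# Gzip-compressed JSON with floats trimmed to two decimal places.
js.dump(data, "report.gz", float_precision=2, overwrite=True)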
def _get_choices(self, gandi):
""" Internal method to get choices list """
packages = super(CertificatePackageType, self)._get_choices(gandi)
return list(set([pack.split('_')[1] for pack in packages])) | Internal method to get choices list |
def benchmark_setup(self):
"""Benchmark setup execution.
"""
def f():
self._setup()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.setup_stat = self._calc_benchmark_stat(f) | Benchmark setup execution. |
def p_gate_op_5(self, program):
"""
gate_op : BARRIER id_list ';'
"""
program[0] = node.Barrier([program[2]])
self.verify_bit_list(program[2])
self.verify_distinct([program[2]]) | gate_op : BARRIER id_list ';' |
def get_devicecore_api(self):
"""Returns a :class:`.DeviceCoreAPI` bound to this device cloud instance
This provides access to the same API as :attr:`.DeviceCloud.devicecore` but will create
a new object (with a new cache) each time called.
:return: devicecore API object bound to this device cloud account
:rtype: :class:`.DeviceCoreAPI`
"""
from devicecloud.devicecore import DeviceCoreAPI
return DeviceCoreAPI(self._conn, self.get_sci_api()) | Returns a :class:`.DeviceCoreAPI` bound to this device cloud instance
This provides access to the same API as :attr:`.DeviceCloud.devicecore` but will create
a new object (with a new cache) each time called.
:return: devicecore API object bound to this device cloud account
:rtype: :class:`.DeviceCoreAPI` |
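Usage sketch, assuming the python-devicecloud client this method belongs to; the credentials are placeholders and the device iteration is only an assumption about that library's DeviceCoreAPI surface.

from devicecloud import DeviceCloud

dc = DeviceCloud("user", "password")     # placeholder credentials
devicecore = dc.get_devicecore_api()     # fresh DeviceCoreAPI with its own cache
for device in devicecore.get_devices():
    print(device.get_mac())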
def _ConvertValueMessage(value, message):
"""Convert a JSON representation into Value message."""
if isinstance(value, dict):
_ConvertStructMessage(value, message.struct_value)
elif isinstance(value, list):
_ConvertListValueMessage(value, message.list_value)
elif value is None:
message.null_value = 0
elif isinstance(value, bool):
message.bool_value = value
elif isinstance(value, six.string_types):
message.string_value = value
elif isinstance(value, _INT_OR_FLOAT):
message.number_value = value
else:
raise ParseError('Unexpected type for Value message.') | Convert a JSON representation into Value message. |
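The helper above is the internal piece of protobuf's json_format module that handles the Value well-known type; a sketch of the equivalent round trip through the public API, assuming the protobuf package is installed.

from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value

msg = Value()
json_format.Parse('{"name": "box", "size": 3.5, "tags": ["a", "b"]}', msg)
print(msg.struct_value.fields["size"].number_value)   # 3.5
print(json_format.MessageToJson(msg))                 # back to JSON text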
def stem_singular_word(self, word):
"""Stem a singular word to its common stem form."""
context = Context(word, self.dictionary, self.visitor_provider)
context.execute()
return context.result | Stem a singular word to its common stem form. |
def to_array_with_default(value, default_value):
"""
Converts value into array object with specified default.
Single values are converted into arrays with single element.
:param value: the value to convert.
:param default_value: default array object.
:return: array object or default array when value is None.
"""
result = ArrayConverter.to_nullable_array(value)
return result if result != None else default_value | Converts value into array object with specified default.
Single values are converted into arrays with single element.
:param value: the value to convert.
:param default_value: default array object.
:return: array object or default array when value is None. |
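A small behaviour sketch; the exact wrapping of scalars is delegated to ArrayConverter.to_nullable_array, so only the cases that follow directly from the code above are shown.

to_array_with_default(None, [])      # -> []      (None falls back to the default)
to_array_with_default([1, 2], [])    # -> [1, 2]  (existing arrays pass through)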
async def delete_shade_from_scene(self, shade_id, scene_id):
"""Delete a shade from a scene."""
return await self.request.delete(
self._base_path, params={ATTR_SCENE_ID: scene_id, ATTR_SHADE_ID: shade_id}
) | Delete a shade from a scene. |
def next(self, times=1):
"""Returns a new instance of self
times is not supported yet.
"""
return Range(copy(self.end),
self.end + self.elapse, tz=self.start.tz) | Returns a new instance of self
times is not supported yet. |
def randkey(bits, keyspace=string.ascii_letters + string.digits + '#/.',
rng=None):
""" Returns a cryptographically secure random key of desired @bits of
entropy within @keyspace using :class:random.SystemRandom
@bits: (#int) minimum bits of entropy
@keyspace: (#str) or iterable allowed output chars
@rng: the random number generator to use. Defaults to
:class:random.SystemRandom. Must have a |choice| method
-> (#str) random key
..
from vital.security import randkey
randkey(24)
# -> '9qaX'
randkey(48)
# -> 'iPJ5YWs9'
randkey(64)
# -> 'C..VJ.KLdxg'
randkey(64, keyspace="abc", rng=random)
# -> 'aabcccbabcaacaccccabcaabbabcacabacbbbaaab'
..
"""
return "".join(char for char in iter_random_chars(bits, keyspace, rng)) | Returns a cryptographically secure random key of desired @bits of
entropy within @keyspace using :class:random.SystemRandom
@bits: (#int) minimum bits of entropy
@keyspace: (#str) or iterable allowed output chars
@rng: the random number generator to use. Defaults to
:class:random.SystemRandom. Must have a |choice| method
-> (#str) random key
..
from vital.security import randkey
randkey(24)
# -> '9qaX'
randkey(48)
# -> 'iPJ5YWs9'
randkey(64)
# -> 'C..VJ.KLdxg'
randkey(64, keyspace="abc", rng=random)
# -> 'aabcccbabcaacaccccabcaabbabcacabacbbbaaab'
.. |
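The key lengths in the docstring examples follow from the entropy per character; a small sketch of that arithmetic, using the 65-character default keyspace from the signature above.

import math
import string

keyspace = string.ascii_letters + string.digits + '#/.'   # 65 characters
bits_per_char = math.log2(len(keyspace))                  # ~6.02 bits per character
chars_needed = math.ceil(24 / bits_per_char)              # 4 characters, matching '9qaX'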
def local_check (self):
"""Local check function can be overridden in subclasses."""
log.debug(LOG_CHECK, "Checking %s", unicode(self))
# strict extern URLs should not be checked
assert not self.extern[1], 'checking strict extern URL'
# check connection
log.debug(LOG_CHECK, "checking connection")
try:
self.check_connection()
self.set_content_type()
self.add_size_info()
self.aggregate.plugin_manager.run_connection_plugins(self)
except tuple(ExcList) as exc:
value = self.handle_exception()
# make nicer error msg for unknown hosts
if isinstance(exc, socket.error) and exc.args[0] == -2:
value = _('Hostname not found')
elif isinstance(exc, UnicodeError):
# idna.encode(host) failed
value = _('Bad hostname %(host)r: %(msg)s') % {'host': self.host, 'msg': str(value)}
self.set_result(unicode_safe(value), valid=False) | Local check function can be overridden in subclasses. |
def discard(self, element):
"""Remove element from the RangeSet if it is a member.
If the element is not a member, do nothing.
"""
try:
i = int(element)
set.discard(self, i)
except ValueError:
pass | Remove element from the RangeSet if it is a member.
If the element is not a member, do nothing. |
def for_category(self, category, live_only=False):
"""
Returns queryset of EntryTag instances for specified category.
:param category: the Category instance.
:param live_only: flag to include only "live" entries.
:rtype: django.db.models.query.QuerySet.
"""
filters = {'tag': category.tag}
if live_only:
filters.update({'entry__live': True})
return self.filter(**filters) | Returns queryset of EntryTag instances for specified category.
:param category: the Category instance.
:param live_only: flag to include only "live" entries.
:rtype: django.db.models.query.QuerySet. |
def binary(self):
"""
return encoded representation
"""
if isinstance(self.value, bytes):
length = len(self.value)
if length > 4294967295:
raise OutputException('uint32 overflow')
elif self.bits != 8:
return (
b_chr(_TAG_BIT_BINARY_EXT) +
struct.pack(b'>I', length) +
b_chr(self.bits) + self.value
)
else:
return (
b_chr(_TAG_BINARY_EXT) +
struct.pack(b'>I', length) +
self.value
)
else:
raise OutputException('unknown binary type') | return encoded representation |
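A sketch of the byte layout the encoder above produces, using the Erlang external term format tags it relies on; the concrete tag values (109 for BINARY_EXT, 77 for BIT_BINARY_EXT) are stated here as an assumption rather than taken from the source.

import struct

payload = b"abc"
# 8-bit-aligned binary -> BINARY_EXT: tag byte, 4-byte big-endian length, data.
binary_ext = bytes([109]) + struct.pack(b'>I', len(payload)) + payload
# Bit binary (e.g. 6 significant bits in the last byte) -> BIT_BINARY_EXT:
# tag byte, length, bits-in-last-byte, data.
bit_binary_ext = bytes([77]) + struct.pack(b'>I', len(payload)) + bytes([6]) + payload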
def _free(self, ptr):
"""
Handler for any libc `free` SimProcedure call. If the heap has faithful support for `free`, it ought to be
implemented in a `free` function (as opposed to the `_free` function).
:param ptr: the location in memory to be freed
"""
raise NotImplementedError("%s not implemented for %s" % (self._free.__func__.__name__,
self.__class__.__name__)) | Handler for any libc `free` SimProcedure call. If the heap has faithful support for `free`, it ought to be
implemented in a `free` function (as opposed to the `_free` function).
:param ptr: the location in memory to be freed |
def getDocFactory(self, fragmentName, default=None):
"""
Retrieve a Nevow document factory for the given name.
@param fragmentName: a short string that names a fragment template.
@param default: value to be returned if the named template is not
found.
"""
themes = self._preferredThemes()
for t in themes:
fact = t.getDocFactory(fragmentName, None)
if fact is not None:
return fact
return default | Retrieve a Nevow document factory for the given name.
@param fragmentName: a short string that names a fragment template.
@param default: value to be returned if the named template is not
found. |
def send_mail(
subject,
sender,
to,
message,
html_message=None,
cc=None,
bcc=None,
attachments=None,
host=None,
port=None,
auth_user=None,
auth_password=None,
use_tls=False,
fail_silently=False,
):
"""Send a single email to a recipient list.
All members of the recipient list will see the other recipients in the 'To'
field.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
if message is None and html_message is None:
raise ValueError("Either message or html_message must be provided")
if message is None:
message = strip_tags(html_message)
connection = SMTPConnection(
host=host,
port=port,
username=auth_user,
password=auth_password,
use_tls=use_tls,
fail_silently=fail_silently,
)
# Convert the to field just for easier usage
if isinstance(to, six.string_types):
to = [to]
if html_message is None:
email = EmailMessage(
subject=subject,
body=message,
sender=sender,
to=to,
cc=cc,
bcc=bcc,
attachments=attachments,
connection=connection,
)
else:
email = EmailMultiAlternatives(
subject=subject,
body=message,
sender=sender,
to=to,
cc=cc,
bcc=bcc,
attachments=attachments,
connection=connection,
)
email.attach_alternative(html_message, "text/html")
return email.send() | Send a single email to a recipient list.
All members of the recipient list will see the other recipients in the 'To'
field.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly. |
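A hypothetical call to the wrapper above; the host, addresses and credentials are placeholders. When message is omitted, the plain-text body is derived from html_message via strip_tags.

send_mail(
    subject="Weekly report",
    sender="reports@example.com",
    to="team@example.com",          # a bare string is wrapped into a list
    message=None,                   # plain-text body derived from the HTML
    html_message="<h1>Report</h1><p>All systems nominal.</p>",
    host="smtp.example.com",
    port=587,
    auth_user="reports@example.com",
    auth_password="app-password",
    use_tls=True,
)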
def _ProcessTask(self, task):
"""Processes a task.
Args:
task (Task): task.
"""
logger.debug('Started processing task: {0:s}.'.format(task.identifier))
if self._tasks_profiler:
self._tasks_profiler.Sample(task, 'processing_started')
self._task = task
storage_writer = self._storage_writer.CreateTaskStorage(task)
if self._serializers_profiler:
storage_writer.SetSerializersProfiler(self._serializers_profiler)
storage_writer.Open()
self._parser_mediator.SetStorageWriter(storage_writer)
storage_writer.WriteTaskStart()
try:
# TODO: add support for more task types.
self._ProcessPathSpec(
self._extraction_worker, self._parser_mediator, task.path_spec)
self._number_of_consumed_sources += 1
if self._guppy_memory_profiler:
self._guppy_memory_profiler.Sample()
finally:
storage_writer.WriteTaskCompletion(aborted=self._abort)
self._parser_mediator.SetStorageWriter(None)
storage_writer.Close()
try:
self._storage_writer.FinalizeTaskStorage(task)
except IOError:
pass
self._task = None
if self._tasks_profiler:
self._tasks_profiler.Sample(task, 'processing_completed')
logger.debug('Completed processing task: {0:s}.'.format(task.identifier)) | Processes a task.
Args:
task (Task): task. |
def report_numbers2marc(self, key, value):
"""Populate the ``037`` MARC field."""
def _get_mangled_source(source):
if source == 'arXiv':
return 'arXiv:reportnumber'
return source
source = _get_mangled_source(value.get('source'))
if value.get('hidden'):
return {
'9': source,
'z': value.get('value'),
}
return {
'9': source,
'a': value.get('value'),
} | Populate the ``037`` MARC field. |
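An input/output sketch of the rule above (values invented for illustration): a hidden arXiv report number lands in subfield z, a visible one in subfield a.

hidden = {'source': 'arXiv', 'value': 'arXiv:1234.56789', 'hidden': True}
# -> {'9': 'arXiv:reportnumber', 'z': 'arXiv:1234.56789'}
visible = {'source': 'CERN', 'value': 'CERN-TH-2019-001'}
# -> {'9': 'CERN', 'a': 'CERN-TH-2019-001'}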
def read(self, line, f, data):
"""See :meth:`PunchParser.read`"""
data["energy"] = float(f.readline().split()[1])
N = len(data["symbols"])
# if the data are already read before, just overwrite them
gradient = data.get("gradient")
if gradient is None:
gradient = np.zeros((N,3), float)
data["gradient"] = gradient
for i in range(N):
words = f.readline().split()
gradient[i,0] = float(words[2])
gradient[i,1] = float(words[3])
gradient[i,2] = float(words[4]) | See :meth:`PunchParser.read` |
def is_valid(self):
"""
Error reporting is triggered when a form is checked for validity
"""
is_valid = super(GAErrorReportingMixin, self).is_valid()
if self.is_bound and not is_valid:
try:
self.report_errors_to_ga(self.errors)
except: # noqa: E722
logger.exception('Failed to report form errors to Google Analytics')
return is_valid | Error reporting is triggered when a form is checked for validity |
def power_spectrum(self, input_filepath):
'''Calculates the power spectrum (4096 point DFT). This method
internally invokes the stat command with the -freq option.
Note: The file is downmixed to mono prior to computation.
Parameters
----------
input_filepath : str
Path to input file to compute stats on.
Returns
-------
power_spectrum : list
List of frequency (Hz), amplitude pairs.
See Also
--------
stat, stats, sox.file_info
'''
effect_args = ['channels', '1', 'stat', '-freq']
_, _, stat_output = self.build(
input_filepath, None, extra_args=effect_args, return_output=True
)
power_spectrum = []
lines = stat_output.split('\n')
for line in lines:
split_line = line.split()
if len(split_line) != 2:
continue
freq, amp = split_line
power_spectrum.append([float(freq), float(amp)])
return power_spectrum | Calculates the power spectrum (4096 point DFT). This method
internally invokes the stat command with the -freq option.
Note: The file is downmixed to mono prior to computation.
Parameters
----------
input_filepath : str
Path to input file to compute stats on.
Returns
-------
power_spectrum : list
List of frequency (Hz), amplitude pairs.
See Also
--------
stat, stats, sox.file_info |
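Usage sketch, assuming this is the Transformer method from the pysox package as the surrounding code suggests; the input file name is a placeholder.

import sox

tfm = sox.Transformer()
spectrum = tfm.power_spectrum("input.wav")           # [[freq_hz, amplitude], ...]
peak_freq, peak_amp = max(spectrum, key=lambda p: p[1])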
def getLocation(self):
"""
Return the latitude+longitude of the picture.
Returns None if no location given for this pic.
"""
method = 'flickr.photos.geo.getLocation'
try:
data = _doget(method, photo_id=self.id)
except FlickrError: # Some other error might have occured too!?
return None
loc = data.rsp.photo.location
return [loc.latitude, loc.longitude] | Return the latitude+longitude of the picture.
Returns None if no location given for this pic. |
def cli(env):
"""List Reserved Capacity groups."""
manager = CapacityManager(env.client)
result = manager.list()
table = formatting.Table(
["ID", "Name", "Capacity", "Flavor", "Location", "Created"],
title="Reserved Capacity"
)
for r_c in result:
occupied_string = "#" * int(r_c.get('occupiedInstanceCount', 0))
available_string = "-" * int(r_c.get('availableInstanceCount', 0))
try:
flavor = r_c['instances'][0]['billingItem']['description']
# cost = float(r_c['instances'][0]['billingItem']['hourlyRecurringFee'])
except KeyError:
flavor = "Unknown Billing Item"
location = r_c['backendRouter']['hostname']
capacity = "%s%s" % (occupied_string, available_string)
table.add_row([r_c['id'], r_c['name'], capacity, flavor, location, r_c['createDate']])
env.fout(table) | List Reserved Capacity groups. |
def properties_strict(instance):
"""Ensure that no custom properties are used, but only the official ones
from the specification.
"""
if instance['type'] not in enums.TYPES:
return # only check properties for official objects
defined_props = enums.PROPERTIES.get(instance['type'], [])
for prop in instance.keys():
if prop not in defined_props:
yield JSONError("Property '%s' is not one of those defined in the"
" specification." % prop, instance['id'])
if has_cyber_observable_data(instance):
for key, obj in instance['objects'].items():
type_ = obj.get('type', '')
if type_ not in enums.OBSERVABLE_PROPERTIES:
continue # custom observable types handled outside this function
observable_props = enums.OBSERVABLE_PROPERTIES.get(type_, [])
embedded_props = enums.OBSERVABLE_EMBEDDED_PROPERTIES.get(type_, {})
extensions = enums.OBSERVABLE_EXTENSIONS.get(type_, [])
for prop in obj.keys():
if prop not in observable_props:
yield JSONError("Property '%s' is not one of those defined in the"
" specification for %s objects."
% (prop, type_), instance['id'])
# Check properties of embedded cyber observable types
elif prop in embedded_props:
embedded_prop_keys = embedded_props.get(prop, [])
for embedded_key in obj[prop]:
if isinstance(embedded_key, dict):
for embedded in embedded_key:
if embedded not in embedded_prop_keys:
yield JSONError("Property '%s' is not one of those defined in the"
" specification for the %s property in %s objects."
% (embedded, prop, type_), instance['id'])
elif embedded_key not in embedded_prop_keys:
yield JSONError("Property '%s' is not one of those defined in the"
" specification for the %s property in %s objects."
% (embedded_key, prop, type_), instance['id'])
# Check properties of embedded cyber observable types
for ext_key in obj.get('extensions', {}):
if ext_key not in extensions:
continue # don't check custom extensions
extension_props = enums.OBSERVABLE_EXTENSION_PROPERTIES[ext_key]
for ext_prop in obj['extensions'][ext_key]:
if ext_prop not in extension_props:
yield JSONError("Property '%s' is not one of those defined in the"
" specification for the %s extension in %s objects."
% (ext_prop, ext_key, type_), instance['id'])
embedded_ext_props = enums.OBSERVABLE_EXTENSION_EMBEDDED_PROPERTIES.get(ext_key, {}).get(ext_prop, [])
if embedded_ext_props:
for embed_ext_prop in obj['extensions'][ext_key].get(ext_prop, []):
if embed_ext_prop not in embedded_ext_props:
yield JSONError("Property '%s' in the %s property of the %s extension "
"is not one of those defined in the specification."
% (embed_ext_prop, ext_prop, ext_key), instance['id']) | Ensure that no custom properties are used, but only the official ones
from the specification. |
def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs | Reduce the array along a given axis by minimum value |
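A sketch of the attribute translation this converter performs when mapping an ONNX ReduceMin node onto MXNet's min operator; the attribute values are invented for illustration and proto_obj is unused by this particular rule.

attrs = {'axes': [1], 'keepdims': 1}
op_name, new_attrs, ins = reduce_min(attrs, inputs=['data'], proto_obj=None)
# op_name == 'min'; new_attrs == {'axis': [1], 'keepdims': 1}; ins == ['data']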
def get_task_fs(self, courseid, taskid):
"""
:param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException
:return: A FileSystemProvider to the folder containing the task files
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if not id_checker(taskid):
raise InvalidNameException("Task with invalid name: " + taskid)
return self._filesystem.from_subfolder(courseid).from_subfolder(taskid) | :param courseid: the course id of the course
:param taskid: the task id of the task
:raise InvalidNameException
:return: A FileSystemProvider to the folder containing the task files |
def update(self, max_norm=None):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch. Gradients are clipped by their global norm
if `max_norm` is set.
Parameters
----------
max_norm: float, optional
If set, clip all gradients so that their global norm does not exceed ``max_norm``.
"""
if max_norm is not None:
self._clip_by_global_norm(max_norm)
self._module.update() | Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch. Gradients are clipped by their global norm
if `max_norm` is set.
Parameters
----------
max_norm: float, optional
If set, clip all gradients so that their global norm does not exceed ``max_norm``.
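The clipping the docstring refers to is the usual global-norm rule; a standalone NumPy sketch of that rule, not the module's internal _clip_by_global_norm.

import numpy as np

grads = [np.array([3.0, 4.0]), np.array([0.0, 12.0])]
global_norm = np.sqrt(sum(np.sum(g * g) for g in grads))   # sqrt(25 + 144) = 13.0
max_norm = 5.0
if global_norm > max_norm:
    grads = [g * (max_norm / global_norm) for g in grads]  # rescaled so the norm is 5.0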
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
component = self.get_argument_component()
metric_names = self.get_required_arguments_metricnames()
start_time = self.get_argument_starttime()
end_time = self.get_argument_endtime()
self.validateInterval(start_time, end_time)
instances = self.get_arguments(constants.PARAM_INSTANCE)
topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
cluster, role, environ, topology_name)
metrics = yield tornado.gen.Task(metricstimeline.getMetricsTimeline,
topology.tmaster, component, metric_names,
instances, int(start_time), int(end_time))
self.write_success_response(metrics)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e) | get method |
def incr(self, key, to_add=1):
"""Increments the value of a given key by ``to_add``"""
if key not in self.value:
self.value[key] = CountMetric()
self.value[key].incr(to_add) | Increments the value of a given key by ``to_add`` |
def summary(self, solution=None, threshold=1E-06, fva=None, names=False,
floatfmt='.3g'):
"""
Print a summary of the input and output fluxes of the model.
Parameters
----------
solution: cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g').
"""
from cobra.flux_analysis.summary import model_summary
return model_summary(self, solution=solution, threshold=threshold,
fva=fva, names=names, floatfmt=floatfmt) | Print a summary of the input and output fluxes of the model.
Parameters
----------
solution: cobra.Solution, optional
A previously solved model solution to use for generating the
summary. If none provided (default), the summary method will
resolve the model. Note that the solution object must match the
model, i.e., changes to the model such as changed bounds,
added or removed reactions are not taken into account by this
method.
threshold : float, optional
Threshold below which fluxes are not reported.
fva : pandas.DataFrame, float or None, optional
Whether or not to include flux variability analysis in the output.
If given, fva should either be a previous FVA solution matching
the model or a float between 0 and 1 representing the
fraction of the optimum objective to be searched.
names : bool, optional
Emit reaction and metabolite names rather than identifiers (default
False).
floatfmt : string, optional
Format string for floats (default '.3g'). |
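Usage sketch against the bundled cobrapy test model; the model loader shown here is an assumption about the installed cobrapy version rather than part of the source.

import cobra.test

model = cobra.test.create_test_model("textbook")
model.summary()                      # input/output fluxes at the optimum
model.summary(fva=0.95, names=True)  # add FVA ranges at 95% of the optimum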
def install_payment_instruction(self, instruction,
token_type="Unrestricted",
transaction_id=None):
"""
InstallPaymentInstruction
instruction: The PaymentInstruction to send, for example:
MyRole=='Caller' orSay 'Roles do not match';
token_type: Defaults to "Unrestricted"
transaction_id: Defaults to a new ID
"""
if(transaction_id == None):
transaction_id = uuid.uuid4()
params = {}
params['PaymentInstruction'] = instruction
params['TokenType'] = token_type
params['CallerReference'] = transaction_id
response = self.make_request("InstallPaymentInstruction", params)
return response | InstallPaymentInstruction
instruction: The PaymentInstruction to send, for example:
MyRole=='Caller' orSay 'Roles do not match';
token_type: Defaults to "Unrestricted"
transaction_id: Defaults to a new ID |
def get_mass(chebi_id):
'''Returns mass'''
if len(__MASSES) == 0:
__parse_chemical_data()
return __MASSES[chebi_id] if chebi_id in __MASSES else float('NaN') | Returns mass |
def execute(self):
"""
Execute the command. Intercepts the help subsubcommand to show the help
text.
"""
if self.args and self.argument(0) == "help":
self.error(self.usage() + "\n\n" + self.help())
return False
return True | Execute the command. Intercepts the help subsubcommand to show the help
text. |
def update_view(self, table, view):
"""Updates the SQL query for a view.
If the output table exists, it is replaced with the supplied view query. Otherwise a new
table is created with this view.
:param table: The table to contain the view.
:type table: BQTable
:param view: The SQL query for the view.
:type view: str
"""
body = {
'tableReference': {
'projectId': table.project_id,
'datasetId': table.dataset_id,
'tableId': table.table_id
},
'view': {
'query': view
}
}
if self.table_exists(table):
self.client.tables().update(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id,
body=body).execute()
else:
self.client.tables().insert(projectId=table.project_id,
datasetId=table.dataset_id,
body=body).execute() | Updates the SQL query for a view.
If the output table exists, it is replaced with the supplied view query. Otherwise a new
table is created with this view.
:param table: The table to contain the view.
:type table: BQTable
:param view: The SQL query for the view.
:type view: str |
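A hypothetical call, assuming the luigi.contrib.bigquery-style BQTable and client objects the code above implies; the project, dataset and query are placeholders.

table = BQTable(project_id="my-project", dataset_id="analytics", table_id="daily_users")
client.update_view(
    table,
    "SELECT user_id, COUNT(*) AS events FROM [analytics.raw_events] GROUP BY user_id",
)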
def identifiers(self, identifiers):
"""
:type identifiers: subject_abcs.IdentifierCollection
"""
if (isinstance(identifiers, subject_abcs.IdentifierCollection) or
identifiers is None):
self._identifiers = identifiers
else:
raise ValueError('must use IdentifierCollection') | :type identifiers: subject_abcs.IdentifierCollection |
def deploy_snmp(snmp, host=None, admin_username=None,
admin_password=None, module=None):
'''
Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret
'''
return __execute_cmd('deploy -v SNMPv2 {0} ro'.format(snmp),
host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module) | Change the QuickDeploy SNMP community string, used for switches as well
CLI Example:
.. code-block:: bash
salt dell dracr.deploy_snmp SNMP_STRING
host=<remote DRAC or CMC> admin_username=<DRAC user>
admin_password=<DRAC PW>
salt dell dracr.deploy_password diana secret |