Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
20,600 | def ggnn_fast_dense(node_states,
adjacency_matrix,
num_edge_types,
total_value_depth,
name=None):
with tf.variable_scope(
name,
default_name="ggnn_fast_dense",
values=[node_states, adjacency_matrix, num_edge_types]):
nodes_shape = common_layers.shape_list(node_states)
v = _compute_edge_transforms(node_states,
total_value_depth,
num_edge_types,
name="v_mpnn")
v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
total_value_depth
])
v = tf.transpose(v, [0, 2, 1, 3])
edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
output = compute_values(edge_vectors, v)
return output | ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
multigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum. |
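
A minimal NumPy sketch of the aggregation this function builds up to, assuming `compute_values` contracts the per-edge-type adjacency with the transformed node values (shapes follow the docstring; the einsum spelling is an illustration, not the library's implementation):

```python
import numpy as np

# Illustrative per-edge-type message passing:
# output[b, i, :] = sum over edge types t and source nodes j of A[b, t, i, j] * values[b, t, j, :]
B, N, T, V = 2, 4, 3, 5                                  # batch, nodes, edge types, value depth
values = np.random.rand(B, T, N, V)                      # transformed node states, [B, T, N, V]
adjacency = np.random.randint(0, 2, (B, N, N, T)).astype(float)
edge_vectors = np.transpose(adjacency, (0, 3, 1, 2))     # [B, T, N, N]
output = np.einsum('btij,btjv->biv', edge_vectors, values)
print(output.shape)                                      # (2, 4, 5)
```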
20,601 | def shift_right_3d(x, pad_value=None):
if pad_value is None:
shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
else:
shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]
return shifted_targets | Shift the second dimension of x right by one. |
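
A quick NumPy illustration of the same right-shift along the second (time) dimension, assuming zero padding as in the `pad_value is None` branch:

```python
import numpy as np

# Shift the second (time) dimension right by one, padding the front with zeros.
x = np.arange(2 * 3 * 2).reshape(2, 3, 2)                    # [batch, time, depth]
shifted = np.pad(x, [(0, 0), (1, 0), (0, 0)], mode='constant')[:, :-1, :]
print(x[0, :, 0])        # [0 2 4]
print(shifted[0, :, 0])  # [0 0 2]
```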
20,602 | def compute(self):
if "Signature" in self.params:
raise RuntimeError("Existing signature in parameters")
if self.signature_version is not None:
version = self.signature_version
else:
version = self.params["SignatureVersion"]
if str(version) == "1":
bytes = self.old_signing_text()
hash_type = "sha1"
elif str(version) == "2":
bytes = self.signing_text()
if self.signature_method is not None:
signature_method = self.signature_method
else:
signature_method = self.params["SignatureMethod"]
hash_type = signature_method[len("Hmac"):].lower()
else:
raise RuntimeError("Unsupported SignatureVersion: " % version)
return self.creds.sign(bytes, hash_type) | Compute and return the signature according to the given data. |
20,603 | def setTau(self, vehID, tau):
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_TAU, vehID, tau) | setTau(string, double) -> None
Sets the driver's tau-parameter (reaction time or anticipation time depending on the car-following model) in s for this vehicle. |
20,604 | def split_filename(pathname):
filepart = win32.PathFindFileName(pathname)
pathpart = win32.PathRemoveFileSpec(pathname)
return (pathpart, filepart) | @type pathname: str
@param pathname: Absolute path.
@rtype: tuple( str, str )
@return: Tuple containing the path to the file and the base filename. |
20,605 | def get_source_from_contracts_list(self, contracts):
if contracts is None or len(contracts) == 0:
return
if isinstance(contracts[0], SolidityContract):
self.source_type = "solidity-file"
self.source_format = "text"
for contract in contracts:
self.source_list += [file.filename for file in contract.solidity_files]
self._source_hash.append(contract.bytecode_hash)
self._source_hash.append(contract.creation_bytecode_hash)
elif isinstance(contracts[0], EVMContract):
self.source_format = "evm-byzantium-bytecode"
self.source_type = (
"ethereum-address"
if len(contracts[0].name) == 42 and contracts[0].name[0:2] == "0x"
else "raw-bytecode"
)
for contract in contracts:
if contract.creation_code:
self.source_list.append(contract.creation_bytecode_hash)
if contract.code:
self.source_list.append(contract.bytecode_hash)
self._source_hash = self.source_list
else:
assert False | get the source data from the contracts list
:param contracts: the list of contracts
:return: |
20,606 | def get_relationship_query_session_for_family(self, family_id=None, proxy=None):
if not family_id:
raise NullArgument
if not self.supports_relationship_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.RelationshipQuerySession(family_id, proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | Gets the ``OsidSession`` associated with the relationship query service for the given family.
arg: family_id (osid.id.Id): the ``Id`` of the family
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.RelationshipQuerySession) - a
``RelationshipQuerySession``
raise: NotFound - no ``Family`` found by the given ``Id``
raise: NullArgument - ``family_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_relationship_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_relationship_query()``
and ``supports_visible_federation()`` are ``true``* |
20,607 | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt,
stddev_types)
stddevs = [np.ones(len(dists.repi))*get_sigma(imt)]
return mean, stddevs | See documentation for method `GroundShakingIntensityModel` in
:class:`~openquake.hazardlib.gsim.base.GSIM` |
20,608 | def is_all_field_none(self):
if self._uuid is not None:
return False
if self._avatar is not None:
return False
if self._public_nick_name is not None:
return False
if self._display_name is not None:
return False
if self._country is not None:
return False
return True | :rtype: bool |
20,609 | def _countOverlap(rep1, rep2):
overlap = 0
for e in rep1:
if e in rep2:
overlap += 1
return overlap | Return the overlap between two representations. rep1 and rep2 are lists of
non-zero indices. |
20,610 | def list_campaigns(self, **kwargs):
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, Campaign, True)
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.update_campaign_list, lwrap_type=Campaign, **kwargs) | List all update campaigns.
:param int limit: number of campaigns to retrieve
:param str order: sort direction of campaigns when ordered by creation time (desc|asc)
:param str after: get campaigns after given campaign ID
:param dict filters: Dictionary of filters to apply
:return: List of :py:class:`Campaign` objects
:rtype: PaginatedResponse |
20,611 | def _iterbfs(self, start, end=None, forward=True):
queue, visited = deque([(start, 0)]), set([start])
if forward:
get_edges = self.out_edges
get_next = self.tail
else:
get_edges = self.inc_edges
get_next = self.head
while queue:
curr_node, curr_step = queue.popleft()
yield (curr_node, curr_step)
if curr_node == end:
break
for edge in get_edges(curr_node):
tail = get_next(edge)
if tail not in visited:
visited.add(tail)
queue.append((tail, curr_step + 1)) | The forward parameter specifies whether it is a forward or backward
traversal. Yields tuples where the first value is the node id and the
second value is the hop count. |
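
A self-contained sketch of the same breadth-first traversal with hop counts, written against a plain adjacency dict rather than the graph object's edge accessors (names here are illustrative):

```python
from collections import deque

# Yield (node, hop) pairs in breadth-first order over an adjacency dict.
def bfs_hops(adj, start):
    queue, visited = deque([(start, 0)]), {start}
    while queue:
        node, hop = queue.popleft()
        yield node, hop
        for nxt in adj.get(node, ()):
            if nxt not in visited:
                visited.add(nxt)
                queue.append((nxt, hop + 1))

adj = {1: [2, 3], 2: [4], 3: [4], 4: []}
print(list(bfs_hops(adj, 1)))   # [(1, 0), (2, 1), (3, 1), (4, 2)]
```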
20,612 | def moreData(ra,dec,box):
import cfhtCutout
cdata={: ra, : dec, : 0.2}
inter=cfhtCutout.find_images(cdata,0.2) | Search the CFHT archive for more images of this location |
20,613 | def get(self, key):
if key in self._data_fields:
return self._data_fields[key]
if key in self._sub_reports:
return self._sub_reports[key]
return None | Get a value for a given key
:param key: entry's key
:return: corresponding value |
20,614 | def _update_secrets(self):
self.secrets = read_client_secrets()
if self.secrets is not None:
if "registry" in self.secrets:
if "base" in self.secrets[]:
self.base = self.secrets[][]
self._update_base() | update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base. |
20,615 | def averageValues(self):
assert self.opts[] and self.opts[] ==
filled = self.density > 1
v = self.values.copy()
v[filled] /= self.density[filled]
v[~filled] *= self.density[~filled]
return v | return the averaged values in the grid |
20,616 | def pull_tar(url, name, verify=False):
    return _pull_image('tar', url, name, verify=verify) | Execute a ``machinectl pull-tar`` to download a .tar container image,
and add it to /var/lib/machines as a new container.
.. note::
**Requires systemd >= 219**
url
URL from which to download the container
name
Name for the new container
verify : False
Perform signature or checksum verification on the container. See the
``machinectl(1)`` man page (section titled "Image Transfer Commands")
for more information on requirements for image verification. To perform
signature verification, use ``verify=signature``. For checksum
verification, use ``verify=checksum``. By default, no verification will
be performed.
CLI Examples:
.. code-block:: bash
salt myminion nspawn.pull_tar http://foo.domain.tld/containers/archlinux-2015.02.01.tar.gz arch2 |
20,617 | def _(f, x):
result = {}
for k, v in x.items():
k_, v_ = f(k, v)
result[k_] = v_
    return result | fmap for dict-like; note `f` should have signature `f::key->value->(key, value)` |
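
A usage sketch of such a dict fmap, with a hypothetical `dict_fmap` standing in for the underscore-named helper above:

```python
# Minimal usage sketch of a dict fmap where f maps (key, value) -> (key, value).
def dict_fmap(f, d):
    return dict(f(k, v) for k, v in d.items())

print(dict_fmap(lambda k, v: (k.upper(), v * 2), {'a': 1, 'b': 2}))
# {'A': 2, 'B': 4}
```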
20,618 | def _get_driver(self):
ComputeEngine = get_driver(Provider.GCE)
return ComputeEngine(
self.service_account_email,
self.service_account_file,
project=self.service_account_project
) | Get authenticated GCE driver. |
20,619 | def create_weapon_layer(weapon, hashcode, isSecond=False):
return pgnreader.parse_pagan_file(( % (PACKAGE_DIR, os.sep, os.sep)) + weapon + , hashcode, sym=False, invert=isSecond) | Creates the layer for weapons. |
20,620 | def decimal(self, prompt, default=None, lower=None, upper=None):
prompt = prompt if prompt is not None else "Enter a decimal number"
prompt += " [{0}]: ".format(default) if default is not None else
return self.input(
curry(filter_decimal, default=default, lower=lower, upper=upper),
prompt
) | Prompts user to input decimal, with optional default and bounds. |
20,621 | def exit_config_mode(self, exit_config="exit configuration-mode"):
output = ""
if self.check_config_mode():
output = self.send_command_timing(
exit_config, strip_prompt=False, strip_command=False
)
if "uncommitted changes" in output:
output += self.send_command_timing(
"yes", strip_prompt=False, strip_command=False
)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output | Exit configuration mode. |
20,622 | def insert(self, context, plan):
op = execution.Insert(self.__comp_name, self.__comp())
if op not in plan and self.available(context) != True:
for dep_stub in self.dependencies():
dep_stub.insert(context, plan)
plan.append(op) | Include an insert operation to the given plan.
:param execution.Context context:
Current execution context.
:param list plan:
List of :class:`execution.Operation` instances. |
20,623 | def run(**kwargs):
if in kwargs:
kwargs.pop()
ret = {
: list(kwargs),
: {},
: ,
: None,
}
functions = [func for func in kwargs if in func]
missing = []
tests = []
for func in functions:
func = func.split()[0]
if func not in __salt__:
missing.append(func)
elif __opts__[]:
tests.append(func)
if tests or missing:
ret[] = .join([
missing and "Unavailable function{plr}: "
"{func}.".format(plr=(len(missing) > 1 or ),
func=(.join(missing) or )) or ,
tests and "Function{plr} {func} to be "
"executed.".format(plr=(len(tests) > 1 or ),
func=(.join(tests)) or ) or ,
]).strip()
ret[] = not (missing or not tests)
if ret[] is None:
ret[] = True
failures = []
success = []
for func in functions:
_func = func.split()[0]
try:
func_ret = _call_function(_func, returner=kwargs.get(),
func_args=kwargs.get(func))
if not _get_result(func_ret, ret[].get(, {})):
if isinstance(func_ret, dict):
failures.append(" failed: {1}".format(
func, func_ret.get(, )))
else:
success.append(.format(
func, func_ret.get(, ) if isinstance(func_ret, dict) else func_ret))
ret[][func] = func_ret
except (SaltInvocationError, TypeError) as ex:
failures.append(" failed: {1}".format(func, ex))
ret[] = .join(failures + success)
ret[] = not bool(failures)
return ret | Run a single module function or a range of module functions in a batch.
Supersedes ``module.run`` function, which requires ``m_`` prefix to
function-specific parameters.
:param returner:
Specify a common returner for the whole batch to send the return data
:param kwargs:
Pass any arguments needed to execute the function(s)
.. code-block:: yaml
some_id_of_state:
module.run:
- network.ip_addrs:
- interface: eth0
- cloud.create:
- names:
- test-isbm-1
- test-isbm-2
- ssh_username: sles
- image: sles12sp2
- securitygroup: default
- size: 'c3.large'
- location: ap-northeast-1
- delvol_on_destroy: True
:return: |
20,624 | def rollback(self):
logger.info()
config_text = self.compare_config(other=self.original_config)
if len(config_text) > 0:
return self._commit(config_text, force=True, reload_original_config=False) | It will rollback all changes and go to the *original_config* |
20,625 | def set_default(self):
try:
os.makedirs(os.path.dirname(self._configfile))
except:
pass
self._config = configparser.RawConfigParser()
self._config.add_section()
for key, val in self.DEFAULTS.items():
self._config.set(, key, val)
with open(self._configfile, ) as f:
self._config.write(f) | Set config to default. |
20,626 | def smooth(x0, rho, gamma, axis=0):
n = x0.shape[axis]
    lap_op = spdiags([(2 + rho / gamma) * np.ones(n), -1 * np.ones(n), -1 * np.ones(n)], [0, -1, 1], n, n, format='csc')
x_out = np.rollaxis(spsolve(gamma * lap_op, rho * np.rollaxis(x0, axis, 0)), axis, 0)
return x_out | Proximal operator for a smoothing function enforced via the discrete laplacian operator
Notes
-----
Currently only works with matrices (2-D arrays) as input
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step |
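
A minimal sketch of the proximal step on a 1-D signal, building the same tridiagonal operator with `scipy.sparse.spdiags` and solving with `spsolve` (the `'csc'` format choice is an assumption made for efficient solving):

```python
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve

# Sketch of the proximal step on a 1-D signal: solve (gamma * L) theta = rho * x0,
# with L the tridiagonal operator built exactly as in the function above.
n, rho, gamma = 50, 1.0, 0.1
x0 = np.random.randn(n)
lap_op = spdiags([(2 + rho / gamma) * np.ones(n), -np.ones(n), -np.ones(n)],
                 [0, -1, 1], n, n, format='csc')
theta = spsolve(gamma * lap_op, rho * x0)
print(theta.shape)   # (50,)
```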
20,627 | def add_sign(xml, key, cert, debug=False, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1, digest_algorithm=OneLogin_Saml2_Constants.SHA1):
if xml is None or xml == :
raise Exception()
elem = OneLogin_Saml2_XML.to_etree(xml)
sign_algorithm_transform_map = {
OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
}
sign_algorithm_transform = sign_algorithm_transform_map.get(sign_algorithm, xmlsec.Transform.RSA_SHA1)
signature = xmlsec.template.create(elem, xmlsec.Transform.EXCL_C14N, sign_algorithm_transform, ns=)
issuer = OneLogin_Saml2_XML.query(elem, )
if len(issuer) > 0:
issuer = issuer[0]
issuer.addnext(signature)
elem_to_sign = issuer.getparent()
else:
entity_descriptor = OneLogin_Saml2_XML.query(elem, )
if len(entity_descriptor) > 0:
elem.insert(0, signature)
else:
elem[0].insert(0, signature)
elem_to_sign = elem
        elem_id = elem_to_sign.get('ID', None)
        if elem_id is not None:
            if elem_id:
                elem_id = '#' + elem_id
        else:
            generated_id = OneLogin_Saml2_Utils.generate_unique_id()
            elem_id = '#' + generated_id
            elem_to_sign.attrib['ID'] = generated_id
xmlsec.enable_debug_trace(debug)
xmlsec.tree.add_ids(elem_to_sign, ["ID"])
digest_algorithm_transform_map = {
OneLogin_Saml2_Constants.SHA1: xmlsec.Transform.SHA1,
OneLogin_Saml2_Constants.SHA256: xmlsec.Transform.SHA256,
OneLogin_Saml2_Constants.SHA384: xmlsec.Transform.SHA384,
OneLogin_Saml2_Constants.SHA512: xmlsec.Transform.SHA512
}
digest_algorithm_transform = digest_algorithm_transform_map.get(digest_algorithm, xmlsec.Transform.SHA1)
ref = xmlsec.template.add_reference(signature, digest_algorithm_transform, uri=elem_id)
xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED)
xmlsec.template.add_transform(ref, xmlsec.Transform.EXCL_C14N)
key_info = xmlsec.template.ensure_key_info(signature)
xmlsec.template.add_x509_data(key_info)
dsig_ctx = xmlsec.SignatureContext()
sign_key = xmlsec.Key.from_memory(key, xmlsec.KeyFormat.PEM, None)
sign_key.load_cert_from_memory(cert, xmlsec.KeyFormat.PEM)
dsig_ctx.key = sign_key
dsig_ctx.sign(signature)
        return OneLogin_Saml2_XML.to_string(elem) | Adds signature key and sender's certificate to an element (Message or
Assertion).
:param xml: The element we should sign
:type: string | Document
:param key: The private key
:type: string
:param cert: The public certificate
:type: string
:param debug: Activate the xmlsec debug
:type: bool
:param sign_algorithm: Signature algorithm method
:type sign_algorithm: string
:param digest_algorithm: Digest algorithm method
:type digest_algorithm: string
:returns: Signed XML
:rtype: string |
20,628 | def remove_functions(source, all_inline=False):
global INLINE_COUNT
inline = {}
hoisted = {}
n = 0
limit = len(source) - 9
    res = ''
last = 0
while n < limit:
if n and source[n - 1] in IDENTIFIER_PART:
n += 1
continue
        if source[n:n + 8] == 'function' and source[n + 8] not in IDENTIFIER_PART:
if source[:n].rstrip().endswith(
):
n += 1
continue
if source[n + 8:].lstrip().startswith(
):
n += 1
continue
entered = n
res += source[last:n]
name =
n = pass_white(source, n + 8)
if source[n] in IDENTIFIER_START:
name, n = parse_identifier(source, n)
args, n = pass_bracket(source, n, )
if not args:
raise SyntaxError()
args = args.strip()
args = tuple(parse_identifier(e, 0)[0]
for e in argsplit(args)) if args else ()
if len(args) - len(set(args)):
raise SyntaxError(
)
block, n = pass_bracket(source, n, )
if not block:
raise SyntaxError(
)
mixed = False
if name and not all_inline:
before = source[:entered].rstrip()
if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):
mixed = True
elif before and before[-1] not in PRE_ALLOWED and not before[
-2:] in INCREMENTS:
mixed = True
else:
hoisted[name] = block, args
if not name or mixed or all_inline:
INLINE_COUNT += 1
iname = INLINE_NAME % INLINE_COUNT
res += + iname
inline[ % (
iname, name
)] = block, args
last = n
else:
n += 1
res += source[last:]
    return res, hoisted, inline | Removes functions and returns the new source and 2 dicts:
the first with removed hoisted (global) functions and the second with replaced inline functions |
20,629 | def _baseattrs(self):
result = super()._baseattrs
result["params"] = ", ".join(self.parameters)
return result | A dict of members expressed in literals |
20,630 | def loggers(self):
ret = []
if self.logger_name:
if isinstance(self.logger_name, logging.Logger):
ret.append((self.logger_name.name, self.logger_name))
else:
ret.append((self.logger_name, logging.getLogger(self.logger_name)))
else:
ret = list(logging.Logger.manager.loggerDict.items())
ret.append(("root", logging.getLogger()))
return ret | Return all the loggers that should be activated |
20,631 | def _detach(cls, disk_id):
disk = cls._info(disk_id)
opers = []
if disk.get():
for vm_id in disk[]:
cls.echo( % vm_id)
cls.echo()
opers.append(cls.call(,
vm_id, disk_id))
return opers | Detach a disk from a vm. |
20,632 | def sponsored(self, **kwargs):
eqs = self.search(**kwargs)
eqs = eqs.filter(AllSponsored())
published_offset = getattr(settings, "RECENT_SPONSORED_OFFSET_HOURS", None)
if published_offset:
now = timezone.now()
eqs = eqs.filter(
Published(
after=now - timezone.timedelta(hours=published_offset),
before=now
)
)
return eqs | Search containing any sponsored pieces of Content. |
20,633 | def downgrade():
op.drop_constraint(op.f(),
, type_=)
op.drop_index(op.f(),
table_name=)
op.create_foreign_key(,
, , [],
[])
op.drop_constraint(
op.f(),
, type_=)
op.drop_index(op.f(),
table_name=)
op.create_foreign_key(
,
, , [],
[])
op.drop_constraint(op.f(),
, type_=)
op.drop_index(op.f(),
table_name=)
op.create_foreign_key(,
, , [],
[]) | Downgrade database. |
20,634 | def sub_symbols(pattern, code, symbol):
    return pattern.replace('¤¤', code).replace('¤', symbol) | Substitutes symbols in CLDR number pattern. |
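
A small demonstration, assuming the placeholders are the CLDR currency signs ('¤¤' for the ISO code, '¤' for the symbol); `sub_symbols_demo` is an illustrative stand-in for the function above:

```python
# Sketch assuming the CLDR currency placeholders: '¤¤' -> ISO code, '¤' -> symbol.
def sub_symbols_demo(pattern, code, symbol):
    return pattern.replace('¤¤', code).replace('¤', symbol)

print(sub_symbols_demo('¤#,##0.00', 'USD', '$'))    # $#,##0.00
print(sub_symbols_demo('¤¤ #,##0.00', 'USD', '$'))  # USD #,##0.00
```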
20,635 | def albedo(self, value=999.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
.format(value))
self._albedo = value | Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
20,636 | def get(self, request, uri):
uri = self.decode_uri(uri)
revisions = cio.revisions(uri)
revisions = [list(revision) for revision in revisions]
return self.render_to_json(revisions) | List uri revisions.
JSON Response:
[[uri, state], ...] |
20,637 | def grant(self, lock, unit):
if not hookenv.is_leader():
return False
granted = set()
for u in self.grants:
if lock in self.grants[u]:
granted.add(u)
if unit in granted:
return True
reqs = set()
for u in self.requests:
if u in granted:
continue
for _lock, ts in self.requests[u].items():
if _lock == lock:
reqs.add((ts, u))
queue = [t[1] for t in sorted(reqs)]
if unit not in queue:
return False
        grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
if grant_func(lock, unit, granted, queue):
self.msg(.format(lock, unit))
self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
return True
return False | Maybe grant the lock to a unit.
The decision to grant the lock or not is made for $lock
by a corresponding method grant_$lock, which you may define
in a subclass. If no such method is defined, the default_grant
method is used. See Serial.default_grant() for details. |
20,638 | def write_summary(self, global_step, delta_train_start, lesson_num=0):
if global_step % self.trainer_parameters[] == 0 and global_step != 0:
is_training = "Training." if self.is_training and self.get_step <= self.get_max_steps else "Not Training."
if len(self.stats[]) > 0:
mean_reward = np.mean(
self.stats[])
LOGGER.info(" {}: {}: Step: {}. "
"Time Elapsed: {:0.3f} s "
"Mean "
"Reward: {"
":0.3f}. Std of Reward: {:0.3f}. {}"
.format(self.run_id, self.brain_name,
min(self.get_step, self.get_max_steps),
delta_train_start,
mean_reward, np.std(
self.stats[]),
is_training))
else:
LOGGER.info(" {}: {}: Step: {}. No episode was completed since last summary. {}"
.format(self.run_id, self.brain_name, self.get_step, is_training))
summary = tf.Summary()
for key in self.stats:
if len(self.stats[key]) > 0:
stat_mean = float(np.mean(self.stats[key]))
summary.value.add(tag=.format(
key), simple_value=stat_mean)
self.stats[key] = []
summary.value.add(tag=, simple_value=lesson_num)
self.summary_writer.add_summary(summary, self.get_step)
self.summary_writer.flush() | Saves training statistics to Tensorboard.
:param delta_train_start: Time elapsed since training started.
:param lesson_num: Current lesson number in curriculum.
:param global_step: The number of steps the simulation has been going for |
20,639 | def dump_service(self, sc):
def lprint(fmt, data, index):
ispchr = lambda x: x >= 32 and x <= 126
def print_bytes(octets):
                return ' '.join(['%02X' % x for x in octets])
def print_chars(octets):
                return ''.join([chr(x) if ispchr(x) else '.' for x in octets])
return fmt.format(index, print_bytes(data), print_chars(data))
data_line_fmt = "{0:04X}: {1} |{2}|"
same_line_fmt = "{0:<4s} {1} |{2}|"
lines = list()
last_data = None
same_data = 0
for i in itertools.count():
assert i < 0x10000
try:
this_data = self.read_without_encryption([sc], [BlockCode(i)])
except Type3TagCommandError:
i = i - 1
break
if this_data == last_data:
same_data += 1
else:
if same_data > 1:
lines.append(lprint(same_line_fmt, last_data, "*"))
lines.append(lprint(data_line_fmt, this_data, i))
last_data = this_data
same_data = 0
if same_data > 1:
lines.append(lprint(same_line_fmt, last_data, "*"))
if same_data > 0:
lines.append(lprint(data_line_fmt, this_data, i))
return lines | Read all data blocks of a given service.
:meth:`dump_service` reads all data blocks from the service
with service code *sc* and returns a list of strings suitable
for printing. The number of strings returned does not
necessarily reflect the number of data blocks because a range
of data blocks with equal content is reduced to fewer lines of
output. |
20,640 | def get_historical_klines(symbol, interval, start_str, end_str=None):
client = Client("", "")
output_data = []
limit = 500
timeframe = interval_to_milliseconds(interval)
start_ts = date_to_milliseconds(start_str)
end_ts = None
if end_str:
end_ts = date_to_milliseconds(end_str)
idx = 0
symbol_existed = False
while True:
temp_data = client.get_klines(
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
if not symbol_existed and len(temp_data):
symbol_existed = True
if symbol_existed:
output_data += temp_data
start_ts = temp_data[len(temp_data) - 1][0] + timeframe
else:
start_ts += timeframe
idx += 1
if len(temp_data) < limit:
break
if idx % 3 == 0:
time.sleep(1)
return output_data | Get Historical Klines from Binance
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format
:type start_str: str
:param end_str: optional - end date string in UTC format
:type end_str: str
:return: list of OHLCV values |
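
A generic sketch of the same chunked-pagination loop, with a hypothetical `fetch_page(start_ts, limit)` standing in for `client.get_klines` and the rate-limit pause kept:

```python
import time

# Hypothetical fetch_page(start_ts, limit) stands in for client.get_klines; the
# loop advances past the last returned candle and pauses to respect rate limits.
def paginate(fetch_page, start_ts, end_ts, step_ms, limit=500):
    out, calls = [], 0
    while start_ts <= end_ts:
        page = fetch_page(start_ts, limit)
        out += page
        # advance past the last candle returned, or skip an empty window
        start_ts = (page[-1][0] + step_ms) if page else (start_ts + step_ms)
        calls += 1
        if len(page) < limit:
            break
        if calls % 3 == 0:
            time.sleep(1)
    return out

# toy usage: each "candle" is [open_time_ms]; pages of at most 2 candles
fake = lambda start, limit: [[t] for t in range(start, 300000, 60000)][:limit]
print(len(paginate(fake, 0, 240000, 60000, limit=2)))   # 5
```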
20,641 | def get_profile_info(self, raw_token, profile_info_params={}):
"Fetch user profile information."
try:
            response = self.request('get', self.provider.profile_url, token=raw_token, params=profile_info_params)
response.raise_for_status()
except RequestException as e:
logger.error(.format(e))
return None
else:
return response.json() or response.text | Fetch user profile information. |
20,642 | def add_cookie_header(self, request, referrer_host=None):
new_request = convert_http_request(request, referrer_host)
self._cookie_jar.add_cookie_header(new_request)
request.fields.clear()
for name, value in new_request.header_items():
request.fields.add(name, value) | Wrapped ``add_cookie_header``.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL. |
20,643 | def get_issues_in_queue(self, service_desk_id, queue_id, start=0, limit=50):
url = .format(service_desk_id, queue_id)
params = {}
        if start is not None:
            params['start'] = int(start)
        if limit is not None:
            params['limit'] = int(limit)
return self.get(url, headers=self.experimental_headers, params=params) | Returns a page of issues inside a queue for a given queue ID.
Only fields that the queue is configured to show are returned.
For example, if a queue is configured to show only Description and Due Date,
then only those two fields are returned for each issue in the queue.
Permissions: The calling user must have permission to view the requested queue,
i.e. they must be an agent of the service desk that the queue belongs to.
:param service_desk_id: str
:param queue_id: str
:param start: int
:param limit: int
:return: a page of issues |
20,644 | def check_auth(self, username, password):
if username == self.args.username:
from glances.password import GlancesPassword
pwd = GlancesPassword()
return pwd.check_password(self.args.password, pwd.sha256_hash(password))
else:
return False | Check if a username/password combination is valid. |
20,645 | def list_load_areas(self, session, mv_districts):
lv_loads_threshold = cfg_ding0.get(, )
gw2kw = 10 ** 6
stations_list = [d.mv_grid._station.id_db for d in mv_districts]
lv_load_areas_sqla = session.query(
self.orm[].id.label(),
(self.orm[].sector_peakload_residential * gw2kw).\
label(),
(self.orm[].sector_peakload_retail * gw2kw).\
label(),
(self.orm[].sector_peakload_industrial * gw2kw).\
label(),
(self.orm[].sector_peakload_agricultural * gw2kw).\
label(),
). \
filter(self.orm[].subst_id.in_(stations_list)).\
filter(((self.orm[].sector_peakload_residential
+ self.orm[].sector_peakload_retail
+ self.orm[].sector_peakload_industrial
+ self.orm[].sector_peakload_agricultural)
* gw2kw) > lv_loads_threshold). \
filter(self.orm[])
lv_load_areas = pd.read_sql_query(lv_load_areas_sqla.statement,
session.bind,
index_col=)
return lv_load_areas | list load_areas (load areas) peak load from database for a single MV grid_district
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_districts:
List of MV districts |
20,646 | def json_files_serializer(objs, status=None):
files = [file_serializer(obj) for obj in objs]
return make_response(json.dumps(files), status) | JSON Files Serializer.
:parma objs: A list of:class:`invenio_files_rest.models.ObjectVersion`
instances.
:param status: A HTTP Status. (Default: ``None``)
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`. |
20,647 | def _validate_schema(self, schema, field, value):
if schema is None:
return
if isinstance(value, Sequence) and not isinstance(value, _str_type):
self.__validate_schema_sequence(field, schema, value)
elif isinstance(value, Mapping):
self.__validate_schema_mapping(field, schema, value) | {'type': ['dict', 'string'],
'anyof': [{'validator': 'schema'},
{'validator': 'bulk_schema'}]} |
20,648 | def sun_ra_dec(utc_time):
jdate = jdays2000(utc_time) / 36525.0
eps = np.deg2rad(23.0 + 26.0 / 60.0 + 21.448 / 3600.0 -
(46.8150 * jdate + 0.00059 * jdate * jdate -
0.001813 * jdate * jdate * jdate) / 3600)
eclon = sun_ecliptic_longitude(utc_time)
x__ = np.cos(eclon)
y__ = np.cos(eps) * np.sin(eclon)
z__ = np.sin(eps) * np.sin(eclon)
r__ = np.sqrt(1.0 - z__ * z__)
declination = np.arctan2(z__, r__)
right_ascension = 2 * np.arctan2(y__, (x__ + r__))
return right_ascension, declination | Right ascension and declination of the sun at *utc_time*. |
20,649 | def sort(self, col: str):
try:
self.df = self.df.copy().sort_values(col)
except Exception as e:
self.err(e, "Can not sort the dataframe from column " +
str(col)) | Sorts the main dataframe according to the given column
:param col: column name
:type col: str
:example: ``ds.sort("Col 1")`` |
20,650 | def safe_dump(d, fname, *args, **kwargs):
if osp.exists(fname):
        os.rename(fname, fname + '~')
    lock = fasteners.InterProcessLock(fname + '.lck')
lock.acquire()
try:
        with open(fname, 'w') as f:
ordered_yaml_dump(d, f, *args, **kwargs)
except:
raise
finally:
lock.release() | Savely dump `d` to `fname` using yaml
This method creates a copy of `fname` called ``fname + '~'`` before saving
`d` to `fname` using :func:`ordered_yaml_dump`
Parameters
----------
d: object
The object to dump
fname: str
The path where to dump `d`
Other Parameters
----------------
``*args, **kwargs``
Will be forwarded to the :func:`ordered_yaml_dump` function |
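
A stdlib-only sketch of the backup-then-write idea (rename the old file to ``fname + '~'``, then write fresh), using JSON instead of YAML and omitting the inter-process lock:

```python
import json, os, tempfile

# Keep the previous version as fname~ before writing the new contents.
def safe_dump_json(d, fname):
    if os.path.exists(fname):
        os.replace(fname, fname + '~')
    with open(fname, 'w') as f:
        json.dump(d, f)

target = os.path.join(tempfile.gettempdir(), 'demo.json')
safe_dump_json({'a': 1}, target)
safe_dump_json({'a': 2}, target)   # first version now lives at demo.json~
```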
20,651 | def _getReader(self, filename, scoreClass):
if filename.endswith() or filename.endswith():
return JSONRecordsReader(filename, scoreClass)
else:
raise ValueError(
% filename) | Obtain a JSON record reader for DIAMOND records.
@param filename: The C{str} file name holding the JSON.
@param scoreClass: A class to hold and compare scores (see scores.py). |
20,652 | def _sentiment(self, distance=True):
sum_pos = 0
sum_neg = 0
text = self.parent
entity_positions = range(self.start, self.end)
non_entity_positions = set(range(len(text.words))).difference(entity_positions)
if not distance:
non_entity_polarities = np.array([text.words[i].polarity for i in non_entity_positions])
sum_pos = sum(non_entity_polarities == 1)
sum_neg = sum(non_entity_polarities == -1)
else:
polarities = np.array([w.polarity for w in text.words])
polarized_positions = np.argwhere(polarities != 0)[0]
polarized_non_entity_positions = non_entity_positions.intersection(polarized_positions)
sentence_len = len(text.words)
for i in polarized_non_entity_positions:
min_dist = min(abs(self.start - i), abs(self.end - i))
if text.words[i].polarity == 1:
sum_pos += 1.0 - (min_dist - 1.0) / (2.0 * sentence_len)
else:
sum_neg += 1.0 - (min_dist - 1.0) / (2.0 *sentence_len)
return (sum_pos, sum_neg) | Calculates the sentiment of an entity as it appears in text. |
20,653 | def filter_renderers(self, renderers, format):
renderers = [renderer for renderer in renderers
if renderer.format == format]
if not renderers:
raise Http404
return renderers | If there is a '.json' style format suffix, filter the renderers
so that we only negotiate against those that accept that format. |
20,654 | def _adaptSegment(self, segUpdate):
trimSegment = False
c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment
activeSynapses = segUpdate.activeSynapses
synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])
if segment is not None:
if self.verbosity >= 4:
print "Reinforcing segment
print " before:",
segment.debugPrint()
segment.lastActiveIteration = self.lrnIterationIdx
segment.positiveActivations += 1
segment.dutyCycle(active=True)
lastSynIndex = len(segment.syns) - 1
inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
if s not in synToUpdate]
trimSegment = segment.updateSynapses(inactiveSynIndices,
-self.permanenceDec)
activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
segment.updateSynapses(activeSynIndices, self.permanenceInc)
synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
if self.maxSynapsesPerSegment > 0 \
and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
numToFree = (len(segment.syns) + len(synsToAdd) -
self.maxSynapsesPerSegment)
segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
for newSyn in synsToAdd:
segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)
if self.verbosity >= 4:
print " after:",
segment.debugPrint()
else:
newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment)
for synapse in activeSynapses:
newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)
if self.verbosity >= 3:
print "New segment
newSegment.debugPrint()
self.cells[c][i].append(newSegment)
return trimSegment | This function applies segment update information to a segment in a
cell.
Synapses on the active list get their permanence counts incremented by
permanenceInc. All other synapses get their permanence counts decremented
by permanenceDec.
We also increment the positiveActivations count of the segment.
:param segUpdate SegmentUpdate instance
:returns: True if some synapses were decremented to 0 and the segment is a
candidate for trimming |
20,655 | def fit_transform(self, Z):
self._validate_vocabulary()
analyze = self.build_analyzer()
        A = Z.transform(lambda X: list(map(analyze, X)), column='X').persist()
        X = A[:, 'X'] if isinstance(A, DictRDD) else A
self.vocabulary_ = self._init_vocab(X)
mapper = self.broadcast(self._count_vocab, A.context)
        Z = A.transform(mapper, column='X', dtype=sp.spmatrix)
        if not self.fixed_vocabulary_:
            X = Z[:, 'X'] if isinstance(Z, DictRDD) else Z
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
kept_indices, self.stop_words_ = self._limit_features(
X, self.vocabulary_, max_doc_count, min_doc_count, max_features)
map_index = self._sort_features(self.vocabulary_)
mask = kept_indices[map_index]
            Z = Z.transform(lambda x: x[:, mask], column='X', dtype=sp.spmatrix)
A.unpersist()
return Z | Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
Z : iterable or DictRDD with column 'X'
An iterable of raw_documents which yields either str, unicode or
file objects; or a DictRDD with column 'X' containing such
iterables.
Returns
-------
X : array, [n_samples, n_features] or DictRDD
Document-term matrix. |
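
The distributed version mirrors scikit-learn's `CountVectorizer` semantics (vocabulary building plus `min_df`/`max_df`/`max_features` pruning); a plain local equivalent for comparison:

```python
from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat", "the dog sat on the mat"]
vec = CountVectorizer(min_df=1)
X = vec.fit_transform(docs)              # sparse document-term matrix
print(sorted(vec.vocabulary_))           # ['cat', 'dog', 'mat', 'on', 'sat', 'the']
print(X.shape)                           # (2, 6)
```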
20,656 | def generate_description(dataset_name, local_cache_dir=None):
assert (local_cache_dir!=None)
readme_file = open(os.path.join(local_cache_dir,,dataset_name,), )
try:
df = fetch_data(dataset_name)
fnames = [col for col in df.columns if col!=]
types = get_types(df.ix[:, df.columns != ])
feat=count_features_type(df.ix[:, df.columns != ])
endpoint=determine_endpoint_type(df.ix[:, df.columns == ])
mse=imbalance_metrics(df[].tolist())
readme_file.write( % dataset_name)
readme_file.write()
readme_file.write( % str(len(df.axes[0])))
readme_file.write("
readme_file.write("
readme_file.write("
readme_file.write("
readme_file.write("Endpoint type: %s\n\n" % endpoint)
readme_file.write("
readme_file.write("Imbalance metric: %s\n\n" % mse[1])
readme_file.write( % .join([f + + t for f,t in
zip(fnames,types)]))
except IOError as err:
print(err)
finally:
        readme_file.close() | Generates a description for a given dataset in its README.md file in the dataset's local_cache_dir.
:param dataset_name: str
The name of the data set to load from PMLB.
:param local_cache_dir: str (required)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used. |
20,657 | def predict(self, Xnew, full_cov=False, kern=None, **kwargs):
return self.predict_noiseless(Xnew, full_cov=full_cov, kern=kern) | Predict the function(s) at the new point(s) Xnew. For Student-t processes, this method is equivalent to
predict_noiseless as no likelihood is included in the model. |
20,658 | def _get_encoding(encoding_or_label):
if hasattr(encoding_or_label, ):
return encoding_or_label
encoding = lookup(encoding_or_label)
if encoding is None:
raise LookupError( % encoding_or_label)
return encoding | Accept either an encoding object or label.
:param encoding: An :class:`Encoding` object or a label string.
:returns: An :class:`Encoding` object.
:raises: :exc:`~exceptions.LookupError` for an unknown label. |
20,659 | def _piecewise(x, condlist, funclist, *args, **kw):
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
(isinstance(condlist, np.ndarray) and condlist.ndim == 0) or \
(x.ndim > 0 and condlist[0].ndim == 0):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
if n == n2-1:
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y | Fixed version of numpy.piecewise for 0-d arrays |
20,660 | def uinit(self, ushape):
if self.opt[] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
U0 = np.sign(self.block_sep0(self.Y)) / self.rho
U1 = self.block_sep1(self.Y) - self.S
return self.block_cat(U0, U1) | Return initialiser for working variable U. |
20,661 | def get_query_error(self,i):
x = self._query_hpas[i]
h = x[]
pos = x[]
prob = 0
be = BaseError()
be.set_observable(h.get_target(),h.get_query())
if i != 0 and pos == 0:
prev = x[]
if len(prev.get_query()) == 0:
be.set_unobserved_before(len(prev.get_target()),0,prev.get_target()[0],0.5)
if i != len(self._query_hpas)-1 and pos == len(h.get_query())-1:
if x[]:
foll = x[]
if len(foll.get_query()) == 0:
be.set_unobserved_after(len(foll.get_target()),0,foll.get_target()[0],0.5)
return be | Just get a single error characterization based on the index
:param i: list index
:type i: int
:returns: base-wise error
:rtype: HPA group description |
20,662 | def executed_without_callbacks(self):
T = self.tm.trace
if T: T.write(self.trace_message(,
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED) | Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods. |
20,663 | def keys(self):
keys = list()
for n in range(len(self)):
key = self.get_value()
if not key in [, None]: keys.append(key)
return keys | Returns a sorted list of keys |
20,664 | def filter_trends(self, pattern=):
filtered_trends = {}
with open(self.abspath) as fobj:
for idx, line in enumerate(fobj):
variable_idx = idx-self._attributes[]-1
if in line:
break
if pattern in line and variable_idx > 0:
filtered_trends[variable_idx] = line
return filtered_trends | Filter available trends |
20,665 | def check_output(self, cmd):
ret, output = self._exec(cmd)
if not ret == 0:
raise CommandError(self)
return output | Wrapper for subprocess.check_output. |
20,666 | def setdbo(self, bond1, bond2, dboval):
if self.bondtype != 2:
raise FrownsError("To set double bond order, center bond must be double!")
assert dboval in [DX_CHI_CIS, DX_CHI_TRANS, DX_CHI_NO_DBO], "bad dboval value"
self.dbo.append(bond1, bond2, dboval) | Set the double bond orientation for bond1 and bond2
based on this bond |
20,667 | def update_tab_label(self, state_m):
state_identifier = self.get_state_identifier(state_m)
if state_identifier not in self.tabs and state_identifier not in self.closed_tabs:
return
tab_info = self.tabs[state_identifier] if state_identifier in self.tabs else self.closed_tabs[state_identifier]
page = tab_info[]
set_tab_label_texts(page.title_label, state_m, tab_info[]) | Update all tab labels
:param rafcon.state_machine.states.state.State state_m: State model whose tab label is to be updated |
20,668 | def num_samples(input_filepath):
validate_input_file(input_filepath)
output = soxi(input_filepath, )
if output == :
logger.warning("Number of samples unavailable for %s", input_filepath)
return int(output) | Show number of samples (0 if unavailable).
Parameters
----------
input_filepath : str
Path to audio file.
Returns
-------
n_samples : int
total number of samples in audio file.
Returns 0 if empty or unavailable |
20,669 | def reduce_multiline(string):
string = str(string)
return " ".join([item.strip()
for item in string.split("\n")
if item.strip()]) | reduces a multiline string to a single line of text.
args:
string: the text to reduce |
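
A usage sketch with an illustrative local copy of the helper, since the expected output is easy to show:

```python
# Collapse a multiline string to a single line of space-separated text.
def reduce_multiline_demo(string):
    return " ".join(item.strip() for item in str(string).split("\n") if item.strip())

print(reduce_multiline_demo("first line\n   second line\n\n third"))
# first line second line third
```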
20,670 | def _register_template(cls, template_bytes):
elif isinstance(o, _Child):
cls.bind_template_child_full(name, True, 0)
bound_widgets.add(name)
cls.set_connect_func(_connect_func, cls)
cls.__gtemplate_methods__ = bound_methods
cls.__gtemplate_widgets__ = bound_widgets
base_init_template = cls.init_template
cls.init_template = lambda s: _init_template(s, cls, base_init_template) | Registers the template for the widget and hooks init_template |
20,671 | def sanitize_for_archive(url, headers, payload):
url = re.sub(, , url)
return url, headers, payload | Sanitize URL of a HTTP request by removing the token information
before storing/retrieving archived items
:param: url: HTTP url request
:param: headers: HTTP headers request
:param: payload: HTTP payload request
:returns the sanitized url, plus the headers and payload |
20,672 | def _scope_and_enforce_robots(self, site, parent_page, outlinks):
pages = {}
blocked = set()
out_of_scope = set()
for url in outlinks or []:
url_for_scoping = urlcanon.semantic(url)
url_for_crawling = urlcanon.whatwg(url)
decision = site.accept_reject_or_neither(
url_for_scoping, parent_page=parent_page)
if decision is True:
hops_off = 0
elif decision is None:
decision = parent_page.hops_off < site.scope.get(
, 0)
hops_off = parent_page.hops_off + 1
if decision is True:
if brozzler.is_permitted_by_robots(site, str(url_for_crawling)):
fresh_page = self._build_fresh_page(
site, parent_page, url, hops_off)
if fresh_page.id in pages:
self._merge_page(pages[fresh_page.id], fresh_page)
else:
pages[fresh_page.id] = fresh_page
else:
blocked.add(str(url_for_crawling))
else:
out_of_scope.add(str(url_for_crawling))
return pages, blocked, out_of_scope | Returns tuple (
dict of {page_id: Page} of fresh `brozzler.Page` representing in
scope links accepted by robots policy,
set of in scope urls (canonicalized) blocked by robots policy,
set of out-of-scope urls (canonicalized)). |
20,673 | def pre_fork(self, process_manager):
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device) | Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager |
20,674 | def get_reduced_assignment(
self,
original_assignment,
cluster_topology,
max_partition_movements,
max_leader_only_changes,
max_movement_size=DEFAULT_MAX_MOVEMENT_SIZE,
force_progress=False,
):
new_assignment = cluster_topology.assignment
if (not original_assignment or not new_assignment or
max_partition_movements < 0 or max_leader_only_changes < 0 or
max_movement_size < 0):
return {}
leaders_changes = [
(t_p, new_assignment[t_p])
for t_p, replica in six.iteritems(original_assignment)
if replica != new_assignment[t_p] and
set(replica) == set(new_assignment[t_p])
]
partition_change_count = [
(
t_p,
len(set(replica) - set(new_assignment[t_p])),
)
for t_p, replica in six.iteritems(original_assignment)
if set(replica) != set(new_assignment[t_p])
]
self.log.info(
"Total number of actions before reduction: %s.",
len(partition_change_count) + len(leaders_changes),
)
reduced_actions = self._extract_actions_unique_topics(
partition_change_count,
max_partition_movements,
cluster_topology,
max_movement_size,
)
if len(reduced_actions) == 0 and force_progress:
smallest_size = min([cluster_topology.partitions[t_p[0]].size for t_p in partition_change_count])
self.log.warning(
.format(
max_movement_size=max_movement_size,
smallest_size=smallest_size,
)
)
max_movement_size = smallest_size
reduced_actions = self._extract_actions_unique_topics(
partition_change_count,
max_partition_movements,
cluster_topology,
max_movement_size,
)
reduced_partition_changes = [
(t_p, new_assignment[t_p]) for t_p in reduced_actions
]
self.log.info(
"Number of partition changes: %s."
" Number of leader-only changes: %s",
len(reduced_partition_changes),
min(max_leader_only_changes, len(leaders_changes)),
)
reduced_assignment = {
t_p: replicas
for t_p, replicas in (
reduced_partition_changes + leaders_changes[:max_leader_only_changes]
)
}
return reduced_assignment | Reduce the assignment based on the total actions.
Actions represent actual partition movements
and/or changes in preferred leader.
Get the difference of original and proposed assignment
and take the subset of this plan for given limit.
Argument(s):
original_assignment: Current assignment of cluster in zookeeper
cluster_topology: Cluster topology containing the new proposed-assignment of cluster
max_partition_movements:Maximum number of partition-movements in
final set of actions
max_leader_only_changes:Maximum number of actions with leader only changes
max_movement_size: Maximum size, in bytes, to move in final set of actions
force_progress: Whether to force progress if max_movement_size is too small
:return:
:reduced_assignment: Final reduced assignment |
20,675 | def request_token(self):
logging.debug("Getting request token from %s:%d",
self.server, self.port)
token, secret = self._token("/oauth/requestToken")
return "{}/oauth/authorize?oauth_token={}".format(self.host, token), \
token, secret | Returns url, request_token, request_secret |
20,676 | def referencegenomefinder(self):
referencematch = defaultdict(make_dict)
referencehits = defaultdict(make_dict)
referencegenomeprofile = .format(self.referenceprofilepath)
with open(referencegenomeprofile) as referencefile:
referencetypes = json.load(referencefile)
for sample in self.metadata:
if sample[self.analysistype].reportdir != :
for genome in referencetypes:
referencehits[sample.name][genome] = 0
for gene in self.bestdict[sample.name]:
if list(self.bestdict[sample.name][gene].keys())[0] == referencetypes[genome][gene]:
referencematch[sample.name][genome][gene] = 1
referencehits[sample.name][genome] += 1
else:
referencematch[sample.name][genome][gene] = 0
for sample in self.metadata:
if sample[self.analysistype].reportdir != :
try:
matches = sorted(referencehits[sample.name].items(),
key=operator.itemgetter(1), reverse=True)
most_matches = matches[0][1]
i = 0
match_list = list()
while matches[i][1] == most_matches:
match_list.append(matches[i])
i += 1
sorted_list = sorted(match_list)
sortedmatches = sorted_list[0]
except IndexError:
sortedmatches = (0, 0)
if 0 < int(sortedmatches[1]) < len(sample[self.analysistype].allelenames):
mismatches = []
for gene, allele in referencetypes[sortedmatches[0]].items():
percentidentity = .format(list(self.bestdict[sample.name][gene].values())[0])
self.referencegenome[sample.name][sortedmatches[0]][sortedmatches[1]][gene][list(self.bestdict[
sample.name][gene].keys())[0]] = percentidentity
if list(self.bestdict[sample.name][gene].keys())[0] != allele:
sample[self.analysistype].referencegenome = sortedmatches[0]
sample.general.referencegenus = sortedmatches[0].split()[0]
sample[self.analysistype].referencegenomepath = \
.format(self.referenceprofilepath, sortedmatches[0])
sample[self.analysistype].matchestoreferencegenome = sortedmatches[1]
mismatches.append(({gene: (.format(list(self.bestdict[sample.name][gene]
.keys())[0], allele))}))
sample[self.analysistype].mismatchestoreferencegenome = mismatches
elif sortedmatches == 0:
for gene in sample[self.analysistype].allelenames:
self.referencegenome[sample.name][sortedmatches[0]][0][gene][] = 0
sample[self.analysistype].referencegenome =
sample.general.referencegenus =
sample[self.analysistype].referencegenomepath =
sample[self.analysistype].matchestoreferencegenome = 0
sample[self.analysistype].mismatchestoreferencegenome = [0]
else:
for gene in referencetypes[sortedmatches[0]]:
self.referencegenome[sample.name][sortedmatches[0]][sortedmatches[1]][gene][list(self.bestdict[
sample.name][gene].keys())[0]] = .format(list(self.bestdict[
sample.name][gene].values())[0])
sample[self.analysistype].referencegenome = sortedmatches[0]
sample[self.analysistype].referencegenomepath = \
.format(self.referenceprofilepath, sortedmatches[0])
sample.general.referencegenus = sortedmatches[0].split()[0]
sample[self.analysistype].matchestoreferencegenome = sortedmatches[1]
sample[self.analysistype].mismatchestoreferencegenome = [0]
make_path(self.reportpath)
with open(.format(self.reportpath), ) as referencegenomereport:
row =
for sample in self.metadata:
if sample[self.analysistype].reportdir != :
row += .format(sample.name, sample[self.analysistype].referencegenome,
sample[self.analysistype].matchestoreferencegenome)
referencegenomereport.write(row)
dotter() | Finds the closest reference genome to the profile of interest |
20,677 | def identical_dataset_and_algorithm_tuner(self, additional_parents=None):
return self._create_warm_start_tuner(additional_parents=additional_parents,
warm_start_type=WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM) | Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new
instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as
"IdenticalDataAndAlgorithm" and parents as the union of provided list of ``additional_parents`` and the ``self``
Args:
additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
the identical dataset and algorithm tuner.
Returns:
sagemaker.tuner.HyperparameterTuner: HyperparameterTuner instance which can be used to launch identical
dataset and algorithm tuning job.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> identical_dataset_algo_tuner = parent_tuner.identical_dataset_and_algorithm_tuner(
>>> additional_parents={"parent-job-2"})
Later On:
>>> identical_dataset_algo_tuner.fit(inputs={}) |
20,678 | def sens_mppt_send(self, mppt_timestamp, mppt1_volt, mppt1_amp, mppt1_pwm, mppt1_status, mppt2_volt, mppt2_amp, mppt2_pwm, mppt2_status, mppt3_volt, mppt3_amp, mppt3_pwm, mppt3_status, force_mavlink1=False):
return self.send(self.sens_mppt_encode(mppt_timestamp, mppt1_volt, mppt1_amp, mppt1_pwm, mppt1_status, mppt2_volt, mppt2_amp, mppt2_pwm, mppt2_status, mppt3_volt, mppt3_amp, mppt3_pwm, mppt3_status), force_mavlink1=force_mavlink1) | Maximum Power Point Tracker (MPPT) sensor data for solar module power
performance tracking
mppt_timestamp : MPPT last timestamp (uint64_t)
mppt1_volt : MPPT1 voltage (float)
mppt1_amp : MPPT1 current (float)
mppt1_pwm : MPPT1 pwm (uint16_t)
mppt1_status : MPPT1 status (uint8_t)
mppt2_volt : MPPT2 voltage (float)
mppt2_amp : MPPT2 current (float)
mppt2_pwm : MPPT2 pwm (uint16_t)
mppt2_status : MPPT2 status (uint8_t)
mppt3_volt : MPPT3 voltage (float)
mppt3_amp : MPPT3 current (float)
mppt3_pwm : MPPT3 pwm (uint16_t)
mppt3_status : MPPT3 status (uint8_t) |
20,679 | def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s[]:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt[], ) as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s[]:
words.append(w[])
else:
text =
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line +
words = re.split(r, text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams | Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af |
20,680 | def get_compositions_by_repository(self, repository_id):
        mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_composition_lookup_session_for_repository(repository_id, proxy=self._proxy)
lookup_session.use_isolated_repository_view()
return lookup_session.get_compositions() | Gets the list of ``Compositions`` associated with a ``Repository``.
arg: repository_id (osid.id.Id): ``Id`` of the ``Repository``
return: (osid.repository.CompositionList) - list of related
compositions
raise: NotFound - ``repository_id`` is not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
20,681 | def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir)) | Tar a directory |
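
A usage sketch: create a throwaway directory, tar it with the same `w:gz` mode and `arcname`, and list the archive contents:

```python
import os, tarfile, tempfile

# Build a small directory, archive it, then list what ended up in the tarball.
src = tempfile.mkdtemp()
with open(os.path.join(src, 'hello.txt'), 'w') as f:
    f.write('hi')

out = src + '.tar.gz'
with tarfile.open(out, 'w:gz') as tar:
    tar.add(src, arcname=os.path.basename(src))

with tarfile.open(out) as tar:
    print(tar.getnames())   # [<dirname>, <dirname>/hello.txt]
```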
20,682 | def status_favourite(self, id):
id = self.__unpack_id(id)
        url = '/api/v1/statuses/{0}/favourite'.format(str(id))
        return self.__api_request('POST', url) | Favourite a status.
Returns a `toot dict`_ with the favourited status. |
20,683 | def _open_ftp(self):
_ftp = FTP()
_ftp.set_debuglevel(0)
with ftp_errors(self):
_ftp.connect(self.host, self.port, self.timeout)
_ftp.login(self.user, self.passwd, self.acct)
self._features = {}
try:
feat_response = _decode(_ftp.sendcmd("FEAT"), "latin-1")
except error_perm:
self.encoding = "latin-1"
else:
self._features = self._parse_features(feat_response)
self.encoding = "utf-8" if "UTF8" in self._features else "latin-1"
if not PY2:
_ftp.file = _ftp.sock.makefile(
"r", encoding=self.encoding
)
_ftp.encoding = self.encoding
self._welcome = _ftp.welcome
return _ftp | Open a new ftp object. |
20,684 | def process(source, target, rdfsonly, base=None, logger=logging):
for link in source.match():
s, p, o = link[:3]
if s == (base or ) + : continue
if p in RESOURCE_MAPPING: p = RESOURCE_MAPPING[p]
if o in RESOURCE_MAPPING: o = RESOURCE_MAPPING[o]
if p == VERSA_BASEIRI + :
tlinks = list(source.match(s, TYPE_REL))
if tlinks:
if tlinks[0][TARGET] == VERSA_BASEIRI + :
p = I(RDFS_NAMESPACE + )
elif tlinks[0][TARGET] == VERSA_BASEIRI + :
p = I(RDFS_NAMESPACE + )
if p == VERSA_BASEIRI + :
suri = I(iri.absolutize(s, base)) if base else s
target.add((URIRef(o), URIRef(RDFS_NAMESPACE + ), URIRef(suri)))
continue
if p == VERSA_BASEIRI + :
if o not in [, ]:
ouri = I(iri.absolutize(o, base)) if base else o
target.add((URIRef(s), URIRef(RDFS_NAMESPACE + ), URIRef(ouri)))
continue
s = URIRef(s)
p = RDF.type if p == TYPE_REL else URIRef(p)
o = URIRef(o) if isinstance(o, I) else Literal(o)
if not rdfsonly or p.startswith(RDF_NAMESPACE) or p.startswith(RDFS_NAMESPACE):
target.add((s, p, o))
return | Prepare a statement into a triple ready for rdflib graph |
20,685 | def mutate(self, row):
mutation_count = len(row._get_mutations())
if mutation_count > MAX_MUTATIONS:
raise MaxMutationsError(
"The row key {} exceeds the number of mutations {}.".format(
row.row_key, mutation_count
)
)
if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS:
self.flush()
self.rows.append(row)
self.total_mutation_count += mutation_count
self.total_size += row.get_mutations_size()
if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count:
self.flush() | Add a row to the batch. If the current batch meets one of the size
limits, the batch is sent synchronously.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_batcher_mutate]
:end-before: [END bigtable_batcher_mutate]
:type row: class
:param row: class:`~google.cloud.bigtable.row.DirectRow`.
:raises: One of the following:
* :exc:`~.table._BigtableRetryableError` if any
row returned a transient error.
* :exc:`RuntimeError` if the number of responses doesn't
match the number of rows that were retried
* :exc:`.batcher.MaxMutationsError` if any row exceeds max
mutations count. |
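A hedged end-to-end sketch of driving a batcher like this; the project, instance, table, and column-family names are placeholders, and constructor details may vary between client versions:

from google.cloud import bigtable
from google.cloud.bigtable.batcher import MutationsBatcher

client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("my-table")

batcher = MutationsBatcher(table, flush_count=100)
row = table.direct_row(b"row-key-1")
row.set_cell("stats", b"views", b"42")   # assumes a 'stats' column family exists
batcher.mutate(row)                      # queued; flushed automatically at the size limits
batcher.flush()                          # push out anything still queued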
20,686 | def query(self, coords, **kwargs):
return super(Lenz2017Query, self).query(coords, **kwargs) | Returns E(B-V), in mags, at the specified location(s) on the sky.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
Returns:
A float array of the reddening, in magnitudes of E(B-V), at the
selected coordinates. |
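A hedged usage sketch, assuming the dustmaps package with the Lenz et al. (2017) map already fetched into its data directory; the example coordinates are arbitrary:

import astropy.units as u
from astropy.coordinates import SkyCoord
from dustmaps.lenz2017 import Lenz2017Query

coords = SkyCoord(l=120.0 * u.deg, b=45.0 * u.deg, frame="galactic")
query = Lenz2017Query()
ebv = query.query(coords)  # E(B-V) in magnitudes at the requested position
print(ebv)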
20,687 | def write_config_file(self, parsed_namespace, output_file_paths, exit_after=False):
for output_file_path in output_file_paths:
try:
with open(output_file_path, "w") as output_file:
pass
except IOError as e:
raise ValueError("Couldn't open %s for writing: %s" % (
output_file_path, e))
if output_file_paths:
config_items = self.get_items_for_config_file_output(
self._source_to_settings, parsed_namespace)
file_contents = self._config_file_parser.serialize(config_items)
for output_file_path in output_file_paths:
with open(output_file_path, "w") as output_file:
output_file.write(file_contents)
message = "Wrote config file to " + ", ".join(output_file_paths)
if exit_after:
self.exit(0, message)
else:
print(message) | Write the given settings to output files.
Args:
parsed_namespace: namespace object created within parse_known_args()
output_file_paths: any number of file paths to write the config to
exit_after: whether to exit the program after writing the config files |
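In ConfigArgParse this method is normally reached through an option flagged as a config-file writer; a hedged sketch of that wiring (the option and file names are arbitrary):

import configargparse

parser = configargparse.ArgParser(default_config_files=["app.conf"])
parser.add_argument("--port", type=int, default=8080)
# An option marked this way makes parse_args() call write_config_file() and exit.
parser.add_argument("-w", "--write-out-config-file", is_write_out_config_file_arg=True)

options = parser.parse_args()  # e.g. `prog --port 9000 -w app.conf` writes app.conf and exits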
20,688 | def find_threads_by_name(self, name, bExactMatch = True):
found_threads = list()
if name is None:
for aThread in self.iter_threads():
if aThread.get_name() is None:
found_threads.append(aThread)
elif bExactMatch:
for aThread in self.iter_threads():
if aThread.get_name() == name:
found_threads.append(aThread)
else:
for aThread in self.iter_threads():
t_name = aThread.get_name()
if t_name is not None and name in t_name:
found_threads.append(aThread)
return found_threads | Find threads by name, using different search methods.
@type name: str, None
@param name: Name to look for. Use C{None} to find nameless threads.
@type bExactMatch: bool
@param bExactMatch: C{True} if the name must be
B{exactly} as given, C{False} if the name can be
loosely matched.
This parameter is ignored when C{name} is C{None}.
@rtype: list( L{Thread} )
@return: All threads matching the given name. |
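A hedged usage sketch against WinAppDbg's Process container (Windows only; the PID and thread name are placeholders):

from winappdbg import Process

process = Process(1234)  # hypothetical PID of an already-running target
# Loose matching: returns every thread whose name contains "worker".
for thread in process.find_threads_by_name("worker", bExactMatch=False):
    print(thread.get_tid(), thread.get_name())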
20,689 | def main(self, c):
conj = self.conjugate.main(c)
mult = self.complex_mult.main(c, conj)
angle = self.angle.main(mult)
self.y = self.GAIN_SFIX * angle
return self.y | :type c: Complex
:rtype: Sfix |
20,690 | def get_object(self):
obj = super(PublishActionView, self).get_object()
if obj:
if not hasattr(obj, 'publish'):
raise http.Http404
return obj | Get the object for publishing
Raises a http404 error if the object is not found. |
20,691 | def pset_field(item_type, optional=False, initial=()):
return _sequence_field(CheckedPSet, item_type, optional,
initial) | Create checked ``PSet`` field.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
:param initial: Initial value to pass to factory if no value is given
for the field.
:return: A ``field`` containing a ``CheckedPSet`` of the given type. |
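A hedged usage sketch of the checked-set field pattern, shown with pyrsistent's own pset_field, which this helper appears to mirror:

from pyrsistent import PClass, pset_field

class TagSet(PClass):
    # Every element must be a str; adding anything else raises a type error.
    tags = pset_field(str, initial=("draft",))

record = TagSet()
record2 = record.set(tags=record.tags.add("published"))
print(sorted(record2.tags))  # ['draft', 'published']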
20,692 | def redirect_to():
args_dict = request.args.items()
args = CaseInsensitiveDict(args_dict)
response = app.make_response("")
response.status_code = 302
if "status_code" in args:
status_code = int(args["status_code"])
if status_code >= 300 and status_code < 400:
response.status_code = status_code
response.headers["Location"] = args["url"].encode("utf-8")
return response | 302/3XX Redirects to the given URL.
---
tags:
- Redirects
produces:
- text/html
get:
parameters:
- in: query
name: url
type: string
required: true
- in: query
name: status_code
type: int
post:
consumes:
- application/x-www-form-urlencoded
parameters:
- in: formData
name: url
type: string
required: true
- in: formData
name: status_code
type: int
required: false
patch:
consumes:
- application/x-www-form-urlencoded
parameters:
- in: formData
name: url
type: string
required: true
- in: formData
name: status_code
type: int
required: false
put:
consumes:
- application/x-www-form-urlencoded
parameters:
- in: formData
name: url
type: string
required: true
- in: formData
name: status_code
type: int
required: false
responses:
302:
description: A redirection. |
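A quick client-side sketch of exercising this endpoint against the public httpbin.org deployment (swap in your own host if you run the app locally):

import requests

resp = requests.get(
    "https://httpbin.org/redirect-to",
    params={"url": "https://example.com/", "status_code": 307},
    allow_redirects=False,  # keep the redirect response so we can inspect it
)
print(resp.status_code)          # 307
print(resp.headers["Location"])  # https://example.com/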
20,693 | def inspect_built_image(self):
logger.info("inspecting built image ", self.image_id)
self.ensure_is_built()
inspect_data = self.tasker.inspect_image(self.image_id)
return inspect_data | inspect built image
:return: dict |
20,694 | def get(self):
self.lock.acquire()
try:
c = self.conn.popleft()
yield c
except self.exc_classes:
gevent.spawn_later(1, self._addOne)
raise
except:
self.conn.append(c)
self.lock.release()
raise
else:
self.conn.append(c)
self.lock.release() | Get a connection from the pool, to make and receive traffic.
If the connection fails for any reason (socket.error), it is dropped
and a new one is scheduled. Please use @retry as a way to automatically
retry whatever operation you were performing. |
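A hedged usage sketch, assuming get() is decorated with contextlib.contextmanager as in the usual gevent pool recipe; the pool class name and the socket calls below are placeholders:

pool = ConnectionPool(size=5)  # hypothetical constructor for the surrounding pool class

def fetch_status():
    with pool.get() as conn:   # borrowed from the deque, appended back on success
        conn.sendall(b"PING\r\n")
        return conn.recv(64)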
20,695 | def task(self, task_name):
return Task(uri=':'.join((self._engine_name, task_name)), cwd=self._cwd) | Returns an ENVI Py Engine Task object. See ENVI Py Engine Task for examples.
:param task_name: The name of the task to retrieve.
:return: An ENVI Py Engine Task object. |
20,696 | def apply(self, func, *args, **kwargs):
self._prep_pandas_groupby()
def key_by_index(data):
for key, row in data.iterrows():
yield (key, pd.DataFrame.from_dict(
dict([(key, row)]), orient='index'))
myargs = self._myargs
mykwargs = self._mykwargs
regroupedRDD = self._distributedRDD.mapValues(
lambda data: data.groupby(*myargs, **mykwargs))
appliedRDD = regroupedRDD.map(
lambda key_data: key_data[1].apply(func, *args, **kwargs))
reKeyedRDD = appliedRDD.flatMap(key_by_index)
dataframe = self._sortIfNeeded(reKeyedRDD).values()
return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx) | Apply the provided function and combine the results together in the
same way as apply from groupby in pandas.
This returns a DataFrame. |
20,697 | def __bindings(self):
self.textctrl.Bind(wx.EVT_TEXT, self.OnText)
self.fontbutton.Bind(wx.EVT_BUTTON, self.OnFont)
self.Bind(csel.EVT_COLOURSELECT, self.OnColor) | Binds events to handlers |
20,698 | def get_grouped_translations(instances, **kwargs):
grouped_translations = collections.defaultdict(list)
if not instances:
return grouped_translations
if not isinstance(instances, collections.Iterable):
instances = [instances]
if isinstance(instances, QuerySet):
model = instances.model
else:
model = instances[0]._meta.model
instances_ids = []
for instance in instances:
instances_ids.append(instance.pk)
if instance._meta.model != model:
raise Exception(
"You cannot use different model instances, only one authorized."
)
from .models import Translation
from .mixins import ModelMixin
decider = model._meta.linguist.get("decider", Translation)
identifier = model._meta.linguist.get("identifier", None)
chunks_length = kwargs.get("chunks_length", None)
populate_missing = kwargs.get("populate_missing", True)
if identifier is None:
raise Exception("You must define an identifier in the model's linguist metadata.")
lookup = dict(identifier=identifier)
for kwarg in ("field_names", "languages"):
value = kwargs.get(kwarg, None)
if value is not None:
if not isinstance(value, (list, tuple)):
value = [value]
lookup["%s__in" % kwarg[:-1]] = value
if chunks_length is not None:
translations_qs = []
for ids in utils.chunks(instances_ids, chunks_length):
ids_lookup = copy.copy(lookup)
ids_lookup["object_id__in"] = ids
translations_qs.append(decider.objects.filter(**ids_lookup))
translations = itertools.chain.from_iterable(translations_qs)
else:
lookup["object_id__in"] = instances_ids
translations = decider.objects.filter(**lookup)
for translation in translations:
grouped_translations[translation.object_id].append(translation)
return grouped_translations | Takes instances and returns grouped translations ready to
be set in cache. |
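The chunks_length branch above leans on a small chunking helper; a generic sketch of what utils.chunks presumably does (the real implementation may differ):

def chunks(items, size):
    # Yield successive fixed-size slices of a list; the last slice may be shorter.
    for start in range(0, len(items), size):
        yield items[start:start + size]

print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]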
20,699 | def _scan_for_tokens(contents):
scanner = re.Scanner([
# Begin/end of a single-quoted literal spanning a line boundary
(r"(?<![^\s\(])[\']([^\']|\\[\'])*$",
lambda s, t: (TokenType.BeginSingleQuotedLiteral, t)),
(r"[^\s]*(?<!\\)[\'](?![^\s\)])",
lambda s, t: (TokenType.EndSingleQuotedLiteral, t)),
# Assumed patterns for '#'-prefixed comment/RST tokens; the exact originals are uncertain
(r"#\.rst:$", lambda s, t: (TokenType.BeginRSTComment, t)),
(r"#\[=*\[\.rst:$", lambda s, t: (TokenType.BeginInlineRST, t)),
(r"#\]=*\]$", lambda s, t: (TokenType.EndInlineRST, t)),
(r"#", lambda s, t: (TokenType.Comment, t)),
# Catch-all for unquoted literals, including compound statements
(r"([^\s\(\)]+|[^\s\(]*[^\)]|[^\(][^\s\)]*)",
lambda s, t: (TokenType.UnquotedLiteral, t))
])
tokens_return = []
lines = contents.splitlines(True)
lineno = 0
for line in lines:
lineno += 1
col = 1
tokens, remaining = scanner.scan(line)
if remaining != "":
msg = "Unknown tokens found on line {0}: {1}".format(lineno,
remaining)
raise RuntimeError(msg)
for token_type, token_contents in tokens:
tokens_return.append(Token(type=token_type,
content=token_contents,
line=lineno,
col=col))
col += len(token_contents)
return tokens_return | Scan a string for tokens and return immediate form tokens. |
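re.Scanner is a small, largely undocumented stdlib helper; a minimal standalone example of how it drives a tokenizer like the one above:

import re

# Each (pattern, action) pair returns a (kind, text) tuple; scan() also reports leftovers.
scanner = re.Scanner([
    (r"[A-Za-z_]\w*", lambda s, t: ("word", t)),
    (r"\d+",          lambda s, t: ("number", t)),
    (r"\s+",          None),  # None silently drops the match (skip whitespace)
])
tokens, remaining = scanner.scan("set x 42")
print(tokens)     # [('word', 'set'), ('word', 'x'), ('number', '42')]
print(remaining)  # '' when the whole string was tokenized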