code (string, lengths 51 to 2.38k) | docstring (string, lengths 4 to 15.2k)
---|---|
def worker_stopped(name, workers=None, profile='default'):
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_stop', name, workers, profile
)
|
Stop all the workers in the modjk load balancer
Example:
.. code-block:: yaml
loadbalancer:
modjk.worker_stopped:
- workers:
- app1
- app2
|
def get_dbcollection_with_es(self, **kwargs):
es_objects = self.get_collection_es()
db_objects = self.Model.filter_objects(es_objects)
return db_objects
|
Get DB objects collection by first querying ES.
|
def retrieve(url):
try:
pem_data = urlopen(url).read()
except (ValueError, HTTPError):
warnings.warn('Certificate URL is invalid.')
return False
if sys.version >= '3':
try:
pem_data = pem_data.decode()
except UnicodeDecodeError:
warnings.warn('Certificate encoding is not utf-8.')
return False
return _parse_pem_data(pem_data)
|
Retrieve and parse PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
list or bool: If url is valid, returns the certificate chain as a list
of cryptography.hazmat.backends.openssl.x509._Certificate
certificates where certs[0] is the first certificate in the file; if
url is invalid, returns False.
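A minimal sketch of what the `_parse_pem_data` helper could look like, assuming the `cryptography` package implied by the return type above; the project's actual helper may differ:
from cryptography import x509
from cryptography.hazmat.backends import default_backend

def _parse_pem_data(pem_data):
    # Split the PEM bundle on the end-of-certificate marker and parse each part.
    marker = '-----END CERTIFICATE-----'
    chunks = [part + marker for part in pem_data.split(marker) if part.strip()]
    return [x509.load_pem_x509_certificate(chunk.encode('utf-8'), default_backend())
            for chunk in chunks]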
|
def get_player_summaries(players, **kwargs):
if (isinstance(players, list)):
params = {'steamids': ','.join(str(p) for p in players)}
elif (isinstance(players, int)):
params = {'steamids': players}
else:
raise ValueError("The players input needs to be a list or int")
return make_request("GetPlayerSummaries", params, version="v0002",
base="http://api.steampowered.com/ISteamUser/", **kwargs)
|
Get players' Steam profiles from their Steam IDs
|
def compare_config(self):
diff = self.device.cu.diff()
if diff is None:
return ''
else:
return diff.strip()
|
Compare candidate config with running.
|
def clean_videos(self):
if self.videos:
self.videos = [int(v) for v in self.videos if v is not None and is_valid_digit(v)]
|
Validates that all values in the video list are integer ids and removes all None values.
|
def has_permission(self, request, view):
user_filter = self._get_user_filter(request)
if not user_filter:
return True
username_param = get_username_param(request)
allowed = user_filter == username_param
if not allowed:
log.warning(
u"Permission JwtHasUserFilterForRequestedUser: user_filter %s doesn't match username %s.",
user_filter,
username_param,
)
return allowed
|
If the JWT has a user filter, verify that the filtered
user value matches the user in the URL.
|
def models_max_input_output_length(models: List[InferenceModel],
num_stds: int,
forced_max_input_len: Optional[int] = None,
forced_max_output_len: Optional[int] = None) -> Tuple[int, Callable]:
max_mean = max(model.length_ratio_mean for model in models)
max_std = max(model.length_ratio_std for model in models)
supported_max_seq_len_source = min((model.max_supported_seq_len_source for model in models
if model.max_supported_seq_len_source is not None),
default=None)
supported_max_seq_len_target = min((model.max_supported_seq_len_target for model in models
if model.max_supported_seq_len_target is not None),
default=None)
training_max_seq_len_source = min(model.training_max_seq_len_source for model in models)
return get_max_input_output_length(supported_max_seq_len_source,
supported_max_seq_len_target,
training_max_seq_len_source,
length_ratio_mean=max_mean,
length_ratio_std=max_std,
num_stds=num_stds,
forced_max_input_len=forced_max_input_len,
forced_max_output_len=forced_max_output_len)
|
Returns a function to compute maximum output length given a fixed number of standard deviations as a
safety margin, and the current input length.
Mean and std are taken from the model with the largest values to allow proper ensembling of models
trained on different data sets.
:param models: List of models.
:param num_stds: Number of standard deviations to add as a safety margin. If -1, returned maximum output lengths
will always be 2 * input_length.
:param forced_max_input_len: An optional overwrite of the maximum input length.
:param forced_max_output_len: An optional overwrite of the maximum output length.
:return: The maximum input length and a function to get the output length given the input length.
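An illustrative sketch, not the library's actual implementation, of a `get_max_input_output_length` helper consistent with this description: the output-length factor is the mean length ratio plus `num_stds` standard deviations, with a negative `num_stds` falling back to twice the input length:
import math
from typing import Callable, Optional, Tuple

def get_max_input_output_length(supported_max_seq_len_source: Optional[int],
                                supported_max_seq_len_target: Optional[int],
                                training_max_seq_len_source: int,
                                length_ratio_mean: float,
                                length_ratio_std: float,
                                num_stds: int,
                                forced_max_input_len: Optional[int] = None,
                                forced_max_output_len: Optional[int] = None) -> Tuple[int, Callable[[int], int]]:
    # Safety factor: mean length ratio plus num_stds standard deviations;
    # a negative num_stds falls back to twice the input length.
    factor = 2.0 if num_stds < 0 else length_ratio_mean + num_stds * length_ratio_std

    candidates = [x for x in (supported_max_seq_len_source, training_max_seq_len_source) if x is not None]
    max_input_len = forced_max_input_len if forced_max_input_len is not None else min(candidates)

    def get_max_output_length(input_length: int) -> int:
        if forced_max_output_len is not None:
            return forced_max_output_len
        output_len = int(math.ceil(factor * input_length))
        if supported_max_seq_len_target is not None:
            output_len = min(output_len, supported_max_seq_len_target)
        return output_len

    return max_input_len, get_max_output_length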
|
def load_manifest_file(client, bucket, schema, versioned, ifilters, key_info):
yield None
with tempfile.NamedTemporaryFile() as fh:
client.download_fileobj(Bucket=bucket, Key=key_info['key'], Fileobj=fh)
fh.seek(0)
reader = csv.reader(gzip.GzipFile(fileobj=fh, mode='r'))
for key_set in chunks(reader, 1000):
keys = []
for kr in key_set:
k = kr[1]
if inventory_filter(ifilters, schema, kr):
continue
k = unquote_plus(k)
if versioned:
if kr[3] == 'true':
keys.append((k, kr[2], True))
else:
keys.append((k, kr[2]))
else:
keys.append(k)
yield keys
|
Given an inventory csv file, return an iterator over keys
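The loop above relies on a `chunks` helper; a minimal sketch of the assumed behavior (successive lists of at most `size` items drawn from an iterable):
import itertools

def chunks(iterable, size):
    # Yield successive lists of at most `size` items drawn from `iterable`.
    it = iter(iterable)
    while True:
        batch = list(itertools.islice(it, size))
        if not batch:
            return
        yield batch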
|
def load_private_key(pem_path, passphrase_bytes=None):
with open(pem_path, "rb") as f:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
data=f.read(),
password=passphrase_bytes,
backend=cryptography.hazmat.backends.default_backend(),
)
|
Load private key from PEM encoded file
|
def outputtemplate(self, template_id):
for profile in self.profiles:
for outputtemplate in profile.outputtemplates():
if outputtemplate.id == template_id:
return outputtemplate
raise KeyError("Outputtemplate " + template_id + " not found")
|
Get an output template by ID
|
def point_stokes(self, context):
(ls, us), (lt, ut), (l, u) = context.array_extents(context.name)
data = np.empty(context.shape, context.dtype)
data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:]
return data
|
Supply point source stokes parameters to montblanc
|
def is_binary(self):
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if b'\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
|
Return true if this is a binary file.
|
def steepest_descent(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
projection=None, callback=None):
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `grad` {!r}'
''.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
grad_x = grad.range.element()
for _ in range(maxiter):
grad(x, out=grad_x)
dir_derivative = -grad_x.norm() ** 2
if np.abs(dir_derivative) < tol:
return
step = line_search(x, -grad_x, dir_derivative)
x.lincomb(1, x, -step, grad_x)
if projection is not None:
projection(x)
if callback is not None:
callback(x)
|
r"""Steepest descent method to minimize an objective function.
General implementation of steepest decent (also known as gradient
decent) for solving
.. math::
\min f(x)
The algorithm is intended for unconstrained problems. It needs line
search in order to guarantee convergence. With appropriate line search,
it can also be used for constrained problems where one wants to
minimize over some given set :math:`C`. This can be done by defining
:math:`f(x) = \infty` for :math:`x \notin C`, or by providing a
``projection`` function that projects the iterates on :math:`C`.
The algorithm is described in [BV2004], section 9.3--9.4
(`book available online
<http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
[GNS2009], Section 12.2, and wikipedia
`Gradient_descent
<https://en.wikipedia.org/wiki/Gradient_descent>`_.
Parameters
----------
f : `Functional`
Goal functional. Needs to have ``f.gradient``.
x : ``f.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
projection : callable, optional
Function that can be used to modify the iterates in each iteration,
for example enforcing positivity. The function should take one
argument and modify it in-place.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate
See Also
--------
odl.solvers.iterative.iterative.landweber :
Optimized solver for the case ``f(x) = ||Ax - b||_2^2``
odl.solvers.iterative.iterative.conjugate_gradient :
Optimized solver for the case ``f(x) = x^T Ax - 2 x^T b``
References
----------
[BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
Cambridge University Press, 2004.
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
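For readers outside ODL, a plain-NumPy illustration of the same update rule, x_{k+1} = x_k - step * grad f(x_k), with a fixed step; the names here are illustrative and not the ODL API:
import numpy as np

def steepest_descent_np(grad_f, x, step=0.1, maxiter=1000, tol=1e-16):
    # x_{k+1} = x_k - step * grad_f(x_k); stop once ||grad_f(x)||^2 < tol.
    x = np.asarray(x, dtype=float).copy()
    for _ in range(maxiter):
        g = grad_f(x)
        if np.dot(g, g) < tol:
            break
        x = x - step * g
    return x

# Minimize f(x) = ||x||^2 / 2, whose gradient is x itself.
x_min = steepest_descent_np(lambda x: x, [3.0, -4.0])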
|
def modify(self, *, sort=None, purge=False, done=None):
return self._modifyInternal(sort=sort, purge=purge, done=done)
|
Calls Model._modifyInternal after loading the database.
|
def ParseUserEngagedRow(
self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = WindowsTimelineUserEngagedEventData()
event_data.package_identifier = self._GetRowValue(
query_hash, row, 'PackageName')
payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload'))
payload_json_string = payload_json_bytes.decode('utf-8')
payload = json.loads(payload_json_string)
if 'reportingApp' in payload:
event_data.reporting_app = payload['reportingApp']
if 'activeDurationSeconds' in payload:
event_data.active_duration_seconds = int(payload['activeDurationSeconds'])
timestamp = self._GetRowValue(query_hash, row, 'StartTime')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a timeline row that describes a user interacting with an app.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
def find_executable(executable_name):
if six.PY3:
executable_abs = shutil.which(executable_name)
else:
import distutils.spawn
executable_abs = distutils.spawn.find_executable(executable_name)
return executable_abs
|
Tries to find an executable in the PATH environment variable.
It uses the ``shutil.which`` method in Python 3 and the
``distutils.spawn.find_executable`` method in Python 2.7 to find the
absolute path to the named executable.
:param executable_name: name of the executable
:returns: Returns the absolute path to the executable or None if not found.
|
def creating_schema_and_index(self, models, func):
waiting_models = []
self.base_thread.do_with_submit(func, models, waiting_models, threads=self.threads)
if waiting_models:
print("WAITING MODELS ARE CHECKING...")
self.creating_schema_and_index(waiting_models, func)
|
Executes the given function with the given models.
Args:
models: models to execute
func: function name to execute
Returns:
|
def put_scancode(self, scancode):
if not isinstance(scancode, baseinteger):
raise TypeError("scancode can only be an instance of type baseinteger")
self._call("putScancode",
in_p=[scancode])
|
Sends a scancode to the keyboard.
in scancode of type int
raises :class:`VBoxErrorIprtError`
Could not send scan code to virtual keyboard.
|
def to_string(self, verbose=0, title=None, **kwargs):
from pprint import pformat
s = pformat(self, **kwargs)
if title is not None:
return "\n".join([marquee(title, mark="="), s])
return s
|
String representation. kwargs are passed to `pprint.pformat`.
Args:
verbose: Verbosity level
title: Title string.
|
def apply(
self, doc_loader, pdf_path=None, clear=True, parallelism=None, progress_bar=True
):
super(Parser, self).apply(
doc_loader,
pdf_path=pdf_path,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
|
Run the Parser.
:param doc_loader: An iterable of ``Documents`` to parse. Typically,
one of Fonduer's document preprocessors.
:param pdf_path: The path to the PDF documents, if any. This path will
override the one used in initialization, if provided.
:param clear: Whether or not to clear the labels table before applying
these LFs.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
|
def check_rates(self, rates, base):
if "rates" not in rates:
raise RuntimeError("%s: 'rates' not found in results" % self.name)
if "base" not in rates or rates["base"] != base or base not in rates["rates"]:
self.log(logging.WARNING, "%s: 'base' not found in results", self.name)
self.rates = rates
|
Local helper function for validating rates response
|
def clear_breakpoint(self, filename, lineno):
clear_breakpoint(filename, lineno)
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
index = self.is_file_opened(filename)
if index is not None:
editorstack.data[index].editor.debugger.toogle_breakpoint(
lineno)
|
Remove a single breakpoint
|
def get_items_by_banks(self, bank_ids):
item_list = []
for bank_id in bank_ids:
item_list += list(
self.get_items_by_bank(bank_id))
return objects.ItemList(item_list)
|
Gets the list of ``Items`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.assessment.ItemList) - list of items
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - assessment failure
*compliance: mandatory -- This method must be implemented.*
|
def pyxb_to_dict(rp_pyxb):
return {
'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')),
'num': _get_as_int(rp_pyxb),
'block': _get_as_set(rp_pyxb, 'block'),
'pref': _get_as_set(rp_pyxb, 'pref'),
}
|
Convert ReplicationPolicy PyXB object to a normalized dict.
Args:
rp_pyxb: ReplicationPolicy to convert.
Returns:
dict : Replication Policy as normalized dict.
Example::
{
'allowed': True,
'num': 3,
'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'},
'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'},
}
|
def get_encoder_from_vocab(vocab_filepath):
if not tf.gfile.Exists(vocab_filepath):
raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))
tf.logging.info("Found vocab file: %s", vocab_filepath)
encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
return encoder
|
Get encoder from vocab file.
If vocab is not found in output dir, it will be copied there by
copy_vocab_to_output_dir to clarify the vocab used to generate the data.
Args:
vocab_filepath: path to vocab, either local or cns
Returns:
A SubwordTextEncoder vocabulary object. None if the output_parallel_text
is set.
|
def set_entry_points(self, names):
names = util.return_set(names)
self.entry_point_names = names
|
Sets the internal collection of entry points to be
equal to `names`.
`names` can be a single object or an iterable, but
must be a string or an iterable of strings.
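A minimal sketch of the normalization that `util.return_set` is assumed to perform here; the actual helper may behave differently:
def return_set(names):
    # A single string becomes a one-element set; any other iterable of
    # strings is materialized into a set.
    if isinstance(names, str):
        return {names}
    return set(names)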
|
def freq_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):
return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[0]
|
Returns QNM frequency for the given mass and spin and mode.
Parameters
----------
final_mass : float or array
Mass of the black hole (in solar masses).
final_spin : float or array
Dimensionless spin of the final black hole.
l : int or array, optional
l-index of the harmonic. Default is 2.
m : int or array, optional
m-index of the harmonic. Default is 2.
nmodes : int, optional
The number of overtones to generate. Default is 1.
Returns
-------
float or array
The frequency of the QNM(s), in Hz. If only a single mode is requested
(and mass, spin, l, and m are not arrays), this will be a float. If
multiple modes requested, will be an array with shape
``[input shape x] nmodes``, where ``input shape`` is the broadcasted
shape of the inputs.
|
def _parse_default(self, target):
if not isinstance(target, (list, tuple)):
k, v, t = target, None, lambda x: x
elif len(target) == 1:
k, v, t = target[0], None, lambda x: x
elif len(target) == 2:
k, v, t = target[0], target[1], lambda x: x
elif len(target) > 2:
k, v, t = target[0], target[1], target[2]
else:
k = None
if not isinstance(k, string_types):
msg = "{} is not a valid target, (name, default) expected.".format(target)
raise ValueError(msg)
return k, v, t
|
Helper function to parse default values.
|
def _BuildKeyHierarchy(self, subkeys, values):
if subkeys:
for registry_key in subkeys:
name = registry_key.name.upper()
if name in self._subkeys:
continue
self._subkeys[name] = registry_key
registry_key._key_path = key_paths.JoinKeyPath([
self._key_path, registry_key.name])
if values:
for registry_value in values:
name = registry_value.name.upper()
if name in self._values:
continue
self._values[name] = registry_value
|
Builds the Windows Registry key hierarchy.
Args:
subkeys (list[FakeWinRegistryKey]): list of subkeys.
values (list[FakeWinRegistryValue]): list of values.
|
def truncate(self, path, length, fh=None):
"Download existing path, truncate and reupload"
try:
f = self._getpath(path)
except JFS.JFSError:
raise OSError(errno.ENOENT, '')
if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted():
raise OSError(errno.ENOENT)
data = StringIO(f.read())
data.truncate(length)
try:
self.client.up(path, data)
self._dirty(path)
return ESUCCESS
except:
raise OSError(errno.ENOENT, '')
|
Download existing path, truncate and reupload
|
def get_all_delivery_notes(self, params=None):
if not params:
params = {}
return self._iterate_through_pages(
self.get_delivery_notes_per_page,
resource=DELIVERY_NOTES,
**{'params': params}
)
|
Get all delivery notes
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded, it will throw an exception and you will get nothing.
:param params: search params
:return: list
|
def todo_results_changed(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
index = editorstack.get_stack_index()
if index != -1:
filename = editorstack.data[index].filename
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_todo_results(filename, results)
self.update_todo_actions()
|
Synchronize todo results between editorstacks
Refresh todo list navigation buttons
|
def send_response(self, msgid, error=None, result=None):
msg = self._encoder.create_response(msgid, error, result)
self._send_message(msg)
|
Send a response
|
def get_next_file_path(self, service, operation):
base_name = '{0}.{1}'.format(service, operation)
if self.prefix:
base_name = '{0}.{1}'.format(self.prefix, base_name)
LOG.debug('get_next_file_path: %s', base_name)
next_file = None
serializer_format = None
index = self._index.setdefault(base_name, 1)
while not next_file:
file_name = os.path.join(
self._data_path, base_name + '_{0}'.format(index))
next_file, serializer_format = self.find_file_format(file_name)
if next_file:
self._index[base_name] += 1
elif index != 1:
index = 1
self._index[base_name] = 1
else:
raise IOError('response file ({0}.[{1}]) not found'.format(
file_name, "|".join(Format.ALLOWED)))
return next_file, serializer_format
|
Returns a tuple with the next file to read and the serializer
format used
|
def build_absolute_uri(self, uri):
request = self.context.get('request', None)
return (
request.build_absolute_uri(uri) if request is not None else uri
)
|
Return a fully qualified absolute url for the given uri.
|
def list_subnets(conn=None, call=None, kwargs=None):
if call == 'action':
raise SaltCloudSystemExit(
'The list_subnets function must be called with '
'-f or --function.'
)
if conn is None:
conn = get_conn()
if kwargs is None or (isinstance(kwargs, dict) and 'network' not in kwargs):
raise SaltCloudSystemExit(
'A `network` must be specified'
)
return conn.list_subnets(filters={'network': kwargs['network']})
|
List subnets in a virtual network
network
network to list subnets of
.. code-block:: bash
salt-cloud -f list_subnets myopenstack network=salt-net
|
def validate_examples(example_file):
def test_example(raw):
example = tf.train.Example()
example.ParseFromString(raw)
pi = np.frombuffer(example.features.feature['pi'].bytes_list.value[0], np.float32)
value = example.features.feature['outcome'].float_list.value[0]
assert abs(pi.sum() - 1) < 1e-4, pi.sum()
assert value in (-1, 1), value
opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
for record in tqdm(tf.python_io.tf_record_iterator(example_file, opts)):
test_example(record)
|
Validate that examples are well formed.
Pi should sum to 1.0
value should be {-1,1}
Usage:
validate_examples("../data/300.tfrecord.zz")
|
def states(self, states):
if not isinstance(states, dict):
raise TypeError("states must be of type dict")
if [state_id for state_id, state in states.items() if not isinstance(state, State)]:
raise TypeError("element of container_state.states must be of type State")
if [state_id for state_id, state in states.items() if not state_id == state.state_id]:
raise AttributeError("The key of the state dictionary and the id of the state do not match")
old_states = self._states
self._states = states
for state_id, state in states.items():
try:
state.parent = self
except ValueError:
self._states = old_states
raise
for old_state in old_states.values():
if old_state not in self._states.values() and old_state.parent is self:
old_state.parent = None
|
Setter for _states field
See property
:param states: Dictionary of States
:raises exceptions.TypeError: if the states parameter is of wrong type
:raises exceptions.AttributeError: if the keys of the dictionary and the state_ids in the dictionary do not match
|
def parse_error(output_dir):
sys.stderr.seek(0)
std_err = sys.stderr.read().decode('utf-8')
err_file = os.path.join(output_dir, "eplusout.err")
if os.path.isfile(err_file):
with open(err_file, "r") as f:
ep_err = f.read()
else:
ep_err = "<File not found>"
message = "\r\n{std_err}\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(**locals())
return message
|
Add contents of stderr and eplusout.err and put it in the exception message.
:param output_dir: str
:return: str
|
def from_string(cls, s):
for num, text in cls._STATUS2STR.items():
if text == s:
return cls(num)
else:
raise ValueError("Wrong string %s" % s)
|
Return a `Status` instance from its string representation.
|
def handle_event(self, message):
needs_update = 0
for zone in self.zones:
if zone in message:
_LOGGER.debug("Received message for zone: %s", zone)
self.zones[zone].update_status(message[zone])
if 'netusb' in message:
needs_update += self.handle_netusb(message['netusb'])
if needs_update > 0:
_LOGGER.debug("needs_update: %d", needs_update)
self.update_hass()
|
Dispatch all event messages
|
def safe_cd(path):
starting_directory = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(starting_directory)
|
Changes to a directory, yields, and changes back.
Additionally any error will also change the directory back.
Usage:
>>> with safe_cd('some/repo'):
... call('git status')
|
def read_serialized_rsa_pub_key(serialized):
n = None
e = None
rsa = from_hex(serialized)
pos = 0
ln = len(rsa)
while pos < ln:
tag = bytes_to_byte(rsa, pos)
pos += 1
length = bytes_to_short(rsa, pos)
pos += 2
if tag == 0x81:
e = bytes_to_long(rsa[pos:pos+length])
elif tag == 0x82:
n = bytes_to_long(rsa[pos:pos+length])
pos += length
if e is None or n is None:
logger.warning("Could not process import key")
raise ValueError('Public key deserialization failed')
return n, e
|
Reads serialized RSA pub key
TAG|len-2B|value. 81 = exponent, 82 = modulus
:param serialized:
:return: n, e
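A sketch of the inverse operation under the same assumed layout (one tag byte, a two-byte length taken here to be big-endian, then the value), handy for round-trip testing; the byte order is an assumption:
def serialize_rsa_pub_key(n, e):
    # Emit tag (1 byte), length (2 bytes, assumed big-endian), value.
    def encode(tag, value):
        body = value.to_bytes(max(1, (value.bit_length() + 7) // 8), 'big')
        return bytes([tag]) + len(body).to_bytes(2, 'big') + body
    return encode(0x81, e) + encode(0x82, n)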
|
def AppendContent(self, src_fd):
while 1:
blob = src_fd.read(self.chunksize)
if not blob:
break
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)
self.AddBlob(blob_id, len(blob))
self.Flush()
|
Create new blob hashes and append to BlobImage.
We don't support writing at arbitrary file offsets, but this method provides
a convenient way to add blobs for a new file, or append content to an
existing one.
Args:
src_fd: source file handle open for read
Raises:
IOError: if blob has already been finalized.
|
def _autocomplete(client, url_part, input_text, session_token=None,
offset=None, location=None, radius=None, language=None,
types=None, components=None, strict_bounds=False):
params = {"input": input_text}
if session_token:
params["sessiontoken"] = session_token
if offset:
params["offset"] = offset
if location:
params["location"] = convert.latlng(location)
if radius:
params["radius"] = radius
if language:
params["language"] = language
if types:
params["types"] = types
if components:
if len(components) != 1 or list(components.keys())[0] != "country":
raise ValueError("Only country components are supported")
params["components"] = convert.components(components)
if strict_bounds:
params["strictbounds"] = "true"
url = "/maps/api/place/%sautocomplete/json" % url_part
return client._request(url, params).get("predictions", [])
|
Internal handler for ``autocomplete`` and ``autocomplete_query``.
See each method's docs for arg details.
|
def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
destdb = destslab.initdb(destdbname, sourcedb.dupsort)
statdict = destslab.stat(db=destdb)
if statdict['entries'] > 0:
raise s_exc.DataAlreadyExists()
rowcount = 0
for chunk in s_common.chunks(self.scanByFull(db=sourcedb), COPY_CHUNKSIZE):
ccount, acount = destslab.putmulti(chunk, dupdata=True, append=True, db=destdb)
if ccount != len(chunk) or acount != len(chunk):
raise s_exc.BadCoreStore(mesg='Unexpected number of values written')
rowcount += len(chunk)
if progresscb is not None and 0 == (rowcount % PROGRESS_PERIOD):
progresscb(rowcount)
return rowcount
|
Copy an entire database in this slab to a new database in potentially another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method returns an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab.
|
def files(self):
tag_name = self.release['tag_name']
repo_name = self.repository['full_name']
zipball_url = self.release['zipball_url']
filename = u'{name}-{tag}.zip'.format(name=repo_name, tag=tag_name)
response = self.gh.api.session.head(zipball_url)
assert response.status_code == 302, \
u'Could not retrieve archive from GitHub: {0}'.format(zipball_url)
yield filename, zipball_url
|
Extract files to download from GitHub payload.
|
def _get_hangul_syllable_type(hangul_syllable):
if not _is_hangul_syllable(hangul_syllable):
raise ValueError("Value 0x%0.4x does not represent a Hangul syllable!" % hangul_syllable)
if not _hangul_syllable_types:
_load_hangul_syllable_types()
return _hangul_syllable_types[hangul_syllable]
|
Function for taking a Unicode scalar value representing a Hangul syllable and determining the correct value for its
Hangul_Syllable_Type property. For more information on the Hangul_Syllable_Type property see the Unicode Standard,
ch. 03, section 3.12, Conjoining Jamo Behavior.
https://www.unicode.org/versions/latest/ch03.pdf
:param hangul_syllable: Unicode scalar value representing a Hangul syllable
:return: Returns a string representing its Hangul_Syllable_Type property ("L", "V", "T", "LV" or "LVT")
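A standalone arithmetic check, independent of the module's lookup tables: for a precomposed syllable in U+AC00..U+D7A3, the type is "LV" when the trailing-consonant index is zero and "LVT" otherwise (Unicode ch. 03, section 3.12):
def hangul_syllable_type(cp):
    # Precomposed syllables occupy U+AC00..U+D7A3; the trailing-consonant
    # index is (cp - 0xAC00) % 28, zero for LV syllables.
    if not 0xAC00 <= cp <= 0xD7A3:
        raise ValueError("0x%04X is not a precomposed Hangul syllable" % cp)
    return "LV" if (cp - 0xAC00) % 28 == 0 else "LVT"

assert hangul_syllable_type(0xAC00) == "LV"   # U+AC00 HANGUL SYLLABLE GA
assert hangul_syllable_type(0xAC01) == "LVT"  # U+AC01 HANGUL SYLLABLE GAG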
|
def load_http_response(cls, http_response):
if not http_response.ok:
raise APIResponseError(http_response.text)
c = cls(http_response)
c.response = http_response
RateLimits.getRateLimits(cls.__name__).set(c.response.headers)
return c
|
This method should return an instantiated class and set its response
to the requests.Response object.
|
def param_defs(self, method):
pts = self.bodypart_types(method)
if not method.soap.input.body.wrapped:
return pts
pt = pts[0][1].resolve()
return [(c.name, c, a) for c, a in pt if not c.isattr()]
|
Get parameter definitions for document literal.
|
def metadata():
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
return yaml.safe_load(md)
|
Get the current charm metadata.yaml contents as a python object
|
def usearch61_chimera_check_ref(abundance_fp,
uchime_ref_fp,
reference_seqs_fp,
minlen=64,
output_dir=".",
remove_usearch_logs=False,
uchime_ref_log_fp="uchime_ref.log",
usearch61_minh=0.28,
usearch61_xn=8.0,
usearch61_dn=1.4,
usearch61_mindiffs=3,
usearch61_mindiv=0.8,
threads=1.0,
HALT_EXEC=False):
params = {'--minseqlength': minlen,
'--uchime_ref': abundance_fp,
'--uchimeout': uchime_ref_fp,
'--db': reference_seqs_fp,
'--minh': usearch61_minh,
'--xn': usearch61_xn,
'--dn': usearch61_dn,
'--mindiffs': usearch61_mindiffs,
'--mindiv': usearch61_mindiv,
'--strand': 'plus',
'--threads': threads
}
if not remove_usearch_logs:
params['--log'] = uchime_ref_log_fp
app = Usearch61(params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
app_result = app()
return uchime_ref_fp, app_result
|
Does reference based chimera checking with usearch61
abundance_fp: input consensus fasta file with abundance information for
each cluster.
uchime_ref_fp: output uchime filepath for reference results
reference_seqs_fp: reference fasta database for chimera checking.
minlen: minimum sequence length for usearch input fasta seqs.
output_dir: output directory
remove_usearch_logs: suppresses creation of log file.
uchime_ref_log_fp: output filepath for log file.
usearch61_minh: Minimum score (h) to be classified as chimera.
Increasing this value tends to reduce the number of false positives (and also
sensitivity).
usearch61_xn: Weight of "no" vote. Increasing this value tends to reduce the
number of false positives (and also sensitivity).
usearch61_dn: Pseudo-count prior for "no" votes (n). Increasing this
value tends to reduce the number of false positives (and also sensitivity).
usearch61_mindiffs: Minimum number of diffs in a segment. Increasing this
value tends to reduce the number of false positives while reducing
sensitivity to very low-divergence chimeras.
usearch61_mindiv: Minimum divergence, i.e. 100% - identity between the
query and closest reference database sequence. Expressed as a percentage,
so the default is 0.8%, which allows chimeras that are up to 99.2% similar
to a reference sequence.
threads: Specify number of threads used per core per CPU
HALT_EXEC: halt execution and return the command used for the app controller.
|
def set_trace(*args, **kwargs):
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back)
|
Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output.
|
def to_text(self, relative=False, indent_level=0, clean_empty_block=False):
if relative:
fwd = self.rel_path_fwd
bwd = self.rel_path_bwd
else:
fwd = self.full_path_fwd
bwd = self.full_path_bwd
indent = 4*indent_level*' '
pre = '%s%s' % (indent, fwd)
post = '%s%s' % (indent, bwd)
text = ''
for param, value in self.iterparams():
text += ' %sset %s %s\n' % (indent, param, value)
for key, block in self.iterblocks():
text += block.to_text(True, indent_level+1)
if len(text) > 0 or not clean_empty_block:
text = '%s%s%s' % (pre, text, post)
return text
|
This method returns the object model in text format. You should be able to copy&paste this text into any
device running a supported version of FortiOS.
Args:
- **relative** (bool):
* If ``True`` the text returned will assume that you are one block away
* If ``False`` the text returned will contain instructions to reach the block from the root.
- **indent_level** (int): This value is for aesthetics only. It will help format the text in blocks to\
increase readability.
- **clean_empty_block** (bool):
* If ``True`` a block without parameters or with sub_blocks without parameters will return an empty\
string
* If ``False`` a block without parameters will still return how to create it.
|
def _findSingleMemberGroups(classDictionaries):
toRemove = {}
for classDictionaryGroup in classDictionaries:
for classDictionary in classDictionaryGroup:
for name, members in list(classDictionary.items()):
if len(members) == 1:
toRemove[name] = list(members)[0]
del classDictionary[name]
return toRemove
|
Find all classes that have only one member.
|
def checksum_creation_action(target, source, env):
import crcmod
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0xFFFFFFFF, rev=False, xorOut=0)
with open(str(source[0]), 'rb') as f:
data = f.read()
data = data[:-4]
magicbin = data[-4:]
magic, = struct.unpack('<L', magicbin)
if magic != 0xBAADDAAD:
raise BuildError("Attempting to patch a file that is not a CDB binary or has the wrong size", reason="invalid magic number found", actual_magic=magic, desired_magic=0xBAADDAAD)
checksum = crc32_func(data) & 0xFFFFFFFF
with open(str(target[0]), 'w') as f:
checkhex = hex(checksum)
if checkhex[-1] == 'L':
checkhex = checkhex[:-1]
f.write("--defsym=__image_checksum=%s\n" % checkhex)
|
Create a linker command file for patching an application checksum into a firmware image
|
def full(self):
if not self.size: return False
return len(self.pq) == (self.size + self.removed_count)
|
Return True if the queue is full
|
def calc_padding(fmt, align):
remain = struct.calcsize(fmt) % align
if remain == 0:
return ""
return 'x' * (align - remain)
|
Calculate how many padding bytes needed for ``fmt`` to be aligned to
``align``.
Args:
fmt (str): :mod:`struct` format.
align (int): alignment (2, 4, 8, etc.)
Returns:
str: padding format (e.g., various number of 'x').
>>> calc_padding('b', 2)
'x'
>>> calc_padding('b', 3)
'xx'
|
def _ensure_tuple(item):
if isinstance(item, tuple):
return item
elif isinstance(item, list):
return tuple(item)
elif isinstance(item, np.ndarray):
return tuple(item.tolist())
else:
raise NotImplementedError
|
Simply ensure that the passed item is a tuple. If it is not, then
convert it if possible, or raise a NotImplementedError
Args:
item: the item that needs to become a tuple
Returns:
the item casted as a tuple
Raises:
NotImplementedError: if converting the given item to a tuple
is not implemented.
|
def process_tags(self, tag=None):
if self.downloaded is False:
raise serror("Track not downloaded, can't process tags..")
filetype = magic.from_file(self.filepath, mime=True)
if filetype != "audio/mpeg":
raise serror("Cannot process tags for file type %s." % filetype)
print("Processing tags for %s.." % self.filepath)
if tag is None:
tag = stag()
tag.load_id3(self)
tag.write_id3(self.filepath)
|
Process ID3 Tags for mp3 files.
|
def _repair_column(self):
check_for_title = True
for column_index in range(self.start[1], self.end[1]):
table_column = TableTranspose(self.table)[column_index]
column_start = table_column[self.start[0]]
if check_for_title and is_empty_cell(column_start):
self._stringify_column(column_index)
elif (isinstance(column_start, basestring) and
re.search(allregex.year_regex, column_start)):
self._check_stringify_year_column(column_index)
else:
check_for_title = False
|
Same as _repair_row but for columns.
|
def mark_backward(output_tensor, used_node_names):
op = output_tensor.op
if op.name in used_node_names:
return
used_node_names.add(op.name)
for input_tensor in op.inputs:
mark_backward(input_tensor, used_node_names)
for control_input_op in op.control_inputs:
used_node_names.add(control_input_op.name)
for input_tensor in control_input_op.inputs:
mark_backward(input_tensor, used_node_names)
|
Function to propagate backwards in the graph and mark nodes as used.
Traverses recursively through the graph from the end tensor, through the op
that generates the tensor, and then to the input tensors that feed the op.
Nodes encountered are stored in used_node_names.
Args:
output_tensor: A Tensor which we start the propagation.
used_node_names: A set of strings, storing the names of nodes we've marked as
visited.
|
def _compute_distance_term(self, C, mag, rrup):
term1 = C['b'] * rrup
term2 = - np.log(rrup + C['c'] * np.exp(C['d'] * mag))
return term1 + term2
|
Compute second and third terms in equation 1, p. 901.
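A standalone numeric check of the formula implemented above, b * rrup - ln(rrup + c * exp(d * mag)), using purely illustrative coefficients rather than any published model:
import numpy as np

C = {'b': -0.0031, 'c': 0.0055, 'd': 1.080}   # illustrative coefficients only
mag, rrup = 6.5, np.array([10.0, 50.0, 100.0])
distance_term = C['b'] * rrup - np.log(rrup + C['c'] * np.exp(C['d'] * mag))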
|
def prepare_args(self, args, transform=True):
updated_args = list(args)
if transform:
updated_args[-1] = self.transform_value(updated_args[-1])
if self.key:
updated_args.insert(-1, self.key)
return updated_args
|
Prepare args to be used by a sub-index
Parameters
----------
args: list
The whole list of arguments passed to add, check_uniqueness, get_filtered_keys...
transform: bool
If ``True``, the last entry in `args`, ie the value, will be transformed.
Else it will be kept as is.
|
def patched_packing_env(env):
old_env = pkg_resources.packaging.markers.default_environment
new_env = lambda: env
pkg_resources._vendor.packaging.markers.default_environment = new_env
try:
yield
finally:
pkg_resources._vendor.packaging.markers.default_environment = old_env
|
Monkey patch packaging.markers.default_environment
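The function above is a generator meant to be wrapped with `contextlib.contextmanager`; a self-contained sketch of the same temporary monkey-patching pattern on an arbitrary attribute (names here are illustrative):
import contextlib

@contextlib.contextmanager
def patched_attr(obj, attr, new_value):
    # Temporarily replace obj.attr, restoring the original on exit.
    old_value = getattr(obj, attr)
    setattr(obj, attr, new_value)
    try:
        yield
    finally:
        setattr(obj, attr, old_value)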
|
def convert_complex_output(out_in):
out = {}
for key, val in out_in.iteritems():
if val.data.dtype in complex_types:
rval = copy(val)
rval.data = val.data.real
out['real(%s)' % key] = rval
ival = copy(val)
ival.data = val.data.imag
out['imag(%s)' % key] = ival
else:
out[key] = val
return out
|
Convert complex values in the output dictionary `out_in` to pairs of
real and imaginary parts.
|
def _read_bks_key(cls, data, pos, store_type):
key_type = b1.unpack_from(data, pos)[0]; pos += 1
key_format, pos = BksKeyStore._read_utf(data, pos, kind="key format")
key_algorithm, pos = BksKeyStore._read_utf(data, pos, kind="key algorithm")
key_enc, pos = BksKeyStore._read_data(data, pos)
entry = BksKeyEntry(key_type, key_format, key_algorithm, key_enc, store_type=store_type)
return entry, pos
|
Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry.
|
def load_nicknames(self, file):
with open(os.path.join(main_dir, file + '.dat'), 'r') as f:
self.nicknames = json.load(f)
|
Load dict from file for random nicknames.
:param str file: filename
|
def cluster_seqs(seqs,
neighbor_join=False,
params={},
add_seq_names=True,
WorkingDir=tempfile.gettempdir(),
SuppressStderr=None,
SuppressStdout=None,
max_chars=1000000,
max_hours=1.0,
constructor=PhyloNode,
clean_up=True
):
num_seqs = len(seqs)
if num_seqs < 2:
raise ValueError("Muscle requires 2 or more sequences to cluster.")
num_chars = sum(map(len, seqs))
if num_chars > max_chars:
params["-maxiters"] = 2
params["-diags1"] = True
params["-sv"] = True
print "lots of chars, using fast align", num_chars
params["-maxhours"] = max_hours
params["-clusteronly"] = True
params["-tree1"] = get_tmp_filename(WorkingDir)
muscle_res = muscle_seqs(seqs,
params=params,
add_seq_names=add_seq_names,
WorkingDir=WorkingDir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout)
tree = DndParser(muscle_res["Tree1Out"], constructor=constructor)
if clean_up:
muscle_res.cleanUp()
return tree
|
Muscle cluster list of sequences.
seqs: either file name or list of sequence objects or list of strings or
single multiline string containing sequences.
Addl docs coming soon
|
def chi_p_from_xi1_xi2(xi1, xi2):
xi1, xi2, input_is_array = ensurearray(xi1, xi2)
chi_p = copy.copy(xi1)
mask = xi1 < xi2
chi_p[mask] = xi2[mask]
return formatreturn(chi_p, input_is_array)
|
Returns effective precession spin from xi1 and xi2.
|
def remove_from_postmortem_exclusion_list(cls, pathname, bits = None):
if bits is None:
bits = cls.bits
elif bits not in (32, 64):
raise NotImplementedError("Unknown architecture (%r bits)" % bits)
if bits == 32 and cls.bits == 64:
keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
else:
keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
try:
key = cls.registry[keyname]
except KeyError:
return
try:
del key[pathname]
except KeyError:
return
|
Removes the given filename from the exclusion list for postmortem
debugging in the Registry.
@warning: This method requires administrative rights.
@warning: Don't ever delete entries you haven't created yourself!
Some entries are set by default for your version of Windows.
Deleting them might deadlock your system under some circumstances.
For more details see:
U{http://msdn.microsoft.com/en-us/library/bb204634(v=vs.85).aspx}
@see: L{get_postmortem_exclusion_list}
@type pathname: str
@param pathname: Application pathname to remove from the postmortem
debugging exclusion list.
@type bits: int
@param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
64 bits debugger. Set to {None} for the default (L{System.bits}).
@raise WindowsError:
Raises an exception on error.
|
def get_field_values(self, fldnames, rpt_fmt=True, itemid2name=None):
row = []
for fld in fldnames:
val = getattr(self, fld, None)
if val is not None:
if rpt_fmt:
val = self._get_rpt_fmt(fld, val, itemid2name)
row.append(val)
else:
val = getattr(self.goterm, fld, None)
if rpt_fmt:
val = self._get_rpt_fmt(fld, val, itemid2name)
if val is not None:
row.append(val)
else:
self._err_fld(fld, fldnames)
if rpt_fmt:
assert not isinstance(val, list), \
"UNEXPECTED LIST: FIELD({F}) VALUE({V}) FMT({P})".format(
P=rpt_fmt, F=fld, V=val)
return row
|
Get flat namedtuple fields for one GOEnrichmentRecord.
|
def put_replication(Bucket, Role, Rules,
region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
Role = _get_role_arn(name=Role,
region=region, key=key, keyid=keyid, profile=profile)
if Rules is None:
Rules = []
elif isinstance(Rules, six.string_types):
Rules = salt.utils.json.loads(Rules)
conn.put_bucket_replication(Bucket=Bucket, ReplicationConfiguration={
'Role': Role,
'Rules': Rules
})
return {'updated': True, 'name': Bucket}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
|
Given a valid config, update the replication configuration for a bucket.
Returns {updated: true} if replication configuration was updated and returns
{updated: False} if replication configuration was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_replication my_bucket my_role [...]
|
def update_data(self):
url = ('https://www.openhumans.org/api/direct-sharing/project/'
'members/?access_token={}'.format(self.master_access_token))
results = get_all_results(url)
self.project_data = dict()
for result in results:
self.project_data[result['project_member_id']] = result
if len(result['data']) < result['file_count']:
member_data = get_page(result['exchange_member'])
final_data = member_data['data']
while member_data['next']:
member_data = get_page(member_data['next'])
final_data = final_data + member_data['data']
self.project_data[
result['project_member_id']]['data'] = final_data
return self.project_data
|
Returns data for all users including shared data files.
|
def _exec_command(self, command: str):
stdin, stdout, stderr = self._ssh.exec_command(command)
stdout.read()
stderr.read()
stdin.close()
|
Executes the command and closes the handles
afterwards.
|
def add_configuration_file(self, file_name):
logger.info('adding %s to configuration files', file_name)
if file_name not in self.configuration_files and self._inotify:
self._watch_manager.add_watch(file_name, pyinotify.IN_MODIFY)
if os.access(file_name, os.R_OK):
self.configuration_files[file_name] = SafeConfigParser()
self.configuration_files[file_name].read(file_name)
else:
logger.warn('could not read %s', file_name)
warnings.warn('could not read {}'.format(file_name), ResourceWarning)
|
Register a file path from which to read parameter values.
This method can be called multiple times to register multiple files for
querying. Files are expected to be ``ini`` formatted.
No assumptions should be made about the order that the registered files
are read and values defined in multiple files may have unpredictable
results.
**Arguments**
:``file_name``: Name of the file to add to the parameter search.
|
def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Decrypts a stream of bytes from in_stream to out_stream using mode.'
decrypter = Decrypter(mode, padding = padding)
_feed_stream(decrypter, in_stream, out_stream, block_size)
|
Decrypts a stream of bytes from in_stream to out_stream using mode.
|
def _parse_line_entry(self, line, type):
name = None
key_values = {}
if type == 'vars':
key_values = self._parse_line_vars(line)
else:
tokens = shlex.split(line.strip())
name = tokens.pop(0)
try:
key_values = self._parse_vars(tokens)
except ValueError:
self.log.warning("Unsupported vars syntax. Skipping line: {0}".format(line))
return (name, {})
return (name, key_values)
|
Parse a section entry line into its components. In case of a 'vars'
section, the first field will be None. Otherwise, the first field will
be the unexpanded host or group name the variables apply to.
For example:
[production:children]
frontend purpose="web" # The line we process
Returns:
('frontend', {'purpose': 'web'})
For example:
[production:vars]
purpose="web" # The line we process
Returns:
(None, {'purpose': 'web'})
Undocumented feature:
[prod:vars]
json_like_vars=[{'name': 'htpasswd_auth'}]
Returns:
(None, {'name': 'htpasswd_auth'})
|
def create_package_level_rst_index_file(
package_name, max_depth, modules, inner_packages=None):
if inner_packages is None:
inner_packages = []
return_text = 'Package::' + package_name
dash = '=' * len(return_text)
return_text += '\n' + dash + '\n\n'
return_text += '.. toctree::' + '\n'
return_text += ' :maxdepth: ' + str(max_depth) + '\n\n'
upper_package = package_name.split('.')[-1]
for module in modules:
if module in EXCLUDED_PACKAGES:
continue
return_text += ' ' + upper_package + os.sep + module[:-3] + '\n'
for inner_package in inner_packages:
if inner_package in EXCLUDED_PACKAGES:
continue
return_text += ' ' + upper_package + os.sep + inner_package + '\n'
return return_text
|
Function for creating the text of the index file for a package.
:param package_name: name of the package
:type package_name: str
:param max_depth: Value for max_depth in the index file.
:type max_depth: int
:param modules: list of modules in the package.
:type modules: list
:param inner_packages: list of inner packages in the package, if any.
:type inner_packages: list
:return: A text for the content of the index file.
:rtype: str
|
def _wva(values, weights):
assert len(values) == len(weights) and len(weights) > 0
return sum([mul(*x) for x in zip(values, weights)]) / sum(weights)
|
Calculates a weighted average
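A worked check of the weighted-average formula used above, sum(v_i * w_i) / sum(w_i):
values, weights = [1, 2, 3], [1, 1, 2]
assert sum(v * w for v, w in zip(values, weights)) / sum(weights) == 2.25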
|
def _get_more(collection_name, num_to_return, cursor_id):
return b"".join([
_ZERO_32,
_make_c_string(collection_name),
_pack_int(num_to_return),
_pack_long_long(cursor_id)])
|
Get an OP_GET_MORE message.
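A minimal sketch of the packing helpers the message builder assumes (little-endian integers as in the MongoDB wire protocol, plus a NUL-terminated C string); the names mirror those used above but the bodies are assumptions:
import struct

_ZERO_32 = b'\x00\x00\x00\x00'

def _make_c_string(s):
    # NUL-terminated UTF-8 string.
    return s.encode('utf-8') + b'\x00'

def _pack_int(value):
    # 32-bit signed little-endian integer.
    return struct.pack('<i', value)

def _pack_long_long(value):
    # 64-bit signed little-endian integer.
    return struct.pack('<q', value)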
|
def redo(self):
if self._undoing or self._redoing:
raise RuntimeError
if not self._redo:
return
group = self._redo.pop()
self._redoing = True
self.begin_grouping()
group.perform()
self.set_action_name(group.name)
self.end_grouping()
self._redoing = False
self.notify()
|
Performs the top group on the redo stack, if present. Creates an undo
group with the same name. Raises RuntimeError if called while undoing.
|
def findBinomialNsWithExpectedSampleMinimum(desiredValuesSorted, p, numSamples, nMax):
actualValues = [
getExpectedValue(
SampleMinimumDistribution(numSamples,
BinomialDistribution(n, p, cache=True)))
for n in xrange(nMax + 1)]
results = []
n = 0
for desiredValue in desiredValuesSorted:
while n + 1 <= nMax and actualValues[n + 1] < desiredValue:
n += 1
if n + 1 > nMax:
break
interpolated = n + ((desiredValue - actualValues[n]) /
(actualValues[n+1] - actualValues[n]))
result = (interpolated, actualValues[n], actualValues[n + 1])
results.append(result)
return results
|
For each desired value, find an approximate n for which the sample minimum
has an expected value equal to this value.
For each value, find an adjacent pair of n values whose expected sample minima
are below and above the desired value, respectively, and return a
linearly-interpolated n between these two values.
@param p (float)
The p of the binomial distribution.
@param numSamples (int)
The number of samples in the sample minimum distribution.
@return
A list of results. Each result contains
(interpolated_n, lower_value, upper_value).
where each lower_value and upper_value are the expected sample minimum for
floor(interpolated_n) and ceil(interpolated_n)
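A self-contained illustration of the interpolation step described above: given expected sample minima at adjacent n and n + 1 that bracket the desired value, the fractional n follows by linear interpolation.
n, lower, upper = 10, 4.0, 6.0   # expected sample minima at n and n + 1
desired = 4.5
interpolated_n = n + (desired - lower) / (upper - lower)
assert interpolated_n == 10.25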
|
def _get_bank_redis_key(bank):
opts = _get_redis_keys_opts()
return '{prefix}{separator}{bank}'.format(
prefix=opts['bank_prefix'],
separator=opts['separator'],
bank=bank
)
|
Return the Redis key for the bank given the name.
|
def fetch_friend_ids(self, user):
friends = self.fetch_friends(user)
friend_ids = []
for friend in friends:
friend_ids.append(friend.id)
return friend_ids
|
Fetches friend IDs from Twitter.
Return:
collection of friend ids
|
def get_package_info(self, name):
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
)
|
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
|
def verify_and_extract_time(self, log_file, division, result_name):
expected_level = constants.DIVISION_COMPLIANCE_CHECK_LEVEL.get(
division, None)
print(result_name)
if expected_level is None:
raise Exception('Unknown division: {}'.format(division))
start_time, level, dt, _, success = self.get_compliance(log_file)
print(float(start_time))
if int(level) != expected_level:
raise Exception('Error Level {} does not match needed level {}:{}'.format(
level, expected_level, log_file))
if success and dt:
return dt, start_time
else:
print('Result was not a success set to INFINITE_TIME({})'.format(
INFINITE_TIME))
return INFINITE_TIME, start_time
|
Verifies a result and returns timing.
Uses submodule mlp_compliance (https://github.com/bitfort/mlp_compliance)
Args:
log_file: Absolute path to result file.
division: open, closed
result_name: name of the benchmark, ncf, ssd, etc
Returns:
Time for the result or `INFINITE_TIME` if not a success
Raises:
Exception: If expected compliance level is not hit or cannot figure
out expected compliance level.
|
def ledger_effects(self, ledger_id, cursor=None, order='asc', limit=10):
endpoint = '/ledgers/{ledger_id}/effects'.format(ledger_id=ledger_id)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params)
|
This endpoint represents all effects that occurred in the given
ledger.
`GET /ledgers/{id}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-ledger.html>`_
:param int ledger_id: The id of the ledger to look up.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: The effects for a single ledger.
:rtype: dict
|
def __render(self, context, **kwargs):
kwargs["namespaces"] = [context, ] + kwargs.get("namespaces", []) \
+ kwargs.get("searchList", [])
kwargs["searchList"] = None
kwargs = self.filter_options(kwargs, self.engine_valid_options())
self.engine_options.update(kwargs)
return render_impl(**self.engine_options)
|
Render template.
:param context: A dict or dict-like object to instantiate given
template file
:param kwargs: Keyword arguments passed to the template engine to
render templates with specific features enabled.
:return: Rendered string
|
def update(self, ipv6s):
data = {'ips': ipv6s}
ipv6s_ids = [str(ipv6.get('id')) for ipv6 in ipv6s]
return super(ApiIPv6, self).put('api/v3/ipv6/%s/' %
';'.join(ipv6s_ids), data)
|
Method to update ipv6's
:param ipv6s: List containing ipv6's desired to be updated
:return: None
|
def get_help_usage(command):
if not command:
doc = get_primary_command_usage()
elif command in ('-a', '--all'):
subcommands = [k for k in settings.subcommands if k is not None]
available_commands = subcommands + ['help']
command_doc = '\nAvailable commands:\n{}\n'.format(
'\n'.join(' {}'.format(c) for c in sorted(available_commands)))
doc = get_primary_command_usage(command_doc)
elif command.startswith('-'):
raise ValueError("Unrecognized option '{}'.".format(command))
elif command in settings.subcommands:
subcommand = settings.subcommands[command]
doc = format_usage(subcommand.__doc__)
docopt.docopt(doc, argv=('--help',))
|
Print out a help message and exit the program.
Args:
command: If a command value is supplied then print the help message for
the command module if available. If the command is '-a' or '--all',
then print the standard help message but with a full list of
available commands.
Raises:
ValueError: Raised if the help message is requested for an invalid
command or an unrecognized option is passed to help.
|
def avroize_type(field_type, name_prefix=""):
if isinstance(field_type, MutableSequence):
for field in field_type:
avroize_type(field, name_prefix)
elif isinstance(field_type, MutableMapping):
if field_type["type"] in ("enum", "record"):
if "name" not in field_type:
field_type["name"] = name_prefix + Text(uuid.uuid4())
if field_type["type"] == "record":
avroize_type(field_type["fields"], name_prefix)
if field_type["type"] == "array":
avroize_type(field_type["items"], name_prefix)
if isinstance(field_type["type"], MutableSequence):
for ctype in field_type["type"]:
avroize_type(ctype, name_prefix)
return field_type
|
adds missing information to a type so that CWL types are valid in schema_salad.
|
def load_from_dict(dct=None, **kwargs):
dct = dct or dict()
dct.update(kwargs)
def _load_from_dict(metadata):
return dict(dct)
return _load_from_dict
|
Load configuration from a dictionary.
|
def _setTaskParsObj(self, theTask):
self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask,
self._strict, False)
self._taskParsObj.setDebugLogger(self)
self._lastSavedState = self._taskParsObj.dict()
|
Overridden version for ConfigObj. theTask can be either
a .cfg file name or a ConfigObjPars object.
|
def set_source_filter(self, source):
if isinstance(source, str if py3k else basestring) and len(source) >= 2:
self.source_filter = source
else:
raise TwitterSearchException(1009)
|
Only search for tweets entered via given source
:param source: String. Name of the source to search for. An example \
would be ``source=twitterfeed`` for tweets submitted via TwitterFeed
:raises: TwitterSearchException
|
def hash_file(path, digest=None):
digest = digest or hashlib.sha1()
with open(path, 'rb') as fd:
s = fd.read(8192)
while s:
digest.update(s)
s = fd.read(8192)
return digest.hexdigest() if PY3 else digest.hexdigest().decode('utf-8')
|
Hashes the contents of the file at the given path and returns the hash digest in hex form.
If a hashlib message digest is not supplied a new sha1 message digest is used.
|
def get_branch_container_tag(self):
if self.__prefix:
return "{0}-{1}".format(
self.__prefix,
self.__branch)
else:
return "{0}".format(self.__branch)
|
Returns the branch container tag
|
async def query_handler(service, action_type, payload, props, **kwds):
if action_type == query_action_type():
print('encountered query event {!r} '.format(payload))
result = await parse_string(payload,
service.object_resolver,
service.connection_resolver,
service.mutation_resolver,
obey_auth=False
)
reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {}
await service.event_broker.send(
payload=result,
action_type=change_action_status(action_type, success_status()),
**reply_props
)
|
This action handler interprets the payload as a query to be executed
by the api gateway service.
|
def error_response(response):
if response.status_code >= 500:
raise exceptions.GeocodioServerError
elif response.status_code == 403:
raise exceptions.GeocodioAuthError
elif response.status_code == 422:
raise exceptions.GeocodioDataError(response.json()["error"])
else:
raise exceptions.GeocodioError(
"Unknown service error (HTTP {0})".format(response.status_code)
)
|
Raises errors matching the response code
|