Unnamed: 0 | code | docstring
---|---|---|
8,800 | def add_content(obj, language, slot, content):
placeholder = obj.placeholders.get(slot=slot)
add_plugin(placeholder, TextPlugin, language, body=content) | Adds a TextPlugin with given content to given slot |
8,801 | def get_film(film_id):
result = _get(film_id, settings.FILMS)
return Film(result.content) | Return a single film |
8,802 | def _calc_min_width(self, table):
width = len(table.name)
cap = table.consumed_capacity["__table__"]
width = max(width, 4 + len("%.1f/%d" % (cap["read"], table.read_throughput)))
width = max(width, 4 + len("%.1f/%d" % (cap["write"], table.write_throughput)))
for index_name, cap in iteritems(table.consumed_capacity):
if index_name == "__table__":
continue
index = table.global_indexes[index_name]
width = max(
width,
4 + len(index_name + "%.1f/%d" % (cap["read"], index.read_throughput)),
)
width = max(
width,
4
+ len(index_name + "%.1f/%d" % (cap["write"], index.write_throughput)),
)
return width | Calculate the minimum allowable width for a table |
8,803 | def prepare_url(self, url, params):
url = to_native_string(url)
# scheme, netloc, path, query and fragment come from a URL-splitting step
# that was dropped when this snippet was extracted
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode()
if isinstance(netloc, str):
netloc = netloc.encode()
if isinstance(path, str):
path = path.encode()
if isinstance(query, str):
query = query.encode()
if isinstance(fragment, str):
fragment = fragment.encode()
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url | Prepares the given HTTP URL. |
8,804 | def delimit(delimiters, content):
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]]) | Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"' |
8,805 | def _set_lastpage(self):
self.last_page = (len(self._page_data) - 1) // self.screen.page_size | Calculate value of class attribute ``last_page``. |
8,806 | def verify_signature(certificate, signing_pub_key=None,
signing_pub_key_passphrase=None):
cert = _get_certificate_obj(certificate)
if signing_pub_key:
signing_pub_key = get_public_key(signing_pub_key,
passphrase=signing_pub_key_passphrase, asObj=True)
return bool(cert.verify(pkey=signing_pub_key) == 1) | Verify that ``certificate`` has been signed by ``signing_pub_key``
certificate:
The certificate to verify. Can be a path or string containing a
PEM formatted certificate.
signing_pub_key:
The public key to verify, can be a string or path to a PEM formatted
certificate, csr, or private key.
signing_pub_key_passphrase:
Passphrase to the signing_pub_key if it is an encrypted private key.
CLI Example:
.. code-block:: bash
salt '*' x509.verify_signature /etc/pki/mycert.pem \\
signing_pub_key=/etc/pki/myca.crt |
8,807 | def optimise_partition_multiplex(self, partitions, layer_weights=None, n_iterations=2):
if not layer_weights:
layer_weights = [1]*len(partitions)
itr = 0
diff = 0
continue_iteration = itr < n_iterations or n_iterations < 0
while continue_iteration:
diff_inc = _c_leiden._Optimiser_optimise_partition_multiplex(
self._optimiser,
[partition._partition for partition in partitions],
layer_weights)
diff += diff_inc
itr += 1
if n_iterations < 0:
continue_iteration = (diff_inc > 0)
else:
continue_iteration = itr < n_iterations
for partition in partitions:
partition._update_internal_membership()
return diff | Optimise the given partitions simultaneously.
Parameters
----------
partitions
List of :class:`~VertexPartition.MutableVertexPartition` layers to optimise.
layer_weights
List of weights of layers.
n_iterations : int
Number of iterations to run the Leiden algorithm. By default, 2 iterations
are run. If the number of iterations is negative, the Leiden algorithm is
run until an iteration in which there was no improvement.
Returns
-------
float
Improvement in quality of combined partitions, see `Notes <#notes-multiplex>`_.
.. _notes-multiplex:
Notes
-----
This method assumes that the partitions are defined for graphs with the
same vertices. The connections between the vertices may be different, but
the vertices themselves should be identical. In other words, all vertices
should have identical indices in all graphs (i.e. node `i` is assumed to be
the same node in all graphs). The quality of the overall partition is
simply the sum of the individual qualities for the various partitions,
weighted by the layer_weight. If we denote by :math:`Q_k` the quality of
layer :math:`k` and the weight by :math:`\lambda_k`, the overall quality
is then
.. math:: Q = \sum_k \lambda_k Q_k.
This is particularly useful for graphs containing negative links. When
separating the graph in two graphs, the one containing only the positive
links, and the other only the negative link, by supplying a negative weight
to the latter layer, we try to find relatively many positive links within a
community and relatively many negative links between communities. Note that
in this case it may be better to assign a node to a community to which it
is not connected so that :attr:`consider_comms` may be better set to
:attr:`leidenalg.ALL_COMMS`.
Besides multiplex graphs where each node is assumed to have a single
community, it is also useful in the case of for example multiple time
slices, or in situations where nodes can have different communities in
different slices. The package includes some special helper functions for
using :func:`optimise_partition_multiplex` in such cases, where there is a
conversion required from (time) slices to layers suitable for use in this
function.
See Also
--------
:func:`slices_to_layers`
:func:`time_slices_to_layers`
:func:`find_partition_multiplex`
:func:`find_partition_temporal`
Examples
--------
>>> G_pos = ig.Graph.SBM(100, pref_matrix=[[0.5, 0.1], [0.1, 0.5]], block_sizes=[50, 50])
>>> G_neg = ig.Graph.SBM(100, pref_matrix=[[0.1, 0.5], [0.5, 0.1]], block_sizes=[50, 50])
>>> optimiser = la.Optimiser()
>>> partition_pos = la.ModularityVertexPartition(G_pos)
>>> partition_neg = la.ModularityVertexPartition(G_neg)
>>> diff = optimiser.optimise_partition_multiplex(
... partitions=[partition_pos, partition_neg],
... layer_weights=[1,-1]) |
8,808 | def ind_nodes(self, graph=None):
if graph is None:
graph = self.graph
dependent_nodes = set(
node for dependents in six.itervalues(graph) for node in dependents
)
return [node for node in graph.keys() if node not in dependent_nodes] | Returns a list of all nodes in the graph with no dependencies. |
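A quick sketch of what the row above computes, using a hypothetical adjacency dict that maps each node to its dependents:
graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
# nodes that appear as someone's dependent: {'b', 'c', 'd'}
dependent_nodes = {n for deps in graph.values() for n in deps}
[n for n in graph if n not in dependent_nodes]  # -> ['a']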
8,809 | def transform_config_from_estimator(estimator, task_id, task_type, instance_count, instance_type, data,
data_type='S3Prefix', content_type=None, compression_type=None, split_type=None,
job_name=None, model_name=None, strategy=None, assemble_with=None, output_path=None,
output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None,
max_payload=None, tags=None, role=None, volume_kms_key=None,
model_server_workers=None, image=None, vpc_config_override=None):
model_base_config = model_config_from_estimator(instance_type=instance_type, estimator=estimator, task_id=task_id,
task_type=task_type, role=role, image=image, name=model_name,
model_server_workers=model_server_workers,
vpc_config_override=vpc_config_override)
if isinstance(estimator, sagemaker.estimator.Framework):
transformer = estimator.transformer(instance_count, instance_type, strategy, assemble_with, output_path,
output_kms_key, accept, env, max_concurrent_transforms,
max_payload, tags, role, model_server_workers, volume_kms_key)
else:
transformer = estimator.transformer(instance_count, instance_type, strategy, assemble_with, output_path,
output_kms_key, accept, env, max_concurrent_transforms,
max_payload, tags, role, volume_kms_key)
transformer.model_name = model_base_config['ModelName']
transform_base_config = transform_config(transformer, data, data_type, content_type, compression_type,
split_type, job_name)
config = {
'Model': model_base_config,
'Transform': transform_base_config
}
return config | Export Airflow transform config from a SageMaker estimator
Args:
estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to export Airflow config from.
It has to be an estimator associated with a training job.
task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or
airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG. The transform
config is built based on the training job generated in this operator.
task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. Values can be
'training', 'tuning' or None (which means training job is not from any task).
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
data (str): Input data location in S3.
data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values:
* 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as
inputs for the transform job.
* 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as
an input for the transform job.
content_type (str): MIME type of the input data (default: None).
compression_type (str): Compression type of the input data, if compressed (default: None).
Valid values: 'Gzip', None.
split_type (str): The record delimiter for the input object (default: 'None').
Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'.
job_name (str): transform job name (default: None). If not specified, one will be generated.
model_name (str): model name (default: None). If not specified, one will be generated.
strategy (str): The strategy used to decide how to batch records in a single request (default: None).
Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
output_path (str): S3 location for saving the transform result. If not specified, results are stored to
a default bucket.
output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
accept (str): The content type accepted by the endpoint deployed during the transform job.
env (dict): Environment variables to be set for use during the transform job (default: None).
max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
the training job are used for the transform job.
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
compute instance (default: None).
model_server_workers (int): Optional. The number of worker processes used by the inference server.
If None, server will use one worker per vCPU.
image (str): An container image to use for deploying the model
vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
dict: Transform config that can be directly used by SageMakerTransformOperator in Airflow. |
8,810 | def understand(self):
if self._understand is None:
self._understand = Understand(self)
return self._understand | :returns: Version understand of preview
:rtype: twilio.rest.preview.understand.Understand |
8,811 | def fix_parameters(self):
for W, b in zip(self.W_list, self.b_list):
W.fix()
b.fix() | Helper function that fixes all parameters |
8,812 | def get_correlation_matrix_from_columns(self):
header_to_column = {}
for header in self.headers:
header_to_column[header] = self.headers.index(header)
data_to_test = []
for header in self.headers_to_test:
header_column = Matrix(self.data) \
.get_column(header_to_column[header])
for i, value in enumerate(header_column):
header_column[i] = float(value)
data_to_test.append(header_column)
return self.get_correlation_matrix(data_to_test) | Computes correlation matrix of columns
:return: Correlation matrix of columns |
8,813 | def to_pandas_closed_closed(date_range, add_tz=True):
if not date_range:
return None
start = date_range.start
end = date_range.end
if start:
start = to_dt(start, mktz()) if add_tz else start
if date_range.startopen:
start += timedelta(milliseconds=1)
if end:
end = to_dt(end, mktz()) if add_tz else end
if date_range.endopen:
end -= timedelta(milliseconds=1)
return DateRange(start, end) | Pandas DateRange slicing is CLOSED-CLOSED inclusive at both ends.
Parameters
----------
date_range : `DateRange` object
converted to CLOSED_CLOSED form for Pandas slicing
add_tz : `bool`
Adds a TimeZone to the daterange start and end if it doesn't
have one.
Returns
-------
Returns a date_range with start-end suitable for slicing in pandas. |
8,814 | def index(self):
config_list = self.config
layout = self.layout
for (dirpath, dirnames, filenames) in os.walk(self.path):
layout_file = self.layout.config_filename
if layout_file in filenames:
filenames.remove(layout_file)
for f in filenames:
abs_fn = os.path.join(self.path, f)
if not self.force_index and not layout._validate_file(abs_fn):
continue
bf = BIDSFile(abs_fn, self)
match_vals = {}
for e in self.available_entities.values():
m = e.match_file(bf)
if m is None and e.mandatory:
break
if m is not None:
match_vals[e.name] = (e, m)
if match_vals:
for name, (ent, val) in match_vals.items():
bf.entities[name] = val
ent.add_file(bf.path, val)
self.files.append(bf)
# subdirectory loop header restored; it was dropped when this snippet was flattened
for d in dirnames:
d = os.path.join(dirpath, d)
if check_path_matches_patterns(d, self.layout.force_index):
self.force_index = True
else:
valid_dir = layout._validate_dir(d)
if not valid_dir and not self.layout.force_index:
continue
child_class = self._get_child_class(d)
child = child_class(d, config_list, root_node, self,
force_index=self.force_index)
if self.force_index or valid_dir:
self.children.append(child)
break | Index all files/directories below the current BIDSNode. |
8,815 | def decode_terminated(data, encoding, strict=True):
codec_info = codecs.lookup(encoding)
encoding = codec_info.name
if encoding in ("utf-8", "iso8859-1"):
index = data.find(b"\x00")
if index == -1:
res = data.decode(encoding), b""
if strict:
raise ValueError("not null terminated")
else:
return res
return data[:index].decode(encoding), data[index + 1:]
decoder = codec_info.incrementaldecoder()
r = []
for i, b in enumerate(iterbytes(data)):
c = decoder.decode(b)
if c == u"\x00":
return u"".join(r), data[i + 1:]
r.append(c)
else:
r.append(decoder.decode(b"", True))
if strict:
raise ValueError("not null terminated")
return u"".join(r), b"" | Returns the decoded data until the first NULL terminator
and all data after it.
Args:
data (bytes): data to decode
encoding (str): The codec to use
strict (bool): If True will raise ValueError in case no NULL is found
but the available data decoded successfully.
Returns:
Tuple[`text`, `bytes`]: A tuple containing the decoded text and the
remaining data after the found NULL termination.
Raises:
UnicodeError: In case the data can't be decoded.
LookupError: In case the encoding is not found.
ValueError: In case the data isn't null terminated (even if it is
encoded correctly) except if strict is False, then the decoded
string will be returned anyway. |
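A hedged usage sketch for the row above; the byte strings are illustrative:
text, rest = decode_terminated(b"abc\x00tail", "utf-8")
# text == u"abc", rest == b"tail"
text, rest = decode_terminated(b"abc", "utf-8", strict=False)
# no NULL present, so text == u"abc" and rest == b""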
8,816 | def avoid(self) -> Tuple[Tuple[int], Tuple[int]]:
to_avoid = set()
for i, state in enumerate(self.global_avoid_states):
for word_id in state.avoid():
if word_id > 0:
to_avoid.add((i, word_id))
for i, state in enumerate(self.local_avoid_states):
for word_id in state.avoid():
if word_id > 0:
to_avoid.add((i, word_id))
return tuple(zip(*to_avoid)) | Assembles a list of per-hypothesis words to avoid. The indices are (x, y) pairs into the scores
array, which has dimensions (beam_size, target_vocab_size). These values are then used by the caller
to set these items to np.inf so they won't be selected. Words to be avoided are selected by
consulting both the global trie of phrases and the sentence-specific one.
:return: Two lists of indices: the x coordinates and y coordinates. |
8,817 | def json_response(request, data):
data["messages"] = []
for msg in messages.get_messages(request):
data["messages"].append({'message': msg.message, 'level': msg.level_tag})
return HttpResponse(json.dumps(data), content_type="application/json") | Wrapper dumping `data` to a json and sending it to the user with an HttpResponse
:param django.http.HttpRequest request: The request object used to generate this response.
:param dict data: The python dictionary to return as a json
:return: The content of ``data`` serialized in json
:rtype: django.http.HttpResponse |
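A minimal sketch of calling the row above from a hypothetical Django view:
def status_view(request):
# json_response appends any pending Django messages under "messages"
return json_response(request, {"ok": True})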
8,818 | def confd_state_loaded_data_models_data_model_namespace(self, **kwargs):
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
loaded_data_models = ET.SubElement(confd_state, "loaded-data-models")
data_model = ET.SubElement(loaded_data_models, "data-model")
name_key = ET.SubElement(data_model, "name")
name_key.text = kwargs.pop('name')
namespace = ET.SubElement(data_model, "namespace")
namespace.text = kwargs.pop('namespace')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
8,819 | def _calculate_timestamps(self):
self._timestamps_data = []
if not self._is_reversed:
self._calc_timestamps(self.st_time, self.end_time)
else:
self._calc_timestamps(self.st_time, DateTime.from_hoy(8759))
self._calc_timestamps(DateTime.from_hoy(0), self.end_time) | Return a list of Ladybug DateTime in this analysis period. |
8,820 | def kill_tasks(self, app_id, scale=False, wipe=False,
host=None, batch_size=0, batch_delay=0):
def batch(iterable, size):
sourceiter = iter(iterable)
while True:
batchiter = itertools.islice(sourceiter, size)
yield itertools.chain([next(batchiter)], batchiter)
if batch_size == 0:
params = {'scale': scale, 'wipe': wipe}
if host:
params['host'] = host
response = self._do_request(
'DELETE', '/v2/apps/{app_id}/tasks'.format(app_id=app_id), params)
if "tasks" in response.json():
return self._parse_response(response, MarathonTask, is_list=True, resource_name='tasks')
else:
return response.json()
else:
tasks = self.list_tasks(
app_id, host=host) if host else self.list_tasks(app_id)
for tbatch in batch(tasks, batch_size):
killed_tasks = [self.kill_task(app_id, t.id, scale=scale, wipe=wipe)
for t in tbatch]
killed_task_ids = set(t.id for t in killed_tasks)
running_task_ids = killed_task_ids
while killed_task_ids.intersection(running_task_ids):
time.sleep(1)
running_task_ids = set(
t.id for t in self.get_app(app_id).tasks)
if batch_delay == 0:
desired_instances = self.get_app(app_id).instances
running_instances = 0
while running_instances < desired_instances:
time.sleep(1)
running_instances = sum(
t.started_at is None for t in self.get_app(app_id).tasks)
else:
time.sleep(batch_delay)
return tasks | Kill all tasks belonging to app.
:param str app_id: application ID
:param bool scale: if true, scale down the app by the number of tasks killed
:param str host: if provided, only terminate tasks on this Mesos slave
:param int batch_size: if non-zero, terminate tasks in groups of this size
:param int batch_delay: time (in seconds) to wait in between batched kills. If zero, automatically determine
:returns: list of killed tasks
:rtype: list[:class:`marathon.models.task.MarathonTask`] |
8,821 | def require_scopes_exact(self, scope_string):
num_scopes = len(_process_scopes(scope_string))
pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter(
scopes__count=num_scopes).values('pk', 'scopes__count')]
return self.filter(pk__in=pks) | :param scope_string: The required scopes.
:type scope_string: Union[str, list]
:return: The tokens with only the requested scopes.
:rtype: :class:`esi.managers.TokenQueryset` |
8,822 | def excess_sharpe(returns, factor_returns, out=None):
allocated_output = out is None
if allocated_output:
out = np.empty(returns.shape[1:])
returns_1d = returns.ndim == 1
if len(returns) < 2:
out[()] = np.nan
if returns_1d:
out = out.item()
return out
active_return = _adjust_returns(returns, factor_returns)
tracking_error = np.nan_to_num(nanstd(active_return, ddof=1, axis=0))
out = np.divide(
nanmean(active_return, axis=0, out=out),
tracking_error,
out=out,
)
if returns_1d:
out = out.item()
return out | Determines the Excess Sharpe of a strategy.
Parameters
----------
returns : pd.Series or np.ndarray
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns: float / series
Benchmark return to compare returns against.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
excess_sharpe : float
Note
-----
The excess Sharpe is a simplified Information Ratio that uses
tracking error rather than "active risk" as the denominator. |
8,823 | def get_terms_in_subset(ont, subset):
namedGraph = get_named_graph(ont)
query = .format(s=subset, g=namedGraph)
bindings = run_sparql(query)
return [(r[][],r[][]) for r in bindings] | Find all nodes in a subset.
We assume the oboInOwl encoding of subsets, and subset IDs are IRIs |
8,824 | def set_theme(self, theme_name, toplevel=None, themebg=None):
if self._toplevel is not None and toplevel is None:
toplevel = self._toplevel
if self._themebg is not None and themebg is None:
themebg = self._themebg
ThemedWidget.set_theme(self, theme_name)
color = self._get_bg_color()
if themebg is True:
self.config(background=color)
if toplevel is True:
self._setup_toplevel_hook(color) | Redirect the set_theme call to also set Tk background color |
8,825 | def _make_namespace(self) -> Namespace:
namespace = Namespace(
name=self._get_namespace_name(),
keyword=self._get_namespace_keyword(),
url=self._get_namespace_url(),
version=str(time.asctime()),
)
self.session.add(namespace)
entries = self._get_namespace_entries(namespace)
self.session.add_all(entries)
t = time.time()
log.info('committing models')
self.session.commit()
log.info('committed models in %.2f seconds', time.time() - t)
return namespace | Make a namespace. |
8,826 | def _writeData(self, config=None):
if config is None:
config = ID3SaveConfig()
if config.v2_version == 3:
frame = self._get_v23_frame(sep=config.v23_separator)
else:
frame = self
data = []
for writer in self._framespec:
try:
data.append(
writer.write(config, frame, getattr(frame, writer.name)))
except SpecError as e:
raise error(e)
for writer in self._optionalspec:
try:
data.append(
writer.write(config, frame, getattr(frame, writer.name)))
except AttributeError:
break
except SpecError as e:
raise error(e)
return b''.join(data) | Raises error |
8,827 | def check_version_info(conn, version_table, expected_version):
version_from_table = conn.execute(
sa.select((version_table.c.version,)),
).scalar()
if version_from_table is None:
version_from_table = 0
if (version_from_table != expected_version):
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version) | Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION. |
8,828 | def datetime_period(base=None, hours=None, minutes=None, seconds=None):
if base is None:
base = utcnow()
base -= timedelta(
hours = 0 if hours is None else (base.hour % hours),
minutes = (base.minute if hours else 0) if minutes is None else (base.minute % minutes),
seconds = (base.second if minutes or hours else 0) if seconds is None else (base.second % seconds),
microseconds = base.microsecond
)
return base | Round a datetime object down to the start of a defined period.
The `base` argument may be used to find the period start for an arbitrary datetime, defaults to `utcnow()`. |
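A worked call for the row above, assuming a timezone-naive datetime for clarity:
from datetime import datetime
base = datetime(2020, 1, 1, 13, 47, 26, 500000)
datetime_period(base=base, minutes=15)
# -> datetime(2020, 1, 1, 13, 45), the start of the enclosing 15-minute period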
8,829 | def init_logger(
name="",
handler_path_levels=None,
level=logging.INFO,
formatter=None,
formatter_str=None,
datefmt="%Y-%m-%d %H:%M:%S",
):
levels = {
"NOTSET": logging.NOTSET,
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
}
if not formatter:
if not formatter_str:
formatter_str = "%(asctime)s %(levelname)-5s [%(name)s] %(filename)s(%(lineno)s): %(message)s"
formatter = logging.Formatter(formatter_str, datefmt=datefmt)
logger = name if isinstance(name, logging.Logger) else logging.getLogger(str(name))
logger.setLevel(level)
handler_path_levels = handler_path_levels or [["", "INFO"]]
for each_handler in handler_path_levels:
path, handler_level = each_handler
handler = logging.FileHandler(path) if path else logging.StreamHandler()
handler.setLevel(
levels.get(handler_level.upper(), 1)
if isinstance(handler_level, str)
else handler_level
)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger | Add a default handler for logger.
Args:
name = '' or logger obj.
handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]
level = the least level for the logger.
formatter = logging.Formatter(
'%(levelname)-7s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S")
formatter_str = '%(levelname)-7s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s'
custom formatter:
%(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s |
8,830 | def get_service_instance(host, username=None, password=None, protocol=None,
port=None, mechanism='userpass', principal=None,
domain=None):
if protocol is None:
protocol = 'https'
if port is None:
port = 443
service_instance = GetSi()
if service_instance:
stub = GetStub()
if (salt.utils.platform.is_proxy() or
(hasattr(stub, 'host') and
stub.host != ':'.join([host, six.text_type(port)]))):
Disconnect(service_instance)
service_instance = None
else:
return service_instance
if not service_instance:
service_instance = _get_service_instance(host,
username,
password,
protocol,
port,
mechanism,
principal,
domain)
log.trace('Checking connection is still authenticated')
try:
service_instance.CurrentTime()
except vim.fault.NotAuthenticated:
log.trace('Session no longer authenticated. Reconnecting')
Disconnect(service_instance)
service_instance = _get_service_instance(host,
username,
password,
protocol,
port,
mechanism,
principal,
domain)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
return service_instance | Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
host
The location of the vCenter server or ESX/ESXi host.
username
The username used to login to the vCenter server or ESX/ESXi host.
Required if mechanism is ``userpass``
password
The password used to login to the vCenter server or ESX/ESXi host.
Required if mechanism is ``userpass``
protocol
Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
using the default protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
using the default port. Default port is ``443``.
mechanism
pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
Default mechanism is ``userpass``.
principal
Kerberos service principal. Required if mechanism is ``sspi``
domain
Kerberos user domain. Required if mechanism is ``sspi`` |
8,831 | def goto_assignments(request_data):
code = request_data['code']
line = request_data['line'] + 1
column = request_data['column']
path = request_data['path']
encoding = 'utf-8'
script = jedi.Script(code, line, column, path, encoding)
ret_val = []
try:
definitions = script.goto_assignments()
except jedi.NotFoundError:
pass
else:
ret_val = [(d.module_path, d.line - 1 if d.line else None,
d.column, d.full_name)
for d in definitions]
return ret_val | Go to assignements worker. |
8,832 | def _sid_subdir_path(sid):
padded_sid = format(sid, '06')
return os.path.join(
padded_sid[0:2],
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
) | Format subdir path to limit the number directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz |
8,833 | def readString(self, st):
if not isinstance(st, str) and not isinstance(st, bytes):
raise ValueError("String must be of type string or bytes, not %s" % type(st))
return etree.fromstring(st) | Parse a WFS capabilities document, returning an
instance of WFSCapabilitiesInfoset
string should be an XML capabilities document |
8,834 | def geom2localortho(geom):
cx, cy = geom.Centroid().GetPoint_2D()
lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs)
local_srs = localortho(lon,lat)
local_geom = geom_dup(geom)
geom_transform(local_geom, local_srs)
return local_geom | Convert existing geom to local orthographic projection
Useful for local cartesian distance/area calculations |
8,835 | def tablecopy(tablename, newtablename, deep=False, valuecopy=False, dminfo={},
endian='aipsrc', memorytable=False, copynorows=False):
t = table(tablename, ack=False)
return t.copy(newtablename, deep=deep, valuecopy=valuecopy,
dminfo=dminfo, endian=endian, memorytable=memorytable,
copynorows=copynorows) | Copy a table.
It is the same as :func:`table.copy`, but without the need to open
the table first. |
8,836 | def _create_subepochs(x, nperseg, step):
axis = x.ndim - 1
nsmp = x.shape[axis]
stride = x.strides[axis]
noverlap = nperseg - step
v_shape = *x.shape[:axis], (nsmp - noverlap) // step, nperseg
v_strides = *x.strides[:axis], stride * step, stride
v = as_strided(x, shape=v_shape, strides=v_strides,
writeable=False)
return v | Transform the data into a matrix for easy manipulation
Parameters
----------
x : 1d ndarray
actual data values
nperseg : int
number of samples in each row to create
step : int
distance in samples between rows
Returns
-------
2d ndarray
a view (i.e. doesn't copy data) of the original x, with shape
determined by nperseg and step. You should use the last dimension |
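A small demonstration of the row above; as_strided comes from numpy.lib.stride_tricks and the result is a read-only view:
import numpy as np
x = np.arange(10)
v = _create_subepochs(x, nperseg=4, step=2)
# rows are x[0:4], x[2:6], x[4:8], x[6:10]; v.shape == (4, 4)
# v shares memory with x, so no data is copied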
8,837 | def _bg_combine(self, bgs):
out = np.ones(self.h5["raw"].shape, dtype=float)
for bg in bgs:
out *= bg[:]
return out | Combine several background amplitude images |
8,838 | def grep(self, path, content, flags):
try:
match = re.compile(content, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
for gpath, matches in self.do_grep(path, match):
yield (gpath, matches) | grep every child path under path for content |
def start_server(self, host='localhost', port=9000, app=None):
from wsgiref.simple_server import make_server
if app is None:
app = self.wsgi
server = make_server(host, port, app)
server_addr = "%s:%s" % (server.server_name, server.server_port)
print "Server listening at http://%s/" % server_addr
server.serve_forever() | Start a `wsgiref.simple_server` based server to run this mapper. |
8,840 | def leader_get(attribute=None):
cmd = ['leader-get', '--format=json'] + [attribute or '-']
return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | Juju leader get value(s) |
8,841 | def set_pending_symbol(self, pending_symbol=None):
if pending_symbol is None:
pending_symbol = CodePointArray()
self.value = bytearray()
self.pending_symbol = pending_symbol
self.line_comment = False
return self | Sets the context's ``pending_symbol`` with the given unicode sequence and resets the context's ``value``.
If the input is None, an empty :class:`CodePointArray` is used. |
def encode(data, encoding=None, errors='strict', keep=False,
preserve_dict_class=False, preserve_tuples=False):
if isinstance(data, Mapping):
return encode_dict(data, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
elif isinstance(data, list):
return encode_list(data, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
elif isinstance(data, tuple):
return encode_tuple(data, encoding, errors, keep, preserve_dict_class) \
if preserve_tuples \
else encode_list(data, encoding, errors, keep,
preserve_dict_class, preserve_tuples)
else:
try:
return salt.utils.stringutils.to_bytes(data, encoding, errors)
except TypeError:
pass
except UnicodeEncodeError:
if not keep:
raise
return data | Generic function which will encode whichever type is passed, if necessary
If `strict` is True, and `keep` is False, and we fail to encode, a
UnicodeEncodeError will be raised. Passing `keep` as True allows for the
original value to silently be returned in cases where encoding fails. This
can be useful for cases where the data passed to this function is likely to
contain binary blobs. |
8,843 | def sg_flatten(tensor, opt):
dim = np.prod(tensor.get_shape().as_list()[1:])
return tf.reshape(tensor, [-1, dim], name=opt.name) | r"""Reshapes a tensor to `batch_size x -1`.
See `tf.reshape()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
name: If provided, it replaces current tensor's name.
Returns:
A 2-D tensor. |
8,844 | def name(self):
basename = self.basename
if basename is None:
return None
parent = self.Naming_parent
if parent is None:
return basename
else:
return parent.generate_unique_name(basename) | The unique name of this object, relative to the parent. |
8,845 | def compute_freq(self, csd=False):
progress = QProgressDialog(, ,
0, len(self.data) - 1, self)
progress.setWindowModality(Qt.ApplicationModal)
freq = self.frequency
prep = freq[].get_value()
scaling = freq[].get_value()
log_trans = freq[].get_value()
taper = freq[].get_value()
halfbandwidth = freq[].get_value()
NW = freq[].get_value()
duration = freq[].get_value()
overlap = freq[].value()
step = freq[].get_value()
centend = freq[].get_value()
detrend = freq[].get_value()
norm = freq[].get_value()
norm_concat = freq[].get_value()
if csd:
output =
elif freq[].isChecked():
output =
else:
output =
sides =
if freq[].isChecked():
step = None
else:
overlap = None
if NW == 0 or not freq[].get_value():
NW = None
if duration == 0 or not freq[].get_value():
duration = None
if step == 0:
step = None
if detrend == :
detrend = None
if freq[].isChecked():
n_fft = int(freq[].get_value())
elif freq[].isChecked():
n_fft = max([x[].number_of()[0] for x in self.data])
lg.info( + str(n_fft))
elif freq[].isChecked():
n_fft = None
if norm not in [, ]:
norm_evt_type = None
norm_stage = None
norm_chan = None
ncat = (0, 0, 0, 0)
if norm == :
norm_chan = [x + + self.idx_group.currentText() +
for x in self.one_grp[]]
norm_evt_type = [x.text() for x in \
freq[].selectedItems()]
if norm == :
norm_stage = [x.text() for x in \
freq[].selectedItems()]
if norm_concat:
ncat = (1, 1, 1, 1)
lg.info(.join([, str(ncat),
, str(norm_evt_type), ,
str(norm_stage), , str(norm_chan)]))
norm_seg = fetch(self.parent.info.dataset,
self.parent.notes.annot, ncat,
evt_type=norm_evt_type, stage=norm_stage,
chan_full=norm_chan)
if not norm_seg.segments:
msg =
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle()
error_dialog.showMessage(msg)
progress.cancel()
return
norm_seg.read_data(self.chan, ref_chan=self.one_grp[],
grp_name=self.one_grp[], parent=None)
if prep:
norm_seg = self.transform_data(norm_seg)
all_Sxx = []
for seg in norm_seg:
dat = seg[]
if prep:
dat = seg[]
try:
Sxx = frequency(dat, output=output, scaling=scaling,
sides=sides, taper=taper,
halfbandwidth=halfbandwidth, NW=NW,
duration=duration, overlap=overlap, step=step,
detrend=detrend, n_fft=n_fft,
log_trans=log_trans, centend=centend)
except ValueError:
msg = (
)
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle()
error_dialog.showMessage(msg)
progress.cancel()
return
all_Sxx.append(Sxx)
nSxx = ChanFreq()
nSxx.s_freq = Sxx.s_freq
nSxx.axis[] = Sxx.axis[]
nSxx.axis[] = Sxx.axis[]
nSxx.data = empty(1, dtype=)
nSxx.data[0] = empty((Sxx.number_of()[0],
Sxx.number_of()[0]), dtype=)
nSxx.data[0] = mean(
stack([x()[0] for x in all_Sxx], axis=2), axis=2)
lg.info(.join([, output, scaling, ,
str(sides), taper, , str(halfbandwidth), ,
str(NW), , str(duration), ,
str(overlap), , str(step), ,
str(detrend), , str(n_fft), ,
str(norm), , str(log_trans), ,
str(centend)]))
xfreq = []
for i, seg in enumerate(self.data):
new_seg = dict(seg)
data = seg[]
if prep:
data = seg[]
timeline = seg[].axis[][0]
new_seg[] = timeline[0]
new_seg[] = timeline[-1]
new_seg[] = len(timeline) / data.s_freq
try:
Sxx = frequency(data, output=output, scaling=scaling,
sides=sides, taper=taper,
halfbandwidth=halfbandwidth, NW=NW,
duration=duration, overlap=overlap, step=step,
detrend=detrend, n_fft=n_fft,
log_trans=log_trans, centend=centend)
except SyntaxError:
msg =
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle()
error_dialog.showMessage(msg)
progress.cancel()
return
if norm != :
for j, chan in enumerate(Sxx.axis[][0]):
dat = Sxx.data[0][j,:]
sf = Sxx.axis[][0]
f_res = sf[1] - sf[0]
if norm == :
norm_dat = sum(dat) * f_res
else:
norm_dat = nSxx(chan=chan)[0]
Sxx.data[0][j,:] = dat / norm_dat
new_seg[] = Sxx
xfreq.append(new_seg)
progress.setValue(i)
if progress.wasCanceled():
msg =
self.parent.statusBar().showMessage(msg)
return
progress.close()
return xfreq | Compute frequency domain analysis.
Returns
-------
list of dict
each item is a dict where 'data' is an instance of ChanFreq for a
single segment of signal, 'name' is the event type, if applicable,
'times' is a tuple of the start and end times in sec, 'duration' is
the actual duration of the segment, in seconds (can be dissociated
from 'times' if the signal was concatenated)
and with 'chan' (str), 'stage' (str) and 'cycle' (int) |
8,846 | def get_cgi_parameter_list(form: cgi.FieldStorage, key: str) -> List[str]:
return form.getlist(key) | Extracts a list of values, all with the same key, from a CGI form. |
8,847 | def self_register_user(self, user_name, account_id, user_terms_of_use, pseudonym_unique_id, communication_channel_address=None, communication_channel_type=None, user_birthdate=None, user_locale=None, user_short_name=None, user_sortable_name=None, user_time_zone=None):
path = {}
data = {}
params = {}
path["account_id"] = account_id
data["user[name]"] = user_name
if user_short_name is not None:
data["user[short_name]"] = user_short_name
if user_sortable_name is not None:
data["user[sortable_name]"] = user_sortable_name
if user_time_zone is not None:
data["user[time_zone]"] = user_time_zone
if user_locale is not None:
data["user[locale]"] = user_locale
if user_birthdate is not None:
data["user[birthdate]"] = user_birthdate
data["user[terms_of_use]"] = user_terms_of_use
data["pseudonym[unique_id]"] = pseudonym_unique_id
if communication_channel_type is not None:
data["communication_channel[type]"] = communication_channel_type
if communication_channel_address is not None:
data["communication_channel[address]"] = communication_channel_address
self.logger.debug("POST /api/v1/accounts/{account_id}/self_registration with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/accounts/{account_id}/self_registration".format(**path), data=data, params=params, single_item=True) | Self register a user.
Self register and return a new user and pseudonym for an account.
If self-registration is enabled on the account, you can use this
endpoint to self register new users. |
8,848 | def send(self, to, message):
super(ProtobufProcess, self).send(to, message.DESCRIPTOR.full_name, message.SerializeToString()) | Send a message to another process.
Same as ``Process.send`` except that ``message`` is a protocol buffer.
Returns immediately.
:param to: The pid of the process to send a message.
:type to: :class:`PID`
:param message: The message to send
:type message: A protocol buffer instance.
:raises: Will raise a ``Process.UnboundProcess`` exception if the
process is not bound to a context.
:return: Nothing |
8,849 | def italic(s, *, escape=True):
if escape:
s = escape_latex(s)
return NoEscape(r'\textit{' + s + '}') | r"""Make a string appear italicized in LaTeX formatting.
italic() wraps a given string in the LaTeX command \textit{}.
Args
----
s : str
The string to be formatted.
escape: bool
If true the italic text will be escaped
Returns
-------
NoEscape
The formatted string.
Examples
--------
>>> italic("hello")
'\\textit{hello}'
>>> print(italic("hello"))
\textit{hello} |
8,850 | def get_datetime_type(to_string):
from datetime import datetime
def datetime_type(string):
accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
'%Y-%m-%dT%HZ', '%Y-%m-%d']
for form in accepted_date_formats:
try:
if to_string:
return datetime.strptime(string, form).strftime(form)
return datetime.strptime(string, form)
except ValueError:
continue
raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
return datetime_type | Validates UTC datetime. Examples of accepted forms:
2017-12-31T01:11:59Z,2017-12-31T01:11Z or 2017-12-31T01Z or 2017-12-31 |
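A usage sketch for the row above, relying on the accepted formats restored in the code:
validate = get_datetime_type(to_string=True)
validate("2017-12-31T01:11:59Z")  # -> '2017-12-31T01:11:59Z'
validate("12/31/2017")            # raises ValueError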
8,851 | def container_stop(name, timeout=30, force=True, remote_addr=None,
cert=None, key=None, verify_cert=True):
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
container.stop(timeout, force, wait=True)
return _pylxd_model_to_dict(container) | Stop a container
name :
Name of the container to stop
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default,
but in most cases you will want to set it to False, as LXD
normally uses self-signed certificates. |
def unflatten_list(flat_dict, separator='_'):
_unflatten_asserts(flat_dict, separator)
unflattened_dict = unflatten(flat_dict, separator)
def _convert_dict_to_list(object_, parent_object, parent_object_key):
if isinstance(object_, dict):
for key in object_:
if isinstance(object_[key], dict):
_convert_dict_to_list(object_[key], object_, key)
try:
keys = [int(key) for key in object_]
keys.sort()
except (ValueError, TypeError):
keys = []
keys_len = len(keys)
if (keys_len > 0 and sum(keys) ==
int(((keys_len - 1) * keys_len) / 2) and keys[0] == 0 and
keys[-1] == keys_len - 1 and
check_if_numbers_are_consecutive(keys)):
parent_object[parent_object_key] = []
for key_index, key in enumerate(keys):
parent_object[parent_object_key].append(object_[str(key)])
_convert_dict_to_list(parent_object[parent_object_key][-1],
parent_object[parent_object_key],
key_index)
_convert_dict_to_list(unflattened_dict, None, None)
return unflattened_dict | Unflattens a dictionary, first assuming no lists exist and then tries to
identify lists and replaces them
This is probably not very efficient and has not been tested extensively
Feel free to add test cases or rewrite the logic
Issues that stand out to me:
- Sorting all the keys in the dictionary, which specially for the root
dictionary can be a lot of keys
- Checking that numbers are consecutive is O(N) in number of keys
:param flat_dict: dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy |
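A short example for the row above, assuming the '_' separator restored in the signature:
flat = {'a_0': 'x', 'a_1': 'y', 'b': 1}
unflatten_list(flat, separator='_')
# -> {'a': ['x', 'y'], 'b': 1}; the consecutive integer keys 0 and 1
# are detected and the sub-dict is replaced with a list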
8,853 | def get_win32_short_path_name(long_name):
import ctypes
from ctypes import wintypes
_GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW
_GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
_GetShortPathNameW.restype = wintypes.DWORD
output_buf_size = 0
while True:
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
if output_buf_size >= needed:
short_name = output_buf.value
break
else:
output_buf_size = needed
return short_name | Gets the short path name of a given long path.
References:
http://stackoverflow.com/a/23598461/200291
http://stackoverflow.com/questions/23598289/get-win-short-fname-python
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> # build test data
>>> #long_name = unicode(normpath(ut.get_resource_dir()))
>>> long_name = unicode(r'C:/Program Files (x86)')
>>> #long_name = unicode(r'C:/Python27')
#unicode(normpath(ut.get_resource_dir()))
>>> # execute function
>>> result = get_win32_short_path_name(long_name)
>>> # verify results
>>> print(result)
C:/PROGRA~2 |
8,854 | def makeDependencyMap(aMap):
index = {}
for i in aMap.keys():
iNode = index.get(i,None)
if not iNode:
iNode = Node(i)
index[i] = iNode
for c in aMap[i]:
cNode = index.get(c,None)
if not cNode:
cNode = Node(c)
index[c] = cNode
iNode.addChild(cNode)
return index | create a dependency data structure as follows:
- Each key in aMap represents an item that depends on each item in the iterable which is that key's value
- Each Node represents an item which is a precursor to its parents and depends on its children
Returns a map whose keys are the items described in aMap and whose values are the dependency (sub)tree for that item
Thus, for aMap = {a:(b,c), b:(d,), c:[]},
returns {a:Node(a),b:Node(b),c:Node(c),d:Node(d)} where
- Node(a) has no parent and children: Node(b) and Node(c)
- Node(b) has parent: Node(a) and child: Node(d)
- Node(c) has parent: Node(a) and no child
- Node(d) which was not a key in aMap was created. It has parent: Node(b) and no child
This map is used to find the precursors for a given item by using BottomUpVisitor on the Node associated with that item |
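A sketch of the row above using the docstring's own example; the Node class is assumed to provide addChild and parent/child links:
index = makeDependencyMap({'a': ('b', 'c'), 'b': ('d',), 'c': []})
sorted(index)  # -> ['a', 'b', 'c', 'd']
# Node('d') was created implicitly as a child of Node('b'),
# exactly as the docstring describes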
8,855 | def ip_hide_as_path_holder_as_path_access_list_name(self, **kwargs):
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_as_path_holder = ET.SubElement(ip, "hide-as-path-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
as_path = ET.SubElement(hide_as_path_holder, "as-path")
access_list = ET.SubElement(as_path, "access-list")
seq_keyword_key = ET.SubElement(access_list, "seq-keyword")
seq_keyword_key.text = kwargs.pop('seq_keyword')
instance_key = ET.SubElement(access_list, "instance")
instance_key.text = kwargs.pop('instance')
name = ET.SubElement(access_list, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
8,856 | def drain_K(self):
drain_K = minorloss.PIPE_ENTRANCE_K_MINOR + minorloss.PIPE_ENTRANCE_K_MINOR + minorloss.PIPE_EXIT_K_MINOR
return drain_K | Return the minor loss coefficient of the drain pipe.
:returns: Minor loss coefficient
:rtype: float |
8,857 | def dehtml(text):
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
class _DeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
text = data.strip()
if len(text) > 0:
text = re.sub(r'[ \t\r\n]+', ' ', text)
self.__text.append(text + ' ')
def handle_starttag(self, tag, attrs):
# Tag literals below were stripped during extraction; the mapping is
# reconstructed by assumption (block-level tags become line breaks)
if tag == 'p':
self.__text.append('\n\n')
elif tag == 'br':
self.__text.append('\n')
elif tag == 'ul':
self.__text.append('\n')
elif tag == 'li':
self.__text.append('\n- ')
def handle_endtag(self, tag):
if tag == 'p':
self.__text.append('\n')
if tag == 'li':
self.__text.append('\n')
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self.__text.append('\n')
def text(self):
return ''.join(self.__text).strip()
try:
parser = _DeHTMLParser()
parser.feed(text)
parser.close()
return parser.text()
except Exception as e:
return text | Remove HTML tag in input text and format the texts
accordingly. |
8,858 | def stream(self,Tsec = 2,numChan = 1):
self.Tsec = Tsec
self.numChan = numChan
self.N_samples = int(self.fs*Tsec)
self.data_capture = []
self.data_capture_left = []
self.data_capture_right = []
self.capture_sample_count = 0
self.DSP_tic = []
self.DSP_toc = []
self.start_time = time.time()
self.stop_stream = False
stream = self.p.open(format=pyaudio.paInt16,
channels=numChan,
rate=self.fs,
input=True,
output=True,
input_device_index = self.in_idx,
output_device_index = self.out_idx,
frames_per_buffer = self.frame_length,
stream_callback=self.stream_callback)
stream.start_stream()
if(Tsec == 0):
while stream.is_active():
if self.stop_stream:
stream.stop_stream()
time.sleep(self.sleep_time)
else:
while stream.is_active():
if self.capture_sample_count >= self.N_samples:
stream.stop_stream()
if self.stop_stream:
stream.stop_stream()
time.sleep(self.sleep_time)
stream.stop_stream()
stream.close()
self.p.terminate()
self.stream_data = True
if(self.interactiveFG):
self.play.children[0].value = 'Start Streaming'  # literal restored by assumption
else:
if(self.print_when_done == 1):
print() | Stream audio using callback
Parameters
----------
Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite
mode. When in infinite mode, Tsec.stop() can be used to stop the stream.
numChan : number of channels. Use 1 for mono and 2 for stereo. |
8,859 | def get_qcqp_form(prob):
if not prob.objective.args[0].is_quadratic():
raise Exception("Objective is not quadratic.")
if not all([constr._expr.is_quadratic() for constr in prob.constraints]):
raise Exception("Not all constraints are quadratic.")
if prob.is_dcp():
logging.warning("Problem is already convex; specifying solve method is unnecessary.")
extractor = QuadCoeffExtractor(*get_id_map(prob.variables()))
P0, q0, r0 = extractor.get_coeffs(prob.objective.args[0])
P0, q0, r0 = (P0[0]+P0[0].T)/2., q0.T.tocsc(), r0[0]
if prob.objective.NAME == "maximize":
P0, q0, r0 = -P0, -q0, -r0
f0 = QuadraticFunction(P0, q0, r0)
fs = []
for constr in prob.constraints:
sz = constr._expr.size[0]*constr._expr.size[1]
Pc, qc, rc = extractor.get_coeffs(constr._expr)
for i in range(sz):
fs.append(QuadraticFunction((Pc[i]+Pc[i].T)/2., qc[i, :].T.tocsc(), rc[i], constr.OP_NAME))
return QCQPForm(f0, fs) | Returns the problem metadata in QCQP class |
8,860 | def build_application_map(vertices_applications, placements, allocations,
core_resource=Cores):
application_map = defaultdict(lambda: defaultdict(set))
for vertex, application in iteritems(vertices_applications):
chip_cores = application_map[application][placements[vertex]]
core_slice = allocations[vertex].get(core_resource, slice(0, 0))
chip_cores.update(range(core_slice.start, core_slice.stop))
return application_map | Build a mapping from application to a list of cores where the
application is used.
This utility function assumes that each vertex is associated with a
specific application.
Parameters
----------
vertices_applications : {vertex: application, ...}
Applications are represented by the path of their APLX file.
placements : {vertex: (x, y), ...}
allocations : {vertex: {resource: slice, ...}, ...}
One of these resources should match the `core_resource` argument.
core_resource : object
The resource identifier which represents cores.
Returns
-------
{application: {(x, y) : set([c, ...]), ...}, ...}
For each application, for each used chip a set of core numbers onto
which the application should be loaded. |
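A hedged example for the row above; Cores is the default resource sentinel and the vertex objects here are plain strings:
vertices_applications = {'v0': 'app.aplx', 'v1': 'app.aplx'}
placements = {'v0': (0, 0), 'v1': (0, 0)}
allocations = {'v0': {Cores: slice(1, 3)}, 'v1': {Cores: slice(3, 4)}}
build_application_map(vertices_applications, placements, allocations)
# -> {'app.aplx': {(0, 0): {1, 2, 3}}}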
8,861 | def _catalog_report_helper(catalog, catalog_validation, url, catalog_id,
catalog_org):
fields = OrderedDict()
fields["catalog_metadata_url"] = url
fields["catalog_federation_id"] = catalog_id
fields["catalog_federation_org"] = catalog_org
fields["catalog_title"] = catalog.get("title")
fields["catalog_description"] = catalog.get("description")
fields["valid_catalog_metadata"] = (
1 if catalog_validation["status"] == "OK" else 0)
return fields | Takes a dict with a catalog's metadata and returns a dict with
the values that catalog_report() uses to report on it.
Args:
catalog (dict): Dictionary with a catalog's metadata.
validation (dict): Result, at the catalog level only, of the full
validation of `catalog`.
Returns:
dict: Dictionary with the catalog-level fields required by
catalog_report(). |
8,862 | def reformat_python_docstrings(top_dirs: List[str],
correct_copyright_lines: List[str],
show_only: bool = True,
rewrite: bool = False,
process_only_filenum: int = None) -> None:
filenum = 0
for top_dir in top_dirs:
for dirpath, dirnames, filenames in walk(top_dir):
for filename in filenames:
fullname = join(dirpath, filename)
extension = splitext(filename)[1]
if extension != PYTHON_EXTENSION:
continue
filenum += 1
if process_only_filenum and filenum != process_only_filenum:
continue
log.info("Processing file {}: {}", filenum, fullname)
proc = PythonProcessor(
full_path=fullname,
top_dir=top_dir,
correct_copyright_lines=correct_copyright_lines)
if show_only:
proc.show()
elif rewrite:
proc.rewrite_file() | Walk a directory, finding Python files and rewriting them.
Args:
top_dirs: list of directories to descend into
correct_copyright_lines:
list of lines (without newlines) representing the copyright
docstring block, including the transition lines of equals
symbols
show_only: show results (to stdout) only; don't rewrite
rewrite: write the changes
process_only_filenum: only process this file number (1-based index);
for debugging only |
8,863 | def scale(self, sx, sy=None):
if sy is None:
sy = sx
cairo.cairo_scale(self._pointer, sx, sy)
self._check_status() | Modifies the current transformation matrix (CTM)
by scaling the X and Y user-space axes
by :obj:`sx` and :obj:`sy` respectively.
The scaling of the axes takes place after
any existing transformation of user space.
If :obj:`sy` is omitted, it is the same as :obj:`sx`
so that scaling preserves aspect ratios.
:param sx: Scale factor in the X direction.
:param sy: Scale factor in the Y direction.
:type sx: float
:type sy: float |
8,864 | def list_roles(self, principal_name, principal_type):
self.send_list_roles(principal_name, principal_type)
return self.recv_list_roles() | Parameters:
- principal_name
- principal_type |
8,865 | def item_dict(self, item):
ret_dict = {"terms":{"category":[],"post_tag":[]}}
for e in item:
if "category" in e.tag:
slug = e.attrib["nicename"]
name = htmlparser.unescape(e.text)
cat_dict = self.category_dict.get(slug) or {"slug":slug,
"name":name,
"taxonomy":"category"}
ret_dict["terms"]["category"].append(cat_dict)
elif e.tag[-3:] == 'tag':
slug = e.attrib.get("tag_slug")
name = htmlparser.unescape(e.text)
tag_dict = self.tags_dict.get(slug) or {"slug":slug,
"name":name,
"taxonomy":"post_tag"}
ret_dict["terms"]["post_tag"].append(tag_dict)
else:
ret_dict[e.tag] = e.text
empty_keys = [k for k,v in ret_dict["terms"].items() if not v]
for k in empty_keys:
ret_dict["terms"].pop(k)
return ret_dict | create a default dict of values, including
category and tag lookup |
8,866 | async def __handle_ping(self, _ : Ping):
self.__last_ping = time.time()
await ZMQUtils.send(self.__backend_socket, Pong()) | Handle a Ping message. Pong the backend |
8,867 | def compute_ranks(x):
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks | Returns ranks in [0, len(x))
Note: This is different from scipy.stats.rankdata, which returns ranks in
[1, len(x)]. |
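A one-line check of the row above:
import numpy as np
compute_ranks(np.array([30.0, 10.0, 20.0]))
# -> array([2, 0, 1]); the smallest value gets rank 0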
8,868 | def OpenFile(filename, binary=False, newline=None, encoding=None):
from six.moves.urllib.parse import urlparse
filename_url = urlparse(filename)
if _UrlIsLocal(filename_url):
if not os.path.isfile(filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(filename)
mode = 'rb' if binary else 'r'
return io.open(filename, mode, encoding=encoding, newline=newline)
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(filename_url.scheme) | Open a file and return it.
Consider the possibility of a remote file (HTTP, HTTPS, FTP)
:param unicode filename:
Local or remote filename.
:param bool binary:
If True returns the file as is, ignore any EOL conversion.
If set ignores univeral_newlines parameter.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:returns file:
The open file, it must be closed by the caller
@raise: FileNotFoundError
When the given filename cannot be found
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information |
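A hedged usage sketch for OpenFile; the file path is illustrative, and the caller remains responsible for closing the handle:
f = OpenFile('/tmp/example.txt', binary=False, encoding='utf-8')
try:
    contents = f.read()
finally:
    f.close()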
8,869 | def match(self, name):
if self.method == Ex.Method.PREFIX:
return name.startswith(self.value)
elif self.method == Ex.Method.SUFFIX:
return name.endswith(self.value)
elif self.method == Ex.Method.CONTAINS:
return self.value in name
elif self.method == Ex.Method.EXACT:
return self.value == name
elif self.method == Ex.Method.REGEX:
return re.search(self.value, name)
return False | Check if given name matches.
Args:
name (str): name to check.
Returns:
bool: matches name. |
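A sketch of how match could be used; the Ex constructor signature is an assumption, since it is not shown here:
ex = Ex(method=Ex.Method.PREFIX, value='test_')  # hypothetical constructor
ex.match('test_parser')   # True
ex.match('parser_test')   # False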
8,870 | def rpc_method(func=None, name=None, entry_point=ALL, protocol=ALL,
str_standardization=settings.MODERNRPC_PY2_STR_TYPE,
str_standardization_encoding=settings.MODERNRPC_PY2_STR_ENCODING):
def decorated(_func):
_func.modernrpc_enabled = True
_func.modernrpc_name = name or _func.__name__
_func.modernrpc_entry_point = entry_point
_func.modernrpc_protocol = protocol
_func.str_standardization = str_standardization
_func.str_standardization_encoding = str_standardization_encoding
return _func
if func is None:
return decorated
return decorated(func) | Mark a standard python function as RPC method.
All arguments are optional
:param func: A standard function
:param name: Used as RPC method name instead of original function name
:param entry_point: Default: ALL. Used to limit usage of the RPC method for a specific set of entry points
:param protocol: Default: ALL. Used to limit usage of the RPC method for a specific protocol (JSONRPC or XMLRPC)
:param str_standardization: Default: settings.MODERNRPC_PY2_STR_TYPE. Configure string standardization on python 2.
Ignored on python 3.
:param str_standardization_encoding: Default: settings.MODERNRPC_PY2_STR_ENCODING. Configure the encoding used
to perform string standardization conversion. Ignored on python 3.
:type name: str
:type entry_point: str
:type protocol: str
:type str_standardization: type str or unicode
:type str_standardization_encoding: str |
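Typical usage of the rpc_method decorator, as in django-modern-rpc (import path assumed):
from modernrpc.core import rpc_method

@rpc_method
def add(a, b):
    return a + b

@rpc_method(name='math.multiply')
def multiply(a, b):
    return a * b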
8,871 | def update(self, ng_template_id, name=NotUpdated, plugin_name=NotUpdated,
hadoop_version=NotUpdated, flavor_id=NotUpdated,
description=NotUpdated, volumes_per_node=NotUpdated,
volumes_size=NotUpdated, node_processes=NotUpdated,
node_configs=NotUpdated, floating_ip_pool=NotUpdated,
security_groups=NotUpdated, auto_security_group=NotUpdated,
availability_zone=NotUpdated,
volumes_availability_zone=NotUpdated, volume_type=NotUpdated,
image_id=NotUpdated, is_proxy_gateway=NotUpdated,
volume_local_to_instance=NotUpdated, use_autoconfig=NotUpdated,
shares=NotUpdated, is_public=NotUpdated,
is_protected=NotUpdated, volume_mount_prefix=NotUpdated):
data = {}
self._copy_if_updated(
data, name=name, plugin_name=plugin_name,
hadoop_version=hadoop_version, flavor_id=flavor_id,
description=description, volumes_per_node=volumes_per_node,
volumes_size=volumes_size, node_processes=node_processes,
node_configs=node_configs, floating_ip_pool=floating_ip_pool,
security_groups=security_groups,
auto_security_group=auto_security_group,
availability_zone=availability_zone,
volumes_availability_zone=volumes_availability_zone,
volume_type=volume_type, image_id=image_id,
is_proxy_gateway=is_proxy_gateway,
volume_local_to_instance=volume_local_to_instance,
use_autoconfig=use_autoconfig, shares=shares,
is_public=is_public, is_protected=is_protected,
volume_mount_prefix=volume_mount_prefix
)
return self._update('/node-group-templates/%s' % ng_template_id, data,
                    'node_group_template') | Update a Node Group Template.
8,872 | def get_ftr(self):
if not self.ftr:
return self.ftr
width = self.size()[0]
return re.sub(
"%time", "%s\n" % time.strftime("%H:%M:%S"), self.ftr).rjust(width) | Process footer and return the processed string |
8,873 | def load_toml_path_config(filename):
if not os.path.exists(filename):
LOGGER.info(
"Skipping path loading from non-existent config file: %s",
filename)
return PathConfig()
LOGGER.info("Loading path information from config: %s", filename)
try:
with open(filename) as fd:
raw_config = fd.read()
except IOError as e:
raise LocalConfigurationError(
"Unable to load path configuration file: {}".format(str(e)))
toml_config = toml.loads(raw_config)
invalid_keys = set(toml_config.keys()).difference(
['data_dir', 'key_dir', 'log_dir', 'policy_dir'])
if invalid_keys:
raise LocalConfigurationError("Invalid keys in path config: {}".format(
", ".join(sorted(list(invalid_keys)))))
config = PathConfig(
config_dir=None,
data_dir=toml_config.get('data_dir', None),
key_dir=toml_config.get('key_dir', None),
log_dir=toml_config.get('log_dir', None),
policy_dir=toml_config.get('policy_dir', None)
)
return config | Returns a PathConfig created by loading a TOML file from the
filesystem. |
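A small round-trip sketch, assuming the toml package and that PathConfig exposes its directories as attributes:
import toml

with open('/tmp/path.toml', 'w') as fd:
    toml.dump({'data_dir': '/var/lib/app', 'key_dir': '/etc/app/keys'}, fd)

config = load_toml_path_config('/tmp/path.toml')
print(config.data_dir)  # -> /var/lib/app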
8,874 | def put_item(TableName=None, Item=None, Expected=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, ConditionalOperator=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):
pass | Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values.
In addition to putting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.
When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.
For more information about PutItem , see Working with Items in the Amazon DynamoDB Developer Guide .
See also: AWS API Documentation
Examples
This example adds a new item to the Music table.
Expected Output:
:example: response = client.put_item(
TableName='string',
Item={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
Expected={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Exists': True|False,
'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
'AttributeValueList': [
{
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
]
}
},
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
ConditionalOperator='AND'|'OR',
ConditionExpression='string',
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
}
)
:type TableName: string
:param TableName: [REQUIRED]
The name of the table to contain the item.
:type Item: dict
:param Item: [REQUIRED]
A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide both values for both the partition key and the sort key.
If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.
For more information about primary keys, see Primary Key in the Amazon DynamoDB Developer Guide .
Each element in the Item map is an AttributeValue object.
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type Expected: dict
:param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:
Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.
Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.
Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.
Value (dict) --Represents the data for the expected attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:
If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException .
If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.
DynamoDB returns a ValidationException if:
Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)
Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)
ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.
The following comparison operators are available:
EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
The following are descriptions of each comparison operator.
EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.
Note
This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.
NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.
Note
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).
IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.
BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}
AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.
For type Number, value comparisons are numeric.
String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type ReturnValues: string
:param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem , the valid values are:
NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)
ALL_OLD - If PutItem overwrote an attribute name-value pair, then the content of the old item is returned.
Note
The ReturnValues parameter is used by several DynamoDB operations; however, PutItem does not recognize any values other than NONE or ALL_OLD .
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:
INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
NONE - No ConsumedCapacity details are included in the response.
:type ReturnItemCollectionMetrics: string
:param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.
:type ConditionalOperator: string
:param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .
:type ConditionExpression: string
:param ConditionExpression: A condition that must be satisfied in order for a conditional PutItem operation to succeed.
An expression can contain any of the following:
Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size. These function names are case-sensitive.
Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
Logical operators: AND | OR | NOT
For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :
To access an attribute whose name conflicts with a DynamoDB reserved word.
To create a placeholder for repeating occurrences of an attribute name in an expression.
To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
Percentile
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :
{'#P':'Percentile'}
You could then use this substitution in an expression, as in this example:
#P = :val
Note
Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .
(string) --
(string) --
:type ExpressionAttributeValues: dict
:param ExpressionAttributeValues: One or more values that can be substituted in an expression.
Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
Available | Backordered | Discontinued
You would first need to specify ExpressionAttributeValues as follows:
{ ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }
You could then use these values in an expression, such as this:
ProductStatus IN (:avail, :back, :disc)
For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:rtype: dict
:return: {
'Attributes': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'Table': {
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
:returns:
(string) -- |
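A concrete boto3 call corresponding to the signature above; the table and attribute names are illustrative:
import boto3

dynamodb = boto3.client('dynamodb')
response = dynamodb.put_item(
    TableName='Music',
    Item={
        'Artist': {'S': 'No One You Know'},
        'SongTitle': {'S': 'Call Me Today'},
    },
    ConditionExpression='attribute_not_exists(SongTitle)',
    ReturnConsumedCapacity='TOTAL',
)
print(response.get('ConsumedCapacity'))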
8,875 | def __getDependenciesRecursiveWithProvider(self,
available_components = None,
search_dirs = None,
target = None,
traverse_links = False,
update_installed = False,
provider = None,
test = False,
_processed = None
):
    def recursionFilter(c):
        if not c:
            logger.debug('do not recurse into failed component')
            # don't recurse into failed components
            return False
        if c.getName() in _processed:
            logger.debug('do not recurse into already processed component: %s' % c)
            return False
        if c.installedLinked() and not traverse_links:
            return False
        return True
    available_components = self.ensureOrderedDict(available_components)
    if search_dirs is None:
        search_dirs = []
    if _processed is None:
        _processed = set()
    assert(test in [True, False, 'toplevel'])
    search_dirs.append(self.modulesPath())
    logger.debug('getting dependencies of %s (search dirs: %s)' % (self.getName(), search_dirs))
    if self.isTestDependency():
        logger.debug("won't recurse into test dependencies of test dependency %s" % self.getName())
        test = False
    components, errors = self.__getDependenciesWithProvider(
        available_components = available_components,
        search_dirs = search_dirs,
        update_installed = update_installed,
        target = target,
        provider = provider,
        test = test
    )
    _processed.add(self.getName())
    if errors:
        errors = ['Failed to satisfy dependencies of %s:' % self.path] + errors
    need_recursion = [x for x in filter(recursionFilter, components.values())]
    available_components.update(components)
    logger.debug('processed %s\nneed recursion: %s\navailable:%s\nsearch dirs:%s' % (
        self.getName(), need_recursion, available_components, search_dirs))
    if test == 'toplevel':
        test = False
    # NB: can't perform this step in parallel, since the available
    # components list must be updated in order
for c in need_recursion:
dep_components, dep_errors = c.__getDependenciesRecursiveWithProvider(
available_components = available_components,
search_dirs = search_dirs,
target = target,
traverse_links = traverse_links,
update_installed = update_installed,
provider = provider,
test = test,
_processed = _processed
)
available_components.update(dep_components)
components.update(dep_components)
errors += dep_errors
return (components, errors) | Get installed components using "provider" to find (and possibly
install) components.
This function is called with different provider functions in order
to retrieve a list of all of the dependencies, or install all
dependencies.
Returns
=======
(components, errors)
components: dictionary of name:Component
errors: sequence of errors
Parameters
==========
available_components:
None (default) or a dictionary of name:component. This is
searched before searching directories or fetching remote
components
search_dirs:
None (default), or sequence of directories to search for
already installed, (but not yet loaded) components. Used so
that manually installed or linked components higher up the
dependency tree are found by their users lower down.
These directories are searched in order, and finally the
current directory is checked.
target:
None (default), or a Target object. If specified the target
name and its similarTo list will be used in resolving
dependencies. If None, then only target-independent
dependencies will be installed
traverse_links:
False (default) or True: whether to recurse into linked
dependencies. You normally want to set this to "True" when
getting a list of dependencies, and False when installing
them (unless the user has explicitly asked dependencies to
be installed in linked components).
provider: None (default) or function:
provider(
dependency_spec,
available_components,
search_dirs,
working_directory,
update_if_installed
)
test:
True, False, 'toplevel': should test-only dependencies be
included (yes, no, or only at this level, not recursively) |
8,876 | def view_matrix(self):
self._update_yaw_and_pitch()
return self._gl_look_at(self.position, self.position + self.dir, self._up) | :return: The current view matrix for the camera |
8,877 | def police_priority_map_exceed_map_pri7_exceed(self, **kwargs):
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri7_exceed = ET.SubElement(exceed, "map-pri7-exceed")
map_pri7_exceed.text = kwargs.pop('map_pri7_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
8,878 | def max(self, spec):
if not isinstance(spec, (list, tuple)):
raise TypeError("spec must be an instance of list or tuple")
self.__check_okay_to_chain()
self.__max = SON(spec)
return self | Adds `max` operator that specifies upper bound for specific index.
:Parameters:
- `spec`: a list of field, limit pairs specifying the exclusive
upper bound for all keys of a specific index in order.
.. versionadded:: 2.7 |
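A usage sketch for the max operator; note that recent PyMongo releases require an explicit hint when using min/max bounds:
from pymongo import MongoClient

coll = MongoClient().test_db.items
coll.create_index([('value', 1)])
cursor = coll.find().max([('value', 100)]).hint([('value', 1)])
for doc in cursor:
    print(doc)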
8,879 | def options(self, *args, **kwargs):
data = OrderedDict([(k, v.options(*args, **kwargs))
for k, v in self.data.items()])
return self.clone(data) | Applies simplified option definition returning a new object
Applies options defined in a flat format to the objects
returned by the DynamicMap. If the options are to be set
directly on the objects in the HoloMap a simple format may be
used, e.g.:
obj.options(cmap='viridis', show_title=False)
If the object is nested the options must be qualified using
a type[.group][.label] specification, e.g.:
obj.options('Image', cmap='viridis', show_title=False)
or using:
obj.options({'Image': dict(cmap='viridis', show_title=False)})
Args:
*args: Sets of options to apply to object
Supports a number of formats including lists of Options
objects, a type[.group][.label] followed by a set of
keyword options to apply and a dictionary indexed by
type[.group][.label] specs.
backend (optional): Backend to apply options to
Defaults to current selected backend
clone (bool, optional): Whether to clone object
Options can be applied inplace with clone=False
**kwargs: Keywords of options
Set of options to apply to the object
Returns:
Returns the cloned object with the options applied |
8,880 | def get_generator(self, field):
if isinstance(field, fields.AutoField):
return None
if self.is_inheritance_parent(field):
return None
if (
field.default is not fields.NOT_PROVIDED and
not self.overwrite_defaults and
field.name not in self.field_values):
return None
kwargs = {}
if field.name in self.field_values:
value = self.field_values[field.name]
if isinstance(value, generators.Generator):
return value
elif isinstance(value, AutoFixture):
return generators.InstanceGenerator(autofixture=value)
elif callable(value):
return generators.CallableGenerator(value=value)
return generators.StaticGenerator(value=value)
if field.null:
kwargs['empty_p'] = self.none_p
if field.choices:
return generators.ChoicesGenerator(choices=field.choices, **kwargs)
if isinstance(field, related.ForeignKey):
is_self_fk = (get_remote_field_to(field)().__class__ == self.model)
if field.name in self.generate_fk and not is_self_fk:
return generators.InstanceGenerator(
autofixture.get(
get_remote_field_to(field),
follow_fk=self.follow_fk.get_deep_links(field.name),
generate_fk=self.generate_fk.get_deep_links(field.name)),
limit_choices_to=get_remote_field(field).limit_choices_to)
if field.name in self.follow_fk:
selected = generators.InstanceSelector(
get_remote_field_to(field),
limit_choices_to=get_remote_field(field).limit_choices_to)
if selected.get_value() is not None:
return selected
if field.blank or field.null:
return generators.NoneGenerator()
if is_self_fk and not field.null:
    raise CreateInstanceError(
        u'Cannot resolve self referencing field "%s" to "%s". '
        u'The field must be nullable to terminate the recursion.' % (
            field.name,
            '%s.%s' % (
                get_remote_field_to(field)._meta.app_label,
                get_remote_field_to(field)._meta.object_name,
            )
        ))
raise CreateInstanceError(
    u'Cannot resolve ForeignKey "%s" to "%s". Provide either '
    u'"follow_fk" or "generate_fk" parameters.' % (
        field.name,
        '%s.%s' % (
            get_remote_field_to(field)._meta.app_label,
            get_remote_field_to(field)._meta.object_name,
        )
    ))
if isinstance(field, related.ManyToManyField):
if field.name in self.generate_m2m:
min_count, max_count = self.generate_m2m[field.name]
return generators.MultipleInstanceGenerator(
autofixture.get(get_remote_field_to(field)),
limit_choices_to=get_remote_field(field).limit_choices_to,
min_count=min_count,
max_count=max_count,
**kwargs)
if field.name in self.follow_m2m:
min_count, max_count = self.follow_m2m[field.name]
return generators.InstanceSelector(
get_remote_field_to(field),
limit_choices_to=get_remote_field(field).limit_choices_to,
min_count=min_count,
max_count=max_count,
**kwargs)
if field.blank or field.null:
return generators.StaticGenerator([])
raise CreateInstanceError(
    u'Cannot assign instances of "%s" to ManyToManyField "%s". '
    u'Provide either "follow_m2m" or "generate_m2m" arguments.' % (
        '%s.%s' % (
            get_remote_field_to(field)._meta.app_label,
            get_remote_field_to(field)._meta.object_name,
        ),
        field.name,
    ))
if isinstance(field, fields.FilePathField):
return generators.FilePathGenerator(
path=field.path, match=field.match, recursive=field.recursive,
max_length=field.max_length, **kwargs)
if isinstance(field, fields.CharField):
if isinstance(field, fields.SlugField):
generator = generators.SlugGenerator
elif isinstance(field, fields.EmailField):
return generators.EmailGenerator(
max_length=min(field.max_length, 30))
elif isinstance(field, fields.URLField):
return generators.URLGenerator(
max_length=min(field.max_length, 25))
elif field.max_length > 15:
return generators.LoremSentenceGenerator(
common=False,
max_length=field.max_length)
else:
generator = generators.StringGenerator
return generator(max_length=field.max_length)
if isinstance(field, fields.DecimalField):
return generators.DecimalGenerator(
decimal_places=field.decimal_places,
max_digits=field.max_digits)
if hasattr(fields, 'BigIntegerField'):
if isinstance(field, fields.BigIntegerField):
return generators.IntegerGenerator(
min_value=-field.MAX_BIGINT - 1,
max_value=field.MAX_BIGINT,
**kwargs)
if isinstance(field, ImageField):
return generators.ImageGenerator(storage=field.storage, **kwargs)
for field_class, generator in self.field_to_generator.items():
if isinstance(field, field_class):
return generator(**kwargs)
return None | Return a value generator based on the field instance that is passed to
this method. This function may return ``None`` which means that the
specified field will be ignored (e.g. if no matching generator was
found). |
8,881 | def _find_parenthesis(self, position, forward=True):
commas = depth = 0
document = self._text_edit.document()
char = document.characterAt(position)
while category(char) != 'Cc' and position > 0:
    if char == ',' and depth == 0:
        commas += 1
    elif char == ')':
        if forward and depth == 0:
            break
        depth += 1
    elif char == '(':
        if not forward and depth == 0:
            break
        depth -= 1
position += 1 if forward else -1
char = document.characterAt(position)
else:
position = -1
return position, commas | If 'forward' is True (resp. False), proceed forwards
(resp. backwards) through the line that contains 'position' until an
unmatched closing (resp. opening) parenthesis is found. Returns a
tuple containing the position of this parenthesis (or -1 if it is
not found) and the number of commas (at depth 0) found along the way.
8,882 | def tangency_portfolio(cov_mat, exp_rets, allow_short=False):
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
if not isinstance(exp_rets, pd.Series):
raise ValueError("Expected returns is not a Series")
if not cov_mat.index.equals(exp_rets.index):
raise ValueError("Indices do not match")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
if not allow_short:
G = opt.matrix(np.vstack((-exp_rets.values,
-np.identity(n))))
h = opt.matrix(np.vstack((-1.0,
np.zeros((n, 1)))))
else:
G = opt.matrix(-exp_rets.values).T
h = opt.matrix(-1.0)
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h)
if sol['status'] != 'optimal':
    warnings.warn("Convergence problem")
weights = pd.Series(sol['x'], index=cov_mat.index)
weights /= weights.sum()
return weights | Computes a tangency portfolio, i.e. a maximum Sharpe ratio portfolio.
Note: As the Sharpe ratio is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral tangency portfolios. This is because for
a positive initial Sharpe ratio the Sharpe ratio grows unbounded
with increasing leverage.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
exp_rets: pandas.Series
Expected asset returns (often historical returns).
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights. |
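A self-contained sketch of calling tangency_portfolio on simulated returns; the numbers are arbitrary:
import numpy as np
import pandas as pd

np.random.seed(0)
rets = pd.DataFrame(np.random.randn(250, 3) * 0.01 + 0.0005,
                    columns=['A', 'B', 'C'])
weights = tangency_portfolio(rets.cov(), rets.mean(), allow_short=False)
print(weights)  # long-only weights, normalized to sum to 1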
8,883 | def import_from_string(self, text, title=None):
data = self.model.get_data()
if not hasattr(data, "keys"):
return
editor = ImportWizard(self, text, title=title,
contents_title=_("Clipboard contents"),
varname=fix_reference_name("data",
blacklist=list(data.keys())))
if editor.exec_():
var_name, clip_data = editor.get_data()
self.new_value(var_name, clip_data) | Import data from string |
8,884 | def mknts(self, add_dct):
nts = []
assert len(add_dct) == len(self.nts)
flds = list(next(iter(self.nts))._fields) + list(next(iter(add_dct)).keys())
ntobj = cx.namedtuple("ntgoea", " ".join(flds))
for dct_new, ntgoea in zip(add_dct, self.nts):
dct_curr = ntgoea._asdict()
for key, val in dct_new.items():
dct_curr[key] = val
nts.append(ntobj(**dct_curr))
return nts | Add information from add_dct to a new copy of namedtuples stored in nts. |
8,885 | def channels(self):
if self._channels is None:
self._channels = ChannelList(self._version, service_sid=self._solution['sid'], )
return self._channels | Access the channels
:returns: twilio.rest.chat.v2.service.channel.ChannelList
:rtype: twilio.rest.chat.v2.service.channel.ChannelList |
8,886 | def _init_metadata(self):
self._min_integer_value = None
self._max_integer_value = None
self._integer_value_metadata = {
    'element_id': Id(self.my_osid_object_form._authority,
                     self.my_osid_object_form._namespace,
                     'integer_value'),
    'element_label': 'integer_value',
    'instructions': 'enter an integer value',
    'required': False,
    'read_only': False,
    'linked': False,
    'array': False,
    'default_integer_values': [None],
    'syntax': 'INTEGER',
    'minimum_integer': self._min_integer_value,
    'maximum_integer': self._max_integer_value,
    'integer_set': []
} | stub
8,887 | def match_serializers(self, serializers, default_media_type):
return self._match_serializers_by_query_arg(serializers) or self.\
_match_serializers_by_accept_headers(serializers,
default_media_type) | Choose serializer for a given request based on query arg or headers.
Checks if query arg `format` (by default) is present and tries to match
the serializer based on the arg value, by resolving the mimetype mapped
to the arg value.
Otherwise, chooses the serializer by retrieving the best quality
`Accept` headers and matching its value (mimetype).
:param serializers: Dictionary of serializers.
:param default_media_type: The default media type.
:returns: Best matching serializer based on `format` query arg first,
then client `Accept` headers or None if no matching serializer. |
8,888 | def binary_dilation(x, radius=3):
mask = disk(radius)
x = _binary_dilation(x, selem=mask)
return x | Return fast binary morphological dilation of an image.
see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__.
Parameters
-----------
x : 2D array
A binary image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed binary image. |
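A quick check of binary_dilation on a single-pixel image:
import numpy as np

x = np.zeros((32, 32), dtype=bool)
x[16, 16] = True
y = binary_dilation(x, radius=3)  # the pixel grows into a disk of radius 3
print(x.sum(), y.sum())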
8,889 | def read_namespaced_event(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_event_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_event_with_http_info(name, namespace, **kwargs)
return data | read_namespaced_event # noqa: E501
read the specified Event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_event(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Event (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1Event
If the method is called asynchronously,
returns the request thread. |
8,890 | def destroy(self):
if self._running is False:
return
self._running = False
if hasattr(self, 'schedule'):
    del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
    self.pub_channel.on_recv(None)
    if hasattr(self.pub_channel, 'close'):
        self.pub_channel.close()
    del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop() | Tear down the minion |
8,891 | def set_cache_complex_value(self, name, value):
for item in self.json_state.get('states'):
    if item.get('name') == name:
        item['value'] = str(value)
This does not change the physical device. Useful if you want the
device state to reflect a new value which has not yet updated from
Vera. |
8,892 | def _join_json_files(cls, prefix, clear=False):
filetype_list = ['entities', 'events', 'sentences']
json_dict = {}
try:
for filetype in filetype_list:
fname = prefix + '.uaz.' + filetype + '.json'
with open(fname, 'r') as f:
json_dict[filetype] = json.load(f)
if clear:
remove(fname)
logger.debug("Removed %s." % fname)
except IOError as e:
logger.error(
    'Failed to open JSON files for prefix %s.' % prefix
)
logger.exception(e)
return None
return json_dict | Join different REACH output JSON files into a single JSON object.
The output of REACH is broken into three files that need to be joined
before processing. Specifically, there will be three files of the form:
`<prefix>.uaz.<subcategory>.json`.
Parameters
----------
prefix : str
The absolute path up to the extensions that reach will add.
clear : bool
Default False - if True, delete the files as soon as they are
loaded.
Returns
-------
json_obj : dict
The result of joining the files, keyed by the three subcategories. |
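A hedged call sketch; ReachReader is a hypothetical owner class, and the prefix must point at the three <prefix>.uaz.<subcategory>.json files REACH produced:
json_obj = ReachReader._join_json_files('/tmp/reach_output/PMC1234', clear=False)
if json_obj is not None:
    print(sorted(json_obj.keys()))  # -> ['entities', 'events', 'sentences']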
8,893 | def on_install(self, editor):
EditorExtension.on_install(self, editor)
self.setParent(editor)
self.setPalette(QApplication.instance().palette())
self.setFont(QApplication.instance().font())
self.editor.panels.refresh()
self._background_brush = QBrush(QColor(
self.palette().window().color()))
self._foreground_pen = QPen(QColor(
self.palette().windowText().color()))
if self.position == self.Position.FLOATING:
self.setAttribute(Qt.WA_TransparentForMouseEvents) | Extends :meth:`spyder.api.EditorExtension.on_install` method to set the
editor instance as the parent widget.
.. warning:: Don't forget to call **super** if you override this
method!
:param editor: editor instance
:type editor: spyder.plugins.editor.widgets.codeeditor.CodeEditor |
8,894 | def main(args):
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting crazy calculations...")
print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n)))
_logger.info("Script ends here") | Main entry point allowing external calls
Args:
args ([str]): command line parameter list |
8,895 | def facilities(self):
facilities = []
try:
list_items = self._ad_page_content.select("#facilities li")  # selector assumed; original literal lost
except Exception as e:
if self._debug:
logging.error(
"Error getting facilities. Error message: " + e.args[0])
return
for li in list_items:
facilities.append(li.text)
return facilities | This method returns the properties facilities.
:return: |
8,896 | def fingerprint(P, obs1, obs2=None, p0=None, tau=1, k=None, ncv=None):
if obs2 is None:
obs2 = obs1
R, D, L = rdl_decomposition(P, k=k, ncv=ncv)
mu = L[0, :]
w = np.diagonal(D)
timescales = timescales_from_eigenvalues(w, tau)
if p0 is None:
amplitudes = np.dot(mu * obs1, R) * np.dot(L, obs2)
else:
amplitudes = np.dot(p0 * obs1, R) * np.dot(L, obs2)
return timescales, amplitudes | r"""Dynamical fingerprint for equilibrium or relaxation experiment
The dynamical fingerprint is given by the implied time-scale
spectrum together with the corresponding amplitudes.
Parameters
----------
P : (M, M) scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
p0 : (M,) ndarray (optional)
Initial distribution for a relaxation experiment
tau : int (optional)
Lag time of given transition matrix, for correct time-scales
k : int (optional)
Number of time-scales and amplitudes to compute
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
timescales : (N,) ndarray
Time-scales of the transition matrix
amplitudes : (N,) ndarray
Amplitudes for the given observable(s) |
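A small sketch of fingerprint on a 4-state sparse chain; k=2 keeps the sparse eigensolver constraint k < n-1 satisfied:
import numpy as np
from scipy.sparse import csr_matrix

P = csr_matrix(np.array([[0.8, 0.2, 0.0, 0.0],
                         [0.2, 0.7, 0.1, 0.0],
                         [0.0, 0.1, 0.7, 0.2],
                         [0.0, 0.0, 0.2, 0.8]]))
obs = np.array([1.0, 1.0, -1.0, -1.0])
timescales, amplitudes = fingerprint(P, obs, tau=10, k=2)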
8,897 | def from_db_value(self, value, expression, connection, context):
if value is None:
return value
return json_decode(value) | "Called in all circumstances when the data is loaded from the
database, including in aggregates and values() calls." |
8,898 | def get_system_config_dir():
from appdirs import site_config_dir
return site_config_dir(
appname=Config.APPNAME, appauthor=Config.APPAUTHOR
) | Returns system config location. E.g. /etc/dvc.conf.
Returns:
str: path to the system config directory. |
8,899 | def get_element_mass(self, element):
result = [0]
for compound in self.material.compounds:
c = self.get_compound_mass(compound)
f = [c * x for x in emf(compound, [element])]
result = [v+f[ix] for ix, v in enumerate(result)]
return result[0] | Determine the mass of the specified element in the package.
:returns: [kg] The mass of the element in the package, accumulated
across all compounds in the material's compound list. |