def insertBefore(self, child: Node, ref_node: Node) -> Node:
"""Insert new child node before the reference child node.
If the reference node is not a child of this node, raise ValueError. If
this instance is connected to the node in the browser, the child node is
also added to it.
"""
if self.connected:
self._insert_before_web(child, ref_node)
return self._insert_before(child, ref_node)
def buglist(self, from_date=DEFAULT_DATETIME):
"""Get a summary of bugs in CSV format.
:param from_date: retrieve bugs that were updated from that date
"""
if not self.version:
self.version = self.__fetch_version()
if self.version in self.OLD_STYLE_VERSIONS:
order = 'Last+Changed'
else:
order = 'changeddate'
date = from_date.strftime("%Y-%m-%d %H:%M:%S")
params = {
self.PCHFIELD_FROM: date,
self.PCTYPE: self.CTYPE_CSV,
self.PLIMIT: self.max_bugs_csv,
self.PORDER: order
}
response = self.call(self.CGI_BUGLIST, params)
return response
def get_ports(self, id_or_uri, start=0, count=-1):
"""
Gets all interconnect ports.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
Returns:
list: All interconnect ports.
"""
uri = self._client.build_subresource_uri(resource_id_or_uri=id_or_uri, subresource_path="ports")
return self._client.get_all(start, count, uri=uri)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write('\r|')
color_print('=' * bar_fill, 'blue', file=file, end='')
if bar_fill < self._bar_length:
color_print('>', 'green', file=file, end='')
write('-' * (self._bar_length - bar_fill - 1))
write('|')
if value >= self._total:
t = time.time() - self._start_time
prefix = ' '
elif value <= 0:
t = None
prefix = ''
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = ' ETA '
write(' {0:>4s}/{1:>4s}'.format(
human_file_size(value),
self._human_total))
write(' ({0:>6s}%)'.format('{0:.2f}'.format(frac * 100.0)))
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
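A minimal, self-contained sketch of the bar-fill arithmetic used above; `draw_bar` and its parameters are illustrative names, not part of the original class:

import sys

def draw_bar(value, total, bar_length=40, file=sys.stdout):
    # Fraction complete, guarding against a zero total as the method above does.
    frac = 1.0 if total == 0 else float(value) / float(total)
    bar_fill = bar_length if frac > 1 else int(bar_length * frac)
    file.write('\r|' + '=' * bar_fill)
    if bar_fill < bar_length:
        # '>' marks the leading edge; '-' pads the remainder of the bar.
        file.write('>' + '-' * (bar_length - bar_fill - 1))
    file.write('| {0:6.2f}%'.format(frac * 100.0))
    file.flush()

draw_bar(30, 100)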
def contains_non_repeat_actions(self):
'''
Because repeating repeat actions can get ugly real fast
'''
for action in self.actions:
if not isinstance(action, (int, dynamic.RepeatCommand)):
return True
return False
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions and it ends up with being executed all internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25666's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa
# | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: 'X' means it throws an exception during the conversion.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
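The branch at the end of `udf` is a common decorator-dispatch pattern: used bare as `@udf`, the decorated function arrives as `f`; used as `@udf(returnType=...)`, the first call only sees configuration and must return a partially applied factory. A framework-free sketch of that pattern under made-up names (`udf_like` and `_create_udf` here are illustrative, not PySpark internals):

import functools

def _create_udf(f, returnType):
    # Stand-in for PySpark's internal wrapper construction.
    f._return_type = returnType
    return f

def udf_like(f=None, returnType="string"):
    if f is None or isinstance(f, str):
        # Called as @udf_like() or @udf_like(returnType=...): defer wrapping.
        return_type = f or returnType
        return functools.partial(_create_udf, returnType=return_type)
    # Called as @udf_like applied directly to a function.
    return _create_udf(f, returnType=returnType)

@udf_like
def to_upper(s):
    return s.upper()

@udf_like(returnType="int")
def add_one(x):
    return x + 1

print(to_upper("a"), to_upper._return_type)   # A string
print(add_one(1), add_one._return_type)       # 2 int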
def _cmp(self, other):
"""
Comparator function for two :class:`~pywbem.CIMParameter` objects.
The comparison is based on their public attributes, in descending
precedence:
* `name`
* `type`
* `reference_class`
* `is_array`
* `array_size`
* `qualifiers`
* `value`
* `embedded_object`
The comparison takes into account any case insensitivities described
for these attributes.
Raises `TypeError` if the `other` object is not a
:class:`~pywbem.CIMParameter` object.
"""
if self is other:
return 0
try:
assert isinstance(other, CIMParameter)
except AssertionError:
raise TypeError(
_format("other must be CIMParameter, but is: {0}",
type(other)))
return (cmpname(self.name, other.name) or
cmpitem(self.type, other.type) or
cmpname(self.reference_class, other.reference_class) or
cmpitem(self.is_array, other.is_array) or
cmpitem(self.array_size, other.array_size) or
cmpdict(self.qualifiers, other.qualifiers) or
cmpitem(self.value, other.value) or
cmpitem(self.embedded_object, other.embedded_object))
def zip(self, *items):
"""
Zip the collection together with one or more arrays.
:param items: The items to zip
:type items: list
:rtype: Collection
"""
return self.__class__(list(zip(self.items, *items)))
def funnel_rebuild(psg_trm_spec):
"""Rebuilds a model and compares it to a reference model.
Parameters
----------
psg_trm_spec: (([float], float, int), AMPAL, specification)
A tuple containing the parameters, score and generation for a
model as well as a model of the best scoring parameters.
Returns
-------
energy_rmsd_gen: (float, float, int)
A triple containing the BUFF score, RMSD to the top model
and generation of a model generated during the minimisation.
"""
param_score_gen, top_result_model, specification = psg_trm_spec
params, score, gen = param_score_gen
model = specification(*params)
rmsd = top_result_model.rmsd(model)
return rmsd, score, gen
def most_recent_common_ancestor(self, *ts):
"""Find the MRCA of some tax_ids.
Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if
no ancestor of the specified tax_ids could be found.
"""
if len(ts) > 200:
res = self._large_mrca(ts)
else:
res = self._small_mrca(ts)
if res:
(res,), = res
else:
raise NoAncestor()
return res
def build_sdk_span(self, span):
""" Takes a BasicSpan and converts into an SDK type JsonSpan """
custom_data = CustomData(tags=span.tags,
logs=self.collect_logs(span))
sdk_data = SDKData(name=span.operation_name,
custom=custom_data,
Type=self.get_span_kind_as_string(span))
if "arguments" in span.tags:
sdk_data.arguments = span.tags["arguments"]
if "return" in span.tags:
sdk_data.Return = span.tags["return"]
data = Data(service=instana.singletons.agent.sensor.options.service_name, sdk=sdk_data)
entity_from = {'e': instana.singletons.agent.from_.pid,
'h': instana.singletons.agent.from_.agentUuid}
json_span = JsonSpan(
t=span.context.trace_id,
p=span.parent_id,
s=span.context.span_id,
ts=int(round(span.start_time * 1000)),
d=int(round(span.duration * 1000)),
k=self.get_span_kind_as_int(span),
n="sdk",
f=entity_from,
data=data)
error = span.tags.pop("error", False)
ec = span.tags.pop("ec", None)
if error and ec:
json_span.error = error
json_span.ec = ec
return json_span
def _load_github_repo():
""" Loads the GitHub repository from the user's config. """
if 'TRAVIS' in os.environ:
raise RuntimeError('Detected that we are running in Travis. '
'Stopping to prevent infinite loops.')
try:
with open(os.path.join(config_dir, 'repo'), 'r') as f:
return f.read()
except (OSError, IOError):
raise RuntimeError('Could not find your repository. '
'Have you run `trytravis --repo`?')
def get(self):
"""
Get a JSON-ready representation of this OpenTracking.
:returns: This OpenTracking, ready for use in a request body.
:rtype: dict
"""
open_tracking = {}
if self.enable is not None:
open_tracking["enable"] = self.enable
if self.substitution_tag is not None:
open_tracking["substitution_tag"] = self.substitution_tag.get()
return open_tracking
def handle(client, request):
"""
Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no formaters, use autopep8 formater and it's default config
"""
formaters = request.get('formaters', None)
if not formaters:
formaters = [{'name': 'autopep8'}]
logging.debug('formaters: ' + json.dumps(formaters, indent=4))
data = request.get('data', None)
if not isinstance(data, str):
return send(client, 'invalid data', None)
max_line_length = None
for formater in formaters:
max_line_length = formater.get('config', {}).get('max_line_length')
if max_line_length:
break
for formater in formaters:
name = formater.get('name', None)
config = formater.get('config', {})
if name not in FORMATERS:
return send(client, 'formater {} not support'.format(name), None)
formater = FORMATERS[name]
if formater is None:
return send(client, 'formater {} not installed'.format(name), None)
if name == 'isort' and max_line_length:
config.setdefault('line_length', max_line_length)
data = formater(data, **config)
return send(client, None, data)
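A minimal sketch of the dispatch the request struct above describes: a registry maps formatter names to callables, unknown names are rejected, and each formatter's `config` dict is splatted in as keyword arguments. The registry contents and `upper_formatter` are made up for illustration:

def upper_formatter(data, **config):
    # Toy formatter standing in for autopep8/isort.
    return data.upper()

FORMATERS = {'upper': upper_formatter}

def run_formaters(request):
    data = request.get('data')
    if not isinstance(data, str):
        raise ValueError('invalid data')
    # Default to a single formatter when none is requested, as handle() does.
    for formater in request.get('formaters') or [{'name': 'upper'}]:
        name = formater.get('name')
        if name not in FORMATERS:
            raise ValueError('formater {} not support'.format(name))
        data = FORMATERS[name](data, **formater.get('config', {}))
    return data

print(run_formaters({'data': 'def f(): pass', 'formaters': [{'name': 'upper'}]}))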
def has_perm(self, user_obj, perm, obj=None):
"""
Check if the user has permission on themselves.
If the user_obj is not authenticated, it returns ``False``.
If no object is specified, it returns ``True`` when the corresponding
permission was set to ``True`` (changed from v0.7.0).
This behavior is based on the django system.
https://code.djangoproject.com/wiki/RowLevelPermissions
If an object is specified, it will return ``True`` if the object is the
user.
So users can change or delete themselves (you can change this behavior
by setting the ``any_permission``, ``change_permission`` or
``delete_permission`` attributes of this instance).
Parameters
----------
user_obj : django user model instance
A django user model instance which be checked
perm : string
`app_label.codename` formatted permission string
obj : None or django model instance
None or django model instance for object permission
Returns
-------
boolean
Whether the specified user has the specified permission (for the
specified object).
"""
if not is_authenticated(user_obj):
return False
# construct the permission full name
change_permission = self.get_full_permission_string('change')
delete_permission = self.get_full_permission_string('delete')
# no object given: check the model-level (non-object) permission
if obj is None:
# object permission without obj should return True
# Ref: https://code.djangoproject.com/wiki/RowLevelPermissions
if self.any_permission:
return True
if self.change_permission and perm == change_permission:
return True
if self.delete_permission and perm == delete_permission:
return True
return False
elif user_obj.is_active:
# check if the user trying to interact with himself
if obj == user_obj:
if self.any_permission:
# have any kind of permissions to himself
return True
if (self.change_permission and
perm == change_permission):
return True
if (self.delete_permission and
perm == delete_permission):
return True
return False
def get(zpool, prop=None, show_source=False, parsable=True):
'''
.. versionadded:: 2016.3.0
Retrieves the given list of properties
zpool : string
Name of storage pool
prop : string
Optional name of property to retrieve
show_source : boolean
Show source of property
parsable : boolean
Display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.get myzpool
'''
ret = OrderedDict()
value_properties = ['name', 'property', 'value', 'source']
## collect get output
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='get',
flags=['-H'],
property_name=prop if prop else 'all',
target=zpool,
),
python_shell=False,
)
if res['retcode'] != 0:
return __utils__['zfs.parse_command_result'](res)
# NOTE: command output for reference
# ========================================================================
# ...
# data mountpoint /data local
# data compression off default
# ...
# =========================================================================
# parse get output
for line in res['stdout'].splitlines():
# NOTE: transform data into dict
prop_data = OrderedDict(list(zip(
value_properties,
[x for x in line.strip().split('\t') if x not in ['']],
)))
# NOTE: older zfs does not have -o, fall back to manually stripping the name field
del prop_data['name']
# NOTE: normalize values
if parsable:
# NOTE: raw numbers and pythonic types
prop_data['value'] = __utils__['zfs.from_auto'](prop_data['property'], prop_data['value'])
else:
# NOTE: human readable zfs types
prop_data['value'] = __utils__['zfs.to_auto'](prop_data['property'], prop_data['value'])
# NOTE: show source if requested
if show_source:
ret[prop_data['property']] = prop_data
del ret[prop_data['property']]['property']
else:
ret[prop_data['property']] = prop_data['value']
return ret
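The parsing step pairs the fixed column names with the tab-separated fields of each `zpool get -H` line via `zip`. A standalone sketch with invented sample output:

from collections import OrderedDict

sample_stdout = "data\tmountpoint\t/data\tlocal\ndata\tcompression\toff\tdefault"
value_properties = ['name', 'property', 'value', 'source']

ret = OrderedDict()
for line in sample_stdout.splitlines():
    # zip the column names onto the non-empty tab-separated fields
    prop_data = OrderedDict(zip(
        value_properties,
        [x for x in line.strip().split('\t') if x != ''],
    ))
    del prop_data['name']            # drop the pool name column
    ret[prop_data['property']] = prop_data['value']

print(ret)  # OrderedDict([('mountpoint', '/data'), ('compression', 'off')])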
def cluster_application_attempts(self, application_id):
"""
With the application attempts API, you can obtain a collection of
resources that represent an application attempt.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/cluster/apps/{appid}/appattempts'.format(
appid=application_id)
return self.request(path)
def page(self, course, task, submission):
""" Get all data and display the page """
submission = self.submission_manager.get_input_from_submission(submission)
submission = self.submission_manager.get_feedback_from_submission(
submission,
show_everything=True,
translation=self.app._translations.get(self.user_manager.session_language(), gettext.NullTranslations())
)
to_display = {
problem.get_id(): {
"id": problem.get_id(),
"name": problem.get_name(self.user_manager.session_language()),
"defined": True
} for problem in task.get_problems()
}
to_display.update({
pid: {
"id": pid,
"name": pid,
"defined": False
} for pid in (set(submission["input"]) - set(to_display))
})
return self.template_helper.get_renderer().course_admin.submission(course, task, submission, to_display.values())
def graph_loads(graph_json):
'''
Load graph
'''
layers = []
for layer in graph_json['layers']:
layer_info = Layer(layer['type'], layer['input'], layer['output'], layer['size'])
layer_info.is_delete = layer['is_delete']
layers.append(layer_info)
graph = Graph(graph_json['max_layer_num'], [], [], [])
graph.layers = layers
return graph
def _decompose_vectorized_indexer(indexer, shape, indexing_support):
"""
Decompose vectorized indexer to the successive two indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index loaded on-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
indexing_support: one of IndexerSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
As an example, let us consider to index a few elements from a backend array
with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports outer indexing, it is more
efficient to load a subslice of the array than loading the entire array,
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer = []
np_indexer = []
# convert negative indices
indexer = [np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)]
for k, s in zip(indexer, shape):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
else:
# If it is a (multidimensional) np.ndarray, just pickup the used
# keys without duplication and store them as a 1d-np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer))
np_indexer = VectorizedIndexer(tuple(np_indexer))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer, np_indexer
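The key trick in the ndarray branch above is `np.unique(k, return_inverse=True)`: the unique values act as an outer index into the backend array, and the inverse indices reproduce the original vectorized selection on the loaded subset. A standalone NumPy check of that identity:

import numpy as np

array = np.arange(20) * 10
k = np.array([[0, 3, 1], [2, 3, 2]])        # vectorized (multidimensional) indexer

oind, vind = np.unique(k, return_inverse=True)
subset = array[oind]                         # outer indexing: load only the used rows
recovered = subset[vind.reshape(k.shape)]    # in-memory vectorized indexing

assert np.array_equal(recovered, array[k])
print(oind, recovered)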
def log(self, text, level=logging.INFO):
"""
convenience wrapper for :func:`fileStore.logToMaster`
"""
self._fileStore.logToMaster(text, level)
def desired_destination(self, network, edge):
"""Returns the agent's next destination given its current
location on the network.
An ``Agent`` chooses one of the out edges at random. The
probability that the ``Agent`` will travel along a specific
edge is specified in the :class:`QueueNetwork's<.QueueNetwork>`
transition matrix.
Parameters
----------
network : :class:`.QueueNetwork`
The :class:`.QueueNetwork` where the Agent resides.
edge : tuple
A 4-tuple indicating which edge this agent is located at.
The first two slots indicate the current edge's source and
target vertices, while the third slot indicates this edge's
``edge_index``. The last slot indicates the edge type of
that edge.
Returns
-------
out : int
Returns the edge index corresponding to the agent's next
edge to visit in the network.
See Also
--------
:meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>`
method that returns the transition probabilities for each
edge in the graph.
"""
n = len(network.out_edges[edge[1]])
if n <= 1:
return network.out_edges[edge[1]][0]
u = uniform()
pr = network._route_probs[edge[1]]
k = _choice(pr, u, n)
# _choice returns an integer between 0 and n-1 where the
# probability of k being selected is equal to pr[k].
return network.out_edges[edge[1]][k]
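A plain-Python sketch of what a helper like `_choice` presumably does: draw a uniform number and walk the cumulative transition probabilities until it is exceeded (this is an illustration, not queueing-tool's actual compiled implementation):

import random

def choice_from_probs(pr, u):
    # Return index k with probability pr[k], given u drawn uniformly from [0, 1).
    cumulative = 0.0
    for k, p in enumerate(pr):
        cumulative += p
        if u < cumulative:
            return k
    return len(pr) - 1  # guard against floating point round-off

pr = [0.2, 0.5, 0.3]
counts = [0, 0, 0]
for _ in range(10000):
    counts[choice_from_probs(pr, random.random())] += 1
print(counts)  # roughly proportional to pr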
def com_google_fonts_check_family_equal_glyph_names(ttFonts):
"""Fonts have equal glyph names?"""
fonts = list(ttFonts)
all_glyphnames = set()
for ttFont in fonts:
all_glyphnames |= set(ttFont["glyf"].glyphs.keys())
missing = {}
available = {}
for glyphname in all_glyphnames:
missing[glyphname] = []
available[glyphname] = []
failed = False
for ttFont in fonts:
fontname = ttFont.reader.file.name
these_ones = set(ttFont["glyf"].glyphs.keys())
for glyphname in all_glyphnames:
if glyphname not in these_ones:
failed = True
missing[glyphname].append(fontname)
else:
available[glyphname].append(fontname)
for gn in missing.keys():
if missing[gn]:
available_styles = [style(k) for k in available[gn]]
missing_styles = [style(k) for k in missing[gn]]
if None not in available_styles + missing_styles:
# if possible, use stylenames in the log messages.
avail = ', '.join(available_styles)
miss = ', '.join(missing_styles)
else:
# otherwise, print filenames:
avail = ', '.join(available[gn])
miss = ', '.join(missing[gn])
yield FAIL, (f"Glyphname '{gn}' is defined on {avail}"
f" but is missing on {miss}.")
if not failed:
yield PASS, "All font files have identical glyph names."
def write_data(self, variable_id, value):
"""
write values to the device
"""
i = 0
j = 0
while i < 10:
try:
self.inst.query('*IDN?')
# logger.info("Visa-AFG1022-Write-variable_id : %s et value : %s" %(variable_id, value))
i = 12
j = 1
except:
self.connect()
time.sleep(1)
i += 1
logger.error("AFG1022 connect error i : %s" %i)
if j == 0:
logger.error("AFG1022-Instrument not connected")
return None
if variable_id == 'init_BODE':
# N
try:
N = int(RecordedData.objects.last_element(variable_id=Variable.objects.get(name='BODE_n').id).value())
except:
N = 0
logger.error('AFG1022 cannot load N')
if N == 0:
# Set N to 1
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='BODE_n').id, value=1, start=time.time())
cwt.save()
# ReCall init GBF
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Init_BODE_GBF').id, value=1, start=time.time())
cwt.save()
return None
elif N == 1:
self.inst.read_termination = '\n'
# Get Ve
Vepp = RecordedData.objects.last_element(variable_id=Variable.objects.get(name='BODE_Vpp').id).value()
# Fmin
Fmin = RecordedData.objects.last_element(variable_id=Variable.objects.get(name='BODE_Fmin').id).value()
# Call Range MM
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Set_AC_Range_and_Resolution_and_Measure_MM').id, value=Vepp, start=time.time())
cwt.save()
# Call Init Osc
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Init_BODE_Osc').id, value=1, start=time.time())
cwt.save()
# Reset GBF
CMD = str('*RST;OUTPut1:STATe ON;OUTP1:IMP MAX;SOUR1:AM:STAT OFF;SOUR1:FUNC:SHAP SIN;SOUR1:VOLT:LEV:IMM:AMPL '+str(Vepp)+'Vpp')
self.inst.write(CMD)
# self.inst.write('*CLS')
# Set F value
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='BODE_F').id, value=Fmin, start=time.time())
cwt.save()
# Call Set Freq GBF
# cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Set_Freq_GBF').id, value=Fmin, start=time.time())
# cwt.save()
self.write_data("set_freq", Fmin)
return True
else:
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Init_BODE_GBF').id, value=1, start=time.time())
cwt.save()
logger.info("Init GBF - N : %s" %N)
return False
return None
elif variable_id == 'set_freq':
# Define Freq
self.inst.write('SOUR1:FREQ:FIX '+str(value))
# Call Read MM
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Read_MM_acual_value').id, value=1, start=time.time())
cwt.save()
return self.parse_value(value)
elif variable_id == 'set_tension':
# Define tension
self.inst.write('SOUR1:VOLT:LEV:IMM:AMPL '+str(value)+'Vpp')
# F = Fmin
F = RecordedData.objects.last_element(variable_id=Variable.objects.get(name='BODE_Fmin').id).value()
# Set F value
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='BODE_F').id, value=F, start=time.time())
cwt.save()
# Call Set Freq GBF
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Set_Freq_GBF').id, value=F, start=time.time())
cwt.save()
return self.parse_value(value)
elif variable_id == 'return_value':
return self.parse_value(value)
elif variable_id == 'reboot':
import os
os.system('sudo reboot')
return 1
else:
return self.parse_value(self.inst.query(str(variable_id)+' '+str(value)))
return None
def purge_db(self):
"""
Clear all matching our user_id.
"""
with self.engine.begin() as db:
purge_user(db, self.user_id)
def get_projects(session, query):
"""
Get one or more projects
"""
# GET /api/projects/0.1/projects
response = make_get_request(session, 'projects', params_data=query)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise ProjectsNotFoundException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id'])
def get_area_def(self, dsid):
"""Get the area definition of the dataset."""
geocoding = self.root.find('.//Tile_Geocoding')
epsg = geocoding.find('HORIZONTAL_CS_CODE').text
rows = int(geocoding.find('Size[@resolution="' + str(dsid.resolution) + '"]/NROWS').text)
cols = int(geocoding.find('Size[@resolution="' + str(dsid.resolution) + '"]/NCOLS').text)
geoposition = geocoding.find('Geoposition[@resolution="' + str(dsid.resolution) + '"]')
ulx = float(geoposition.find('ULX').text)
uly = float(geoposition.find('ULY').text)
xdim = float(geoposition.find('XDIM').text)
ydim = float(geoposition.find('YDIM').text)
area_extent = (ulx, uly + rows * ydim, ulx + cols * xdim, uly)
area = geometry.AreaDefinition(
self.tile,
"On-the-fly area",
self.tile,
{'init': epsg},
cols,
rows,
area_extent)
return area
def extant_item(arg, arg_type):
"""Determine if parser argument is an existing file or directory.
This technique comes from http://stackoverflow.com/a/11541450/95592
and from http://stackoverflow.com/a/11541495/95592
Args:
arg: parser argument containing filename to be checked
arg_type: string of either "file" or "directory"
Returns:
If the file exists, return the filename or directory.
Raises:
If the file does not exist, raise a parser error.
"""
if arg_type == "file":
if not os.path.isfile(arg):
raise argparse.ArgumentError(
None,
"The file {arg} does not exist.".format(arg=arg))
else:
# File exists so return the filename
return arg
elif arg_type == "directory":
if not os.path.isdir(arg):
raise argparse.ArgumentError(
None,
"The directory {arg} does not exist.".format(arg=arg))
else:
# Directory exists so return the directory name
return arg
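Helpers like this are typically wired into argparse through the `type=` callback, e.g. via `functools.partial`. A small sketch under that assumption; it raises `argparse.ArgumentTypeError`, which argparse reports cleanly from a `type=` callback, instead of the `ArgumentError` used above:

import argparse
import functools
import os

def extant_item(arg, arg_type):
    # Reject paths that do not exist on disk; return the argument unchanged otherwise.
    if arg_type == "file" and not os.path.isfile(arg):
        raise argparse.ArgumentTypeError("The file {arg} does not exist.".format(arg=arg))
    if arg_type == "directory" and not os.path.isdir(arg):
        raise argparse.ArgumentTypeError("The directory {arg} does not exist.".format(arg=arg))
    return arg

parser = argparse.ArgumentParser()
parser.add_argument("--config", type=functools.partial(extant_item, arg_type="file"))
parser.add_argument("--outdir", type=functools.partial(extant_item, arg_type="directory"))
print(parser.parse_args(["--outdir", "."]))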
def validate_slice_increment(dicoms):
"""
Validate that the distance between all slices is equal (or very close to equal)
:param dicoms: list of dicoms
"""
first_image_position = numpy.array(dicoms[0].ImagePositionPatient)
previous_image_position = numpy.array(dicoms[1].ImagePositionPatient)
increment = first_image_position - previous_image_position
for dicom_ in dicoms[2:]:
current_image_position = numpy.array(dicom_.ImagePositionPatient)
current_increment = previous_image_position - current_image_position
if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
logger.warning('Slice increment not consistent through all slices')
logger.warning('---------------------------------------------------------')
logger.warning('%s %s' % (previous_image_position, increment))
logger.warning('%s %s' % (current_image_position, current_increment))
if 'InstanceNumber' in dicom_:
logger.warning('Instance Number: %s' % dicom_.InstanceNumber)
logger.warning('---------------------------------------------------------')
raise ConversionValidationError('SLICE_INCREMENT_INCONSISTENT')
previous_image_position = current_image_position
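A standalone illustration of the increment check with synthetic ImagePositionPatient vectors, no pydicom objects involved; the last gap is deliberately wrong so the mismatch is reported:

import numpy

# Slice positions spaced 2.5 mm apart, except the last gap which is 2.0 mm.
positions = [numpy.array([0.0, 0.0, z]) for z in (0.0, 2.5, 5.0, 7.0)]

increment = positions[0] - positions[1]
previous = positions[1]
for current in positions[2:]:
    current_increment = previous - current
    if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
        print('inconsistent increment:', increment, current_increment)
    previous = current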
def hot(self, limit=None):
"""GETs hot links from this subreddit. Calls :meth:`narwal.Reddit.hot`.
:param limit: max number of links to return
"""
return self._reddit.hot(self.display_name, limit=limit)
def copy(self):
""" Copy the instance and make sure not to use a reference
"""
return self.__class__(
amount=self["amount"],
asset=self["asset"].copy(),
blockchain_instance=self.blockchain,
)
def score_group(group_name=None):
'''
Warning: this is deprecated as of Compliance Checker v3.2!
Please do not use scoring groups; update your plugins
if necessary.
'''
warnings.warn('Score_group is deprecated as of Compliance Checker v3.2.')
def _inner(func):
def _dec(s, ds):
ret_val = func(s, ds)
"""
if group_name != None and not isinstance(ret_val[0], tuple):
return tuple([(group_name, ret_val[0])] + list(ret_val[1:]))
"""
# multiple returns
if not isinstance(ret_val, list):
ret_val = [ret_val]
def dogroup(r):
cur_grouping = r.name
if isinstance(cur_grouping, tuple):
cur_grouping = list(cur_grouping)
elif not isinstance(cur_grouping, list):
cur_grouping = [cur_grouping]
cur_grouping.insert(0, group_name)
return Result(r.weight, r.value, tuple(cur_grouping), r.msgs)
ret_val = [fix_return_value(x, func.__name__, func, s) for x in
ret_val]
ret_val = list(map(dogroup, ret_val))
return ret_val
return wraps(func)(_dec)
return _inner
def inst_matches(self, start, end, instr, target=None, include_beyond_target=False):
"""
Find all `instr` in the block from start to end.
`instr` is a Python opcode or a list of opcodes
If `instr` is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list with indexes to them or [] if none found.
"""
try:
None in instr
except:
instr = [instr]
first = self.offset2inst_index[start]
result = []
for inst in self.insts[first:]:
if inst.opcode in instr:
if target is None:
result.append(inst.offset)
else:
t = self.get_target(inst.offset)
if include_beyond_target and t >= target:
result.append(inst.offset)
elif t == target:
result.append(inst.offset)
pass
pass
pass
if inst.offset >= end:
break
pass
# FIXME: put in a test
# check = self.all_instr(start, end, instr, target, include_beyond_target)
# assert result == check
return result
def fit_general(xy, uv):
""" Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
"""
# Set up products used for computing the fit
gxy = uv.astype(ndfloat128)
guv = xy.astype(ndfloat128)
Sx = gxy[:,0].sum()
Sy = gxy[:,1].sum()
Su = guv[:,0].sum()
Sv = guv[:,1].sum()
Sux = np.dot(guv[:,0], gxy[:,0])
Svx = np.dot(guv[:,1], gxy[:,0])
Suy = np.dot(guv[:,0], gxy[:,1])
Svy = np.dot(guv[:,1], gxy[:,1])
Sxx = np.dot(gxy[:,0], gxy[:,0])
Syy = np.dot(gxy[:,1], gxy[:,1])
Sxy = np.dot(gxy[:,0], gxy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
# The fit solution...
# where
# u = P0 + P1*x + P2*y
# v = Q0 + Q1*x + Q2*y
#
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError('Singular matrix.')
# Return the shift, rotation, and scale changes
result = build_fit(P, Q, 'general')
resids = xy - np.dot(uv, result['fit_matrix']) - result['offset']
result['rms'] = resids.std(axis=0)
result['resids'] = resids
result['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return result
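The routine above solves the normal equations of a 2-D affine fit u = P0 + P1*x + P2*y, v = Q0 + Q1*x + Q2*y. A self-contained NumPy sketch of the same least-squares problem on synthetic points (note the original swaps which list plays the independent variable internally; the sketch fits uv as a function of xy for clarity):

import numpy as np

rng = np.random.default_rng(0)
xy = rng.random((20, 2)) * 100.0
true_matrix = np.array([[1.01, -0.02], [0.03, 0.99]])
true_offset = np.array([5.0, -3.0])
uv = xy @ true_matrix + true_offset

# Design matrix [x, y, 1]; least squares recovers [P1, P2, P0] and [Q1, Q2, Q0].
A = np.column_stack([xy, np.ones(len(xy))])
P, *_ = np.linalg.lstsq(A, uv[:, 0], rcond=None)
Q, *_ = np.linalg.lstsq(A, uv[:, 1], rcond=None)
print(np.round(P, 4))   # ~ [ 1.01, 0.03,  5.0]
print(np.round(Q, 4))   # ~ [-0.02, 0.99, -3.0]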
def even_even(self):
"""
Selects even-even nuclei from the table
"""
return self.select(lambda Z, N: not(Z % 2) and not(N % 2), name=self.name)
def copy_path(self):
"""Return a copy of the current path.
:returns:
A list of ``(path_operation, coordinates)`` tuples
of a :ref:`PATH_OPERATION` string
and a tuple of floats coordinates
whose content depends on the operation type:
* :obj:`MOVE_TO <PATH_MOVE_TO>`: 1 point ``(x, y)``
* :obj:`LINE_TO <PATH_LINE_TO>`: 1 point ``(x, y)``
* :obj:`CURVE_TO <PATH_CURVE_TO>`: 3 points
``(x1, y1, x2, y2, x3, y3)``
* :obj:`CLOSE_PATH <PATH_CLOSE_PATH>` 0 points ``()`` (empty tuple)
"""
path = cairo.cairo_copy_path(self._pointer)
result = list(_iter_path(path))
cairo.cairo_path_destroy(path)
return result
def plot_latent(self, labels=None, which_indices=None,
resolution=60, legend=True,
plot_limits=None,
updates=False,
kern=None, marker='<>^vsd',
num_samples=1000, projection='2d',
predict_kwargs={},
scatter_kwargs=None, **imshow_kwargs):
"""
see plotting.matplot_dep.dim_reduction_plots.plot_latent
if predict_kwargs is None, will plot latent spaces for 0th dataset (and kernel), otherwise give
predict_kwargs=dict(Yindex='index') for plotting only the latent space of dataset with 'index'.
"""
from ..plotting.gpy_plot.latent_plots import plot_latent
if "Yindex" not in predict_kwargs:
predict_kwargs['Yindex'] = 0
Yindex = predict_kwargs['Yindex']
self.kern = self.bgplvms[Yindex].kern
self.likelihood = self.bgplvms[Yindex].likelihood
return plot_latent(self, labels, which_indices, resolution, legend, plot_limits, updates, kern, marker, num_samples, projection, scatter_kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def _process_response(response, save_to):
"""
Given a response object, prepare it to be handed over to the external caller.
Preparation steps include:
* detect if the response has error status, and convert it to an appropriate exception;
* detect Content-Type, and based on that either parse the response as JSON or return as plain text.
"""
status_code = response.status_code
if status_code == 200 and save_to:
if save_to.startswith("~"): save_to = os.path.expanduser(save_to)
if os.path.isdir(save_to) or save_to.endswith(os.path.sep):
dirname = os.path.abspath(save_to)
filename = H2OConnection._find_file_name(response)
else:
dirname, filename = os.path.split(os.path.abspath(save_to))
fullname = os.path.join(dirname, filename)
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(fullname, "wb") as f:
for chunk in response.iter_content(chunk_size=65536):
if chunk: # Empty chunks may occasionally happen
f.write(chunk)
except OSError as e:
raise H2OValueError("Cannot write to file %s: %s" % (fullname, e))
return fullname
content_type = response.headers.get("Content-Type", "")
if ";" in content_type: # Remove a ";charset=..." part
content_type = content_type[:content_type.index(";")]
# Auto-detect response type by its content-type. Decode JSON, all other responses pass as-is.
if content_type == "application/json":
try:
data = response.json(object_pairs_hook=H2OResponse)
except (JSONDecodeError, requests.exceptions.ContentDecodingError) as e:
raise H2OServerError("Malformed JSON from server (%s):\n%s" % (str(e), response.text))
else:
data = response.text
# Success (200 = "Ok", 201 = "Created", 202 = "Accepted", 204 = "No Content")
if status_code in {200, 201, 202, 204}:
return data
# Client errors (400 = "Bad Request", 404 = "Not Found", 412 = "Precondition Failed")
if status_code in {400, 404, 412} and isinstance(data, (H2OErrorV3, H2OModelBuilderErrorV3)):
raise H2OResponseError(data)
# Server errors (notably 500 = "Server Error")
# Note that it is possible to receive valid H2OErrorV3 object in this case, however it merely means the server
# did not provide the correct status code.
raise H2OServerError("HTTP %d %s:\n%r" % (status_code, response.reason, data))
def save(self, *objs, condition=None, atomic=False):
"""Save one or more objects.
:param objs: objects to save.
:param condition: only perform each save if this condition holds.
:param bool atomic: only perform each save if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
"""
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
self.session.save_item({
"TableName": self._compute_table_name(obj.__class__),
"Key": dump_key(self, obj),
**render(self, obj=obj, atomic=atomic, condition=condition, update=True)
})
object_saved.send(self, engine=self, obj=obj)
logger.info("successfully saved {} objects".format(len(objs)))
def _read_mode_tsopt(self, size, kind):
"""Read Timestamps option.
Positional arguments:
* size - int, length of option
* kind - int, 8 (Timestamps)
Returns:
* dict -- extracted Timestamps (TS) option
Structure of TCP TSopt [RFC 7323]:
+-------+-------+---------------------+---------------------+
|Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)|
+-------+-------+---------------------+---------------------+
1 1 4 4
Octets Bits Name Description
0 0 tcp.ts.kind Kind (8)
1 8 tcp.ts.length Length (10)
2 16 tcp.ts.val Timestamp Value
6 48 tcp.ts.ecr Timestamps Echo Reply
"""
temp = struct.unpack('>II', self._read_fileng(size))
data = dict(
kind=kind,
length=size,
val=temp[0],
ecr=temp[1],
)
return data
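The option body is just two unsigned 32-bit big-endian integers, so the whole parse is one `struct.unpack('>II', ...)`. A standalone sketch with hand-built option bytes; the kind and length octets are read separately here, as the reader above does:

import struct

# Kind=8, Length=10, TSval=123456, TSecr=654321
option = struct.pack('>BBII', 8, 10, 123456, 654321)
kind, size = option[0], option[1]
tsval, tsecr = struct.unpack('>II', option[2:2 + 8])
print(dict(kind=kind, length=size, val=tsval, ecr=tsecr))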
def entry_detail(request, slug, template='djournal/entry_detail.html'):
'''Returns a response of an individual entry, for the given slug.'''
entry = get_object_or_404(Entry.public, slug=slug)
context = {
'entry': entry,
}
return render_to_response(
template,
context,
context_instance=RequestContext(request),
)
def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
"""
Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
"""
_vars = _vars or {}
evars = evars or {}
options = options or {}
data = {'template': template,
'email': ','.join(emails) if isinstance(emails, list) else emails,
'vars': _vars.copy(),
'evars': evars.copy(),
'options': options.copy()}
if schedule_time is not None:
data['schedule_time'] = schedule_time
return self.api_post('send', data)
def draft_context(cls):
"""Set the context to draft"""
previous_state = g.get('draft')
try:
g.draft = True
yield
finally:
g.draft = previous_state
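The pattern is a context manager that flips a flag and restores the previous value in `finally`. In this sketch Flask's `g` is replaced with a plain namespace object so it runs standalone:

from contextlib import contextmanager
from types import SimpleNamespace

g = SimpleNamespace(draft=False)

@contextmanager
def draft_context():
    previous_state = getattr(g, 'draft', None)
    try:
        g.draft = True
        yield
    finally:
        # Always restore whatever was set before entering the context.
        g.draft = previous_state

with draft_context():
    print('inside:', g.draft)   # True
print('after:', g.draft)        # restored to False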
def checkedItems( self ):
"""
Returns the checked items for this combobox.
:return [<str>, ..]
"""
if not self.isCheckable():
return []
return [nativestring(self.itemText(i)) for i in self.checkedIndexes()]
def load_var_files(opt, p_obj=None):
"""Load variable files, merge, return contents"""
obj = {}
if p_obj:
obj = p_obj
for var_file in opt.extra_vars_file:
LOG.debug("loading vars from %s", var_file)
obj = merge_dicts(obj.copy(), load_var_file(var_file, obj))
return obj
def interpolate(values, color_map=None, dtype=np.uint8):
"""
Given a 1D list of values, return interpolated colors
for the range.
Parameters
---------------
values : (n, ) float
Values to be interpolated over
color_map : None, or str
Key to a colormap contained in:
matplotlib.pyplot.colormaps()
e.g: 'viridis'
Returns
-------------
interpolated : (n, 4) dtype
Interpolated RGBA colors
"""
# get a color interpolation function
if color_map is None:
cmap = linear_color_map
else:
from matplotlib.pyplot import get_cmap
cmap = get_cmap(color_map)
# make input always float
values = np.asanyarray(values, dtype=np.float64).ravel()
# scale values to 0.0 - 1.0 and get colors
colors = cmap((values - values.min()) / values.ptp())
# convert to 0-255 RGBA
rgba = to_rgba(colors, dtype=dtype)
return rgba
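The essence of `interpolate` is rescaling the values to [0, 1] and feeding them to a colormap callable. A pure-NumPy sketch that blends between two RGBA endpoints in place of a matplotlib colormap (the blend function is made up so the example runs without matplotlib):

import numpy as np

def linear_color_map_like(t):
    # Blend from blue to red; t is in [0, 1], output is float RGBA per value.
    t = np.asanyarray(t)[..., None]
    blue = np.array([0.0, 0.0, 1.0, 1.0])
    red = np.array([1.0, 0.0, 0.0, 1.0])
    return (1.0 - t) * blue + t * red

values = np.array([3.0, 7.0, 11.0])
scaled = (values - values.min()) / np.ptp(values)   # 0.0 .. 1.0
rgba = (linear_color_map_like(scaled) * 255).round().astype(np.uint8)
print(rgba)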
def received_message(self, msg):
"""
Handle receiving a message by checking whether it is in response
to a command or unsolicited, and dispatching it to the appropriate
object method.
"""
logger.debug("Received message: %s", msg)
if msg.is_binary:
raise ValueError("Binary messages not supported")
resps = json.loads(msg.data)
cmd_group = _get_cmds_id(*resps)
if cmd_group:
(cmds, promise) = self._cmd_groups[cmd_group]
promise.fulfill((cmds, resps))
else:
try:
self.received_unsolicited(resps)
except:
logger.exception("Error in unsolicited msg handler")
raise
def ge(self, other):
"""
Greater than or overlaps. Returns True if no part of this Interval
extends lower than other.
:raises ValueError: if either self or other is a null Interval
:param other: Interval or point
:return: True or False
:rtype: bool
"""
self._raise_if_null(other)
return self.begin >= getattr(other, 'begin', other)
def shard_data(self, region):
"""
Get League of Legends status for the given shard.
Requests to this API are not counted against the application Rate Limits.
:param string region: the region to execute this request on
:returns: ShardStatus
"""
url, query = LolStatusApiV3Urls.shard_data(region=region)
return self._raw_request(self.shard_data.__name__, region, url, query) | Get League of Legends status for the given shard.
Requests to this API are not counted against the application Rate Limits.
:param string region: the region to execute this request on
:returns: ShardStatus |
def remove_tiers(self, tiers):
"""Remove multiple tiers, note that this is a lot faster then removing
them individually because of the delayed cleaning of timeslots.
:param list tiers: Names of the tier to remove.
:raises KeyError: If a tier is non existent.
"""
for a in tiers:
self.remove_tier(a, clean=False)
self.clean_time_slots() | Remove multiple tiers, note that this is a lot faster than removing
them individually because of the delayed cleaning of timeslots.
:param list tiers: Names of the tier to remove.
:raises KeyError: If a tier is non existent. |
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | Close all connections |
def _do_packet_out(self, datapath, data, in_port, actions):
"""send a packet."""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
out = parser.OFPPacketOut(
datapath=datapath, buffer_id=ofproto.OFP_NO_BUFFER,
data=data, in_port=in_port, actions=actions)
datapath.send_msg(out) | send a packet. |
def t384(args):
"""
%prog t384
Print out a table for converting between 96-well and 384-well plates
"""
p = OptionParser(t384.__doc__)
opts, args = p.parse_args(args)
plate, splate = get_plate()
fw = sys.stdout
for i in plate:
for j, p in enumerate(i):
if j != 0:
fw.write('|')
fw.write(p)
fw.write('\n') | %prog t384
Print out a table for converting between 96-well and 384-well plates |
def read_adjacency_matrix(file_path, separator):
"""
Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.
Inputs: - file_path: The path where the adjacency matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Read all file rows
for file_row in file_row_generator:
source_node = np.int64(file_row[0])
target_node = np.int64(file_row[1])
# Add edge
append_row(source_node)
append_col(target_node)
# Since this is an undirected network also add the reciprocal edge
append_row(target_node)
append_col(source_node)
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
number_of_nodes = np.max(row) # I assume that there are no missing nodes at the end.
# Array count should start from 0.
row -= 1
col -= 1
# Form sparse adjacency matrix
adjacency_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
return adjacency_matrix | Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.
Inputs: - file_path: The path where the adjacency matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format. |
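The reader above boils down to the standard SciPy pattern of building a COO matrix from paired row/column index arrays; a self-contained sketch of that pattern on a toy 1-based edge list:
import numpy as np
from scipy import sparse
edges = np.array([[1, 2], [2, 3]], dtype=np.int64)      # toy undirected edge list, 1-based node ids
row = np.concatenate([edges[:, 0], edges[:, 1]]) - 1    # add both directions, shift to 0-based
col = np.concatenate([edges[:, 1], edges[:, 0]]) - 1
data = np.ones_like(row, dtype=np.float64)
adjacency = sparse.coo_matrix((data, (row, col)), shape=(3, 3))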
def reload_list(self):
'''Press R in home view to retrieve quiz list'''
self.leetcode.load()
if self.leetcode.quizzes and len(self.leetcode.quizzes) > 0:
self.home_view = self.make_listview(self.leetcode.quizzes)
self.view_stack = []
self.goto_view(self.home_view) | Press R in home view to retrieve quiz list |
def _salt_send_event(opaque, conn, data):
'''
Convenience function adding common data to the event and sending it
on the salt event bus.
:param opaque: the opaque data that is passed to the callback.
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param data: additional event data dict to send
'''
tag_prefix = opaque['prefix']
object_type = opaque['object']
event_type = opaque['event']
# Prepare the connection URI to fit in the tag
# qemu+ssh://user@host:1234/system -> qemu+ssh/user@host:1234/system
uri = urlparse(conn.getURI())
uri_tag = [uri.scheme]
if uri.netloc:
uri_tag.append(uri.netloc)
path = uri.path.strip('/')
if path:
uri_tag.append(path)
uri_str = "/".join(uri_tag)
# Append some common data
all_data = {
'uri': conn.getURI()
}
all_data.update(data)
tag = '/'.join((tag_prefix, uri_str, object_type, event_type))
# Actually send the event in salt
if __opts__.get('__role') == 'master':
salt.utils.event.get_master_event(
__opts__,
__opts__['sock_dir']).fire_event(all_data, tag)
else:
__salt__['event.send'](tag, all_data) | Convenience function adding common data to the event and sending it
on the salt event bus.
:param opaque: the opaque data that is passed to the callback.
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param data: additional event data dict to send |
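The tag construction above is just a reshaping of the libvirt connection URI; a standalone sketch of the same transformation (shown with Python 3's urllib, whereas the handler imports urlparse through Salt's compatibility layer):
from urllib.parse import urlparse
uri = urlparse('qemu+ssh://user@host:1234/system')
parts = [uri.scheme]
if uri.netloc:
    parts.append(uri.netloc)
path = uri.path.strip('/')
if path:
    parts.append(path)
print('/'.join(parts))   # qemu+ssh/user@host:1234/system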
def set_context_suffix(self, name, suffix):
"""Set a context's suffix.
This will be applied to all wrappers for the tools in this context. For
example, a tool called 'foo' would appear as 'foo<suffix>' in the
suite's bin path.
Args:
name (str): Name of the context to suffix.
suffix (str): Suffix to apply to tools.
"""
data = self._context(name)
data["suffix"] = suffix
self._flush_tools() | Set a context's suffix.
This will be applied to all wrappers for the tools in this context. For
example, a tool called 'foo' would appear as 'foo<suffix>' in the
suite's bin path.
Args:
name (str): Name of the context to suffix.
suffix (str): Suffix to apply to tools. |
def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
"""Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru
"""
# check cache first
cache_key = "{}:{}".format(site_code, course_url)
response = cache.get(cache_key)
if not response:
try:
sailthru_response = sailthru_client.api_get("content", {"id": course_url})
if not sailthru_response.is_ok():
response = {}
else:
response = sailthru_response.json
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
except SailthruClientError:
response = {}
if not response:
logger.error('Could not get course data from Sailthru on enroll/purchase event. '
'Calling Ecommerce Course API to get course info for enrollment confirmation email')
response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
if response:
cache.set(cache_key, response, config.get('SAILTHRU_CACHE_TTL_SECONDS'))
return response | Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru |
def convert_epoch_to_timestamp(cls, timestamp, tsformat):
"""Converts the given float representing UNIX-epochs into an actual timestamp.
:param float timestamp: Timestamp as UNIX-epochs.
:param string tsformat: Format of the given timestamp. This is used to convert the
timestamp from UNIX epochs. For valid examples take a look into the
:py:func:`time.strptime` documentation.
:return: Returns the timestamp as defined in format.
:rtype: string
"""
return time.strftime(tsformat, time.gmtime(timestamp)) | Converts the given float representing UNIX-epochs into an actual timestamp.
:param float timestamp: Timestamp as UNIX-epochs.
:param string tsformat: Format of the given timestamp. This is used to convert the
timestamp from UNIX epochs. For valid examples take a look into the
:py:func:`time.strptime` documentation.
:return: Returns the timestamp as defined in format.
:rtype: string |
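A quick worked example of what the conversion does, using the same time.gmtime/strftime pair as the method (epoch 0 is 1970-01-01 00:00:00 UTC):
import time
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(0))        # '1970-01-01 00:00:00'
time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(86400))   # '1970-01-02T00:00:00Z'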
def DbGetHostServersInfo(self, argin):
""" Get info about all servers running on specified host, name, mode and level
:param argin: Host name
:type: tango.DevString
:return: Server info for all servers running on specified host
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetHostServersInfo()")
argin = replace_wildcard(argin)
return self.db.get_host_servers_info(argin) | Get info about all servers running on specified host, name, mode and level
:param argin: Host name
:type: tango.DevString
:return: Server info for all servers running on specified host
:rtype: tango.DevVarStringArray |
def rename_file(db, user_id, old_api_path, new_api_path):
"""
Rename a file.
"""
# Overwriting existing files is disallowed.
if file_exists(db, user_id, new_api_path):
raise FileExists(new_api_path)
old_dir, old_name = split_api_filepath(old_api_path)
new_dir, new_name = split_api_filepath(new_api_path)
if old_dir != new_dir:
raise ValueError(
dedent(
"""
Can't rename object to new directory.
Old Path: {old_api_path}
New Path: {new_api_path}
""".format(
old_api_path=old_api_path,
new_api_path=new_api_path
)
)
)
db.execute(
files.update().where(
_file_where(user_id, old_api_path),
).values(
name=new_name,
created_at=func.now(),
)
) | Rename a file. |
def generate_hash(data, algorithm='chd_ph', hash_fns=(), chd_keys_per_bin=1,
chd_load_factor=None, fch_bits_per_key=None,
num_graph_vertices=None, brz_memory_size=8,
brz_temp_dir=None, brz_max_keys_per_bucket=128,
bdz_precomputed_rank=7, chd_avg_keys_per_bucket=4):
"""
Generates a new Minimal Perfect Hash (MPH)
Parameters
----------
data : list, array-like, file-like
The input that is used to generate the minimal perfect hash.
Be aware, in most cases the input is expected to be distinct, and
many of the algorithms benefit from the input being sorted.
algorithm : string, optional
{chd_ph (default), chd, bmz, bmz8, chm, brz, fch, bdz, bdz_ph}
The algorithm to use in generating MPH's, choice of:
chd / chd_ph - Compress Hash and Displace (default)
(http://cmph.sourceforge.net/chd.html)
- It is the fastest algorithm to build PHFs and MPHFs in linear
time.
- It generates the most compact PHFs and MPHFs we know of.
- It can generate PHFs with a load factor up to 99 %.
- It can be used to generate t-perfect hash functions. A
t-perfect hash function allows at most t collisions in a given
bin. It is a well-known fact that modern memories are
organized as blocks which constitute transfer unit. Example of
such blocks are cache lines for internal memory or sectors for
hard disks. Thus, it can be very useful for devices that
carry out I/O operations in blocks.
- It is a two level scheme. It uses a first level hash function
to split the key set in buckets of average size determined by
a parameter b in the range [1,32]. In the second level it uses
displacement values to resolve the collisions that have given
rise to the buckets.
- It can generate MPHFs that can be stored in approximately 2.07
bits per key.
- For a load factor equal to the maximum one that is achieved by
the BDZ algorithm (81 %), the resulting PHFs are stored in
approximately 1.40 bits per key.
bdz - BDZ / BPZ algorithm
(http://cmph.sourceforge.net/bdz.html)
- It is very simple and efficient. It outperforms all others
except CHD.
- It constructs both PHFs and MPHFs in linear time.
- The maximum load factor one can achieve for a PHF is 1/1.23.
- It is based on acyclic random 3-graphs. A 3-graph is a
generalization of a graph where each edge connects 3 vertices
instead of only 2.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs can be stored in only (2 + x)cn bits,
where c should be larger than or equal to 1.23 and x is a
constant larger than 0 (actually, x = 1/b and b is a parameter
that should be larger than 2). For c = 1.23 and b = 8, the
resulting functions are stored in approximately 2.6 bits per
key.
- For its maximum load factor (81 %), the resulting PHFs are
stored in approximately 1.95 bits per key.
bmz - Botelho, Menoti and Ziviani algorithm:
(http://cmph.sourceforge.net/bdz.html)
- Constructs MPHFs in linear time.
- It is based on cyclic random graphs. This makes it faster than
the CHM algorithm.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs are more compact than the ones generated by
the CHM algorithm and can be stored in 4cn bytes, where c is in
the range [0.93,1.15].
brz - BRZ algorithm:
(http://cmph.sourceforge.net/brz.html)
- A very fast external memory based algorithm for constructing
minimal perfect hash functions for sets in the order of
billions of keys.
- It works in linear time.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs can be stored using less than 8.0 bits per
key.
chm - Czech, Havas and Majewski algorithm:
(http://cmph.sourceforge.net/chm.html)
- Construct minimal MPHFs in linear time.
- It is based on acyclic random graphs
- The resulting MPHFs are order preserving.
- The resulting MPHFs are stored in 4cn bytes, where c is greater
than 2.
fch - Fox, Chen and Heath algorithm:
(http://cmph.sourceforge.net/chm.html)
- Construct minimal perfect hash functions that require less than
4 bits per key to be stored.
- The resulting MPHFs are very compact and very efficient at
evaluation time
- The algorithm is only efficient for small sets.
- It is used as internal algorithm in the BRZ algorithm to
efficiently solve larger problems and even so to generate MPHFs
that require approximately 4.1 bits per key to be stored. For
that, you just need to set the parameters -a to brz and -c to a
value larger than or equal to 2.6.
hash_fns : list {jenkins (default), count} optional
Internal hash functions to use inside MPH generation functions,
can be multiple fns as a list.
chd_keys_per_bin : int [1 to 128], optional
Set the number of keys per bin for a t-perfect hashing function. A
t-perfect hash function allows at most t collisions in a given bin.
This parameter applies only to the `chd` and `chd_ph` algorithms.
Its value should be an integer in the range [1, 128].
Default is 1
chd_load_factor : float, optional
The load factor used in the `chd_ph` algorithm
fch_bits_per_key : int, optional
The number of bits per key required in the FCH algorithm
num_graph_vertices : int, optional
The number of vertices in the graph for the algorithms BMZ and CHM
brz_memory_size : int (default 8), optional
Main memory availability (in MB) used in BRZ algorithm
Default is 8Mb
brz_temp_dir : string, optional
Temporary directory used in BRZ algorithm
brz_max_keys_per_bucket : int [64 to 175] (default 128), optional
Used to make the maximal number of keys in a bucket lower than 256.
In this case its value should be an integer in the range [64,175].
Default is 128.
bdz_precomputed_rank : int [3 to 10] (default 7), optional
For BDZ it is used to determine the size of some precomputed rank
information and its value should be an integer in the range [3,10].
Default is 7.
The larger this value, the more compact the resulting
functions and the slower they are at evaluation time.
chd_avg_keys_per_bucket : int [1 to 32] (default 4), optional
For CHD and CHD_PH it is used to set the average number of keys per
bucket and its value should be an integer in the range [1,32].
Default is 4.
The larger this value, the slower the construction of the
functions.
Returns
-------
MPH
A wrapper object that represents a minimal perfect hash in memory
Raises
------
ValueError
If the arguments presented are incomplete or incompatible
RuntimeError
If the MPH generation fails
"""
cfg = _cfg(algorithm, hash_fns, chd_keys_per_bin, chd_load_factor,
fch_bits_per_key, num_graph_vertices, brz_memory_size,
brz_temp_dir, brz_max_keys_per_bucket, bdz_precomputed_rank,
chd_avg_keys_per_bucket)
with create_adapter(_cmph, ffi, data) as source:
with _create_config(source, cfg) as config:
_mph = _cmph.cmph_new(config)
if not _mph:
raise RuntimeError("MPH generation failed")
return MPH(_mph) | Generates a new Minimal Perfect Hash (MPH)
Parameters
----------
data : list, array-like, file-like
The input that is used to generate the minimal perfect hash.
Be aware, in most cases the input is expected to be distinct, and
many of the algorithms benefit from the input being sorted.
algorithm : string, optional
{chd_ph (default), chd, bmz, bmz8, chm, brz, fch, bdz, bdz_ph}
The algorithm to use in generating MPH's, choice of:
chd / chd_ph - Compress Hash and Displace (default)
(http://cmph.sourceforge.net/chd.html)
- It is the fastest algorithm to build PHFs and MPHFs in linear
time.
- It generates the most compact PHFs and MPHFs we know of.
- It can generate PHFs with a load factor up to 99 %.
- It can be used to generate t-perfect hash functions. A
t-perfect hash function allows at most t collisions in a given
bin. It is a well-known fact that modern memories are
organized as blocks which constitute transfer unit. Example of
such blocks are cache lines for internal memory or sectors for
hard disks. Thus, it can be very useful for devices that
carry out I/O operations in blocks.
- It is a two level scheme. It uses a first level hash function
to split the key set in buckets of average size determined by
a parameter b in the range [1,32]. In the second level it uses
displacement values to resolve the collisions that have given
rise to the buckets.
- It can generate MPHFs that can be stored in approximately 2.07
bits per key.
- For a load factor equal to the maximum one that is achieved by
the BDZ algorithm (81 %), the resulting PHFs are stored in
approximately 1.40 bits per key.
bdz - BDZ / BPZ algorithm
(http://cmph.sourceforge.net/bdz.html)
- It is very simple and efficient. It outperforms all others
except CHD.
- It constructs both PHFs and MPHFs in linear time.
- The maximum load factor one can achieve for a PHF is 1/1.23.
- It is based on acyclic random 3-graphs. A 3-graph is a
generalization of a graph where each edge connects 3 vertices
instead of only 2.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs can be stored in only (2 + x)cn bits,
where c should be larger than or equal to 1.23 and x is a
constant larger than 0 (actually, x = 1/b and b is a parameter
that should be larger than 2). For c = 1.23 and b = 8, the
resulting functions are stored in approximately 2.6 bits per
key.
- For its maximum load factor (81 %), the resulting PHFs are
stored in approximately 1.95 bits per key.
bmz - Botelho, Menoti and Ziviani algorithm:
(http://cmph.sourceforge.net/bdz.html)
- Constructs MPHFs in linear time.
- It is based on cyclic random graphs. This makes it faster than
the CHM algorithm.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs are more compact than the ones generated by
the CHM algorithm and can be stored in 4cn bytes, where c is in
the range [0.93,1.15].
brz - BRZ algorithm:
(http://cmph.sourceforge.net/brz.html)
- A very fast external memory based algorithm for constructing
minimal perfect hash functions for sets in the order of
billions of keys.
- It works in linear time.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs can be stored using less than 8.0 bits per
key.
chm - Czech, Havas and Majewski algorithm:
(http://cmph.sourceforge.net/chm.html)
- Construct minimal MPHFs in linear time.
- It is based on acyclic random graphs
- The resulting MPHFs are order preserving.
- The resulting MPHFs are stored in 4cn bytes, where c is greater
than 2.
fch - Fox, Chen and Heath algorithm:
(http://cmph.sourceforge.net/chm.html)
- Construct minimal perfect hash functions that require less than
4 bits per key to be stored.
- The resulting MPHFs are very compact and very efficient at
evaluation time
- The algorithm is only efficient for small sets.
- It is used as internal algorithm in the BRZ algorithm to
efficiently solve larger problems and even so to generate MPHFs
that require approximately 4.1 bits per key to be stored. For
that, you just need to set the parameters -a to brz and -c to a
value larger than or equal to 2.6.
hash_fns : list {jenkins (default), count} optional
Internal hash functions to use inside MPH generation functions,
can be multiple fns as a list.
chd_keys_per_bin : int [1 to 128], optional
Set the number of keys per bin for a t-perfect hashing function. A
t-perfect hash function allows at most t collisions in a given bin.
This parameter applies only to the `chd` and `chd_ph` algorithms.
Its value should be an integer in the range [1, 128].
Default is 1
chd_load_factor : float, optional
The load factor used in the `chd_ph` algorithm
fch_bits_per_key : int, optional
The number of bits per key required in the FCH algorithm
num_graph_vertices : int, optional
The number of vertices in the graph for the algorithms BMZ and CHM
brz_memory_size : int (default 8), optional
Main memory availability (in MB) used in BRZ algorithm
Default is 8Mb
brz_temp_dir : string, optional
Temporary directory used in BRZ algorithm
brz_max_keys_per_bucket : int [64 to 175] (default 128), optional
Used to make the maximal number of keys in a bucket lower than 256.
In this case its value should be an integer in the range [64,175].
Default is 128.
bdz_precomputed_rank : int [3 to 10] (default 7), optional
For BDZ it is used to determine the size of some precomputed rank
information and its value should be an integer in the range [3,10].
Default is 7.
The larger this value, the more compact the resulting
functions and the slower they are at evaluation time.
chd_avg_keys_per_bucket : int [1 to 32] (default 4), optional
For CHD and CHD_PH it is used to set the average number of keys per
bucket and its value should be an integer in the range [1,32].
Default is 4.
The larger this value, the slower the construction of the
functions.
Returns
-------
MPH
A wrapper object that represents a minimal perfect hash in memory
Raises
------
ValueError
If the arguments presented are incomplete or incompatible
RuntimeError
If the MPH generation fails |
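A minimal usage sketch for the generator above, relying only on parameters spelled out in the docstring; the lookup step is omitted because the MPH wrapper's query interface is not shown here, and the cmph bindings the module wraps are assumed to be installed:
keys = sorted({"apple", "banana", "cherry", "date"})
mph = generate_hash(keys, algorithm="chd", chd_avg_keys_per_bucket=4)
# mph now wraps a minimal perfect hash built over `keys`; see the MPH class for lookups.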
def save(self, dolist=0):
"""Return .par format string for this parameter
If dolist is set, returns fields as a list of strings. Default
is to return a single string appropriate for writing to a file.
"""
quoted = not dolist
array_size = 1
for d in self.shape:
array_size = d*array_size
ndim = len(self.shape)
fields = (7+2*ndim+len(self.value))*[""]
fields[0] = self.name
fields[1] = self.type
fields[2] = self.mode
fields[3] = str(ndim)
next = 4
for d in self.shape:
fields[next] = str(d); next += 1
fields[next] = '1'; next += 1
nvstart = 7+2*ndim
if self.choice is not None:
schoice = list(map(self.toString, self.choice))
schoice.insert(0,'')
schoice.append('')
fields[nvstart-3] = repr('|'.join(schoice))
elif self.min not in [None,INDEF]:
fields[nvstart-3] = self.toString(self.min,quoted=quoted)
# insert an escaped line break before min field
if quoted:
fields[nvstart-3] = '\\\n' + fields[nvstart-3]
if self.max not in [None,INDEF]:
fields[nvstart-2] = self.toString(self.max,quoted=quoted)
if self.prompt:
if quoted:
sprompt = repr(self.prompt)
else:
sprompt = self.prompt
# prompt can have embedded newlines (which are printed)
sprompt = sprompt.replace(r'\012', '\n')
sprompt = sprompt.replace(r'\n', '\n')
fields[nvstart-1] = sprompt
for i in range(len(self.value)):
fields[nvstart+i] = self.toString(self.value[i],quoted=quoted)
# insert an escaped line break before value fields
if dolist:
return fields
else:
fields[nvstart] = '\\\n' + fields[nvstart]
return ','.join(fields) | Return .par format string for this parameter
If dolist is set, returns fields as a list of strings. Default
is to return a single string appropriate for writing to a file. |
def vectorize_inhibit(audio: np.ndarray) -> np.ndarray:
"""
Returns an array of inputs generated from the
wake word audio that shouldn't cause an activation
"""
def samp(x):
return int(pr.sample_rate * x)
inputs = []
for offset in range(samp(inhibit_t), samp(inhibit_dist_t), samp(inhibit_hop_t)):
if len(audio) - offset < samp(pr.buffer_t / 2.):
break
inputs.append(vectorize(audio[:-offset]))
return np.array(inputs) if inputs else np.empty((0, pr.n_features, pr.feature_size)) | Returns an array of inputs generated from the
wake word audio that shouldn't cause an activation |
def get_definition(self, stmt: Statement,
sctx: SchemaContext) -> Tuple[Statement, SchemaContext]:
"""Find the statement defining a grouping or derived type.
Args:
stmt: YANG "uses" or "type" statement.
sctx: Schema context where the definition is used.
Returns:
A tuple consisting of the definition statement ('grouping' or
'typedef') and schema context of the definition.
Raises:
ValueError: If `stmt` is neither "uses" nor "type" statement.
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in the argument of `stmt`
is not declared.
DefinitionNotFound: If the corresponding definition is not found.
"""
if stmt.keyword == "uses":
kw = "grouping"
elif stmt.keyword == "type":
kw = "typedef"
else:
raise ValueError("not a 'uses' or 'type' statement")
loc, did = self.resolve_pname(stmt.argument, sctx.text_mid)
if did == sctx.text_mid:
dstmt = stmt.get_definition(loc, kw)
if dstmt:
return (dstmt, sctx)
else:
dstmt = self.modules[did].statement.find1(kw, loc)
if dstmt:
return (dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, did))
for sid in self.modules[did].submodules:
dstmt = self.modules[sid].statement.find1(kw, loc)
if dstmt:
return (
dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, sid))
raise DefinitionNotFound(kw, stmt.argument) | Find the statement defining a grouping or derived type.
Args:
stmt: YANG "uses" or "type" statement.
sctx: Schema context where the definition is used.
Returns:
A tuple consisting of the definition statement ('grouping' or
'typedef') and schema context of the definition.
Raises:
ValueError: If `stmt` is neither "uses" nor "type" statement.
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in the argument of `stmt`
is not declared.
DefinitionNotFound: If the corresponding definition is not found. |
def clear_file(self):
"""stub"""
if (self.get_file_metadata().is_read_only() or
self.get_file_metadata().is_required()):
raise NoAccess()
if 'assetId' in self.my_osid_object_form._my_map['fileId']:
rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
catalog_id_str = ''
if 'assignedBankIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedBankIds'][0]
elif 'assignedRepositoryIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedRepositoryIds'][0]
try:
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
except AttributeError:
# for update forms
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
aas.delete_asset(Id(self.my_osid_object_form._my_map['fileId']['assetId']))
self.my_osid_object_form._my_map['fileId'] = \
dict(self.get_file_metadata().get_default_object_values()[0]) | stub |
def availabledirs(self) -> Folder2Path:
"""Names and paths of the available working directories.
Available working directories are those stored in the
base directory of the respective |FileManager| subclass.
Folders with names starting with an underscore are ignored
(use this for directories handling additional data files,
if you like). Zipped directories, which can be unpacked
on the fly, do also count as available directories:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename/folder1')
... os.makedirs('projectname/basename/folder2')
... open('projectname/basename/folder3.zip', 'w').close()
... os.makedirs('projectname/basename/_folder4')
... open('projectname/basename/folder5.tar', 'w').close()
... filemanager.availabledirs # doctest: +ELLIPSIS
Folder2Path(folder1=.../projectname/basename/folder1,
folder2=.../projectname/basename/folder2,
folder3=.../projectname/basename/folder3.zip)
"""
directories = Folder2Path()
for directory in os.listdir(self.basepath):
if not directory.startswith('_'):
path = os.path.join(self.basepath, directory)
if os.path.isdir(path):
directories.add(directory, path)
elif directory.endswith('.zip'):
directories.add(directory[:-4], path)
return directories | Names and paths of the available working directories.
Available working directories are those stored in the
base directory of the respective |FileManager| subclass.
Folders with names starting with an underscore are ignored
(use this for directories handling additional data files,
if you like). Zipped directories, which can be unpacked
on the fly, do also count as available directories:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename/folder1')
... os.makedirs('projectname/basename/folder2')
... open('projectname/basename/folder3.zip', 'w').close()
... os.makedirs('projectname/basename/_folder4')
... open('projectname/basename/folder5.tar', 'w').close()
... filemanager.availabledirs # doctest: +ELLIPSIS
Folder2Path(folder1=.../projectname/basename/folder1,
folder2=.../projectname/basename/folder2,
folder3=.../projectname/basename/folder3.zip) |
def _extract_secrets_from_file(self, f, filename):
"""Extract secrets from a given file object.
:type f: File object
:type filename: string
"""
try:
log.info("Checking file: %s", filename)
for results, plugin in self._results_accumulator(filename):
results.update(plugin.analyze(f, filename))
f.seek(0)
except UnicodeDecodeError:
log.warning("%s failed to load.", filename) | Extract secrets from a given file object.
:type f: File object
:type filename: string |
def add_parent(self,node):
"""
Add a parent to this node. This node will not be executed until the
parent node has run successfully.
@param node: CondorDAGNode to add as a parent.
"""
if not isinstance(node, (CondorDAGNode,CondorDAGManNode) ):
raise CondorDAGNodeError("Parent must be a CondorDAGNode or a CondorDAGManNode")
self.__parents.append( node ) | Add a parent to this node. This node will not be executed until the
parent node has run successfully.
@param node: CondorDAGNode to add as a parent. |
def get_all_children(self):
"""Return all children GO IDs."""
all_children = set()
for parent in self.children:
all_children.add(parent.item_id)
all_children |= parent.get_all_children()
return all_children | Return all children GO IDs. |
def is_valid_catalog(self, catalog=None):
"""Valida que un archivo `data.json` cumpla con el schema definido.
Chequea que el data.json tiene todos los campos obligatorios y que
tanto los campos obligatorios como los opcionales siguen la estructura
definida en el schema.
Args:
catalog (str o dict): Catálogo (dict, JSON o XLSX) a ser validado.
Si no se pasa, valida este catálogo.
Returns:
bool: True si el data.json cumple con el schema, sino False.
"""
catalog = catalog or self
return validation.is_valid_catalog(catalog, validator=self.validator) | Validate that a `data.json` file complies with the defined schema.
Checks that the data.json has all the required fields and that
both the required and the optional fields follow the structure
defined in the schema.
Args:
catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated.
If not passed, this catalog is validated.
Returns:
bool: True if the data.json complies with the schema, False otherwise.
def projects(self):
""" Returns a set of all projects in this list. """
result = set()
for todo in self._todos:
projects = todo.projects()
result = result.union(projects)
return result | Returns a set of all projects in this list. |
def get_instance_status(self):
Get the status of the instance for this Streaming Analytics service.
Returns:
dict: JSON response for the instance status operation.
"""
status_url = self._get_url('status_path')
res = self.rest_client.session.get(status_url)
_handle_http_errors(res)
return res.json() | Get the status of the instance for this Streaming Analytics service.
Returns:
dict: JSON response for the instance status operation. |
def _retry(self, context, backoff):
'''
A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None
'''
# If the context does not contain a count parameter, this request has not
# been retried yet. Add the count parameter to track the number of retries.
if not hasattr(context, 'count'):
context.count = 0
# Determine whether to retry, and if so increment the count, modify the
# request as desired, and return the backoff.
if self._should_retry(context):
backoff_interval = backoff(context)
context.count += 1
# If retry to secondary is enabled, attempt to change the host if the
# request allows it
if self.retry_to_secondary:
self._set_next_host_location(context)
# rewind the request body if it is a stream
if hasattr(context.request.body, 'read'):
# no position was saved, then retry would not work
if context.body_position is None:
return None
else:
try:
# attempt to rewind the body to the initial position
context.request.body.seek(context.body_position, SEEK_SET)
except UnsupportedOperation:
# if body is not seekable, then retry would not work
return None
return backoff_interval
return None | A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None |
def recent(self, with_catalog=True, with_date=True):
'''
List posts that were recently edited.
'''
kwd = {
'pager': '',
'title': 'Recent posts.',
'with_catalog': with_catalog,
'with_date': with_date,
}
self.render('list/post_list.html',
kwd=kwd,
view=MPost.query_recent(num=20),
postrecs=MPost.query_recent(num=2),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG, ) | List posts that were recently edited.
def create(self, throw_on_exists=False):
"""
Creates a database defined by the current database object, if it
does not already exist, and raises a CloudantDatabaseException if the operation
fails. If the database already exists then this method call is a no-op.
:param bool throw_on_exists: Boolean flag dictating whether or
not to throw a CloudantDatabaseException when attempting to
create a database that already exists.
:returns: The database object
"""
if not throw_on_exists and self.exists():
return self
resp = self.r_session.put(self.database_url, params={
'partitioned': TYPE_CONVERTERS.get(bool)(self._partitioned)
})
if resp.status_code == 201 or resp.status_code == 202:
return self
raise CloudantDatabaseException(
resp.status_code, self.database_url, resp.text
) | Creates a database defined by the current database object, if it
does not already exist, and raises a CloudantDatabaseException if the operation
fails. If the database already exists then this method call is a no-op.
:param bool throw_on_exists: Boolean flag dictating whether or
not to throw a CloudantDatabaseException when attempting to
create a database that already exists.
:returns: The database object |
def before(method_name):
"""
Run the given method prior to the decorated view.
If you return anything besides ``None`` from the given method,
its return values will replace the arguments of the decorated
view.
If you return an instance of ``HttpResponse`` from the given method,
Respite will return it immediately without delegating the request to the
decorated view.
Example usage::
class ArticleViews(Views):
@before('_load')
def show(self, request, article):
return self._render(
request = request,
template = 'show',
context = {
'article': article
}
)
def _load(self, request, id):
try:
return request, Article.objects.get(id=id)
except Article.DoesNotExist:
return self._error(request, 404, message='The article could not be found.')
:param method: A string describing a class method.
"""
def decorator(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
returns = getattr(self, method_name)(*args, **kwargs)
if returns is None:
return function(self, *args, **kwargs)
else:
if isinstance(returns, HttpResponse):
return returns
else:
return function(self, *returns)
return wrapper
return decorator | Run the given method prior to the decorated view.
If you return anything besides ``None`` from the given method,
its return values will replace the arguments of the decorated
view.
If you return an instance of ``HttpResponse`` from the given method,
Respite will return it immediately without delegating the request to the
decorated view.
Example usage::
class ArticleViews(Views):
@before('_load')
def show(self, request, article):
return self._render(
request = request,
template = 'show',
context = {
'article': article
}
)
def _load(self, request, id):
try:
return request, Article.objects.get(id=id)
except Article.DoesNotExist:
return self._error(request, 404, message='The article could not be found.')
:param method: A string describing a class method. |
def to_report_json(self):
"""
Generate a summary in json format
"""
return self.reporter.json(self.n_lines, self.n_assocs, self.skipped) | Generate a summary in json format |
def execute(self, fn, *args, **kwargs):
"""Execute an operation and return the result."""
if self.in_executor_context():
corofn = asyncio.coroutine(lambda: fn(*args, **kwargs))
return corofn()
future = self.submit(fn, *args, **kwargs)
return future.result() | Execute an operation and return the result. |
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(imp, 'get_tag'):
return
base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc' | Generate file paths to be excluded for namespace packages (bytecode
cache files). |
def autoencoder_residual_text():
"""Residual autoencoder model for text."""
hparams = autoencoder_residual()
hparams.bottleneck_bits = 32
hparams.batch_size = 1024
hparams.hidden_size = 64
hparams.max_hidden_size = 512
hparams.bottleneck_noise = 0.0
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
hparams.autoregressive_mode = "none"
hparams.sample_width = 1
return hparams | Residual autoencoder model for text. |
def setDesigns(self, F, A):
""" set fixed effect designs """
F = to_list(F)
A = to_list(A)
assert len(A) == len(F), 'MeanKronSum: A and F must have same length!'
n_terms = len(F)
n_covs = 0
k = 0
l = 0
for ti in range(n_terms):
assert F[ti].shape[0] == self._N, 'MeanKronSum: Dimension mismatch'
assert A[ti].shape[1] == self._P, 'MeanKronSum: Dimension mismatch'
n_covs += F[ti].shape[1] * A[ti].shape[0]
k += F[ti].shape[1]
l += A[ti].shape[0]
self._n_terms = n_terms
self._n_covs = n_covs
self._k = k
self._l = l
self._F = F
self._A = A
self._b = sp.zeros((n_covs, 1))
self.clear_cache('predict_in_sample', 'Yres', 'designs')
self._notify('designs')
self._notify() | set fixed effect designs |
def create_mod_site(self, mc):
"""Create modification site for the BaseAgent from a ModCondition."""
site_name = get_mod_site_name(mc)
(unmod_site_state, mod_site_state) = states[mc.mod_type]
self.create_site(site_name, (unmod_site_state, mod_site_state))
site_anns = [Annotation((site_name, mod_site_state), mc.mod_type,
'is_modification')]
if mc.residue:
site_anns.append(Annotation(site_name, mc.residue, 'is_residue'))
if mc.position:
site_anns.append(Annotation(site_name, mc.position, 'is_position'))
self.site_annotations += site_anns | Create modification site for the BaseAgent from a ModCondition. |
def shortname(inputid): # type: (Text) -> Text
"""Returns the last segment of the provided fragment or path."""
parsed_id = urllib.parse.urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split(u"/")[-1]
return parsed_id.path.split(u"/")[-1] | Returns the last segment of the provided fragment or path. |
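Two worked examples of the rule above (the fragment wins over the path when one is present); the URLs are illustrative only:
shortname(u"http://example.com/schema#types/Record")   # 'Record'  (last segment of the fragment)
shortname(u"http://example.com/schema/Record")          # 'Record'  (last segment of the path)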
def maps_get_rules_output_rules_value(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
value = ET.SubElement(rules, "value")
value.text = kwargs.pop('value')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def cross_validation(scheme_class, num_examples, num_folds, strict=True,
**kwargs):
"""Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set.
"""
if strict and num_examples % num_folds != 0:
raise ValueError(("{} examples are not divisible in {} evenly-sized " +
"folds. To allow this, have a look at the " +
"`strict` argument.").format(num_examples,
num_folds))
for i in xrange(num_folds):
begin = num_examples * i // num_folds
end = num_examples * (i+1) // num_folds
train = scheme_class(list(chain(xrange(0, begin),
xrange(end, num_examples))),
**kwargs)
valid = scheme_class(xrange(begin, end), **kwargs)
if strict:
yield (train, valid)
else:
yield (train, valid, end - begin) | Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set. |
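A minimal sketch of how the generator above can be driven; `ListScheme` is a stand-in for any IndexScheme-like class whose constructor takes an iterable of indices, and the xrange/chain names the function relies on are assumed to be importable:
class ListScheme:
    def __init__(self, indices):
        self.indices = list(indices)
for train, valid in cross_validation(ListScheme, num_examples=100, num_folds=5):
    # each of the 5 folds: 80 training indices, 20 validation indices
    print(len(train.indices), len(valid.indices))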
def request_response(self):
"""Verify that a card is still present and get its operating mode.
The Request Response command returns the current operating
state of the card. The operating state changes with the
authentication process, a card is in Mode 0 after power-up or
a Polling command, transitions to Mode 1 with Authentication1,
to Mode 2 with Authentication2, and Mode 3 with any of the
card issuance commands. The :meth:`request_response` method
returns the mode as an integer.
Command execution errors raise
:exc:`~nfc.tag.TagCommandError`.
"""
a, b, e = self.pmm[3] & 7, self.pmm[3] >> 3 & 7, self.pmm[3] >> 6
timeout = 302E-6 * (b + 1 + a + 1) * 4**e
data = self.send_cmd_recv_rsp(0x04, '', timeout, check_status=False)
if len(data) != 1:
log.debug("insufficient data received from tag")
raise tt3.Type3TagCommandError(tt3.DATA_SIZE_ERROR)
return data[0] | Verify that a card is still present and get its operating mode.
The Request Response command returns the current operating
state of the card. The operating state changes with the
authentication process, a card is in Mode 0 after power-up or
a Polling command, transitions to Mode 1 with Authentication1,
to Mode 2 with Authentication2, and Mode 3 with any of the
card issuance commands. The :meth:`request_response` method
returns the mode as an integer.
Command execution errors raise
:exc:`~nfc.tag.TagCommandError`. |
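A worked example of the timing-parameter decode used above, on a hypothetical PMM byte value:
pmm3 = 0x12                                    # hypothetical PMM[3] value
a, b, e = pmm3 & 7, pmm3 >> 3 & 7, pmm3 >> 6   # a = 2, b = 2, e = 0
timeout = 302E-6 * (b + 1 + a + 1) * 4**e      # 302 us * 6 = ~1.81 ms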
def compute_batch(self, duplicate_manager=None, context_manager=None):
"""
Computes the elements of the batch sequentially by penalizing the acquisition.
"""
from ...acquisitions import AcquisitionLP
assert isinstance(self.acquisition, AcquisitionLP)
self.acquisition.update_batches(None,None,None)
# --- GET first element in the batch
X_batch = self.acquisition.optimize()[0]
k=1
if self.batch_size >1:
# ---------- Approximate the constants of the method
L = estimate_L(self.acquisition.model.model,self.acquisition.space.get_bounds())
Min = self.acquisition.model.model.Y.min()
# --- GET the remaining elements
while k<self.batch_size:
self.acquisition.update_batches(X_batch,L,Min)
new_sample = self.acquisition.optimize()[0]
X_batch = np.vstack((X_batch,new_sample))
k +=1
# --- Back to the non-penalized acquisition
self.acquisition.update_batches(None,None,None)
return X_batch | Computes the elements of the batch sequentially by penalizing the acquisition. |
def filter_incomplete_spectra(self, flimit=1000, percAccept=85):
"""Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
.. warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet!
"""
assert percAccept > 0 and percAccept < 100
def _retain_only_complete_spectra(item, fmax, acceptN):
"""Function called using pd.filter, applied to all spectra in the
data set. Return true if the number of data points <= **fmax** in
item is equal, or larger, than **acceptN**.
Parameters
----------
item : :py:class:`pandas.DataFrame`
dataframe containing one spectrum
fmax : float
maximum frequency up to which data points are counted
acceptN : int
the number of data points required to pass this test
Returns
-------
true : bool
if enough data points are present
false : bool
if not enough data points are present
"""
frequencies = item['frequency'].loc[item['frequency'] < fmax]
fN = frequencies.size
if fN >= acceptN:
return True
return False
group_abmn = self.data.groupby(['a', 'b', 'm', 'n'])
frequencies = np.array(
list(sorted(self.data.groupby('frequency').groups.keys()))
)
assert flimit >= frequencies.min() and flimit <= frequencies.max()
Nlimit = len(np.where(frequencies <= flimit)[0])
Naccept = np.ceil(Nlimit * percAccept / 100.0)
self.data = group_abmn.filter(
_retain_only_complete_spectra, fmax=flimit, acceptN=Naccept
).copy() | Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
.. warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet! |
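The core of the method above is pandas' groupby/filter idiom, where the predicate receives one sub-frame per group plus extra keyword arguments; a toy sketch of that idiom:
import pandas as pd
df = pd.DataFrame({'abmn': [1, 1, 2, 2],
                   'frequency': [1.0, 10.0, 1.0, 1000.0]})
def enough_points(group, fmax, acceptN):
    # keep the group only if it has at least acceptN rows below fmax
    return (group['frequency'] < fmax).sum() >= acceptN
kept = df.groupby('abmn').filter(enough_points, fmax=100.0, acceptN=2)   # keeps group 1 only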
def ExportClientsByKeywords(keywords, filename, token=None):
r"""A script to export clients summaries selected by a keyword search.
This script does a client search for machines matching all of keywords and
writes a .csv summary of the results to filename. Multi-value fields are '\n'
separated.
Args:
keywords: a list of keywords to search for
filename: the name of the file to write to, will be replaced if already
present
token: datastore token.
"""
index = client_index.CreateClientIndex(token=token)
client_list = index.LookupClients(keywords)
logging.info("found %d clients", len(client_list))
if not client_list:
return
writer = csv.DictWriter([
u"client_id",
u"hostname",
u"last_seen",
u"os",
u"os_release",
u"os_version",
u"users",
u"ips",
u"macs",
])
writer.WriteHeader()
for client in aff4.FACTORY.MultiOpen(client_list, token=token):
s = client.Schema
writer.WriteRow({
u"client_id": client.urn.Basename(),
u"hostname": client.Get(s.HOSTNAME),
u"os": client.Get(s.SYSTEM),
u"os_release": client.Get(s.OS_RELEASE),
u"os_version": client.Get(s.OS_VERSION),
u"ips": client.Get(s.HOST_IPS),
u"macs": client.Get(s.MAC_ADDRESS),
u"users": "\n".join(client.Get(s.USERNAMES, [])),
u"last_seen": client.Get(s.PING),
})
with io.open(filename, "w") as csv_out:
csv_out.write(writer.Content()) | r"""A script to export client summaries selected by a keyword search.
This script does a client search for machines matching all of the keywords and
writes a .csv summary of the results to filename. Multi-value fields are '\n'
separated.
Args:
keywords: a list of keywords to search for
filename: the name of the file to write to, will be replaced if already
present
token: datastore token. |
def on_exception(wait_gen,
exception,
max_tries=None,
jitter=full_jitter,
giveup=lambda e: False,
on_success=None,
on_backoff=None,
on_giveup=None,
**wait_gen_kwargs):
"""Returns decorator for backoff and retry triggered by exception.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
exception: An exception type (or tuple of types) which triggers
backoff.
max_tries: The maximum number of attempts to make before giving
up. Once exhausted, the exception will be allowed to escape.
The default value of None means there is no limit to the
number of tries. If a callable is passed, it will be
evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
giveup: Function accepting an exception instance and
returning whether or not to give up. Optional. The default
is to always continue.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration.
"""
success_hdlrs = _handlers(on_success)
backoff_hdlrs = _handlers(on_backoff, _log_backoff)
giveup_hdlrs = _handlers(on_giveup, _log_giveup)
def decorate(target):
@functools.wraps(target)
def retry(*args, **kwargs):
# change names because python 2.x doesn't have nonlocal
max_tries_ = _maybe_call(max_tries)
# there are no dictionary comprehensions in python 2.6
wait = wait_gen(**dict((k, _maybe_call(v))
for k, v in wait_gen_kwargs.items()))
tries = 0
while True:
try:
tries += 1
ret = target(*args, **kwargs)
except exception as e:
if giveup(e) or tries == max_tries_:
for hdlr in giveup_hdlrs:
hdlr({'target': target,
'args': args,
'kwargs': kwargs,
'tries': tries})
raise
value = next(wait)
try:
if jitter is not None:
seconds = jitter(value)
else:
seconds = value
except TypeError:
# support deprecated nullary jitter function signature
# which returns a delta rather than a jittered value
seconds = value + jitter()
for hdlr in backoff_hdlrs:
hdlr({'target': target,
'args': args,
'kwargs': kwargs,
'tries': tries,
'wait': seconds})
time.sleep(seconds)
else:
for hdlr in success_hdlrs:
hdlr({'target': target,
'args': args,
'kwargs': kwargs,
'tries': tries})
return ret
return retry
# Return a function which decorates a target with a retry loop.
return decorate | Returns decorator for backoff and retry triggered by exception.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
exception: An exception type (or tuple of types) which triggers
backoff.
max_tries: The maximum number of attempts to make before giving
up. Once exhausted, the exception will be allowed to escape.
The default value of None means there is no limit to the
number of tries. If a callable is passed, it will be
evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
giveup: Function accepting an exception instance and
returning whether or not to give up. Optional. The default
is to always continue.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration. |
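A minimal usage sketch of the decorator above, with a hypothetical exponential wait generator standing in for whatever generator the caller supplies:
def expo(base=2, factor=0.5):
    # hypothetical wait generator: 0.5s, 1s, 2s, 4s, ...
    n = 0
    while True:
        yield factor * base ** n
        n += 1
@on_exception(expo, ConnectionError, max_tries=4)
def fetch_status():
    # stand-in for a network call that may raise ConnectionError
    return "ok"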
def randbetween(lower: int, upper: int) -> int:
"""Return a random int in the range [lower, upper].
Raises ValueError if lower is negative or upper is not positive, and
TypeError if either argument is not an integer.
"""
if not isinstance(lower, int) or not isinstance(upper, int):
raise TypeError('lower and upper must be integers')
if lower < 0 or upper <= 0:
raise ValueError('lower and upper must be greater than zero')
return randbelow(upper - lower + 1) + lower | Return a random int in the range [lower, upper].
Raises ValueError if lower is negative or upper is not positive, and
TypeError if either argument is not an integer.
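A short worked example of the range arithmetic above:
# randbetween(1, 6): randbelow(6 - 1 + 1) + 1 = randbelow(6) + 1, i.e. a value in [1, 6]
roll = randbetween(1, 6)
assert 1 <= roll <= 6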
def read_frames(self, nframes, dtype=np.float64):
"""Read nframes frames of the file.
:Parameters:
nframes : int
number of frames to read.
dtype : numpy dtype
dtype of the returned array containing read data (see note).
Notes
-----
- read_frames updates the read pointer.
- One column is one channel (one row per channel after 0.9)
- if float are requested when the file contains integer data, you will
get normalized data (that is the max possible integer will be 1.0,
and the minimal possible value -1.0).
- if integers are requested when the file contains floating point data,
it may give wrong results because there is an ambiguity: if the
floating data are normalized, you can get a file with only 0 !
Getting integer data from files encoded in normalized floating point
is not supported (yet: sndfile supports it)."""
return self._sndfile.read_frames(nframes, dtype) | Read nframes frames of the file.
:Parameters:
nframes : int
number of frames to read.
dtype : numpy dtype
dtype of the returned array containing read data (see note).
Notes
-----
- read_frames updates the read pointer.
- One column is one channel (one row per channel after 0.9)
- if floats are requested when the file contains integer data, you will
  get normalized data (that is, the maximum possible integer maps to 1.0
  and the minimum possible value to -1.0).
- if integers are requested when the file contains floating point data,
  the result may be wrong because of an ambiguity: if the floating point
  data are normalized, you may get a file containing only zeros.
  Getting integer data from files encoded in normalized floating point
  is not supported (yet: sndfile supports it).
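A minimal numpy sketch (not the library's implementation) of the normalization described in the notes: 16-bit integer frames are mapped into roughly [-1.0, 1.0] when a float dtype is requested.

import numpy as np

raw = np.array([0, 16384, -32768, 32767], dtype=np.int16)  # hypothetical int16 frames
normalized = raw.astype(np.float64) / 32768.0
print(normalized)  # approximately [0., 0.5, -1., 0.99997]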
def restore_catalog_to_ckan(catalog, origin_portal_url, destination_portal_url,
apikey, download_strategy=None,
generate_new_access_url=None):
"""Restaura los datasets de un catálogo original al portal pasado
por parámetro. Si hay temas presentes en el DataJson que no están en
el portal de CKAN, los genera.
Args:
catalog (DataJson): El catálogo de origen que se restaura.
origin_portal_url (str): La URL del portal CKAN de origen.
destination_portal_url (str): La URL del portal CKAN de
destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar el dataset.
download_strategy(callable): Una función
(catálogo, distribución)-> bool. Sobre las distribuciones
que evalúa True, descarga el recurso en el downloadURL y lo
sube al portal de destino. Por default no sube ninguna
distribución.
generate_new_access_url(list): Se pasan los ids de las
distribuciones cuyo accessURL se regenerar en el portal de
destino. Para el resto, el portal debe mantiene el valor
pasado en el DataJson.
Returns:
dict: Diccionario con key organización y value la lista de ids
de datasets subidos a esa organización
"""
catalog['homepage'] = catalog.get('homepage') or origin_portal_url
res = {}
origin_portal = RemoteCKAN(origin_portal_url)
try:
org_list = origin_portal.action.organization_list()
except CKANAPIError as e:
logger.exception(
            'An error occurred while fetching the organizations of portal {}: {}'
.format(origin_portal_url, str(e)))
print(e)
return res
for org in org_list:
print("Restaurando organizacion {}".format(org))
response = origin_portal.action.organization_show(
id=org, include_datasets=True)
datasets = [package['id'] for package in response['packages']]
pushed_datasets = restore_organization_to_ckan(
catalog, org, destination_portal_url, apikey,
dataset_list=datasets, download_strategy=download_strategy,
generate_new_access_url=generate_new_access_url
)
res[org] = pushed_datasets
    return res | Restore the datasets of an original catalog to the portal passed
as a parameter. If the DataJson contains themes that are not present
in the CKAN portal, they are created.
Args:
    catalog (DataJson): The source catalog being restored.
    origin_portal_url (str): The URL of the origin CKAN portal.
    destination_portal_url (str): The URL of the destination CKAN
        portal.
    apikey (str): The apikey of a user with permissions to create
        or update the dataset.
    download_strategy (callable): A function
        (catalog, distribution) -> bool. For distributions that
        evaluate to True, the resource at their downloadURL is
        downloaded and uploaded to the destination portal. By
        default no distribution is uploaded.
    generate_new_access_url (list): The ids of the distributions
        whose accessURL will be regenerated on the destination
        portal. For the rest, the portal keeps the value given in
        the DataJson.
Returns:
    dict: Dictionary whose keys are organizations and whose values
        are the lists of ids of the datasets uploaded to each one.
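A hedged usage sketch: the URLs and apikey below are placeholders, and DataJson('data.json') assumes the pydatajson constructor referenced in the docstring accepts a path to a data.json file. The download_strategy shown uploads only CSV distributions; everything else keeps its original URL.

def upload_csvs_only(catalog, distribution):
    # Upload only the distributions whose declared format is CSV.
    return distribution.get('format', '').lower() == 'csv'

catalog = DataJson('data.json')  # assumed pydatajson constructor
pushed = restore_catalog_to_ckan(
    catalog,
    origin_portal_url='http://origin.example.org',
    destination_portal_url='http://destination.example.org',
    apikey='my-api-key',
    download_strategy=upload_csvs_only,
)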
def set_uint_info(self, field, data):
"""Set uint type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
        The array of data to be set
"""
_check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,
c_str(field),
c_array(ctypes.c_uint, data),
len(data))) | Set uint type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
    The array of data to be set
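The c_str and c_array helpers used above are not shown in this snippet; the sketch below shows what such ctypes-marshalling helpers typically look like (an assumption, not taken from the source), converting a Python string and a numpy array into the forms a C API expects.

import ctypes
import numpy as np

def c_str(string):
    # NUL-terminated C string built from a Python str
    return ctypes.c_char_p(string.encode('utf-8'))

def c_array(ctype, values):
    # Contiguous C array of `ctype` built from a Python or numpy sequence
    return (ctype * len(values))(*(int(v) for v in values))

data = np.array([0, 1, 2, 3], dtype=np.uint32)
arr = c_array(ctypes.c_uint, data)
print(len(arr), arr[2])  # 4 2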
def resizeEvent(self, event):
"""
Reimplements the :meth:`QWidget.resizeEvent` method.
:param event: QEvent.
:type event: QEvent
"""
LOGGER.debug("> Application resize event accepted!")
self.size_changed.emit(event)
event.accept() | Reimplements the :meth:`QWidget.resizeEvent` method.
:param event: QEvent.
:type event: QEvent |
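A hedged PyQt5 sketch of the same pattern; the real application class and its size_changed signal are not shown in the snippet, so the signal definition below is an assumption.

from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QResizeEvent
from PyQt5.QtWidgets import QWidget

class ResizableWidget(QWidget):
    size_changed = pyqtSignal(QResizeEvent)  # assumed signal signature

    def resizeEvent(self, event):
        # Re-emit the resize event so other components can react to it.
        self.size_changed.emit(event)
        event.accept()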
def build(self, js_path):
    """
    :param js_path: JavaScript source code.
    """
    super(Script, self).build()
    self.source = js_path | :param js_path: JavaScript source code.
def PublishMultipleEvents(cls, events, token=None):
"""Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
token: ACL token.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
"""
event_name_map = registry.EventRegistry.EVENT_NAME_MAP
for event_name, messages in iteritems(events):
if not isinstance(event_name, string_types):
raise ValueError(
"Event names should be string, got: %s" % type(event_name))
for msg in messages:
if not isinstance(msg, rdfvalue.RDFValue):
raise ValueError("Can only publish RDFValue instances.")
for event_cls in event_name_map.get(event_name, []):
event_cls().ProcessMessages(messages, token=token) | Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
token: ACL token.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage. |
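A minimal, self-contained sketch of the validate-then-dispatch pattern used above; the class and event names are illustrative stand-ins, not GRR's real registry or message types.

class Message:
    def __init__(self, payload):
        self.payload = payload

class PrintHandler:
    def ProcessMessages(self, messages):
        for msg in messages:
            print("handled:", msg.payload)

EVENT_NAME_MAP = {"MyEvent": [PrintHandler]}

def publish_multiple(events):
    for event_name, messages in events.items():
        # Validate the dict shape before dispatching to any handler.
        if not isinstance(event_name, str):
            raise ValueError("Event names should be strings, got: %s" % type(event_name))
        for msg in messages:
            if not isinstance(msg, Message):
                raise ValueError("Can only publish Message instances.")
        for handler_cls in EVENT_NAME_MAP.get(event_name, []):
            handler_cls().ProcessMessages(messages)

publish_multiple({"MyEvent": [Message("a"), Message("b")]})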
def isinstance(self, instance, class_name):
"""Check if a BaseNode is an instance of a registered dynamic class"""
if isinstance(instance, BaseNode):
klass = self.dynamic_node_classes.get(class_name, None)
if klass:
return isinstance(instance, klass)
# Not an instance of a class in the registry
return False
else:
raise TypeError("This function can only be used for BaseNode objects") | Check if a BaseNode is an instance of a registered dynamic class |
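A self-contained sketch of the registry-backed isinstance check above; BaseNode and the registry class here are stand-ins for the source's own classes.

class BaseNode:
    pass

class NodeRegistry:
    def __init__(self):
        self.dynamic_node_classes = {}

    def register(self, name):
        # Create and store a dynamic subclass of BaseNode under `name`.
        self.dynamic_node_classes[name] = type(name, (BaseNode,), {})
        return self.dynamic_node_classes[name]

    def isinstance(self, instance, class_name):
        if not isinstance(instance, BaseNode):
            raise TypeError("This function can only be used for BaseNode objects")
        klass = self.dynamic_node_classes.get(class_name)
        return isinstance(instance, klass) if klass else False

registry = NodeRegistry()
Sensor = registry.register("Sensor")
print(registry.isinstance(Sensor(), "Sensor"))   # True
print(registry.isinstance(Sensor(), "Missing"))  # False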