code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def create_contentkey_authorization_policy_options(access_token, key_delivery_type="2", \
name="HLS Open Authorization Policy", key_restriction_type="0"):
'''Create Media Service Content Key Authorization Policy Options.
Args:
access_token (str): A valid Azure authentication token.
key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.
name (str): A Media Service Content Key Authorization Policy Name.
key_restriction_type (str): A Media Service Content Key Restriction Type.
Returns:
HTTP response. JSON body.
'''
path = '/ContentKeyAuthorizationPolicyOptions'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"Name":"policy",\
"KeyDeliveryType":"' + key_delivery_type + '", \
"KeyDeliveryConfiguration":"", \
"Restrictions":[{ \
"Name":"' + name + '", \
"KeyRestrictionType":"' + key_restriction_type + '", \
"Requirements":null \
}] \
}'
return do_ams_post(endpoint, path, body, access_token, "json_only") | Create Media Service Content Key Authorization Policy Options.
Args:
access_token (str): A valid Azure authentication token.
key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.
name (str): A Media Service Content Key Authorization Policy Name.
key_restriction_type (str): A Media Service Content Key Restriction Type.
Returns:
HTTP response. JSON body. | Below is the instruction that describes the task:
### Input:
Create Media Service Content Key Authorization Policy Options.
Args:
access_token (str): A valid Azure authentication token.
key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.
name (str): A Media Service Content Key Authorization Policy Name.
key_restriction_type (str): A Media Service Content Key Restriction Type.
Returns:
HTTP response. JSON body.
### Response:
def create_contentkey_authorization_policy_options(access_token, key_delivery_type="2", \
name="HLS Open Authorization Policy", key_restriction_type="0"):
'''Create Media Service Content Key Authorization Policy Options.
Args:
access_token (str): A valid Azure authentication token.
key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.
name (str): A Media Service Content Key Authorization Policy Name.
key_restriction_type (str): A Media Service Content Key Restriction Type.
Returns:
HTTP response. JSON body.
'''
path = '/ContentKeyAuthorizationPolicyOptions'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"Name":"policy",\
"KeyDeliveryType":"' + key_delivery_type + '", \
"KeyDeliveryConfiguration":"", \
"Restrictions":[{ \
"Name":"' + name + '", \
"KeyRestrictionType":"' + key_restriction_type + '", \
"Requirements":null \
}] \
}'
return do_ams_post(endpoint, path, body, access_token, "json_only") |
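The request body above is assembled by plain string concatenation into a JSON document. A minimal sketch of the equivalent payload built from a dict, so it can be inspected without an Azure endpoint or access token (values are illustrative and do_ams_post is not called):

import json

key_delivery_type = "2"
name = "HLS Open Authorization Policy"
key_restriction_type = "0"

body = {
    "Name": "policy",
    "KeyDeliveryType": key_delivery_type,
    "KeyDeliveryConfiguration": "",
    "Restrictions": [{
        "Name": name,
        "KeyRestrictionType": key_restriction_type,
        "Requirements": None,
    }],
}
print(json.dumps(body))  # the JSON posted to /ContentKeyAuthorizationPolicyOptions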
def getChild(self, name, ns=None, default=None):
"""
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
"""
if self.__root is None:
return default
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.__root.resolvePrefix(prefix)
if self.__root.match(name, ns):
return self.__root
else:
return default | Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element} | Below is the instruction that describes the task:
### Input:
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
### Response:
def getChild(self, name, ns=None, default=None):
"""
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
"""
if self.__root is None:
return default
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.__root.resolvePrefix(prefix)
if self.__root.match(name, ns):
return self.__root
else:
return default |
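The lookup first splits an optional prefix off the requested name and resolves it against the element's prefix table before matching. A standalone sketch of just that resolution step; splitPrefix and the prefix table here are simplified stand-ins, not suds' real implementations:

def splitPrefix(name):
    "Split 'p:child' into (prefix, local name); unprefixed names give (None, name)."
    return tuple(name.split(':', 1)) if ':' in name else (None, name)

prefix_table = {'tns': ('tns', 'http://example.org/schema')}  # assumed prefix -> namespace mapping

prefix, local = splitPrefix('tns:child')
ns = prefix_table.get(prefix) if prefix is not None else None
print(local, ns)  # child ('tns', 'http://example.org/schema')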
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children | Return the children of `m` and its direct parameters not registered in modules. | Below is the instruction that describes the task:
### Input:
Return the children of `m` and its direct parameters not registered in modules.
### Response:
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children |
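The helper exists because parameters assigned directly on a module (rather than inside a child module) would otherwise be missed by code that only walks m.children(). A runnable sketch with plain PyTorch; ParameterModule here is a local stand-in for fastai's wrapper class:

import torch
import torch.nn as nn

class ParameterModule(nn.Module):
    "Stand-in wrapper that lets a bare Parameter sit in a list of modules."
    def __init__(self, p):
        super().__init__()
        self.val = p

class WithLooseParam(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(4, 4)                # registered child module
        self.scale = nn.Parameter(torch.ones(4))  # direct parameter, not inside any child

m = WithLooseParam()
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()], [])
for p in m.parameters():
    if id(p) not in children_p:
        children.append(ParameterModule(p))
print([type(c).__name__ for c in children])  # ['Linear', 'ParameterModule']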
def get_metadata(feature_name, etextno):
"""Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)
return frozenset(metadata_values) | Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name. | Below is the instruction that describes the task:
### Input:
Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
### Response:
def get_metadata(feature_name, etextno):
"""Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)
return frozenset(metadata_values) |
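Typical usage, assuming the public wrapper exposed by the gutenberg package (gutenberg.query.get_metadata) and a populated metadata cache; the text number and outputs are illustrative:

from gutenberg.query import get_metadata

print(get_metadata('title', 2701))   # e.g. frozenset({'Moby Dick; Or, The Whale'})
print(get_metadata('author', 2701))  # e.g. frozenset({'Melville, Herman'})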
def intersection(self, *others):
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if new_multiplicity < multiplicity:
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity - new_multiplicity
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result | r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets. | Below is the instruction that describes the task:
### Input:
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
### Response:
def intersection(self, *others):
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if new_multiplicity < multiplicity:
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity - new_multiplicity
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result |
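The semantics amount to taking the element-wise minimum of multiplicities. The standard library's collections.Counter supports the same operation via &, which reproduces the doctest results above:

from collections import Counter

print(sorted((Counter('aab') & Counter('abc')).elements()))   # ['a', 'b']
print(sorted((Counter('aab') & Counter('aaac')).elements()))  # ['a', 'a']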
def _condition_as_text(lambda_inspection: icontract._represent.ConditionLambdaInspection) -> str:
"""Format condition lambda function as reST."""
lambda_ast_node = lambda_inspection.node
assert isinstance(lambda_ast_node, ast.Lambda)
body_node = lambda_ast_node.body
text = None # type: Optional[str]
if isinstance(body_node, ast.BoolOp) and isinstance(body_node.op, ast.Or) and len(body_node.values) == 2:
left, right = body_node.values
if isinstance(left, ast.UnaryOp) and isinstance(left.op, ast.Not):
# Handle the case: not A or B is transformed to A => B
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left.operand), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.UnaryOp, ast.BinOp, ast.GeneratorExp, ast.IfExp)):
text = ':code:`not ({})` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, ast.Compare) and len(left.ops) == 1:
text = ':code:`{}` ⇒ :code:`{}`'.format(
_negate_compare_text(atok=lambda_inspection.atok, node=left),
lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.Call, ast.Attribute, ast.Name, ast.Subscript, ast.Index, ast.Slice, ast.ExtSlice,
ast.ListComp, ast.SetComp, ast.DictComp)):
text = ':code:`not {}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(body_node, ast.IfExp) and isinstance(body_node.orelse, ast.NameConstant) and body_node.orelse.value:
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=body_node.test), lambda_inspection.atok.get_text(node=body_node.body))
if text is None:
# None of the previous reformattings worked, take the default approach.
text = ':code:`{}`'.format(lambda_inspection.atok.get_text(node=body_node))
return text | Format condition lambda function as reST. | Below is the instruction that describes the task:
### Input:
Format condition lambda function as reST.
### Response:
def _condition_as_text(lambda_inspection: icontract._represent.ConditionLambdaInspection) -> str:
"""Format condition lambda function as reST."""
lambda_ast_node = lambda_inspection.node
assert isinstance(lambda_ast_node, ast.Lambda)
body_node = lambda_ast_node.body
text = None # type: Optional[str]
if isinstance(body_node, ast.BoolOp) and isinstance(body_node.op, ast.Or) and len(body_node.values) == 2:
left, right = body_node.values
if isinstance(left, ast.UnaryOp) and isinstance(left.op, ast.Not):
# Handle the case: not A or B is transformed to A => B
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left.operand), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.UnaryOp, ast.BinOp, ast.GeneratorExp, ast.IfExp)):
text = ':code:`not ({})` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, ast.Compare) and len(left.ops) == 1:
text = ':code:`{}` ⇒ :code:`{}`'.format(
_negate_compare_text(atok=lambda_inspection.atok, node=left),
lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.Call, ast.Attribute, ast.Name, ast.Subscript, ast.Index, ast.Slice, ast.ExtSlice,
ast.ListComp, ast.SetComp, ast.DictComp)):
text = ':code:`not {}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(body_node, ast.IfExp) and isinstance(body_node.orelse, ast.NameConstant) and body_node.orelse.value:
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=body_node.test), lambda_inspection.atok.get_text(node=body_node.body))
if text is None:
# None of the previous reformattings worked, take the default approach.
text = ':code:`{}`'.format(lambda_inspection.atok.get_text(node=body_node))
return text |
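The first branch recognizes lambdas of the form not A or B and renders them as the implication A ⇒ B. A standalone sketch of that shape detection using only the ast module (no icontract or asttokens required):

import ast

def looks_like_implication(src):
    "Return True when a lambda's body has the `not A or B` shape."
    lam = ast.parse(src, mode='eval').body
    if not isinstance(lam, ast.Lambda):
        return False
    b = lam.body
    return (isinstance(b, ast.BoolOp) and isinstance(b.op, ast.Or)
            and len(b.values) == 2
            and isinstance(b.values[0], ast.UnaryOp)
            and isinstance(b.values[0].op, ast.Not))

print(looks_like_implication('lambda x: not (x > 0) or x < 100'))  # True  -> rendered as x > 0 ⇒ x < 100
print(looks_like_implication('lambda x: x > 0 and x < 100'))       # False -> default rendering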
async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
"""
Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException.
"""
# the first time sending the commands we send all of the commands that were queued up.
# if we have to run through it again, we only retry the commands that failed.
attempt = sorted(stack, key=lambda x: x.position)
# build a list of node objects based on node names we need to
nodes = {}
# as we move through each command that still needs to be processed,
# we figure out the slot number that command maps to, then from the slot determine the node.
for c in attempt:
# refer to our internal node -> slot table that tells us where a given
# command should route to.
slot = self._determine_slot(*c.args)
node = self.connection_pool.get_node_by_slot(slot)
# little hack to make sure the node name is populated. probably could clean this up.
self.connection_pool.nodes.set_node_name(node)
# now that we know the name of the node ( it's just a string in the form of host:port )
# we can build a list of commands for each node.
node_name = node['name']
if node_name not in nodes:
nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node))
nodes[node_name].append(c)
# send the commands in sequence.
# we write to all the open sockets for each node first, before reading anything
# this allows us to flush all the requests out across the network essentially in parallel
# so that we can read them all in parallel as they come back.
# we don't multiplex on the sockets as they come available, but that shouldn't make too much difference.
node_commands = nodes.values()
for n in node_commands:
await n.write()
for n in node_commands:
await n.read()
# release all of the redis connections we allocated earlier back into the connection pool.
# we used to do this step as part of a try/finally block, but it is really dangerous to
# release connections back into the pool if for some reason the socket has data still left in it
# from a previous operation. The write and read operations already have try/catch around them for
# all known types of errors including connection and socket level errors.
# So if we hit an exception, something really bad happened and putting any of
# these connections back into the pool is a very bad idea.
# the socket might have unread buffer still sitting in it, and then the
# next time we read from it we pass the buffered result back from a previous
# command and every single request after to that connection will always get
# a mismatched result. (not just theoretical, I saw this happen on production x.x).
for n in nodes.values():
self.connection_pool.release(n.connection)
# if the response isn't an exception it is a valid response from the node
# we're all done with that command, YAY!
# if we have more commands to attempt, we've run into problems.
# collect all the commands we are allowed to retry.
# (MOVED, ASK, or connection errors or timeout errors)
attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position)
if attempt and allow_redirections:
# RETRY MAGIC HAPPENS HERE!
# send these remaining commands one at a time using `execute_command`
# in the main client. This keeps our retry logic in one place mostly,
# and allows us to be more confident in correctness of behavior.
# at this point any speed gains from pipelining have been lost
# anyway, so we might as well make the best attempt to get the correct
# behavior.
#
# The client command will handle retries for each individual command
# sequentially as we pass each one into `execute_command`. Any exceptions
# that bubble out should only appear once all retries have been exhausted.
#
# If a lot of commands have failed, we'll be setting the
# flag to rebuild the slots table from scratch. So MOVED errors should
# correct themselves fairly quickly.
await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
for c in attempt:
try:
# send each command individually like we do in the main client.
c.result = await super(StrictClusterPipeline, self).execute_command(*c.args, **c.options)
except RedisError as e:
c.result = e
# turn the response back into a simple flat array that corresponds
# to the sequence of commands issued in the stack in pipeline.execute()
response = [c.result for c in sorted(stack, key=lambda x: x.position)]
if raise_on_error:
self.raise_first_error(stack)
return response | Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException. | Below is the instruction that describes the task:
### Input:
Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException.
### Response:
async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
"""
Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException.
"""
# the first time sending the commands we send all of the commands that were queued up.
# if we have to run through it again, we only retry the commands that failed.
attempt = sorted(stack, key=lambda x: x.position)
# build a list of node objects based on node names we need to
nodes = {}
# as we move through each command that still needs to be processed,
# we figure out the slot number that command maps to, then from the slot determine the node.
for c in attempt:
# refer to our internal node -> slot table that tells us where a given
# command should route to.
slot = self._determine_slot(*c.args)
node = self.connection_pool.get_node_by_slot(slot)
# little hack to make sure the node name is populated. probably could clean this up.
self.connection_pool.nodes.set_node_name(node)
# now that we know the name of the node ( it's just a string in the form of host:port )
# we can build a list of commands for each node.
node_name = node['name']
if node_name not in nodes:
nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node))
nodes[node_name].append(c)
# send the commands in sequence.
# we write to all the open sockets for each node first, before reading anything
# this allows us to flush all the requests out across the network essentially in parallel
# so that we can read them all in parallel as they come back.
# we don't multiplex on the sockets as they come available, but that shouldn't make too much difference.
node_commands = nodes.values()
for n in node_commands:
await n.write()
for n in node_commands:
await n.read()
# release all of the redis connections we allocated earlier back into the connection pool.
# we used to do this step as part of a try/finally block, but it is really dangerous to
# release connections back into the pool if for some reason the socket has data still left in it
# from a previous operation. The write and read operations already have try/catch around them for
# all known types of errors including connection and socket level errors.
# So if we hit an exception, something really bad happened and putting any of
# these connections back into the pool is a very bad idea.
# the socket might have unread buffer still sitting in it, and then the
# next time we read from it we pass the buffered result back from a previous
# command and every single request after to that connection will always get
# a mismatched result. (not just theoretical, I saw this happen on production x.x).
for n in nodes.values():
self.connection_pool.release(n.connection)
# if the response isn't an exception it is a valid response from the node
# we're all done with that command, YAY!
# if we have more commands to attempt, we've run into problems.
# collect all the commands we are allowed to retry.
# (MOVED, ASK, or connection errors or timeout errors)
attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position)
if attempt and allow_redirections:
# RETRY MAGIC HAPPENS HERE!
# send these remaining commands one at a time using `execute_command`
# in the main client. This keeps our retry logic in one place mostly,
# and allows us to be more confident in correctness of behavior.
# at this point any speed gains from pipelining have been lost
# anyway, so we might as well make the best attempt to get the correct
# behavior.
#
# The client command will handle retries for each individual command
# sequentially as we pass each one into `execute_command`. Any exceptions
# that bubble out should only appear once all retries have been exhausted.
#
# If a lot of commands have failed, we'll be setting the
# flag to rebuild the slots table from scratch. So MOVED errors should
# correct themselves fairly quickly.
await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
for c in attempt:
try:
# send each command individually like we do in the main client.
c.result = await super(StrictClusterPipeline, self).execute_command(*c.args, **c.options)
except RedisError as e:
c.result = e
# turn the response back into a simple flat array that corresponds
# to the sequence of commands issued in the stack in pipeline.execute()
response = [c.result for c in sorted(stack, key=lambda x: x.position)]
if raise_on_error:
self.raise_first_error(stack)
return response |
def build_channel(namespace, name, user_ids):
""" Creates complete channel information as described here https://fzambia.gitbooks.io/centrifugal/content/server/channels.html. """
ids = ','.join(map(str, user_ids))
return "{0}:{1}#{2}".format(namespace, name, ids) | Creates complete channel information as described here https://fzambia.gitbooks.io/centrifugal/content/server/channels.html. | Below is the the instruction that describes the task:
### Input:
Creates complete channel information as described here https://fzambia.gitbooks.io/centrifugal/content/server/channels.html.
### Response:
def build_channel(namespace, name, user_ids):
""" Creates complete channel information as described here https://fzambia.gitbooks.io/centrifugal/content/server/channels.html. """
ids = ','.join(map(str, user_ids))
return "{0}:{1}#{2}".format(namespace, name, ids) |
def pack(self, grads):
"""
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
"""
for i, g in enumerate(grads):
assert g.shape == self._shapes[i]
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads')
# concat_grads = tf.cast(concat_grads, tf.float16)
grad_packs = tf.split(concat_grads, self._split_sizes)
return grad_packs | Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated. | Below is the instruction that describes the task:
### Input:
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
### Response:
def pack(self, grads):
"""
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
"""
for i, g in enumerate(grads):
assert g.shape == self._shapes[i]
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads')
# concat_grads = tf.cast(concat_grads, tf.float16)
grad_packs = tf.split(concat_grads, self._split_sizes)
return grad_packs |
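The packer flattens every gradient, concatenates them into one buffer, and later splits the buffer back using the recorded sizes. A NumPy sketch of the same round trip (the row above does this with TF1 graph ops; the shapes and split sizes are assumed to have been recorded at construction time, as _shapes and _split_sizes):

import numpy as np

grads = [np.ones((2, 3)), np.arange(4, dtype=float)]
shapes = [g.shape for g in grads]
split_sizes = [g.size for g in grads]                    # [6, 4]

concat = np.concatenate([g.reshape(-1) for g in grads])  # one flat buffer to aggregate
packs = np.split(concat, np.cumsum(split_sizes)[:-1])    # split back into per-tensor pieces
restored = [p.reshape(s) for p, s in zip(packs, shapes)]
print([r.shape for r in restored])                       # [(2, 3), (4,)]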
def page_guiref(arg_s=None):
"""Show a basic reference about the GUI Console."""
from IPython.core import page
page.page(gui_reference, auto_html=True) | Show a basic reference about the GUI Console. | Below is the instruction that describes the task:
### Input:
Show a basic reference about the GUI Console.
### Response:
def page_guiref(arg_s=None):
"""Show a basic reference about the GUI Console."""
from IPython.core import page
page.page(gui_reference, auto_html=True) |
def crypto_hash_sha256(message):
"""
Hashes and returns the message ``message``.
:param message: bytes
:rtype: bytes
"""
digest = ffi.new("unsigned char[]", crypto_hash_sha256_BYTES)
rc = lib.crypto_hash_sha256(digest, message, len(message))
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(digest, crypto_hash_sha256_BYTES)[:] | Hashes and returns the message ``message``.
:param message: bytes
:rtype: bytes | Below is the instruction that describes the task:
### Input:
Hashes and returns the message ``message``.
:param message: bytes
:rtype: bytes
### Response:
def crypto_hash_sha256(message):
"""
Hashes and returns the message ``message``.
:param message: bytes
:rtype: bytes
"""
digest = ffi.new("unsigned char[]", crypto_hash_sha256_BYTES)
rc = lib.crypto_hash_sha256(digest, message, len(message))
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(digest, crypto_hash_sha256_BYTES)[:] |
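The binding returns the raw 32-byte SHA-256 digest of the message. A sanity check with the standard library's hashlib, which yields the same digest without requiring libsodium or cffi:

import hashlib

digest = hashlib.sha256(b"hello").digest()
print(len(digest))    # 32
print(digest.hex())   # 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824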
def read(self, obj):
"""
Returns
object: fragment
"""
path, frag = [], obj
for part in self.parts:
path.append(part)
if isinstance(frag, dict):
try:
frag = frag[part]
except KeyError as error:
raise NotFound(':'.join(path)) from error
elif isinstance(frag, (list, tuple)):
try:
frag = frag[int(part)]
except IndexError as error:
raise NotFound(':'.join(path)) from error
except ValueError as error:
raise WrongType(':'.join(path)) from error
elif isinstance(frag, (str, int)):
raise WrongType(':'.join(path))
else:
raise NotFound(':'.join(path))
return frag | Returns
object: fragment | Below is the instruction that describes the task:
### Input:
Returns
object: fragment
### Response:
def read(self, obj):
"""
Returns
object: fragment
"""
path, frag = [], obj
for part in self.parts:
path.append(part)
if isinstance(frag, dict):
try:
frag = frag[part]
except KeyError as error:
raise NotFound(':'.join(path)) from error
elif isinstance(frag, (list, tuple)):
try:
frag = frag[int(part)]
except IndexError as error:
raise NotFound(':'.join(path)) from error
except ValueError as error:
raise WrongType(':'.join(path)) from error
elif isinstance(frag, (str, int)):
raise WrongType(':'.join(path))
else:
raise NotFound(':'.join(path))
return frag |
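The reader walks self.parts through nested dicts and lists, converting a part to an int whenever a list is reached. A standalone sketch of the same traversal (NotFound/WrongType replaced by a plain exception so it runs on its own):

obj = {"servers": [{"host": "a.example", "port": 80}]}
parts = ["servers", "0", "port"]  # plays the role of self.parts

frag = obj
for part in parts:
    if isinstance(frag, dict):
        frag = frag[part]
    elif isinstance(frag, (list, tuple)):
        frag = frag[int(part)]
    else:
        raise KeyError(':'.join(parts))
print(frag)  # 80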
def _real_time_thread(self):
"""Handles real-time updates to the order book."""
while self.ws_client.connected():
if self.die:
break
if self.pause:
sleep(5)
continue
message = self.ws_client.receive()
if message is None:
break
message_type = message['type']
if message_type == 'error':
continue
if message['sequence'] <= self.sequence:
continue
if message_type == 'open':
self._handle_open(message)
elif message_type == 'match':
self._handle_match(message)
elif message_type == 'done':
self._handle_done(message)
elif message_type == 'change':
self._handle_change(message)
else:
continue
self.ws_client.disconnect() | Handles real-time updates to the order book. | Below is the instruction that describes the task:
### Input:
Handles real-time updates to the order book.
### Response:
def _real_time_thread(self):
"""Handles real-time updates to the order book."""
while self.ws_client.connected():
if self.die:
break
if self.pause:
sleep(5)
continue
message = self.ws_client.receive()
if message is None:
break
message_type = message['type']
if message_type == 'error':
continue
if message['sequence'] <= self.sequence:
continue
if message_type == 'open':
self._handle_open(message)
elif message_type == 'match':
self._handle_match(message)
elif message_type == 'done':
self._handle_done(message)
elif message_type == 'change':
self._handle_change(message)
else:
continue
self.ws_client.disconnect() |
def _handle_array(toks):
"""
Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str
"""
if len(toks) == 5 and toks[1] == '{' and toks[4] == '}':
subtree = toks[2:4]
signature = ''.join(s for (_, s) in subtree)
[key_func, value_func] = [f for (f, _) in subtree]
def the_dict_func(a_dict, variant=0):
"""
Function for generating a Dictionary from a dict.
:param a_dict: the dictionary to transform
:type a_dict: dict of (`a * `b)
:param int variant: variant level
:returns: a dbus dictionary of transformed values and level
:rtype: Dictionary * int
"""
elements = \
[(key_func(x), value_func(y)) for (x, y) in a_dict.items()]
level = 0 if elements == [] \
else max(max(x, y) for ((_, x), (_, y)) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Dictionary(
((x, y) for ((x, _), (y, _)) in elements),
signature=signature,
variant_level=obj_level), func_level)
return (the_dict_func, 'a{' + signature + '}')
if len(toks) == 2:
(func, sig) = toks[1]
def the_array_func(a_list, variant=0):
"""
Function for generating an Array from a list.
:param a_list: the list to transform
:type a_list: list of `a
:param int variant: variant level of the value
:returns: a dbus Array of transformed values and variant level
:rtype: Array * int
"""
if isinstance(a_list, dict):
raise IntoDPValueError(a_list, "a_list",
"is a dict, must be an array")
elements = [func(x) for x in a_list]
level = 0 if elements == [] else max(x for (_, x) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Array(
(x for (x, _) in elements),
signature=sig,
variant_level=obj_level), func_level)
return (the_array_func, 'a' + sig)
raise IntoDPValueError(toks, "toks",
"unexpected tokens") | Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str | Below is the instruction that describes the task:
### Input:
Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str
### Response:
def _handle_array(toks):
"""
Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str
"""
if len(toks) == 5 and toks[1] == '{' and toks[4] == '}':
subtree = toks[2:4]
signature = ''.join(s for (_, s) in subtree)
[key_func, value_func] = [f for (f, _) in subtree]
def the_dict_func(a_dict, variant=0):
"""
Function for generating a Dictionary from a dict.
:param a_dict: the dictionary to transform
:type a_dict: dict of (`a * `b)
:param int variant: variant level
:returns: a dbus dictionary of transformed values and level
:rtype: Dictionary * int
"""
elements = \
[(key_func(x), value_func(y)) for (x, y) in a_dict.items()]
level = 0 if elements == [] \
else max(max(x, y) for ((_, x), (_, y)) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Dictionary(
((x, y) for ((x, _), (y, _)) in elements),
signature=signature,
variant_level=obj_level), func_level)
return (the_dict_func, 'a{' + signature + '}')
if len(toks) == 2:
(func, sig) = toks[1]
def the_array_func(a_list, variant=0):
"""
Function for generating an Array from a list.
:param a_list: the list to transform
:type a_list: list of `a
:param int variant: variant level of the value
:returns: a dbus Array of transformed values and variant level
:rtype: Array * int
"""
if isinstance(a_list, dict):
raise IntoDPValueError(a_list, "a_list",
"is a dict, must be an array")
elements = [func(x) for x in a_list]
level = 0 if elements == [] else max(x for (_, x) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Array(
(x for (x, _) in elements),
signature=sig,
variant_level=obj_level), func_level)
return (the_array_func, 'a' + sig)
raise IntoDPValueError(toks, "toks",
"unexpected tokens") |
def set_write_buffer_limits(self, high=None, low=None):
"""Set the low and high watermark for the write buffer."""
if high is None:
high = self.write_buffer_size
if low is None:
low = high // 2
if low > high:
low = high
self._write_buffer_high = high
self._write_buffer_low = low | Set the low and high watermark for the write buffer. | Below is the instruction that describes the task:
### Input:
Set the low and high watermark for the write buffer.
### Response:
def set_write_buffer_limits(self, high=None, low=None):
"""Set the low and high watermark for the write buffer."""
if high is None:
high = self.write_buffer_size
if low is None:
low = high // 2
if low > high:
low = high
self._write_buffer_high = high
self._write_buffer_low = low |
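The defaulting rules: high falls back to the transport's write_buffer_size, low defaults to half of high, and low is clamped so it never exceeds high. A tiny illustration outside any transport class (the buffer size is an assumed value):

def limits(high=None, low=None, write_buffer_size=65536):
    if high is None:
        high = write_buffer_size
    if low is None:
        low = high // 2
    if low > high:
        low = high
    return high, low

print(limits())                      # (65536, 32768)
print(limits(high=1000, low=5000))   # (1000, 1000) -- low clamped to high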
def check_encoding(proof_req: dict, proof: dict) -> bool:
"""
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
"""
LOGGER.debug('Verifier.check_encoding <<< proof_req: %s, proof: %s', proof_req, proof)
cd_id2proof_id = {} # invert proof['identifiers'] per cd_id
p_preds = {} # cd_id and attr to bound
for idx in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][idx]['cred_def_id']
cd_id2proof_id[cd_id] = idx # since at most 1 cred per cred def
p_preds[cd_id] = {
ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
}
for (uuid, req_attr) in proof_req['requested_attributes'].items(): # proof req xref proof per revealed attr
canon_attr = canon(req_attr['name'])
proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
canon_attr)
if not enco:
continue # requested but declined from revelation in proof: must appear in a predicate
if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
for (uuid, req_pred) in proof_req['requested_predicates'].items(): # proof req xref proof per pred
canon_attr = canon(req_pred['name'])
if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
LOGGER.debug('Verifier.check_proof_encoding <<< True')
return True | Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch | Below is the instruction that describes the task:
### Input:
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
### Response:
def check_encoding(proof_req: dict, proof: dict) -> bool:
"""
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
"""
LOGGER.debug('Verifier.check_encoding <<< proof_req: %s, proof: %s', proof_req, proof)
cd_id2proof_id = {} # invert proof['identifiers'] per cd_id
p_preds = {} # cd_id and attr to bound
for idx in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][idx]['cred_def_id']
cd_id2proof_id[cd_id] = idx # since at most 1 cred per cred def
p_preds[cd_id] = {
ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
}
for (uuid, req_attr) in proof_req['requested_attributes'].items(): # proof req xref proof per revealed attr
canon_attr = canon(req_attr['name'])
proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
canon_attr)
if not enco:
continue # requested but declined from revelation in proof: must appear in a predicate
if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
for (uuid, req_pred) in proof_req['requested_predicates'].items(): # proof req xref proof per pred
canon_attr = canon(req_pred['name'])
if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
LOGGER.debug('Verifier.check_proof_encoding <<< True')
return True |
def _GetRecordValue(self, record, value_entry):
"""Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
"""
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError('Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return None
if column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError('Boolean value support not implemented yet.')
if column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError('Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
if column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError('Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
if column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
if column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError('GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry) | Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported. | Below is the instruction that describes the task:
### Input:
Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
### Response:
def _GetRecordValue(self, record, value_entry):
"""Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
"""
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError('Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return None
if column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError('Boolean value support not implemented yet.')
if column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError('Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
if column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError('Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
if column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
if column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError('GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry) |
def securitygroupid(vm_):
'''
Returns the SecurityGroupId
'''
securitygroupid_set = set()
securitygroupid_list = config.get_cloud_config_value(
'securitygroupid',
vm_,
__opts__,
search_global=False
)
# If the list is None, then the set will remain empty
# If the list is already a set then calling 'set' on it is a no-op
# If the list is a string, then calling 'set' generates a one-element set
# If the list is anything else, stacktrace
if securitygroupid_list:
securitygroupid_set = securitygroupid_set.union(set(securitygroupid_list))
securitygroupname_list = config.get_cloud_config_value(
'securitygroupname', vm_, __opts__, search_global=False
)
if securitygroupname_list:
if not isinstance(securitygroupname_list, list):
securitygroupname_list = [securitygroupname_list]
params = {'Action': 'DescribeSecurityGroups'}
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set) | Returns the SecurityGroupId | Below is the instruction that describes the task:
### Input:
Returns the SecurityGroupId
### Response:
def securitygroupid(vm_):
'''
Returns the SecurityGroupId
'''
securitygroupid_set = set()
securitygroupid_list = config.get_cloud_config_value(
'securitygroupid',
vm_,
__opts__,
search_global=False
)
# If the list is None, then the set will remain empty
# If the list is already a set then calling 'set' on it is a no-op
# If the list is a string, then calling 'set' generates a one-element set
# If the list is anything else, stacktrace
if securitygroupid_list:
securitygroupid_set = securitygroupid_set.union(set(securitygroupid_list))
securitygroupname_list = config.get_cloud_config_value(
'securitygroupname', vm_, __opts__, search_global=False
)
if securitygroupname_list:
if not isinstance(securitygroupname_list, list):
securitygroupname_list = [securitygroupname_list]
params = {'Action': 'DescribeSecurityGroups'}
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set) |
def observe(M, C, obs_mesh, obs_vals, obs_V=0, lintrans=None,
cross_validate=True):
"""
(M, C, obs_mesh, obs_vals[, obs_V = 0, lintrans = None, cross_validate = True])
Imposes observation of the value of obs_vals on M and C, where
obs_vals ~ N(lintrans * f(obs_mesh), V)
f ~ GP(M,C)
:Arguments:
- `M`: The mean function
- `C`: The covariance function
- `obs_mesh`: The places where f has been evaluated.
- `obs_vals`: The values of f that were observed there.
- `obs_V`: The observation variance. If None, assumed to be infinite
(observations made with no error).
- `lintrans`: A linear transformation. If None, assumed to be the
identity transformation (pretend it doesn't exist).
- `cross_validate`: A flag indicating whether a check should be done to
see if the data could have arisen from M and C with positive probability.
"""
obs_mesh = regularize_array(obs_mesh)
# print_(obs_mesh)
obs_V = resize(obs_V, obs_mesh.shape[0])
obs_vals = resize(obs_vals, obs_mesh.shape[0])
# First observe C.
relevant_slice, obs_mesh_new = C.observe(obs_mesh, obs_V, output_type='o')
# Then observe M from C.
M.observe(C, obs_mesh_new, obs_vals.ravel()[relevant_slice])
# Cross-validate if not asked not to.
if obs_mesh_new.shape[0] < obs_mesh.shape[0]:
if cross_validate:
if not predictive_check(
obs_vals, obs_mesh, M, C.obs_piv, sqrt(C.relative_precision)):
raise ValueError(
"These data seem extremely improbable given your GP prior. \n Suggestions: decrease observation precision, or adjust the covariance to \n allow the function to be less smooth.") | (M, C, obs_mesh, obs_vals[, obs_V = 0, lintrans = None, cross_validate = True])
Imposes observation of the value of obs_vals on M and C, where
obs_vals ~ N(lintrans * f(obs_mesh), V)
f ~ GP(M,C)
:Arguments:
- `M`: The mean function
- `C`: The covariance function
- `obs_mesh`: The places where f has been evaluated.
- `obs_vals`: The values of f that were observed there.
- `obs_V`: The observation variance. If None, assumed to be infinite
(observations made with no error).
- `lintrans`: A linear transformation. If None, assumed to be the
identity transformation (pretend it doesn't exist).
- `cross_validate`: A flag indicating whether a check should be done to
see if the data could have arisen from M and C with positive probability. | Below is the instruction that describes the task:
### Input:
(M, C, obs_mesh, obs_vals[, obs_V = 0, lintrans = None, cross_validate = True])
Imposes observation of the value of obs_vals on M and C, where
obs_vals ~ N(lintrans * f(obs_mesh), V)
f ~ GP(M,C)
:Arguments:
- `M`: The mean function
- `C`: The covariance function
- `obs_mesh`: The places where f has been evaluated.
- `obs_vals`: The values of f that were observed there.
- `obs_V`: The observation variance. If None, assumed to be infinite
(observations made with no error).
- `lintrans`: A linear transformation. If None, assumed to be the
identity transformation (pretend it doesn't exist).
- `cross_validate`: A flag indicating whether a check should be done to
see if the data could have arisen from M and C with positive probability.
### Response:
def observe(M, C, obs_mesh, obs_vals, obs_V=0, lintrans=None,
cross_validate=True):
"""
(M, C, obs_mesh, obs_vals[, obs_V = 0, lintrans = None, cross_validate = True])
Imposes observation of the value of obs_vals on M and C, where
obs_vals ~ N(lintrans * f(obs_mesh), V)
f ~ GP(M,C)
:Arguments:
- `M`: The mean function
- `C`: The covariance function
- `obs_mesh`: The places where f has been evaluated.
- `obs_vals`: The values of f that were observed there.
- `obs_V`: The observation variance. If None, assumed to be infinite
(observations made with no error).
- `lintrans`: A linear transformation. If None, assumed to be the
identity transformation (pretend it doesn't exist).
- `cross_validate`: A flag indicating whether a check should be done to
see if the data could have arisen from M and C with positive probability.
"""
obs_mesh = regularize_array(obs_mesh)
# print_(obs_mesh)
obs_V = resize(obs_V, obs_mesh.shape[0])
obs_vals = resize(obs_vals, obs_mesh.shape[0])
# First observe C.
relevant_slice, obs_mesh_new = C.observe(obs_mesh, obs_V, output_type='o')
# Then observe M from C.
M.observe(C, obs_mesh_new, obs_vals.ravel()[relevant_slice])
# Cross-validate if not asked not to.
if obs_mesh_new.shape[0] < obs_mesh.shape[0]:
if cross_validate:
if not predictive_check(
obs_vals, obs_mesh, M, C.obs_piv, sqrt(C.relative_precision)):
raise ValueError(
"These data seem extremely improbable given your GP prior. \n Suggestions: decrease observation precision, or adjust the covariance to \n allow the function to be less smooth.") |
def list_all_geo_zones(cls, **kwargs):
"""List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_geo_zones_with_http_info(**kwargs)
else:
(data) = cls._list_all_geo_zones_with_http_info(**kwargs)
return data | List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread.
### Response:
def list_all_geo_zones(cls, **kwargs):
"""List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_geo_zones_with_http_info(**kwargs)
else:
(data) = cls._list_all_geo_zones_with_http_info(**kwargs)
return data |
def getBool(t):
"""If t is of type bool, return it, otherwise raise InvalidTypeError.
"""
b = c_int()
if PL_get_long(t, byref(b)):
return bool(b.value)
else:
raise InvalidTypeError("bool") | If t is of type bool, return it, otherwise raise InvalidTypeError. | Below is the the instruction that describes the task:
### Input:
If t is of type bool, return it, otherwise raise InvalidTypeError.
### Response:
def getBool(t):
"""If t is of type bool, return it, otherwise raise InvalidTypeError.
"""
b = c_int()
if PL_get_long(t, byref(b)):
return bool(b.value)
else:
raise InvalidTypeError("bool") |
def make_decoder(num_topics, num_words):
"""Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
"""
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[num_topics, num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
def decoder(topics):
word_probs = tf.matmul(topics, topics_words)
# The observations are bag of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
return tfd.OneHotCategorical(probs=word_probs,
name="bag_of_words")
return decoder, topics_words | Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words. | Below is the instruction that describes the task:
### Input:
Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
### Response:
def make_decoder(num_topics, num_words):
"""Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
"""
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[num_topics, num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
def decoder(topics):
word_probs = tf.matmul(topics, topics_words)
# The observations are bag of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
return tfd.OneHotCategorical(probs=word_probs,
name="bag_of_words")
return decoder, topics_words |
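A rough usage sketch for the decoder factory above; it assumes a TF1-style graph/session context (because of tf.compat.v1.get_variable), and the batch size and dimensions are invented for illustration:
import tensorflow as tf
decoder, topics_words = make_decoder(num_topics=5, num_words=100)
topics = tf.nn.softmax(tf.random.normal([8, 5]))  # a batch of 8 topic mixtures
words_dist = decoder(topics)                      # OneHotCategorical over 100 words
# words_dist.log_prob(counts) then scores an [8, 100] bag-of-words batch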
def _import_next_layer(self, proto, length):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
"""
if length == 0:
from pcapkit.protocols.null import NoPayload as Protocol
elif self._sigterm:
from pcapkit.protocols.raw import Raw as Protocol
elif proto == 0x0806:
from pcapkit.protocols.link.arp import ARP as Protocol
elif proto == 0x8035:
from pcapkit.protocols.link.rarp import RARP as Protocol
elif proto == 0x8100:
from pcapkit.protocols.link.vlan import VLAN as Protocol
elif proto == 0x0800:
from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
elif proto == 0x86DD:
from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
elif proto == 0x8137:
from pcapkit.protocols.internet.ipx import IPX as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, error=self._onerror,
layer=self._exlayer, protocol=self._exproto)
return next_ | Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer | Below is the the instruction that describes the task:
### Input:
Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
### Response:
def _import_next_layer(self, proto, length):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
"""
if length == 0:
from pcapkit.protocols.null import NoPayload as Protocol
elif self._sigterm:
from pcapkit.protocols.raw import Raw as Protocol
elif proto == 0x0806:
from pcapkit.protocols.link.arp import ARP as Protocol
elif proto == 0x8035:
from pcapkit.protocols.link.rarp import RARP as Protocol
elif proto == 0x8100:
from pcapkit.protocols.link.vlan import VLAN as Protocol
elif proto == 0x0800:
from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
elif proto == 0x86DD:
from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
elif proto == 0x8137:
from pcapkit.protocols.internet.ipx import IPX as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, error=self._onerror,
layer=self._exlayer, protocol=self._exproto)
return next_ |
def minimum_sys(cls, inherit_path):
"""Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
"""
site_libs = set(cls.site_libs())
for site_lib in site_libs:
TRACER.log('Found site-library: %s' % site_lib)
for extras_path in cls._extras_paths():
TRACER.log('Found site extra: %s' % extras_path)
site_libs.add(extras_path)
site_libs = set(os.path.normpath(path) for path in site_libs)
sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)
sys_modules = cls.minimum_sys_modules(site_libs)
return sys_path, sys_path_importer_cache, sys_modules | Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation. | Below is the the instruction that describes the task:
### Input:
Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
### Response:
def minimum_sys(cls, inherit_path):
"""Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
"""
site_libs = set(cls.site_libs())
for site_lib in site_libs:
TRACER.log('Found site-library: %s' % site_lib)
for extras_path in cls._extras_paths():
TRACER.log('Found site extra: %s' % extras_path)
site_libs.add(extras_path)
site_libs = set(os.path.normpath(path) for path in site_libs)
sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)
sys_modules = cls.minimum_sys_modules(site_libs)
return sys_path, sys_path_importer_cache, sys_modules |
def force_horizontal_padding_after(
self, index: int, padding: Union[int, float]) -> None:
"""Change the padding after the given column."""
self.horizontal_padding[index] = padding | Change the padding after the given column. | Below is the the instruction that describes the task:
### Input:
Change the padding after the given column.
### Response:
def force_horizontal_padding_after(
self, index: int, padding: Union[int, float]) -> None:
"""Change the padding after the given column."""
self.horizontal_padding[index] = padding |
def timeseries(self, dataframe=False):
"""Get the date histogram aggregations.
:param dataframe: if true, return a pandas.DataFrame object
"""
self.query.get_cardinality("author_uuid").by_period()
return super().timeseries(dataframe) | Get the date histogram aggregations.
:param dataframe: if true, return a pandas.DataFrame object | Below is the the instruction that describes the task:
### Input:
Get the date histogram aggregations.
:param dataframe: if true, return a pandas.DataFrame object
### Response:
def timeseries(self, dataframe=False):
"""Get the date histogram aggregations.
:param dataframe: if true, return a pandas.DataFrame object
"""
self.query.get_cardinality("author_uuid").by_period()
return super().timeseries(dataframe) |
def check(self, batch_size):
"""Returns True if the logging frequency has been met."""
self.increment(batch_size)
return self.unit_count >= self.config["log_train_every"] | Returns True if the logging frequency has been met. | Below is the the instruction that describes the task:
### Input:
Returns True if the logging frequency has been met.
### Response:
def check(self, batch_size):
"""Returns True if the logging frequency has been met."""
self.increment(batch_size)
return self.unit_count >= self.config["log_train_every"] |
def _threaded(self, *args, **kwargs):
"""Call the target and put the result in the Queue."""
for target in self.targets:
result = target(*args, **kwargs)
self.queue.put(result) | Call the target and put the result in the Queue. | Below is the the instruction that describes the task:
### Input:
Call the target and put the result in the Queue.
### Response:
def _threaded(self, *args, **kwargs):
"""Call the target and put the result in the Queue."""
for target in self.targets:
result = target(*args, **kwargs)
self.queue.put(result) |
def register_view(self, view):
"""Called when the View was registered"""
super(TopToolBarController, self).register_view(view)
view['maximize_button'].connect('clicked', self.on_maximize_button_clicked)
self.update_maximize_button() | Called when the View was registered | Below is the the instruction that describes the task:
### Input:
Called when the View was registered
### Response:
def register_view(self, view):
"""Called when the View was registered"""
super(TopToolBarController, self).register_view(view)
view['maximize_button'].connect('clicked', self.on_maximize_button_clicked)
self.update_maximize_button() |
def check(dependency=None, timeout=60):
"""Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
"""
def decorator(check):
# Modules are evaluated from the top of the file down, so _check_names will
# contain the names of the checks in the order in which they are declared
_check_names.append(check.__name__)
check._check_dependency = dependency
@functools.wraps(check)
def wrapper(checks_root, dependency_state):
# Result template
result = CheckResult.from_check(check)
# Any shared (returned) state
state = None
try:
# Setup check environment, copying disk state from dependency
internal.run_dir = checks_root / check.__name__
src_dir = checks_root / (dependency.__name__ if dependency else "-")
shutil.copytree(src_dir, internal.run_dir)
os.chdir(internal.run_dir)
# Run registered functions before/after running check and set timeout
with internal.register, _timeout(seconds=timeout):
args = (dependency_state,) if inspect.getfullargspec(check).args else ()
state = check(*args)
except Failure as e:
result.passed = False
result.cause = e.payload
except BaseException as e:
result.passed = None
result.cause = {"rationale": _("check50 ran into an error while running checks!")}
log(repr(e))
for line in traceback.format_tb(e.__traceback__):
log(line.rstrip())
log(_("Contact [email protected] with the URL of this check!"))
else:
result.passed = True
finally:
result.log = _log
result.data = _data
return result, state
return wrapper
return decorator | Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit() | Below is the the instruction that describes the task:
### Input:
Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
### Response:
def check(dependency=None, timeout=60):
"""Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
"""
def decorator(check):
# Modules are evaluated from the top of the file down, so _check_names will
# contain the names of the checks in the order in which they are declared
_check_names.append(check.__name__)
check._check_dependency = dependency
@functools.wraps(check)
def wrapper(checks_root, dependency_state):
# Result template
result = CheckResult.from_check(check)
# Any shared (returned) state
state = None
try:
# Setup check environment, copying disk state from dependency
internal.run_dir = checks_root / check.__name__
src_dir = checks_root / (dependency.__name__ if dependency else "-")
shutil.copytree(src_dir, internal.run_dir)
os.chdir(internal.run_dir)
# Run registered functions before/after running check and set timeout
with internal.register, _timeout(seconds=timeout):
args = (dependency_state,) if inspect.getfullargspec(check).args else ()
state = check(*args)
except Failure as e:
result.passed = False
result.cause = e.payload
except BaseException as e:
result.passed = None
result.cause = {"rationale": _("check50 ran into an error while running checks!")}
log(repr(e))
for line in traceback.format_tb(e.__traceback__):
log(line.rstrip())
log(_("Contact [email protected] with the URL of this check!"))
else:
result.passed = True
finally:
result.log = _log
result.data = _data
return result, state
return wrapper
return decorator |
def read(self):
"""Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
"""
if self.default_file:
self.read_default_config()
return self.read_config_files(self.all_config_files()) | Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file. | Below is the the instruction that describes the task:
### Input:
Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
### Response:
def read(self):
"""Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
"""
if self.default_file:
self.read_default_config()
return self.read_config_files(self.all_config_files()) |
def index(index, length):
"""Generates an index.
:param index: The index, can be positive or negative.
:param length: The length of the sequence to index.
:raises: IndexError
Negative indices are typically used to index a sequence in reverse order.
But to use them, the indexed object must convert them to the correct,
positive index. This function can be used to do this.
"""
if index < 0:
index += length
if 0 <= index < length:
return index
raise IndexError() | Generates an index.
:param index: The index, can be positive or negative.
:param length: The length of the sequence to index.
:raises: IndexError
Negative indices are typically used to index a sequence in reverse order.
But to use them, the indexed object must convert them to the correct,
positive index. This function can be used to do this. | Below is the the instruction that describes the task:
### Input:
Generates an index.
:param index: The index, can be positive or negative.
:param length: The length of the sequence to index.
:raises: IndexError
Negative indices are typically used to index a sequence in reverse order.
But to use them, the indexed object must convert them to the correct,
positive index. This function can be used to do this.
### Response:
def index(index, length):
"""Generates an index.
:param index: The index, can be positive or negative.
:param length: The length of the sequence to index.
:raises: IndexError
Negative indices are typically used to index a sequence in reverse order.
But to use them, the indexed object must convert them to the correct,
positive index. This function can be used to do this.
"""
if index < 0:
index += length
if 0 <= index < length:
return index
raise IndexError() |
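A quick usage sketch of the index helper above; the numbers are illustrative only:
index(2, 5)    # -> 2
index(-1, 5)   # -> 4, the last valid position
index(-5, 5)   # -> 0
index(5, 5)    # raises IndexError, as does index(-6, 5)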
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else [] | Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list | Below is the the instruction that describes the task:
### Input:
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
### Response:
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else [] |
def _execute_single_level_task(self):
""" Execute a single-level task """
self.log(u"Executing single level task...")
try:
# load audio file, extract MFCCs from real wave, clear audio file
self._step_begin(u"extract MFCC real wave")
real_wave_mfcc = self._extract_mfcc(
file_path=self.task.audio_file_path_absolute,
file_format=None,
)
self._step_end()
# compute head and/or tail and set it
self._step_begin(u"compute head tail")
(head_length, process_length, tail_length) = self._compute_head_process_tail(real_wave_mfcc)
real_wave_mfcc.set_head_middle_tail(head_length, process_length, tail_length)
self._step_end()
# compute alignment, outputting a tree of time intervals
self._set_synthesizer()
sync_root = Tree()
self._execute_inner(
real_wave_mfcc,
self.task.text_file,
sync_root=sync_root,
force_aba_auto=False,
log=True,
leaf_level=True
)
self._clear_cache_synthesizer()
# create syncmap and add it to task
self._step_begin(u"create sync map")
self._create_sync_map(sync_root=sync_root)
self._step_end()
# log total
self._step_total()
self.log(u"Executing single level task... done")
except Exception as exc:
self._step_failure(exc) | Execute a single-level task | Below is the the instruction that describes the task:
### Input:
Execute a single-level task
### Response:
def _execute_single_level_task(self):
""" Execute a single-level task """
self.log(u"Executing single level task...")
try:
# load audio file, extract MFCCs from real wave, clear audio file
self._step_begin(u"extract MFCC real wave")
real_wave_mfcc = self._extract_mfcc(
file_path=self.task.audio_file_path_absolute,
file_format=None,
)
self._step_end()
# compute head and/or tail and set it
self._step_begin(u"compute head tail")
(head_length, process_length, tail_length) = self._compute_head_process_tail(real_wave_mfcc)
real_wave_mfcc.set_head_middle_tail(head_length, process_length, tail_length)
self._step_end()
# compute alignment, outputting a tree of time intervals
self._set_synthesizer()
sync_root = Tree()
self._execute_inner(
real_wave_mfcc,
self.task.text_file,
sync_root=sync_root,
force_aba_auto=False,
log=True,
leaf_level=True
)
self._clear_cache_synthesizer()
# create syncmap and add it to task
self._step_begin(u"create sync map")
self._create_sync_map(sync_root=sync_root)
self._step_end()
# log total
self._step_total()
self.log(u"Executing single level task... done")
except Exception as exc:
self._step_failure(exc) |
def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
region=None, key=None, keyid=None, profile=None):
'''
Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
{updated: False} if the alias was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.update_alias my_lambda my_alias $LATEST
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {}
if FunctionVersion:
args['FunctionVersion'] = FunctionVersion
if Description:
args['Description'] = Description
r = conn.update_alias(FunctionName=FunctionName, Name=Name, **args)
if r:
keys = ('Name', 'FunctionVersion', 'Description')
return {'updated': True, 'alias': dict([(k, r.get(k)) for k in keys])}
else:
log.warning('Alias was not updated')
return {'updated': False}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
{updated: False} if the alias was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.update_alias my_lambda my_alias $LATEST | Below is the the instruction that describes the task:
### Input:
Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
{updated: False} if the alias was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.update_alias my_lambda my_alias $LATEST
### Response:
def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
region=None, key=None, keyid=None, profile=None):
'''
Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
{updated: False} if the alias was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.update_alias my_lambda my_alias $LATEST
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {}
if FunctionVersion:
args['FunctionVersion'] = FunctionVersion
if Description:
args['Description'] = Description
r = conn.update_alias(FunctionName=FunctionName, Name=Name, **args)
if r:
keys = ('Name', 'FunctionVersion', 'Description')
return {'updated': True, 'alias': dict([(k, r.get(k)) for k in keys])}
else:
log.warning('Alias was not updated')
return {'updated': False}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} |
def get_events(conn, stackname):
"""Get the events in batches and return in chronological order"""
next = None
event_list = []
while 1:
events = conn.describe_stack_events(stackname, next)
event_list.append(events)
if events.next_token is None:
break
next = events.next_token
time.sleep(1)
return reversed(sum(event_list, [])) | Get the events in batches and return in chronological order | Below is the the instruction that describes the task:
### Input:
Get the events in batches and return in chronological order
### Response:
def get_events(conn, stackname):
"""Get the events in batches and return in chronological order"""
next = None
event_list = []
while 1:
events = conn.describe_stack_events(stackname, next)
event_list.append(events)
if events.next_token is None:
break
next = events.next_token
time.sleep(1)
return reversed(sum(event_list, [])) |
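A hedged usage sketch; the classic-boto connection and the stack name are placeholders, not part of the original snippet:
import boto.cloudformation
conn = boto.cloudformation.connect_to_region('us-east-1')  # placeholder region
for event in get_events(conn, 'my-stack'):                 # placeholder stack name
    print(event)  # events are returned oldest-first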
def nsx_controller_connection_addr_method(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
method = ET.SubElement(connection_addr, "method")
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def nsx_controller_connection_addr_method(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
method = ET.SubElement(connection_addr, "method")
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def valid_token(token):
"""Asserts a provided string is a valid duration token representation
:param token: duration representation token
:type token: string
"""
is_scale = False
# Check if the token represents a scale
# If it doesn't set a flag accordingly
try:
Scale(token)
is_scale = True
except ScaleFormatError:
pass
# If token either represents a numerical value, a
# separator token, or a scale, it is considered valid
if any([token.isdigit(), token in SEPARATOR_TOKENS, is_scale]):
return True
return False | Asserts a provided string is a valid duration token representation
:param token: duration representation token
:type token: string | Below is the the instruction that describes the task:
### Input:
Asserts a provided string is a valid duration token representation
:param token: duration representation token
:type token: string
### Response:
def valid_token(token):
"""Asserts a provided string is a valid duration token representation
:param token: duration representation token
:type token: string
"""
is_scale = False
# Check if the token represents a scale
# If it doesn't set a flag accordingly
try:
Scale(token)
is_scale = True
except ScaleFormatError:
pass
# If token either represents a numerical value, a
# separator token, or a scale, it is considered valid
if any([token.isdigit(), token in SEPARATOR_TOKENS, is_scale]):
return True
return False |
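A small illustration of the token check above; which words parse as a Scale depends on the Scale class, so the unit string here is only an assumption:
valid_token("15")      # True: purely numerical
valid_token("hours")   # True only if Scale("hours") parses (assumption)
valid_token("@@")      # False: not a digit, separator token, or scale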
def find_node(self, node, path):
"""Finds a node by the given path from the given node."""
for hash_value in path:
if isinstance(node, LeafStatisticsNode):
break
for stats in node.get_child_keys():
if hash(stats) == hash_value:
node = node.get_child_node(stats)
break
else:
break
return node | Finds a node by the given path from the given node. | Below is the the instruction that describes the task:
### Input:
Finds a node by the given path from the given node.
### Response:
def find_node(self, node, path):
"""Finds a node by the given path from the given node."""
for hash_value in path:
if isinstance(node, LeafStatisticsNode):
break
for stats in node.get_child_keys():
if hash(stats) == hash_value:
node = node.get_child_node(stats)
break
else:
break
return node |
def make_shift_function(alphabet):
"""Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
"""
def shift_case_sensitive(shift, symbol):
case = [case for case in alphabet if symbol in case]
if not case:
return symbol
case = case[0]
index = case.index(symbol)
return case[(index - shift) % len(case)]
return shift_case_sensitive | Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol) | Below is the the instruction that describes the task:
### Input:
Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
### Response:
def make_shift_function(alphabet):
"""Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
"""
def shift_case_sensitive(shift, symbol):
case = [case for case in alphabet if symbol in case]
if not case:
return symbol
case = case[0]
index = case.index(symbol)
return case[(index - shift) % len(case)]
return shift_case_sensitive |
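A minimal usage sketch of the shift-function factory above, reusing the standard-library alphabets from its own doctest examples:
import string
shift = make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
shift(3, 'd')   # -> 'a'  (each case wraps around independently)
shift(3, 'D')   # -> 'A'
shift(3, '!')   # -> '!'  (symbols outside every case pass through unchanged)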
def execute_command_with_connection(self, context, command, *args):
"""
Note: session & vcenter_data_model & reservation id objects will be injected dynamically to the command
:param command:
:param context: instance of ResourceCommandContext or AutoLoadCommandContext
:type context: cloudshell.shell.core.context.ResourceCommandContext
:param args:
"""
logger = self.context_based_logger_factory.create_logger_for_context(
logger_name='vCenterShell',
context=context)
if not command:
logger.error(COMMAND_CANNOT_BE_NONE)
raise Exception(COMMAND_CANNOT_BE_NONE)
try:
command_name = command.__name__
logger.info(LOG_FORMAT.format(START, command_name))
command_args = []
si = None
session = None
connection_details = None
vcenter_data_model = None
# get connection details
if context:
with CloudShellSessionContext(context) as cloudshell_session:
session = cloudshell_session
vcenter_data_model = self.resource_model_parser.convert_to_vcenter_model(context.resource)
connection_details = ResourceConnectionDetailsRetriever.get_connection_details(session=session,
vcenter_resource_model=vcenter_data_model,
resource_context=context.resource)
if connection_details:
logger.info(INFO_CONNECTING_TO_VCENTER.format(connection_details.host))
logger.debug(
DEBUG_CONNECTION_INFO.format(connection_details.host,
connection_details.username,
connection_details.port))
si = self.get_py_service_connection(connection_details, logger)
if si:
logger.info(CONNECTED_TO_CENTER.format(connection_details.host))
command_args.append(si)
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=session,
arg_name='session')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=vcenter_data_model,
arg_name='vcenter_data_model')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=self._get_reservation_id(context),
arg_name='reservation_id')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=logger,
arg_name='logger')
command_args.extend(args)
logger.info(EXECUTING_COMMAND.format(command_name))
logger.debug(DEBUG_COMMAND_PARAMS.format(COMMA.join([str(x) for x in command_args])))
results = command(*tuple(command_args))
logger.info(FINISHED_EXECUTING_COMMAND.format(command_name))
logger.debug(DEBUG_COMMAND_RESULT.format(str(results)))
return results
except Exception as ex:
logger.exception(COMMAND_ERROR.format(command_name))
logger.exception(str(type(ex)) + ': ' + str(ex))
raise
finally:
logger.info(LOG_FORMAT.format(END, command_name)) | Note: session & vcenter_data_model & reservation id objects will be injected dynamically to the command
:param command:
:param context: instance of ResourceCommandContext or AutoLoadCommandContext
:type context: cloudshell.shell.core.context.ResourceCommandContext
:param args: | Below is the the instruction that describes the task:
### Input:
Note: session & vcenter_data_model & reservation id objects will be injected dynamically to the command
:param command:
:param context: instance of ResourceCommandContext or AutoLoadCommandContext
:type context: cloudshell.shell.core.context.ResourceCommandContext
:param args:
### Response:
def execute_command_with_connection(self, context, command, *args):
"""
Note: session & vcenter_data_model & reservation id objects will be injected dynamically to the command
:param command:
:param context: instance of ResourceCommandContext or AutoLoadCommandContext
:type context: cloudshell.shell.core.context.ResourceCommandContext
:param args:
"""
logger = self.context_based_logger_factory.create_logger_for_context(
logger_name='vCenterShell',
context=context)
if not command:
logger.error(COMMAND_CANNOT_BE_NONE)
raise Exception(COMMAND_CANNOT_BE_NONE)
try:
command_name = command.__name__
logger.info(LOG_FORMAT.format(START, command_name))
command_args = []
si = None
session = None
connection_details = None
vcenter_data_model = None
# get connection details
if context:
with CloudShellSessionContext(context) as cloudshell_session:
session = cloudshell_session
vcenter_data_model = self.resource_model_parser.convert_to_vcenter_model(context.resource)
connection_details = ResourceConnectionDetailsRetriever.get_connection_details(session=session,
vcenter_resource_model=vcenter_data_model,
resource_context=context.resource)
if connection_details:
logger.info(INFO_CONNECTING_TO_VCENTER.format(connection_details.host))
logger.debug(
DEBUG_CONNECTION_INFO.format(connection_details.host,
connection_details.username,
connection_details.port))
si = self.get_py_service_connection(connection_details, logger)
if si:
logger.info(CONNECTED_TO_CENTER.format(connection_details.host))
command_args.append(si)
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=session,
arg_name='session')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=vcenter_data_model,
arg_name='vcenter_data_model')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=self._get_reservation_id(context),
arg_name='reservation_id')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=logger,
arg_name='logger')
command_args.extend(args)
logger.info(EXECUTING_COMMAND.format(command_name))
logger.debug(DEBUG_COMMAND_PARAMS.format(COMMA.join([str(x) for x in command_args])))
results = command(*tuple(command_args))
logger.info(FINISHED_EXECUTING_COMMAND.format(command_name))
logger.debug(DEBUG_COMMAND_RESULT.format(str(results)))
return results
except Exception as ex:
logger.exception(COMMAND_ERROR.format(command_name))
logger.exception(str(type(ex)) + ': ' + str(ex))
raise
finally:
logger.info(LOG_FORMAT.format(END, command_name)) |
def wait_for_servers(session, servers):
"""Wait for the servers to be ready.
Note(msimonin): we don't guarantee the SSH connection to be ready.
"""
nclient = nova.Client(NOVA_VERSION, session=session,
region_name=os.environ['OS_REGION_NAME'])
while True:
deployed = []
undeployed = []
for server in servers:
c = nclient.servers.get(server.id)
if c.addresses != {} and c.status == 'ACTIVE':
deployed.append(server)
if c.status == 'ERROR':
undeployed.append(server)
logger.info("[nova]: Polling the Deployment")
logger.info("[nova]: %s deployed servers" % len(deployed))
logger.info("[nova]: %s undeployed servers" % len(undeployed))
if len(deployed) + len(undeployed) >= len(servers):
break
time.sleep(3)
return deployed, undeployed | Wait for the servers to be ready.
Note(msimonin): we don't guarantee the SSH connection to be ready. | Below is the the instruction that describes the task:
### Input:
Wait for the servers to be ready.
Note(msimonin): we don't guarantee the SSH connection to be ready.
### Response:
def wait_for_servers(session, servers):
"""Wait for the servers to be ready.
Note(msimonin): we don't guarantee the SSH connection to be ready.
"""
nclient = nova.Client(NOVA_VERSION, session=session,
region_name=os.environ['OS_REGION_NAME'])
while True:
deployed = []
undeployed = []
for server in servers:
c = nclient.servers.get(server.id)
if c.addresses != {} and c.status == 'ACTIVE':
deployed.append(server)
if c.status == 'ERROR':
undeployed.append(server)
logger.info("[nova]: Polling the Deployment")
logger.info("[nova]: %s deployed servers" % len(deployed))
logger.info("[nova]: %s undeployed servers" % len(undeployed))
if len(deployed) + len(undeployed) >= len(servers):
break
time.sleep(3)
return deployed, undeployed |
def get_group_dict(user=None, include_default=True):
'''
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
'''
if HAS_GRP is False or HAS_PWD is False:
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict | Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27} | Below is the the instruction that describes the task:
### Input:
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
### Response:
def get_group_dict(user=None, include_default=True):
'''
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
'''
if HAS_GRP is False or HAS_PWD is False:
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict |
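A hedged sketch of the result shape on a POSIX host; the user name and gid values are made up:
get_group_dict('deploy')
# e.g. {'deploy': 1003, 'adm': 4, 'sudo': 27} -- group names mapped to numeric gids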
def add(self, node):
"""Add one node as descendant
"""
self.sons.append(node)
node.parent = self | Add one node as descendant | Below is the the instruction that describes the task:
### Input:
Add one node as descendant
### Response:
def add(self, node):
"""Add one node as descendant
"""
self.sons.append(node)
node.parent = self |
def nan_empty(self, col: str):
"""
Fill empty values with NaN values
:param col: name of the column
:type col: str
:example: ``ds.nan_empty("mycol")``
"""
try:
self.df[col] = self.df[col].replace('', nan)
self.ok("Filled empty values with nan in column " + col)
except Exception as e:
self.err(e, "Can not fill empty values with nan") | Fill empty values with NaN values
:param col: name of the column
:type col: str
:example: ``ds.nan_empty("mycol")`` | Below is the the instruction that describes the task:
### Input:
Fill empty values with NaN values
:param col: name of the column
:type col: str
:example: ``ds.nan_empty("mycol")``
### Response:
def nan_empty(self, col: str):
"""
Fill empty values with NaN values
:param col: name of the column
:type col: str
:example: ``ds.nan_empty("mycol")``
"""
try:
self.df[col] = self.df[col].replace('', nan)
self.ok("Filled empty values with nan in column " + col)
except Exception as e:
self.err(e, "Can not fill empty values with nan") |
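The method above is a thin wrapper around pandas; a plain-pandas sketch of the same replacement, with an invented DataFrame and column:
import pandas as pd
from numpy import nan
df = pd.DataFrame({"mycol": ["a", "", "c"]})
df["mycol"] = df["mycol"].replace("", nan)  # '' -> NaN, mirroring ds.nan_empty("mycol")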
def widget_from_single_value(o):
"""Make widgets from single values, which can be used as parameter defaults."""
if isinstance(o, string_types):
return Text(value=unicode_type(o))
elif isinstance(o, bool):
return Checkbox(value=o)
elif isinstance(o, Integral):
min, max, value = _get_min_max_value(None, None, o)
return IntSlider(value=o, min=min, max=max)
elif isinstance(o, Real):
min, max, value = _get_min_max_value(None, None, o)
return FloatSlider(value=o, min=min, max=max)
else:
return None | Make widgets from single values, which can be used as parameter defaults. | Below is the the instruction that describes the task:
### Input:
Make widgets from single values, which can be used as parameter defaults.
### Response:
def widget_from_single_value(o):
"""Make widgets from single values, which can be used as parameter defaults."""
if isinstance(o, string_types):
return Text(value=unicode_type(o))
elif isinstance(o, bool):
return Checkbox(value=o)
elif isinstance(o, Integral):
min, max, value = _get_min_max_value(None, None, o)
return IntSlider(value=o, min=min, max=max)
elif isinstance(o, Real):
min, max, value = _get_min_max_value(None, None, o)
return FloatSlider(value=o, min=min, max=max)
else:
return None |
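A hedged sketch of the dispatch behaviour, assuming the ipywidgets-style Text/Checkbox/IntSlider/FloatSlider classes referenced above are in scope:
widget_from_single_value("hello")   # -> Text(value='hello')
widget_from_single_value(True)      # -> Checkbox(value=True), checked before Integral
widget_from_single_value(7)         # -> IntSlider centred on 7
widget_from_single_value(2.5)       # -> FloatSlider centred on 2.5
widget_from_single_value([1, 2])    # -> None: no single-value widget for this type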
def nfa_union(nfa_1: dict, nfa_2: dict) -> dict:
""" Returns a NFA that reads the union of the NFAs in input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs. here is a NFA
:math:`A_∨` that nondeterministically chooses :math:`A_1` or
:math:`A_2` and runs it on the input word.
It is defined as:
:math:`A_∨ = (Σ, S, S_0 , ρ, F )`
where:
• :math:`S = S_1 ∪ S_2`
• :math:`S_0 = S_1^0 ∪ S_2^0`
• :math:`F = F_1 ∪ F_2`
• :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
:math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR :math:`[ s ∈
S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`
Pay attention to avoid having the NFAs with state names in common, in case
use :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA.
:return: *(dict)* representing the united NFA.
"""
union = {
'alphabet': nfa_1['alphabet'].union(nfa_2['alphabet']),
'states': nfa_1['states'].union(nfa_2['states']),
'initial_states':
nfa_1['initial_states'].union(nfa_2['initial_states']),
'accepting_states':
nfa_1['accepting_states'].union(nfa_2['accepting_states']),
'transitions': nfa_1['transitions'].copy()}
for trans in nfa_2['transitions']:
for elem in nfa_2['transitions'][trans]:
union['transitions'].setdefault(trans, set()).add(elem)
return union | Returns a NFA that reads the union of the NFAs in input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs. here is a NFA
:math:`A_∨` that nondeterministically chooses :math:`A_1` or
:math:`A_2` and runs it on the input word.
It is defined as:
:math:`A_∨ = (Σ, S, S_0 , ρ, F )`
where:
• :math:`S = S_1 ∪ S_2`
• :math:`S_0 = S_1^0 ∪ S_2^0`
• :math:`F = F_1 ∪ F_2`
• :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
:math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR :math:`[ s ∈
S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`
Pay attention to avoid having the NFAs with state names in common, in case
use :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA.
:return: *(dict)* representing the united NFA. | Below is the the instruction that describes the task:
### Input:
Returns a NFA that reads the union of the NFAs in input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs. here is a NFA
:math:`A_∨` that nondeterministically chooses :math:`A_1` or
:math:`A_2` and runs it on the input word.
It is defined as:
:math:`A_∨ = (Σ, S, S_0 , ρ, F )`
where:
• :math:`S = S_1 ∪ S_2`
• :math:`S_0 = S_1^0 ∪ S_2^0`
• :math:`F = F_1 ∪ F_2`
• :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
:math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR :math:`[ s ∈
S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`
Pay attention to avoid having the NFAs with state names in common, in case
use :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA.
:return: *(dict)* representing the united NFA.
### Response:
def nfa_union(nfa_1: dict, nfa_2: dict) -> dict:
""" Returns a NFA that reads the union of the NFAs in input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs. here is a NFA
:math:`A_∨` that nondeterministically chooses :math:`A_1` or
:math:`A_2` and runs it on the input word.
It is defined as:
:math:`A_∨ = (Σ, S, S_0 , ρ, F )`
where:
• :math:`S = S_1 ∪ S_2`
• :math:`S_0 = S_1^0 ∪ S_2^0`
• :math:`F = F_1 ∪ F_2`
• :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
:math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR :math:`[ s ∈
S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`
Pay attention to avoid having the NFAs with state names in common, in case
use :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA.
:return: *(dict)* representing the united NFA.
"""
union = {
'alphabet': nfa_1['alphabet'].union(nfa_2['alphabet']),
'states': nfa_1['states'].union(nfa_2['states']),
'initial_states':
nfa_1['initial_states'].union(nfa_2['initial_states']),
'accepting_states':
nfa_1['accepting_states'].union(nfa_2['accepting_states']),
'transitions': nfa_1['transitions'].copy()}
for trans in nfa_2['transitions']:
for elem in nfa_2['transitions'][trans]:
union['transitions'].setdefault(trans, set()).add(elem)
return union |
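A small worked example of the union construction above; both NFAs are invented and use disjoint state names, as the docstring requires:
nfa_a = {'alphabet': {'a'}, 'states': {'q0', 'q1'},
         'initial_states': {'q0'}, 'accepting_states': {'q1'},
         'transitions': {('q0', 'a'): {'q1'}}}
nfa_b = {'alphabet': {'b'}, 'states': {'p0', 'p1'},
         'initial_states': {'p0'}, 'accepting_states': {'p1'},
         'transitions': {('p0', 'b'): {'p1'}}}
united = nfa_union(nfa_a, nfa_b)
# united accepts both "a" (via q0 -> q1) and "b" (via p0 -> p1)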
def with_run_kwargs(self, **kwargs: Dict[str, Any]) -> 'Optimization':
"""
Replace Tensorflow session run kwargs.
Check Tensorflow session run [documentation](https://www.tensorflow.org/api_docs/python/tf/Session).
:param kwargs: Dictionary of tensors as keys and numpy arrays or
primitive python types as values.
:return: Optimization instance self reference.
"""
self._run_kwargs = kwargs
return self | Replace Tensorflow session run kwargs.
Check Tensorflow session run [documentation](https://www.tensorflow.org/api_docs/python/tf/Session).
:param kwargs: Dictionary of tensors as keys and numpy arrays or
primitive python types as values.
:return: Optimization instance self reference. | Below is the the instruction that describes the task:
### Input:
Replace Tensorflow session run kwargs.
Check Tensorflow session run [documentation](https://www.tensorflow.org/api_docs/python/tf/Session).
:param kwargs: Dictionary of tensors as keys and numpy arrays or
primitive python types as values.
:return: Optimization instance self reference.
### Response:
def with_run_kwargs(self, **kwargs: Dict[str, Any]) -> 'Optimization':
"""
Replace Tensorflow session run kwargs.
Check Tensorflow session run [documentation](https://www.tensorflow.org/api_docs/python/tf/Session).
:param kwargs: Dictionary of tensors as keys and numpy arrays or
primitive python types as values.
:return: Optimization instance self reference.
"""
self._run_kwargs = kwargs
return self |
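A hedged sketch of the fluent call this setter enables; `opt`, the placeholder tensor, and the numpy batch all stand in for whatever the caller already has:
feed = {x_placeholder: x_batch}            # hypothetical tf placeholder -> numpy array
opt = opt.with_run_kwargs(feed_dict=feed)  # later forwarded to tf.Session.run(...)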
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points. Currently limited to 10 distinct values (colors).
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
if v:
df = self.v
else:
df = self.u
if color is not None:
colormap = pd.Series(dict(zip(set(color.values),
tableau20[0:2 * len(set(color)):2])))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(zip(
set(s.values), range(30, 351)[0::50][0:len(set(s)) + 1])))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(zip(set(marker.values), markers)))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
marker = pd.Series('o', index=df.index)
marker_legend = False
if ax is None:
fig, ax = plt.subplots(1, 1)
for m in set(marker.values):
mse = marker[marker == m]
cse = color[mse.index]
sse = s[mse.index]
ax.scatter(df.ix[mse.index, pc1], df.ix[mse.index, pc2],
s=sse.values, color=list(cse.values), marker=m,
alpha=0.8)
ax.set_title('{} vs. {}'.format(pc1, pc2))
ax.set_xlabel(pc1)
ax.set_ylabel(pc2)
if color_legend:
legend_rects = make_color_legend_rects(colormap)
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index,
title=color_name,
loc='upper left',
bbox_to_anchor=(1, 1))
if s_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in smap.index:
ax.scatter([xb + 1], [yb + 1], marker='o',
s=smap[i], color='black', label=i)
lgd = ax.legend(title=s_name, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
if marker_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in markermap.index:
t = ax.scatter([xb + 1], [yb + 1], marker=markermap[i],
s=sse.min(), color='black', label=i)
handles, labels = ax.get_legend_handles_labels()
if s_legend:
handles = handles[len(smap):]
labels = labels[len(smap):]
lgd = ax.legend(handles, labels, title=marker_name,
loc='lower left', bbox_to_anchor=(1, 0))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
# fig.tight_layout()
return fig, ax | Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points. Currently limited to 10 distinct values (colors).
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points. | Below is the instruction that describes the task:
### Input:
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points. Currently limited to 10 distinct values (colors).
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
### Response:
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points. Currently limited to 10 distinct values (colors).
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
if v:
df = self.v
else:
df = self.u
if color is not None:
colormap = pd.Series(dict(zip(set(color.values),
tableau20[0:2 * len(set(color)):2])))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(zip(
set(s.values), range(30, 351)[0::50][0:len(set(s)) + 1])))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(zip(set(marker.values), markers)))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
marker = pd.Series('o', index=df.index)
marker_legend = False
if ax is None:
fig, ax = plt.subplots(1, 1)
for m in set(marker.values):
mse = marker[marker == m]
cse = color[mse.index]
sse = s[mse.index]
ax.scatter(df.ix[mse.index, pc1], df.ix[mse.index, pc2],
s=sse.values, color=list(cse.values), marker=m,
alpha=0.8)
ax.set_title('{} vs. {}'.format(pc1, pc2))
ax.set_xlabel(pc1)
ax.set_ylabel(pc2)
if color_legend:
legend_rects = make_color_legend_rects(colormap)
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index,
title=color_name,
loc='upper left',
bbox_to_anchor=(1, 1))
if s_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in smap.index:
ax.scatter([xb + 1], [yb + 1], marker='o',
s=smap[i], color='black', label=i)
lgd = ax.legend(title=s_name, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
if marker_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in markermap.index:
t = ax.scatter([xb + 1], [yb + 1], marker=markermap[i],
s=sse.min(), color='black', label=i)
handles, labels = ax.get_legend_handles_labels()
if s_legend:
handles = handles[len(smap):]
labels = labels[len(smap):]
lgd = ax.legend(handles, labels, title=marker_name,
loc='lower left', bbox_to_anchor=(1, 0))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
# fig.tight_layout()
return fig, ax |
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
    _siftdown(heap, 0, len(heap)-1) | Push item onto heap, maintaining the heap invariant. | Below is the instruction that describes the task:
### Input:
Push item onto heap, maintaining the heap invariant.
### Response:
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1) |
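A quick usage sketch for the heappush record above; _siftdown is not shown in the record, so the sketch borrows the equivalent private helper from the standard heapq module.
from heapq import _siftdown   # stand-in for the record's sift helper

heap = []
for item in [5, 1, 4, 2]:
    heappush(heap, item)      # the smallest element is always kept at heap[0]
print(heap[0])                # -> 1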
def add_update_callback(self, group=None, name=None, cb=None):
"""
Add a callback for a specific parameter name. This callback will be
executed when a new value is read from the Crazyflie.
"""
if not group and not name:
self.all_update_callback.add_callback(cb)
elif not name:
if group not in self.group_update_callbacks:
self.group_update_callbacks[group] = Caller()
self.group_update_callbacks[group].add_callback(cb)
else:
paramname = '{}.{}'.format(group, name)
if paramname not in self.param_update_callbacks:
self.param_update_callbacks[paramname] = Caller()
self.param_update_callbacks[paramname].add_callback(cb) | Add a callback for a specific parameter name. This callback will be
executed when a new value is read from the Crazyflie. | Below is the instruction that describes the task:
### Input:
Add a callback for a specific parameter name. This callback will be
executed when a new value is read from the Crazyflie.
### Response:
def add_update_callback(self, group=None, name=None, cb=None):
"""
Add a callback for a specific parameter name. This callback will be
executed when a new value is read from the Crazyflie.
"""
if not group and not name:
self.all_update_callback.add_callback(cb)
elif not name:
if group not in self.group_update_callbacks:
self.group_update_callbacks[group] = Caller()
self.group_update_callbacks[group].add_callback(cb)
else:
paramname = '{}.{}'.format(group, name)
if paramname not in self.param_update_callbacks:
self.param_update_callbacks[paramname] = Caller()
self.param_update_callbacks[paramname].add_callback(cb) |
def make_sentence(self, init_state=None, **kwargs):
"""
Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit.
"""
tries = kwargs.get('tries', DEFAULT_TRIES)
mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
test_output = kwargs.get('test_output', True)
max_words = kwargs.get('max_words', None)
if init_state != None:
prefix = list(init_state)
for word in prefix:
if word == BEGIN:
prefix = prefix[1:]
else:
break
else:
prefix = []
for _ in range(tries):
words = prefix + self.chain.walk(init_state)
if max_words != None and len(words) > max_words:
continue
if test_output and hasattr(self, "rejoined_text"):
if self.test_sentence_output(words, mor, mot):
return self.word_join(words)
else:
return self.word_join(words)
return None | Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit. | Below is the instruction that describes the task:
### Input:
Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit.
### Response:
def make_sentence(self, init_state=None, **kwargs):
"""
Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit.
"""
tries = kwargs.get('tries', DEFAULT_TRIES)
mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
test_output = kwargs.get('test_output', True)
max_words = kwargs.get('max_words', None)
if init_state != None:
prefix = list(init_state)
for word in prefix:
if word == BEGIN:
prefix = prefix[1:]
else:
break
else:
prefix = []
for _ in range(tries):
words = prefix + self.chain.walk(init_state)
if max_words != None and len(words) > max_words:
continue
if test_output and hasattr(self, "rejoined_text"):
if self.test_sentence_output(words, mor, mot):
return self.word_join(words)
else:
return self.word_join(words)
return None |
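A hedged usage sketch for make_sentence; the method belongs to a markovify-style text model, so the example assumes a markovify.Text corpus model (the corpus path is illustrative).
import markovify

with open("corpus.txt") as f:                        # any plain-text corpus
    model = markovify.Text(f.read(), state_size=2)
print(model.make_sentence(tries=50, max_words=20))   # None if no valid sentence was found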
def __load_parcov(self):
"""private method to set the parcov attribute from:
a pest control file (parameter bounds)
a pst object
a matrix object
an uncert file
an ascii matrix file
"""
# if the parcov arg was not passed but the pst arg was,
# reset and use parbounds to build parcov
if not self.parcov_arg:
if self.pst_arg:
self.parcov_arg = self.pst_arg
else:
raise Exception("linear_analysis.__load_parcov(): " +
"parcov_arg is None")
if isinstance(self.parcov_arg, Matrix):
self.__parcov = self.parcov_arg
return
if isinstance(self.parcov_arg, np.ndarray):
# if the passed array is a vector,
# then assume it is the diagonal of the parcov matrix
if len(self.parcov_arg.shape) == 1:
assert self.parcov_arg.shape[0] == self.jco.shape[1]
isdiagonal = True
else:
assert self.parcov_arg.shape[0] == self.jco.shape[1]
assert self.parcov_arg.shape[1] == self.jco.shape[1]
isdiagonal = False
self.logger.warn("linear_analysis.__load_parcov(): " +
"instantiating parcov from ndarray, can't " +
"verify parameters alignment with jco")
self.__parcov = Matrix(x=self.parcov_arg,
isdiagonal=isdiagonal,
row_names=self.jco.col_names,
col_names=self.jco.col_names)
self.log("loading parcov")
if isinstance(self.parcov_arg,str):
# if the arg is a string ending with "pst"
# then load parcov from parbounds
if self.parcov_arg.lower().endswith(".pst"):
self.__parcov = Cov.from_parbounds(self.parcov_arg,
sigma_range=self.sigma_range,
scale_offset=self.scale_offset)
else:
self.__parcov = self.__fromfile(self.parcov_arg, astype=Cov)
# if the arg is a pst object
elif isinstance(self.parcov_arg,Pst):
self.__parcov = Cov.from_parameter_data(self.parcov_arg,
sigma_range=self.sigma_range,
scale_offset=self.scale_offset)
else:
raise Exception("linear_analysis.__load_parcov(): " +
"parcov_arg must be a " +
"matrix object or a file name: " +
str(self.parcov_arg))
self.log("loading parcov") | private method to set the parcov attribute from:
a pest control file (parameter bounds)
a pst object
a matrix object
an uncert file
an ascii matrix file | Below is the instruction that describes the task:
### Input:
private method to set the parcov attribute from:
a pest control file (parameter bounds)
a pst object
a matrix object
an uncert file
an ascii matrix file
### Response:
def __load_parcov(self):
"""private method to set the parcov attribute from:
a pest control file (parameter bounds)
a pst object
a matrix object
an uncert file
an ascii matrix file
"""
# if the parcov arg was not passed but the pst arg was,
# reset and use parbounds to build parcov
if not self.parcov_arg:
if self.pst_arg:
self.parcov_arg = self.pst_arg
else:
raise Exception("linear_analysis.__load_parcov(): " +
"parcov_arg is None")
if isinstance(self.parcov_arg, Matrix):
self.__parcov = self.parcov_arg
return
if isinstance(self.parcov_arg, np.ndarray):
# if the passed array is a vector,
# then assume it is the diagonal of the parcov matrix
if len(self.parcov_arg.shape) == 1:
assert self.parcov_arg.shape[0] == self.jco.shape[1]
isdiagonal = True
else:
assert self.parcov_arg.shape[0] == self.jco.shape[1]
assert self.parcov_arg.shape[1] == self.jco.shape[1]
isdiagonal = False
self.logger.warn("linear_analysis.__load_parcov(): " +
"instantiating parcov from ndarray, can't " +
"verify parameters alignment with jco")
self.__parcov = Matrix(x=self.parcov_arg,
isdiagonal=isdiagonal,
row_names=self.jco.col_names,
col_names=self.jco.col_names)
self.log("loading parcov")
if isinstance(self.parcov_arg,str):
# if the arg is a string ending with "pst"
# then load parcov from parbounds
if self.parcov_arg.lower().endswith(".pst"):
self.__parcov = Cov.from_parbounds(self.parcov_arg,
sigma_range=self.sigma_range,
scale_offset=self.scale_offset)
else:
self.__parcov = self.__fromfile(self.parcov_arg, astype=Cov)
# if the arg is a pst object
elif isinstance(self.parcov_arg,Pst):
self.__parcov = Cov.from_parameter_data(self.parcov_arg,
sigma_range=self.sigma_range,
scale_offset=self.scale_offset)
else:
raise Exception("linear_analysis.__load_parcov(): " +
"parcov_arg must be a " +
"matrix object or a file name: " +
str(self.parcov_arg))
self.log("loading parcov") |
def write_ioc_to_file(self, output_dir=None, force=False):
"""
Serialize the IOC to a .ioc file.
:param output_dir: Directory to write the ioc out to. default is the current working directory.
:param force: If specified, will not validate the root node of the IOC is 'OpenIOC'.
:return:
"""
return write_ioc(self.root, output_dir, force=force) | Serialize the IOC to a .ioc file.
:param output_dir: Directory to write the ioc out to. default is the current working directory.
:param force: If specified, will not validate the root node of the IOC is 'OpenIOC'.
:return: | Below is the instruction that describes the task:
### Input:
Serialize the IOC to a .ioc file.
:param output_dir: Directory to write the ioc out to. default is the current working directory.
:param force: If specified, will not validate the root node of the IOC is 'OpenIOC'.
:return:
### Response:
def write_ioc_to_file(self, output_dir=None, force=False):
"""
Serialize the IOC to a .ioc file.
:param output_dir: Directory to write the ioc out to. default is the current working directory.
:param force: If specified, will not validate the root node of the IOC is 'OpenIOC'.
:return:
"""
return write_ioc(self.root, output_dir, force=force) |
def get_dates_file(path):
""" parse dates file of dates and probability of choosing"""
with open(path) as f:
dates = f.readlines()
return [(convert_time_string(date_string.split(" ")[0]), float(date_string.split(" ")[1]))
            for date_string in dates] | parse dates file of dates and probability of choosing | Below is the instruction that describes the task:
### Input:
parse dates file of dates and probability of choosing
### Response:
def get_dates_file(path):
""" parse dates file of dates and probability of choosing"""
with open(path) as f:
dates = f.readlines()
return [(convert_time_string(date_string.split(" ")[0]), float(date_string.split(" ")[1]))
for date_string in dates] |
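A sketch of the input format get_dates_file expects: one "<date-string> <probability>" pair per line. convert_time_string is not shown in the record, so the parsed values are only indicated in the comment.
with open("/tmp/dates.txt", "w") as f:              # illustrative path
    f.write("2020-01-01 0.7\n2020-02-01 0.3\n")
# get_dates_file("/tmp/dates.txt")
#   -> [(<parsed 2020-01-01>, 0.7), (<parsed 2020-02-01>, 0.3)]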
def set_tag(self, tag):
'''
Sets the tag.
If the Entity belongs to the world it will check for tag conflicts.
'''
if self._world:
if self._world.get_entity_by_tag(tag):
raise NonUniqueTagError(tag)
self._tag = tag | Sets the tag.
If the Entity belongs to the world it will check for tag conflicts. | Below is the instruction that describes the task:
### Input:
Sets the tag.
If the Entity belongs to the world it will check for tag conflicts.
### Response:
def set_tag(self, tag):
'''
Sets the tag.
If the Entity belongs to the world it will check for tag conflicts.
'''
if self._world:
if self._world.get_entity_by_tag(tag):
raise NonUniqueTagError(tag)
self._tag = tag |
def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set)
    ) | An asset is restricted for all dts if it is in the static list. | Below is the instruction that describes the task:
### Input:
An asset is restricted for all dts if it is in the static list.
### Response:
def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set)
) |
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t) | Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z)) | Below is the instruction that describes the task:
### Input:
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
### Response:
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t) |
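A quick numeric check of the formula; the method never touches self, so it can be exercised directly.
import numpy as np

# logit(0.75) = log(0.75 / 0.25) = log(3), so inverting log(3) recovers 0.75
print(inverse(None, np.log(3.0)))   # ~0.75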
def _spill(self):
"""
dump already partitioned data into disks.
"""
global MemoryBytesSpilled, DiskBytesSpilled
path = self._get_spill_dir(self.spills)
if not os.path.exists(path):
os.makedirs(path)
used_memory = get_used_memory()
if not self.pdata:
# The data has not been partitioned, it will iterator the
# data once, write them into different files, has no
# additional memory. It only called when the memory goes
# above limit at the first time.
# open all the files for writing
streams = [open(os.path.join(path, str(i)), 'wb')
for i in range(self.partitions)]
# If the number of keys is small, then the overhead of sort is small
# sort them before dumping into disks
self._sorted = len(self.data) < self.SORT_KEY_LIMIT
if self._sorted:
self.serializer = self.flattened_serializer()
for k in sorted(self.data.keys()):
h = self._partition(k)
self.serializer.dump_stream([(k, self.data[k])], streams[h])
else:
for k, v in self.data.items():
h = self._partition(k)
self.serializer.dump_stream([(k, v)], streams[h])
for s in streams:
DiskBytesSpilled += s.tell()
s.close()
self.data.clear()
# self.pdata is cached in `mergeValues` and `mergeCombiners`
self.pdata.extend([{} for i in range(self.partitions)])
else:
for i in range(self.partitions):
p = os.path.join(path, str(i))
with open(p, "wb") as f:
# dump items in batch
if self._sorted:
# sort by key only (stable)
sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0))
self.serializer.dump_stream(sorted_items, f)
else:
self.serializer.dump_stream(self.pdata[i].items(), f)
self.pdata[i].clear()
DiskBytesSpilled += os.path.getsize(p)
self.spills += 1
gc.collect() # release the memory as much as possible
    MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20 | dump already partitioned data into disks. | Below is the instruction that describes the task:
### Input:
dump already partitioned data into disks.
### Response:
def _spill(self):
"""
dump already partitioned data into disks.
"""
global MemoryBytesSpilled, DiskBytesSpilled
path = self._get_spill_dir(self.spills)
if not os.path.exists(path):
os.makedirs(path)
used_memory = get_used_memory()
if not self.pdata:
# The data has not been partitioned, it will iterator the
# data once, write them into different files, has no
# additional memory. It only called when the memory goes
# above limit at the first time.
# open all the files for writing
streams = [open(os.path.join(path, str(i)), 'wb')
for i in range(self.partitions)]
# If the number of keys is small, then the overhead of sort is small
# sort them before dumping into disks
self._sorted = len(self.data) < self.SORT_KEY_LIMIT
if self._sorted:
self.serializer = self.flattened_serializer()
for k in sorted(self.data.keys()):
h = self._partition(k)
self.serializer.dump_stream([(k, self.data[k])], streams[h])
else:
for k, v in self.data.items():
h = self._partition(k)
self.serializer.dump_stream([(k, v)], streams[h])
for s in streams:
DiskBytesSpilled += s.tell()
s.close()
self.data.clear()
# self.pdata is cached in `mergeValues` and `mergeCombiners`
self.pdata.extend([{} for i in range(self.partitions)])
else:
for i in range(self.partitions):
p = os.path.join(path, str(i))
with open(p, "wb") as f:
# dump items in batch
if self._sorted:
# sort by key only (stable)
sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0))
self.serializer.dump_stream(sorted_items, f)
else:
self.serializer.dump_stream(self.pdata[i].items(), f)
self.pdata[i].clear()
DiskBytesSpilled += os.path.getsize(p)
self.spills += 1
gc.collect() # release the memory as much as possible
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20 |
def key_to_int(key, base=BASE62):
"""
Convert the following key to an integer.
@param key: a key.
@param base: a sequence of characters that was used to encode the
integer value.
@return: the integer value corresponding to the given key.
@raise ValueError: if one character of the specified key doesn't match
any character of the specified base.
"""
base_length = len(base)
value = 0
for c in reversed(key):
value = (value * base_length) + base.index(c)
return value | Convert the following key to an integer.
@param key: a key.
@param base: a sequence of characters that was used to encode the
integer value.
@return: the integer value corresponding to the given key.
@raise ValueError: if one character of the specified key doesn't match
any character of the specified base. | Below is the instruction that describes the task:
### Input:
Convert the following key to an integer.
@param key: a key.
@param base: a sequence of characters that was used to encode the
integer value.
@return: the integer value corresponding to the given key.
@raise ValueError: if one character of the specified key doesn't match
any character of the specified base.
### Response:
def key_to_int(key, base=BASE62):
"""
Convert the following key to an integer.
@param key: a key.
@param base: a sequence of characters that was used to encode the
integer value.
@return: the integer value corresponding to the given key.
@raise ValueError: if one character of the specified key doesn't match
any character of the specified base.
"""
base_length = len(base)
value = 0
for c in reversed(key):
value = (value * base_length) + base.index(c)
return value |
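A usage sketch; the BASE62 alphabet is not shown in the record, so the ordering below (digits, then lowercase, then uppercase) is an assumption and the numeric results depend on it.
BASE62 = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"  # assumed ordering

print(key_to_int("z", BASE62))    # 35 with this alphabet (10 digits, then 'z' at offset 25)
print(key_to_int("10", BASE62))   # 1 -- the first character is the least significant digit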
def indent(txt, spacing=4):
"""
Indent given text using custom spacing, default is set to 4.
"""
    return prefix(str(txt), ''.join([' ' for _ in range(spacing)])) | Indent given text using custom spacing, default is set to 4. | Below is the instruction that describes the task:
### Input:
Indent given text using custom spacing, default is set to 4.
### Response:
def indent(txt, spacing=4):
"""
Indent given text using custom spacing, default is set to 4.
"""
return prefix(str(txt), ''.join([' ' for _ in range(spacing)])) |
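A sketch; the prefix() helper is not shown in the record, so a minimal stand-in that prepends the spacing string to every line is assumed.
def prefix(txt, pre):                               # assumed behaviour of the helper
    return "\n".join(pre + line for line in str(txt).splitlines())

print(indent("a\nb", spacing=2))
# prints "  a" and "  b" (each line prefixed with two spaces)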
def create_build_context(image, inputs, wdir):
"""
Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory
"""
assert os.path.isabs(wdir)
dockerlines = ["FROM %s" % image,
"RUN mkdir -p %s" % wdir]
build_context = {}
# This loop creates a Build Context for building the provisioned image
# We create a tar archive to be added to the root of the image filesystem
if inputs:
dockerlines.append('COPY root /')
for ifile, (path, obj) in enumerate(inputs.items()):
if not os.path.isabs(path):
path = os.path.join(wdir, path)
assert path[0] == '/'
build_context['root' + path] = obj
dockerstring = '\n'.join(dockerlines)
build_context['Dockerfile'] = pyccc.BytesContainer(dockerstring.encode('utf-8'))
return build_context | Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory | Below is the instruction that describes the task:
### Input:
Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory
### Response:
def create_build_context(image, inputs, wdir):
"""
Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory
"""
assert os.path.isabs(wdir)
dockerlines = ["FROM %s" % image,
"RUN mkdir -p %s" % wdir]
build_context = {}
# This loop creates a Build Context for building the provisioned image
# We create a tar archive to be added to the root of the image filesystem
if inputs:
dockerlines.append('COPY root /')
for ifile, (path, obj) in enumerate(inputs.items()):
if not os.path.isabs(path):
path = os.path.join(wdir, path)
assert path[0] == '/'
build_context['root' + path] = obj
dockerstring = '\n'.join(dockerlines)
build_context['Dockerfile'] = pyccc.BytesContainer(dockerstring.encode('utf-8'))
return build_context |
def pick_enclosure_link(post, parameter=''):
'''Override URL of the Post to point to url of the first enclosure with
href attribute non-empty and type matching specified regexp parameter (empty=any).
Missing "type" attribute for enclosure will be matched as an empty string.
If none of the enclosures match, link won't be updated.'''
for e in (post.enclosures or list()):
href = e.get('href')
if not href: continue
if parameter and not re.search(parameter, e.get('type', '')): continue
return dict(link=href) | Override URL of the Post to point to url of the first enclosure with
href attribute non-empty and type matching specified regexp parameter (empty=any).
Missing "type" attribute for enclosure will be matched as an empty string.
If none of the enclosures match, link won't be updated. | Below is the instruction that describes the task:
### Input:
Override URL of the Post to point to url of the first enclosure with
href attribute non-empty and type matching specified regexp parameter (empty=any).
Missing "type" attribute for enclosure will be matched as an empty string.
If none of the enclosures match, link won't be updated.
### Response:
def pick_enclosure_link(post, parameter=''):
'''Override URL of the Post to point to url of the first enclosure with
href attribute non-empty and type matching specified regexp parameter (empty=any).
Missing "type" attribute for enclosure will be matched as an empty string.
If none of the enclosures match, link won't be updated.'''
for e in (post.enclosures or list()):
href = e.get('href')
if not href: continue
if parameter and not re.search(parameter, e.get('type', '')): continue
return dict(link=href) |
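A usage sketch with a feedparser-style post stub; only the enclosure whose type matches the regexp parameter is picked.
import re
from types import SimpleNamespace

post = SimpleNamespace(enclosures=[
    {'href': 'http://example.com/cover.jpg', 'type': 'image/jpeg'},
    {'href': 'http://example.com/ep1.mp3', 'type': 'audio/mpeg'},
])
print(pick_enclosure_link(post, parameter='^audio/'))
# {'link': 'http://example.com/ep1.mp3'}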
def get_settings():
'''
Get all currently loaded settings.
'''
settings = {}
for config_file in config_files():
config_contents = load_config(config_file)
if config_contents is not None:
settings = deep_merge(settings, config_contents)
    return settings | Get all currently loaded settings. | Below is the instruction that describes the task:
### Input:
Get all currently loaded settings.
### Response:
def get_settings():
'''
Get all currently loaded settings.
'''
settings = {}
for config_file in config_files():
config_contents = load_config(config_file)
if config_contents is not None:
settings = deep_merge(settings, config_contents)
return settings |
def is_default_port(self):
"""A check for default port.
Return True if port is default for specified scheme,
e.g. 'http://python.org' or 'http://python.org:80', False
otherwise.
"""
if self.port is None:
return False
default = DEFAULT_PORTS.get(self.scheme)
if default is None:
return False
return self.port == default | A check for default port.
Return True if port is default for specified scheme,
e.g. 'http://python.org' or 'http://python.org:80', False
otherwise. | Below is the instruction that describes the task:
### Input:
A check for default port.
Return True if port is default for specified scheme,
e.g. 'http://python.org' or 'http://python.org:80', False
otherwise.
### Response:
def is_default_port(self):
"""A check for default port.
Return True if port is default for specified scheme,
e.g. 'http://python.org' or 'http://python.org:80', False
otherwise.
"""
if self.port is None:
return False
default = DEFAULT_PORTS.get(self.scheme)
if default is None:
return False
return self.port == default |
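A sketch; DEFAULT_PORTS is not shown in the record, so a minimal scheme-to-port mapping is assumed, and a SimpleNamespace stands in for the URL object the method normally lives on.
from types import SimpleNamespace

DEFAULT_PORTS = {'http': 80, 'https': 443}                          # assumed mapping
print(is_default_port(SimpleNamespace(scheme='http', port=80)))     # True
print(is_default_port(SimpleNamespace(scheme='http', port=8080)))   # False
print(is_default_port(SimpleNamespace(scheme='http', port=None)))   # False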
def bytes(self):
"""Emit the address in bytes format."""
addrbyte = b'\x00\x00\x00'
if self.addr is not None:
addrbyte = self.addr
    return addrbyte | Emit the address in bytes format. | Below is the instruction that describes the task:
### Input:
Emit the address in bytes format.
### Response:
def bytes(self):
"""Emit the address in bytes format."""
addrbyte = b'\x00\x00\x00'
if self.addr is not None:
addrbyte = self.addr
return addrbyte |
def envs(self):
'''
Return a list of available environments
'''
load = {'cmd': '_file_envs'}
return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
        else self.channel.send(load) | Return a list of available environments | Below is the instruction that describes the task:
### Input:
Return a list of available environments
### Response:
def envs(self):
'''
Return a list of available environments
'''
load = {'cmd': '_file_envs'}
return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
else self.channel.send(load) |
def rm_subtitles(path):
""" delete all subtitles in path recursively
"""
sub_exts = ['ass', 'srt', 'sub']
count = 0
for root, dirs, files in os.walk(path):
for f in files:
_, ext = os.path.splitext(f)
ext = ext[1:]
if ext in sub_exts:
p = os.path.join(root, f)
count += 1
print('Delete {}'.format(p))
os.remove(p)
    return count | delete all subtitles in path recursively | Below is the instruction that describes the task:
### Input:
delete all subtitles in path recursively
### Response:
def rm_subtitles(path):
""" delete all subtitles in path recursively
"""
sub_exts = ['ass', 'srt', 'sub']
count = 0
for root, dirs, files in os.walk(path):
for f in files:
_, ext = os.path.splitext(f)
ext = ext[1:]
if ext in sub_exts:
p = os.path.join(root, f)
count += 1
print('Delete {}'.format(p))
os.remove(p)
return count |
def apply(self, name, foci):
""" Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
"""
if name in self.transformations:
return transform(foci, self.transformations[name])
else:
logger.info(
"No transformation named '%s' found; coordinates left "
"untransformed." % name)
return foci | Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed. | Below is the instruction that describes the task:
### Input:
Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
### Response:
def apply(self, name, foci):
""" Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
"""
if name in self.transformations:
return transform(foci, self.transformations[name])
else:
logger.info(
"No transformation named '%s' found; coordinates left "
"untransformed." % name)
return foci |
def gps_inject_data_send(self, target_system, target_component, len, data, force_mavlink1=False):
'''
data for injecting into the onboard GPS (used for DGPS)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
len : data length (uint8_t)
data : raw data (110 is enough for 12 satellites of RTCMv2) (uint8_t)
'''
return self.send(self.gps_inject_data_encode(target_system, target_component, len, data), force_mavlink1=force_mavlink1) | data for injecting into the onboard GPS (used for DGPS)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
len : data length (uint8_t)
data : raw data (110 is enough for 12 satellites of RTCMv2) (uint8_t) | Below is the the instruction that describes the task:
### Input:
data for injecting into the onboard GPS (used for DGPS)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
len : data length (uint8_t)
data : raw data (110 is enough for 12 satellites of RTCMv2) (uint8_t)
### Response:
def gps_inject_data_send(self, target_system, target_component, len, data, force_mavlink1=False):
'''
data for injecting into the onboard GPS (used for DGPS)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
len : data length (uint8_t)
data : raw data (110 is enough for 12 satellites of RTCMv2) (uint8_t)
'''
return self.send(self.gps_inject_data_encode(target_system, target_component, len, data), force_mavlink1=force_mavlink1) |
def basic_auth_user(self, realm, user_name, password, environ):
"""Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication."""
user = self._get_realm_entry(realm, user_name)
if user is not None and password == user.get("password"):
environ["wsgidav.auth.roles"] = user.get("roles", [])
return True
return False | Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication. | Below is the instruction that describes the task:
### Input:
Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication.
### Response:
def basic_auth_user(self, realm, user_name, password, environ):
"""Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication."""
user = self._get_realm_entry(realm, user_name)
if user is not None and password == user.get("password"):
environ["wsgidav.auth.roles"] = user.get("roles", [])
return True
return False |
def rangecalc(x, y=None, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin([np.nanmin(x), np.nanmin(y)])
mx = np.nanmax([np.nanmax(x), np.nanmax(y)])
rn = mx - mn
    return (mn - pad * rn, mx + pad * rn) | Calculate padded range limits for axes. | Below is the instruction that describes the task:
### Input:
Calculate padded range limits for axes.
### Response:
def rangecalc(x, y=None, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin([np.nanmin(x), np.nanmin(y)])
mx = np.nanmax([np.nanmax(x), np.nanmax(y)])
rn = mx - mn
return (mn - pad * rn, mx + pad * rn) |
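A worked example of the padding arithmetic.
import numpy as np

x = np.array([0.0, 10.0])
y = np.array([2.0, 8.0])
# the combined range is 0..10, so pad=0.05 widens it by 0.5 on each side
print(rangecalc(x, y, pad=0.05))   # (-0.5, 10.5)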
def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
    return protocol.lower() | Validate a protocol, a string, and return it. | Below is the instruction that describes the task:
### Input:
Validate a protocol, a string, and return it.
### Response:
def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower() |
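A sketch; PROTOCOL_REGEX is not part of the record, so a simple letters-then-alphanumerics pattern is assumed here.
import re

PROTOCOL_REGEX = r'[A-Za-z][A-Za-z0-9]*$'   # assumed pattern
print(validate_protocol('TCP'))             # 'tcp'
# validate_protocol('not valid!') would raise ValueError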
def make_bucket(self, bucket_name, location='us-east-1'):
"""
Make a new bucket on the server.
Optionally include Location.
['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-west-1',
'eu-west-2', 'ca-central-1', 'eu-central-1', 'sa-east-1',
'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-1', 'ap-northeast-2']
Examples:
minio.make_bucket('foo')
minio.make_bucket('foo', 'us-west-1')
:param bucket_name: Bucket to create on server
:param location: Location to create bucket on
"""
is_valid_bucket_name(bucket_name)
# Default region for all requests.
region = 'us-east-1'
if self._region:
region = self._region
# Validate if caller requested bucket location is same as current region
if self._region != location:
raise InvalidArgumentError("Configured region {0}, requested"
" {1}".format(self._region,
location))
method = 'PUT'
# Set user agent once before the request.
headers = {'User-Agent': self._user_agent}
content = None
if location and location != 'us-east-1':
content = xml_marshal_bucket_constraint(location)
headers['Content-Length'] = str(len(content))
content_sha256_hex = get_sha256_hexdigest(content)
if content:
headers['Content-Md5'] = get_md5_base64digest(content)
# In case of Amazon S3. The make bucket issued on already
# existing bucket would fail with 'AuthorizationMalformed'
# error if virtual style is used. So we default to 'path
# style' as that is the preferred method here. The final
# location of the 'bucket' is provided through XML
# LocationConstraint data with the request.
# Construct target url.
url = self._endpoint_url + '/' + bucket_name + '/'
# Get signature headers if any.
headers = sign_v4(method, url, region,
headers, self._access_key,
self._secret_key,
self._session_token,
content_sha256_hex)
response = self._http.urlopen(method, url,
body=content,
headers=headers)
if response.status != 200:
raise ResponseError(response, method, bucket_name).get_exception()
self._set_bucket_region(bucket_name, region=location) | Make a new bucket on the server.
Optionally include Location.
['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-west-1',
'eu-west-2', 'ca-central-1', 'eu-central-1', 'sa-east-1',
'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-1', 'ap-northeast-2']
Examples:
minio.make_bucket('foo')
minio.make_bucket('foo', 'us-west-1')
:param bucket_name: Bucket to create on server
:param location: Location to create bucket on | Below is the instruction that describes the task:
### Input:
Make a new bucket on the server.
Optionally include Location.
['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-west-1',
'eu-west-2', 'ca-central-1', 'eu-central-1', 'sa-east-1',
'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-1', 'ap-northeast-2']
Examples:
minio.make_bucket('foo')
minio.make_bucket('foo', 'us-west-1')
:param bucket_name: Bucket to create on server
:param location: Location to create bucket on
### Response:
def make_bucket(self, bucket_name, location='us-east-1'):
"""
Make a new bucket on the server.
Optionally include Location.
['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-west-1',
'eu-west-2', 'ca-central-1', 'eu-central-1', 'sa-east-1',
'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-1', 'ap-northeast-2']
Examples:
minio.make_bucket('foo')
minio.make_bucket('foo', 'us-west-1')
:param bucket_name: Bucket to create on server
:param location: Location to create bucket on
"""
is_valid_bucket_name(bucket_name)
# Default region for all requests.
region = 'us-east-1'
if self._region:
region = self._region
# Validate if caller requested bucket location is same as current region
if self._region != location:
raise InvalidArgumentError("Configured region {0}, requested"
" {1}".format(self._region,
location))
method = 'PUT'
# Set user agent once before the request.
headers = {'User-Agent': self._user_agent}
content = None
if location and location != 'us-east-1':
content = xml_marshal_bucket_constraint(location)
headers['Content-Length'] = str(len(content))
content_sha256_hex = get_sha256_hexdigest(content)
if content:
headers['Content-Md5'] = get_md5_base64digest(content)
# In case of Amazon S3. The make bucket issued on already
# existing bucket would fail with 'AuthorizationMalformed'
# error if virtual style is used. So we default to 'path
# style' as that is the preferred method here. The final
# location of the 'bucket' is provided through XML
# LocationConstraint data with the request.
# Construct target url.
url = self._endpoint_url + '/' + bucket_name + '/'
# Get signature headers if any.
headers = sign_v4(method, url, region,
headers, self._access_key,
self._secret_key,
self._session_token,
content_sha256_hex)
response = self._http.urlopen(method, url,
body=content,
headers=headers)
if response.status != 200:
raise ResponseError(response, method, bucket_name).get_exception()
self._set_bucket_region(bucket_name, region=location) |
def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
        out.write(b'\n'.join(lines) + b'\n') | Save items from list to the file. | Below is the instruction that describes the task:
### Input:
Save items from list to the file.
### Response:
def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
out.write(b'\n'.join(lines) + b'\n') |
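A sketch; make_str is not shown in the record, so a minimal bytes-coercion stand-in is assumed alongside the six/json imports the function relies on.
import json
import six

def make_str(value, encoding='utf-8'):      # assumed behaviour of the helper
    return value.encode(encoding) if isinstance(value, six.text_type) else value

save_list(['abc', {'a': 1}], '/tmp/items.txt')   # illustrative path
# /tmp/items.txt now contains two lines: abc  and  {"a": 1}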
def is_multipart(header_dict):
"""
Args:
header_dict : CaseInsensitiveDict
Returns:
bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with
value that begins with 'multipart'.
"""
return (
{k.lower(): v for k, v in header_dict.items()}
.get('content-type', '')
.startswith('multipart')
) | Args:
header_dict : CaseInsensitiveDict
Returns:
bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with
value that begins with 'multipart'. | Below is the instruction that describes the task:
### Input:
Args:
header_dict : CaseInsensitiveDict
Returns:
bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with
value that begins with 'multipart'.
### Response:
def is_multipart(header_dict):
"""
Args:
header_dict : CaseInsensitiveDict
Returns:
bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with
value that begins with 'multipart'.
"""
return (
{k.lower(): v for k, v in header_dict.items()}
.get('content-type', '')
.startswith('multipart')
) |
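A quick check with plain dicts; the function lower-cases the keys itself, so a real CaseInsensitiveDict is not required for the sketch.
print(is_multipart({'Content-Type': 'multipart/form-data; boundary=x'}))  # True
print(is_multipart({'content-type': 'application/json'}))                 # False
print(is_multipart({}))                                                   # False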
def nd_sort_samples(samples):
"""
Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version.
"""
# Check the shape of the sample list.
assert len(samples.shape) == 2
# Build a KD-tree on the samples.
tree = cKDTree(samples)
# Compute the distances.
d, i = tree.query(samples[0], k=len(samples))
return i | Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version. | Below is the instruction that describes the task:
### Input:
Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version.
### Response:
def nd_sort_samples(samples):
"""
Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version.
"""
# Check the shape of the sample list.
assert len(samples.shape) == 2
# Build a KD-tree on the samples.
tree = cKDTree(samples)
# Compute the distances.
d, i = tree.query(samples[0], k=len(samples))
return i |
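A small worked example; the record uses cKDTree unqualified, so the sketch assumes the usual scipy.spatial import is in scope.
import numpy as np
from scipy.spatial import cKDTree   # expected to be available to nd_sort_samples

samples = np.array([[0.0, 0.0], [3.0, 4.0], [1.0, 1.0]])
idx = nd_sort_samples(samples)
print(idx)            # [0 2 1] -- nearest-to-farthest from samples[0]
print(samples[idx])   # rows reordered by distance from the first sample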
def cbpdnmd_ustep(k):
"""Do the U step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
mp_Z_U0[k] += mp_DX[k] - mp_Z_Y0[k] - mp_S[k]
mp_Z_U1[k] += mp_Z_X[k] - mp_Z_Y1[k] | Do the U step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables. | Below is the instruction that describes the task:
### Input:
Do the U step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
### Response:
def cbpdnmd_ustep(k):
"""Do the U step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
mp_Z_U0[k] += mp_DX[k] - mp_Z_Y0[k] - mp_S[k]
mp_Z_U1[k] += mp_Z_X[k] - mp_Z_Y1[k] |
def set_ground_width(self, ground_width):
'''set ground width of view'''
state = self.state
state.ground_width = ground_width
    state.panel.re_center(state.width/2, state.height/2, state.lat, state.lon) | set ground width of view | Below is the instruction that describes the task:
### Input:
set ground width of view
### Response:
def set_ground_width(self, ground_width):
'''set ground width of view'''
state = self.state
state.ground_width = ground_width
state.panel.re_center(state.width/2, state.height/2, state.lat, state.lon) |
def evaluate(self, sequence, transformations):
"""
Execute the sequence of transformations in parallel
:param sequence: Sequence to evaluation
:param transformations: Transformations to apply
:return: Resulting sequence or value
"""
result = sequence
parallel = partial(
parallelize, processes=self.processes, partition_size=self.partition_size)
staged = []
for transform in transformations:
strategies = transform.execution_strategies or {}
if ExecutionStrategies.PARALLEL in strategies:
staged.insert(0, transform.function)
else:
if staged:
result = parallel(compose(*staged), result)
staged = []
if ExecutionStrategies.PRE_COMPUTE in strategies:
result = list(result)
result = transform.function(result)
if staged:
result = parallel(compose(*staged), result)
return iter(result) | Execute the sequence of transformations in parallel
:param sequence: Sequence to evaluation
:param transformations: Transformations to apply
:return: Resulting sequence or value | Below is the instruction that describes the task:
### Input:
Execute the sequence of transformations in parallel
:param sequence: Sequence to evaluation
:param transformations: Transformations to apply
:return: Resulting sequence or value
### Response:
def evaluate(self, sequence, transformations):
"""
Execute the sequence of transformations in parallel
:param sequence: Sequence to evaluation
:param transformations: Transformations to apply
:return: Resulting sequence or value
"""
result = sequence
parallel = partial(
parallelize, processes=self.processes, partition_size=self.partition_size)
staged = []
for transform in transformations:
strategies = transform.execution_strategies or {}
if ExecutionStrategies.PARALLEL in strategies:
staged.insert(0, transform.function)
else:
if staged:
result = parallel(compose(*staged), result)
staged = []
if ExecutionStrategies.PRE_COMPUTE in strategies:
result = list(result)
result = transform.function(result)
if staged:
result = parallel(compose(*staged), result)
return iter(result) |
def _get_autoscaling_min_max(template, parameters, asg_name):
"""Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
"""
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
asg = template.get('Resources', {}).get(asg_name, None)
if asg:
assert asg['Type'] == 'AWS::AutoScaling::AutoScalingGroup'
min = asg.get('Properties', {}).get('MinSize', None)
max = asg.get('Properties', {}).get('MaxSize', None)
if 'Ref' in min:
min = params.get(min['Ref'], None)
if 'Ref' in max:
max = params.get(max['Ref'], None)
if min and max:
return int(min), int(max) | Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize | Below is the instruction that describes the task:
### Input:
Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
### Response:
def _get_autoscaling_min_max(template, parameters, asg_name):
"""Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
"""
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
asg = template.get('Resources', {}).get(asg_name, None)
if asg:
assert asg['Type'] == 'AWS::AutoScaling::AutoScalingGroup'
min = asg.get('Properties', {}).get('MinSize', None)
max = asg.get('Properties', {}).get('MaxSize', None)
if 'Ref' in min:
min = params.get(min['Ref'], None)
if 'Ref' in max:
max = params.get(max['Ref'], None)
if min and max:
return int(min), int(max) |
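A minimal template/parameters pair exercising the Ref-resolution path; resource and parameter names are illustrative.
template = {
    "Resources": {
        "WebAsg": {
            "Type": "AWS::AutoScaling::AutoScalingGroup",
            "Properties": {
                "MinSize": {"Ref": "MinParam"},
                "MaxSize": {"Ref": "MaxParam"},
            },
        }
    }
}
parameters = [
    {"ParameterKey": "MinParam", "ParameterValue": "2"},
    {"ParameterKey": "MaxParam", "ParameterValue": "10"},
]
print(_get_autoscaling_min_max(template, parameters, "WebAsg"))   # (2, 10)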
def decode(self, encoded_packet):
"""Decode a transmitted package.
The return value indicates how many binary attachment packets are
necessary to fully decode the packet.
"""
ep = encoded_packet
try:
self.packet_type = int(ep[0:1])
except TypeError:
self.packet_type = ep
ep = ''
self.namespace = None
self.data = None
ep = ep[1:]
dash = ep.find('-')
attachment_count = 0
if dash > 0 and ep[0:dash].isdigit():
attachment_count = int(ep[0:dash])
ep = ep[dash + 1:]
if ep and ep[0:1] == '/':
sep = ep.find(',')
if sep == -1:
self.namespace = ep
ep = ''
else:
self.namespace = ep[0:sep]
ep = ep[sep + 1:]
q = self.namespace.find('?')
if q != -1:
self.namespace = self.namespace[0:q]
if ep and ep[0].isdigit():
self.id = 0
while ep and ep[0].isdigit():
self.id = self.id * 10 + int(ep[0])
ep = ep[1:]
if ep:
self.data = self.json.loads(ep)
return attachment_count | Decode a transmitted package.
The return value indicates how many binary attachment packets are
necessary to fully decode the packet. | Below is the instruction that describes the task:
### Input:
Decode a transmitted package.
The return value indicates how many binary attachment packets are
necessary to fully decode the packet.
### Response:
def decode(self, encoded_packet):
"""Decode a transmitted package.
The return value indicates how many binary attachment packets are
necessary to fully decode the packet.
"""
ep = encoded_packet
try:
self.packet_type = int(ep[0:1])
except TypeError:
self.packet_type = ep
ep = ''
self.namespace = None
self.data = None
ep = ep[1:]
dash = ep.find('-')
attachment_count = 0
if dash > 0 and ep[0:dash].isdigit():
attachment_count = int(ep[0:dash])
ep = ep[dash + 1:]
if ep and ep[0:1] == '/':
sep = ep.find(',')
if sep == -1:
self.namespace = ep
ep = ''
else:
self.namespace = ep[0:sep]
ep = ep[sep + 1:]
q = self.namespace.find('?')
if q != -1:
self.namespace = self.namespace[0:q]
if ep and ep[0].isdigit():
self.id = 0
while ep and ep[0].isdigit():
self.id = self.id * 10 + int(ep[0])
ep = ep[1:]
if ep:
self.data = self.json.loads(ep)
return attachment_count |
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
kernel_mat = self._get_kernel(X, self.fit_X_)
val = numpy.dot(kernel_mat, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val | Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks. | Below is the instruction that describes the task:
### Input:
Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
### Response:
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
kernel_mat = self._get_kernel(X, self.fit_X_)
val = numpy.dot(kernel_mat, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val |
def parse_stations(html):
"""
Strips JS code, loads JSON
"""
html = html.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '')
html = json.loads(html)
return html['suggestions'] | Strips JS code, loads JSON | Below is the instruction that describes the task:
### Input:
Strips JS code, loads JSON
### Response:
def parse_stations(html):
"""
Strips JS code, loads JSON
"""
html = html.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '')
html = json.loads(html)
return html['suggestions'] |
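A minimal sketch of what this strip-and-load step does, using an invented payload shaped like the suggestion response the function expects (the real service's field names may differ):
import json

raw = 'SLs.sls={"suggestions":[{"value":"Berlin Hbf","id":"8011160"}]};SLs.showSuggestion();'
stripped = raw.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '')
print(json.loads(stripped)['suggestions'])
# [{'value': 'Berlin Hbf', 'id': '8011160'}]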
def get_stations(self):
"""
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
"""
status, data = self.http_client.get_json(
STATIONS_URI,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return [self.stations_parser.parse_dict(item) for item in data] | Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects | Below is the instruction that describes the task:
### Input:
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
### Response:
def get_stations(self):
"""
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
"""
status, data = self.http_client.get_json(
STATIONS_URI,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return [self.stations_parser.parse_dict(item) for item in data] |
def parse(self, limit=None):
"""
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
self._process_diseases(limit)
self._process_genes(limit)
self._process_genes_kegg2ncbi(limit)
self._process_omim2gene(limit)
self._process_omim2disease(limit)
self._process_kegg_disease2gene(limit)
self._process_pathways(limit)
self._process_pathway_pubmed(limit)
# self._process_pathway_pathway(limit)
self._process_pathway_disease(limit)
self._process_pathway_ko(limit)
self._process_ortholog_classes(limit)
# TODO add in when refactoring for #141
# for f in ['hsa_orthologs', 'mmu_orthologs', 'rno_orthologs',
# 'dme_orthologs','dre_orthologs','cel_orthologs']:
# file = '/'.join((self.rawdir, self.files[f]['file']))
# self._process_orthologs(file, limit) # DONE #
LOG.info("Finished parsing")
return | :param limit:
:return: | Below is the instruction that describes the task:
### Input:
:param limit:
:return:
### Response:
def parse(self, limit=None):
"""
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
self._process_diseases(limit)
self._process_genes(limit)
self._process_genes_kegg2ncbi(limit)
self._process_omim2gene(limit)
self._process_omim2disease(limit)
self._process_kegg_disease2gene(limit)
self._process_pathways(limit)
self._process_pathway_pubmed(limit)
# self._process_pathway_pathway(limit)
self._process_pathway_disease(limit)
self._process_pathway_ko(limit)
self._process_ortholog_classes(limit)
# TODO add in when refactoring for #141
# for f in ['hsa_orthologs', 'mmu_orthologs', 'rno_orthologs',
# 'dme_orthologs','dre_orthologs','cel_orthologs']:
# file = '/'.join((self.rawdir, self.files[f]['file']))
# self._process_orthologs(file, limit) # DONE #
LOG.info("Finished parsing")
return |
def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
"""bad_line_end is a human readable string."""
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context,
context2=self._context, type=type)
self.AddToAccumulator(e) | bad_line_end is a human readable string. | Below is the instruction that describes the task:
### Input:
bad_line_end is a human readable string.
### Response:
def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
"""bad_line_end is a human readable string."""
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context,
context2=self._context, type=type)
self.AddToAccumulator(e) |
def find_egg(self, egg_dist):
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = "{0}.egg-link".format(egg_dist.project_name)
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg | Find an egg by name in the given environment | Below is the instruction that describes the task:
### Input:
Find an egg by name in the given environment
### Response:
def find_egg(self, egg_dist):
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = "{0}.egg-link".format(egg_dist.project_name)
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg |
def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None):
"""Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional
"""
mem_value = encoder_outputs
decoder_states = [mem_value]
mem_length = mem_value.shape[1]
if encoder_valid_length is not None:
dtype = encoder_valid_length.dtype
ctx = encoder_valid_length.context
mem_masks = mx.nd.broadcast_lesser(
mx.nd.arange(mem_length, ctx=ctx, dtype=dtype).reshape((1, -1)),
encoder_valid_length.reshape((-1, 1)))
decoder_states.append(mem_masks)
self._encoder_valid_length = encoder_valid_length
return decoder_states | Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional | Below is the instruction that describes the task:
### Input:
Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional
### Response:
def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None):
"""Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional
"""
mem_value = encoder_outputs
decoder_states = [mem_value]
mem_length = mem_value.shape[1]
if encoder_valid_length is not None:
dtype = encoder_valid_length.dtype
ctx = encoder_valid_length.context
mem_masks = mx.nd.broadcast_lesser(
mx.nd.arange(mem_length, ctx=ctx, dtype=dtype).reshape((1, -1)),
encoder_valid_length.reshape((-1, 1)))
decoder_states.append(mem_masks)
self._encoder_valid_length = encoder_valid_length
return decoder_states |
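The broadcast_lesser call above builds a 0/1 mask over memory positions; the NumPy sketch below reproduces that logic with made-up lengths purely for illustration (it is not part of the original decoder):
import numpy as np

mem_length = 5
encoder_valid_length = np.array([3, 5])             # two sequences in the batch
positions = np.arange(mem_length).reshape((1, -1))  # shape (1, mem_length)
# Positions past each sequence's valid length get 0, everything else 1.
mem_masks = (positions < encoder_valid_length.reshape((-1, 1))).astype('float32')
print(mem_masks)
# [[1. 1. 1. 0. 0.]
#  [1. 1. 1. 1. 1.]]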
def comparable(self):
"""str: comparable representation of the path specification."""
string_parts = []
if self.cipher_mode:
string_parts.append('cipher_mode: {0:s}'.format(self.cipher_mode))
if self.encryption_method:
string_parts.append('encryption_method: {0:s}'.format(
self.encryption_method))
if self.initialization_vector:
initialization_vector = codecs.encode(self.initialization_vector, 'hex')
initialization_vector = initialization_vector.decode('ascii')
string_parts.append('initialization_vector: {0:s}'.format(
initialization_vector))
if self.key:
key = codecs.encode(self.key, 'hex')
key = key.decode('ascii')
string_parts.append('key: {0:s}'.format(key))
return self._GetComparable(sub_comparable_string=', '.join(string_parts)) | str: comparable representation of the path specification. | Below is the instruction that describes the task:
### Input:
str: comparable representation of the path specification.
### Response:
def comparable(self):
"""str: comparable representation of the path specification."""
string_parts = []
if self.cipher_mode:
string_parts.append('cipher_mode: {0:s}'.format(self.cipher_mode))
if self.encryption_method:
string_parts.append('encryption_method: {0:s}'.format(
self.encryption_method))
if self.initialization_vector:
initialization_vector = codecs.encode(self.initialization_vector, 'hex')
initialization_vector = initialization_vector.decode('ascii')
string_parts.append('initialization_vector: {0:s}'.format(
initialization_vector))
if self.key:
key = codecs.encode(self.key, 'hex')
key = key.decode('ascii')
string_parts.append('key: {0:s}'.format(key))
return self._GetComparable(sub_comparable_string=', '.join(string_parts)) |
def get_config(config_file):
"""Get configuration from a file."""
def load(fp):
try:
return yaml.safe_load(fp)
except yaml.YAMLError as e:
sys.stderr.write(text_type(e))
sys.exit(1) # TODO document exit codes
if config_file == '-':
return load(sys.stdin)
if not os.path.exists(config_file):
sys.stderr.write('ERROR: Must either run next to config.yaml or'
' specify a config file.\n' + __doc__)
sys.exit(2)
with open(config_file) as fp:
return load(fp) | Get configuration from a file. | Below is the instruction that describes the task:
### Input:
Get configuration from a file.
### Response:
def get_config(config_file):
"""Get configuration from a file."""
def load(fp):
try:
return yaml.safe_load(fp)
except yaml.YAMLError as e:
sys.stderr.write(text_type(e))
sys.exit(1) # TODO document exit codes
if config_file == '-':
return load(sys.stdin)
if not os.path.exists(config_file):
sys.stderr.write('ERROR: Must either run next to config.yaml or'
' specify a config file.\n' + __doc__)
sys.exit(2)
with open(config_file) as fp:
return load(fp) |
def to_table(components, topo_info):
""" normalize raw logical plan info to table """
inputs, outputs = defaultdict(list), defaultdict(list)
for ctype, component in components.items():
if ctype == 'bolts':
for component_name, component_info in component.items():
for input_stream in component_info['inputs']:
input_name = input_stream['component_name']
inputs[component_name].append(input_name)
outputs[input_name].append(component_name)
info = []
spouts_instance = topo_info['physical_plan']['spouts']
bolts_instance = topo_info['physical_plan']['bolts']
for ctype, component in components.items():
# stages is an int so keep going
if ctype == "stages":
continue
for component_name, component_info in component.items():
row = [ctype[:-1], component_name]
if ctype == 'spouts':
row.append(len(spouts_instance[component_name]))
else:
row.append(len(bolts_instance[component_name]))
row.append(','.join(inputs.get(component_name, ['-'])))
row.append(','.join(outputs.get(component_name, ['-'])))
info.append(row)
header = ['type', 'name', 'parallelism', 'input', 'output']
return info, header | normalize raw logical plan info to table | Below is the instruction that describes the task:
### Input:
normalize raw logical plan info to table
### Response:
def to_table(components, topo_info):
""" normalize raw logical plan info to table """
inputs, outputs = defaultdict(list), defaultdict(list)
for ctype, component in components.items():
if ctype == 'bolts':
for component_name, component_info in component.items():
for input_stream in component_info['inputs']:
input_name = input_stream['component_name']
inputs[component_name].append(input_name)
outputs[input_name].append(component_name)
info = []
spouts_instance = topo_info['physical_plan']['spouts']
bolts_instance = topo_info['physical_plan']['bolts']
for ctype, component in components.items():
# stages is an int so keep going
if ctype == "stages":
continue
for component_name, component_info in component.items():
row = [ctype[:-1], component_name]
if ctype == 'spouts':
row.append(len(spouts_instance[component_name]))
else:
row.append(len(bolts_instance[component_name]))
row.append(','.join(inputs.get(component_name, ['-'])))
row.append(','.join(outputs.get(component_name, ['-'])))
info.append(row)
header = ['type', 'name', 'parallelism', 'input', 'output']
return info, header |
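As a rough illustration of the inputs/outputs inversion performed above, the toy logical plan below follows the same field layout the function reads; the component names are invented:
from collections import defaultdict

components = {
    'spouts': {'word': {}},
    'bolts': {'count': {'inputs': [{'component_name': 'word'}]}},
}
inputs, outputs = defaultdict(list), defaultdict(list)
for name, info in components['bolts'].items():
    for stream in info['inputs']:
        inputs[name].append(stream['component_name'])    # bolt <- its upstream component
        outputs[stream['component_name']].append(name)   # upstream component -> bolt
print(dict(inputs))   # {'count': ['word']}
print(dict(outputs))  # {'word': ['count']}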
def logpdf_link(self, link_f, y, Y_metadata=None):
"""
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
return -link_f + y*np.log(link_f) - special.gammaln(y+1) | Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float | Below is the instruction that describes the task:
### Input:
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
### Response:
def logpdf_link(self, link_f, y, Y_metadata=None):
"""
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
return -link_f + y*np.log(link_f) - special.gammaln(y+1) |
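A quick numeric sanity check, assuming link_f plays the role of the Poisson rate: the expression above should coincide with SciPy's Poisson log-pmf (the sample values are made up):
import numpy as np
from scipy import special, stats

link_f = np.array([[0.5], [2.0], [4.0]])   # made-up rates lambda(f_i)
y = np.array([[0.0], [1.0], [6.0]])        # made-up counts
manual = -link_f + y * np.log(link_f) - special.gammaln(y + 1)
reference = stats.poisson.logpmf(y, mu=link_f)
print(np.allclose(manual, reference))      # True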
def _load_profile(self, profile_name):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['display_name'] == profile_name:
break
else:
if profile_name:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
profile_name, ', '.join(p['display_name'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v) | Load a profile by name
Called by load_user_options | Below is the instruction that describes the task:
### Input:
Load a profile by name
Called by load_user_options
### Response:
def _load_profile(self, profile_name):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['display_name'] == profile_name:
break
else:
if profile_name:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
profile_name, ', '.join(p['display_name'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v) |
def language_match(self, cpeset, cpel_dom=None):
"""
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
"""
# Root element tag
TAG_ROOT = '#document'
# A container for child platform definitions
TAG_PLATSPEC = 'cpe:platform-specification'
# Information about a platform definition
TAG_PLATFORM = 'cpe:platform'
TAG_LOGITEST = 'cpe:logical-test'
TAG_CPE = 'cpe:fact-ref'
TAG_CHECK_CPE = 'check-fact-ref'
# Tag attributes
ATT_NAME = 'name'
ATT_OP = 'operator'
ATT_NEGATE = 'negate'
# Attribute values
ATT_OP_AND = 'AND'
ATT_OP_OR = 'OR'
ATT_NEGATE_TRUE = 'TRUE'
# Constant associated with an error in language matching
ERROR = 2
if cpel_dom is None:
cpel_dom = self.document
# Identify the root element
if cpel_dom.nodeName == TAG_ROOT or cpel_dom.nodeName == TAG_PLATSPEC:
for node in cpel_dom.childNodes:
if node.nodeName == TAG_PLATSPEC:
return self.language_match(cpeset, node)
if node.nodeName == TAG_PLATFORM:
return self.language_match(cpeset, node)
# Identify a platform element
elif cpel_dom.nodeName == TAG_PLATFORM:
# Parse through E's elements and ignore all but logical-test
for node in cpel_dom.childNodes:
if node.nodeName == TAG_LOGITEST:
# Call the function again, but with logical-test
# as the root element
return self.language_match(cpeset, node)
# Identify a CPE element
elif cpel_dom.nodeName == TAG_CPE:
# fact-ref's name attribute is a bound name,
# so we unbind it to a WFN before passing it
cpename = cpel_dom.getAttribute(ATT_NAME)
wfn = CPELanguage2_3._unbind(cpename)
return CPELanguage2_3._fact_ref_eval(cpeset, wfn)
# Identify a check of CPE names (OVAL, OCIL...)
elif cpel_dom.nodeName == TAG_CHECK_CPE:
return CPELanguage2_3._check_fact_ref_Eval(cpel_dom)
# Identify a logical operator element
elif cpel_dom.nodeName == TAG_LOGITEST:
count = 0
len = 0
answer = False
for node in cpel_dom.childNodes:
if node.nodeName.find("#") == 0:
continue
len = len + 1
result = self.language_match(cpeset, node)
if result:
count = count + 1
elif result == ERROR:
answer = ERROR
operator = cpel_dom.getAttribute(ATT_OP).upper()
if operator == ATT_OP_AND:
if count == len:
answer = True
elif operator == ATT_OP_OR:
if count > 0:
answer = True
operator_not = cpel_dom.getAttribute(ATT_NEGATE)
if operator_not:
if ((operator_not.upper() == ATT_NEGATE_TRUE) and
(answer != ERROR)):
answer = not answer
return answer
else:
return False | Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean | Below is the instruction that describes the task:
### Input:
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
### Response:
def language_match(self, cpeset, cpel_dom=None):
"""
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
"""
# Root element tag
TAG_ROOT = '#document'
# A container for child platform definitions
TAG_PLATSPEC = 'cpe:platform-specification'
# Information about a platform definition
TAG_PLATFORM = 'cpe:platform'
TAG_LOGITEST = 'cpe:logical-test'
TAG_CPE = 'cpe:fact-ref'
TAG_CHECK_CPE = 'check-fact-ref'
# Tag attributes
ATT_NAME = 'name'
ATT_OP = 'operator'
ATT_NEGATE = 'negate'
# Attribute values
ATT_OP_AND = 'AND'
ATT_OP_OR = 'OR'
ATT_NEGATE_TRUE = 'TRUE'
# Constant associated with an error in language matching
ERROR = 2
if cpel_dom is None:
cpel_dom = self.document
# Identify the root element
if cpel_dom.nodeName == TAG_ROOT or cpel_dom.nodeName == TAG_PLATSPEC:
for node in cpel_dom.childNodes:
if node.nodeName == TAG_PLATSPEC:
return self.language_match(cpeset, node)
if node.nodeName == TAG_PLATFORM:
return self.language_match(cpeset, node)
# Identify a platform element
elif cpel_dom.nodeName == TAG_PLATFORM:
# Parse through E's elements and ignore all but logical-test
for node in cpel_dom.childNodes:
if node.nodeName == TAG_LOGITEST:
# Call the function again, but with logical-test
# as the root element
return self.language_match(cpeset, node)
# Identify a CPE element
elif cpel_dom.nodeName == TAG_CPE:
# fact-ref's name attribute is a bound name,
# so we unbind it to a WFN before passing it
cpename = cpel_dom.getAttribute(ATT_NAME)
wfn = CPELanguage2_3._unbind(cpename)
return CPELanguage2_3._fact_ref_eval(cpeset, wfn)
# Identify a check of CPE names (OVAL, OCIL...)
elif cpel_dom.nodeName == TAG_CHECK_CPE:
return CPELanguage2_3._check_fact_ref_Eval(cpel_dom)
# Identify a logical operator element
elif cpel_dom.nodeName == TAG_LOGITEST:
count = 0
len = 0
answer = False
for node in cpel_dom.childNodes:
if node.nodeName.find("#") == 0:
continue
len = len + 1
result = self.language_match(cpeset, node)
if result:
count = count + 1
elif result == ERROR:
answer = ERROR
operator = cpel_dom.getAttribute(ATT_OP).upper()
if operator == ATT_OP_AND:
if count == len:
answer = True
elif operator == ATT_OP_OR:
if count > 0:
answer = True
operator_not = cpel_dom.getAttribute(ATT_NEGATE)
if operator_not:
if ((operator_not.upper() == ATT_NEGATE_TRUE) and
(answer != ERROR)):
answer = not answer
return answer
else:
return False |
def fake2db_logger():
'''creates a logger obj'''
# Pull the local ip and username for meaningful logging
username = getpass.getuser()
# Set the logger
FORMAT = '%(asctime)-15s %(user)-8s %(message)s'
logging.basicConfig(format=FORMAT)
extra_information = {'user': username}
logger = logging.getLogger('fake2db_logger')
# --------------------
return logger, extra_information | creates a logger obj | Below is the instruction that describes the task:
### Input:
creates a logger obj
### Response:
def fake2db_logger():
'''creates a logger obj'''
# Pull the local ip and username for meaningful logging
username = getpass.getuser()
# Set the logger
FORMAT = '%(asctime)-15s %(user)-8s %(message)s'
logging.basicConfig(format=FORMAT)
extra_information = {'user': username}
logger = logging.getLogger('fake2db_logger')
# --------------------
return logger, extra_information |
def index(self, axes):
"""
:param axes: The Axes instance to find the index of.
:type axes: Axes
:rtype: int
"""
return None if axes is self._colormap_axes else self._axes.index(axes) | :param axes: The Axes instance to find the index of.
:type axes: Axes
:rtype: int | Below is the instruction that describes the task:
### Input:
:param axes: The Axes instance to find the index of.
:type axes: Axes
:rtype: int
### Response:
def index(self, axes):
"""
:param axes: The Axes instance to find the index of.
:type axes: Axes
:rtype: int
"""
return None if axes is self._colormap_axes else self._axes.index(axes) |
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1) | yesterday once more | Below is the instruction that describes the task:
### Input:
yesterday once more
### Response:
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1) |
def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows | Return our current root node and appropriate adapter for it | Below is the instruction that describes the task:
### Input:
Return our current root node and appropriate adapter for it
### Response:
def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows |
def next_run_in(self, utc_now=None):
""" :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the RepeatTimer instance is not running """
if utc_now is None:
utc_now = datetime.utcnow()
if self.is_alive():
next_run = timedelta(seconds=self.interval_current) + self.activation_dt
return next_run - utc_now
else:
return None | :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the RepeatTimer instance is not running | Below is the instruction that describes the task:
### Input:
:param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the RepeatTimer instance is not running
### Response:
def next_run_in(self, utc_now=None):
""" :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the RepeatTimer instance is not running """
if utc_now is None:
utc_now = datetime.utcnow()
if self.is_alive():
next_run = timedelta(seconds=self.interval_current) + self.activation_dt
return next_run - utc_now
else:
return None |
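A small sketch of the arithmetic above with invented values: a timer activated 10 seconds ago on a 60-second interval should report roughly 50 seconds remaining.
from datetime import datetime, timedelta

activation_dt = datetime(2024, 1, 1, 12, 0, 0)      # when the timer (re)started
interval_current = 60                                # seconds between firings
utc_now = activation_dt + timedelta(seconds=10)      # pretend 10 s have elapsed
next_run = timedelta(seconds=interval_current) + activation_dt
print(next_run - utc_now)                            # 0:00:50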