repo (string, lengths 7–55) | path (string, lengths 4–223) | url (string, lengths 87–315) | code (string, lengths 75–104k) | code_tokens (list) | docstring (string, lengths 1–46.9k) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91–980)
---|---|---|---|---|---|---|---|---|---|
gbowerman/azurerm
|
azurerm/amsrp.py
|
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/amsrp.py#L520-L545
|
def create_contentkey_authorization_policy_options(access_token, key_delivery_type="2", \
name="HLS Open Authorization Policy", key_restriction_type="0"):
'''Create Media Service Content Key Authorization Policy Options.
Args:
access_token (str): A valid Azure authentication token.
key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.
name (str): A Media Service Content Key Authorization Policy Name.
key_restriction_type (str): A Media Service Content Key Restriction Type.
Returns:
HTTP response. JSON body.
'''
path = '/ContentKeyAuthorizationPolicyOptions'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"Name":"policy",\
"KeyDeliveryType":"' + key_delivery_type + '", \
"KeyDeliveryConfiguration":"", \
"Restrictions":[{ \
"Name":"' + name + '", \
"KeyRestrictionType":"' + key_restriction_type + '", \
"Requirements":null \
}] \
}'
return do_ams_post(endpoint, path, body, access_token, "json_only")
|
[
"def",
"create_contentkey_authorization_policy_options",
"(",
"access_token",
",",
"key_delivery_type",
"=",
"\"2\"",
",",
"name",
"=",
"\"HLS Open Authorization Policy\"",
",",
"key_restriction_type",
"=",
"\"0\"",
")",
":",
"path",
"=",
"'/ContentKeyAuthorizationPolicyOptions'",
"endpoint",
"=",
"''",
".",
"join",
"(",
"[",
"ams_rest_endpoint",
",",
"path",
"]",
")",
"body",
"=",
"'{ \\\n\t\t\"Name\":\"policy\",\\\n\t\t\"KeyDeliveryType\":\"'",
"+",
"key_delivery_type",
"+",
"'\", \\\n\t\t\"KeyDeliveryConfiguration\":\"\", \\\n\t\t\t\"Restrictions\":[{ \\\n\t\t\t\"Name\":\"'",
"+",
"name",
"+",
"'\", \\\n\t\t\t\"KeyRestrictionType\":\"'",
"+",
"key_restriction_type",
"+",
"'\", \\\n\t\t\t\"Requirements\":null \\\n\t\t}] \\\n\t}'",
"return",
"do_ams_post",
"(",
"endpoint",
",",
"path",
",",
"body",
",",
"access_token",
",",
"\"json_only\"",
")"
] |
Create Media Service Content Key Authorization Policy Options.
Args:
access_token (str): A valid Azure authentication token.
key_delivery_type (str): A Media Service Content Key Authorization Policy Delivery Type.
name (str): A Media Service Content Key Authorization Policy Name.
key_restriction_type (str): A Media Service Content Key Restriction Type.
Returns:
HTTP response. JSON body.
|
[
"Create",
"Media",
"Service",
"Content",
"Key",
"Authorization",
"Policy",
"Options",
"."
] |
python
|
train
| 39.423077 |
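A minimal call sketch for the function above. The token value is a placeholder, the import path is assumed from the file path shown, and a real call needs a configured Azure Media Services account.

```python
# Hedged usage sketch -- "<AMS access token>" is a placeholder, not a working credential.
from azurerm.amsrp import create_contentkey_authorization_policy_options

response = create_contentkey_authorization_policy_options(
    access_token="<AMS access token>",
    key_delivery_type="2",
    name="HLS Open Authorization Policy",
    key_restriction_type="0",
)
print(response)  # JSON body of the HTTP response, per the docstring above
```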
suds-community/suds
|
suds/sax/document.py
|
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/sax/document.py#L62-L85
|
def getChild(self, name, ns=None, default=None):
"""
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
"""
if self.__root is None:
return default
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.__root.resolvePrefix(prefix)
if self.__root.match(name, ns):
return self.__root
else:
return default
|
[
"def",
"getChild",
"(",
"self",
",",
"name",
",",
"ns",
"=",
"None",
",",
"default",
"=",
"None",
")",
":",
"if",
"self",
".",
"__root",
"is",
"None",
":",
"return",
"default",
"if",
"ns",
"is",
"None",
":",
"prefix",
",",
"name",
"=",
"splitPrefix",
"(",
"name",
")",
"if",
"prefix",
"is",
"None",
":",
"ns",
"=",
"None",
"else",
":",
"ns",
"=",
"self",
".",
"__root",
".",
"resolvePrefix",
"(",
"prefix",
")",
"if",
"self",
".",
"__root",
".",
"match",
"(",
"name",
",",
"ns",
")",
":",
"return",
"self",
".",
"__root",
"else",
":",
"return",
"default"
] |
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
|
[
"Get",
"a",
"child",
"by",
"(",
"optional",
")",
"name",
"and",
"/",
"or",
"(",
"optional",
")",
"namespace",
"."
] |
python
|
train
| 36.541667 |
fastai/fastai
|
fastai/torch_core.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L161-L167
|
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
|
[
"def",
"children_and_parameters",
"(",
"m",
":",
"nn",
".",
"Module",
")",
":",
"children",
"=",
"list",
"(",
"m",
".",
"children",
"(",
")",
")",
"children_p",
"=",
"sum",
"(",
"[",
"[",
"id",
"(",
"p",
")",
"for",
"p",
"in",
"c",
".",
"parameters",
"(",
")",
"]",
"for",
"c",
"in",
"m",
".",
"children",
"(",
")",
"]",
",",
"[",
"]",
")",
"for",
"p",
"in",
"m",
".",
"parameters",
"(",
")",
":",
"if",
"id",
"(",
"p",
")",
"not",
"in",
"children_p",
":",
"children",
".",
"append",
"(",
"ParameterModule",
"(",
"p",
")",
")",
"return",
"children"
] |
Return the children of `m` and its direct parameters not registered in modules.
|
[
"Return",
"the",
"children",
"of",
"m",
"and",
"its",
"direct",
"parameters",
"not",
"registered",
"in",
"modules",
"."
] |
python
|
train
| 51 |
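A small behavioural sketch for `children_and_parameters` above, assuming fastai (v1) is installed so the function is importable: a parameter registered directly on a module, outside any child, gets wrapped in a `ParameterModule` and appended to the children list.

```python
import torch
import torch.nn as nn
from fastai.torch_core import children_and_parameters  # assumes fastai v1 is installed

class Scaled(nn.Module):
    def __init__(self):
        super().__init__()
        self.body = nn.Linear(4, 4)                # ordinary child module
        self.scale = nn.Parameter(torch.ones(1))   # direct parameter, not inside a child

parts = children_and_parameters(Scaled())
print(len(parts))  # 2: the Linear child plus a ParameterModule wrapping `scale`
```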
c-w/gutenberg
|
gutenberg/query/api.py
|
https://github.com/c-w/gutenberg/blob/d1ef3da6fba6c3636d452479ed6bcb17c7d4d246/gutenberg/query/api.py#L20-L38
|
def get_metadata(feature_name, etextno):
"""Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
"""
metadata_values = MetadataExtractor.get(feature_name).get_metadata(etextno)
return frozenset(metadata_values)
|
[
"def",
"get_metadata",
"(",
"feature_name",
",",
"etextno",
")",
":",
"metadata_values",
"=",
"MetadataExtractor",
".",
"get",
"(",
"feature_name",
")",
".",
"get_metadata",
"(",
"etextno",
")",
"return",
"frozenset",
"(",
"metadata_values",
")"
] |
Looks up the value of a meta-data feature for a given text.
Arguments:
feature_name (str): The name of the meta-data to look up.
etextno (int): The identifier of the Gutenberg text for which to look
up the meta-data.
Returns:
frozenset: The values of the meta-data for the text or an empty set if
the text does not have meta-data associated with the feature.
Raises:
UnsupportedFeature: If there is no MetadataExtractor registered that
can extract meta-data for the given feature name.
|
[
"Looks",
"up",
"the",
"value",
"of",
"a",
"meta",
"-",
"data",
"feature",
"for",
"a",
"given",
"text",
"."
] |
python
|
train
| 38.157895 |
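A short usage sketch for `get_metadata` above, assuming the gutenberg package is installed and its metadata cache has already been populated; 2701 is just an example e-text number.

```python
from gutenberg.query import get_metadata  # assumes a populated metadata cache

# Each call returns a frozenset of values (possibly empty).
print(get_metadata('title', 2701))
print(get_metadata('author', 2701))
```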
wheerd/multiset
|
multiset.py
|
https://github.com/wheerd/multiset/blob/1f002397096edae3da32d004e3159345a476999c/multiset.py#L316-L354
|
def intersection(self, *others):
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
"""
result = self.__copy__()
_elements = result._elements
_total = result._total
for other in map(self._as_mapping, others):
for element, multiplicity in list(_elements.items()):
new_multiplicity = other.get(element, 0)
if new_multiplicity < multiplicity:
if new_multiplicity > 0:
_elements[element] = new_multiplicity
_total -= multiplicity - new_multiplicity
else:
del _elements[element]
_total -= multiplicity
result._total = _total
return result
|
[
"def",
"intersection",
"(",
"self",
",",
"*",
"others",
")",
":",
"result",
"=",
"self",
".",
"__copy__",
"(",
")",
"_elements",
"=",
"result",
".",
"_elements",
"_total",
"=",
"result",
".",
"_total",
"for",
"other",
"in",
"map",
"(",
"self",
".",
"_as_mapping",
",",
"others",
")",
":",
"for",
"element",
",",
"multiplicity",
"in",
"list",
"(",
"_elements",
".",
"items",
"(",
")",
")",
":",
"new_multiplicity",
"=",
"other",
".",
"get",
"(",
"element",
",",
"0",
")",
"if",
"new_multiplicity",
"<",
"multiplicity",
":",
"if",
"new_multiplicity",
">",
"0",
":",
"_elements",
"[",
"element",
"]",
"=",
"new_multiplicity",
"_total",
"-=",
"multiplicity",
"-",
"new_multiplicity",
"else",
":",
"del",
"_elements",
"[",
"element",
"]",
"_total",
"-=",
"multiplicity",
"result",
".",
"_total",
"=",
"_total",
"return",
"result"
] |
r"""Return a new multiset with elements common to the multiset and all others.
>>> ms = Multiset('aab')
>>> sorted(ms.intersection('abc'))
['a', 'b']
You can also use the ``&`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> sorted(ms & Multiset('aaac'))
['a', 'a']
For a variant of the operation which modifies the multiset in place see
:meth:`intersection_update`.
Args:
others: The other sets to intersect with the multiset. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
Returns:
The multiset resulting from the intersection of the sets.
|
[
"r",
"Return",
"a",
"new",
"multiset",
"with",
"elements",
"common",
"to",
"the",
"multiset",
"and",
"all",
"others",
"."
] |
python
|
train
| 40.666667 |
Parquery/sphinx-icontract
|
sphinx_icontract/__init__.py
|
https://github.com/Parquery/sphinx-icontract/blob/92918f23a8ea1873112e9b7446c64cd6f12ee04b/sphinx_icontract/__init__.py#L87-L125
|
def _condition_as_text(lambda_inspection: icontract._represent.ConditionLambdaInspection) -> str:
"""Format condition lambda function as reST."""
lambda_ast_node = lambda_inspection.node
assert isinstance(lambda_ast_node, ast.Lambda)
body_node = lambda_ast_node.body
text = None # type: Optional[str]
if isinstance(body_node, ast.BoolOp) and isinstance(body_node.op, ast.Or) and len(body_node.values) == 2:
left, right = body_node.values
if isinstance(left, ast.UnaryOp) and isinstance(left.op, ast.Not):
# Handle the case: not A or B is transformed to A => B
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left.operand), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.UnaryOp, ast.BinOp, ast.GeneratorExp, ast.IfExp)):
text = ':code:`not ({})` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(left, ast.Compare) and len(left.ops) == 1:
text = ':code:`{}` ⇒ :code:`{}`'.format(
_negate_compare_text(atok=lambda_inspection.atok, node=left),
lambda_inspection.atok.get_text(node=right))
elif isinstance(left, (ast.Call, ast.Attribute, ast.Name, ast.Subscript, ast.Index, ast.Slice, ast.ExtSlice,
ast.ListComp, ast.SetComp, ast.DictComp)):
text = ':code:`not {}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=left), lambda_inspection.atok.get_text(node=right))
elif isinstance(body_node, ast.IfExp) and isinstance(body_node.orelse, ast.NameConstant) and body_node.orelse.value:
text = ':code:`{}` ⇒ :code:`{}`'.format(
lambda_inspection.atok.get_text(node=body_node.test), lambda_inspection.atok.get_text(node=body_node.body))
if text is None:
# None of the previous reformattings worked; take the default approach.
text = ':code:`{}`'.format(lambda_inspection.atok.get_text(node=body_node))
return text
|
[
"def",
"_condition_as_text",
"(",
"lambda_inspection",
":",
"icontract",
".",
"_represent",
".",
"ConditionLambdaInspection",
")",
"->",
"str",
":",
"lambda_ast_node",
"=",
"lambda_inspection",
".",
"node",
"assert",
"isinstance",
"(",
"lambda_ast_node",
",",
"ast",
".",
"Lambda",
")",
"body_node",
"=",
"lambda_ast_node",
".",
"body",
"text",
"=",
"None",
"# type: Optional[str]",
"if",
"isinstance",
"(",
"body_node",
",",
"ast",
".",
"BoolOp",
")",
"and",
"isinstance",
"(",
"body_node",
".",
"op",
",",
"ast",
".",
"Or",
")",
"and",
"len",
"(",
"body_node",
".",
"values",
")",
"==",
"2",
":",
"left",
",",
"right",
"=",
"body_node",
".",
"values",
"if",
"isinstance",
"(",
"left",
",",
"ast",
".",
"UnaryOp",
")",
"and",
"isinstance",
"(",
"left",
".",
"op",
",",
"ast",
".",
"Not",
")",
":",
"# Handle the case: not A or B is transformed to A => B",
"text",
"=",
"':code:`{}` β :code:`{}`'.f",
"o",
"rmat(",
"",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"left",
".",
"operand",
")",
",",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"right",
")",
")",
"elif",
"isinstance",
"(",
"left",
",",
"(",
"ast",
".",
"UnaryOp",
",",
"ast",
".",
"BinOp",
",",
"ast",
".",
"GeneratorExp",
",",
"ast",
".",
"IfExp",
")",
")",
":",
"text",
"=",
"':code:`not ({})` β :code:`{}`'.f",
"o",
"rmat(",
"",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"left",
")",
",",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"right",
")",
")",
"elif",
"isinstance",
"(",
"left",
",",
"ast",
".",
"Compare",
")",
"and",
"len",
"(",
"left",
".",
"ops",
")",
"==",
"1",
":",
"text",
"=",
"':code:`{}` β :code:`{}`'.f",
"o",
"rmat(",
"",
"_negate_compare_text",
"(",
"atok",
"=",
"lambda_inspection",
".",
"atok",
",",
"node",
"=",
"left",
")",
",",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"right",
")",
")",
"elif",
"isinstance",
"(",
"left",
",",
"(",
"ast",
".",
"Call",
",",
"ast",
".",
"Attribute",
",",
"ast",
".",
"Name",
",",
"ast",
".",
"Subscript",
",",
"ast",
".",
"Index",
",",
"ast",
".",
"Slice",
",",
"ast",
".",
"ExtSlice",
",",
"ast",
".",
"ListComp",
",",
"ast",
".",
"SetComp",
",",
"ast",
".",
"DictComp",
")",
")",
":",
"text",
"=",
"':code:`not {}` β :code:`{}`'.f",
"o",
"rmat(",
"",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"left",
")",
",",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"right",
")",
")",
"elif",
"isinstance",
"(",
"body_node",
",",
"ast",
".",
"IfExp",
")",
"and",
"isinstance",
"(",
"body_node",
".",
"orelse",
",",
"ast",
".",
"NameConstant",
")",
"and",
"body_node",
".",
"orelse",
".",
"value",
":",
"text",
"=",
"':code:`{}` β :code:`{}`'.f",
"o",
"rmat(",
"",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"body_node",
".",
"test",
")",
",",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"body_node",
".",
"body",
")",
")",
"if",
"text",
"is",
"None",
":",
"# None of the previous reformatings worked, take the default approach.",
"text",
"=",
"':code:`{}`'",
".",
"format",
"(",
"lambda_inspection",
".",
"atok",
".",
"get_text",
"(",
"node",
"=",
"body_node",
")",
")",
"return",
"text"
] |
Format condition lambda function as reST.
|
[
"Format",
"condition",
"lambda",
"function",
"as",
"reST",
"."
] |
python
|
train
| 53.871795 |
NoneGG/aredis
|
aredis/pipeline.py
|
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/pipeline.py#L481-L577
|
async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
"""
Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException.
"""
# the first time sending the commands we send all of the commands that were queued up.
# if we have to run through it again, we only retry the commands that failed.
attempt = sorted(stack, key=lambda x: x.position)
# build a list of node objects based on node names we need to
nodes = {}
# as we move through each command that still needs to be processed,
# we figure out the slot number that command maps to, then from the slot determine the node.
for c in attempt:
# refer to our internal node -> slot table that tells us where a given
# command should route to.
slot = self._determine_slot(*c.args)
node = self.connection_pool.get_node_by_slot(slot)
# little hack to make sure the node name is populated. probably could clean this up.
self.connection_pool.nodes.set_node_name(node)
# now that we know the name of the node ( it's just a string in the form of host:port )
# we can build a list of commands for each node.
node_name = node['name']
if node_name not in nodes:
nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node))
nodes[node_name].append(c)
# send the commands in sequence.
# we write to all the open sockets for each node first, before reading anything
# this allows us to flush all the requests out across the network essentially in parallel
# so that we can read them all in parallel as they come back.
# we don't multiplex on the sockets as they become available, but that shouldn't make too much difference.
node_commands = nodes.values()
for n in node_commands:
await n.write()
for n in node_commands:
await n.read()
# release all of the redis connections we allocated earlier back into the connection pool.
# we used to do this step as part of a try/finally block, but it is really dangerous to
# release connections back into the pool if for some reason the socket has data still left in it
# from a previous operation. The write and read operations already have try/catch around them for
# all known types of errors including connection and socket level errors.
# So if we hit an exception, something really bad happened and putting any of
# these connections back into the pool is a very bad idea.
# the socket might have unread buffer still sitting in it, and then the
# next time we read from it we pass the buffered result back from a previous
# command and every single request after to that connection will always get
# a mismatched result. (not just theoretical, I saw this happen on production x.x).
for n in nodes.values():
self.connection_pool.release(n.connection)
# if the response isn't an exception it is a valid response from the node
# we're all done with that command, YAY!
# if we have more commands to attempt, we've run into problems.
# collect all the commands we are allowed to retry.
# (MOVED, ASK, or connection errors or timeout errors)
attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position)
if attempt and allow_redirections:
# RETRY MAGIC HAPPENS HERE!
# send these remaining commands one at a time using `execute_command`
# in the main client. This keeps our retry logic in one place mostly,
# and allows us to be more confident in correctness of behavior.
# at this point any speed gains from pipelining have been lost
# anyway, so we might as well make the best attempt to get the correct
# behavior.
#
# The client command will handle retries for each individual command
# sequentially as we pass each one into `execute_command`. Any exceptions
# that bubble out should only appear once all retries have been exhausted.
#
# If a lot of commands have failed, we'll be setting the
# flag to rebuild the slots table from scratch. So MOVED errors should
# correct themselves fairly quickly.
await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
for c in attempt:
try:
# send each command individually like we do in the main client.
c.result = await super(StrictClusterPipeline, self).execute_command(*c.args, **c.options)
except RedisError as e:
c.result = e
# turn the response back into a simple flat array that corresponds
# to the sequence of commands issued in the stack in pipeline.execute()
response = [c.result for c in sorted(stack, key=lambda x: x.position)]
if raise_on_error:
self.raise_first_error(stack)
return response
|
[
"async",
"def",
"send_cluster_commands",
"(",
"self",
",",
"stack",
",",
"raise_on_error",
"=",
"True",
",",
"allow_redirections",
"=",
"True",
")",
":",
"# the first time sending the commands we send all of the commands that were queued up.",
"# if we have to run through it again, we only retry the commands that failed.",
"attempt",
"=",
"sorted",
"(",
"stack",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"position",
")",
"# build a list of node objects based on node names we need to",
"nodes",
"=",
"{",
"}",
"# as we move through each command that still needs to be processed,",
"# we figure out the slot number that command maps to, then from the slot determine the node.",
"for",
"c",
"in",
"attempt",
":",
"# refer to our internal node -> slot table that tells us where a given",
"# command should route to.",
"slot",
"=",
"self",
".",
"_determine_slot",
"(",
"*",
"c",
".",
"args",
")",
"node",
"=",
"self",
".",
"connection_pool",
".",
"get_node_by_slot",
"(",
"slot",
")",
"# little hack to make sure the node name is populated. probably could clean this up.",
"self",
".",
"connection_pool",
".",
"nodes",
".",
"set_node_name",
"(",
"node",
")",
"# now that we know the name of the node ( it's just a string in the form of host:port )",
"# we can build a list of commands for each node.",
"node_name",
"=",
"node",
"[",
"'name'",
"]",
"if",
"node_name",
"not",
"in",
"nodes",
":",
"nodes",
"[",
"node_name",
"]",
"=",
"NodeCommands",
"(",
"self",
".",
"parse_response",
",",
"self",
".",
"connection_pool",
".",
"get_connection_by_node",
"(",
"node",
")",
")",
"nodes",
"[",
"node_name",
"]",
".",
"append",
"(",
"c",
")",
"# send the commands in sequence.",
"# we write to all the open sockets for each node first, before reading anything",
"# this allows us to flush all the requests out across the network essentially in parallel",
"# so that we can read them all in parallel as they come back.",
"# we dont' multiplex on the sockets as they come available, but that shouldn't make too much difference.",
"node_commands",
"=",
"nodes",
".",
"values",
"(",
")",
"for",
"n",
"in",
"node_commands",
":",
"await",
"n",
".",
"write",
"(",
")",
"for",
"n",
"in",
"node_commands",
":",
"await",
"n",
".",
"read",
"(",
")",
"# release all of the redis connections we allocated earlier back into the connection pool.",
"# we used to do this step as part of a try/finally block, but it is really dangerous to",
"# release connections back into the pool if for some reason the socket has data still left in it",
"# from a previous operation. The write and read operations already have try/catch around them for",
"# all known types of errors including connection and socket level errors.",
"# So if we hit an exception, something really bad happened and putting any of",
"# these connections back into the pool is a very bad idea.",
"# the socket might have unread buffer still sitting in it, and then the",
"# next time we read from it we pass the buffered result back from a previous",
"# command and every single request after to that connection will always get",
"# a mismatched result. (not just theoretical, I saw this happen on production x.x).",
"for",
"n",
"in",
"nodes",
".",
"values",
"(",
")",
":",
"self",
".",
"connection_pool",
".",
"release",
"(",
"n",
".",
"connection",
")",
"# if the response isn't an exception it is a valid response from the node",
"# we're all done with that command, YAY!",
"# if we have more commands to attempt, we've run into problems.",
"# collect all the commands we are allowed to retry.",
"# (MOVED, ASK, or connection errors or timeout errors)",
"attempt",
"=",
"sorted",
"(",
"[",
"c",
"for",
"c",
"in",
"attempt",
"if",
"isinstance",
"(",
"c",
".",
"result",
",",
"ERRORS_ALLOW_RETRY",
")",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"position",
")",
"if",
"attempt",
"and",
"allow_redirections",
":",
"# RETRY MAGIC HAPPENS HERE!",
"# send these remaing comamnds one at a time using `execute_command`",
"# in the main client. This keeps our retry logic in one place mostly,",
"# and allows us to be more confident in correctness of behavior.",
"# at this point any speed gains from pipelining have been lost",
"# anyway, so we might as well make the best attempt to get the correct",
"# behavior.",
"#",
"# The client command will handle retries for each individual command",
"# sequentially as we pass each one into `execute_command`. Any exceptions",
"# that bubble out should only appear once all retries have been exhausted.",
"#",
"# If a lot of commands have failed, we'll be setting the",
"# flag to rebuild the slots table from scratch. So MOVED errors should",
"# correct themselves fairly quickly.",
"await",
"self",
".",
"connection_pool",
".",
"nodes",
".",
"increment_reinitialize_counter",
"(",
"len",
"(",
"attempt",
")",
")",
"for",
"c",
"in",
"attempt",
":",
"try",
":",
"# send each command individually like we do in the main client.",
"c",
".",
"result",
"=",
"await",
"super",
"(",
"StrictClusterPipeline",
",",
"self",
")",
".",
"execute_command",
"(",
"*",
"c",
".",
"args",
",",
"*",
"*",
"c",
".",
"options",
")",
"except",
"RedisError",
"as",
"e",
":",
"c",
".",
"result",
"=",
"e",
"# turn the response back into a simple flat array that corresponds",
"# to the sequence of commands issued in the stack in pipeline.execute()",
"response",
"=",
"[",
"c",
".",
"result",
"for",
"c",
"in",
"sorted",
"(",
"stack",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"position",
")",
"]",
"if",
"raise_on_error",
":",
"self",
".",
"raise_first_error",
"(",
"stack",
")",
"return",
"response"
] |
Send a bunch of cluster commands to the redis cluster.
`allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses
automatically. If set to false it will raise RedisClusterException.
|
[
"Send",
"a",
"bunch",
"of",
"cluster",
"commands",
"to",
"the",
"redis",
"cluster",
"."
] |
python
|
train
| 55.43299 |
raphaelgyory/django-rest-messaging-centrifugo
|
rest_messaging_centrifugo/utils.py
|
https://github.com/raphaelgyory/django-rest-messaging-centrifugo/blob/f44022cd9fc83e84ab573fe8a8385c85f6e77380/rest_messaging_centrifugo/utils.py#L8-L11
|
def build_channel(namespace, name, user_ids):
""" Creates complete channel information as described here https://fzambia.gitbooks.io/centrifugal/content/server/channels.html. """
ids = ','.join(map(str, user_ids))
return "{0}:{1}#{2}".format(namespace, name, ids)
|
[
"def",
"build_channel",
"(",
"namespace",
",",
"name",
",",
"user_ids",
")",
":",
"ids",
"=",
"','",
".",
"join",
"(",
"map",
"(",
"str",
",",
"user_ids",
")",
")",
"return",
"\"{0}:{1}#{2}\"",
".",
"format",
"(",
"namespace",
",",
"name",
",",
"ids",
")"
] |
Creates complete channel information as described here https://fzambia.gitbooks.io/centrifugal/content/server/channels.html.
|
[
"Creates",
"complete",
"channel",
"information",
"as",
"described",
"here",
"https",
":",
"//",
"fzambia",
".",
"gitbooks",
".",
"io",
"/",
"centrifugal",
"/",
"content",
"/",
"server",
"/",
"channels",
".",
"html",
"."
] |
python
|
train
| 68 |
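Because `build_channel` above is pure string formatting, its behaviour is easy to demonstrate; the import path is assumed from the file path shown.

```python
from rest_messaging_centrifugo.utils import build_channel  # path assumed from the repo layout

# namespace:name#comma-separated-user-ids
print(build_channel("messaging", "thread-42", [1, 7, 12]))
# -> 'messaging:thread-42#1,7,12'
```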
tensorpack/tensorpack
|
tensorpack/graph_builder/utils.py
|
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L366-L381
|
def pack(self, grads):
"""
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
"""
for i, g in enumerate(grads):
assert g.shape == self._shapes[i]
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads')
# concat_grads = tf.cast(concat_grads, tf.float16)
grad_packs = tf.split(concat_grads, self._split_sizes)
return grad_packs
|
[
"def",
"pack",
"(",
"self",
",",
"grads",
")",
":",
"for",
"i",
",",
"g",
"in",
"enumerate",
"(",
"grads",
")",
":",
"assert",
"g",
".",
"shape",
"==",
"self",
".",
"_shapes",
"[",
"i",
"]",
"with",
"cached_name_scope",
"(",
"\"GradientPacker\"",
",",
"top_level",
"=",
"False",
")",
":",
"concat_grads",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"reshape",
"(",
"g",
",",
"[",
"-",
"1",
"]",
")",
"for",
"g",
"in",
"grads",
"]",
",",
"0",
",",
"name",
"=",
"'concatenated_grads'",
")",
"# concat_grads = tf.cast(concat_grads, tf.float16)",
"grad_packs",
"=",
"tf",
".",
"split",
"(",
"concat_grads",
",",
"self",
".",
"_split_sizes",
")",
"return",
"grad_packs"
] |
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
|
[
"Args",
":",
"grads",
"(",
"list",
")",
":",
"list",
"of",
"gradient",
"tensors"
] |
python
|
train
| 37.0625 |
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/core/usage.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/usage.py#L561-L564
|
def page_guiref(arg_s=None):
"""Show a basic reference about the GUI Console."""
from IPython.core import page
page.page(gui_reference, auto_html=True)
|
[
"def",
"page_guiref",
"(",
"arg_s",
"=",
"None",
")",
":",
"from",
"IPython",
".",
"core",
"import",
"page",
"page",
".",
"page",
"(",
"gui_reference",
",",
"auto_html",
"=",
"True",
")"
] |
Show a basic reference about the GUI Console.
|
[
"Show",
"a",
"basic",
"reference",
"about",
"the",
"GUI",
"Console",
"."
] |
python
|
test
| 40 |
pyca/pynacl
|
src/nacl/bindings/crypto_hash.py
|
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/bindings/crypto_hash.py#L43-L55
|
def crypto_hash_sha256(message):
"""
Hashes and returns the message ``message``.
:param message: bytes
:rtype: bytes
"""
digest = ffi.new("unsigned char[]", crypto_hash_sha256_BYTES)
rc = lib.crypto_hash_sha256(digest, message, len(message))
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return ffi.buffer(digest, crypto_hash_sha256_BYTES)[:]
|
[
"def",
"crypto_hash_sha256",
"(",
"message",
")",
":",
"digest",
"=",
"ffi",
".",
"new",
"(",
"\"unsigned char[]\"",
",",
"crypto_hash_sha256_BYTES",
")",
"rc",
"=",
"lib",
".",
"crypto_hash_sha256",
"(",
"digest",
",",
"message",
",",
"len",
"(",
"message",
")",
")",
"ensure",
"(",
"rc",
"==",
"0",
",",
"'Unexpected library error'",
",",
"raising",
"=",
"exc",
".",
"RuntimeError",
")",
"return",
"ffi",
".",
"buffer",
"(",
"digest",
",",
"crypto_hash_sha256_BYTES",
")",
"[",
":",
"]"
] |
Hashes and returns the message ``message``.
:param message: bytes
:rtype: bytes
|
[
"Hashes",
"and",
"returns",
"the",
"message",
"message",
"."
] |
python
|
train
| 31.769231 |
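A quick check of the binding above (assuming PyNaCl is installed): since it is plain SHA-256, the digest should match `hashlib`.

```python
import hashlib
from nacl.bindings import crypto_hash_sha256  # assumes PyNaCl is installed

digest = crypto_hash_sha256(b"hello")
print(len(digest))                                  # 32 (SHA-256 digest size)
print(digest == hashlib.sha256(b"hello").digest())  # True
```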
johnnoone/facts
|
facts/targeting.py
|
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/targeting.py#L50-L74
|
def read(self, obj):
"""
Returns
object: fragment
"""
path, frag = [], obj
for part in self.parts:
path.append(part)
if isinstance(frag, dict):
try:
frag = frag[part]
except KeyError as error:
raise NotFound(':'.join(path)) from error
elif isinstance(frag, (list, tuple)):
try:
frag = frag[int(part)]
except IndexError as error:
raise NotFound(':'.join(path)) from error
except ValueError as error:
raise WrongType(':'.join(path)) from error
elif isinstance(frag, (str, int)):
raise WrongType(':'.join(path))
else:
raise NotFound(':'.join(path))
return frag
|
[
"def",
"read",
"(",
"self",
",",
"obj",
")",
":",
"path",
",",
"frag",
"=",
"[",
"]",
",",
"obj",
"for",
"part",
"in",
"self",
".",
"parts",
":",
"path",
".",
"append",
"(",
"part",
")",
"if",
"isinstance",
"(",
"frag",
",",
"dict",
")",
":",
"try",
":",
"frag",
"=",
"frag",
"[",
"part",
"]",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"NotFound",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"from",
"error",
"elif",
"isinstance",
"(",
"frag",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"try",
":",
"frag",
"=",
"frag",
"[",
"int",
"(",
"part",
")",
"]",
"except",
"IndexError",
"as",
"error",
":",
"raise",
"NotFound",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"from",
"error",
"except",
"ValueError",
"as",
"error",
":",
"raise",
"WrongType",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"from",
"error",
"elif",
"isinstance",
"(",
"frag",
",",
"(",
"str",
",",
"int",
")",
")",
":",
"raise",
"WrongType",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"else",
":",
"raise",
"NotFound",
"(",
"':'",
".",
"join",
"(",
"path",
")",
")",
"return",
"frag"
] |
Returns
object: fragment
|
[
"Returns",
"object",
":",
"fragment"
] |
python
|
train
| 34.6 |
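A standalone sketch of the traversal rule implemented above, not using the facts package: each path part indexes a dict by key or a list by integer position, and errors report the failing prefix joined with ':'. `NotFound`/`WrongType` are replaced with a plain exception here for brevity.

```python
def read_path(parts, obj):
    """Simplified re-implementation of the lookup above (illustrative only)."""
    path, frag = [], obj
    for part in parts:
        path.append(part)
        if isinstance(frag, dict):
            frag = frag[part]
        elif isinstance(frag, (list, tuple)):
            frag = frag[int(part)]
        else:
            raise KeyError(':'.join(path))
    return frag

obj = {"network": {"interfaces": ["eth0", "eth1"]}}
print(read_path(["network", "interfaces", "1"], obj))  # -> 'eth1'
```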
agsimeonov/cbexchange
|
cbexchange/orderbook.py
|
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/orderbook.py#L120-L153
|
def _real_time_thread(self):
"""Handles real-time updates to the order book."""
while self.ws_client.connected():
if self.die:
break
if self.pause:
sleep(5)
continue
message = self.ws_client.receive()
if message is None:
break
message_type = message['type']
if message_type == 'error':
continue
if message['sequence'] <= self.sequence:
continue
if message_type == 'open':
self._handle_open(message)
elif message_type == 'match':
self._handle_match(message)
elif message_type == 'done':
self._handle_done(message)
elif message_type == 'change':
self._handle_change(message)
else:
continue
self.ws_client.disconnect()
|
[
"def",
"_real_time_thread",
"(",
"self",
")",
":",
"while",
"self",
".",
"ws_client",
".",
"connected",
"(",
")",
":",
"if",
"self",
".",
"die",
":",
"break",
"if",
"self",
".",
"pause",
":",
"sleep",
"(",
"5",
")",
"continue",
"message",
"=",
"self",
".",
"ws_client",
".",
"receive",
"(",
")",
"if",
"message",
"is",
"None",
":",
"break",
"message_type",
"=",
"message",
"[",
"'type'",
"]",
"if",
"message_type",
"==",
"'error'",
":",
"continue",
"if",
"message",
"[",
"'sequence'",
"]",
"<=",
"self",
".",
"sequence",
":",
"continue",
"if",
"message_type",
"==",
"'open'",
":",
"self",
".",
"_handle_open",
"(",
"message",
")",
"elif",
"message_type",
"==",
"'match'",
":",
"self",
".",
"_handle_match",
"(",
"message",
")",
"elif",
"message_type",
"==",
"'done'",
":",
"self",
".",
"_handle_done",
"(",
"message",
")",
"elif",
"message_type",
"==",
"'change'",
":",
"self",
".",
"_handle_change",
"(",
"message",
")",
"else",
":",
"continue",
"self",
".",
"ws_client",
".",
"disconnect",
"(",
")"
] |
Handles real-time updates to the order book.
|
[
"Handles",
"real",
"-",
"time",
"updates",
"to",
"the",
"order",
"book",
"."
] |
python
|
valid
| 22.588235 |
stratis-storage/into-dbus-python
|
src/into_dbus_python/_xformer.py
|
https://github.com/stratis-storage/into-dbus-python/blob/81366049671f79116bbb81c97bf621800a2f6315/src/into_dbus_python/_xformer.py#L106-L173
|
def _handle_array(toks):
"""
Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str
"""
if len(toks) == 5 and toks[1] == '{' and toks[4] == '}':
subtree = toks[2:4]
signature = ''.join(s for (_, s) in subtree)
[key_func, value_func] = [f for (f, _) in subtree]
def the_dict_func(a_dict, variant=0):
"""
Function for generating a Dictionary from a dict.
:param a_dict: the dictionary to transform
:type a_dict: dict of (`a * `b)
:param int variant: variant level
:returns: a dbus dictionary of transformed values and level
:rtype: Dictionary * int
"""
elements = \
[(key_func(x), value_func(y)) for (x, y) in a_dict.items()]
level = 0 if elements == [] \
else max(max(x, y) for ((_, x), (_, y)) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Dictionary(
((x, y) for ((x, _), (y, _)) in elements),
signature=signature,
variant_level=obj_level), func_level)
return (the_dict_func, 'a{' + signature + '}')
if len(toks) == 2:
(func, sig) = toks[1]
def the_array_func(a_list, variant=0):
"""
Function for generating an Array from a list.
:param a_list: the list to transform
:type a_list: list of `a
:param int variant: variant level of the value
:returns: a dbus Array of transformed values and variant level
:rtype: Array * int
"""
if isinstance(a_list, dict):
raise IntoDPValueError(a_list, "a_list",
"is a dict, must be an array")
elements = [func(x) for x in a_list]
level = 0 if elements == [] else max(x for (_, x) in elements)
(obj_level, func_level) = \
_ToDbusXformer._variant_levels(level, variant)
return (dbus.types.Array(
(x for (x, _) in elements),
signature=sig,
variant_level=obj_level), func_level)
return (the_array_func, 'a' + sig)
raise IntoDPValueError(toks, "toks",
"unexpected tokens")
|
[
"def",
"_handle_array",
"(",
"toks",
")",
":",
"if",
"len",
"(",
"toks",
")",
"==",
"5",
"and",
"toks",
"[",
"1",
"]",
"==",
"'{'",
"and",
"toks",
"[",
"4",
"]",
"==",
"'}'",
":",
"subtree",
"=",
"toks",
"[",
"2",
":",
"4",
"]",
"signature",
"=",
"''",
".",
"join",
"(",
"s",
"for",
"(",
"_",
",",
"s",
")",
"in",
"subtree",
")",
"[",
"key_func",
",",
"value_func",
"]",
"=",
"[",
"f",
"for",
"(",
"f",
",",
"_",
")",
"in",
"subtree",
"]",
"def",
"the_dict_func",
"(",
"a_dict",
",",
"variant",
"=",
"0",
")",
":",
"\"\"\"\n Function for generating a Dictionary from a dict.\n\n :param a_dict: the dictionary to transform\n :type a_dict: dict of (`a * `b)\n :param int variant: variant level\n\n :returns: a dbus dictionary of transformed values and level\n :rtype: Dictionary * int\n \"\"\"",
"elements",
"=",
"[",
"(",
"key_func",
"(",
"x",
")",
",",
"value_func",
"(",
"y",
")",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"a_dict",
".",
"items",
"(",
")",
"]",
"level",
"=",
"0",
"if",
"elements",
"==",
"[",
"]",
"else",
"max",
"(",
"max",
"(",
"x",
",",
"y",
")",
"for",
"(",
"(",
"_",
",",
"x",
")",
",",
"(",
"_",
",",
"y",
")",
")",
"in",
"elements",
")",
"(",
"obj_level",
",",
"func_level",
")",
"=",
"_ToDbusXformer",
".",
"_variant_levels",
"(",
"level",
",",
"variant",
")",
"return",
"(",
"dbus",
".",
"types",
".",
"Dictionary",
"(",
"(",
"(",
"x",
",",
"y",
")",
"for",
"(",
"(",
"x",
",",
"_",
")",
",",
"(",
"y",
",",
"_",
")",
")",
"in",
"elements",
")",
",",
"signature",
"=",
"signature",
",",
"variant_level",
"=",
"obj_level",
")",
",",
"func_level",
")",
"return",
"(",
"the_dict_func",
",",
"'a{'",
"+",
"signature",
"+",
"'}'",
")",
"if",
"len",
"(",
"toks",
")",
"==",
"2",
":",
"(",
"func",
",",
"sig",
")",
"=",
"toks",
"[",
"1",
"]",
"def",
"the_array_func",
"(",
"a_list",
",",
"variant",
"=",
"0",
")",
":",
"\"\"\"\n Function for generating an Array from a list.\n\n :param a_list: the list to transform\n :type a_list: list of `a\n :param int variant: variant level of the value\n :returns: a dbus Array of transformed values and variant level\n :rtype: Array * int\n \"\"\"",
"if",
"isinstance",
"(",
"a_list",
",",
"dict",
")",
":",
"raise",
"IntoDPValueError",
"(",
"a_list",
",",
"\"a_list\"",
",",
"\"is a dict, must be an array\"",
")",
"elements",
"=",
"[",
"func",
"(",
"x",
")",
"for",
"x",
"in",
"a_list",
"]",
"level",
"=",
"0",
"if",
"elements",
"==",
"[",
"]",
"else",
"max",
"(",
"x",
"for",
"(",
"_",
",",
"x",
")",
"in",
"elements",
")",
"(",
"obj_level",
",",
"func_level",
")",
"=",
"_ToDbusXformer",
".",
"_variant_levels",
"(",
"level",
",",
"variant",
")",
"return",
"(",
"dbus",
".",
"types",
".",
"Array",
"(",
"(",
"x",
"for",
"(",
"x",
",",
"_",
")",
"in",
"elements",
")",
",",
"signature",
"=",
"sig",
",",
"variant_level",
"=",
"obj_level",
")",
",",
"func_level",
")",
"return",
"(",
"the_array_func",
",",
"'a'",
"+",
"sig",
")",
"raise",
"IntoDPValueError",
"(",
"toks",
",",
"\"toks\"",
",",
"\"unexpected tokens\"",
")"
] |
Generate the correct function for an array signature.
:param toks: the list of parsed tokens
:returns: function that returns an Array or Dictionary value
:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str
|
[
"Generate",
"the",
"correct",
"function",
"for",
"an",
"array",
"signature",
"."
] |
python
|
valid
| 40.132353 |
geertj/gruvi
|
lib/gruvi/transports.py
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/transports.py#L96-L105
|
def set_write_buffer_limits(self, high=None, low=None):
"""Set the low and high watermark for the write buffer."""
if high is None:
high = self.write_buffer_size
if low is None:
low = high // 2
if low > high:
low = high
self._write_buffer_high = high
self._write_buffer_low = low
|
[
"def",
"set_write_buffer_limits",
"(",
"self",
",",
"high",
"=",
"None",
",",
"low",
"=",
"None",
")",
":",
"if",
"high",
"is",
"None",
":",
"high",
"=",
"self",
".",
"write_buffer_size",
"if",
"low",
"is",
"None",
":",
"low",
"=",
"high",
"//",
"2",
"if",
"low",
">",
"high",
":",
"low",
"=",
"high",
"self",
".",
"_write_buffer_high",
"=",
"high",
"self",
".",
"_write_buffer_low",
"=",
"low"
] |
Set the low and high watermark for the write buffer.
|
[
"Set",
"the",
"low",
"and",
"high",
"watermark",
"for",
"the",
"write",
"buffer",
"."
] |
python
|
train
| 35.4 |
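A standalone illustration of the watermark rule above, not tied to gruvi: `high` defaults to the current write buffer size, `low` to half of `high`, and `low` is clamped so it never exceeds `high`.

```python
def watermarks(write_buffer_size, high=None, low=None):
    """Reproduce the defaulting/clamping logic of set_write_buffer_limits above."""
    if high is None:
        high = write_buffer_size
    if low is None:
        low = high // 2
    if low > high:
        low = high
    return high, low

print(watermarks(65536))              # (65536, 32768)
print(watermarks(65536, low=100000))  # low clamped to high -> (65536, 65536)
```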
PSPC-SPAC-buyandsell/von_anchor
|
von_anchor/anchor/verifier.py
|
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/verifier.py#L414-L457
|
def check_encoding(proof_req: dict, proof: dict) -> bool:
"""
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
"""
LOGGER.debug('Verifier.check_encoding <<< proof_req: %s, proof: %s', proof_req, proof)
cd_id2proof_id = {} # invert proof['identifiers'] per cd_id
p_preds = {} # cd_id and attr to bound
for idx in range(len(proof['identifiers'])):
cd_id = proof['identifiers'][idx]['cred_def_id']
cd_id2proof_id[cd_id] = idx # since at most 1 cred per cred def
p_preds[cd_id] = {
ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
}
for (uuid, req_attr) in proof_req['requested_attributes'].items(): # proof req xref proof per revealed attr
canon_attr = canon(req_attr['name'])
proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
canon_attr)
if not enco:
continue # requested but declined from revelation in proof: must appear in a predicate
if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
for (uuid, req_pred) in proof_req['requested_predicates'].items(): # proof req xref proof per pred
canon_attr = canon(req_pred['name'])
if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
LOGGER.debug('Verifier.check_proof_encoding <<< False')
return False
LOGGER.debug('Verifier.check_proof_encoding <<< True')
return True
|
[
"def",
"check_encoding",
"(",
"proof_req",
":",
"dict",
",",
"proof",
":",
"dict",
")",
"->",
"bool",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.check_encoding <<< proof_req: %s, proof: %s'",
",",
"proof_req",
",",
"proof",
")",
"cd_id2proof_id",
"=",
"{",
"}",
"# invert proof['identifiers'] per cd_id",
"p_preds",
"=",
"{",
"}",
"# cd_id and attr to bound",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"proof",
"[",
"'identifiers'",
"]",
")",
")",
":",
"cd_id",
"=",
"proof",
"[",
"'identifiers'",
"]",
"[",
"idx",
"]",
"[",
"'cred_def_id'",
"]",
"cd_id2proof_id",
"[",
"cd_id",
"]",
"=",
"idx",
"# since at most 1 cred per cred def",
"p_preds",
"[",
"cd_id",
"]",
"=",
"{",
"ge_proof",
"[",
"'predicate'",
"]",
"[",
"'attr_name'",
"]",
":",
"ge_proof",
"[",
"'predicate'",
"]",
"[",
"'value'",
"]",
"for",
"ge_proof",
"in",
"proof",
"[",
"'proof'",
"]",
"[",
"'proofs'",
"]",
"[",
"idx",
"]",
"[",
"'primary_proof'",
"]",
"[",
"'ge_proofs'",
"]",
"}",
"for",
"(",
"uuid",
",",
"req_attr",
")",
"in",
"proof_req",
"[",
"'requested_attributes'",
"]",
".",
"items",
"(",
")",
":",
"# proof req xref proof per revealed attr",
"canon_attr",
"=",
"canon",
"(",
"req_attr",
"[",
"'name'",
"]",
")",
"proof_ident_idx",
"=",
"cd_id2proof_id",
"[",
"req_attr",
"[",
"'restrictions'",
"]",
"[",
"0",
"]",
"[",
"'cred_def_id'",
"]",
"]",
"enco",
"=",
"proof",
"[",
"'proof'",
"]",
"[",
"'proofs'",
"]",
"[",
"proof_ident_idx",
"]",
"[",
"'primary_proof'",
"]",
"[",
"'eq_proof'",
"]",
"[",
"'revealed_attrs'",
"]",
".",
"get",
"(",
"canon_attr",
")",
"if",
"not",
"enco",
":",
"continue",
"# requested but declined from revelation in proof: must appear in a predicate",
"if",
"enco",
"!=",
"proof",
"[",
"'requested_proof'",
"]",
"[",
"'revealed_attrs'",
"]",
"[",
"uuid",
"]",
"[",
"'encoded'",
"]",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.check_proof_encoding <<< False'",
")",
"return",
"False",
"if",
"enco",
"!=",
"encode",
"(",
"proof",
"[",
"'requested_proof'",
"]",
"[",
"'revealed_attrs'",
"]",
"[",
"uuid",
"]",
"[",
"'raw'",
"]",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.check_proof_encoding <<< False'",
")",
"return",
"False",
"for",
"(",
"uuid",
",",
"req_pred",
")",
"in",
"proof_req",
"[",
"'requested_predicates'",
"]",
".",
"items",
"(",
")",
":",
"# proof req xref proof per pred",
"canon_attr",
"=",
"canon",
"(",
"req_pred",
"[",
"'name'",
"]",
")",
"if",
"p_preds",
"[",
"req_pred",
"[",
"'restrictions'",
"]",
"[",
"0",
"]",
"[",
"'cred_def_id'",
"]",
"]",
".",
"get",
"(",
"canon_attr",
")",
"!=",
"req_pred",
"[",
"'p_value'",
"]",
":",
"LOGGER",
".",
"debug",
"(",
"'Verifier.check_proof_encoding <<< False'",
")",
"return",
"False",
"LOGGER",
".",
"debug",
"(",
"'Verifier.check_proof_encoding <<< True'",
")",
"return",
"True"
] |
Return whether the proof's raw values correspond to their encodings
as cross-referenced against proof request.
:param proof_req: proof request
:param proof: corresponding proof to check
:return: True if OK, False for encoding mismatch
|
[
"Return",
"whether",
"the",
"proof",
"s",
"raw",
"values",
"correspond",
"to",
"their",
"encodings",
"as",
"cross",
"-",
"referenced",
"against",
"proof",
"request",
"."
] |
python
|
train
| 52.113636 |
log2timeline/plaso
|
plaso/parsers/esedb_plugins/interface.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/esedb_plugins/interface.py#L170-L221
|
def _GetRecordValue(self, record, value_entry):
"""Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
"""
column_type = record.get_column_type(value_entry)
long_value = None
if record.is_long_value(value_entry):
long_value = record.get_value_data_as_long_value(value_entry)
if record.is_multi_value(value_entry):
# TODO: implement
raise ValueError('Multi value support not implemented yet.')
if column_type == pyesedb.column_types.NULL:
return None
if column_type == pyesedb.column_types.BOOLEAN:
# TODO: implement
raise ValueError('Boolean value support not implemented yet.')
if column_type in self.INTEGER_COLUMN_TYPES:
if long_value:
raise ValueError('Long integer value not supported.')
return record.get_value_data_as_integer(value_entry)
if column_type in self.FLOATING_POINT_COLUMN_TYPES:
if long_value:
raise ValueError('Long floating point value not supported.')
return record.get_value_data_as_floating_point(value_entry)
if column_type in self.STRING_COLUMN_TYPES:
if long_value:
return long_value.get_data_as_string()
return record.get_value_data_as_string(value_entry)
if column_type == pyesedb.column_types.GUID:
# TODO: implement
raise ValueError('GUID value support not implemented yet.')
if long_value:
return long_value.get_data()
return record.get_value_data(value_entry)
|
[
"def",
"_GetRecordValue",
"(",
"self",
",",
"record",
",",
"value_entry",
")",
":",
"column_type",
"=",
"record",
".",
"get_column_type",
"(",
"value_entry",
")",
"long_value",
"=",
"None",
"if",
"record",
".",
"is_long_value",
"(",
"value_entry",
")",
":",
"long_value",
"=",
"record",
".",
"get_value_data_as_long_value",
"(",
"value_entry",
")",
"if",
"record",
".",
"is_multi_value",
"(",
"value_entry",
")",
":",
"# TODO: implement",
"raise",
"ValueError",
"(",
"'Multi value support not implemented yet.'",
")",
"if",
"column_type",
"==",
"pyesedb",
".",
"column_types",
".",
"NULL",
":",
"return",
"None",
"if",
"column_type",
"==",
"pyesedb",
".",
"column_types",
".",
"BOOLEAN",
":",
"# TODO: implement",
"raise",
"ValueError",
"(",
"'Boolean value support not implemented yet.'",
")",
"if",
"column_type",
"in",
"self",
".",
"INTEGER_COLUMN_TYPES",
":",
"if",
"long_value",
":",
"raise",
"ValueError",
"(",
"'Long integer value not supported.'",
")",
"return",
"record",
".",
"get_value_data_as_integer",
"(",
"value_entry",
")",
"if",
"column_type",
"in",
"self",
".",
"FLOATING_POINT_COLUMN_TYPES",
":",
"if",
"long_value",
":",
"raise",
"ValueError",
"(",
"'Long floating point value not supported.'",
")",
"return",
"record",
".",
"get_value_data_as_floating_point",
"(",
"value_entry",
")",
"if",
"column_type",
"in",
"self",
".",
"STRING_COLUMN_TYPES",
":",
"if",
"long_value",
":",
"return",
"long_value",
".",
"get_data_as_string",
"(",
")",
"return",
"record",
".",
"get_value_data_as_string",
"(",
"value_entry",
")",
"if",
"column_type",
"==",
"pyesedb",
".",
"column_types",
".",
"GUID",
":",
"# TODO: implement",
"raise",
"ValueError",
"(",
"'GUID value support not implemented yet.'",
")",
"if",
"long_value",
":",
"return",
"long_value",
".",
"get_data",
"(",
")",
"return",
"record",
".",
"get_value_data",
"(",
"value_entry",
")"
] |
Retrieves a specific value from the record.
Args:
record (pyesedb.record): ESE record.
value_entry (int): value entry.
Returns:
object: value.
Raises:
ValueError: if the value is not supported.
|
[
"Retrieves",
"a",
"specific",
"value",
"from",
"the",
"record",
"."
] |
python
|
train
| 30.788462 |
saltstack/salt
|
salt/cloud/clouds/ec2.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L1296-L1329
|
def securitygroupid(vm_):
'''
Returns the SecurityGroupId
'''
securitygroupid_set = set()
securitygroupid_list = config.get_cloud_config_value(
'securitygroupid',
vm_,
__opts__,
search_global=False
)
# If the list is None, then the set will remain empty
# If the list is already a set then calling 'set' on it is a no-op
# If the list is a string, then calling 'set' generates a one-element set
# If the list is anything else, stacktrace
if securitygroupid_list:
securitygroupid_set = securitygroupid_set.union(set(securitygroupid_list))
securitygroupname_list = config.get_cloud_config_value(
'securitygroupname', vm_, __opts__, search_global=False
)
if securitygroupname_list:
if not isinstance(securitygroupname_list, list):
securitygroupname_list = [securitygroupname_list]
params = {'Action': 'DescribeSecurityGroups'}
for sg in aws.query(params, location=get_location(),
provider=get_provider(), opts=__opts__, sigver='4'):
if sg['groupName'] in securitygroupname_list:
log.debug(
'AWS SecurityGroup ID of %s is %s',
sg['groupName'], sg['groupId']
)
securitygroupid_set.add(sg['groupId'])
return list(securitygroupid_set)
|
[
"def",
"securitygroupid",
"(",
"vm_",
")",
":",
"securitygroupid_set",
"=",
"set",
"(",
")",
"securitygroupid_list",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'securitygroupid'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
"# If the list is None, then the set will remain empty",
"# If the list is already a set then calling 'set' on it is a no-op",
"# If the list is a string, then calling 'set' generates a one-element set",
"# If the list is anything else, stacktrace",
"if",
"securitygroupid_list",
":",
"securitygroupid_set",
"=",
"securitygroupid_set",
".",
"union",
"(",
"set",
"(",
"securitygroupid_list",
")",
")",
"securitygroupname_list",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'securitygroupname'",
",",
"vm_",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
"if",
"securitygroupname_list",
":",
"if",
"not",
"isinstance",
"(",
"securitygroupname_list",
",",
"list",
")",
":",
"securitygroupname_list",
"=",
"[",
"securitygroupname_list",
"]",
"params",
"=",
"{",
"'Action'",
":",
"'DescribeSecurityGroups'",
"}",
"for",
"sg",
"in",
"aws",
".",
"query",
"(",
"params",
",",
"location",
"=",
"get_location",
"(",
")",
",",
"provider",
"=",
"get_provider",
"(",
")",
",",
"opts",
"=",
"__opts__",
",",
"sigver",
"=",
"'4'",
")",
":",
"if",
"sg",
"[",
"'groupName'",
"]",
"in",
"securitygroupname_list",
":",
"log",
".",
"debug",
"(",
"'AWS SecurityGroup ID of %s is %s'",
",",
"sg",
"[",
"'groupName'",
"]",
",",
"sg",
"[",
"'groupId'",
"]",
")",
"securitygroupid_set",
".",
"add",
"(",
"sg",
"[",
"'groupId'",
"]",
")",
"return",
"list",
"(",
"securitygroupid_set",
")"
] |
Returns the SecurityGroupId
|
[
"Returns",
"the",
"SecurityGroupId"
] |
python
|
train
| 40.205882 |
pymc-devs/pymc
|
pymc/gp/GPutils.py
|
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/GPutils.py#L264-L313
|
def observe(M, C, obs_mesh, obs_vals, obs_V=0, lintrans=None,
cross_validate=True):
"""
(M, C, obs_mesh, obs_vals[, obs_V = 0, lintrans = None, cross_validate = True])
Imposes observation of the value of obs_vals on M and C, where
obs_vals ~ N(lintrans * f(obs_mesh), V)
f ~ GP(M,C)
:Arguments:
- `M`: The mean function
- `C`: The covariance function
- `obs_mesh`: The places where f has been evaluated.
- `obs_vals`: The values of f that were observed there.
- `obs_V`: The observation variance. If None, assumed to be infinite
(observations made with no error).
- `lintrans`: A linear transformation. If None, assumed to be the
identity transformation (pretend it doesn't exist).
- `cross_validate`: A flag indicating whether a check should be done to
see if the data could have arisen from M and C with positive probability.
"""
obs_mesh = regularize_array(obs_mesh)
# print_(obs_mesh)
obs_V = resize(obs_V, obs_mesh.shape[0])
obs_vals = resize(obs_vals, obs_mesh.shape[0])
# First observe C.
relevant_slice, obs_mesh_new = C.observe(obs_mesh, obs_V, output_type='o')
# Then observe M from C.
M.observe(C, obs_mesh_new, obs_vals.ravel()[relevant_slice])
# Cross-validate if not asked not to.
if obs_mesh_new.shape[0] < obs_mesh.shape[0]:
if cross_validate:
if not predictive_check(
obs_vals, obs_mesh, M, C.obs_piv, sqrt(C.relative_precision)):
raise ValueError(
"These data seem extremely improbable given your GP prior. \n Suggestions: decrease observation precision, or adjust the covariance to \n allow the function to be less smooth.")
|
[
"def",
"observe",
"(",
"M",
",",
"C",
",",
"obs_mesh",
",",
"obs_vals",
",",
"obs_V",
"=",
"0",
",",
"lintrans",
"=",
"None",
",",
"cross_validate",
"=",
"True",
")",
":",
"obs_mesh",
"=",
"regularize_array",
"(",
"obs_mesh",
")",
"# print_(obs_mesh)",
"obs_V",
"=",
"resize",
"(",
"obs_V",
",",
"obs_mesh",
".",
"shape",
"[",
"0",
"]",
")",
"obs_vals",
"=",
"resize",
"(",
"obs_vals",
",",
"obs_mesh",
".",
"shape",
"[",
"0",
"]",
")",
"# First observe C.",
"relevant_slice",
",",
"obs_mesh_new",
"=",
"C",
".",
"observe",
"(",
"obs_mesh",
",",
"obs_V",
",",
"output_type",
"=",
"'o'",
")",
"# Then observe M from C.",
"M",
".",
"observe",
"(",
"C",
",",
"obs_mesh_new",
",",
"obs_vals",
".",
"ravel",
"(",
")",
"[",
"relevant_slice",
"]",
")",
"# Cross-validate if not asked not to.",
"if",
"obs_mesh_new",
".",
"shape",
"[",
"0",
"]",
"<",
"obs_mesh",
".",
"shape",
"[",
"0",
"]",
":",
"if",
"cross_validate",
":",
"if",
"not",
"predictive_check",
"(",
"obs_vals",
",",
"obs_mesh",
",",
"M",
",",
"C",
".",
"obs_piv",
",",
"sqrt",
"(",
"C",
".",
"relative_precision",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"These data seem extremely improbable given your GP prior. \\n Suggestions: decrease observation precision, or adjust the covariance to \\n allow the function to be less smooth.\"",
")"
] |
(M, C, obs_mesh, obs_vals[, obs_V = 0, lintrans = None, cross_validate = True])
Imposes observation of the value of obs_vals on M and C, where
obs_vals ~ N(lintrans * f(obs_mesh), V)
f ~ GP(M,C)
:Arguments:
- `M`: The mean function
- `C`: The covariance function
- `obs_mesh`: The places where f has been evaluated.
- `obs_vals`: The values of f that were observed there.
- `obs_V`: The observation variance. If None, assumed to be infinite
(observations made with no error).
- `lintrans`: A linear transformation. If None, assumed to be the
identity transformation (pretend it doesn't exist).
- `cross_validate`: A flag indicating whether a check should be done to
see if the data could have arisen from M and C with positive probability.
|
[
"(",
"M",
"C",
"obs_mesh",
"obs_vals",
"[",
"obs_V",
"=",
"0",
"lintrans",
"=",
"None",
"cross_validate",
"=",
"True",
"]",
")"
] |
python
|
train
| 35.44 |
tradenity/python-sdk
|
tradenity/resources/geo_zone.py
|
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/geo_zone.py#L259-L281
|
def list_all_geo_zones(cls, **kwargs):
"""List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_geo_zones_with_http_info(**kwargs)
else:
(data) = cls._list_all_geo_zones_with_http_info(**kwargs)
return data
|
[
"def",
"list_all_geo_zones",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_list_all_geo_zones_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_list_all_geo_zones_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"return",
"data"
] |
List GeoZones
Return a list of GeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[GeoZone]
If the method is called asynchronously,
returns the request thread.
|
[
"List",
"GeoZones"
] |
python
|
train
| 36.347826 |
yuce/pyswip
|
pyswip/easy.py
|
https://github.com/yuce/pyswip/blob/f7c1f1e8c3a13b90bd775861d374788a8b5677d8/pyswip/easy.py#L349-L356
|
def getBool(t):
"""If t is of type bool, return it, otherwise raise InvalidTypeError.
"""
b = c_int()
if PL_get_long(t, byref(b)):
return bool(b.value)
else:
raise InvalidTypeError("bool")
|
[
"def",
"getBool",
"(",
"t",
")",
":",
"b",
"=",
"c_int",
"(",
")",
"if",
"PL_get_long",
"(",
"t",
",",
"byref",
"(",
"b",
")",
")",
":",
"return",
"bool",
"(",
"b",
".",
"value",
")",
"else",
":",
"raise",
"InvalidTypeError",
"(",
"\"bool\"",
")"
] |
If t is of type bool, return it, otherwise raise InvalidTypeError.
|
[
"If",
"t",
"is",
"of",
"type",
"bool",
"return",
"it",
"otherwise",
"raise",
"InvalidTypeError",
"."
] |
python
|
train
| 27.125 |
tensorflow/probability
|
tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py
|
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/latent_dirichlet_allocation_distributions.py#L197-L222
|
def make_decoder(num_topics, num_words):
"""Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
"""
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[num_topics, num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
def decoder(topics):
word_probs = tf.matmul(topics, topics_words)
# The observations are bag of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
return tfd.OneHotCategorical(probs=word_probs,
name="bag_of_words")
return decoder, topics_words
|
[
"def",
"make_decoder",
"(",
"num_topics",
",",
"num_words",
")",
":",
"topics_words_logits",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"get_variable",
"(",
"\"topics_words_logits\"",
",",
"shape",
"=",
"[",
"num_topics",
",",
"num_words",
"]",
",",
"initializer",
"=",
"tf",
".",
"compat",
".",
"v1",
".",
"glorot_normal_initializer",
"(",
")",
")",
"topics_words",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"topics_words_logits",
",",
"axis",
"=",
"-",
"1",
")",
"def",
"decoder",
"(",
"topics",
")",
":",
"word_probs",
"=",
"tf",
".",
"matmul",
"(",
"topics",
",",
"topics_words",
")",
"# The observations are bag of words and therefore not one-hot. However,",
"# log_prob of OneHotCategorical computes the probability correctly in",
"# this case.",
"return",
"tfd",
".",
"OneHotCategorical",
"(",
"probs",
"=",
"word_probs",
",",
"name",
"=",
"\"bag_of_words\"",
")",
"return",
"decoder",
",",
"topics_words"
] |
Create the decoder function.
Args:
num_topics: The number of topics.
num_words: The number of words.
Returns:
decoder: A `callable` mapping a `Tensor` of encodings to a
`tfd.Distribution` instance over words.
|
[
"Create",
"the",
"decoder",
"function",
"."
] |
python
|
test
| 33.615385 |
JarryShaw/PyPCAPKit
|
src/protocols/link/link.py
|
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/link/link.py#L79-L121
|
def _import_next_layer(self, proto, length):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
"""
if length == 0:
from pcapkit.protocols.null import NoPayload as Protocol
elif self._sigterm:
from pcapkit.protocols.raw import Raw as Protocol
elif proto == 0x0806:
from pcapkit.protocols.link.arp import ARP as Protocol
elif proto == 0x8035:
from pcapkit.protocols.link.rarp import RARP as Protocol
elif proto == 0x8100:
from pcapkit.protocols.link.vlan import VLAN as Protocol
elif proto == 0x0800:
from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
elif proto == 0x86DD:
from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
elif proto == 0x8137:
from pcapkit.protocols.internet.ipx import IPX as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, error=self._onerror,
layer=self._exlayer, protocol=self._exproto)
return next_
|
[
"def",
"_import_next_layer",
"(",
"self",
",",
"proto",
",",
"length",
")",
":",
"if",
"length",
"==",
"0",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"null",
"import",
"NoPayload",
"as",
"Protocol",
"elif",
"self",
".",
"_sigterm",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"raw",
"import",
"Raw",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x0806",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"link",
".",
"arp",
"import",
"ARP",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x8035",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"link",
".",
"rarp",
"import",
"RARP",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x8100",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"link",
".",
"vlan",
"import",
"VLAN",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x0800",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"internet",
".",
"ipv4",
"import",
"IPv4",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x86DD",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"internet",
".",
"ipv6",
"import",
"IPv6",
"as",
"Protocol",
"elif",
"proto",
"==",
"0x8137",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"internet",
".",
"ipx",
"import",
"IPX",
"as",
"Protocol",
"else",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"raw",
"import",
"Raw",
"as",
"Protocol",
"next_",
"=",
"Protocol",
"(",
"self",
".",
"_file",
",",
"length",
",",
"error",
"=",
"self",
".",
"_onerror",
",",
"layer",
"=",
"self",
".",
"_exlayer",
",",
"protocol",
"=",
"self",
".",
"_exproto",
")",
"return",
"next_"
] |
Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* ARP -- data link layer
* RARP -- data link layer
* VLAN -- data link layer
* IPv4 -- internet layer
* IPv6 -- internet layer
* IPX -- internet layer
|
[
"Import",
"next",
"layer",
"extractor",
"."
] |
python
|
train
| 39.023256 |
pantsbuild/pex
|
pex/pex.py
|
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/pex.py#L235-L252
|
def minimum_sys(cls, inherit_path):
"""Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
"""
site_libs = set(cls.site_libs())
for site_lib in site_libs:
TRACER.log('Found site-library: %s' % site_lib)
for extras_path in cls._extras_paths():
TRACER.log('Found site extra: %s' % extras_path)
site_libs.add(extras_path)
site_libs = set(os.path.normpath(path) for path in site_libs)
sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)
sys_modules = cls.minimum_sys_modules(site_libs)
return sys_path, sys_path_importer_cache, sys_modules
|
[
"def",
"minimum_sys",
"(",
"cls",
",",
"inherit_path",
")",
":",
"site_libs",
"=",
"set",
"(",
"cls",
".",
"site_libs",
"(",
")",
")",
"for",
"site_lib",
"in",
"site_libs",
":",
"TRACER",
".",
"log",
"(",
"'Found site-library: %s'",
"%",
"site_lib",
")",
"for",
"extras_path",
"in",
"cls",
".",
"_extras_paths",
"(",
")",
":",
"TRACER",
".",
"log",
"(",
"'Found site extra: %s'",
"%",
"extras_path",
")",
"site_libs",
".",
"add",
"(",
"extras_path",
")",
"site_libs",
"=",
"set",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"path",
")",
"for",
"path",
"in",
"site_libs",
")",
"sys_path",
",",
"sys_path_importer_cache",
"=",
"cls",
".",
"minimum_sys_path",
"(",
"site_libs",
",",
"inherit_path",
")",
"sys_modules",
"=",
"cls",
".",
"minimum_sys_modules",
"(",
"site_libs",
")",
"return",
"sys_path",
",",
"sys_path_importer_cache",
",",
"sys_modules"
] |
Return the minimum sys necessary to run this interpreter, a la python -S.
:returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
bare python installation.
|
[
"Return",
"the",
"minimum",
"sys",
"necessary",
"to",
"run",
"this",
"interpreter",
"a",
"la",
"python",
"-",
"S",
"."
] |
python
|
train
| 40.722222 |
quantumlib/Cirq
|
cirq/circuits/text_diagram_drawer.py
|
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/text_diagram_drawer.py#L179-L182
|
def force_horizontal_padding_after(
self, index: int, padding: Union[int, float]) -> None:
"""Change the padding after the given column."""
self.horizontal_padding[index] = padding
|
[
"def",
"force_horizontal_padding_after",
"(",
"self",
",",
"index",
":",
"int",
",",
"padding",
":",
"Union",
"[",
"int",
",",
"float",
"]",
")",
"->",
"None",
":",
"self",
".",
"horizontal_padding",
"[",
"index",
"]",
"=",
"padding"
] |
Change the padding after the given column.
|
[
"Change",
"the",
"padding",
"after",
"the",
"given",
"column",
"."
] |
python
|
train
| 51.25 |
chaoss/grimoirelab-manuscripts
|
manuscripts2/metrics/git.py
|
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/metrics/git.py#L107-L114
|
def timeseries(self, dataframe=False):
"""Get the date histogram aggregations.
:param dataframe: if true, return a pandas.DataFrame object
"""
self.query.get_cardinality("author_uuid").by_period()
return super().timeseries(dataframe)
|
[
"def",
"timeseries",
"(",
"self",
",",
"dataframe",
"=",
"False",
")",
":",
"self",
".",
"query",
".",
"get_cardinality",
"(",
"\"author_uuid\"",
")",
".",
"by_period",
"(",
")",
"return",
"super",
"(",
")",
".",
"timeseries",
"(",
"dataframe",
")"
] |
Get the date histogram aggregations.
:param dataframe: if true, return a pandas.DataFrame object
|
[
"Get",
"the",
"date",
"histogram",
"aggregations",
"."
] |
python
|
train
| 33.5 |
HazyResearch/metal
|
metal/logging/logger.py
|
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/logging/logger.py#L37-L40
|
def check(self, batch_size):
"""Returns True if the logging frequency has been met."""
self.increment(batch_size)
return self.unit_count >= self.config["log_train_every"]
|
[
"def",
"check",
"(",
"self",
",",
"batch_size",
")",
":",
"self",
".",
"increment",
"(",
"batch_size",
")",
"return",
"self",
".",
"unit_count",
">=",
"self",
".",
"config",
"[",
"\"log_train_every\"",
"]"
] |
Returns True if the logging frequency has been met.
|
[
"Returns",
"True",
"if",
"the",
"logging",
"frequency",
"has",
"been",
"met",
"."
] |
python
|
train
| 47.75 |
b3j0f/annotation
|
b3j0f/annotation/async.py
|
https://github.com/b3j0f/annotation/blob/738035a974e4092696d9dc1bbd149faa21c8c51f/b3j0f/annotation/async.py#L97-L102
|
def _threaded(self, *args, **kwargs):
"""Call the target and put the result in the Queue."""
for target in self.targets:
result = target(*args, **kwargs)
self.queue.put(result)
|
[
"def",
"_threaded",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"target",
"in",
"self",
".",
"targets",
":",
"result",
"=",
"target",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"queue",
".",
"put",
"(",
"result",
")"
] |
Call the target and put the result in the Queue.
|
[
"Call",
"the",
"target",
"and",
"put",
"the",
"result",
"in",
"the",
"Queue",
"."
] |
python
|
train
| 35.333333 |
DLR-RM/RAFCON
|
source/rafcon/gui/controllers/top_tool_bar.py
|
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/top_tool_bar.py#L51-L55
|
def register_view(self, view):
"""Called when the View was registered"""
super(TopToolBarController, self).register_view(view)
view['maximize_button'].connect('clicked', self.on_maximize_button_clicked)
self.update_maximize_button()
|
[
"def",
"register_view",
"(",
"self",
",",
"view",
")",
":",
"super",
"(",
"TopToolBarController",
",",
"self",
")",
".",
"register_view",
"(",
"view",
")",
"view",
"[",
"'maximize_button'",
"]",
".",
"connect",
"(",
"'clicked'",
",",
"self",
".",
"on_maximize_button_clicked",
")",
"self",
".",
"update_maximize_button",
"(",
")"
] |
Called when the View was registered
|
[
"Called",
"when",
"the",
"View",
"was",
"registered"
] |
python
|
train
| 52 |
cs50/check50
|
check50/runner.py
|
https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/runner.py#L76-L153
|
def check(dependency=None, timeout=60):
"""Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
"""
def decorator(check):
# Modules are evaluated from the top of the file down, so _check_names will
# contain the names of the checks in the order in which they are declared
_check_names.append(check.__name__)
check._check_dependency = dependency
@functools.wraps(check)
def wrapper(checks_root, dependency_state):
# Result template
result = CheckResult.from_check(check)
# Any shared (returned) state
state = None
try:
# Setup check environment, copying disk state from dependency
internal.run_dir = checks_root / check.__name__
src_dir = checks_root / (dependency.__name__ if dependency else "-")
shutil.copytree(src_dir, internal.run_dir)
os.chdir(internal.run_dir)
# Run registered functions before/after running check and set timeout
with internal.register, _timeout(seconds=timeout):
args = (dependency_state,) if inspect.getfullargspec(check).args else ()
state = check(*args)
except Failure as e:
result.passed = False
result.cause = e.payload
except BaseException as e:
result.passed = None
result.cause = {"rationale": _("check50 ran into an error while running checks!")}
log(repr(e))
for line in traceback.format_tb(e.__traceback__):
log(line.rstrip())
log(_("Contact [email protected] with the URL of this check!"))
else:
result.passed = True
finally:
result.log = _log
result.data = _data
return result, state
return wrapper
return decorator
|
[
"def",
"check",
"(",
"dependency",
"=",
"None",
",",
"timeout",
"=",
"60",
")",
":",
"def",
"decorator",
"(",
"check",
")",
":",
"# Modules are evaluated from the top of the file down, so _check_names will",
"# contain the names of the checks in the order in which they are declared",
"_check_names",
".",
"append",
"(",
"check",
".",
"__name__",
")",
"check",
".",
"_check_dependency",
"=",
"dependency",
"@",
"functools",
".",
"wraps",
"(",
"check",
")",
"def",
"wrapper",
"(",
"checks_root",
",",
"dependency_state",
")",
":",
"# Result template",
"result",
"=",
"CheckResult",
".",
"from_check",
"(",
"check",
")",
"# Any shared (returned) state",
"state",
"=",
"None",
"try",
":",
"# Setup check environment, copying disk state from dependency",
"internal",
".",
"run_dir",
"=",
"checks_root",
"/",
"check",
".",
"__name__",
"src_dir",
"=",
"checks_root",
"/",
"(",
"dependency",
".",
"__name__",
"if",
"dependency",
"else",
"\"-\"",
")",
"shutil",
".",
"copytree",
"(",
"src_dir",
",",
"internal",
".",
"run_dir",
")",
"os",
".",
"chdir",
"(",
"internal",
".",
"run_dir",
")",
"# Run registered functions before/after running check and set timeout",
"with",
"internal",
".",
"register",
",",
"_timeout",
"(",
"seconds",
"=",
"timeout",
")",
":",
"args",
"=",
"(",
"dependency_state",
",",
")",
"if",
"inspect",
".",
"getfullargspec",
"(",
"check",
")",
".",
"args",
"else",
"(",
")",
"state",
"=",
"check",
"(",
"*",
"args",
")",
"except",
"Failure",
"as",
"e",
":",
"result",
".",
"passed",
"=",
"False",
"result",
".",
"cause",
"=",
"e",
".",
"payload",
"except",
"BaseException",
"as",
"e",
":",
"result",
".",
"passed",
"=",
"None",
"result",
".",
"cause",
"=",
"{",
"\"rationale\"",
":",
"_",
"(",
"\"check50 ran into an error while running checks!\"",
")",
"}",
"log",
"(",
"repr",
"(",
"e",
")",
")",
"for",
"line",
"in",
"traceback",
".",
"format_tb",
"(",
"e",
".",
"__traceback__",
")",
":",
"log",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"log",
"(",
"_",
"(",
"\"Contact [email protected] with the URL of this check!\"",
")",
")",
"else",
":",
"result",
".",
"passed",
"=",
"True",
"finally",
":",
"result",
".",
"log",
"=",
"_log",
"result",
".",
"data",
"=",
"_data",
"return",
"result",
",",
"state",
"return",
"wrapper",
"return",
"decorator"
] |
Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immediately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
|
[
"Mark",
"function",
"as",
"a",
"check",
"."
] |
python
|
train
| 42.089744 |
dbcli/cli_helpers
|
cli_helpers/config.py
|
https://github.com/dbcli/cli_helpers/blob/3ebd891ac0c02bad061182dbcb54a47fb21980ae/cli_helpers/config.py#L103-L111
|
def read(self):
"""Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
"""
if self.default_file:
self.read_default_config()
return self.read_config_files(self.all_config_files())
|
[
"def",
"read",
"(",
"self",
")",
":",
"if",
"self",
".",
"default_file",
":",
"self",
".",
"read_default_config",
"(",
")",
"return",
"self",
".",
"read_config_files",
"(",
"self",
".",
"all_config_files",
"(",
")",
")"
] |
Read the default, additional, system, and user config files.
:raises DefaultConfigValidationError: There was a validation error with
the *default* file.
|
[
"Read",
"the",
"default",
"additional",
"system",
"and",
"user",
"config",
"files",
"."
] |
python
|
test
| 41.111111 |
p3trus/slave
|
slave/misc.py
|
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/misc.py#L78-L95
|
def index(index, length):
"""Generates an index.
:param index: The index, can be positive or negative.
:param length: The length of the sequence to index.
:raises: IndexError
Negative indices are typically used to index a sequence in reverse order.
But to use them, the indexed object must convert them to the correct,
positive index. This function can be used to do this.
"""
if index < 0:
index += length
if 0 <= index < length:
return index
raise IndexError()
|
[
"def",
"index",
"(",
"index",
",",
"length",
")",
":",
"if",
"index",
"<",
"0",
":",
"index",
"+=",
"length",
"if",
"0",
"<=",
"index",
"<",
"length",
":",
"return",
"index",
"raise",
"IndexError",
"(",
")"
] |
Generates an index.
:param index: The index, can be positive or negative.
:param length: The length of the sequence to index.
:raises: IndexError
Negative indices are typically used to index a sequence in reverse order.
But to use them, the indexed object must convert them to the correct,
positive index. This function can be used to do this.
|
[
"Generates",
"an",
"index",
"."
] |
python
|
train
| 28.277778 |
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L552-L612
|
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
|
[
"def",
"type_list",
"(",
"signature",
",",
"doc",
",",
"header",
")",
":",
"lines",
"=",
"[",
"]",
"docced",
"=",
"set",
"(",
")",
"lines",
".",
"append",
"(",
"header",
")",
"try",
":",
"for",
"names",
",",
"types",
",",
"description",
"in",
"doc",
":",
"names",
",",
"types",
"=",
"_get_names",
"(",
"names",
",",
"types",
")",
"unannotated",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"docced",
".",
"add",
"(",
"name",
")",
"try",
":",
"typ",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"annotation",
"if",
"typ",
"==",
"inspect",
".",
"_empty",
":",
"raise",
"AttributeError",
"default",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"default",
"type_string",
"=",
"string_annotation",
"(",
"typ",
",",
"default",
")",
"lines",
".",
"append",
"(",
"f\"- `{name}`: {type_string}\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"unannotated",
".",
"append",
"(",
"name",
")",
"# No annotation",
"if",
"len",
"(",
"unannotated",
")",
">",
"0",
":",
"lines",
".",
"append",
"(",
"\"- \"",
")",
"lines",
".",
"append",
"(",
"\", \"",
".",
"join",
"(",
"f\"`{name}`\"",
"for",
"name",
"in",
"unannotated",
")",
")",
"if",
"types",
"!=",
"\"\"",
"and",
"len",
"(",
"unannotated",
")",
">",
"0",
":",
"lines",
".",
"append",
"(",
"f\": {mangle_types(types)}\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"lines",
".",
"append",
"(",
"f\" {' '.join(description)}\\n\\n\"",
")",
"for",
"names",
",",
"types",
",",
"description",
"in",
"doc",
":",
"names",
",",
"types",
"=",
"_get_names",
"(",
"names",
",",
"types",
")",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"not",
"in",
"docced",
":",
"try",
":",
"typ",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"annotation",
"default",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"default",
"type_string",
"=",
"string_annotation",
"(",
"typ",
",",
"default",
")",
"lines",
".",
"append",
"(",
"f\"- `{name}`: {type_string}\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"lines",
".",
"append",
"(",
"f\"- `{name}`\"",
")",
"lines",
".",
"append",
"(",
"\"\\n\\n\"",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"lines",
"if",
"len",
"(",
"lines",
")",
">",
"1",
"else",
"[",
"]"
] |
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
|
[
"Construct",
"a",
"list",
"of",
"types",
"preferring",
"type",
"annotations",
"to",
"docstrings",
"if",
"they",
"are",
"available",
"."
] |
python
|
train
| 36.295082 |
readbeyond/aeneas
|
aeneas/executetask.py
|
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/executetask.py#L185-L225
|
def _execute_single_level_task(self):
""" Execute a single-level task """
self.log(u"Executing single level task...")
try:
# load audio file, extract MFCCs from real wave, clear audio file
self._step_begin(u"extract MFCC real wave")
real_wave_mfcc = self._extract_mfcc(
file_path=self.task.audio_file_path_absolute,
file_format=None,
)
self._step_end()
# compute head and/or tail and set it
self._step_begin(u"compute head tail")
(head_length, process_length, tail_length) = self._compute_head_process_tail(real_wave_mfcc)
real_wave_mfcc.set_head_middle_tail(head_length, process_length, tail_length)
self._step_end()
# compute alignment, outputting a tree of time intervals
self._set_synthesizer()
sync_root = Tree()
self._execute_inner(
real_wave_mfcc,
self.task.text_file,
sync_root=sync_root,
force_aba_auto=False,
log=True,
leaf_level=True
)
self._clear_cache_synthesizer()
# create syncmap and add it to task
self._step_begin(u"create sync map")
self._create_sync_map(sync_root=sync_root)
self._step_end()
# log total
self._step_total()
self.log(u"Executing single level task... done")
except Exception as exc:
self._step_failure(exc)
|
[
"def",
"_execute_single_level_task",
"(",
"self",
")",
":",
"self",
".",
"log",
"(",
"u\"Executing single level task...\"",
")",
"try",
":",
"# load audio file, extract MFCCs from real wave, clear audio file",
"self",
".",
"_step_begin",
"(",
"u\"extract MFCC real wave\"",
")",
"real_wave_mfcc",
"=",
"self",
".",
"_extract_mfcc",
"(",
"file_path",
"=",
"self",
".",
"task",
".",
"audio_file_path_absolute",
",",
"file_format",
"=",
"None",
",",
")",
"self",
".",
"_step_end",
"(",
")",
"# compute head and/or tail and set it",
"self",
".",
"_step_begin",
"(",
"u\"compute head tail\"",
")",
"(",
"head_length",
",",
"process_length",
",",
"tail_length",
")",
"=",
"self",
".",
"_compute_head_process_tail",
"(",
"real_wave_mfcc",
")",
"real_wave_mfcc",
".",
"set_head_middle_tail",
"(",
"head_length",
",",
"process_length",
",",
"tail_length",
")",
"self",
".",
"_step_end",
"(",
")",
"# compute alignment, outputting a tree of time intervals",
"self",
".",
"_set_synthesizer",
"(",
")",
"sync_root",
"=",
"Tree",
"(",
")",
"self",
".",
"_execute_inner",
"(",
"real_wave_mfcc",
",",
"self",
".",
"task",
".",
"text_file",
",",
"sync_root",
"=",
"sync_root",
",",
"force_aba_auto",
"=",
"False",
",",
"log",
"=",
"True",
",",
"leaf_level",
"=",
"True",
")",
"self",
".",
"_clear_cache_synthesizer",
"(",
")",
"# create syncmap and add it to task",
"self",
".",
"_step_begin",
"(",
"u\"create sync map\"",
")",
"self",
".",
"_create_sync_map",
"(",
"sync_root",
"=",
"sync_root",
")",
"self",
".",
"_step_end",
"(",
")",
"# log total",
"self",
".",
"_step_total",
"(",
")",
"self",
".",
"log",
"(",
"u\"Executing single level task... done\"",
")",
"except",
"Exception",
"as",
"exc",
":",
"self",
".",
"_step_failure",
"(",
"exc",
")"
] |
Execute a single-level task
|
[
"Execute",
"a",
"single",
"-",
"level",
"task"
] |
python
|
train
| 37.853659 |
saltstack/salt
|
salt/modules/boto_lambda.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_lambda.py#L788-L819
|
def update_alias(FunctionName, Name, FunctionVersion=None, Description=None,
region=None, key=None, keyid=None, profile=None):
'''
Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
{updated: False} if the alias was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.update_alias my_lambda my_alias $LATEST
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
args = {}
if FunctionVersion:
args['FunctionVersion'] = FunctionVersion
if Description:
args['Description'] = Description
r = conn.update_alias(FunctionName=FunctionName, Name=Name, **args)
if r:
keys = ('Name', 'FunctionVersion', 'Description')
return {'updated': True, 'alias': dict([(k, r.get(k)) for k in keys])}
else:
log.warning('Alias was not updated')
return {'updated': False}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)}
|
[
"def",
"update_alias",
"(",
"FunctionName",
",",
"Name",
",",
"FunctionVersion",
"=",
"None",
",",
"Description",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"args",
"=",
"{",
"}",
"if",
"FunctionVersion",
":",
"args",
"[",
"'FunctionVersion'",
"]",
"=",
"FunctionVersion",
"if",
"Description",
":",
"args",
"[",
"'Description'",
"]",
"=",
"Description",
"r",
"=",
"conn",
".",
"update_alias",
"(",
"FunctionName",
"=",
"FunctionName",
",",
"Name",
"=",
"Name",
",",
"*",
"*",
"args",
")",
"if",
"r",
":",
"keys",
"=",
"(",
"'Name'",
",",
"'FunctionVersion'",
",",
"'Description'",
")",
"return",
"{",
"'updated'",
":",
"True",
",",
"'alias'",
":",
"dict",
"(",
"[",
"(",
"k",
",",
"r",
".",
"get",
"(",
"k",
")",
")",
"for",
"k",
"in",
"keys",
"]",
")",
"}",
"else",
":",
"log",
".",
"warning",
"(",
"'Alias was not updated'",
")",
"return",
"{",
"'updated'",
":",
"False",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
] |
Update the named alias to the configuration.
Returns {updated: true} if the alias was updated and returns
{updated: False} if the alias was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.update_alias my_lambda my_alias $LATEST
|
[
"Update",
"the",
"named",
"alias",
"to",
"the",
"configuration",
"."
] |
python
|
train
| 34.78125 |
cloudtools/troposphere
|
troposphere/utils.py
|
https://github.com/cloudtools/troposphere/blob/f7ea5591a7c287a843adc9c184d2f56064cfc632/troposphere/utils.py#L8-L19
|
def get_events(conn, stackname):
"""Get the events in batches and return in chronological order"""
next = None
event_list = []
while 1:
events = conn.describe_stack_events(stackname, next)
event_list.append(events)
if events.next_token is None:
break
next = events.next_token
time.sleep(1)
return reversed(sum(event_list, []))
|
[
"def",
"get_events",
"(",
"conn",
",",
"stackname",
")",
":",
"next",
"=",
"None",
"event_list",
"=",
"[",
"]",
"while",
"1",
":",
"events",
"=",
"conn",
".",
"describe_stack_events",
"(",
"stackname",
",",
"next",
")",
"event_list",
".",
"append",
"(",
"events",
")",
"if",
"events",
".",
"next_token",
"is",
"None",
":",
"break",
"next",
"=",
"events",
".",
"next_token",
"time",
".",
"sleep",
"(",
"1",
")",
"return",
"reversed",
"(",
"sum",
"(",
"event_list",
",",
"[",
"]",
")",
")"
] |
Get the events in batches and return in chronological order
|
[
"Get",
"the",
"events",
"in",
"batches",
"and",
"return",
"in",
"chronological",
"order"
] |
python
|
train
| 32.25 |
brocade/pynos
|
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L51-L63
|
def nsx_controller_connection_addr_method(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
method = ET.SubElement(connection_addr, "method")
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"nsx_controller_connection_addr_method",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"nsx_controller",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"nsx-controller\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-tunnels\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"nsx_controller",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"connection_addr",
"=",
"ET",
".",
"SubElement",
"(",
"nsx_controller",
",",
"\"connection-addr\"",
")",
"method",
"=",
"ET",
".",
"SubElement",
"(",
"connection_addr",
",",
"\"method\"",
")",
"method",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'method'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
| 46.538462 |
oleiade/durations
|
durations/parser.py
|
https://github.com/oleiade/durations/blob/62c176dfa7d36d5c59bf93bdebfdc80ab53757bd/durations/parser.py#L10-L31
|
def valid_token(token):
"""Asserts a provided string is a valid duration token representation
:param token: duration representation token
:type token: string
"""
is_scale = False
# Check if the token represents a scale
# If it doesn't set a flag accordingly
try:
Scale(token)
is_scale = True
except ScaleFormatError:
pass
# If token either represents a numerical value, a
# separator token, or a scale, it is considered valid
if any([token.isdigit(), token in SEPARATOR_TOKENS, is_scale]):
return True
return False
|
[
"def",
"valid_token",
"(",
"token",
")",
":",
"is_scale",
"=",
"False",
"# Check if the token represents a scale",
"# If it doesn't set a flag accordingly",
"try",
":",
"Scale",
"(",
"token",
")",
"is_scale",
"=",
"True",
"except",
"ScaleFormatError",
":",
"pass",
"# If token either represents a numerical value, a",
"# separator token, or a scale, it is considered valid",
"if",
"any",
"(",
"[",
"token",
".",
"isdigit",
"(",
")",
",",
"token",
"in",
"SEPARATOR_TOKENS",
",",
"is_scale",
"]",
")",
":",
"return",
"True",
"return",
"False"
] |
Asserts a provided string is a valid duration token representation
:param token: duration representation token
:type token: string
|
[
"Asserts",
"a",
"provided",
"string",
"is",
"a",
"valid",
"duration",
"token",
"representation"
] |
python
|
train
| 26.545455 |
what-studio/profiling
|
profiling/viewer.py
|
https://github.com/what-studio/profiling/blob/49666ba3ea295eb73782ae6c18a4ec7929d7d8b7/profiling/viewer.py#L563-L574
|
def find_node(self, node, path):
"""Finds a node by the given path from the given node."""
for hash_value in path:
if isinstance(node, LeafStatisticsNode):
break
for stats in node.get_child_keys():
if hash(stats) == hash_value:
node = node.get_child_node(stats)
break
else:
break
return node
|
[
"def",
"find_node",
"(",
"self",
",",
"node",
",",
"path",
")",
":",
"for",
"hash_value",
"in",
"path",
":",
"if",
"isinstance",
"(",
"node",
",",
"LeafStatisticsNode",
")",
":",
"break",
"for",
"stats",
"in",
"node",
".",
"get_child_keys",
"(",
")",
":",
"if",
"hash",
"(",
"stats",
")",
"==",
"hash_value",
":",
"node",
"=",
"node",
".",
"get_child_node",
"(",
"stats",
")",
"break",
"else",
":",
"break",
"return",
"node"
] |
Finds a node by the given path from the given node.
|
[
"Finds",
"a",
"node",
"by",
"the",
"given",
"path",
"from",
"the",
"given",
"node",
"."
] |
python
|
train
| 35.666667 |
CameronLonsdale/lantern
|
lantern/modules/shift.py
|
https://github.com/CameronLonsdale/lantern/blob/235e163e96bf0719d49c54204ee576b2ca93abb6/lantern/modules/shift.py#L9-L43
|
def make_shift_function(alphabet):
"""Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
"""
def shift_case_sensitive(shift, symbol):
case = [case for case in alphabet if symbol in case]
if not case:
return symbol
case = case[0]
index = case.index(symbol)
return case[(index - shift) % len(case)]
return shift_case_sensitive
|
[
"def",
"make_shift_function",
"(",
"alphabet",
")",
":",
"def",
"shift_case_sensitive",
"(",
"shift",
",",
"symbol",
")",
":",
"case",
"=",
"[",
"case",
"for",
"case",
"in",
"alphabet",
"if",
"symbol",
"in",
"case",
"]",
"if",
"not",
"case",
":",
"return",
"symbol",
"case",
"=",
"case",
"[",
"0",
"]",
"index",
"=",
"case",
".",
"index",
"(",
"symbol",
")",
"return",
"case",
"[",
"(",
"index",
"-",
"shift",
")",
"%",
"len",
"(",
"case",
")",
"]",
"return",
"shift_case_sensitive"
] |
Construct a shift function from an alphabet.
Examples:
Shift cases independently
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
<function make_shift_function.<locals>.shift_case_sensitive>
Additionally shift punctuation characters
>>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation])
<function make_shift_function.<locals>.shift_case_sensitive>
Shift entire ASCII range, overflowing cases
>>> make_shift_function([''.join(chr(x) for x in range(32, 127))])
<function make_shift_function.<locals>.shift_case_sensitive>
Args:
alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet
Returns:
Function (shift, symbol)
|
[
"Construct",
"a",
"shift",
"function",
"from",
"an",
"alphabet",
"."
] |
python
|
train
| 32.4 |
QualiSystems/vCenterShell
|
package/cloudshell/cp/vcenter/common/wrappers/command_wrapper.py
|
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/wrappers/command_wrapper.py#L54-L135
|
def execute_command_with_connection(self, context, command, *args):
"""
Note: session & vcenter_data_model & reservation id objects will be injected dynamically to the command
:param command:
:param context: instance of ResourceCommandContext or AutoLoadCommandContext
:type context: cloudshell.shell.core.context.ResourceCommandContext
:param args:
"""
logger = self.context_based_logger_factory.create_logger_for_context(
logger_name='vCenterShell',
context=context)
if not command:
logger.error(COMMAND_CANNOT_BE_NONE)
raise Exception(COMMAND_CANNOT_BE_NONE)
try:
command_name = command.__name__
logger.info(LOG_FORMAT.format(START, command_name))
command_args = []
si = None
session = None
connection_details = None
vcenter_data_model = None
# get connection details
if context:
with CloudShellSessionContext(context) as cloudshell_session:
session = cloudshell_session
vcenter_data_model = self.resource_model_parser.convert_to_vcenter_model(context.resource)
connection_details = ResourceConnectionDetailsRetriever.get_connection_details(session=session,
vcenter_resource_model=vcenter_data_model,
resource_context=context.resource)
if connection_details:
logger.info(INFO_CONNECTING_TO_VCENTER.format(connection_details.host))
logger.debug(
DEBUG_CONNECTION_INFO.format(connection_details.host,
connection_details.username,
connection_details.port))
si = self.get_py_service_connection(connection_details, logger)
if si:
logger.info(CONNECTED_TO_CENTER.format(connection_details.host))
command_args.append(si)
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=session,
arg_name='session')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=vcenter_data_model,
arg_name='vcenter_data_model')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=self._get_reservation_id(context),
arg_name='reservation_id')
self._try_inject_arg(command=command,
command_args=command_args,
arg_object=logger,
arg_name='logger')
command_args.extend(args)
logger.info(EXECUTING_COMMAND.format(command_name))
logger.debug(DEBUG_COMMAND_PARAMS.format(COMMA.join([str(x) for x in command_args])))
results = command(*tuple(command_args))
logger.info(FINISHED_EXECUTING_COMMAND.format(command_name))
logger.debug(DEBUG_COMMAND_RESULT.format(str(results)))
return results
except Exception as ex:
logger.exception(COMMAND_ERROR.format(command_name))
logger.exception(str(type(ex)) + ': ' + str(ex))
raise
finally:
logger.info(LOG_FORMAT.format(END, command_name))
|
[
"def",
"execute_command_with_connection",
"(",
"self",
",",
"context",
",",
"command",
",",
"*",
"args",
")",
":",
"logger",
"=",
"self",
".",
"context_based_logger_factory",
".",
"create_logger_for_context",
"(",
"logger_name",
"=",
"'vCenterShell'",
",",
"context",
"=",
"context",
")",
"if",
"not",
"command",
":",
"logger",
".",
"error",
"(",
"COMMAND_CANNOT_BE_NONE",
")",
"raise",
"Exception",
"(",
"COMMAND_CANNOT_BE_NONE",
")",
"try",
":",
"command_name",
"=",
"command",
".",
"__name__",
"logger",
".",
"info",
"(",
"LOG_FORMAT",
".",
"format",
"(",
"START",
",",
"command_name",
")",
")",
"command_args",
"=",
"[",
"]",
"si",
"=",
"None",
"session",
"=",
"None",
"connection_details",
"=",
"None",
"vcenter_data_model",
"=",
"None",
"# get connection details",
"if",
"context",
":",
"with",
"CloudShellSessionContext",
"(",
"context",
")",
"as",
"cloudshell_session",
":",
"session",
"=",
"cloudshell_session",
"vcenter_data_model",
"=",
"self",
".",
"resource_model_parser",
".",
"convert_to_vcenter_model",
"(",
"context",
".",
"resource",
")",
"connection_details",
"=",
"ResourceConnectionDetailsRetriever",
".",
"get_connection_details",
"(",
"session",
"=",
"session",
",",
"vcenter_resource_model",
"=",
"vcenter_data_model",
",",
"resource_context",
"=",
"context",
".",
"resource",
")",
"if",
"connection_details",
":",
"logger",
".",
"info",
"(",
"INFO_CONNECTING_TO_VCENTER",
".",
"format",
"(",
"connection_details",
".",
"host",
")",
")",
"logger",
".",
"debug",
"(",
"DEBUG_CONNECTION_INFO",
".",
"format",
"(",
"connection_details",
".",
"host",
",",
"connection_details",
".",
"username",
",",
"connection_details",
".",
"port",
")",
")",
"si",
"=",
"self",
".",
"get_py_service_connection",
"(",
"connection_details",
",",
"logger",
")",
"if",
"si",
":",
"logger",
".",
"info",
"(",
"CONNECTED_TO_CENTER",
".",
"format",
"(",
"connection_details",
".",
"host",
")",
")",
"command_args",
".",
"append",
"(",
"si",
")",
"self",
".",
"_try_inject_arg",
"(",
"command",
"=",
"command",
",",
"command_args",
"=",
"command_args",
",",
"arg_object",
"=",
"session",
",",
"arg_name",
"=",
"'session'",
")",
"self",
".",
"_try_inject_arg",
"(",
"command",
"=",
"command",
",",
"command_args",
"=",
"command_args",
",",
"arg_object",
"=",
"vcenter_data_model",
",",
"arg_name",
"=",
"'vcenter_data_model'",
")",
"self",
".",
"_try_inject_arg",
"(",
"command",
"=",
"command",
",",
"command_args",
"=",
"command_args",
",",
"arg_object",
"=",
"self",
".",
"_get_reservation_id",
"(",
"context",
")",
",",
"arg_name",
"=",
"'reservation_id'",
")",
"self",
".",
"_try_inject_arg",
"(",
"command",
"=",
"command",
",",
"command_args",
"=",
"command_args",
",",
"arg_object",
"=",
"logger",
",",
"arg_name",
"=",
"'logger'",
")",
"command_args",
".",
"extend",
"(",
"args",
")",
"logger",
".",
"info",
"(",
"EXECUTING_COMMAND",
".",
"format",
"(",
"command_name",
")",
")",
"logger",
".",
"debug",
"(",
"DEBUG_COMMAND_PARAMS",
".",
"format",
"(",
"COMMA",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"command_args",
"]",
")",
")",
")",
"results",
"=",
"command",
"(",
"*",
"tuple",
"(",
"command_args",
")",
")",
"logger",
".",
"info",
"(",
"FINISHED_EXECUTING_COMMAND",
".",
"format",
"(",
"command_name",
")",
")",
"logger",
".",
"debug",
"(",
"DEBUG_COMMAND_RESULT",
".",
"format",
"(",
"str",
"(",
"results",
")",
")",
")",
"return",
"results",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"exception",
"(",
"COMMAND_ERROR",
".",
"format",
"(",
"command_name",
")",
")",
"logger",
".",
"exception",
"(",
"str",
"(",
"type",
"(",
"ex",
")",
")",
"+",
"': '",
"+",
"str",
"(",
"ex",
")",
")",
"raise",
"finally",
":",
"logger",
".",
"info",
"(",
"LOG_FORMAT",
".",
"format",
"(",
"END",
",",
"command_name",
")",
")"
] |
Note: session & vcenter_data_model & reservation id objects will be injected dynamically to the command
:param command:
:param context: instance of ResourceCommandContext or AutoLoadCommandContext
:type context: cloudshell.shell.core.context.ResourceCommandContext
:param args:
|
[
"Note",
":",
"session",
"&",
"vcenter_data_model",
"&",
"reservation",
"id",
"objects",
"will",
"be",
"injected",
"dynamically",
"to",
"the",
"command",
":",
"param",
"command",
":",
":",
"param",
"context",
":",
"instance",
"of",
"ResourceCommandContext",
"or",
"AutoLoadCommandContext",
":",
"type",
"context",
":",
"cloudshell",
".",
"shell",
".",
"core",
".",
"context",
".",
"ResourceCommandContext",
":",
"param",
"args",
":"
] |
python
|
train
| 45.804878 |
BeyondTheClouds/enoslib
|
enoslib/infra/enos_openstack/provider.py
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_openstack/provider.py#L208-L230
|
def wait_for_servers(session, servers):
"""Wait for the servers to be ready.
Note(msimonin): we don't guarantee the SSH connection to be ready.
"""
nclient = nova.Client(NOVA_VERSION, session=session,
region_name=os.environ['OS_REGION_NAME'])
while True:
deployed = []
undeployed = []
for server in servers:
c = nclient.servers.get(server.id)
if c.addresses != {} and c.status == 'ACTIVE':
deployed.append(server)
if c.status == 'ERROR':
undeployed.append(server)
logger.info("[nova]: Polling the Deployment")
logger.info("[nova]: %s deployed servers" % len(deployed))
logger.info("[nova]: %s undeployed servers" % len(undeployed))
if len(deployed) + len(undeployed) >= len(servers):
break
time.sleep(3)
return deployed, undeployed
|
[
"def",
"wait_for_servers",
"(",
"session",
",",
"servers",
")",
":",
"nclient",
"=",
"nova",
".",
"Client",
"(",
"NOVA_VERSION",
",",
"session",
"=",
"session",
",",
"region_name",
"=",
"os",
".",
"environ",
"[",
"'OS_REGION_NAME'",
"]",
")",
"while",
"True",
":",
"deployed",
"=",
"[",
"]",
"undeployed",
"=",
"[",
"]",
"for",
"server",
"in",
"servers",
":",
"c",
"=",
"nclient",
".",
"servers",
".",
"get",
"(",
"server",
".",
"id",
")",
"if",
"c",
".",
"addresses",
"!=",
"{",
"}",
"and",
"c",
".",
"status",
"==",
"'ACTIVE'",
":",
"deployed",
".",
"append",
"(",
"server",
")",
"if",
"c",
".",
"status",
"==",
"'ERROR'",
":",
"undeployed",
".",
"append",
"(",
"server",
")",
"logger",
".",
"info",
"(",
"\"[nova]: Polling the Deployment\"",
")",
"logger",
".",
"info",
"(",
"\"[nova]: %s deployed servers\"",
"%",
"len",
"(",
"deployed",
")",
")",
"logger",
".",
"info",
"(",
"\"[nova]: %s undeployed servers\"",
"%",
"len",
"(",
"undeployed",
")",
")",
"if",
"len",
"(",
"deployed",
")",
"+",
"len",
"(",
"undeployed",
")",
">=",
"len",
"(",
"servers",
")",
":",
"break",
"time",
".",
"sleep",
"(",
"3",
")",
"return",
"deployed",
",",
"undeployed"
] |
Wait for the servers to be ready.
Note(msimonin): we don't guarantee the SSH connection to be ready.
|
[
"Wait",
"for",
"the",
"servers",
"to",
"be",
"ready",
"."
] |
python
|
train
| 39.26087 |
saltstack/salt
|
salt/utils/user.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/user.py#L329-L341
|
def get_group_dict(user=None, include_default=True):
'''
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
'''
if HAS_GRP is False or HAS_PWD is False:
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict
|
[
"def",
"get_group_dict",
"(",
"user",
"=",
"None",
",",
"include_default",
"=",
"True",
")",
":",
"if",
"HAS_GRP",
"is",
"False",
"or",
"HAS_PWD",
"is",
"False",
":",
"return",
"{",
"}",
"group_dict",
"=",
"{",
"}",
"group_names",
"=",
"get_group_list",
"(",
"user",
",",
"include_default",
"=",
"include_default",
")",
"for",
"group",
"in",
"group_names",
":",
"group_dict",
".",
"update",
"(",
"{",
"group",
":",
"grp",
".",
"getgrnam",
"(",
"group",
")",
".",
"gr_gid",
"}",
")",
"return",
"group_dict"
] |
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
|
[
"Returns",
"a",
"dict",
"of",
"all",
"of",
"the",
"system",
"groups",
"as",
"keys",
"and",
"group",
"ids",
"as",
"values",
"of",
"which",
"the",
"user",
"is",
"a",
"member",
".",
"E",
".",
"g",
".",
":",
"{",
"staff",
":",
"501",
"sudo",
":",
"27",
"}"
] |
python
|
train
| 36.846154 |
jilljenn/tryalgo
|
tryalgo/pq_tree.py
|
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/pq_tree.py#L73-L77
|
def add(self, node):
"""Add one node as descendant
"""
self.sons.append(node)
node.parent = self
|
[
"def",
"add",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"sons",
".",
"append",
"(",
"node",
")",
"node",
".",
"parent",
"=",
"self"
] |
Add one node as descendant
|
[
"Add",
"one",
"node",
"as",
"descendant"
] |
python
|
train
| 24.8 |
synw/dataswim
|
dataswim/data/clean.py
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L37-L50
|
def nan_empty(self, col: str):
"""
Fill empty values with NaN values
:param col: name of the column
:type col: str
:example: ``ds.nan_empty("mycol")``
"""
try:
self.df[col] = self.df[col].replace('', nan)
self.ok("Filled empty values with nan in column " + col)
except Exception as e:
self.err(e, "Can not fill empty values with nan")
|
[
"def",
"nan_empty",
"(",
"self",
",",
"col",
":",
"str",
")",
":",
"try",
":",
"self",
".",
"df",
"[",
"col",
"]",
"=",
"self",
".",
"df",
"[",
"col",
"]",
".",
"replace",
"(",
"''",
",",
"nan",
")",
"self",
".",
"ok",
"(",
"\"Filled empty values with nan in column \"",
"+",
"col",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"\"Can not fill empty values with nan\"",
")"
] |
Fill empty values with NaN values
:param col: name of the column
:type col: str
:example: ``ds.nan_empty("mycol")``
|
[
"Fill",
"empty",
"values",
"with",
"NaN",
"values"
] |
python
|
train
| 30.142857 |
jupyter-widgets/ipywidgets
|
ipywidgets/widgets/interaction.py
|
https://github.com/jupyter-widgets/ipywidgets/blob/36fe37594cd5a268def228709ca27e37b99ac606/ipywidgets/widgets/interaction.py#L341-L354
|
def widget_from_single_value(o):
"""Make widgets from single values, which can be used as parameter defaults."""
if isinstance(o, string_types):
return Text(value=unicode_type(o))
elif isinstance(o, bool):
return Checkbox(value=o)
elif isinstance(o, Integral):
min, max, value = _get_min_max_value(None, None, o)
return IntSlider(value=o, min=min, max=max)
elif isinstance(o, Real):
min, max, value = _get_min_max_value(None, None, o)
return FloatSlider(value=o, min=min, max=max)
else:
return None
|
[
"def",
"widget_from_single_value",
"(",
"o",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"string_types",
")",
":",
"return",
"Text",
"(",
"value",
"=",
"unicode_type",
"(",
"o",
")",
")",
"elif",
"isinstance",
"(",
"o",
",",
"bool",
")",
":",
"return",
"Checkbox",
"(",
"value",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"Integral",
")",
":",
"min",
",",
"max",
",",
"value",
"=",
"_get_min_max_value",
"(",
"None",
",",
"None",
",",
"o",
")",
"return",
"IntSlider",
"(",
"value",
"=",
"o",
",",
"min",
"=",
"min",
",",
"max",
"=",
"max",
")",
"elif",
"isinstance",
"(",
"o",
",",
"Real",
")",
":",
"min",
",",
"max",
",",
"value",
"=",
"_get_min_max_value",
"(",
"None",
",",
"None",
",",
"o",
")",
"return",
"FloatSlider",
"(",
"value",
"=",
"o",
",",
"min",
"=",
"min",
",",
"max",
"=",
"max",
")",
"else",
":",
"return",
"None"
] |
Make widgets from single values, which can be used as parameter defaults.
|
[
"Make",
"widgets",
"from",
"single",
"values",
"which",
"can",
"be",
"used",
"as",
"parameter",
"defaults",
"."
] |
python
|
train
| 44.071429 |
Oneiroe/PySimpleAutomata
|
PySimpleAutomata/NFA.py
|
https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/NFA.py#L102-L142
|
def nfa_union(nfa_1: dict, nfa_2: dict) -> dict:
""" Returns a NFA that reads the union of the NFAs in input.
    Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
    S_2,S_2^0,ρ_2,F_2)` be two NFAs. here is a NFA
    :math:`A_∨` that nondeterministically chooses :math:`A_1` or
    :math:`A_2` and runs it on the input word.
    It is defined as:
    :math:`A_∨ = (Σ, S, S_0 , ρ, F )`
    where:
    • :math:`S = S_1 ∪ S_2`
    • :math:`S_0 = S_1^0 ∪ S_2^0`
    • :math:`F = F_1 ∪ F_2`
    • :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
    :math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR :math:`[ s ∈
    S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`
Pay attention to avoid having the NFAs with state names in common, in case
use :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA.
:return: *(dict)* representing the united NFA.
"""
union = {
'alphabet': nfa_1['alphabet'].union(nfa_2['alphabet']),
'states': nfa_1['states'].union(nfa_2['states']),
'initial_states':
nfa_1['initial_states'].union(nfa_2['initial_states']),
'accepting_states':
nfa_1['accepting_states'].union(nfa_2['accepting_states']),
'transitions': nfa_1['transitions'].copy()}
for trans in nfa_2['transitions']:
for elem in nfa_2['transitions'][trans]:
union['transitions'].setdefault(trans, set()).add(elem)
return union
|
[
"def",
"nfa_union",
"(",
"nfa_1",
":",
"dict",
",",
"nfa_2",
":",
"dict",
")",
"->",
"dict",
":",
"union",
"=",
"{",
"'alphabet'",
":",
"nfa_1",
"[",
"'alphabet'",
"]",
".",
"union",
"(",
"nfa_2",
"[",
"'alphabet'",
"]",
")",
",",
"'states'",
":",
"nfa_1",
"[",
"'states'",
"]",
".",
"union",
"(",
"nfa_2",
"[",
"'states'",
"]",
")",
",",
"'initial_states'",
":",
"nfa_1",
"[",
"'initial_states'",
"]",
".",
"union",
"(",
"nfa_2",
"[",
"'initial_states'",
"]",
")",
",",
"'accepting_states'",
":",
"nfa_1",
"[",
"'accepting_states'",
"]",
".",
"union",
"(",
"nfa_2",
"[",
"'accepting_states'",
"]",
")",
",",
"'transitions'",
":",
"nfa_1",
"[",
"'transitions'",
"]",
".",
"copy",
"(",
")",
"}",
"for",
"trans",
"in",
"nfa_2",
"[",
"'transitions'",
"]",
":",
"for",
"elem",
"in",
"nfa_2",
"[",
"'transitions'",
"]",
"[",
"trans",
"]",
":",
"union",
"[",
"'transitions'",
"]",
".",
"setdefault",
"(",
"trans",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"elem",
")",
"return",
"union"
] |
Returns a NFA that reads the union of the NFAs in input.
Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ,
S_2,S_2^0,ρ_2,F_2)` be two NFAs. here is a NFA
:math:`A_∨` that nondeterministically chooses :math:`A_1` or
:math:`A_2` and runs it on the input word.
It is defined as:
:math:`A_∨ = (Σ, S, S_0 , ρ, F )`
where:
• :math:`S = S_1 ∪ S_2`
• :math:`S_0 = S_1^0 ∪ S_2^0`
• :math:`F = F_1 ∪ F_2`
• :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
:math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR :math:`[ s ∈
S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`
Pay attention to avoid having the NFAs with state names in common, in case
use :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.
:param dict nfa_1: first input NFA;
:param dict nfa_2: second input NFA.
:return: *(dict)* representing the united NFA.
|
[
"Returns",
"a",
"NFA",
"that",
"reads",
"the",
"union",
"of",
"the",
"NFAs",
"in",
"input",
"."
] |
python
|
train
| 35.97561 |
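A hedged usage sketch for the nfa_union record above. It assumes the function is importable from PySimpleAutomata.NFA (consistent with the record's path); the dict fields match the keys read in the record, while the (state, symbol) layout of the transition keys is inferred rather than shown, so treat it as illustrative only.

from PySimpleAutomata.NFA import nfa_union

nfa_a = {
    'alphabet': {'a'},
    'states': {'q0', 'q1'},
    'initial_states': {'q0'},
    'accepting_states': {'q1'},
    'transitions': {('q0', 'a'): {'q1'}},
}
nfa_b = {
    'alphabet': {'b'},
    'states': {'p0', 'p1'},
    'initial_states': {'p0'},
    'accepting_states': {'p1'},
    'transitions': {('p0', 'b'): {'p1'}},
}
union = nfa_union(nfa_a, nfa_b)
print(sorted(union['alphabet']))        # ['a', 'b']
print(sorted(union['initial_states']))  # ['p0', 'q0']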
GPflow/GPflow
|
gpflow/actions.py
|
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/actions.py#L335-L345
|
def with_run_kwargs(self, **kwargs: Dict[str, Any]) -> 'Optimization':
"""
Replace Tensorflow session run kwargs.
Check Tensorflow session run [documentation](https://www.tensorflow.org/api_docs/python/tf/Session).
:param kwargs: Dictionary of tensors as keys and numpy arrays or
primitive python types as values.
:return: Optimization instance self reference.
"""
self._run_kwargs = kwargs
return self
|
[
"def",
"with_run_kwargs",
"(",
"self",
",",
"*",
"*",
"kwargs",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"'Optimization'",
":",
"self",
".",
"_run_kwargs",
"=",
"kwargs",
"return",
"self"
] |
Replace Tensorflow session run kwargs.
Check Tensorflow session run [documentation](https://www.tensorflow.org/api_docs/python/tf/Session).
:param kwargs: Dictionary of tensors as keys and numpy arrays or
primitive python types as values.
:return: Optimization instance self reference.
|
[
"Replace",
"Tensorflow",
"session",
"run",
"kwargs",
".",
"Check",
"Tensorflow",
"session",
"run",
"[",
"documentation",
"]",
"(",
"https",
":",
"//",
"www",
".",
"tensorflow",
".",
"org",
"/",
"api_docs",
"/",
"python",
"/",
"tf",
"/",
"Session",
")",
"."
] |
python
|
train
| 43.727273 |
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L755-L909
|
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points. Currently limited to 10 distinct values (colors).
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
if v:
df = self.v
else:
df = self.u
if color is not None:
colormap = pd.Series(dict(zip(set(color.values),
tableau20[0:2 * len(set(color)):2])))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(zip(
set(s.values), range(30, 351)[0::50][0:len(set(s)) + 1])))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(zip(set(marker.values), markers)))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
marker = pd.Series('o', index=df.index)
marker_legend = False
if ax is None:
fig, ax = plt.subplots(1, 1)
for m in set(marker.values):
mse = marker[marker == m]
cse = color[mse.index]
sse = s[mse.index]
ax.scatter(df.ix[mse.index, pc1], df.ix[mse.index, pc2],
s=sse.values, color=list(cse.values), marker=m,
alpha=0.8)
ax.set_title('{} vs. {}'.format(pc1, pc2))
ax.set_xlabel(pc1)
ax.set_ylabel(pc2)
if color_legend:
legend_rects = make_color_legend_rects(colormap)
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index,
title=color_name,
loc='upper left',
bbox_to_anchor=(1, 1))
if s_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in smap.index:
ax.scatter([xb + 1], [yb + 1], marker='o',
s=smap[i], color='black', label=i)
lgd = ax.legend(title=s_name, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
if marker_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in markermap.index:
t = ax.scatter([xb + 1], [yb + 1], marker=markermap[i],
s=sse.min(), color='black', label=i)
handles, labels = ax.get_legend_handles_labels()
if s_legend:
handles = handles[len(smap):]
labels = labels[len(smap):]
lgd = ax.legend(handles, labels, title=marker_name,
loc='lower left', bbox_to_anchor=(1, 0))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
# fig.tight_layout()
return fig, ax
|
[
"def",
"plot_pc_scatter",
"(",
"self",
",",
"pc1",
",",
"pc2",
",",
"v",
"=",
"True",
",",
"subset",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"color",
"=",
"None",
",",
"s",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"color_name",
"=",
"None",
",",
"s_name",
"=",
"None",
",",
"marker_name",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"v",
":",
"df",
"=",
"self",
".",
"v",
"else",
":",
"df",
"=",
"self",
".",
"u",
"if",
"color",
"is",
"not",
"None",
":",
"colormap",
"=",
"pd",
".",
"Series",
"(",
"dict",
"(",
"zip",
"(",
"set",
"(",
"color",
".",
"values",
")",
",",
"tableau20",
"[",
"0",
":",
"2",
"*",
"len",
"(",
"set",
"(",
"color",
")",
")",
":",
"2",
"]",
")",
")",
")",
"color",
"=",
"pd",
".",
"Series",
"(",
"[",
"colormap",
"[",
"x",
"]",
"for",
"x",
"in",
"color",
".",
"values",
"]",
",",
"index",
"=",
"color",
".",
"index",
")",
"color_legend",
"=",
"True",
"if",
"not",
"color_name",
":",
"color_name",
"=",
"color",
".",
"index",
".",
"name",
"else",
":",
"color",
"=",
"pd",
".",
"Series",
"(",
"[",
"tableau20",
"[",
"0",
"]",
"]",
"*",
"df",
".",
"shape",
"[",
"0",
"]",
",",
"index",
"=",
"df",
".",
"index",
")",
"color_legend",
"=",
"False",
"if",
"s",
"is",
"not",
"None",
":",
"smap",
"=",
"pd",
".",
"Series",
"(",
"dict",
"(",
"zip",
"(",
"set",
"(",
"s",
".",
"values",
")",
",",
"range",
"(",
"30",
",",
"351",
")",
"[",
"0",
":",
":",
"50",
"]",
"[",
"0",
":",
"len",
"(",
"set",
"(",
"s",
")",
")",
"+",
"1",
"]",
")",
")",
")",
"s",
"=",
"pd",
".",
"Series",
"(",
"[",
"smap",
"[",
"x",
"]",
"for",
"x",
"in",
"s",
".",
"values",
"]",
",",
"index",
"=",
"s",
".",
"index",
")",
"s_legend",
"=",
"True",
"if",
"not",
"s_name",
":",
"s_name",
"=",
"s",
".",
"index",
".",
"name",
"else",
":",
"s",
"=",
"pd",
".",
"Series",
"(",
"30",
",",
"index",
"=",
"df",
".",
"index",
")",
"s_legend",
"=",
"False",
"markers",
"=",
"[",
"'o'",
",",
"'*'",
",",
"'s'",
",",
"'v'",
",",
"'+'",
",",
"'x'",
",",
"'d'",
",",
"'p'",
",",
"'2'",
",",
"'<'",
",",
"'|'",
",",
"'>'",
",",
"'_'",
",",
"'h'",
",",
"'1'",
",",
"'2'",
",",
"'3'",
",",
"'4'",
",",
"'8'",
",",
"'^'",
",",
"'D'",
"]",
"if",
"marker",
"is",
"not",
"None",
":",
"markermap",
"=",
"pd",
".",
"Series",
"(",
"dict",
"(",
"zip",
"(",
"set",
"(",
"marker",
".",
"values",
")",
",",
"markers",
")",
")",
")",
"marker",
"=",
"pd",
".",
"Series",
"(",
"[",
"markermap",
"[",
"x",
"]",
"for",
"x",
"in",
"marker",
".",
"values",
"]",
",",
"index",
"=",
"marker",
".",
"index",
")",
"marker_legend",
"=",
"True",
"if",
"not",
"marker_name",
":",
"marker_name",
"=",
"marker",
".",
"index",
".",
"name",
"else",
":",
"marker",
"=",
"pd",
".",
"Series",
"(",
"'o'",
",",
"index",
"=",
"df",
".",
"index",
")",
"marker_legend",
"=",
"False",
"if",
"ax",
"is",
"None",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
")",
"for",
"m",
"in",
"set",
"(",
"marker",
".",
"values",
")",
":",
"mse",
"=",
"marker",
"[",
"marker",
"==",
"m",
"]",
"cse",
"=",
"color",
"[",
"mse",
".",
"index",
"]",
"sse",
"=",
"s",
"[",
"mse",
".",
"index",
"]",
"ax",
".",
"scatter",
"(",
"df",
".",
"ix",
"[",
"mse",
".",
"index",
",",
"pc1",
"]",
",",
"df",
".",
"ix",
"[",
"mse",
".",
"index",
",",
"pc2",
"]",
",",
"s",
"=",
"sse",
".",
"values",
",",
"color",
"=",
"list",
"(",
"cse",
".",
"values",
")",
",",
"marker",
"=",
"m",
",",
"alpha",
"=",
"0.8",
")",
"ax",
".",
"set_title",
"(",
"'{} vs. {}'",
".",
"format",
"(",
"pc1",
",",
"pc2",
")",
")",
"ax",
".",
"set_xlabel",
"(",
"pc1",
")",
"ax",
".",
"set_ylabel",
"(",
"pc2",
")",
"if",
"color_legend",
":",
"legend_rects",
"=",
"make_color_legend_rects",
"(",
"colormap",
")",
"for",
"r",
"in",
"legend_rects",
":",
"ax",
".",
"add_patch",
"(",
"r",
")",
"lgd",
"=",
"ax",
".",
"legend",
"(",
"legend_rects",
".",
"values",
",",
"labels",
"=",
"legend_rects",
".",
"index",
",",
"title",
"=",
"color_name",
",",
"loc",
"=",
"'upper left'",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"1",
")",
")",
"if",
"s_legend",
":",
"if",
"lgd",
":",
"lgd",
"=",
"ax",
".",
"add_artist",
"(",
"lgd",
")",
"xa",
",",
"xb",
"=",
"ax",
".",
"get_xlim",
"(",
")",
"ya",
",",
"yb",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"for",
"i",
"in",
"smap",
".",
"index",
":",
"ax",
".",
"scatter",
"(",
"[",
"xb",
"+",
"1",
"]",
",",
"[",
"yb",
"+",
"1",
"]",
",",
"marker",
"=",
"'o'",
",",
"s",
"=",
"smap",
"[",
"i",
"]",
",",
"color",
"=",
"'black'",
",",
"label",
"=",
"i",
")",
"lgd",
"=",
"ax",
".",
"legend",
"(",
"title",
"=",
"s_name",
",",
"loc",
"=",
"'center left'",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"0.5",
")",
")",
"ax",
".",
"set_xlim",
"(",
"xa",
",",
"xb",
")",
"ax",
".",
"set_ylim",
"(",
"ya",
",",
"yb",
")",
"if",
"marker_legend",
":",
"if",
"lgd",
":",
"lgd",
"=",
"ax",
".",
"add_artist",
"(",
"lgd",
")",
"xa",
",",
"xb",
"=",
"ax",
".",
"get_xlim",
"(",
")",
"ya",
",",
"yb",
"=",
"ax",
".",
"get_ylim",
"(",
")",
"for",
"i",
"in",
"markermap",
".",
"index",
":",
"t",
"=",
"ax",
".",
"scatter",
"(",
"[",
"xb",
"+",
"1",
"]",
",",
"[",
"yb",
"+",
"1",
"]",
",",
"marker",
"=",
"markermap",
"[",
"i",
"]",
",",
"s",
"=",
"sse",
".",
"min",
"(",
")",
",",
"color",
"=",
"'black'",
",",
"label",
"=",
"i",
")",
"handles",
",",
"labels",
"=",
"ax",
".",
"get_legend_handles_labels",
"(",
")",
"if",
"s_legend",
":",
"handles",
"=",
"handles",
"[",
"len",
"(",
"smap",
")",
":",
"]",
"labels",
"=",
"labels",
"[",
"len",
"(",
"smap",
")",
":",
"]",
"lgd",
"=",
"ax",
".",
"legend",
"(",
"handles",
",",
"labels",
",",
"title",
"=",
"marker_name",
",",
"loc",
"=",
"'lower left'",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"0",
")",
")",
"ax",
".",
"set_xlim",
"(",
"xa",
",",
"xb",
")",
"ax",
".",
"set_ylim",
"(",
"ya",
",",
"yb",
")",
"# fig.tight_layout()",
"return",
"fig",
",",
"ax"
] |
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points. Currently limited to 10 distinct values (colors).
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
|
[
"Make",
"a",
"scatter",
"plot",
"of",
"two",
"principal",
"components",
".",
"You",
"can",
"create",
"differently",
"colored",
"sized",
"or",
"marked",
"scatter",
"points",
"."
] |
python
|
train
| 37.780645 |
apache/spark
|
python/pyspark/heapq3.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L411-L414
|
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
|
[
"def",
"heappush",
"(",
"heap",
",",
"item",
")",
":",
"heap",
".",
"append",
"(",
"item",
")",
"_siftdown",
"(",
"heap",
",",
"0",
",",
"len",
"(",
"heap",
")",
"-",
"1",
")"
] |
Push item onto heap, maintaining the heap invariant.
|
[
"Push",
"item",
"onto",
"heap",
"maintaining",
"the",
"heap",
"invariant",
"."
] |
python
|
train
| 35.75 |
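A minimal usage sketch for the heappush record above. The pyspark.heapq3 copy keeps the standard-library interface, so the example uses heapq with a plain list as the heap.

import heapq  # same interface as the pyspark.heapq3 copy shown above

heap = []
for item in [5, 1, 4, 2]:
    heapq.heappush(heap, item)
print(heap[0])  # 1 -- the smallest item sits at index 0 (heap invariant)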
bitcraze/crazyflie-lib-python
|
cflib/crazyflie/param.py
|
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/param.py#L215-L230
|
def add_update_callback(self, group=None, name=None, cb=None):
"""
Add a callback for a specific parameter name. This callback will be
executed when a new value is read from the Crazyflie.
"""
if not group and not name:
self.all_update_callback.add_callback(cb)
elif not name:
if group not in self.group_update_callbacks:
self.group_update_callbacks[group] = Caller()
self.group_update_callbacks[group].add_callback(cb)
else:
paramname = '{}.{}'.format(group, name)
if paramname not in self.param_update_callbacks:
self.param_update_callbacks[paramname] = Caller()
self.param_update_callbacks[paramname].add_callback(cb)
|
[
"def",
"add_update_callback",
"(",
"self",
",",
"group",
"=",
"None",
",",
"name",
"=",
"None",
",",
"cb",
"=",
"None",
")",
":",
"if",
"not",
"group",
"and",
"not",
"name",
":",
"self",
".",
"all_update_callback",
".",
"add_callback",
"(",
"cb",
")",
"elif",
"not",
"name",
":",
"if",
"group",
"not",
"in",
"self",
".",
"group_update_callbacks",
":",
"self",
".",
"group_update_callbacks",
"[",
"group",
"]",
"=",
"Caller",
"(",
")",
"self",
".",
"group_update_callbacks",
"[",
"group",
"]",
".",
"add_callback",
"(",
"cb",
")",
"else",
":",
"paramname",
"=",
"'{}.{}'",
".",
"format",
"(",
"group",
",",
"name",
")",
"if",
"paramname",
"not",
"in",
"self",
".",
"param_update_callbacks",
":",
"self",
".",
"param_update_callbacks",
"[",
"paramname",
"]",
"=",
"Caller",
"(",
")",
"self",
".",
"param_update_callbacks",
"[",
"paramname",
"]",
".",
"add_callback",
"(",
"cb",
")"
] |
Add a callback for a specific parameter name. This callback will be
executed when a new value is read from the Crazyflie.
|
[
"Add",
"a",
"callback",
"for",
"a",
"specific",
"parameter",
"name",
".",
"This",
"callback",
"will",
"be",
"executed",
"when",
"a",
"new",
"value",
"is",
"read",
"from",
"the",
"Crazyflie",
"."
] |
python
|
train
| 47.8125 |
jsvine/markovify
|
markovify/text.py
|
https://github.com/jsvine/markovify/blob/6968649a4c5d80f8a1b2279734417348013789e5/markovify/text.py#L150-L193
|
def make_sentence(self, init_state=None, **kwargs):
"""
Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit.
"""
tries = kwargs.get('tries', DEFAULT_TRIES)
mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
test_output = kwargs.get('test_output', True)
max_words = kwargs.get('max_words', None)
if init_state != None:
prefix = list(init_state)
for word in prefix:
if word == BEGIN:
prefix = prefix[1:]
else:
break
else:
prefix = []
for _ in range(tries):
words = prefix + self.chain.walk(init_state)
if max_words != None and len(words) > max_words:
continue
if test_output and hasattr(self, "rejoined_text"):
if self.test_sentence_output(words, mor, mot):
return self.word_join(words)
else:
return self.word_join(words)
return None
|
[
"def",
"make_sentence",
"(",
"self",
",",
"init_state",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"tries",
"=",
"kwargs",
".",
"get",
"(",
"'tries'",
",",
"DEFAULT_TRIES",
")",
"mor",
"=",
"kwargs",
".",
"get",
"(",
"'max_overlap_ratio'",
",",
"DEFAULT_MAX_OVERLAP_RATIO",
")",
"mot",
"=",
"kwargs",
".",
"get",
"(",
"'max_overlap_total'",
",",
"DEFAULT_MAX_OVERLAP_TOTAL",
")",
"test_output",
"=",
"kwargs",
".",
"get",
"(",
"'test_output'",
",",
"True",
")",
"max_words",
"=",
"kwargs",
".",
"get",
"(",
"'max_words'",
",",
"None",
")",
"if",
"init_state",
"!=",
"None",
":",
"prefix",
"=",
"list",
"(",
"init_state",
")",
"for",
"word",
"in",
"prefix",
":",
"if",
"word",
"==",
"BEGIN",
":",
"prefix",
"=",
"prefix",
"[",
"1",
":",
"]",
"else",
":",
"break",
"else",
":",
"prefix",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"tries",
")",
":",
"words",
"=",
"prefix",
"+",
"self",
".",
"chain",
".",
"walk",
"(",
"init_state",
")",
"if",
"max_words",
"!=",
"None",
"and",
"len",
"(",
"words",
")",
">",
"max_words",
":",
"continue",
"if",
"test_output",
"and",
"hasattr",
"(",
"self",
",",
"\"rejoined_text\"",
")",
":",
"if",
"self",
".",
"test_sentence_output",
"(",
"words",
",",
"mor",
",",
"mot",
")",
":",
"return",
"self",
".",
"word_join",
"(",
"words",
")",
"else",
":",
"return",
"self",
".",
"word_join",
"(",
"words",
")",
"return",
"None"
] |
Attempts `tries` (default: 10) times to generate a valid sentence,
based on the model and `test_sentence_output`. Passes `max_overlap_ratio`
and `max_overlap_total` to `test_sentence_output`.
If successful, returns the sentence as a string. If not, returns None.
If `init_state` (a tuple of `self.chain.state_size` words) is not specified,
this method chooses a sentence-start at random, in accordance with
the model.
If `test_output` is set as False then the `test_sentence_output` check
will be skipped.
If `max_words` is specified, the word count for the sentence will be
evaluated against the provided limit.
|
[
"Attempts",
"tries",
"(",
"default",
":",
"10",
")",
"times",
"to",
"generate",
"a",
"valid",
"sentence",
"based",
"on",
"the",
"model",
"and",
"test_sentence_output",
".",
"Passes",
"max_overlap_ratio",
"and",
"max_overlap_total",
"to",
"test_sentence_output",
"."
] |
python
|
train
| 39.386364 |
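A hedged usage sketch for the make_sentence record above, assuming the markovify package and a plain-text corpus string; the tries and max_words keyword arguments mirror the ones the record reads from kwargs. With a corpus this small the overlap check often rejects every attempt, so None is a normal result.

import markovify

corpus = "Some example text. More example text here. Short sentences work best."
model = markovify.Text(corpus, state_size=1)
sentence = model.make_sentence(tries=50, max_words=10)  # may return None for tiny corpora
print(sentence)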
jtwhite79/pyemu
|
pyemu/la.py
|
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/la.py#L260-L316
|
def __load_parcov(self):
"""private method to set the parcov attribute from:
a pest control file (parameter bounds)
a pst object
a matrix object
an uncert file
an ascii matrix file
"""
# if the parcov arg was not passed but the pst arg was,
# reset and use parbounds to build parcov
if not self.parcov_arg:
if self.pst_arg:
self.parcov_arg = self.pst_arg
else:
raise Exception("linear_analysis.__load_parcov(): " +
"parcov_arg is None")
if isinstance(self.parcov_arg, Matrix):
self.__parcov = self.parcov_arg
return
if isinstance(self.parcov_arg, np.ndarray):
# if the passed array is a vector,
# then assume it is the diagonal of the parcov matrix
if len(self.parcov_arg.shape) == 1:
assert self.parcov_arg.shape[0] == self.jco.shape[1]
isdiagonal = True
else:
assert self.parcov_arg.shape[0] == self.jco.shape[1]
assert self.parcov_arg.shape[1] == self.jco.shape[1]
isdiagonal = False
self.logger.warn("linear_analysis.__load_parcov(): " +
"instantiating parcov from ndarray, can't " +
"verify parameters alignment with jco")
self.__parcov = Matrix(x=self.parcov_arg,
isdiagonal=isdiagonal,
row_names=self.jco.col_names,
col_names=self.jco.col_names)
self.log("loading parcov")
if isinstance(self.parcov_arg,str):
# if the arg is a string ending with "pst"
# then load parcov from parbounds
if self.parcov_arg.lower().endswith(".pst"):
self.__parcov = Cov.from_parbounds(self.parcov_arg,
sigma_range=self.sigma_range,
scale_offset=self.scale_offset)
else:
self.__parcov = self.__fromfile(self.parcov_arg, astype=Cov)
# if the arg is a pst object
elif isinstance(self.parcov_arg,Pst):
self.__parcov = Cov.from_parameter_data(self.parcov_arg,
sigma_range=self.sigma_range,
scale_offset=self.scale_offset)
else:
raise Exception("linear_analysis.__load_parcov(): " +
"parcov_arg must be a " +
"matrix object or a file name: " +
str(self.parcov_arg))
self.log("loading parcov")
|
[
"def",
"__load_parcov",
"(",
"self",
")",
":",
"# if the parcov arg was not passed but the pst arg was,",
"# reset and use parbounds to build parcov",
"if",
"not",
"self",
".",
"parcov_arg",
":",
"if",
"self",
".",
"pst_arg",
":",
"self",
".",
"parcov_arg",
"=",
"self",
".",
"pst_arg",
"else",
":",
"raise",
"Exception",
"(",
"\"linear_analysis.__load_parcov(): \"",
"+",
"\"parcov_arg is None\"",
")",
"if",
"isinstance",
"(",
"self",
".",
"parcov_arg",
",",
"Matrix",
")",
":",
"self",
".",
"__parcov",
"=",
"self",
".",
"parcov_arg",
"return",
"if",
"isinstance",
"(",
"self",
".",
"parcov_arg",
",",
"np",
".",
"ndarray",
")",
":",
"# if the passed array is a vector,",
"# then assume it is the diagonal of the parcov matrix",
"if",
"len",
"(",
"self",
".",
"parcov_arg",
".",
"shape",
")",
"==",
"1",
":",
"assert",
"self",
".",
"parcov_arg",
".",
"shape",
"[",
"0",
"]",
"==",
"self",
".",
"jco",
".",
"shape",
"[",
"1",
"]",
"isdiagonal",
"=",
"True",
"else",
":",
"assert",
"self",
".",
"parcov_arg",
".",
"shape",
"[",
"0",
"]",
"==",
"self",
".",
"jco",
".",
"shape",
"[",
"1",
"]",
"assert",
"self",
".",
"parcov_arg",
".",
"shape",
"[",
"1",
"]",
"==",
"self",
".",
"jco",
".",
"shape",
"[",
"1",
"]",
"isdiagonal",
"=",
"False",
"self",
".",
"logger",
".",
"warn",
"(",
"\"linear_analysis.__load_parcov(): \"",
"+",
"\"instantiating parcov from ndarray, can't \"",
"+",
"\"verify parameters alignment with jco\"",
")",
"self",
".",
"__parcov",
"=",
"Matrix",
"(",
"x",
"=",
"self",
".",
"parcov_arg",
",",
"isdiagonal",
"=",
"isdiagonal",
",",
"row_names",
"=",
"self",
".",
"jco",
".",
"col_names",
",",
"col_names",
"=",
"self",
".",
"jco",
".",
"col_names",
")",
"self",
".",
"log",
"(",
"\"loading parcov\"",
")",
"if",
"isinstance",
"(",
"self",
".",
"parcov_arg",
",",
"str",
")",
":",
"# if the arg is a string ending with \"pst\"",
"# then load parcov from parbounds",
"if",
"self",
".",
"parcov_arg",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".pst\"",
")",
":",
"self",
".",
"__parcov",
"=",
"Cov",
".",
"from_parbounds",
"(",
"self",
".",
"parcov_arg",
",",
"sigma_range",
"=",
"self",
".",
"sigma_range",
",",
"scale_offset",
"=",
"self",
".",
"scale_offset",
")",
"else",
":",
"self",
".",
"__parcov",
"=",
"self",
".",
"__fromfile",
"(",
"self",
".",
"parcov_arg",
",",
"astype",
"=",
"Cov",
")",
"# if the arg is a pst object",
"elif",
"isinstance",
"(",
"self",
".",
"parcov_arg",
",",
"Pst",
")",
":",
"self",
".",
"__parcov",
"=",
"Cov",
".",
"from_parameter_data",
"(",
"self",
".",
"parcov_arg",
",",
"sigma_range",
"=",
"self",
".",
"sigma_range",
",",
"scale_offset",
"=",
"self",
".",
"scale_offset",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"linear_analysis.__load_parcov(): \"",
"+",
"\"parcov_arg must be a \"",
"+",
"\"matrix object or a file name: \"",
"+",
"str",
"(",
"self",
".",
"parcov_arg",
")",
")",
"self",
".",
"log",
"(",
"\"loading parcov\"",
")"
] |
private method to set the parcov attribute from:
a pest control file (parameter bounds)
a pst object
a matrix object
an uncert file
an ascii matrix file
|
[
"private",
"method",
"to",
"set",
"the",
"parcov",
"attribute",
"from",
":",
"a",
"pest",
"control",
"file",
"(",
"parameter",
"bounds",
")",
"a",
"pst",
"object",
"a",
"matrix",
"object",
"an",
"uncert",
"file",
"an",
"ascii",
"matrix",
"file"
] |
python
|
train
| 49.842105 |
mandiant/ioc_writer
|
ioc_writer/ioc_api.py
|
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_api.py#L575-L583
|
def write_ioc_to_file(self, output_dir=None, force=False):
"""
Serialize the IOC to a .ioc file.
:param output_dir: Directory to write the ioc out to. default is the current working directory.
:param force: If specified, will not validate the root node of the IOC is 'OpenIOC'.
:return:
"""
return write_ioc(self.root, output_dir, force=force)
|
[
"def",
"write_ioc_to_file",
"(",
"self",
",",
"output_dir",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"return",
"write_ioc",
"(",
"self",
".",
"root",
",",
"output_dir",
",",
"force",
"=",
"force",
")"
] |
Serialize the IOC to a .ioc file.
:param output_dir: Directory to write the ioc out to. default is the current working directory.
:param force: If specified, will not validate the root node of the IOC is 'OpenIOC'.
:return:
|
[
"Serialize",
"the",
"IOC",
"to",
"a",
".",
"ioc",
"file",
"."
] |
python
|
train
| 43.666667 |
jmbhughes/suvi-trainer
|
suvitrainer/fileio.py
|
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/fileio.py#L36-L41
|
def get_dates_file(path):
""" parse dates file of dates and probability of choosing"""
with open(path) as f:
dates = f.readlines()
return [(convert_time_string(date_string.split(" ")[0]), float(date_string.split(" ")[1]))
for date_string in dates]
|
[
"def",
"get_dates_file",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"dates",
"=",
"f",
".",
"readlines",
"(",
")",
"return",
"[",
"(",
"convert_time_string",
"(",
"date_string",
".",
"split",
"(",
"\" \"",
")",
"[",
"0",
"]",
")",
",",
"float",
"(",
"date_string",
".",
"split",
"(",
"\" \"",
")",
"[",
"1",
"]",
")",
")",
"for",
"date_string",
"in",
"dates",
"]"
] |
parse dates file of dates and probability of choosing
|
[
"parse",
"dates",
"file",
"of",
"dates",
"and",
"probability",
"of",
"choosing"
] |
python
|
train
| 45.666667 |
timothyhahn/rui
|
rui/rui.py
|
https://github.com/timothyhahn/rui/blob/ac9f587fb486760d77332866c6e876f78a810f74/rui/rui.py#L215-L223
|
def set_tag(self, tag):
'''
Sets the tag.
If the Entity belongs to the world it will check for tag conflicts.
'''
if self._world:
if self._world.get_entity_by_tag(tag):
raise NonUniqueTagError(tag)
self._tag = tag
|
[
"def",
"set_tag",
"(",
"self",
",",
"tag",
")",
":",
"if",
"self",
".",
"_world",
":",
"if",
"self",
".",
"_world",
".",
"get_entity_by_tag",
"(",
"tag",
")",
":",
"raise",
"NonUniqueTagError",
"(",
"tag",
")",
"self",
".",
"_tag",
"=",
"tag"
] |
Sets the tag.
If the Entity belongs to the world it will check for tag conflicts.
|
[
"Sets",
"the",
"tag",
".",
"If",
"the",
"Entity",
"belongs",
"to",
"the",
"world",
"it",
"will",
"check",
"for",
"tag",
"conflicts",
"."
] |
python
|
train
| 31.222222 |
quantopian/zipline
|
zipline/finance/asset_restrictions.py
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/asset_restrictions.py#L143-L152
|
def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set)
)
|
[
"def",
"is_restricted",
"(",
"self",
",",
"assets",
",",
"dt",
")",
":",
"if",
"isinstance",
"(",
"assets",
",",
"Asset",
")",
":",
"return",
"assets",
"in",
"self",
".",
"_restricted_set",
"return",
"pd",
".",
"Series",
"(",
"index",
"=",
"pd",
".",
"Index",
"(",
"assets",
")",
",",
"data",
"=",
"vectorized_is_element",
"(",
"assets",
",",
"self",
".",
"_restricted_set",
")",
")"
] |
An asset is restricted for all dts if it is in the static list.
|
[
"An",
"asset",
"is",
"restricted",
"for",
"all",
"dts",
"if",
"it",
"is",
"in",
"the",
"static",
"list",
"."
] |
python
|
train
| 35.2 |
pysal/spglm
|
spglm/links.py
|
https://github.com/pysal/spglm/blob/1339898adcb7e1638f1da83d57aa37392525f018/spglm/links.py#L149-L169
|
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
|
[
"def",
"inverse",
"(",
"self",
",",
"z",
")",
":",
"z",
"=",
"np",
".",
"asarray",
"(",
"z",
")",
"t",
"=",
"np",
".",
"exp",
"(",
"-",
"z",
")",
"return",
"1.",
"/",
"(",
"1.",
"+",
"t",
")"
] |
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
|
[
"Inverse",
"of",
"the",
"logit",
"transform"
] |
python
|
train
| 19.047619 |
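A quick numerical check of the inverse-logit formula g^(-1)(z) = exp(z)/(1+exp(z)) from the record above, written with plain numpy instead of the spglm link class.

import numpy as np

def inv_logit(z):
    # Same form used in the record: 1 / (1 + exp(-z))
    z = np.asarray(z)
    return 1.0 / (1.0 + np.exp(-z))

print(inv_logit(0.0))          # 0.5
print(inv_logit([-2.0, 2.0]))  # approximately [0.1192 0.8808]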
apache/spark
|
python/pyspark/shuffle.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L709-L766
|
def _spill(self):
"""
dump already partitioned data into disks.
"""
global MemoryBytesSpilled, DiskBytesSpilled
path = self._get_spill_dir(self.spills)
if not os.path.exists(path):
os.makedirs(path)
used_memory = get_used_memory()
if not self.pdata:
# The data has not been partitioned, it will iterator the
# data once, write them into different files, has no
# additional memory. It only called when the memory goes
# above limit at the first time.
# open all the files for writing
streams = [open(os.path.join(path, str(i)), 'wb')
for i in range(self.partitions)]
# If the number of keys is small, then the overhead of sort is small
# sort them before dumping into disks
self._sorted = len(self.data) < self.SORT_KEY_LIMIT
if self._sorted:
self.serializer = self.flattened_serializer()
for k in sorted(self.data.keys()):
h = self._partition(k)
self.serializer.dump_stream([(k, self.data[k])], streams[h])
else:
for k, v in self.data.items():
h = self._partition(k)
self.serializer.dump_stream([(k, v)], streams[h])
for s in streams:
DiskBytesSpilled += s.tell()
s.close()
self.data.clear()
# self.pdata is cached in `mergeValues` and `mergeCombiners`
self.pdata.extend([{} for i in range(self.partitions)])
else:
for i in range(self.partitions):
p = os.path.join(path, str(i))
with open(p, "wb") as f:
# dump items in batch
if self._sorted:
# sort by key only (stable)
sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0))
self.serializer.dump_stream(sorted_items, f)
else:
self.serializer.dump_stream(self.pdata[i].items(), f)
self.pdata[i].clear()
DiskBytesSpilled += os.path.getsize(p)
self.spills += 1
gc.collect() # release the memory as much as possible
MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
|
[
"def",
"_spill",
"(",
"self",
")",
":",
"global",
"MemoryBytesSpilled",
",",
"DiskBytesSpilled",
"path",
"=",
"self",
".",
"_get_spill_dir",
"(",
"self",
".",
"spills",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"used_memory",
"=",
"get_used_memory",
"(",
")",
"if",
"not",
"self",
".",
"pdata",
":",
"# The data has not been partitioned, it will iterator the",
"# data once, write them into different files, has no",
"# additional memory. It only called when the memory goes",
"# above limit at the first time.",
"# open all the files for writing",
"streams",
"=",
"[",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"i",
")",
")",
",",
"'wb'",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
"]",
"# If the number of keys is small, then the overhead of sort is small",
"# sort them before dumping into disks",
"self",
".",
"_sorted",
"=",
"len",
"(",
"self",
".",
"data",
")",
"<",
"self",
".",
"SORT_KEY_LIMIT",
"if",
"self",
".",
"_sorted",
":",
"self",
".",
"serializer",
"=",
"self",
".",
"flattened_serializer",
"(",
")",
"for",
"k",
"in",
"sorted",
"(",
"self",
".",
"data",
".",
"keys",
"(",
")",
")",
":",
"h",
"=",
"self",
".",
"_partition",
"(",
"k",
")",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"[",
"(",
"k",
",",
"self",
".",
"data",
"[",
"k",
"]",
")",
"]",
",",
"streams",
"[",
"h",
"]",
")",
"else",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"data",
".",
"items",
"(",
")",
":",
"h",
"=",
"self",
".",
"_partition",
"(",
"k",
")",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"[",
"(",
"k",
",",
"v",
")",
"]",
",",
"streams",
"[",
"h",
"]",
")",
"for",
"s",
"in",
"streams",
":",
"DiskBytesSpilled",
"+=",
"s",
".",
"tell",
"(",
")",
"s",
".",
"close",
"(",
")",
"self",
".",
"data",
".",
"clear",
"(",
")",
"# self.pdata is cached in `mergeValues` and `mergeCombiners`",
"self",
".",
"pdata",
".",
"extend",
"(",
"[",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"i",
")",
")",
"with",
"open",
"(",
"p",
",",
"\"wb\"",
")",
"as",
"f",
":",
"# dump items in batch",
"if",
"self",
".",
"_sorted",
":",
"# sort by key only (stable)",
"sorted_items",
"=",
"sorted",
"(",
"self",
".",
"pdata",
"[",
"i",
"]",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"sorted_items",
",",
"f",
")",
"else",
":",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"self",
".",
"pdata",
"[",
"i",
"]",
".",
"items",
"(",
")",
",",
"f",
")",
"self",
".",
"pdata",
"[",
"i",
"]",
".",
"clear",
"(",
")",
"DiskBytesSpilled",
"+=",
"os",
".",
"path",
".",
"getsize",
"(",
"p",
")",
"self",
".",
"spills",
"+=",
"1",
"gc",
".",
"collect",
"(",
")",
"# release the memory as much as possible",
"MemoryBytesSpilled",
"+=",
"max",
"(",
"used_memory",
"-",
"get_used_memory",
"(",
")",
",",
"0",
")",
"<<",
"20"
] |
dump already partitioned data into disks.
|
[
"dump",
"already",
"partitioned",
"data",
"into",
"disks",
"."
] |
python
|
train
| 41.431034 |
dcaune/perseus-lib-python-common
|
majormode/perseus/utils/key_util.py
|
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/utils/key_util.py#L115-L134
|
def key_to_int(key, base=BASE62):
"""
Convert the following key to an integer.
@param key: a key.
@param base: a sequence of characters that was used to encode the
integer value.
@return: the integer value corresponding to the given key.
@raise ValueError: if one character of the specified key doesn't match
any character of the specified base.
"""
base_length = len(base)
value = 0
for c in reversed(key):
value = (value * base_length) + base.index(c)
return value
|
[
"def",
"key_to_int",
"(",
"key",
",",
"base",
"=",
"BASE62",
")",
":",
"base_length",
"=",
"len",
"(",
"base",
")",
"value",
"=",
"0",
"for",
"c",
"in",
"reversed",
"(",
"key",
")",
":",
"value",
"=",
"(",
"value",
"*",
"base_length",
")",
"+",
"base",
".",
"index",
"(",
"c",
")",
"return",
"value"
] |
Convert the following key to an integer.
@param key: a key.
@param base: a sequence of characters that was used to encode the
integer value.
@return: the integer value corresponding to the given key.
@raise ValueError: if one character of the specified key doesn't match
any character of the specified base.
|
[
"Convert",
"the",
"following",
"key",
"to",
"an",
"integer",
"."
] |
python
|
train
| 26.3 |
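A self-contained round-trip sketch for the key_to_int record above. The library's actual BASE62 alphabet ordering is not shown in the record, so the example assumes digits, then lowercase, then uppercase, purely for illustration; note that because the function walks reversed(key), the first character of the key is the least-significant digit.

import string

BASE62 = string.digits + string.ascii_lowercase + string.ascii_uppercase  # assumed ordering

def key_to_int(key, base=BASE62):
    base_length = len(base)
    value = 0
    for c in reversed(key):
        value = (value * base_length) + base.index(c)
    return value

print(key_to_int("z"))   # 35 under the assumed alphabet
print(key_to_int("01"))  # 62: the leading '0' is the least-significant digit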
lukaskubis/crayon
|
crayon.py
|
https://github.com/lukaskubis/crayon/blob/7b6926000e08ad029049419b564e34bc735d0e6c/crayon.py#L84-L88
|
def indent(txt, spacing=4):
"""
Indent given text using custom spacing, default is set to 4.
"""
return prefix(str(txt), ''.join([' ' for _ in range(spacing)]))
|
[
"def",
"indent",
"(",
"txt",
",",
"spacing",
"=",
"4",
")",
":",
"return",
"prefix",
"(",
"str",
"(",
"txt",
")",
",",
"''",
".",
"join",
"(",
"[",
"' '",
"for",
"_",
"in",
"range",
"(",
"spacing",
")",
"]",
")",
")"
] |
Indent given text using custom spacing, default is set to 4.
|
[
"Indent",
"given",
"text",
"using",
"custom",
"spacing",
"default",
"is",
"set",
"to",
"4",
"."
] |
python
|
train
| 34.4 |
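The indent record above delegates to a prefix() helper that is not shown; here is a self-contained approximation using the standard library, under the assumption that prefix() prepends the spacing to every line of the text.

import textwrap

def indent(txt, spacing=4):
    # Approximation of the record's helper: prepend `spacing` spaces to each line.
    return textwrap.indent(str(txt), " " * spacing)

print(indent("alpha\nbeta", 2))
# Output:
#   alpha
#   beta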
Autodesk/pyccc
|
pyccc/docker_utils.py
|
https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/docker_utils.py#L45-L68
|
def create_build_context(image, inputs, wdir):
"""
Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory
"""
assert os.path.isabs(wdir)
dockerlines = ["FROM %s" % image,
"RUN mkdir -p %s" % wdir]
build_context = {}
# This loop creates a Build Context for building the provisioned image
# We create a tar archive to be added to the root of the image filesystem
if inputs:
dockerlines.append('COPY root /')
for ifile, (path, obj) in enumerate(inputs.items()):
if not os.path.isabs(path):
path = os.path.join(wdir, path)
assert path[0] == '/'
build_context['root' + path] = obj
dockerstring = '\n'.join(dockerlines)
build_context['Dockerfile'] = pyccc.BytesContainer(dockerstring.encode('utf-8'))
return build_context
|
[
"def",
"create_build_context",
"(",
"image",
",",
"inputs",
",",
"wdir",
")",
":",
"assert",
"os",
".",
"path",
".",
"isabs",
"(",
"wdir",
")",
"dockerlines",
"=",
"[",
"\"FROM %s\"",
"%",
"image",
",",
"\"RUN mkdir -p %s\"",
"%",
"wdir",
"]",
"build_context",
"=",
"{",
"}",
"# This loop creates a Build Context for building the provisioned image",
"# We create a tar archive to be added to the root of the image filesystem",
"if",
"inputs",
":",
"dockerlines",
".",
"append",
"(",
"'COPY root /'",
")",
"for",
"ifile",
",",
"(",
"path",
",",
"obj",
")",
"in",
"enumerate",
"(",
"inputs",
".",
"items",
"(",
")",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"wdir",
",",
"path",
")",
"assert",
"path",
"[",
"0",
"]",
"==",
"'/'",
"build_context",
"[",
"'root'",
"+",
"path",
"]",
"=",
"obj",
"dockerstring",
"=",
"'\\n'",
".",
"join",
"(",
"dockerlines",
")",
"build_context",
"[",
"'Dockerfile'",
"]",
"=",
"pyccc",
".",
"BytesContainer",
"(",
"dockerstring",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"build_context"
] |
Creates a tar archive with a dockerfile and a directory called "inputs"
The Dockerfile will copy the "inputs" directory to the chosen working directory
|
[
"Creates",
"a",
"tar",
"archive",
"with",
"a",
"dockerfile",
"and",
"a",
"directory",
"called",
"inputs",
"The",
"Dockerfile",
"will",
"copy",
"the",
"inputs",
"directory",
"to",
"the",
"chosen",
"working",
"directory"
] |
python
|
train
| 38.833333 |
mk-fg/feedjack
|
feedjack/filters.py
|
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/filters.py#L83-L92
|
def pick_enclosure_link(post, parameter=''):
'''Override URL of the Post to point to url of the first enclosure with
href attribute non-empty and type matching specified regexp parameter (empty=any).
Missing "type" attribute for enclosure will be matched as an empty string.
If none of the enclosures match, link won't be updated.'''
for e in (post.enclosures or list()):
href = e.get('href')
if not href: continue
if parameter and not re.search(parameter, e.get('type', '')): continue
return dict(link=href)
|
[
"def",
"pick_enclosure_link",
"(",
"post",
",",
"parameter",
"=",
"''",
")",
":",
"for",
"e",
"in",
"(",
"post",
".",
"enclosures",
"or",
"list",
"(",
")",
")",
":",
"href",
"=",
"e",
".",
"get",
"(",
"'href'",
")",
"if",
"not",
"href",
":",
"continue",
"if",
"parameter",
"and",
"not",
"re",
".",
"search",
"(",
"parameter",
",",
"e",
".",
"get",
"(",
"'type'",
",",
"''",
")",
")",
":",
"continue",
"return",
"dict",
"(",
"link",
"=",
"href",
")"
] |
Override URL of the Post to point to url of the first enclosure with
href attribute non-empty and type matching specified regexp parameter (empty=any).
Missing "type" attribute for enclosure will be matched as an empty string.
If none of the enclosures match, link won't be updated.
|
[
"Override",
"URL",
"of",
"the",
"Post",
"to",
"point",
"to",
"url",
"of",
"the",
"first",
"enclosure",
"with",
"href",
"attribute",
"non",
"-",
"empty",
"and",
"type",
"matching",
"specified",
"regexp",
"parameter",
"(",
"empty",
"=",
"any",
")",
".",
"Missing",
"type",
"attribute",
"for",
"enclosure",
"will",
"be",
"matched",
"as",
"an",
"empty",
"string",
".",
"If",
"none",
"of",
"the",
"enclosures",
"match",
"link",
"won",
"t",
"be",
"updated",
"."
] |
python
|
train
| 51.6 |
sensu-plugins/sensu-plugin-python
|
sensu_plugin/utils.py
|
https://github.com/sensu-plugins/sensu-plugin-python/blob/bd43a5ea4d191e5e63494c8679aab02ac072d9ed/sensu_plugin/utils.py#L37-L46
|
def get_settings():
'''
Get all currently loaded settings.
'''
settings = {}
for config_file in config_files():
config_contents = load_config(config_file)
if config_contents is not None:
settings = deep_merge(settings, config_contents)
return settings
|
[
"def",
"get_settings",
"(",
")",
":",
"settings",
"=",
"{",
"}",
"for",
"config_file",
"in",
"config_files",
"(",
")",
":",
"config_contents",
"=",
"load_config",
"(",
"config_file",
")",
"if",
"config_contents",
"is",
"not",
"None",
":",
"settings",
"=",
"deep_merge",
"(",
"settings",
",",
"config_contents",
")",
"return",
"settings"
] |
Get all currently loaded settings.
|
[
"Get",
"all",
"currently",
"loaded",
"settings",
"."
] |
python
|
train
| 29.4 |
aio-libs/yarl
|
yarl/__init__.py
|
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L332-L345
|
def is_default_port(self):
"""A check for default port.
Return True if port is default for specified scheme,
e.g. 'http://python.org' or 'http://python.org:80', False
otherwise.
"""
if self.port is None:
return False
default = DEFAULT_PORTS.get(self.scheme)
if default is None:
return False
return self.port == default
|
[
"def",
"is_default_port",
"(",
"self",
")",
":",
"if",
"self",
".",
"port",
"is",
"None",
":",
"return",
"False",
"default",
"=",
"DEFAULT_PORTS",
".",
"get",
"(",
"self",
".",
"scheme",
")",
"if",
"default",
"is",
"None",
":",
"return",
"False",
"return",
"self",
".",
"port",
"==",
"default"
] |
A check for default port.
Return True if port is default for specified scheme,
e.g. 'http://python.org' or 'http://python.org:80', False
otherwise.
|
[
"A",
"check",
"for",
"default",
"port",
"."
] |
python
|
train
| 28.785714 |
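A hedged usage sketch for the is_default_port record above. In the yarl version shown it is a method, so the example calls it as such; later yarl releases may expose this differently.

from yarl import URL

print(URL("http://python.org/").is_default_port())       # True  (port 80 is implied for http)
print(URL("http://python.org:8080/").is_default_port())  # False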
nugget/python-insteonplm
|
insteonplm/address.py
|
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L124-L129
|
def bytes(self):
"""Emit the address in bytes format."""
addrbyte = b'\x00\x00\x00'
if self.addr is not None:
addrbyte = self.addr
return addrbyte
|
[
"def",
"bytes",
"(",
"self",
")",
":",
"addrbyte",
"=",
"b'\\x00\\x00\\x00'",
"if",
"self",
".",
"addr",
"is",
"not",
"None",
":",
"addrbyte",
"=",
"self",
".",
"addr",
"return",
"addrbyte"
] |
Emit the address in bytes format.
|
[
"Emit",
"the",
"address",
"in",
"bytes",
"format",
"."
] |
python
|
train
| 30.833333 |
saltstack/salt
|
salt/fileclient.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L1371-L1377
|
def envs(self):
'''
Return a list of available environments
'''
load = {'cmd': '_file_envs'}
return salt.utils.data.decode(self.channel.send(load)) if six.PY2 \
else self.channel.send(load)
|
[
"def",
"envs",
"(",
"self",
")",
":",
"load",
"=",
"{",
"'cmd'",
":",
"'_file_envs'",
"}",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"self",
".",
"channel",
".",
"send",
"(",
"load",
")",
")",
"if",
"six",
".",
"PY2",
"else",
"self",
".",
"channel",
".",
"send",
"(",
"load",
")"
] |
Return a list of available environments
|
[
"Return",
"a",
"list",
"of",
"available",
"environments"
] |
python
|
train
| 33.571429 |
ausaki/subfinder
|
subfinder/utils.py
|
https://github.com/ausaki/subfinder/blob/b74b79214f618c603a551b9334ebb110ccf9684c/subfinder/utils.py#L11-L25
|
def rm_subtitles(path):
""" delete all subtitles in path recursively
"""
sub_exts = ['ass', 'srt', 'sub']
count = 0
for root, dirs, files in os.walk(path):
for f in files:
_, ext = os.path.splitext(f)
ext = ext[1:]
if ext in sub_exts:
p = os.path.join(root, f)
count += 1
print('Delete {}'.format(p))
os.remove(p)
return count
|
[
"def",
"rm_subtitles",
"(",
"path",
")",
":",
"sub_exts",
"=",
"[",
"'ass'",
",",
"'srt'",
",",
"'sub'",
"]",
"count",
"=",
"0",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"f",
"in",
"files",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"f",
")",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"if",
"ext",
"in",
"sub_exts",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"count",
"+=",
"1",
"print",
"(",
"'Delete {}'",
".",
"format",
"(",
"p",
")",
")",
"os",
".",
"remove",
"(",
"p",
")",
"return",
"count"
] |
delete all subtitles in path recursively
|
[
"delete",
"all",
"subtitles",
"in",
"path",
"recursively"
] |
python
|
train
| 29.6 |
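A minimal usage sketch for the rm_subtitles record above, assuming the function is importable from subfinder.utils (matching the record's path). It builds a throwaway directory so the walk-and-delete behaviour can be observed without touching real files; the function also prints a "Delete ..." line for each removed subtitle.

import os
import tempfile

from subfinder.utils import rm_subtitles  # assumed import path

root = tempfile.mkdtemp()
open(os.path.join(root, "movie.srt"), "w").close()
open(os.path.join(root, "movie.mkv"), "w").close()
deleted = rm_subtitles(root)
print(deleted)  # 1 -- only the .srt file is removed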
neurosynth/neurosynth
|
neurosynth/base/transformations.py
|
https://github.com/neurosynth/neurosynth/blob/948ce7edce15d7df693446e76834e0c23bfe8f11/neurosynth/base/transformations.py#L59-L70
|
def apply(self, name, foci):
""" Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
"""
if name in self.transformations:
return transform(foci, self.transformations[name])
else:
logger.info(
"No transformation named '%s' found; coordinates left "
"untransformed." % name)
return foci
|
[
"def",
"apply",
"(",
"self",
",",
"name",
",",
"foci",
")",
":",
"if",
"name",
"in",
"self",
".",
"transformations",
":",
"return",
"transform",
"(",
"foci",
",",
"self",
".",
"transformations",
"[",
"name",
"]",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"No transformation named '%s' found; coordinates left \"",
"\"untransformed.\"",
"%",
"name",
")",
"return",
"foci"
] |
Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
|
[
"Apply",
"a",
"named",
"transformation",
"to",
"a",
"set",
"of",
"foci",
"."
] |
python
|
test
| 37.25 |
JdeRobot/base
|
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
|
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11763-L11773
|
def gps_inject_data_send(self, target_system, target_component, len, data, force_mavlink1=False):
'''
data for injecting into the onboard GPS (used for DGPS)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
len : data length (uint8_t)
data : raw data (110 is enough for 12 satellites of RTCMv2) (uint8_t)
'''
return self.send(self.gps_inject_data_encode(target_system, target_component, len, data), force_mavlink1=force_mavlink1)
|
[
"def",
"gps_inject_data_send",
"(",
"self",
",",
"target_system",
",",
"target_component",
",",
"len",
",",
"data",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"gps_inject_data_encode",
"(",
"target_system",
",",
"target_component",
",",
"len",
",",
"data",
")",
",",
"force_mavlink1",
"=",
"force_mavlink1",
")"
] |
data for injecting into the onboard GPS (used for DGPS)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
len : data length (uint8_t)
data : raw data (110 is enough for 12 satellites of RTCMv2) (uint8_t)
|
[
"data",
"for",
"injecting",
"into",
"the",
"onboard",
"GPS",
"(",
"used",
"for",
"DGPS",
")"
] |
python
|
train
| 58.363636 |
mar10/wsgidav
|
wsgidav/dc/simple_dc.py
|
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/dc/simple_dc.py#L119-L127
|
def basic_auth_user(self, realm, user_name, password, environ):
"""Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication."""
user = self._get_realm_entry(realm, user_name)
if user is not None and password == user.get("password"):
environ["wsgidav.auth.roles"] = user.get("roles", [])
return True
return False
|
[
"def",
"basic_auth_user",
"(",
"self",
",",
"realm",
",",
"user_name",
",",
"password",
",",
"environ",
")",
":",
"user",
"=",
"self",
".",
"_get_realm_entry",
"(",
"realm",
",",
"user_name",
")",
"if",
"user",
"is",
"not",
"None",
"and",
"password",
"==",
"user",
".",
"get",
"(",
"\"password\"",
")",
":",
"environ",
"[",
"\"wsgidav.auth.roles\"",
"]",
"=",
"user",
".",
"get",
"(",
"\"roles\"",
",",
"[",
"]",
")",
"return",
"True",
"return",
"False"
] |
Returns True if this user_name/password pair is valid for the realm,
False otherwise. Used for basic authentication.
|
[
"Returns",
"True",
"if",
"this",
"user_name",
"/",
"password",
"pair",
"is",
"valid",
"for",
"the",
"realm",
"False",
"otherwise",
".",
"Used",
"for",
"basic",
"authentication",
"."
] |
python
|
valid
| 47.444444 |
oscarbranson/latools
|
Supplement/comparison_tools/plots.py
|
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/Supplement/comparison_tools/plots.py#L19-L27
|
def rangecalc(x, y=None, pad=0.05):
"""
Calculate padded range limits for axes.
"""
mn = np.nanmin([np.nanmin(x), np.nanmin(y)])
mx = np.nanmax([np.nanmax(x), np.nanmax(y)])
rn = mx - mn
return (mn - pad * rn, mx + pad * rn)
|
[
"def",
"rangecalc",
"(",
"x",
",",
"y",
"=",
"None",
",",
"pad",
"=",
"0.05",
")",
":",
"mn",
"=",
"np",
".",
"nanmin",
"(",
"[",
"np",
".",
"nanmin",
"(",
"x",
")",
",",
"np",
".",
"nanmin",
"(",
"y",
")",
"]",
")",
"mx",
"=",
"np",
".",
"nanmax",
"(",
"[",
"np",
".",
"nanmax",
"(",
"x",
")",
",",
"np",
".",
"nanmax",
"(",
"y",
")",
"]",
")",
"rn",
"=",
"mx",
"-",
"mn",
"return",
"(",
"mn",
"-",
"pad",
"*",
"rn",
",",
"mx",
"+",
"pad",
"*",
"rn",
")"
] |
Calculate padded range limits for axes.
|
[
"Calculate",
"padded",
"range",
"limits",
"for",
"axes",
"."
] |
python
|
test
| 28.555556 |
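A quick check of the rangecalc record above, with the function copied verbatim minus its docstring. Despite the y=None default, the function requires y (np.nanmin(None) would fail), so both arrays are passed here.

import numpy as np

def rangecalc(x, y=None, pad=0.05):
    mn = np.nanmin([np.nanmin(x), np.nanmin(y)])
    mx = np.nanmax([np.nanmax(x), np.nanmax(y)])
    rn = mx - mn
    return (mn - pad * rn, mx + pad * rn)

lo, hi = rangecalc([0, 10], [5, 20])
print(lo, hi)  # -1.0 21.0 -- 5% padding on the combined 0-20 span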
kyuupichan/aiorpcX
|
aiorpcx/util.py
|
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/util.py#L93-L97
|
def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower()
|
[
"def",
"validate_protocol",
"(",
"protocol",
")",
":",
"if",
"not",
"re",
".",
"match",
"(",
"PROTOCOL_REGEX",
",",
"protocol",
")",
":",
"raise",
"ValueError",
"(",
"f'invalid protocol: {protocol}'",
")",
"return",
"protocol",
".",
"lower",
"(",
")"
] |
Validate a protocol, a string, and return it.
|
[
"Validate",
"a",
"protocol",
"a",
"string",
"and",
"return",
"it",
"."
] |
python
|
train
| 43.4 |
minio/minio-py
|
minio/api.py
|
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L239-L304
|
def make_bucket(self, bucket_name, location='us-east-1'):
"""
Make a new bucket on the server.
Optionally include Location.
['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-west-1',
'eu-west-2', 'ca-central-1', 'eu-central-1', 'sa-east-1',
'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-1', 'ap-northeast-2']
Examples:
minio.make_bucket('foo')
minio.make_bucket('foo', 'us-west-1')
:param bucket_name: Bucket to create on server
:param location: Location to create bucket on
"""
is_valid_bucket_name(bucket_name)
# Default region for all requests.
region = 'us-east-1'
if self._region:
region = self._region
# Validate if caller requested bucket location is same as current region
if self._region != location:
raise InvalidArgumentError("Configured region {0}, requested"
" {1}".format(self._region,
location))
method = 'PUT'
# Set user agent once before the request.
headers = {'User-Agent': self._user_agent}
content = None
if location and location != 'us-east-1':
content = xml_marshal_bucket_constraint(location)
headers['Content-Length'] = str(len(content))
content_sha256_hex = get_sha256_hexdigest(content)
if content:
headers['Content-Md5'] = get_md5_base64digest(content)
# In case of Amazon S3. The make bucket issued on already
# existing bucket would fail with 'AuthorizationMalformed'
# error if virtual style is used. So we default to 'path
# style' as that is the preferred method here. The final
# location of the 'bucket' is provided through XML
# LocationConstraint data with the request.
# Construct target url.
url = self._endpoint_url + '/' + bucket_name + '/'
# Get signature headers if any.
headers = sign_v4(method, url, region,
headers, self._access_key,
self._secret_key,
self._session_token,
content_sha256_hex)
response = self._http.urlopen(method, url,
body=content,
headers=headers)
if response.status != 200:
raise ResponseError(response, method, bucket_name).get_exception()
self._set_bucket_region(bucket_name, region=location)
|
[
"def",
"make_bucket",
"(",
"self",
",",
"bucket_name",
",",
"location",
"=",
"'us-east-1'",
")",
":",
"is_valid_bucket_name",
"(",
"bucket_name",
")",
"# Default region for all requests.",
"region",
"=",
"'us-east-1'",
"if",
"self",
".",
"_region",
":",
"region",
"=",
"self",
".",
"_region",
"# Validate if caller requested bucket location is same as current region",
"if",
"self",
".",
"_region",
"!=",
"location",
":",
"raise",
"InvalidArgumentError",
"(",
"\"Configured region {0}, requested\"",
"\" {1}\"",
".",
"format",
"(",
"self",
".",
"_region",
",",
"location",
")",
")",
"method",
"=",
"'PUT'",
"# Set user agent once before the request.",
"headers",
"=",
"{",
"'User-Agent'",
":",
"self",
".",
"_user_agent",
"}",
"content",
"=",
"None",
"if",
"location",
"and",
"location",
"!=",
"'us-east-1'",
":",
"content",
"=",
"xml_marshal_bucket_constraint",
"(",
"location",
")",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"str",
"(",
"len",
"(",
"content",
")",
")",
"content_sha256_hex",
"=",
"get_sha256_hexdigest",
"(",
"content",
")",
"if",
"content",
":",
"headers",
"[",
"'Content-Md5'",
"]",
"=",
"get_md5_base64digest",
"(",
"content",
")",
"# In case of Amazon S3. The make bucket issued on already",
"# existing bucket would fail with 'AuthorizationMalformed'",
"# error if virtual style is used. So we default to 'path",
"# style' as that is the preferred method here. The final",
"# location of the 'bucket' is provided through XML",
"# LocationConstraint data with the request.",
"# Construct target url.",
"url",
"=",
"self",
".",
"_endpoint_url",
"+",
"'/'",
"+",
"bucket_name",
"+",
"'/'",
"# Get signature headers if any.",
"headers",
"=",
"sign_v4",
"(",
"method",
",",
"url",
",",
"region",
",",
"headers",
",",
"self",
".",
"_access_key",
",",
"self",
".",
"_secret_key",
",",
"self",
".",
"_session_token",
",",
"content_sha256_hex",
")",
"response",
"=",
"self",
".",
"_http",
".",
"urlopen",
"(",
"method",
",",
"url",
",",
"body",
"=",
"content",
",",
"headers",
"=",
"headers",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"raise",
"ResponseError",
"(",
"response",
",",
"method",
",",
"bucket_name",
")",
".",
"get_exception",
"(",
")",
"self",
".",
"_set_bucket_region",
"(",
"bucket_name",
",",
"region",
"=",
"location",
")"
] |
Make a new bucket on the server.
Optionally include Location.
['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-west-1',
'eu-west-2', 'ca-central-1', 'eu-central-1', 'sa-east-1',
'cn-north-1', 'ap-southeast-1', 'ap-southeast-2',
'ap-northeast-1', 'ap-northeast-2']
Examples:
minio.make_bucket('foo')
minio.make_bucket('foo', 'us-west-1')
:param bucket_name: Bucket to create on server
:param location: Location to create bucket on
|
[
"Make",
"a",
"new",
"bucket",
"on",
"the",
"server",
"."
] |
python
|
train
| 39.878788 |
lorien/grab
|
grab/script/crawl.py
|
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/script/crawl.py#L57-L69
|
def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
out.write(b'\n'.join(lines) + b'\n')
|
[
"def",
"save_list",
"(",
"lst",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"out",
":",
"lines",
"=",
"[",
"]",
"for",
"item",
"in",
"lst",
":",
"if",
"isinstance",
"(",
"item",
",",
"(",
"six",
".",
"text_type",
",",
"six",
".",
"binary_type",
")",
")",
":",
"lines",
".",
"append",
"(",
"make_str",
"(",
"item",
")",
")",
"else",
":",
"lines",
".",
"append",
"(",
"make_str",
"(",
"json",
".",
"dumps",
"(",
"item",
")",
")",
")",
"out",
".",
"write",
"(",
"b'\\n'",
".",
"join",
"(",
"lines",
")",
"+",
"b'\\n'",
")"
] |
Save items from list to the file.
|
[
"Save",
"items",
"from",
"list",
"to",
"the",
"file",
"."
] |
python
|
train
| 29.076923 |
DataONEorg/d1_python
|
lib_common/src/d1_common/multipart.py
|
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/multipart.py#L90-L103
|
def is_multipart(header_dict):
"""
Args:
header_dict : CaseInsensitiveDict
Returns:
bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with
value that begins with 'multipart'.
"""
return (
{k.lower(): v for k, v in header_dict.items()}
.get('content-type', '')
.startswith('multipart')
)
|
[
"def",
"is_multipart",
"(",
"header_dict",
")",
":",
"return",
"(",
"{",
"k",
".",
"lower",
"(",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"header_dict",
".",
"items",
"(",
")",
"}",
".",
"get",
"(",
"'content-type'",
",",
"''",
")",
".",
"startswith",
"(",
"'multipart'",
")",
")"
] |
Args:
header_dict : CaseInsensitiveDict
Returns:
bool: ``True`` if ``header_dict`` has a Content-Type key (case insensitive) with
value that begins with 'multipart'.
|
[
"Args",
":",
"header_dict",
":",
"CaseInsensitiveDict"
] |
python
|
train
| 25.285714 |
dfm/george
|
george/utils.py
|
https://github.com/dfm/george/blob/44819680036387625ee89f81c55104f3c1600759/george/utils.py#L36-L56
|
def nd_sort_samples(samples):
"""
Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version.
"""
# Check the shape of the sample list.
assert len(samples.shape) == 2
# Build a KD-tree on the samples.
tree = cKDTree(samples)
# Compute the distances.
d, i = tree.query(samples[0], k=len(samples))
return i
|
[
"def",
"nd_sort_samples",
"(",
"samples",
")",
":",
"# Check the shape of the sample list.",
"assert",
"len",
"(",
"samples",
".",
"shape",
")",
"==",
"2",
"# Build a KD-tree on the samples.",
"tree",
"=",
"cKDTree",
"(",
"samples",
")",
"# Compute the distances.",
"d",
",",
"i",
"=",
"tree",
".",
"query",
"(",
"samples",
"[",
"0",
"]",
",",
"k",
"=",
"len",
"(",
"samples",
")",
")",
"return",
"i"
] |
Sort an N-dimensional list of samples using a KDTree.
:param samples: ``(nsamples, ndim)``
The list of samples. This must be a two-dimensional array.
:returns i: ``(nsamples,)``
The list of indices into the original array that return the correctly
sorted version.
|
[
"Sort",
"an",
"N",
"-",
"dimensional",
"list",
"of",
"samples",
"using",
"a",
"KDTree",
"."
] |
python
|
train
| 26.904762 |
bwohlberg/sporco
|
sporco/dictlrn/prlcnscdl.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/prlcnscdl.py#L704-L711
|
def cbpdnmd_ustep(k):
"""Do the U step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
mp_Z_U0[k] += mp_DX[k] - mp_Z_Y0[k] - mp_S[k]
mp_Z_U1[k] += mp_Z_X[k] - mp_Z_Y1[k]
|
[
"def",
"cbpdnmd_ustep",
"(",
"k",
")",
":",
"mp_Z_U0",
"[",
"k",
"]",
"+=",
"mp_DX",
"[",
"k",
"]",
"-",
"mp_Z_Y0",
"[",
"k",
"]",
"-",
"mp_S",
"[",
"k",
"]",
"mp_Z_U1",
"[",
"k",
"]",
"+=",
"mp_Z_X",
"[",
"k",
"]",
"-",
"mp_Z_Y1",
"[",
"k",
"]"
] |
Do the U step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
|
[
"Do",
"the",
"U",
"step",
"of",
"the",
"cbpdn",
"stage",
".",
"The",
"only",
"parameter",
"is",
"the",
"slice",
"index",
"k",
"and",
"there",
"are",
"no",
"return",
"values",
";",
"all",
"inputs",
"and",
"outputs",
"are",
"from",
"and",
"to",
"global",
"variables",
"."
] |
python
|
train
| 36.75 |
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_map/mp_slipmap_ui.py
|
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/mp_slipmap_ui.py#L387-L391
|
def set_ground_width(self, ground_width):
'''set ground width of view'''
state = self.state
state.ground_width = ground_width
state.panel.re_center(state.width/2, state.height/2, state.lat, state.lon)
|
[
"def",
"set_ground_width",
"(",
"self",
",",
"ground_width",
")",
":",
"state",
"=",
"self",
".",
"state",
"state",
".",
"ground_width",
"=",
"ground_width",
"state",
".",
"panel",
".",
"re_center",
"(",
"state",
".",
"width",
"/",
"2",
",",
"state",
".",
"height",
"/",
"2",
",",
"state",
".",
"lat",
",",
"state",
".",
"lon",
")"
] |
set ground width of view
|
[
"set",
"ground",
"width",
"of",
"view"
] |
python
|
train
| 45.6 |
EntilZha/PyFunctional
|
functional/execution.py
|
https://github.com/EntilZha/PyFunctional/blob/ac04e4a8552b0c464a7f492f7c9862424867b63e/functional/execution.py#L48-L72
|
def evaluate(self, sequence, transformations):
"""
Execute the sequence of transformations in parallel
:param sequence: Sequence to evaluation
:param transformations: Transformations to apply
:return: Resulting sequence or value
"""
result = sequence
parallel = partial(
parallelize, processes=self.processes, partition_size=self.partition_size)
staged = []
for transform in transformations:
strategies = transform.execution_strategies or {}
if ExecutionStrategies.PARALLEL in strategies:
staged.insert(0, transform.function)
else:
if staged:
result = parallel(compose(*staged), result)
staged = []
if ExecutionStrategies.PRE_COMPUTE in strategies:
result = list(result)
result = transform.function(result)
if staged:
result = parallel(compose(*staged), result)
return iter(result)
|
[
"def",
"evaluate",
"(",
"self",
",",
"sequence",
",",
"transformations",
")",
":",
"result",
"=",
"sequence",
"parallel",
"=",
"partial",
"(",
"parallelize",
",",
"processes",
"=",
"self",
".",
"processes",
",",
"partition_size",
"=",
"self",
".",
"partition_size",
")",
"staged",
"=",
"[",
"]",
"for",
"transform",
"in",
"transformations",
":",
"strategies",
"=",
"transform",
".",
"execution_strategies",
"or",
"{",
"}",
"if",
"ExecutionStrategies",
".",
"PARALLEL",
"in",
"strategies",
":",
"staged",
".",
"insert",
"(",
"0",
",",
"transform",
".",
"function",
")",
"else",
":",
"if",
"staged",
":",
"result",
"=",
"parallel",
"(",
"compose",
"(",
"*",
"staged",
")",
",",
"result",
")",
"staged",
"=",
"[",
"]",
"if",
"ExecutionStrategies",
".",
"PRE_COMPUTE",
"in",
"strategies",
":",
"result",
"=",
"list",
"(",
"result",
")",
"result",
"=",
"transform",
".",
"function",
"(",
"result",
")",
"if",
"staged",
":",
"result",
"=",
"parallel",
"(",
"compose",
"(",
"*",
"staged",
")",
",",
"result",
")",
"return",
"iter",
"(",
"result",
")"
] |
Execute the sequence of transformations in parallel
:param sequence: Sequence to evaluation
:param transformations: Transformations to apply
:return: Resulting sequence or value
|
[
"Execute",
"the",
"sequence",
"of",
"transformations",
"in",
"parallel",
":",
"param",
"sequence",
":",
"Sequence",
"to",
"evaluation",
":",
"param",
"transformations",
":",
"Transformations",
"to",
"apply",
":",
"return",
":",
"Resulting",
"sequence",
"or",
"value"
] |
python
|
train
| 41.48 |
glomex/gcdt
|
gcdt/kumo_start_stop.py
|
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/kumo_start_stop.py#L287-L307
|
def _get_autoscaling_min_max(template, parameters, asg_name):
"""Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
"""
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
asg = template.get('Resources', {}).get(asg_name, None)
if asg:
assert asg['Type'] == 'AWS::AutoScaling::AutoScalingGroup'
min = asg.get('Properties', {}).get('MinSize', None)
max = asg.get('Properties', {}).get('MaxSize', None)
if 'Ref' in min:
min = params.get(min['Ref'], None)
if 'Ref' in max:
max = params.get(max['Ref'], None)
if min and max:
return int(min), int(max)
|
[
"def",
"_get_autoscaling_min_max",
"(",
"template",
",",
"parameters",
",",
"asg_name",
")",
":",
"params",
"=",
"{",
"e",
"[",
"'ParameterKey'",
"]",
":",
"e",
"[",
"'ParameterValue'",
"]",
"for",
"e",
"in",
"parameters",
"}",
"asg",
"=",
"template",
".",
"get",
"(",
"'Resources'",
",",
"{",
"}",
")",
".",
"get",
"(",
"asg_name",
",",
"None",
")",
"if",
"asg",
":",
"assert",
"asg",
"[",
"'Type'",
"]",
"==",
"'AWS::AutoScaling::AutoScalingGroup'",
"min",
"=",
"asg",
".",
"get",
"(",
"'Properties'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'MinSize'",
",",
"None",
")",
"max",
"=",
"asg",
".",
"get",
"(",
"'Properties'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'MaxSize'",
",",
"None",
")",
"if",
"'Ref'",
"in",
"min",
":",
"min",
"=",
"params",
".",
"get",
"(",
"min",
"[",
"'Ref'",
"]",
",",
"None",
")",
"if",
"'Ref'",
"in",
"max",
":",
"max",
"=",
"params",
".",
"get",
"(",
"max",
"[",
"'Ref'",
"]",
",",
"None",
")",
"if",
"min",
"and",
"max",
":",
"return",
"int",
"(",
"min",
")",
",",
"int",
"(",
"max",
")"
] |
Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
|
[
"Helper",
"to",
"extract",
"the",
"configured",
"MinSize",
"MaxSize",
"attributes",
"from",
"the",
"template",
"."
] |
python
|
train
| 43.333333 |
miguelgrinberg/python-socketio
|
socketio/packet.py
|
https://github.com/miguelgrinberg/python-socketio/blob/c0c1bf8d21e3597389b18938550a0724dd9676b7/socketio/packet.py#L76-L114
|
def decode(self, encoded_packet):
"""Decode a transmitted package.
The return value indicates how many binary attachment packets are
necessary to fully decode the packet.
"""
ep = encoded_packet
try:
self.packet_type = int(ep[0:1])
except TypeError:
self.packet_type = ep
ep = ''
self.namespace = None
self.data = None
ep = ep[1:]
dash = ep.find('-')
attachment_count = 0
if dash > 0 and ep[0:dash].isdigit():
attachment_count = int(ep[0:dash])
ep = ep[dash + 1:]
if ep and ep[0:1] == '/':
sep = ep.find(',')
if sep == -1:
self.namespace = ep
ep = ''
else:
self.namespace = ep[0:sep]
ep = ep[sep + 1:]
q = self.namespace.find('?')
if q != -1:
self.namespace = self.namespace[0:q]
if ep and ep[0].isdigit():
self.id = 0
while ep and ep[0].isdigit():
self.id = self.id * 10 + int(ep[0])
ep = ep[1:]
if ep:
self.data = self.json.loads(ep)
return attachment_count
|
[
"def",
"decode",
"(",
"self",
",",
"encoded_packet",
")",
":",
"ep",
"=",
"encoded_packet",
"try",
":",
"self",
".",
"packet_type",
"=",
"int",
"(",
"ep",
"[",
"0",
":",
"1",
"]",
")",
"except",
"TypeError",
":",
"self",
".",
"packet_type",
"=",
"ep",
"ep",
"=",
"''",
"self",
".",
"namespace",
"=",
"None",
"self",
".",
"data",
"=",
"None",
"ep",
"=",
"ep",
"[",
"1",
":",
"]",
"dash",
"=",
"ep",
".",
"find",
"(",
"'-'",
")",
"attachment_count",
"=",
"0",
"if",
"dash",
">",
"0",
"and",
"ep",
"[",
"0",
":",
"dash",
"]",
".",
"isdigit",
"(",
")",
":",
"attachment_count",
"=",
"int",
"(",
"ep",
"[",
"0",
":",
"dash",
"]",
")",
"ep",
"=",
"ep",
"[",
"dash",
"+",
"1",
":",
"]",
"if",
"ep",
"and",
"ep",
"[",
"0",
":",
"1",
"]",
"==",
"'/'",
":",
"sep",
"=",
"ep",
".",
"find",
"(",
"','",
")",
"if",
"sep",
"==",
"-",
"1",
":",
"self",
".",
"namespace",
"=",
"ep",
"ep",
"=",
"''",
"else",
":",
"self",
".",
"namespace",
"=",
"ep",
"[",
"0",
":",
"sep",
"]",
"ep",
"=",
"ep",
"[",
"sep",
"+",
"1",
":",
"]",
"q",
"=",
"self",
".",
"namespace",
".",
"find",
"(",
"'?'",
")",
"if",
"q",
"!=",
"-",
"1",
":",
"self",
".",
"namespace",
"=",
"self",
".",
"namespace",
"[",
"0",
":",
"q",
"]",
"if",
"ep",
"and",
"ep",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"id",
"=",
"0",
"while",
"ep",
"and",
"ep",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"id",
"=",
"self",
".",
"id",
"*",
"10",
"+",
"int",
"(",
"ep",
"[",
"0",
"]",
")",
"ep",
"=",
"ep",
"[",
"1",
":",
"]",
"if",
"ep",
":",
"self",
".",
"data",
"=",
"self",
".",
"json",
".",
"loads",
"(",
"ep",
")",
"return",
"attachment_count"
] |
Decode a transmitted package.
The return value indicates how many binary attachment packets are
necessary to fully decode the packet.
|
[
"Decode",
"a",
"transmitted",
"package",
"."
] |
python
|
train
| 31.435897 |
sebp/scikit-survival
|
sksurv/svm/survival_svm.py
|
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/svm/survival_svm.py#L1000-L1028
|
def predict(self, X):
"""Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
"""
kernel_mat = self._get_kernel(X, self.fit_X_)
val = numpy.dot(kernel_mat, self.coef_)
if hasattr(self, "intercept_"):
val += self.intercept_
# Order by increasing survival time if objective is pure ranking
if self.rank_ratio == 1:
val *= -1
else:
# model was fitted on log(time), transform to original scale
val = numpy.exp(val)
return val
|
[
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"kernel_mat",
"=",
"self",
".",
"_get_kernel",
"(",
"X",
",",
"self",
".",
"fit_X_",
")",
"val",
"=",
"numpy",
".",
"dot",
"(",
"kernel_mat",
",",
"self",
".",
"coef_",
")",
"if",
"hasattr",
"(",
"self",
",",
"\"intercept_\"",
")",
":",
"val",
"+=",
"self",
".",
"intercept_",
"# Order by increasing survival time if objective is pure ranking",
"if",
"self",
".",
"rank_ratio",
"==",
"1",
":",
"val",
"*=",
"-",
"1",
"else",
":",
"# model was fitted on log(time), transform to original scale",
"val",
"=",
"numpy",
".",
"exp",
"(",
"val",
")",
"return",
"val"
] |
Rank samples according to survival times
Lower ranks indicate shorter survival, higher ranks longer survival.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape = (n_samples,)
Predicted ranks.
|
[
"Rank",
"samples",
"according",
"to",
"survival",
"times"
] |
python
|
train
| 28 |
kennell/schiene
|
schiene/schiene.py
|
https://github.com/kennell/schiene/blob/a8f1ba2bd30f9f4a373c7b0ced589bd60121aa1f/schiene/schiene.py#L40-L46
|
def parse_stations(html):
"""
Strips JS code, loads JSON
"""
html = html.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '')
html = json.loads(html)
return html['suggestions']
|
[
"def",
"parse_stations",
"(",
"html",
")",
":",
"html",
"=",
"html",
".",
"replace",
"(",
"'SLs.sls='",
",",
"''",
")",
".",
"replace",
"(",
"';SLs.showSuggestion();'",
",",
"''",
")",
"html",
"=",
"json",
".",
"loads",
"(",
"html",
")",
"return",
"html",
"[",
"'suggestions'",
"]"
] |
Strips JS code, loads JSON
|
[
"Strips",
"JS",
"code",
"loads",
"JSON"
] |
python
|
train
| 29.571429 |
csparpa/pyowm
|
pyowm/stationsapi30/stations_manager.py
|
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/stationsapi30/stations_manager.py#L39-L51
|
def get_stations(self):
"""
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
"""
status, data = self.http_client.get_json(
STATIONS_URI,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return [self.stations_parser.parse_dict(item) for item in data]
|
[
"def",
"get_stations",
"(",
"self",
")",
":",
"status",
",",
"data",
"=",
"self",
".",
"http_client",
".",
"get_json",
"(",
"STATIONS_URI",
",",
"params",
"=",
"{",
"'appid'",
":",
"self",
".",
"API_key",
"}",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")",
"return",
"[",
"self",
".",
"stations_parser",
".",
"parse_dict",
"(",
"item",
")",
"for",
"item",
"in",
"data",
"]"
] |
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
|
[
"Retrieves",
"all",
"of",
"the",
"user",
"s",
"stations",
"registered",
"on",
"the",
"Stations",
"API",
"."
] |
python
|
train
| 33.615385 |
monarch-initiative/dipper
|
dipper/sources/KEGG.py
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L137-L173
|
def parse(self, limit=None):
"""
:param limit:
:return:
"""
if limit is not None:
LOG.info("Only parsing first %s rows fo each file", str(limit))
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
self._process_diseases(limit)
self._process_genes(limit)
self._process_genes_kegg2ncbi(limit)
self._process_omim2gene(limit)
self._process_omim2disease(limit)
self._process_kegg_disease2gene(limit)
self._process_pathways(limit)
self._process_pathway_pubmed(limit)
# self._process_pathway_pathway(limit)
self._process_pathway_disease(limit)
self._process_pathway_ko(limit)
self._process_ortholog_classes(limit)
# TODO add in when refactoring for #141
# for f in ['hsa_orthologs', 'mmu_orthologs', 'rno_orthologs',
# 'dme_orthologs','dre_orthologs','cel_orthologs']:
# file = '/'.join((self.rawdir, self.files[f]['file']))
# self._process_orthologs(file, limit) # DONE #
LOG.info("Finished parsing")
return
|
[
"def",
"parse",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"limit",
"is",
"not",
"None",
":",
"LOG",
".",
"info",
"(",
"\"Only parsing first %s rows fo each file\"",
",",
"str",
"(",
"limit",
")",
")",
"LOG",
".",
"info",
"(",
"\"Parsing files...\"",
")",
"if",
"self",
".",
"test_only",
":",
"self",
".",
"test_mode",
"=",
"True",
"self",
".",
"_process_diseases",
"(",
"limit",
")",
"self",
".",
"_process_genes",
"(",
"limit",
")",
"self",
".",
"_process_genes_kegg2ncbi",
"(",
"limit",
")",
"self",
".",
"_process_omim2gene",
"(",
"limit",
")",
"self",
".",
"_process_omim2disease",
"(",
"limit",
")",
"self",
".",
"_process_kegg_disease2gene",
"(",
"limit",
")",
"self",
".",
"_process_pathways",
"(",
"limit",
")",
"self",
".",
"_process_pathway_pubmed",
"(",
"limit",
")",
"# self._process_pathway_pathway(limit)",
"self",
".",
"_process_pathway_disease",
"(",
"limit",
")",
"self",
".",
"_process_pathway_ko",
"(",
"limit",
")",
"self",
".",
"_process_ortholog_classes",
"(",
"limit",
")",
"# TODO add in when refactoring for #141",
"# for f in ['hsa_orthologs', 'mmu_orthologs', 'rno_orthologs',",
"# 'dme_orthologs','dre_orthologs','cel_orthologs']:",
"# file = '/'.join((self.rawdir, self.files[f]['file']))",
"# self._process_orthologs(file, limit) # DONE #",
"LOG",
".",
"info",
"(",
"\"Finished parsing\"",
")",
"return"
] |
:param limit:
:return:
|
[
":",
"param",
"limit",
":",
":",
"return",
":"
] |
python
|
train
| 30.891892 |
google/transitfeed
|
transitfeed/problems.py
|
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L268-L272
|
def InvalidLineEnd(self, bad_line_end, context=None, type=TYPE_WARNING):
"""bad_line_end is a human readable string."""
e = InvalidLineEnd(bad_line_end=bad_line_end, context=context,
context2=self._context, type=type)
self.AddToAccumulator(e)
|
[
"def",
"InvalidLineEnd",
"(",
"self",
",",
"bad_line_end",
",",
"context",
"=",
"None",
",",
"type",
"=",
"TYPE_WARNING",
")",
":",
"e",
"=",
"InvalidLineEnd",
"(",
"bad_line_end",
"=",
"bad_line_end",
",",
"context",
"=",
"context",
",",
"context2",
"=",
"self",
".",
"_context",
",",
"type",
"=",
"type",
")",
"self",
".",
"AddToAccumulator",
"(",
"e",
")"
] |
bad_line_end is a human readable string.
|
[
"bad_line_end",
"is",
"a",
"human",
"readable",
"string",
"."
] |
python
|
train
| 54.6 |
pypa/pipenv
|
pipenv/environment.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L278-L290
|
def find_egg(self, egg_dist):
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = "{0}.egg-link".format(egg_dist.project_name)
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg
|
[
"def",
"find_egg",
"(",
"self",
",",
"egg_dist",
")",
":",
"site_packages",
"=",
"self",
".",
"libdir",
"[",
"1",
"]",
"search_filename",
"=",
"\"{0}.egg-link\"",
".",
"format",
"(",
"egg_dist",
".",
"project_name",
")",
"try",
":",
"user_site",
"=",
"site",
".",
"getusersitepackages",
"(",
")",
"except",
"AttributeError",
":",
"user_site",
"=",
"site",
".",
"USER_SITE",
"search_locations",
"=",
"[",
"site_packages",
",",
"user_site",
"]",
"for",
"site_directory",
"in",
"search_locations",
":",
"egg",
"=",
"os",
".",
"path",
".",
"join",
"(",
"site_directory",
",",
"search_filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"egg",
")",
":",
"return",
"egg"
] |
Find an egg by name in the given environment
|
[
"Find",
"an",
"egg",
"by",
"name",
"in",
"the",
"given",
"environment"
] |
python
|
train
| 42.230769 |
dmlc/gluon-nlp
|
src/gluonnlp/model/transformer.py
|
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L905-L932
|
def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None):
"""Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional
"""
mem_value = encoder_outputs
decoder_states = [mem_value]
mem_length = mem_value.shape[1]
if encoder_valid_length is not None:
dtype = encoder_valid_length.dtype
ctx = encoder_valid_length.context
mem_masks = mx.nd.broadcast_lesser(
mx.nd.arange(mem_length, ctx=ctx, dtype=dtype).reshape((1, -1)),
encoder_valid_length.reshape((-1, 1)))
decoder_states.append(mem_masks)
self._encoder_valid_length = encoder_valid_length
return decoder_states
|
[
"def",
"init_state_from_encoder",
"(",
"self",
",",
"encoder_outputs",
",",
"encoder_valid_length",
"=",
"None",
")",
":",
"mem_value",
"=",
"encoder_outputs",
"decoder_states",
"=",
"[",
"mem_value",
"]",
"mem_length",
"=",
"mem_value",
".",
"shape",
"[",
"1",
"]",
"if",
"encoder_valid_length",
"is",
"not",
"None",
":",
"dtype",
"=",
"encoder_valid_length",
".",
"dtype",
"ctx",
"=",
"encoder_valid_length",
".",
"context",
"mem_masks",
"=",
"mx",
".",
"nd",
".",
"broadcast_lesser",
"(",
"mx",
".",
"nd",
".",
"arange",
"(",
"mem_length",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"encoder_valid_length",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
"decoder_states",
".",
"append",
"(",
"mem_masks",
")",
"self",
".",
"_encoder_valid_length",
"=",
"encoder_valid_length",
"return",
"decoder_states"
] |
Initialize the state from the encoder outputs.
Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None
Returns
-------
decoder_states : list
The decoder states, includes:
- mem_value : NDArray
- mem_masks : NDArray, optional
|
[
"Initialize",
"the",
"state",
"from",
"the",
"encoder",
"outputs",
"."
] |
python
|
train
| 35.392857 |
log2timeline/dfvfs
|
dfvfs/path/encrypted_stream_path_spec.py
|
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/path/encrypted_stream_path_spec.py#L52-L74
|
def comparable(self):
"""str: comparable representation of the path specification."""
string_parts = []
if self.cipher_mode:
string_parts.append('cipher_mode: {0:s}'.format(self.cipher_mode))
if self.encryption_method:
string_parts.append('encryption_method: {0:s}'.format(
self.encryption_method))
if self.initialization_vector:
initialization_vector = codecs.encode(self.initialization_vector, 'hex')
initialization_vector = initialization_vector.decode('ascii')
string_parts.append('initialization_vector: {0:s}'.format(
initialization_vector))
if self.key:
key = codecs.encode(self.key, 'hex')
key = key.decode('ascii')
string_parts.append('key: {0:s}'.format(key))
return self._GetComparable(sub_comparable_string=', '.join(string_parts))
|
[
"def",
"comparable",
"(",
"self",
")",
":",
"string_parts",
"=",
"[",
"]",
"if",
"self",
".",
"cipher_mode",
":",
"string_parts",
".",
"append",
"(",
"'cipher_mode: {0:s}'",
".",
"format",
"(",
"self",
".",
"cipher_mode",
")",
")",
"if",
"self",
".",
"encryption_method",
":",
"string_parts",
".",
"append",
"(",
"'encryption_method: {0:s}'",
".",
"format",
"(",
"self",
".",
"encryption_method",
")",
")",
"if",
"self",
".",
"initialization_vector",
":",
"initialization_vector",
"=",
"codecs",
".",
"encode",
"(",
"self",
".",
"initialization_vector",
",",
"'hex'",
")",
"initialization_vector",
"=",
"initialization_vector",
".",
"decode",
"(",
"'ascii'",
")",
"string_parts",
".",
"append",
"(",
"'initialization_vector: {0:s}'",
".",
"format",
"(",
"initialization_vector",
")",
")",
"if",
"self",
".",
"key",
":",
"key",
"=",
"codecs",
".",
"encode",
"(",
"self",
".",
"key",
",",
"'hex'",
")",
"key",
"=",
"key",
".",
"decode",
"(",
"'ascii'",
")",
"string_parts",
".",
"append",
"(",
"'key: {0:s}'",
".",
"format",
"(",
"key",
")",
")",
"return",
"self",
".",
"_GetComparable",
"(",
"sub_comparable_string",
"=",
"', '",
".",
"join",
"(",
"string_parts",
")",
")"
] |
str: comparable representation of the path specification.
|
[
"str",
":",
"comparable",
"representation",
"of",
"the",
"path",
"specification",
"."
] |
python
|
train
| 35.73913 |
crccheck/cloudwatch-to-graphite
|
leadbutt.py
|
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/leadbutt.py#L51-L67
|
def get_config(config_file):
"""Get configuration from a file."""
def load(fp):
try:
return yaml.safe_load(fp)
except yaml.YAMLError as e:
sys.stderr.write(text_type(e))
sys.exit(1) # TODO document exit codes
if config_file == '-':
return load(sys.stdin)
if not os.path.exists(config_file):
sys.stderr.write('ERROR: Must either run next to config.yaml or'
' specify a config file.\n' + __doc__)
sys.exit(2)
with open(config_file) as fp:
return load(fp)
|
[
"def",
"get_config",
"(",
"config_file",
")",
":",
"def",
"load",
"(",
"fp",
")",
":",
"try",
":",
"return",
"yaml",
".",
"safe_load",
"(",
"fp",
")",
"except",
"yaml",
".",
"YAMLError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"text_type",
"(",
"e",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# TODO document exit codes",
"if",
"config_file",
"==",
"'-'",
":",
"return",
"load",
"(",
"sys",
".",
"stdin",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config_file",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'ERROR: Must either run next to config.yaml or'",
"' specify a config file.\\n'",
"+",
"__doc__",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"with",
"open",
"(",
"config_file",
")",
"as",
"fp",
":",
"return",
"load",
"(",
"fp",
")"
] |
Get configuration from a file.
|
[
"Get",
"configuration",
"from",
"a",
"file",
"."
] |
python
|
train
| 32.588235 |
apache/incubator-heron
|
heron/tools/explorer/src/python/logicalplan.py
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/explorer/src/python/logicalplan.py#L62-L89
|
def to_table(components, topo_info):
""" normalize raw logical plan info to table """
inputs, outputs = defaultdict(list), defaultdict(list)
for ctype, component in components.items():
if ctype == 'bolts':
for component_name, component_info in component.items():
for input_stream in component_info['inputs']:
input_name = input_stream['component_name']
inputs[component_name].append(input_name)
outputs[input_name].append(component_name)
info = []
spouts_instance = topo_info['physical_plan']['spouts']
bolts_instance = topo_info['physical_plan']['bolts']
for ctype, component in components.items():
# stages is an int so keep going
if ctype == "stages":
continue
for component_name, component_info in component.items():
row = [ctype[:-1], component_name]
if ctype == 'spouts':
row.append(len(spouts_instance[component_name]))
else:
row.append(len(bolts_instance[component_name]))
row.append(','.join(inputs.get(component_name, ['-'])))
row.append(','.join(outputs.get(component_name, ['-'])))
info.append(row)
header = ['type', 'name', 'parallelism', 'input', 'output']
return info, header
|
[
"def",
"to_table",
"(",
"components",
",",
"topo_info",
")",
":",
"inputs",
",",
"outputs",
"=",
"defaultdict",
"(",
"list",
")",
",",
"defaultdict",
"(",
"list",
")",
"for",
"ctype",
",",
"component",
"in",
"components",
".",
"items",
"(",
")",
":",
"if",
"ctype",
"==",
"'bolts'",
":",
"for",
"component_name",
",",
"component_info",
"in",
"component",
".",
"items",
"(",
")",
":",
"for",
"input_stream",
"in",
"component_info",
"[",
"'inputs'",
"]",
":",
"input_name",
"=",
"input_stream",
"[",
"'component_name'",
"]",
"inputs",
"[",
"component_name",
"]",
".",
"append",
"(",
"input_name",
")",
"outputs",
"[",
"input_name",
"]",
".",
"append",
"(",
"component_name",
")",
"info",
"=",
"[",
"]",
"spouts_instance",
"=",
"topo_info",
"[",
"'physical_plan'",
"]",
"[",
"'spouts'",
"]",
"bolts_instance",
"=",
"topo_info",
"[",
"'physical_plan'",
"]",
"[",
"'bolts'",
"]",
"for",
"ctype",
",",
"component",
"in",
"components",
".",
"items",
"(",
")",
":",
"# stages is an int so keep going",
"if",
"ctype",
"==",
"\"stages\"",
":",
"continue",
"for",
"component_name",
",",
"component_info",
"in",
"component",
".",
"items",
"(",
")",
":",
"row",
"=",
"[",
"ctype",
"[",
":",
"-",
"1",
"]",
",",
"component_name",
"]",
"if",
"ctype",
"==",
"'spouts'",
":",
"row",
".",
"append",
"(",
"len",
"(",
"spouts_instance",
"[",
"component_name",
"]",
")",
")",
"else",
":",
"row",
".",
"append",
"(",
"len",
"(",
"bolts_instance",
"[",
"component_name",
"]",
")",
")",
"row",
".",
"append",
"(",
"','",
".",
"join",
"(",
"inputs",
".",
"get",
"(",
"component_name",
",",
"[",
"'-'",
"]",
")",
")",
")",
"row",
".",
"append",
"(",
"','",
".",
"join",
"(",
"outputs",
".",
"get",
"(",
"component_name",
",",
"[",
"'-'",
"]",
")",
")",
")",
"info",
".",
"append",
"(",
"row",
")",
"header",
"=",
"[",
"'type'",
",",
"'name'",
",",
"'parallelism'",
",",
"'input'",
",",
"'output'",
"]",
"return",
"info",
",",
"header"
] |
normalize raw logical plan info to table
|
[
"normalize",
"raw",
"logical",
"plan",
"info",
"to",
"table"
] |
python
|
valid
| 42.821429 |
SheffieldML/GPy
|
GPy/likelihoods/poisson.py
|
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/poisson.py#L52-L68
|
def logpdf_link(self, link_f, y, Y_metadata=None):
"""
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
return -link_f + y*np.log(link_f) - special.gammaln(y+1)
|
[
"def",
"logpdf_link",
"(",
"self",
",",
"link_f",
",",
"y",
",",
"Y_metadata",
"=",
"None",
")",
":",
"return",
"-",
"link_f",
"+",
"y",
"*",
"np",
".",
"log",
"(",
"link_f",
")",
"-",
"special",
".",
"gammaln",
"(",
"y",
"+",
"1",
")"
] |
Log Likelihood Function given link(f)
.. math::
\\ln p(y_{i}|\lambda(f_{i})) = -\\lambda(f_{i}) + y_{i}\\log \\lambda(f_{i}) - \\log y_{i}!
:param link_f: latent variables (link(f))
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in poisson distribution
:returns: likelihood evaluated for this point
:rtype: float
|
[
"Log",
"Likelihood",
"Function",
"given",
"link",
"(",
"f",
")"
] |
python
|
train
| 34.235294 |
jupyterhub/kubespawner
|
kubespawner/spawner.py
|
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1876-L1909
|
def _load_profile(self, profile_name):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['display_name'] == profile_name:
break
else:
if profile_name:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
profile_name, ', '.join(p['display_name'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v)
|
[
"def",
"_load_profile",
"(",
"self",
",",
"profile_name",
")",
":",
"# find the profile",
"default_profile",
"=",
"self",
".",
"_profile_list",
"[",
"0",
"]",
"for",
"profile",
"in",
"self",
".",
"_profile_list",
":",
"if",
"profile",
".",
"get",
"(",
"'default'",
",",
"False",
")",
":",
"# explicit default, not the first",
"default_profile",
"=",
"profile",
"if",
"profile",
"[",
"'display_name'",
"]",
"==",
"profile_name",
":",
"break",
"else",
":",
"if",
"profile_name",
":",
"# name specified, but not found",
"raise",
"ValueError",
"(",
"\"No such profile: %s. Options include: %s\"",
"%",
"(",
"profile_name",
",",
"', '",
".",
"join",
"(",
"p",
"[",
"'display_name'",
"]",
"for",
"p",
"in",
"self",
".",
"_profile_list",
")",
")",
")",
"else",
":",
"# no name specified, use the default",
"profile",
"=",
"default_profile",
"self",
".",
"log",
".",
"debug",
"(",
"\"Applying KubeSpawner override for profile '%s'\"",
",",
"profile",
"[",
"'display_name'",
"]",
")",
"kubespawner_override",
"=",
"profile",
".",
"get",
"(",
"'kubespawner_override'",
",",
"{",
"}",
")",
"for",
"k",
",",
"v",
"in",
"kubespawner_override",
".",
"items",
"(",
")",
":",
"if",
"callable",
"(",
"v",
")",
":",
"v",
"=",
"v",
"(",
"self",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\".. overriding KubeSpawner value %s=%s (callable result)\"",
",",
"k",
",",
"v",
")",
"else",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\".. overriding KubeSpawner value %s=%s\"",
",",
"k",
",",
"v",
")",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")"
] |
Load a profile by name
Called by load_user_options
|
[
"Load",
"a",
"profile",
"by",
"name"
] |
python
|
train
| 38.764706 |
nilp0inter/cpe
|
cpe/cpelang2_3.py
|
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpelang2_3.py#L172-L277
|
def language_match(self, cpeset, cpel_dom=None):
"""
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
"""
# Root element tag
TAG_ROOT = '#document'
# A container for child platform definitions
TAG_PLATSPEC = 'cpe:platform-specification'
# Information about a platform definition
TAG_PLATFORM = 'cpe:platform'
TAG_LOGITEST = 'cpe:logical-test'
TAG_CPE = 'cpe:fact-ref'
TAG_CHECK_CPE = 'check-fact-ref'
# Tag attributes
ATT_NAME = 'name'
ATT_OP = 'operator'
ATT_NEGATE = 'negate'
# Attribute values
ATT_OP_AND = 'AND'
ATT_OP_OR = 'OR'
ATT_NEGATE_TRUE = 'TRUE'
# Constant associated with an error in language matching
ERROR = 2
if cpel_dom is None:
cpel_dom = self.document
# Identify the root element
if cpel_dom.nodeName == TAG_ROOT or cpel_dom.nodeName == TAG_PLATSPEC:
for node in cpel_dom.childNodes:
if node.nodeName == TAG_PLATSPEC:
return self.language_match(cpeset, node)
if node.nodeName == TAG_PLATFORM:
return self.language_match(cpeset, node)
# Identify a platform element
elif cpel_dom.nodeName == TAG_PLATFORM:
# Parse through E's elements and ignore all but logical-test
for node in cpel_dom.childNodes:
if node.nodeName == TAG_LOGITEST:
# Call the function again, but with logical-test
# as the root element
return self.language_match(cpeset, node)
# Identify a CPE element
elif cpel_dom.nodeName == TAG_CPE:
# fact-ref's name attribute is a bound name,
# so we unbind it to a WFN before passing it
cpename = cpel_dom.getAttribute(ATT_NAME)
wfn = CPELanguage2_3._unbind(cpename)
return CPELanguage2_3._fact_ref_eval(cpeset, wfn)
# Identify a check of CPE names (OVAL, OCIL...)
elif cpel_dom.nodeName == TAG_CHECK_CPE:
return CPELanguage2_3._check_fact_ref_Eval(cpel_dom)
# Identify a logical operator element
elif cpel_dom.nodeName == TAG_LOGITEST:
count = 0
len = 0
answer = False
for node in cpel_dom.childNodes:
if node.nodeName.find("#") == 0:
continue
len = len + 1
result = self.language_match(cpeset, node)
if result:
count = count + 1
elif result == ERROR:
answer = ERROR
operator = cpel_dom.getAttribute(ATT_OP).upper()
if operator == ATT_OP_AND:
if count == len:
answer = True
elif operator == ATT_OP_OR:
if count > 0:
answer = True
operator_not = cpel_dom.getAttribute(ATT_NEGATE)
if operator_not:
if ((operator_not.upper() == ATT_NEGATE_TRUE) and
(answer != ERROR)):
answer = not answer
return answer
else:
return False
|
[
"def",
"language_match",
"(",
"self",
",",
"cpeset",
",",
"cpel_dom",
"=",
"None",
")",
":",
"# Root element tag",
"TAG_ROOT",
"=",
"'#document'",
"# A container for child platform definitions",
"TAG_PLATSPEC",
"=",
"'cpe:platform-specification'",
"# Information about a platform definition",
"TAG_PLATFORM",
"=",
"'cpe:platform'",
"TAG_LOGITEST",
"=",
"'cpe:logical-test'",
"TAG_CPE",
"=",
"'cpe:fact-ref'",
"TAG_CHECK_CPE",
"=",
"'check-fact-ref'",
"# Tag attributes",
"ATT_NAME",
"=",
"'name'",
"ATT_OP",
"=",
"'operator'",
"ATT_NEGATE",
"=",
"'negate'",
"# Attribute values",
"ATT_OP_AND",
"=",
"'AND'",
"ATT_OP_OR",
"=",
"'OR'",
"ATT_NEGATE_TRUE",
"=",
"'TRUE'",
"# Constant associated with an error in language matching",
"ERROR",
"=",
"2",
"if",
"cpel_dom",
"is",
"None",
":",
"cpel_dom",
"=",
"self",
".",
"document",
"# Identify the root element",
"if",
"cpel_dom",
".",
"nodeName",
"==",
"TAG_ROOT",
"or",
"cpel_dom",
".",
"nodeName",
"==",
"TAG_PLATSPEC",
":",
"for",
"node",
"in",
"cpel_dom",
".",
"childNodes",
":",
"if",
"node",
".",
"nodeName",
"==",
"TAG_PLATSPEC",
":",
"return",
"self",
".",
"language_match",
"(",
"cpeset",
",",
"node",
")",
"if",
"node",
".",
"nodeName",
"==",
"TAG_PLATFORM",
":",
"return",
"self",
".",
"language_match",
"(",
"cpeset",
",",
"node",
")",
"# Identify a platform element",
"elif",
"cpel_dom",
".",
"nodeName",
"==",
"TAG_PLATFORM",
":",
"# Parse through E's elements and ignore all but logical-test",
"for",
"node",
"in",
"cpel_dom",
".",
"childNodes",
":",
"if",
"node",
".",
"nodeName",
"==",
"TAG_LOGITEST",
":",
"# Call the function again, but with logical-test",
"# as the root element",
"return",
"self",
".",
"language_match",
"(",
"cpeset",
",",
"node",
")",
"# Identify a CPE element",
"elif",
"cpel_dom",
".",
"nodeName",
"==",
"TAG_CPE",
":",
"# fact-ref's name attribute is a bound name,",
"# so we unbind it to a WFN before passing it",
"cpename",
"=",
"cpel_dom",
".",
"getAttribute",
"(",
"ATT_NAME",
")",
"wfn",
"=",
"CPELanguage2_3",
".",
"_unbind",
"(",
"cpename",
")",
"return",
"CPELanguage2_3",
".",
"_fact_ref_eval",
"(",
"cpeset",
",",
"wfn",
")",
"# Identify a check of CPE names (OVAL, OCIL...)",
"elif",
"cpel_dom",
".",
"nodeName",
"==",
"TAG_CHECK_CPE",
":",
"return",
"CPELanguage2_3",
".",
"_check_fact_ref_Eval",
"(",
"cpel_dom",
")",
"# Identify a logical operator element",
"elif",
"cpel_dom",
".",
"nodeName",
"==",
"TAG_LOGITEST",
":",
"count",
"=",
"0",
"len",
"=",
"0",
"answer",
"=",
"False",
"for",
"node",
"in",
"cpel_dom",
".",
"childNodes",
":",
"if",
"node",
".",
"nodeName",
".",
"find",
"(",
"\"#\"",
")",
"==",
"0",
":",
"continue",
"len",
"=",
"len",
"+",
"1",
"result",
"=",
"self",
".",
"language_match",
"(",
"cpeset",
",",
"node",
")",
"if",
"result",
":",
"count",
"=",
"count",
"+",
"1",
"elif",
"result",
"==",
"ERROR",
":",
"answer",
"=",
"ERROR",
"operator",
"=",
"cpel_dom",
".",
"getAttribute",
"(",
"ATT_OP",
")",
".",
"upper",
"(",
")",
"if",
"operator",
"==",
"ATT_OP_AND",
":",
"if",
"count",
"==",
"len",
":",
"answer",
"=",
"True",
"elif",
"operator",
"==",
"ATT_OP_OR",
":",
"if",
"count",
">",
"0",
":",
"answer",
"=",
"True",
"operator_not",
"=",
"cpel_dom",
".",
"getAttribute",
"(",
"ATT_NEGATE",
")",
"if",
"operator_not",
":",
"if",
"(",
"(",
"operator_not",
".",
"upper",
"(",
")",
"==",
"ATT_NEGATE_TRUE",
")",
"and",
"(",
"answer",
"!=",
"ERROR",
")",
")",
":",
"answer",
"=",
"not",
"answer",
"return",
"answer",
"else",
":",
"return",
"False"
] |
Accepts a set of known CPE Names and an expression in the CPE language,
and delivers the answer True if the expression matches with the set.
Otherwise, it returns False.
:param CPELanguage self: An expression in the CPE Applicability
Language, represented as the XML infoset for the platform element.
:param CPESet cpeset: CPE set object to match with self expression.
:param string cpel_dom: An expression in the CPE Applicability
Language, represented as DOM tree.
:returns: True if self expression can be satisfied by language matching
against cpeset, False otherwise.
:rtype: boolean
|
[
"Accepts",
"a",
"set",
"of",
"known",
"CPE",
"Names",
"and",
"an",
"expression",
"in",
"the",
"CPE",
"language",
"and",
"delivers",
"the",
"answer",
"True",
"if",
"the",
"expression",
"matches",
"with",
"the",
"set",
".",
"Otherwise",
"it",
"returns",
"False",
"."
] |
python
|
train
| 36.103774 |
emirozer/fake2db
|
fake2db/helpers.py
|
https://github.com/emirozer/fake2db/blob/568cf42afb3ac10fc15c4faaa1cdb84fc1f4946c/fake2db/helpers.py#L8-L19
|
def fake2db_logger():
'''creates a logger obj'''
# Pull the local ip and username for meaningful logging
username = getpass.getuser()
# Set the logger
FORMAT = '%(asctime)-15s %(user)-8s %(message)s'
logging.basicConfig(format=FORMAT)
extra_information = {'user': username}
logger = logging.getLogger('fake2db_logger')
# --------------------
return logger, extra_information
|
[
"def",
"fake2db_logger",
"(",
")",
":",
"# Pull the local ip and username for meaningful logging",
"username",
"=",
"getpass",
".",
"getuser",
"(",
")",
"# Set the logger",
"FORMAT",
"=",
"'%(asctime)-15s %(user)-8s %(message)s'",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"FORMAT",
")",
"extra_information",
"=",
"{",
"'user'",
":",
"username",
"}",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'fake2db_logger'",
")",
"# --------------------",
"return",
"logger",
",",
"extra_information"
] |
creates a logger obj
|
[
"creates",
"a",
"logger",
"obj"
] |
python
|
train
| 34 |
equinor/segyviewer
|
src/segyviewlib/layoutfigure.py
|
https://github.com/equinor/segyviewer/blob/994d402a8326f30608d98103f8831dee9e3c5850/src/segyviewlib/layoutfigure.py#L36-L42
|
def index(self, axes):
"""
:param axes: The Axes instance to find the index of.
:type axes: Axes
:rtype: int
"""
return None if axes is self._colormap_axes else self._axes.index(axes)
|
[
"def",
"index",
"(",
"self",
",",
"axes",
")",
":",
"return",
"None",
"if",
"axes",
"is",
"self",
".",
"_colormap_axes",
"else",
"self",
".",
"_axes",
".",
"index",
"(",
"axes",
")"
] |
:param axes: The Axes instance to find the index of.
:type axes: Axes
:rtype: int
|
[
":",
"param",
"axes",
":",
"The",
"Axes",
"instance",
"to",
"find",
"the",
"index",
"of",
".",
":",
"type",
"axes",
":",
"Axes",
":",
"rtype",
":",
"int"
] |
python
|
train
| 32.142857 |
shinux/PyTime
|
pytime/pytime.py
|
https://github.com/shinux/PyTime/blob/f2b9f877507e2a1dddf5dd255fdff243a5dbed48/pytime/pytime.py#L64-L70
|
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
|
[
"def",
"yesterday",
"(",
"date",
"=",
"None",
")",
":",
"if",
"not",
"date",
":",
"return",
"_date",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"else",
":",
"current_date",
"=",
"parse",
"(",
"date",
")",
"return",
"current_date",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")"
] |
yesterday once more
|
[
"yesterday",
"once",
"more"
] |
python
|
train
| 31.142857 |
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/runsnake.py#L719-L727
|
def RootNode(self):
"""Return our current root node and appropriate adapter for it"""
tree = self.loader.get_root( self.viewType )
adapter = self.loader.get_adapter( self.viewType )
rows = self.loader.get_rows( self.viewType )
adapter.SetPercentage(self.percentageView, adapter.value( tree ))
return adapter, tree, rows
|
[
"def",
"RootNode",
"(",
"self",
")",
":",
"tree",
"=",
"self",
".",
"loader",
".",
"get_root",
"(",
"self",
".",
"viewType",
")",
"adapter",
"=",
"self",
".",
"loader",
".",
"get_adapter",
"(",
"self",
".",
"viewType",
")",
"rows",
"=",
"self",
".",
"loader",
".",
"get_rows",
"(",
"self",
".",
"viewType",
")",
"adapter",
".",
"SetPercentage",
"(",
"self",
".",
"percentageView",
",",
"adapter",
".",
"value",
"(",
"tree",
")",
")",
"return",
"adapter",
",",
"tree",
",",
"rows"
] |
Return our current root node and appropriate adapter for it
|
[
"Return",
"our",
"current",
"root",
"node",
"and",
"appropriate",
"adapter",
"for",
"it"
] |
python
|
train
| 41.888889 |
mushkevych/scheduler
|
synergy/system/repeat_timer.py
|
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/repeat_timer.py#L66-L77
|
def next_run_in(self, utc_now=None):
""" :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the RepeatTimer instance is not running """
if utc_now is None:
utc_now = datetime.utcnow()
if self.is_alive():
next_run = timedelta(seconds=self.interval_current) + self.activation_dt
return next_run - utc_now
else:
return None
|
[
"def",
"next_run_in",
"(",
"self",
",",
"utc_now",
"=",
"None",
")",
":",
"if",
"utc_now",
"is",
"None",
":",
"utc_now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"self",
".",
"is_alive",
"(",
")",
":",
"next_run",
"=",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"interval_current",
")",
"+",
"self",
".",
"activation_dt",
"return",
"next_run",
"-",
"utc_now",
"else",
":",
"return",
"None"
] |
:param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
:return: timedelta instance presenting amount of time before the trigger is triggered next time
or None if the RepeatTimer instance is not running
|
[
":",
"param",
"utc_now",
":",
"optional",
"parameter",
"to",
"be",
"used",
"by",
"Unit",
"Tests",
"as",
"a",
"definition",
"of",
"now",
":",
"return",
":",
"timedelta",
"instance",
"presenting",
"amount",
"of",
"time",
"before",
"the",
"trigger",
"is",
"triggered",
"next",
"time",
"or",
"None",
"if",
"the",
"RepeatTimer",
"instance",
"is",
"not",
"running"
] |
python
|
train
| 46 |