repo (string, 7–55 chars) | path (string, 4–223 chars) | url (string, 87–315 chars) | code (string, 75–104k chars) | code_tokens (list) | docstring (string, 1–46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91–980)
---|---|---|---|---|---|---|---|---|---
pandeylab/pythomics | pythomics/genomics/structures.py | https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/genomics/structures.py#L63-L70 |
def parse_entry(self, row):
    """Parse an individual VCF entry and return a VCFEntry which contains information about
    the call (such as alternative allele, zygosity, etc.)
    """
    var_call = VCFEntry(self.individuals)
    var_call.parse_entry(row)
    return var_call
|
[
"def",
"parse_entry",
"(",
"self",
",",
"row",
")",
":",
"var_call",
"=",
"VCFEntry",
"(",
"self",
".",
"individuals",
")",
"var_call",
".",
"parse_entry",
"(",
"row",
")",
"return",
"var_call"
] |
Parse an individual VCF entry and return a VCFEntry which contains information about
the call (such as alternative allele, zygosity, etc.)
|
[
"Parse",
"an",
"individual",
"VCF",
"entry",
"and",
"return",
"a",
"VCFEntry",
"which",
"contains",
"information",
"about",
"the",
"call",
"(",
"such",
"as",
"alternative",
"allele",
"zygosity",
"etc",
".",
")"
] |
python | train | 36.875 |
coldfix/udiskie | udiskie/mount.py | https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/mount.py#L691-L700 |
def get_all_handleable_roots(self):
    """
    Get list of all handleable devices, return only those that represent
    root nodes within the filtered device tree.
    """
    nodes = self.get_device_tree()
    return [node.device
            for node in sorted(nodes.values(), key=DevNode._sort_key)
            if not node.ignored and node.device
            and (node.root == '/' or nodes[node.root].ignored)]
|
[
"def",
"get_all_handleable_roots",
"(",
"self",
")",
":",
"nodes",
"=",
"self",
".",
"get_device_tree",
"(",
")",
"return",
"[",
"node",
".",
"device",
"for",
"node",
"in",
"sorted",
"(",
"nodes",
".",
"values",
"(",
")",
",",
"key",
"=",
"DevNode",
".",
"_sort_key",
")",
"if",
"not",
"node",
".",
"ignored",
"and",
"node",
".",
"device",
"and",
"(",
"node",
".",
"root",
"==",
"'/'",
"or",
"nodes",
"[",
"node",
".",
"root",
"]",
".",
"ignored",
")",
"]"
] |
Get list of all handleable devices, return only those that represent
root nodes within the filtered device tree.
|
[
"Get",
"list",
"of",
"all",
"handleable",
"devices",
"return",
"only",
"those",
"that",
"represent",
"root",
"nodes",
"within",
"the",
"filtered",
"device",
"tree",
"."
] |
python | train | 44 |
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/sql.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/sql.py#L314-L320 |
def Transactional(fn, self, *argv, **argd):
    """
    Decorator that wraps DAO methods to handle transactions automatically.
    It may only work with subclasses of L{BaseDAO}.
    """
    return self._transactional(fn, *argv, **argd)
|
[
"def",
"Transactional",
"(",
"fn",
",",
"self",
",",
"*",
"argv",
",",
"*",
"*",
"argd",
")",
":",
"return",
"self",
".",
"_transactional",
"(",
"fn",
",",
"*",
"argv",
",",
"*",
"*",
"argd",
")"
] |
Decorator that wraps DAO methods to handle transactions automatically.
It may only work with subclasses of L{BaseDAO}.
|
[
"Decorator",
"that",
"wraps",
"DAO",
"methods",
"to",
"handle",
"transactions",
"automatically",
"."
] |
python | train | 33 |
ianmiell/shutit | shutit_pexpect.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L1884-L1904 |
def get_env_pass(self,user=None,msg=None,note=None):
    """Gets a password from the user if one is not already recorded for this environment.
    @param user: username we are getting password for
    @param msg: message to put out there
    """
    shutit = self.shutit
    shutit.handle_note(note)
    user = user or self.whoami()
    # cygwin does not have root
    pw = ''
    if self.current_environment.distro == 'cygwin':
        return pw
    if user not in self.current_environment.users.keys():
        self.current_environment.users.update({user:None})
    if not self.current_environment.users[user] and user != 'root':
        msg = msg or 'Please input the sudo password for user: ' + user
        pw = shutit_util.get_input(msg,ispass=True)
        self.current_environment.users[user] = pw
        shutit_global.shutit_global_object.secret_words_set.add(self.current_environment.users[user])
    return pw
|
[
"def",
"get_env_pass",
"(",
"self",
",",
"user",
"=",
"None",
",",
"msg",
"=",
"None",
",",
"note",
"=",
"None",
")",
":",
"shutit",
"=",
"self",
".",
"shutit",
"shutit",
".",
"handle_note",
"(",
"note",
")",
"user",
"=",
"user",
"or",
"self",
".",
"whoami",
"(",
")",
"# cygwin does not have root",
"pw",
"=",
"''",
"if",
"self",
".",
"current_environment",
".",
"distro",
"==",
"'cygwin'",
":",
"return",
"pw",
"if",
"user",
"not",
"in",
"self",
".",
"current_environment",
".",
"users",
".",
"keys",
"(",
")",
":",
"self",
".",
"current_environment",
".",
"users",
".",
"update",
"(",
"{",
"user",
":",
"None",
"}",
")",
"if",
"not",
"self",
".",
"current_environment",
".",
"users",
"[",
"user",
"]",
"and",
"user",
"!=",
"'root'",
":",
"msg",
"=",
"msg",
"or",
"'Please input the sudo password for user: '",
"+",
"user",
"pw",
"=",
"shutit_util",
".",
"get_input",
"(",
"msg",
",",
"ispass",
"=",
"True",
")",
"self",
".",
"current_environment",
".",
"users",
"[",
"user",
"]",
"=",
"pw",
"shutit_global",
".",
"shutit_global_object",
".",
"secret_words_set",
".",
"add",
"(",
"self",
".",
"current_environment",
".",
"users",
"[",
"user",
"]",
")",
"return",
"pw"
] |
Gets a password from the user if one is not already recorded for this environment.
@param user: username we are getting password for
@param msg: message to put out there
|
[
"Gets",
"a",
"password",
"from",
"the",
"user",
"if",
"one",
"is",
"not",
"already",
"recorded",
"for",
"this",
"environment",
"."
] |
python | train | 40.619048 |
ml4ai/delphi | delphi/AnalysisGraph.py | https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/AnalysisGraph.py#L151-L157 |
def from_uncharted_json_file(cls, file):
    """ Construct an AnalysisGraph object from a file containing INDRA
    statements serialized exported by Uncharted's CauseMos webapp.
    """
    with open(file, "r") as f:
        _dict = json.load(f)
    return cls.from_uncharted_json_serialized_dict(_dict)
|
[
"def",
"from_uncharted_json_file",
"(",
"cls",
",",
"file",
")",
":",
"with",
"open",
"(",
"file",
",",
"\"r\"",
")",
"as",
"f",
":",
"_dict",
"=",
"json",
".",
"load",
"(",
"f",
")",
"return",
"cls",
".",
"from_uncharted_json_serialized_dict",
"(",
"_dict",
")"
] |
Construct an AnalysisGraph object from a file containing INDRA
statements serialized exported by Uncharted's CauseMos webapp.
|
[
"Construct",
"an",
"AnalysisGraph",
"object",
"from",
"a",
"file",
"containing",
"INDRA",
"statements",
"serialized",
"exported",
"by",
"Uncharted",
"s",
"CauseMos",
"webapp",
"."
] |
python | train | 46 |
aleju/imgaug | imgaug/augmentables/segmaps.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/segmaps.py#L424-L455 |
def resize(self, sizes, interpolation="cubic"):
    """
    Resize the segmentation map array to the provided size given the provided interpolation.

    Parameters
    ----------
    sizes : float or iterable of int or iterable of float
        New size of the array in ``(height, width)``.
        See :func:`imgaug.imgaug.imresize_single_image` for details.
    interpolation : None or str or int, optional
        The interpolation to use during resize.
        See :func:`imgaug.imgaug.imresize_single_image` for details.
        Note: The segmentation map is internally stored as multiple float-based heatmaps,
        making smooth interpolations potentially more reasonable than nearest neighbour
        interpolation.

    Returns
    -------
    segmap : imgaug.SegmentationMapOnImage
        Resized segmentation map object.
    """
    arr_resized = ia.imresize_single_image(self.arr, sizes, interpolation=interpolation)
    # cubic interpolation can lead to values outside of [0.0, 1.0],
    # see https://github.com/opencv/opencv/issues/7195
    # TODO area interpolation too?
    arr_resized = np.clip(arr_resized, 0.0, 1.0)
    segmap = SegmentationMapOnImage(arr_resized, shape=self.shape)
    segmap.input_was = self.input_was
    return segmap
|
[
"def",
"resize",
"(",
"self",
",",
"sizes",
",",
"interpolation",
"=",
"\"cubic\"",
")",
":",
"arr_resized",
"=",
"ia",
".",
"imresize_single_image",
"(",
"self",
".",
"arr",
",",
"sizes",
",",
"interpolation",
"=",
"interpolation",
")",
"# cubic interpolation can lead to values outside of [0.0, 1.0],",
"# see https://github.com/opencv/opencv/issues/7195",
"# TODO area interpolation too?",
"arr_resized",
"=",
"np",
".",
"clip",
"(",
"arr_resized",
",",
"0.0",
",",
"1.0",
")",
"segmap",
"=",
"SegmentationMapOnImage",
"(",
"arr_resized",
",",
"shape",
"=",
"self",
".",
"shape",
")",
"segmap",
".",
"input_was",
"=",
"self",
".",
"input_was",
"return",
"segmap"
] |
Resize the segmentation map array to the provided size given the provided interpolation.

Parameters
----------
sizes : float or iterable of int or iterable of float
    New size of the array in ``(height, width)``.
    See :func:`imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
    The interpolation to use during resize.
    See :func:`imgaug.imgaug.imresize_single_image` for details.
    Note: The segmentation map is internally stored as multiple float-based heatmaps,
    making smooth interpolations potentially more reasonable than nearest neighbour
    interpolation.

Returns
-------
segmap : imgaug.SegmentationMapOnImage
    Resized segmentation map object.
|
[
"Resize",
"the",
"segmentation",
"map",
"array",
"to",
"the",
"provided",
"size",
"given",
"the",
"provided",
"interpolation",
"."
] |
python | valid | 41.84375 |
cuihantao/andes | andes/system.py | https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/system.py#L466-L494 |
def dump_config(self, file_path):
    """
    Dump system and routine configurations to an rc-formatted file.

    Parameters
    ----------
    file_path : str
        path to the configuration file. The user will be prompted if the
        file already exists.

    Returns
    -------
    None
    """
    if os.path.isfile(file_path):
        logger.debug('File {} alreay exist. Overwrite? [y/N]'.format(file_path))
        choice = input('File {} alreay exist. Overwrite? [y/N]'.format(file_path)).lower()
        if len(choice) == 0 or choice[0] != 'y':
            logger.info('File not overwritten.')
            return
    conf = self.config.dump_conf()
    for r in routines.__all__:
        conf = self.__dict__[r.lower()].config.dump_conf(conf)
    with open(file_path, 'w') as f:
        conf.write(f)
    logger.info('Config written to {}'.format(file_path))
|
[
"def",
"dump_config",
"(",
"self",
",",
"file_path",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"logger",
".",
"debug",
"(",
"'File {} alreay exist. Overwrite? [y/N]'",
".",
"format",
"(",
"file_path",
")",
")",
"choice",
"=",
"input",
"(",
"'File {} alreay exist. Overwrite? [y/N]'",
".",
"format",
"(",
"file_path",
")",
")",
".",
"lower",
"(",
")",
"if",
"len",
"(",
"choice",
")",
"==",
"0",
"or",
"choice",
"[",
"0",
"]",
"!=",
"'y'",
":",
"logger",
".",
"info",
"(",
"'File not overwritten.'",
")",
"return",
"conf",
"=",
"self",
".",
"config",
".",
"dump_conf",
"(",
")",
"for",
"r",
"in",
"routines",
".",
"__all__",
":",
"conf",
"=",
"self",
".",
"__dict__",
"[",
"r",
".",
"lower",
"(",
")",
"]",
".",
"config",
".",
"dump_conf",
"(",
"conf",
")",
"with",
"open",
"(",
"file_path",
",",
"'w'",
")",
"as",
"f",
":",
"conf",
".",
"write",
"(",
"f",
")",
"logger",
".",
"info",
"(",
"'Config written to {}'",
".",
"format",
"(",
"file_path",
")",
")"
] |
Dump system and routine configurations to an rc-formatted file.

Parameters
----------
file_path : str
    path to the configuration file. The user will be prompted if the
    file already exists.

Returns
-------
None
|
[
"Dump",
"system",
"and",
"routine",
"configurations",
"to",
"an",
"rc",
"-",
"formatted",
"file",
"."
] |
python | train | 32.37931 |
mitsei/dlkit | dlkit/services/relationship.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/relationship.py#L1107-L1115 |
def get_relationship_form(self, *args, **kwargs):
    """Pass through to provider RelationshipAdminSession.get_relationship_form_for_update"""
    # Implemented from kitosid template for -
    # osid.resource.ResourceAdminSession.get_resource_form_for_update
    # This method might be a bit sketchy. Time will tell.
    if isinstance(args[-1], list) or 'relationship_record_types' in kwargs:
        return self.get_relationship_form_for_create(*args, **kwargs)
    else:
        return self.get_relationship_form_for_update(*args, **kwargs)
|
[
"def",
"get_relationship_form",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Implemented from kitosid template for -",
"# osid.resource.ResourceAdminSession.get_resource_form_for_update",
"# This method might be a bit sketchy. Time will tell.",
"if",
"isinstance",
"(",
"args",
"[",
"-",
"1",
"]",
",",
"list",
")",
"or",
"'relationship_record_types'",
"in",
"kwargs",
":",
"return",
"self",
".",
"get_relationship_form_for_create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"self",
".",
"get_relationship_form_for_update",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Pass through to provider RelationshipAdminSession.get_relationship_form_for_update
|
[
"Pass",
"through",
"to",
"provider",
"RelationshipAdminSession",
".",
"get_relationship_form_for_update"
] |
python | train | 62.888889 |
Tivix/django-common | django_common/decorators.py | https://github.com/Tivix/django-common/blob/407d208121011a8425139e541629554114d96c18/django_common/decorators.py#L47-L59 |
def anonymous_required(view, redirect_to=None):
    """
    Only allow if user is NOT authenticated.
    """
    if redirect_to is None:
        redirect_to = settings.LOGIN_REDIRECT_URL

    @wraps(view)
    def wrapper(request, *a, **k):
        if request.user and request.user.is_authenticated():
            return HttpResponseRedirect(redirect_to)
        return view(request, *a, **k)
    return wrapper
|
[
"def",
"anonymous_required",
"(",
"view",
",",
"redirect_to",
"=",
"None",
")",
":",
"if",
"redirect_to",
"is",
"None",
":",
"redirect_to",
"=",
"settings",
".",
"LOGIN_REDIRECT_URL",
"@",
"wraps",
"(",
"view",
")",
"def",
"wrapper",
"(",
"request",
",",
"*",
"a",
",",
"*",
"*",
"k",
")",
":",
"if",
"request",
".",
"user",
"and",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"return",
"HttpResponseRedirect",
"(",
"redirect_to",
")",
"return",
"view",
"(",
"request",
",",
"*",
"a",
",",
"*",
"*",
"k",
")",
"return",
"wrapper"
] |
Only allow if user is NOT authenticated.
|
[
"Only",
"allow",
"if",
"user",
"is",
"NOT",
"authenticated",
"."
] |
python | train | 30.615385 |
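For context, a minimal usage sketch of the `anonymous_required` decorator above. The view name and redirect target are hypothetical, and it assumes a Django version in which `request.user.is_authenticated` is still callable as a method, as in the snippet:

    from django_common.decorators import anonymous_required

    @anonymous_required
    def signup(request):
        # Reachable only by anonymous users; authenticated users are
        # redirected to settings.LOGIN_REDIRECT_URL.
        ...

    # An explicit redirect target can also be passed:
    # signup = anonymous_required(signup, redirect_to='/dashboard/')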
aws/aws-dynamodb-encryption-python | src/dynamodb_encryption_sdk/transform.py | https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/transform.py#L25-L35 |
def dict_to_ddb(item):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    # TODO: narrow these types down
    """Converts a native Python dictionary to a raw DynamoDB item.

    :param dict item: Native item
    :returns: DynamoDB item
    :rtype: dict
    """
    serializer = TypeSerializer()
    return {key: serializer.serialize(value) for key, value in item.items()}
|
[
"def",
"dict_to_ddb",
"(",
"item",
")",
":",
"# type: (Dict[str, Any]) -> Dict[str, Any]",
"# TODO: narrow these types down",
"serializer",
"=",
"TypeSerializer",
"(",
")",
"return",
"{",
"key",
":",
"serializer",
".",
"serialize",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"item",
".",
"items",
"(",
")",
"}"
] |
Converts a native Python dictionary to a raw DynamoDB item.
:param dict item: Native item
:returns: DynamoDB item
:rtype: dict
|
[
"Converts",
"a",
"native",
"Python",
"dictionary",
"to",
"a",
"raw",
"DynamoDB",
"item",
"."
] |
python | train | 32.818182 |
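A minimal usage sketch for `dict_to_ddb` above; `TypeSerializer` is presumably boto3's `boto3.dynamodb.types.TypeSerializer`, and the item values here are hypothetical:

    from dynamodb_encryption_sdk.transform import dict_to_ddb

    item = {"partition_key": "user-1", "count": 3, "tags": ["a", "b"]}
    ddb_item = dict_to_ddb(item)
    # With boto3's TypeSerializer this yields the raw attribute-value form:
    # {"partition_key": {"S": "user-1"}, "count": {"N": "3"},
    #  "tags": {"L": [{"S": "a"}, {"S": "b"}]}}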
pmacosta/ptrie | ptrie/ptrie.py | https://github.com/pmacosta/ptrie/blob/c176d3ee810b7b5243c7ff2bbf2f1af0b0fff2a8/ptrie/ptrie.py#L1030-L1075 |
def make_root(self, name):  # noqa: D302
    r"""
    Make a sub-node the root node of the tree.

    All nodes not belonging to the sub-tree are deleted

    :param name: New root node name
    :type name: :ref:`NodeName`

    :raises:
     * RuntimeError (Argument \`name\` is not valid)
     * RuntimeError (Node *[name]* not in tree)

    Using the same example tree created in
    :py:meth:`ptrie.Trie.add_nodes`::

        >>> from __future__ import print_function
        >>> import docs.support.ptrie_example
        >>> tobj = docs.support.ptrie_example.create_tree()
        >>> print(tobj)
        root
        ├branch1 (*)
        │├leaf1
        ││└subleaf1 (*)
        │└leaf2 (*)
        │ └subleaf2
        └branch2
        >>> tobj.make_root('root.branch1')
        >>> print(tobj)
        root.branch1 (*)
        ├leaf1
        │└subleaf1 (*)
        └leaf2 (*)
         └subleaf2
    """
    if self._validate_node_name(name):
        raise RuntimeError("Argument `name` is not valid")
    if (name != self.root_name) and (self._node_in_tree(name)):
        for key in [node for node in self.nodes if node.find(name) != 0]:
            del self._db[key]
        self._db[name]["parent"] = ""
        self._root = name
        self._root_hierarchy_length = len(
            self.root_name.split(self._node_separator)
        )
|
[
"def",
"make_root",
"(",
"self",
",",
"name",
")",
":",
"# noqa: D302",
"if",
"self",
".",
"_validate_node_name",
"(",
"name",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Argument `name` is not valid\"",
")",
"if",
"(",
"name",
"!=",
"self",
".",
"root_name",
")",
"and",
"(",
"self",
".",
"_node_in_tree",
"(",
"name",
")",
")",
":",
"for",
"key",
"in",
"[",
"node",
"for",
"node",
"in",
"self",
".",
"nodes",
"if",
"node",
".",
"find",
"(",
"name",
")",
"!=",
"0",
"]",
":",
"del",
"self",
".",
"_db",
"[",
"key",
"]",
"self",
".",
"_db",
"[",
"name",
"]",
"[",
"\"parent\"",
"]",
"=",
"\"\"",
"self",
".",
"_root",
"=",
"name",
"self",
".",
"_root_hierarchy_length",
"=",
"len",
"(",
"self",
".",
"root_name",
".",
"split",
"(",
"self",
".",
"_node_separator",
")",
")"
] |
r"""
Make a sub-node the root node of the tree.
All nodes not belonging to the sub-tree are deleted
:param name: New root node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> tobj.make_root('root.branch1')
>>> print(tobj)
root.branch1 (*)
├leaf1
│└subleaf1 (*)
└leaf2 (*)
└subleaf2
|
[
"r",
"Make",
"a",
"sub",
"-",
"node",
"the",
"root",
"node",
"of",
"the",
"tree",
"."
] |
python | train | 31.695652 |
clld/pycdstar | src/pycdstar/media.py | https://github.com/clld/pycdstar/blob/1a225b472c4e6bf9b8078fa3198f939395c53d22/src/pycdstar/media.py#L85-L106 |
def create_object(self, api, metadata=None):
    """
    Create an object using the CDSTAR API, with the file content as bitstream.

    :param api:
    :return:
    """
    metadata = {k: v for k, v in (metadata or {}).items()}
    metadata.setdefault('creator', '{0.__name__} {0.__version__}'.format(pycdstar))
    metadata.setdefault('path', '%s' % self.path)
    metadata.update(self.add_metadata())
    bitstream_specs = [self] + self.add_bitstreams()
    obj = api.get_object()
    res = {}
    try:
        obj.metadata = metadata
        for file_ in bitstream_specs:
            res[file_.bitstream_type] = file_.add_as_bitstream(obj)
    except:  # noqa: E722
        obj.delete()
        raise
    return obj, metadata, res
|
[
"def",
"create_object",
"(",
"self",
",",
"api",
",",
"metadata",
"=",
"None",
")",
":",
"metadata",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"(",
"metadata",
"or",
"{",
"}",
")",
".",
"items",
"(",
")",
"}",
"metadata",
".",
"setdefault",
"(",
"'creator'",
",",
"'{0.__name__} {0.__version__}'",
".",
"format",
"(",
"pycdstar",
")",
")",
"metadata",
".",
"setdefault",
"(",
"'path'",
",",
"'%s'",
"%",
"self",
".",
"path",
")",
"metadata",
".",
"update",
"(",
"self",
".",
"add_metadata",
"(",
")",
")",
"bitstream_specs",
"=",
"[",
"self",
"]",
"+",
"self",
".",
"add_bitstreams",
"(",
")",
"obj",
"=",
"api",
".",
"get_object",
"(",
")",
"res",
"=",
"{",
"}",
"try",
":",
"obj",
".",
"metadata",
"=",
"metadata",
"for",
"file_",
"in",
"bitstream_specs",
":",
"res",
"[",
"file_",
".",
"bitstream_type",
"]",
"=",
"file_",
".",
"add_as_bitstream",
"(",
"obj",
")",
"except",
":",
"# noqa: E722",
"obj",
".",
"delete",
"(",
")",
"raise",
"return",
"obj",
",",
"metadata",
",",
"res"
] |
Create an object using the CDSTAR API, with the file content as bitstream.
:param api:
:return:
|
[
"Create",
"an",
"object",
"using",
"the",
"CDSTAR",
"API",
"with",
"the",
"file",
"content",
"as",
"bitstream",
"."
] |
python | train | 36.045455 |
notifiers/notifiers | notifiers/utils/helpers.py | https://github.com/notifiers/notifiers/blob/6dd8aafff86935dbb4763db9c56f9cdd7fc08b65/notifiers/utils/helpers.py#L9-L18 |
def text_to_bool(value: str) -> bool:
    """
    Tries to convert a text value to a bool. If unsuccessful returns if value is None or not

    :param value: Value to check
    """
    try:
        return bool(strtobool(value))
    except (ValueError, AttributeError):
        return value is not None
|
[
"def",
"text_to_bool",
"(",
"value",
":",
"str",
")",
"->",
"bool",
":",
"try",
":",
"return",
"bool",
"(",
"strtobool",
"(",
"value",
")",
")",
"except",
"(",
"ValueError",
",",
"AttributeError",
")",
":",
"return",
"value",
"is",
"not",
"None"
] |
Tries to convert a text value to a bool. If unsuccessful returns if value is None or not
:param value: Value to check
|
[
"Tries",
"to",
"convert",
"a",
"text",
"value",
"to",
"a",
"bool",
".",
"If",
"unsuccessful",
"returns",
"if",
"value",
"is",
"None",
"or",
"not"
] |
python | train | 29.2 |
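A minimal usage sketch of `text_to_bool` above. The dataset snippet omits its imports; `strtobool` is presumably `distutils.util.strtobool`, which accepts values such as 'y', 'yes', 'true', '1' and 'n', 'no', 'false', '0':

    from notifiers.utils.helpers import text_to_bool

    assert text_to_bool("yes") is True
    assert text_to_bool("0") is False
    assert text_to_bool("banana") is True   # unparseable, but not None
    assert text_to_bool(None) is False      # AttributeError path: None check fails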
CalebBell/ht | ht/conduction.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conduction.py#L493-L538 |
def S_isothermal_pipe_to_two_planes(D, Z, L=1.):
    r'''Returns the Shape factor `S` of a pipe of constant outer temperature
    and of outer diameter `D` which is `Z` distance from two infinite
    isothermal planes of equal temperatures, parallel to each other and
    enclosing the pipe. Length `L` must be provided, but can be set to
    1 to obtain a dimensionless shape factor used in some sources.

    .. math::
        S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}

    Parameters
    ----------
    D : float
        Diameter of the pipe, [m]
    Z : float
        Distance from the middle of the pipe to either of the planes, [m]
    L : float, optional
        Length of the pipe, [m]

    Returns
    -------
    S : float
        Shape factor [m]

    Examples
    --------
    >>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
    1.2963749299921428

    Notes
    -----
    L should be much larger than both diameters. L should be larger than W.

    .. math::
        Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}

    References
    ----------
    .. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
       Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
       Transactions, Vol. 70, 1964.
    .. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
       David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
       Wiley, 2011.
    '''
    return 2.*pi*L/log(8.*Z/(pi*D))
|
[
"def",
"S_isothermal_pipe_to_two_planes",
"(",
"D",
",",
"Z",
",",
"L",
"=",
"1.",
")",
":",
"return",
"2.",
"*",
"pi",
"*",
"L",
"/",
"log",
"(",
"8.",
"*",
"Z",
"/",
"(",
"pi",
"*",
"D",
")",
")"
] |
r'''Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D` which is `Z` distance from two infinite
isothermal planes of equal temperatures, parallel to each other and
enclosing the pipe. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.

.. math::
    S = \frac{2\pi L}{\ln\frac{8z}{\pi D}}

Parameters
----------
D : float
    Diameter of the pipe, [m]
Z : float
    Distance from the middle of the pipe to either of the planes, [m]
L : float, optional
    Length of the pipe, [m]

Returns
-------
S : float
    Shape factor [m]

Examples
--------
>>> S_isothermal_pipe_to_two_planes(.1, 5, 1)
1.2963749299921428

Notes
-----
L should be much larger than both diameters. L should be larger than W.

.. math::
    Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}

References
----------
.. [1] Shape Factors for Heat Conduction Through Bodies with Isothermal or
   Convective Boundary Conditions, J. E. Sunderland, K. R. Johnson, ASHRAE
   Transactions, Vol. 70, 1964.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
   David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
   Wiley, 2011.
|
[
"r",
"Returns",
"the",
"Shape",
"factor",
"S",
"of",
"a",
"pipe",
"of",
"constant",
"outer",
"temperature",
"and",
"of",
"outer",
"diameter",
"D",
"which",
"is",
"Z",
"distance",
"from",
"two",
"infinite",
"isothermal",
"planes",
"of",
"equal",
"temperatures",
"parallel",
"to",
"each",
"other",
"and",
"enclosing",
"the",
"pipe",
".",
"Length",
"L",
"must",
"be",
"provided",
"but",
"can",
"be",
"set",
"to",
"1",
"to",
"obtain",
"a",
"dimensionless",
"shape",
"factor",
"used",
"in",
"some",
"sources",
"."
] |
python | train | 30.826087 |
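The doctest value in the row above follows directly from the formula: with D = 0.1, Z = 5 and L = 1, S = 2*pi*1 / ln(8*5 / (pi*0.1)) = 2*pi / ln(127.324...) ≈ 6.28319 / 4.84671 ≈ 1.29637, matching the quoted 1.2963749299921428.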
tmr232/Sark | sark/qt.py | https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L56-L66 |
def get_window():
    """Get IDA's top level window."""
    tform = idaapi.get_current_tform()
    # Required sometimes when closing IDBs and not IDA.
    if not tform:
        tform = idaapi.find_tform("Output window")
    widget = form_to_widget(tform)
    window = widget.window()
    return window
|
[
"def",
"get_window",
"(",
")",
":",
"tform",
"=",
"idaapi",
".",
"get_current_tform",
"(",
")",
"# Required sometimes when closing IDBs and not IDA.",
"if",
"not",
"tform",
":",
"tform",
"=",
"idaapi",
".",
"find_tform",
"(",
"\"Output window\"",
")",
"widget",
"=",
"form_to_widget",
"(",
"tform",
")",
"window",
"=",
"widget",
".",
"window",
"(",
")",
"return",
"window"
] |
Get IDA's top level window.
|
[
"Get",
"IDA",
"s",
"top",
"level",
"window",
"."
] |
python | train | 26.636364 |
watson-developer-cloud/python-sdk | ibm_watson/compare_comply_v1.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L3453-L3460 |
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'key') and self.key is not None:
        _dict['key'] = self.key._to_dict()
    if hasattr(self, 'value') and self.value is not None:
        _dict['value'] = self.value._to_dict()
    return _dict
|
[
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'key'",
")",
"and",
"self",
".",
"key",
"is",
"not",
"None",
":",
"_dict",
"[",
"'key'",
"]",
"=",
"self",
".",
"key",
".",
"_to_dict",
"(",
")",
"if",
"hasattr",
"(",
"self",
",",
"'value'",
")",
"and",
"self",
".",
"value",
"is",
"not",
"None",
":",
"_dict",
"[",
"'value'",
"]",
"=",
"self",
".",
"value",
".",
"_to_dict",
"(",
")",
"return",
"_dict"
] |
Return a json dictionary representing this model.
|
[
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] |
python | train | 41.75 |
lsbardel/python-stdnet | stdnet/odm/query.py | https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/query.py#L445-L456 |
def union(self, *queries):
    '''Return a new :class:`Query` obtained form the union of this
    :class:`Query` with one or more *queries*.

    For example, lets say we want to have the union
    of two queries obtained from the :meth:`filter` method::

        query = session.query(MyModel)
        qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
    '''
    q = self._clone()
    q.unions += queries
    return q
|
[
"def",
"union",
"(",
"self",
",",
"*",
"queries",
")",
":",
"q",
"=",
"self",
".",
"_clone",
"(",
")",
"q",
".",
"unions",
"+=",
"queries",
"return",
"q"
] |
Return a new :class:`Query` obtained form the union of this
:class:`Query` with one or more *queries*.

For example, lets say we want to have the union
of two queries obtained from the :meth:`filter` method::

    query = session.query(MyModel)
    qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
|
[
"Return",
"a",
"new",
":",
"class",
":",
"Query",
"obtained",
"form",
"the",
"union",
"of",
"this",
":",
"class",
":",
"Query",
"with",
"one",
"or",
"more",
"*",
"queries",
"*",
".",
"For",
"example",
"lets",
"say",
"we",
"want",
"to",
"have",
"the",
"union",
"of",
"two",
"queries",
"obtained",
"from",
"the",
":",
"meth",
":",
"filter",
"method",
"::",
"query",
"=",
"session",
".",
"query",
"(",
"MyModel",
")",
"qs",
"=",
"query",
".",
"filter",
"(",
"field1",
"=",
"bla",
")",
".",
"union",
"(",
"query",
".",
"filter",
"(",
"field2",
"=",
"foo",
"))"
] |
python | train | 35.833333 |
UCBerkeleySETI/blimpy | blimpy/waterfall.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/waterfall.py#L473-L496 |
def __get_chunk_dimensions(self):
    """ Sets the chunking dimmentions depending on the file type.
    """
    #Usually '.0000.' is in self.filename
    if np.abs(self.header[b'foff']) < 1e-5:
        logger.info('Detecting high frequency resolution data.')
        chunk_dim = (1,1,1048576)  #1048576 is the number of channels in a coarse channel.
        return chunk_dim
    #Usually '.0001.' is in self.filename
    elif np.abs(self.header[b'tsamp']) < 1e-3:
        logger.info('Detecting high time resolution data.')
        chunk_dim = (2048,1,512)  #512 is the total number of channels per single band (ie. blc00)
        return chunk_dim
    #Usually '.0002.' is in self.filename
    elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
        logger.info('Detecting intermediate frequency and time resolution data.')
        chunk_dim = (10,1,65536)  #65536 is the total number of channels per single band (ie. blc00)
        # chunk_dim = (1,1,65536/4)
        return chunk_dim
    else:
        logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
        chunk_dim = (1,1,512)
        return chunk_dim
|
[
"def",
"__get_chunk_dimensions",
"(",
"self",
")",
":",
"#Usually '.0000.' is in self.filename",
"if",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"<",
"1e-5",
":",
"logger",
".",
"info",
"(",
"'Detecting high frequency resolution data.'",
")",
"chunk_dim",
"=",
"(",
"1",
",",
"1",
",",
"1048576",
")",
"#1048576 is the number of channels in a coarse channel.",
"return",
"chunk_dim",
"#Usually '.0001.' is in self.filename",
"elif",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'tsamp'",
"]",
")",
"<",
"1e-3",
":",
"logger",
".",
"info",
"(",
"'Detecting high time resolution data.'",
")",
"chunk_dim",
"=",
"(",
"2048",
",",
"1",
",",
"512",
")",
"#512 is the total number of channels per single band (ie. blc00)",
"return",
"chunk_dim",
"#Usually '.0002.' is in self.filename",
"elif",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
"<",
"1e-2",
"and",
"np",
".",
"abs",
"(",
"self",
".",
"header",
"[",
"b'foff'",
"]",
")",
">=",
"1e-5",
":",
"logger",
".",
"info",
"(",
"'Detecting intermediate frequency and time resolution data.'",
")",
"chunk_dim",
"=",
"(",
"10",
",",
"1",
",",
"65536",
")",
"#65536 is the total number of channels per single band (ie. blc00)",
"# chunk_dim = (1,1,65536/4)",
"return",
"chunk_dim",
"else",
":",
"logger",
".",
"warning",
"(",
"'File format not known. Will use minimum chunking. NOT OPTIMAL.'",
")",
"chunk_dim",
"=",
"(",
"1",
",",
"1",
",",
"512",
")",
"return",
"chunk_dim"
] |
Sets the chunking dimmentions depending on the file type.
|
[
"Sets",
"the",
"chunking",
"dimmentions",
"depending",
"on",
"the",
"file",
"type",
"."
] |
python | test | 51.583333 |
ianclegg/ntlmlib | ntlmlib/security.py | https://github.com/ianclegg/ntlmlib/blob/49eadfe4701bcce84a4ca9cbab5b6d5d72eaad05/ntlmlib/security.py#L260-L288 |
def sign(self, message):
    """
    Generates a signature for the supplied message using NTLM2 Session Security
    Note: [MS-NLMP] Section 3.4.4
    The message signature for NTLM with extended session security is a 16-byte value that contains the following
    components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
    - A 4-byte version-number value that is set to 1
    - The first eight bytes of the message's HMAC_MD5
    - The 4-byte sequence number (SeqNum)
    :param message: The message to be signed
    :return: The signature for supplied message
    """
    hmac_context = hmac.new(self.outgoing_signing_key)
    hmac_context.update(struct.pack('<i', self.outgoing_sequence) + message)
    # If a key exchange key is negotiated the first 8 bytes of the HMAC MD5 are encrypted with RC4
    if self.key_exchange:
        checksum = self.outgoing_seal.update(hmac_context.digest()[:8])
    else:
        checksum = hmac_context.digest()[:8]
    mac = _Ntlm2MessageSignature()
    mac['checksum'] = struct.unpack('<q', checksum)[0]
    mac['sequence'] = self.outgoing_sequence
    #logger.debug("Signing Sequence Number: %s", str(self.outgoing_sequence))
    # Increment the sequence number after signing each message
    self.outgoing_sequence += 1
    return str(mac)
|
[
"def",
"sign",
"(",
"self",
",",
"message",
")",
":",
"hmac_context",
"=",
"hmac",
".",
"new",
"(",
"self",
".",
"outgoing_signing_key",
")",
"hmac_context",
".",
"update",
"(",
"struct",
".",
"pack",
"(",
"'<i'",
",",
"self",
".",
"outgoing_sequence",
")",
"+",
"message",
")",
"# If a key exchange key is negotiated the first 8 bytes of the HMAC MD5 are encrypted with RC4",
"if",
"self",
".",
"key_exchange",
":",
"checksum",
"=",
"self",
".",
"outgoing_seal",
".",
"update",
"(",
"hmac_context",
".",
"digest",
"(",
")",
"[",
":",
"8",
"]",
")",
"else",
":",
"checksum",
"=",
"hmac_context",
".",
"digest",
"(",
")",
"[",
":",
"8",
"]",
"mac",
"=",
"_Ntlm2MessageSignature",
"(",
")",
"mac",
"[",
"'checksum'",
"]",
"=",
"struct",
".",
"unpack",
"(",
"'<q'",
",",
"checksum",
")",
"[",
"0",
"]",
"mac",
"[",
"'sequence'",
"]",
"=",
"self",
".",
"outgoing_sequence",
"#logger.debug(\"Signing Sequence Number: %s\", str(self.outgoing_sequence))",
"# Increment the sequence number after signing each message",
"self",
".",
"outgoing_sequence",
"+=",
"1",
"return",
"str",
"(",
"mac",
")"
] |
Generates a signature for the supplied message using NTLM2 Session Security
Note: [MS-NLMP] Section 3.4.4
The message signature for NTLM with extended session security is a 16-byte value that contains the following
components, as described by the NTLMSSP_MESSAGE_SIGNATURE structure:
- A 4-byte version-number value that is set to 1
- The first eight bytes of the message's HMAC_MD5
- The 4-byte sequence number (SeqNum)
:param message: The message to be signed
:return: The signature for supplied message
|
[
"Generates",
"a",
"signature",
"for",
"the",
"supplied",
"message",
"using",
"NTLM2",
"Session",
"Security",
"Note",
":",
"[",
"MS",
"-",
"NLMP",
"]",
"Section",
"3",
".",
"4",
".",
"4",
"The",
"message",
"signature",
"for",
"NTLM",
"with",
"extended",
"session",
"security",
"is",
"a",
"16",
"-",
"byte",
"value",
"that",
"contains",
"the",
"following",
"components",
"as",
"described",
"by",
"the",
"NTLMSSP_MESSAGE_SIGNATURE",
"structure",
":",
"-",
"A",
"4",
"-",
"byte",
"version",
"-",
"number",
"value",
"that",
"is",
"set",
"to",
"1",
"-",
"The",
"first",
"eight",
"bytes",
"of",
"the",
"message",
"s",
"HMAC_MD5",
"-",
"The",
"4",
"-",
"byte",
"sequence",
"number",
"(",
"SeqNum",
")",
":",
"param",
"message",
":",
"The",
"message",
"to",
"be",
"signed",
":",
"return",
":",
"The",
"signature",
"for",
"supplied",
"message"
] |
python | train | 47.310345 |
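The 16-byte NTLMSSP_MESSAGE_SIGNATURE layout described in the docstring above can be sketched directly with `struct`; this is an illustration of the [MS-NLMP] wire format, not code from ntlmlib:

    import struct

    def pack_ntlm2_signature(checksum8: bytes, seq_num: int) -> bytes:
        # 4-byte version (always 1) + first 8 bytes of HMAC-MD5 + 4-byte SeqNum
        assert len(checksum8) == 8
        return struct.pack('<I', 1) + checksum8 + struct.pack('<I', seq_num)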
deeplearning4j/pydl4j | pydl4j/downloader.py | https://github.com/deeplearning4j/pydl4j/blob/63f8a1cae2afb4b08dbfe28ef8e08de741f0d3cd/pydl4j/downloader.py#L23-L82 |
def download(url, file_name):
    r = requests.get(url, stream=True)
    file_size = int(r.headers['Content-length'])
    '''
    if py3:
        file_size = int(u.getheader("Content-Length")[0])
    else:
        file_size = int(u.info().getheaders("Content-Length")[0])
    '''
    file_exists = False
    if os.path.isfile(file_name):
        local_file_size = os.path.getsize(file_name)
        if local_file_size == file_size:
            sha1_file = file_name + '.sha1'
            if os.path.isfile(sha1_file):
                print('sha1 found')
                with open(sha1_file) as f:
                    expected_sha1 = f.read()
                BLOCKSIZE = 65536
                sha1 = hashlib.sha1()
                with open(file_name) as f:
                    buff = f.read(BLOCKSIZE)
                    while len(buff) > 0:
                        sha1.update(buff)
                        buff = f.read(BLOCKSIZE)
                if expected_sha1 == sha1:
                    file_exists = True
                else:
                    print("File corrupt. Downloading again.")
                    os.remove(file_name)
            else:
                file_exists = True
        else:
            print("File corrupt. Downloading again.")
            os.remove(file_name)
    if not file_exists:
        factor = int(math.floor(math.log(file_size) / math.log(1024)))
        display_file_size = str(file_size / 1024 ** factor) + \
            ['B', 'KB', 'MB', 'GB', 'TB', 'PB'][factor]
        print("Source: " + url)
        print("Destination " + file_name)
        print("Size: " + display_file_size)
        file_size_dl = 0
        block_sz = 8192
        f = open(file_name, 'wb')
        pbar = ProgressBar(file_size)
        for chunk in r.iter_content(chunk_size=block_sz):
            if not chunk:
                continue
            chunk_size = len(chunk)
            file_size_dl += chunk_size
            f.write(chunk)
            pbar.update(chunk_size)
            # status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
            # status = status + chr(8)*(len(status)+1)
            # print(status)
        f.close()
    else:
        print("File already exists - " + file_name)
    return True
|
[
"def",
"download",
"(",
"url",
",",
"file_name",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"file_size",
"=",
"int",
"(",
"r",
".",
"headers",
"[",
"'Content-length'",
"]",
")",
"file_exists",
"=",
"False",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"local_file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"file_name",
")",
"if",
"local_file_size",
"==",
"file_size",
":",
"sha1_file",
"=",
"file_name",
"+",
"'.sha1'",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"sha1_file",
")",
":",
"print",
"(",
"'sha1 found'",
")",
"with",
"open",
"(",
"sha1_file",
")",
"as",
"f",
":",
"expected_sha1",
"=",
"f",
".",
"read",
"(",
")",
"BLOCKSIZE",
"=",
"65536",
"sha1",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"with",
"open",
"(",
"file_name",
")",
"as",
"f",
":",
"buff",
"=",
"f",
".",
"read",
"(",
"BLOCKSIZE",
")",
"while",
"len",
"(",
"buff",
")",
">",
"0",
":",
"sha1",
".",
"update",
"(",
"buff",
")",
"buff",
"=",
"f",
".",
"read",
"(",
"BLOCKSIZE",
")",
"if",
"expected_sha1",
"==",
"sha1",
":",
"file_exists",
"=",
"True",
"else",
":",
"print",
"(",
"\"File corrupt. Downloading again.\"",
")",
"os",
".",
"remove",
"(",
"file_name",
")",
"else",
":",
"file_exists",
"=",
"True",
"else",
":",
"print",
"(",
"\"File corrupt. Downloading again.\"",
")",
"os",
".",
"remove",
"(",
"file_name",
")",
"if",
"not",
"file_exists",
":",
"factor",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"math",
".",
"log",
"(",
"file_size",
")",
"/",
"math",
".",
"log",
"(",
"1024",
")",
")",
")",
"display_file_size",
"=",
"str",
"(",
"file_size",
"/",
"1024",
"**",
"factor",
")",
"+",
"[",
"'B'",
",",
"'KB'",
",",
"'MB'",
",",
"'GB'",
",",
"'TB'",
",",
"'PB'",
"]",
"[",
"factor",
"]",
"print",
"(",
"\"Source: \"",
"+",
"url",
")",
"print",
"(",
"\"Destination \"",
"+",
"file_name",
")",
"print",
"(",
"\"Size: \"",
"+",
"display_file_size",
")",
"file_size_dl",
"=",
"0",
"block_sz",
"=",
"8192",
"f",
"=",
"open",
"(",
"file_name",
",",
"'wb'",
")",
"pbar",
"=",
"ProgressBar",
"(",
"file_size",
")",
"for",
"chunk",
"in",
"r",
".",
"iter_content",
"(",
"chunk_size",
"=",
"block_sz",
")",
":",
"if",
"not",
"chunk",
":",
"continue",
"chunk_size",
"=",
"len",
"(",
"chunk",
")",
"file_size_dl",
"+=",
"chunk_size",
"f",
".",
"write",
"(",
"chunk",
")",
"pbar",
".",
"update",
"(",
"chunk_size",
")",
"# status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / file_size)\r",
"# status = status + chr(8)*(len(status)+1)\r",
"# print(status)\r",
"f",
".",
"close",
"(",
")",
"else",
":",
"print",
"(",
"\"File already exists - \"",
"+",
"file_name",
")",
"return",
"True"
] |
if py3:
    file_size = int(u.getheader("Content-Length")[0])
else:
    file_size = int(u.info().getheaders("Content-Length")[0])
|
[
"if",
"py3",
":",
"file_size",
"=",
"int",
"(",
"u",
".",
"getheader",
"(",
"Content",
"-",
"Length",
")",
"[",
"0",
"]",
")",
"else",
":",
"file_size",
"=",
"int",
"(",
"u",
".",
"info",
"()",
".",
"getheaders",
"(",
"Content",
"-",
"Length",
")",
"[",
"0",
"]",
")"
] |
python | train | 37.816667 |
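One thing worth flagging in the row above: the verification branch compares the string `expected_sha1` against the `hashlib` object itself (`expected_sha1 == sha1`), and hashes a file opened in text mode, so the check can never pass. A corrected standalone sketch of the intended check, with hypothetical names:

    import hashlib

    def sha1_matches(file_name, expected_sha1, blocksize=65536):
        sha1 = hashlib.sha1()
        with open(file_name, 'rb') as f:  # binary mode for hashing
            for buff in iter(lambda: f.read(blocksize), b''):
                sha1.update(buff)
        return sha1.hexdigest() == expected_sha1.strip()  # compare hex digests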
libtcod/python-tcod | tcod/image.py | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/image.py#L59-L66 |
def clear(self, color: Tuple[int, int, int]) -> None:
    """Fill this entire Image with color.

    Args:
        color (Union[Tuple[int, int, int], Sequence[int]]):
            An (r, g, b) sequence or Color instance.
    """
    lib.TCOD_image_clear(self.image_c, color)
|
[
"def",
"clear",
"(",
"self",
",",
"color",
":",
"Tuple",
"[",
"int",
",",
"int",
",",
"int",
"]",
")",
"->",
"None",
":",
"lib",
".",
"TCOD_image_clear",
"(",
"self",
".",
"image_c",
",",
"color",
")"
] |
Fill this entire Image with color.

Args:
    color (Union[Tuple[int, int, int], Sequence[int]]):
        An (r, g, b) sequence or Color instance.
|
[
"Fill",
"this",
"entire",
"Image",
"with",
"color",
"."
] |
python | train | 36.25 |
has2k1/plotnine | plotnine/guides/guide_colorbar.py | https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L295-L321 |
def add_segmented_colorbar(da, colors, direction):
    """
    Add 'non-rastered' colorbar to DrawingArea
    """
    nbreak = len(colors)
    if direction == 'vertical':
        linewidth = da.height/nbreak
        verts = [None] * nbreak
        x1, x2 = 0, da.width
        for i, color in enumerate(colors):
            y1 = i * linewidth
            y2 = y1 + linewidth
            verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
    else:
        linewidth = da.width/nbreak
        verts = [None] * nbreak
        y1, y2 = 0, da.height
        for i, color in enumerate(colors):
            x1 = i * linewidth
            x2 = x1 + linewidth
            verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))

    coll = mcoll.PolyCollection(verts,
                                facecolors=colors,
                                linewidth=0,
                                antialiased=False)
    da.add_artist(coll)
|
[
"def",
"add_segmented_colorbar",
"(",
"da",
",",
"colors",
",",
"direction",
")",
":",
"nbreak",
"=",
"len",
"(",
"colors",
")",
"if",
"direction",
"==",
"'vertical'",
":",
"linewidth",
"=",
"da",
".",
"height",
"/",
"nbreak",
"verts",
"=",
"[",
"None",
"]",
"*",
"nbreak",
"x1",
",",
"x2",
"=",
"0",
",",
"da",
".",
"width",
"for",
"i",
",",
"color",
"in",
"enumerate",
"(",
"colors",
")",
":",
"y1",
"=",
"i",
"*",
"linewidth",
"y2",
"=",
"y1",
"+",
"linewidth",
"verts",
"[",
"i",
"]",
"=",
"(",
"(",
"x1",
",",
"y1",
")",
",",
"(",
"x1",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y1",
")",
")",
"else",
":",
"linewidth",
"=",
"da",
".",
"width",
"/",
"nbreak",
"verts",
"=",
"[",
"None",
"]",
"*",
"nbreak",
"y1",
",",
"y2",
"=",
"0",
",",
"da",
".",
"height",
"for",
"i",
",",
"color",
"in",
"enumerate",
"(",
"colors",
")",
":",
"x1",
"=",
"i",
"*",
"linewidth",
"x2",
"=",
"x1",
"+",
"linewidth",
"verts",
"[",
"i",
"]",
"=",
"(",
"(",
"x1",
",",
"y1",
")",
",",
"(",
"x1",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y2",
")",
",",
"(",
"x2",
",",
"y1",
")",
")",
"coll",
"=",
"mcoll",
".",
"PolyCollection",
"(",
"verts",
",",
"facecolors",
"=",
"colors",
",",
"linewidth",
"=",
"0",
",",
"antialiased",
"=",
"False",
")",
"da",
".",
"add_artist",
"(",
"coll",
")"
] |
Add 'non-rastered' colorbar to DrawingArea
|
[
"Add",
"non",
"-",
"rastered",
"colorbar",
"to",
"DrawingArea"
] |
python | train | 33.37037 |
genialis/resolwe | resolwe/flow/models/entity.py | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/entity.py#L34-L37 |
def move_to_collection(self, source_collection, destination_collection):
    """Move entities from source to destination collection."""
    for entity in self:
        entity.move_to_collection(source_collection, destination_collection)
|
[
"def",
"move_to_collection",
"(",
"self",
",",
"source_collection",
",",
"destination_collection",
")",
":",
"for",
"entity",
"in",
"self",
":",
"entity",
".",
"move_to_collection",
"(",
"source_collection",
",",
"destination_collection",
")"
] |
Move entities from source to destination collection.
|
[
"Move",
"entities",
"from",
"source",
"to",
"destination",
"collection",
"."
] |
python | train | 61.25 |
elifesciences/elife-tools | elifetools/parseJATS.py | https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L298-L308 |
def subject_area(soup):
    """
    Find the subject areas from article-categories subject tags
    """
    subject_area = []
    tags = raw_parser.subject_area(soup)
    for tag in tags:
        subject_area.append(node_text(tag))
    return subject_area
|
[
"def",
"subject_area",
"(",
"soup",
")",
":",
"subject_area",
"=",
"[",
"]",
"tags",
"=",
"raw_parser",
".",
"subject_area",
"(",
"soup",
")",
"for",
"tag",
"in",
"tags",
":",
"subject_area",
".",
"append",
"(",
"node_text",
"(",
"tag",
")",
")",
"return",
"subject_area"
] |
Find the subject areas from article-categories subject tags
|
[
"Find",
"the",
"subject",
"areas",
"from",
"article",
"-",
"categories",
"subject",
"tags"
] |
python | train | 22.454545 |
saltstack/salt | salt/loader.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/loader.py#L515-L528 |
def roster(opts, runner=None, utils=None, whitelist=None):
    '''
    Returns the roster modules
    '''
    return LazyLoader(
        _module_dirs(opts, 'roster'),
        opts,
        tag='roster',
        whitelist=whitelist,
        pack={
            '__runner__': runner,
            '__utils__': utils,
        },
    )
|
[
"def",
"roster",
"(",
"opts",
",",
"runner",
"=",
"None",
",",
"utils",
"=",
"None",
",",
"whitelist",
"=",
"None",
")",
":",
"return",
"LazyLoader",
"(",
"_module_dirs",
"(",
"opts",
",",
"'roster'",
")",
",",
"opts",
",",
"tag",
"=",
"'roster'",
",",
"whitelist",
"=",
"whitelist",
",",
"pack",
"=",
"{",
"'__runner__'",
":",
"runner",
",",
"'__utils__'",
":",
"utils",
",",
"}",
",",
")"
] |
Returns the roster modules
|
[
"Returns",
"the",
"roster",
"modules"
] |
python | train | 22.571429 |
nuclio/nuclio-sdk-py | nuclio_sdk/event.py | https://github.com/nuclio/nuclio-sdk-py/blob/5af9ffc19a0d96255ff430bc358be9cd7a57f424/nuclio_sdk/event.py#L74-L99 |
def from_json(data):
    """Decode event encoded as JSON by processor"""
    parsed_data = json.loads(data)
    trigger = TriggerInfo(
        parsed_data['trigger']['class'],
        parsed_data['trigger']['kind'],
    )
    # extract content type, needed to decode body
    content_type = parsed_data['content_type']
    return Event(body=Event.decode_body(parsed_data['body'], content_type),
                 content_type=content_type,
                 trigger=trigger,
                 fields=parsed_data.get('fields'),
                 headers=parsed_data.get('headers'),
                 _id=parsed_data['id'],
                 method=parsed_data['method'],
                 path=parsed_data['path'],
                 size=parsed_data['size'],
                 timestamp=datetime.datetime.utcfromtimestamp(parsed_data['timestamp']),
                 url=parsed_data['url'],
                 _type=parsed_data['type'],
                 type_version=parsed_data['type_version'],
                 version=parsed_data['version'])
|
[
"def",
"from_json",
"(",
"data",
")",
":",
"parsed_data",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"trigger",
"=",
"TriggerInfo",
"(",
"parsed_data",
"[",
"'trigger'",
"]",
"[",
"'class'",
"]",
",",
"parsed_data",
"[",
"'trigger'",
"]",
"[",
"'kind'",
"]",
",",
")",
"# extract content type, needed to decode body",
"content_type",
"=",
"parsed_data",
"[",
"'content_type'",
"]",
"return",
"Event",
"(",
"body",
"=",
"Event",
".",
"decode_body",
"(",
"parsed_data",
"[",
"'body'",
"]",
",",
"content_type",
")",
",",
"content_type",
"=",
"content_type",
",",
"trigger",
"=",
"trigger",
",",
"fields",
"=",
"parsed_data",
".",
"get",
"(",
"'fields'",
")",
",",
"headers",
"=",
"parsed_data",
".",
"get",
"(",
"'headers'",
")",
",",
"_id",
"=",
"parsed_data",
"[",
"'id'",
"]",
",",
"method",
"=",
"parsed_data",
"[",
"'method'",
"]",
",",
"path",
"=",
"parsed_data",
"[",
"'path'",
"]",
",",
"size",
"=",
"parsed_data",
"[",
"'size'",
"]",
",",
"timestamp",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"parsed_data",
"[",
"'timestamp'",
"]",
")",
",",
"url",
"=",
"parsed_data",
"[",
"'url'",
"]",
",",
"_type",
"=",
"parsed_data",
"[",
"'type'",
"]",
",",
"type_version",
"=",
"parsed_data",
"[",
"'type_version'",
"]",
",",
"version",
"=",
"parsed_data",
"[",
"'version'",
"]",
")"
] |
Decode event encoded as JSON by processor
|
[
"Decode",
"event",
"encoded",
"as",
"JSON",
"by",
"processor"
] |
python | train | 42.192308 |
tensorflow/tensor2tensor | tensor2tensor/data_generators/text_encoder.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L750-L866 |
def build_from_token_counts(self,
                            token_counts,
                            min_count,
                            num_iterations=4,
                            reserved_tokens=None,
                            max_subtoken_length=None):
  """Train a SubwordTextEncoder based on a dictionary of word counts.

  Args:
    token_counts: a dictionary of Unicode strings to int.
    min_count: an integer - discard subtokens with lower counts.
    num_iterations: an integer. how many iterations of refinement.
    reserved_tokens: List of reserved tokens. The global variable
      `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
      argument is `None`, it will use `RESERVED_TOKENS`.
    max_subtoken_length: Maximum length of a subtoken. If this is not set,
      then the runtime and memory use of creating the vocab is quadratic in
      the length of the longest token. If this is set, then it is instead
      O(max_subtoken_length * length of longest token).

  Raises:
    ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
      is not clear what the space is being reserved for, or when it will be
      filled in.
  """
  if reserved_tokens is None:
    reserved_tokens = RESERVED_TOKENS
  else:
    # There is not complete freedom in replacing RESERVED_TOKENS.
    for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
      if default != proposed:
        raise ValueError("RESERVED_TOKENS must be a prefix of "
                         "reserved_tokens.")

  # Initialize the alphabet. Note, this must include reserved tokens or it can
  # result in encoding failures.
  alphabet_tokens = chain(six.iterkeys(token_counts),
                          [native_to_unicode(t) for t in reserved_tokens])
  self._init_alphabet_from_tokens(alphabet_tokens)

  # Bootstrap the initial list of subtokens with the characters from the
  # alphabet plus the escaping characters.
  self._init_subtokens_from_list(list(self._alphabet),
                                 reserved_tokens=reserved_tokens)

  # We build iteratively. On each iteration, we segment all the words,
  # then count the resulting potential subtokens, keeping the ones
  # with high enough counts for our new vocabulary.
  if min_count < 1:
    min_count = 1
  for i in range(num_iterations):
    tf.logging.info("Iteration {0}".format(i))

    # Collect all substrings of the encoded token that break along current
    # subtoken boundaries.
    subtoken_counts = collections.defaultdict(int)
    for token, count in six.iteritems(token_counts):
      iter_start_time = time.time()
      escaped_token = _escape_token(token, self._alphabet)
      subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
      start = 0
      for subtoken in subtokens:
        last_position = len(escaped_token) + 1
        if max_subtoken_length is not None:
          last_position = min(last_position, start + max_subtoken_length)
        for end in range(start + 1, last_position):
          new_subtoken = escaped_token[start:end]
          subtoken_counts[new_subtoken] += count
        start += len(subtoken)
      iter_time_secs = time.time() - iter_start_time
      if iter_time_secs > 0.1:
        tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
                        "setting Text2TextProblem.max_subtoken_length to a "
                        "smaller value.".format(token, iter_time_secs))

    # Array of sets of candidate subtoken strings, by length.
    len_to_subtoken_strings = []
    for subtoken_string, count in six.iteritems(subtoken_counts):
      lsub = len(subtoken_string)
      if count >= min_count:
        while len(len_to_subtoken_strings) <= lsub:
          len_to_subtoken_strings.append(set())
        len_to_subtoken_strings[lsub].add(subtoken_string)

    # Consider the candidates longest to shortest, so that if we accept
    # a longer subtoken string, we can decrement the counts of its prefixes.
    new_subtoken_strings = []
    for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
      subtoken_strings = len_to_subtoken_strings[lsub]
      for subtoken_string in subtoken_strings:
        count = subtoken_counts[subtoken_string]
        if count >= min_count:
          # Exclude alphabet tokens here, as they must be included later,
          # explicitly, regardless of count.
          if subtoken_string not in self._alphabet:
            new_subtoken_strings.append((count, subtoken_string))
          for l in range(1, lsub):
            subtoken_counts[subtoken_string[:l]] -= count

    # Include the alphabet explicitly to guarantee all strings are encodable.
    new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
                                for a in self._alphabet)
    new_subtoken_strings.sort(reverse=True)

    # Reinitialize to the candidate vocabulary.
    new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
    if reserved_tokens:
      escaped_reserved_tokens = [
          _escape_token(native_to_unicode(t), self._alphabet)
          for t in reserved_tokens
      ]
      new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings

    self._init_subtokens_from_list(new_subtoken_strings)
    tf.logging.info("vocab_size = %d" % self.vocab_size)
|
[
"def",
"build_from_token_counts",
"(",
"self",
",",
"token_counts",
",",
"min_count",
",",
"num_iterations",
"=",
"4",
",",
"reserved_tokens",
"=",
"None",
",",
"max_subtoken_length",
"=",
"None",
")",
":",
"if",
"reserved_tokens",
"is",
"None",
":",
"reserved_tokens",
"=",
"RESERVED_TOKENS",
"else",
":",
"# There is not complete freedom in replacing RESERVED_TOKENS.",
"for",
"default",
",",
"proposed",
"in",
"zip",
"(",
"RESERVED_TOKENS",
",",
"reserved_tokens",
")",
":",
"if",
"default",
"!=",
"proposed",
":",
"raise",
"ValueError",
"(",
"\"RESERVED_TOKENS must be a prefix of \"",
"\"reserved_tokens.\"",
")",
"# Initialize the alphabet. Note, this must include reserved tokens or it can",
"# result in encoding failures.",
"alphabet_tokens",
"=",
"chain",
"(",
"six",
".",
"iterkeys",
"(",
"token_counts",
")",
",",
"[",
"native_to_unicode",
"(",
"t",
")",
"for",
"t",
"in",
"reserved_tokens",
"]",
")",
"self",
".",
"_init_alphabet_from_tokens",
"(",
"alphabet_tokens",
")",
"# Bootstrap the initial list of subtokens with the characters from the",
"# alphabet plus the escaping characters.",
"self",
".",
"_init_subtokens_from_list",
"(",
"list",
"(",
"self",
".",
"_alphabet",
")",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"# We build iteratively. On each iteration, we segment all the words,",
"# then count the resulting potential subtokens, keeping the ones",
"# with high enough counts for our new vocabulary.",
"if",
"min_count",
"<",
"1",
":",
"min_count",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"num_iterations",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Iteration {0}\"",
".",
"format",
"(",
"i",
")",
")",
"# Collect all substrings of the encoded token that break along current",
"# subtoken boundaries.",
"subtoken_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"token",
",",
"count",
"in",
"six",
".",
"iteritems",
"(",
"token_counts",
")",
":",
"iter_start_time",
"=",
"time",
".",
"time",
"(",
")",
"escaped_token",
"=",
"_escape_token",
"(",
"token",
",",
"self",
".",
"_alphabet",
")",
"subtokens",
"=",
"self",
".",
"_escaped_token_to_subtoken_strings",
"(",
"escaped_token",
")",
"start",
"=",
"0",
"for",
"subtoken",
"in",
"subtokens",
":",
"last_position",
"=",
"len",
"(",
"escaped_token",
")",
"+",
"1",
"if",
"max_subtoken_length",
"is",
"not",
"None",
":",
"last_position",
"=",
"min",
"(",
"last_position",
",",
"start",
"+",
"max_subtoken_length",
")",
"for",
"end",
"in",
"range",
"(",
"start",
"+",
"1",
",",
"last_position",
")",
":",
"new_subtoken",
"=",
"escaped_token",
"[",
"start",
":",
"end",
"]",
"subtoken_counts",
"[",
"new_subtoken",
"]",
"+=",
"count",
"start",
"+=",
"len",
"(",
"subtoken",
")",
"iter_time_secs",
"=",
"time",
".",
"time",
"(",
")",
"-",
"iter_start_time",
"if",
"iter_time_secs",
">",
"0.1",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"u\"Processing token [{0}] took {1} seconds, consider \"",
"\"setting Text2TextProblem.max_subtoken_length to a \"",
"\"smaller value.\"",
".",
"format",
"(",
"token",
",",
"iter_time_secs",
")",
")",
"# Array of sets of candidate subtoken strings, by length.",
"len_to_subtoken_strings",
"=",
"[",
"]",
"for",
"subtoken_string",
",",
"count",
"in",
"six",
".",
"iteritems",
"(",
"subtoken_counts",
")",
":",
"lsub",
"=",
"len",
"(",
"subtoken_string",
")",
"if",
"count",
">=",
"min_count",
":",
"while",
"len",
"(",
"len_to_subtoken_strings",
")",
"<=",
"lsub",
":",
"len_to_subtoken_strings",
".",
"append",
"(",
"set",
"(",
")",
")",
"len_to_subtoken_strings",
"[",
"lsub",
"]",
".",
"add",
"(",
"subtoken_string",
")",
"# Consider the candidates longest to shortest, so that if we accept",
"# a longer subtoken string, we can decrement the counts of its prefixes.",
"new_subtoken_strings",
"=",
"[",
"]",
"for",
"lsub",
"in",
"range",
"(",
"len",
"(",
"len_to_subtoken_strings",
")",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"subtoken_strings",
"=",
"len_to_subtoken_strings",
"[",
"lsub",
"]",
"for",
"subtoken_string",
"in",
"subtoken_strings",
":",
"count",
"=",
"subtoken_counts",
"[",
"subtoken_string",
"]",
"if",
"count",
">=",
"min_count",
":",
"# Exclude alphabet tokens here, as they must be included later,",
"# explicitly, regardless of count.",
"if",
"subtoken_string",
"not",
"in",
"self",
".",
"_alphabet",
":",
"new_subtoken_strings",
".",
"append",
"(",
"(",
"count",
",",
"subtoken_string",
")",
")",
"for",
"l",
"in",
"range",
"(",
"1",
",",
"lsub",
")",
":",
"subtoken_counts",
"[",
"subtoken_string",
"[",
":",
"l",
"]",
"]",
"-=",
"count",
"# Include the alphabet explicitly to guarantee all strings are encodable.",
"new_subtoken_strings",
".",
"extend",
"(",
"(",
"subtoken_counts",
".",
"get",
"(",
"a",
",",
"0",
")",
",",
"a",
")",
"for",
"a",
"in",
"self",
".",
"_alphabet",
")",
"new_subtoken_strings",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"# Reinitialize to the candidate vocabulary.",
"new_subtoken_strings",
"=",
"[",
"subtoken",
"for",
"_",
",",
"subtoken",
"in",
"new_subtoken_strings",
"]",
"if",
"reserved_tokens",
":",
"escaped_reserved_tokens",
"=",
"[",
"_escape_token",
"(",
"native_to_unicode",
"(",
"t",
")",
",",
"self",
".",
"_alphabet",
")",
"for",
"t",
"in",
"reserved_tokens",
"]",
"new_subtoken_strings",
"=",
"escaped_reserved_tokens",
"+",
"new_subtoken_strings",
"self",
".",
"_init_subtokens_from_list",
"(",
"new_subtoken_strings",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"vocab_size = %d\"",
"%",
"self",
".",
"vocab_size",
")"
] |
Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
|
[
"Train",
"a",
"SubwordTextEncoder",
"based",
"on",
"a",
"dictionary",
"of",
"word",
"counts",
"."
] |
python
|
train
| 46.393162 |
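A minimal, self-contained sketch of the substring-counting step in build_from_token_counts above. The real loop only starts substrings at the current segmentation's subtoken boundaries; this sketch counts every start position for simplicity, and all names are illustrative.

import collections

def count_subtokens(token_counts, max_subtoken_length=4):
    # Credit every substring of each token with that token's count,
    # mirroring the inner counting loop above.
    subtoken_counts = collections.defaultdict(int)
    for token, count in token_counts.items():
        for start in range(len(token)):
            last_position = min(len(token) + 1, start + max_subtoken_length + 1)
            for end in range(start + 1, last_position):
                subtoken_counts[token[start:end]] += count
    return subtoken_counts

counts = count_subtokens({"low": 5, "lower": 2})
assert counts["low"] == 7 and counts["er"] == 2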
Alignak-monitoring/alignak
|
alignak/external_command.py
|
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1014-L1048
|
def add_svc_comment(self, service, author, comment):
"""Add a service comment
Format of the line that triggers function call::
ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment>
:param service: service to add the comment
:type service: alignak.objects.service.Service
:param author: author name
:type author: str
:param comment: text comment
:type comment: str
:return: None
"""
data = {
'author': author, 'comment': comment, 'comment_type': 2, 'entry_type': 1, 'source': 1,
'expires': False, 'ref': service.uuid
}
comm = Comment(data)
service.add_comment(comm)
self.send_an_element(service.get_update_status_brok())
try:
brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s"
% (self.hosts[service.host].get_name(),
service.get_name(),
str(author, 'utf-8'), str(comment, 'utf-8')))
except TypeError:
brok = make_monitoring_log('info', "SERVICE COMMENT: %s;%s;%s;%s"
% (self.hosts[service.host].get_name(),
service.get_name(), author, comment))
self.send_an_element(brok)
self.send_an_element(comm.get_comment_brok(
self.hosts[service.host].get_name(), service.get_name()))
|
[
"def",
"add_svc_comment",
"(",
"self",
",",
"service",
",",
"author",
",",
"comment",
")",
":",
"data",
"=",
"{",
"'author'",
":",
"author",
",",
"'comment'",
":",
"comment",
",",
"'comment_type'",
":",
"2",
",",
"'entry_type'",
":",
"1",
",",
"'source'",
":",
"1",
",",
"'expires'",
":",
"False",
",",
"'ref'",
":",
"service",
".",
"uuid",
"}",
"comm",
"=",
"Comment",
"(",
"data",
")",
"service",
".",
"add_comment",
"(",
"comm",
")",
"self",
".",
"send_an_element",
"(",
"service",
".",
"get_update_status_brok",
"(",
")",
")",
"try",
":",
"brok",
"=",
"make_monitoring_log",
"(",
"'info'",
",",
"\"SERVICE COMMENT: %s;%s;%s;%s\"",
"%",
"(",
"self",
".",
"hosts",
"[",
"service",
".",
"host",
"]",
".",
"get_name",
"(",
")",
",",
"service",
".",
"get_name",
"(",
")",
",",
"str",
"(",
"author",
",",
"'utf-8'",
")",
",",
"str",
"(",
"comment",
",",
"'utf-8'",
")",
")",
")",
"except",
"TypeError",
":",
"brok",
"=",
"make_monitoring_log",
"(",
"'info'",
",",
"\"SERVICE COMMENT: %s;%s;%s;%s\"",
"%",
"(",
"self",
".",
"hosts",
"[",
"service",
".",
"host",
"]",
".",
"get_name",
"(",
")",
",",
"service",
".",
"get_name",
"(",
")",
",",
"author",
",",
"comment",
")",
")",
"self",
".",
"send_an_element",
"(",
"brok",
")",
"self",
".",
"send_an_element",
"(",
"comm",
".",
"get_comment_brok",
"(",
"self",
".",
"hosts",
"[",
"service",
".",
"host",
"]",
".",
"get_name",
"(",
")",
",",
"service",
".",
"get_name",
"(",
")",
")",
")"
] |
Add a service comment
Format of the line that triggers function call::
ADD_SVC_COMMENT;<host_name>;<service_description>;<persistent:obsolete>;<author>;<comment>
:param service: service to add the comment
:type service: alignak.objects.service.Service
:param author: author name
:type author: str
:param comment: text comment
:type comment: str
:return: None
|
[
"Add",
"a",
"service",
"comment",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] |
python
|
train
| 43.371429 |
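The try/except TypeError in add_svc_comment above exists because author and comment may arrive as bytes: the two-argument str(b, 'utf-8') decodes bytes but raises TypeError on an existing str. A standalone sketch of that fallback:

def to_text(value):
    # str(bytes, 'utf-8') decodes; str(str, 'utf-8') raises TypeError.
    try:
        return str(value, 'utf-8')
    except TypeError:
        return value

assert to_text(b'admin') == 'admin'
assert to_text('admin') == 'admin'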
observermedia/django-wordpress-rest
|
wordpress/loading.py
|
https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L463-L498
|
def load_posts(self, post_type=None, max_pages=200, status=None):
"""
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
Note: non-public statuses require authentication
:return: None
"""
logger.info("loading posts with post_type=%s", post_type)
# clear them all out so we don't get dupes
if self.purge_first:
Post.objects.filter(site_id=self.site_id, post_type=post_type).delete()
path = "sites/{}/posts".format(self.site_id)
# type allows us to pull information about pages, attachments, guest-authors, etc.
# you know, posts that aren't posts... thank you WordPress!
if not post_type:
post_type = "post"
if not status:
status = "publish"
params = {"number": self.batch_size, "type": post_type, "status": status}
self.set_posts_param_modified_after(params, post_type, status)
# get first page
response = self.get(path, params)
if not response.ok:
logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
# process all posts in the response
self.process_posts_response(response, path, params, max_pages)
|
[
"def",
"load_posts",
"(",
"self",
",",
"post_type",
"=",
"None",
",",
"max_pages",
"=",
"200",
",",
"status",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"loading posts with post_type=%s\"",
",",
"post_type",
")",
"# clear them all out so we don't get dupes",
"if",
"self",
".",
"purge_first",
":",
"Post",
".",
"objects",
".",
"filter",
"(",
"site_id",
"=",
"self",
".",
"site_id",
",",
"post_type",
"=",
"post_type",
")",
".",
"delete",
"(",
")",
"path",
"=",
"\"sites/{}/posts\"",
".",
"format",
"(",
"self",
".",
"site_id",
")",
"# type allows us to pull information about pages, attachments, guest-authors, etc.",
"# you know, posts that aren't posts... thank you WordPress!",
"if",
"not",
"post_type",
":",
"post_type",
"=",
"\"post\"",
"if",
"not",
"status",
":",
"status",
"=",
"\"publish\"",
"params",
"=",
"{",
"\"number\"",
":",
"self",
".",
"batch_size",
",",
"\"type\"",
":",
"post_type",
",",
"\"status\"",
":",
"status",
"}",
"self",
".",
"set_posts_param_modified_after",
"(",
"params",
",",
"post_type",
",",
"status",
")",
"# get first page",
"response",
"=",
"self",
".",
"get",
"(",
"path",
",",
"params",
")",
"if",
"not",
"response",
".",
"ok",
":",
"logger",
".",
"warning",
"(",
"\"Response NOT OK! status_code=%s\\n%s\"",
",",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
"# process all posts in the response",
"self",
".",
"process_posts_response",
"(",
"response",
",",
"path",
",",
"params",
",",
"max_pages",
")"
] |
Load all WordPress posts of a given post_type from a site.
:param post_type: post, page, attachment, or any custom post type set up in the WP API
:param max_pages: kill counter to avoid infinite looping
:param status: load posts with the given status,
including any of: "publish", "private", "draft", "pending", "future", and "trash", or simply "any"
Note: non public statuses require authentication
:return: None
|
[
"Load",
"all",
"WordPress",
"posts",
"of",
"a",
"given",
"post_type",
"from",
"a",
"site",
"."
] |
python
|
train
| 43.277778 |
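A sketch of the parameter defaulting inside load_posts above, restated standalone (the helper name is illustrative):

def build_post_params(batch_size, post_type=None, status=None):
    # Mirror load_posts' defaults before querying the WordPress API.
    post_type = post_type or "post"
    status = status or "publish"
    return {"number": batch_size, "type": post_type, "status": status}

assert build_post_params(100) == {"number": 100, "type": "post", "status": "publish"}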
ANTsX/ANTsPy
|
ants/registration/apply_transforms.py
|
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/registration/apply_transforms.py#L196-L311
|
def apply_transforms_to_points( dim, points, transformlist,
whichtoinvert=None, verbose=False ):
"""
Apply a transform list to map a pointset from one domain to
another. In registration, one computes mappings between pairs of
domains. These transforms are often a sequence of increasingly
complex maps, e.g. from translation, to rigid, to affine to
deformation. The list of such transforms is passed to this
function to interpolate one image domain into the next image
domain, as below. The order matters strongly and the user is
advised to familiarize with the standards established in examples.
Importantly, point mapping goes the opposite direction of image
mapping, for both reasons of convention and engineering.
ANTsR function: `antsApplyTransformsToPoints`
Arguments
---------
dim: integer
dimensionality of the transformation.
points: data frame
moving point set with n-points in rows of at least dim
columns - we maintain extra information in additional
columns. this should be a data frame with columns names x, y, z, t.
transformlist : list of strings
list of transforms generated by ants.registration where each transform is a filename.
whichtoinvert : list of booleans (optional)
Must be same length as transformlist.
whichtoinvert[i] is True if transformlist[i] is a matrix,
and the matrix should be inverted. If transformlist[i] is a
warp field, whichtoinvert[i] must be False.
If the transform list is a matrix followed by a warp field,
whichtoinvert defaults to (True,False). Otherwise it defaults
to [False]*len(transformlist).
verbose : boolean
Returns
-------
data frame of transformed points
Example
-------
>>> import ants
>>> fixed = ants.image_read( ants.get_ants_data('r16') )
>>> moving = ants.image_read( ants.get_ants_data('r27') )
>>> reg = ants.registration( fixed, moving, 'Affine' )
>>> d = {'x': [128, 127], 'y': [101, 111]}
>>> pts = pd.DataFrame(data=d)
>>> ptsw = ants.apply_transforms_to_points( 2, pts, reg['fwdtransforms'])
"""
if not isinstance(transformlist, (tuple, list)) and (transformlist is not None):
transformlist = [transformlist]
args = [dim, points, transformlist, whichtoinvert]
for tl_path in transformlist:
if not os.path.exists(tl_path):
raise Exception('Transform %s does not exist' % tl_path)
mytx = []
if whichtoinvert is None or (isinstance(whichtoinvert, (tuple,list)) and (sum([w is not None for w in whichtoinvert])==0)):
if (len(transformlist) == 2) and ('.mat' in transformlist[0]) and ('.mat' not in transformlist[1]):
whichtoinvert = (True, False)
else:
whichtoinvert = tuple([False]*len(transformlist))
if len(whichtoinvert) != len(transformlist):
raise ValueError('Transform list and inversion list must be the same length')
for i in range(len(transformlist)):
ismat = False
if '.mat' in transformlist[i]:
ismat = True
if whichtoinvert[i] and (not ismat):
raise ValueError('Cannot invert transform %i (%s) because it is not a matrix' % (i, transformlist[i]))
if whichtoinvert[i]:
mytx = mytx + ['-t', '[%s,1]' % (transformlist[i])]
else:
mytx = mytx + ['-t', transformlist[i]]
if dim == 2:
pointsSub = points[['x','y']]
if dim == 3:
pointsSub = points[['x','y','z']]
if dim == 4:
pointsSub = points[['x','y','z','t']]
pointImage = core.make_image( pointsSub.shape, pointsSub.values.flatten())
pointsOut = pointImage.clone()
args = ['-d', dim,
'-i', pointImage,
'-o', pointsOut ]
args = args + mytx
myargs = utils._int_antsProcessArguments(args)
myverb = int(verbose)
if verbose:
print(myargs)
processed_args = myargs + [ '-f', str(1), '--precision', str(0)]
libfn = utils.get_lib_fn('antsApplyTransformsToPoints')
libfn(processed_args)
mynp = pointsOut.numpy()
pointsOutDF = points.copy()
pointsOutDF['x'] = mynp[:,0]
if dim >= 2:
pointsOutDF['y'] = mynp[:,1]
if dim >= 3:
pointsOutDF['z'] = mynp[:,2]
if dim >= 4:
pointsOutDF['t'] = mynp[:,3]
return pointsOutDF
|
[
"def",
"apply_transforms_to_points",
"(",
"dim",
",",
"points",
",",
"transformlist",
",",
"whichtoinvert",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"transformlist",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"(",
"transformlist",
"is",
"not",
"None",
")",
":",
"transformlist",
"=",
"[",
"transformlist",
"]",
"args",
"=",
"[",
"dim",
",",
"points",
",",
"transformlist",
",",
"whichtoinvert",
"]",
"for",
"tl_path",
"in",
"transformlist",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"tl_path",
")",
":",
"raise",
"Exception",
"(",
"'Transform %s does not exist'",
"%",
"tl_path",
")",
"mytx",
"=",
"[",
"]",
"if",
"whichtoinvert",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"whichtoinvert",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"(",
"sum",
"(",
"[",
"w",
"is",
"not",
"None",
"for",
"w",
"in",
"whichtoinvert",
"]",
")",
"==",
"0",
")",
")",
":",
"if",
"(",
"len",
"(",
"transformlist",
")",
"==",
"2",
")",
"and",
"(",
"'.mat'",
"in",
"transformlist",
"[",
"0",
"]",
")",
"and",
"(",
"'.mat'",
"not",
"in",
"transformlist",
"[",
"1",
"]",
")",
":",
"whichtoinvert",
"=",
"(",
"True",
",",
"False",
")",
"else",
":",
"whichtoinvert",
"=",
"tuple",
"(",
"[",
"False",
"]",
"*",
"len",
"(",
"transformlist",
")",
")",
"if",
"len",
"(",
"whichtoinvert",
")",
"!=",
"len",
"(",
"transformlist",
")",
":",
"raise",
"ValueError",
"(",
"'Transform list and inversion list must be the same length'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"transformlist",
")",
")",
":",
"ismat",
"=",
"False",
"if",
"'.mat'",
"in",
"transformlist",
"[",
"i",
"]",
":",
"ismat",
"=",
"True",
"if",
"whichtoinvert",
"[",
"i",
"]",
"and",
"(",
"not",
"ismat",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot invert transform %i (%s) because it is not a matrix'",
"%",
"(",
"i",
",",
"transformlist",
"[",
"i",
"]",
")",
")",
"if",
"whichtoinvert",
"[",
"i",
"]",
":",
"mytx",
"=",
"mytx",
"+",
"[",
"'-t'",
",",
"'[%s,1]'",
"%",
"(",
"transformlist",
"[",
"i",
"]",
")",
"]",
"else",
":",
"mytx",
"=",
"mytx",
"+",
"[",
"'-t'",
",",
"transformlist",
"[",
"i",
"]",
"]",
"if",
"dim",
"==",
"2",
":",
"pointsSub",
"=",
"points",
"[",
"[",
"'x'",
",",
"'y'",
"]",
"]",
"if",
"dim",
"==",
"3",
":",
"pointsSub",
"=",
"points",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"]",
"if",
"dim",
"==",
"4",
":",
"pointsSub",
"=",
"points",
"[",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'t'",
"]",
"]",
"pointImage",
"=",
"core",
".",
"make_image",
"(",
"pointsSub",
".",
"shape",
",",
"pointsSub",
".",
"values",
".",
"flatten",
"(",
")",
")",
"pointsOut",
"=",
"pointImage",
".",
"clone",
"(",
")",
"args",
"=",
"[",
"'-d'",
",",
"dim",
",",
"'-i'",
",",
"pointImage",
",",
"'-o'",
",",
"pointsOut",
"]",
"args",
"=",
"args",
"+",
"mytx",
"myargs",
"=",
"utils",
".",
"_int_antsProcessArguments",
"(",
"args",
")",
"myverb",
"=",
"int",
"(",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"myargs",
")",
"processed_args",
"=",
"myargs",
"+",
"[",
"'-f'",
",",
"str",
"(",
"1",
")",
",",
"'--precision'",
",",
"str",
"(",
"0",
")",
"]",
"libfn",
"=",
"utils",
".",
"get_lib_fn",
"(",
"'antsApplyTransformsToPoints'",
")",
"libfn",
"(",
"processed_args",
")",
"mynp",
"=",
"pointsOut",
".",
"numpy",
"(",
")",
"pointsOutDF",
"=",
"points",
".",
"copy",
"(",
")",
"pointsOutDF",
"[",
"'x'",
"]",
"=",
"mynp",
"[",
":",
",",
"0",
"]",
"if",
"dim",
">=",
"2",
":",
"pointsOutDF",
"[",
"'y'",
"]",
"=",
"mynp",
"[",
":",
",",
"1",
"]",
"if",
"dim",
">=",
"3",
":",
"pointsOutDF",
"[",
"'z'",
"]",
"=",
"mynp",
"[",
":",
",",
"2",
"]",
"if",
"dim",
">=",
"4",
":",
"pointsOutDF",
"[",
"'t'",
"]",
"=",
"mynp",
"[",
":",
",",
"3",
"]",
"return",
"pointsOutDF"
] |
Apply a transform list to map a pointset from one domain to
another. In registration, one computes mappings between pairs of
domains. These transforms are often a sequence of increasingly
complex maps, e.g. from translation, to rigid, to affine to
deformation. The list of such transforms is passed to this
function to interpolate one image domain into the next image
domain, as below. The order matters strongly and the user is
advised to familiarize with the standards established in examples.
Importantly, point mapping goes the opposite direction of image
mapping, for both reasons of convention and engineering.
ANTsR function: `antsApplyTransformsToPoints`
Arguments
---------
dim: integer
dimensionality of the transformation.
points: data frame
moving point set with n-points in rows of at least dim
columns - we maintain extra information in additional
columns. this should be a data frame with columns names x, y, z, t.
transformlist : list of strings
list of transforms generated by ants.registration where each transform is a filename.
whichtoinvert : list of booleans (optional)
Must be same length as transformlist.
whichtoinvert[i] is True if transformlist[i] is a matrix,
and the matrix should be inverted. If transformlist[i] is a
warp field, whichtoinvert[i] must be False.
If the transform list is a matrix followed by a warp field,
whichtoinvert defaults to (True,False). Otherwise it defaults
to [False]*len(transformlist).
verbose : boolean
Returns
-------
data frame of transformed points
Example
-------
>>> import ants
>>> fixed = ants.image_read( ants.get_ants_data('r16') )
>>> moving = ants.image_read( ants.get_ants_data('r27') )
>>> reg = ants.registration( fixed, moving, 'Affine' )
>>> d = {'x': [128, 127], 'y': [101, 111]}
>>> pts = pd.DataFrame(data=d)
>>> ptsw = ants.apply_transforms_to_points( 2, pts, reg['fwdtransforms'])
|
[
"Apply",
"a",
"transform",
"list",
"to",
"map",
"a",
"pointset",
"from",
"one",
"domain",
"to",
"another",
".",
"In",
"registration",
"one",
"computes",
"mappings",
"between",
"pairs",
"of",
"domains",
".",
"These",
"transforms",
"are",
"often",
"a",
"sequence",
"of",
"increasingly",
"complex",
"maps",
"e",
".",
"g",
".",
"from",
"translation",
"to",
"rigid",
"to",
"affine",
"to",
"deformation",
".",
"The",
"list",
"of",
"such",
"transforms",
"is",
"passed",
"to",
"this",
"function",
"to",
"interpolate",
"one",
"image",
"domain",
"into",
"the",
"next",
"image",
"domain",
"as",
"below",
".",
"The",
"order",
"matters",
"strongly",
"and",
"the",
"user",
"is",
"advised",
"to",
"familiarize",
"with",
"the",
"standards",
"established",
"in",
"examples",
".",
"Importantly",
"point",
"mapping",
"goes",
"the",
"opposite",
"direction",
"of",
"image",
"mapping",
"for",
"both",
"reasons",
"of",
"convention",
"and",
"engineering",
"."
] |
python
|
train
| 37.491379 |
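A self-contained sketch of the whichtoinvert defaulting rule in apply_transforms_to_points above (the function name is illustrative): a lone .mat affine followed by a warp field inverts the matrix; any other list defaults to no inversions.

def default_whichtoinvert(transformlist):
    # Matrix followed by a warp field: invert the matrix only.
    if (len(transformlist) == 2 and '.mat' in transformlist[0]
            and '.mat' not in transformlist[1]):
        return (True, False)
    return tuple([False] * len(transformlist))

assert default_whichtoinvert(['fwd.mat', 'warp.nii.gz']) == (True, False)
assert default_whichtoinvert(['warp.nii.gz']) == (False,)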
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_0/work/work_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L448-L481
|
def replace_capacities(self, capacities, team_context, iteration_id):
"""ReplaceCapacities.
Replace a team's capacity
:param [TeamMemberCapacity] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacity]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'str')
content = self._serialize.body(capacities, '[TeamMemberCapacity]')
response = self._send(http_method='PUT',
location_id='74412d15-8c1a-4352-a48d-ef1ed5587d57',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('[TeamMemberCapacity]', self._unwrap_collection(response))
|
[
"def",
"replace_capacities",
"(",
"self",
",",
"capacities",
",",
"team_context",
",",
"iteration_id",
")",
":",
"project",
"=",
"None",
"team",
"=",
"None",
"if",
"team_context",
"is",
"not",
"None",
":",
"if",
"team_context",
".",
"project_id",
":",
"project",
"=",
"team_context",
".",
"project_id",
"else",
":",
"project",
"=",
"team_context",
".",
"project",
"if",
"team_context",
".",
"team_id",
":",
"team",
"=",
"team_context",
".",
"team_id",
"else",
":",
"team",
"=",
"team_context",
".",
"team",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'string'",
")",
"if",
"team",
"is",
"not",
"None",
":",
"route_values",
"[",
"'team'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'team'",
",",
"team",
",",
"'string'",
")",
"if",
"iteration_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'iterationId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'iteration_id'",
",",
"iteration_id",
",",
"'str'",
")",
"content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"capacities",
",",
"'[TeamMemberCapacity]'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'PUT'",
",",
"location_id",
"=",
"'74412d15-8c1a-4352-a48d-ef1ed5587d57'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
",",
"content",
"=",
"content",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[TeamMemberCapacity]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] |
ReplaceCapacities.
Replace a team's capacity
:param [TeamMemberCapacity] capacities: Team capacity to replace
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str iteration_id: ID of the iteration
:rtype: [TeamMemberCapacity]
|
[
"ReplaceCapacities",
".",
"Replace",
"a",
"team",
"s",
"capacity",
":",
"param",
"[",
"TeamMemberCapacity",
"]",
"capacities",
":",
"Team",
"capacity",
"to",
"replace",
":",
"param",
":",
"class",
":",
"<TeamContext",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamContext",
">",
"team_context",
":",
"The",
"team",
"context",
"for",
"the",
"operation",
":",
"param",
"str",
"iteration_id",
":",
"ID",
"of",
"the",
"iteration",
":",
"rtype",
":",
"[",
"TeamMemberCapacity",
"]"
] |
python
|
train
| 47.617647 |
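The team-context resolution at the top of replace_capacities prefers IDs over display names; a standalone sketch with a namedtuple standing in for azure.devops.v5_0.work.models.TeamContext:

from collections import namedtuple

# Stand-in for the real TeamContext model (illustrative only).
TeamContext = namedtuple('TeamContext', 'project_id project team_id team')

def resolve_team_context(tc):
    # Prefer IDs; fall back to names, exactly as replace_capacities does.
    project = tc.project_id if tc.project_id else tc.project
    team = tc.team_id if tc.team_id else tc.team
    return project, team

assert resolve_team_context(TeamContext(None, 'Fabrikam', None, 'Web')) == ('Fabrikam', 'Web')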
hannorein/rebound
|
rebound/simulation.py
|
https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1154-L1188
|
def remove(self, index=None, hash=None, keepSorted=True):
"""
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
Might set it to zero in cases with many particles and many removals to speed things up.
"""
if index is not None:
clibrebound.reb_remove(byref(self), index, keepSorted)
if hash is not None:
hash_types = c_uint32, c_uint, c_ulong
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
int_types = int,
else:
string_types = basestring,
int_types = int, long
if isinstance(hash, string_types):
clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted)
elif isinstance(hash, int_types):
clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted)
elif isinstance(hash, hash_types):
clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted)
if hasattr(self, '_widgets'):
self._display_heartbeat(pointer(self))
self.process_messages()
|
[
"def",
"remove",
"(",
"self",
",",
"index",
"=",
"None",
",",
"hash",
"=",
"None",
",",
"keepSorted",
"=",
"True",
")",
":",
"if",
"index",
"is",
"not",
"None",
":",
"clibrebound",
".",
"reb_remove",
"(",
"byref",
"(",
"self",
")",
",",
"index",
",",
"keepSorted",
")",
"if",
"hash",
"is",
"not",
"None",
":",
"hash_types",
"=",
"c_uint32",
",",
"c_uint",
",",
"c_ulong",
"PY3",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"3",
"if",
"PY3",
":",
"string_types",
"=",
"str",
",",
"int_types",
"=",
"int",
",",
"else",
":",
"string_types",
"=",
"basestring",
",",
"int_types",
"=",
"int",
",",
"long",
"if",
"isinstance",
"(",
"hash",
",",
"string_types",
")",
":",
"clibrebound",
".",
"reb_remove_by_hash",
"(",
"byref",
"(",
"self",
")",
",",
"rebhash",
"(",
"hash",
")",
",",
"keepSorted",
")",
"elif",
"isinstance",
"(",
"hash",
",",
"int_types",
")",
":",
"clibrebound",
".",
"reb_remove_by_hash",
"(",
"byref",
"(",
"self",
")",
",",
"c_uint32",
"(",
"hash",
")",
",",
"keepSorted",
")",
"elif",
"isinstance",
"(",
"hash",
",",
"hash_types",
")",
":",
"clibrebound",
".",
"reb_remove_by_hash",
"(",
"byref",
"(",
"self",
")",
",",
"hash",
",",
"keepSorted",
")",
"if",
"hasattr",
"(",
"self",
",",
"'_widgets'",
")",
":",
"self",
".",
"_display_heartbeat",
"(",
"pointer",
"(",
"self",
")",
")",
"self",
".",
"process_messages",
"(",
")"
] |
Removes a particle from the simulation.
Parameters
----------
index : int, optional
Specify particle to remove by index.
hash : c_uint32 or string, optional
Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
keepSorted : bool, optional
By default, remove preserves the order of particles in the particles array.
Might set it to zero in cases with many particles and many removals to speed things up.
|
[
"Removes",
"a",
"particle",
"from",
"the",
"simulation",
"."
] |
python
|
train
| 43.342857 |
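A usage sketch for remove above, assuming rebound is installed and the standard Simulation API; removal by a string goes through rebhash, as in the elif chain:

import rebound

sim = rebound.Simulation()
sim.add(m=1.0)                 # central body
sim.add(a=1.0, hash="planet")  # particle tagged with a hash
sim.remove(hash="planet")      # resolves the string via rebhash
assert sim.N == 1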
saltstack/salt
|
salt/utils/files.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/files.py#L714-L726
|
def list_files(directory):
'''
Return a list of all files found under directory (and its subdirectories)
'''
ret = set()
ret.add(directory)
for root, dirs, files in safe_walk(directory):
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return list(ret)
|
[
"def",
"list_files",
"(",
"directory",
")",
":",
"ret",
"=",
"set",
"(",
")",
"ret",
".",
"add",
"(",
"directory",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"safe_walk",
"(",
"directory",
")",
":",
"for",
"name",
"in",
"files",
":",
"ret",
".",
"add",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
")",
"for",
"name",
"in",
"dirs",
":",
"ret",
".",
"add",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
")",
"return",
"list",
"(",
"ret",
")"
] |
Return a list of all files found under directory (and its subdirectories)
|
[
"Return",
"a",
"list",
"of",
"all",
"files",
"found",
"under",
"directory",
"(",
"and",
"its",
"subdirectories",
")"
] |
python
|
train
| 28.076923 |
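safe_walk is Salt's internal symlink-safe walker; with os.walk standing in for it, the same set-then-list shape runs anywhere:

import os

def list_files(directory):
    # os.walk stands in for Salt's safe_walk in this sketch.
    ret = {directory}
    for root, dirs, files in os.walk(directory):
        for name in files:
            ret.add(os.path.join(root, name))
        for name in dirs:
            ret.add(os.path.join(root, name))
    return list(ret)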
Microsoft/knack
|
knack/cli.py
|
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/cli.py#L164-L170
|
def exception_handler(self, ex): # pylint: disable=no-self-use
""" The default exception handler """
if isinstance(ex, CLIError):
logger.error(ex)
else:
logger.exception(ex)
return 1
|
[
"def",
"exception_handler",
"(",
"self",
",",
"ex",
")",
":",
"# pylint: disable=no-self-use",
"if",
"isinstance",
"(",
"ex",
",",
"CLIError",
")",
":",
"logger",
".",
"error",
"(",
"ex",
")",
"else",
":",
"logger",
".",
"exception",
"(",
"ex",
")",
"return",
"1"
] |
The default exception handler
|
[
"The",
"default",
"exception",
"handler"
] |
python
|
train
| 33.285714 |
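A self-contained sketch of the handler above; the CLIError stand-in is illustrative (the real one lives in knack.util):

import logging

logger = logging.getLogger(__name__)

class CLIError(Exception):
    """Stand-in for knack.util.CLIError."""

def exception_handler(ex):
    # Expected CLI errors log one line; anything else logs a traceback.
    if isinstance(ex, CLIError):
        logger.error(ex)
    else:
        logger.exception(ex)
    return 1

assert exception_handler(CLIError('bad argument')) == 1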
hsolbrig/PyShEx
|
pyshex/utils/schema_loader.py
|
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/utils/schema_loader.py#L47-L58
|
def loads(self, schema_txt: str) -> ShExJ.Schema:
""" Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema
"""
self.schema_text = schema_txt
if schema_txt.strip()[0] == '{':
# TODO: figure out how to propagate self.base_location into this parse
return cast(ShExJ.Schema, loads(schema_txt, ShExJ))
else:
return generate_shexj.parse(schema_txt, self.base_location)
|
[
"def",
"loads",
"(",
"self",
",",
"schema_txt",
":",
"str",
")",
"->",
"ShExJ",
".",
"Schema",
":",
"self",
".",
"schema_text",
"=",
"schema_txt",
"if",
"schema_txt",
".",
"strip",
"(",
")",
"[",
"0",
"]",
"==",
"'{'",
":",
"# TODO: figure out how to propagate self.base_location into this parse",
"return",
"cast",
"(",
"ShExJ",
".",
"Schema",
",",
"loads",
"(",
"schema_txt",
",",
"ShExJ",
")",
")",
"else",
":",
"return",
"generate_shexj",
".",
"parse",
"(",
"schema_txt",
",",
"self",
".",
"base_location",
")"
] |
Parse and return schema as a ShExJ Schema
:param schema_txt: ShExC or ShExJ representation of a ShEx Schema
:return: ShEx Schema representation of schema
|
[
"Parse",
"and",
"return",
"schema",
"as",
"a",
"ShExJ",
"Schema"
] |
python
|
train
| 45.416667 |
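The dispatch in loads above keys off the first non-blank character: ShExJ documents are JSON objects, so a leading '{' selects the JSON path and everything else is parsed as ShExC. A standalone check of that heuristic:

def looks_like_shexj(schema_txt: str) -> bool:
    # ShExJ is JSON, so the document starts with '{';
    # ShExC starts with prefixes or an IRI.
    return schema_txt.strip()[0] == '{'

assert looks_like_shexj('  {"type": "Schema"}')
assert not looks_like_shexj('<S> { <p> . }')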
WZBSocialScienceCenter/tmtoolkit
|
tmtoolkit/preprocess.py
|
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/preprocess.py#L476-L532
|
def apply_custom_filter(self, filter_func, to_ngrams=False):
"""
Apply a custom filter function `filter_func` to all tokens or ngrams (if `to_ngrams` is True).
`filter_func` must accept a single parameter: a dictionary of structure `{<doc_label>: <tokens list>}`. It
must return a dictionary with the same structure.
This function can only be run on a single process, hence it could be slow for large corpora.
"""
# Because it is not possible to send a function to the workers, all tokens must be fetched from the workers
# first and then the custom function is called and run in a single process (the main process). After that, the
# filtered tokens are sent back to the worker processes.
if not callable(filter_func):
raise ValueError('`filter_func` must be callable')
self._require_tokens()
if to_ngrams:
self._require_ngrams()
get_task = 'get_ngrams_with_worker_id'
set_task = 'set_ngrams'
set_task_param = 'ngrams'
self._invalidate_workers_ngrams()
else:
get_task = 'get_tokens_with_worker_id'
set_task = 'set_tokens'
set_task_param = 'tokens'
self._invalidate_workers_tokens()
self._send_task_to_workers(get_task)
docs_of_workers = {}
for _ in range(self.n_workers):
pair = self.results_queue.get()
docs_of_workers[pair[0]] = pair[1]
assert len(docs_of_workers) == self.n_workers
tok = {}
for docs in docs_of_workers.values():
tok.update(docs)
logger.info('applying custom filter function to tokens')
new_tok = filter_func(tok)
require_dictlike(new_tok)
if set(new_tok.keys()) != set(tok.keys()):
raise ValueError('the document labels and number of documents must stay unchanged during custom filtering')
logger.debug('sending task `%s` to all workers' % set_task)
for w_id, docs in docs_of_workers.items():
new_w_docs = {dl: new_tok.pop(dl) for dl in docs}
self.tasks_queues[w_id].put((set_task, {set_task_param: new_w_docs}))
[q.join() for q in self.tasks_queues]
return self
|
[
"def",
"apply_custom_filter",
"(",
"self",
",",
"filter_func",
",",
"to_ngrams",
"=",
"False",
")",
":",
"# Because it is not possible to send a function to the workers, all tokens must be fetched from the workers",
"# first and then the custom function is called and run in a single process (the main process). After that, the",
"# filtered tokens are send back to the worker processes.",
"if",
"not",
"callable",
"(",
"filter_func",
")",
":",
"raise",
"ValueError",
"(",
"'`filter_func` must be callable'",
")",
"self",
".",
"_require_tokens",
"(",
")",
"if",
"to_ngrams",
":",
"self",
".",
"_require_ngrams",
"(",
")",
"get_task",
"=",
"'get_ngrams_with_worker_id'",
"set_task",
"=",
"'set_ngrams'",
"set_task_param",
"=",
"'ngrams'",
"self",
".",
"_invalidate_workers_ngrams",
"(",
")",
"else",
":",
"get_task",
"=",
"'get_tokens_with_worker_id'",
"set_task",
"=",
"'set_tokens'",
"set_task_param",
"=",
"'tokens'",
"self",
".",
"_invalidate_workers_tokens",
"(",
")",
"self",
".",
"_send_task_to_workers",
"(",
"get_task",
")",
"docs_of_workers",
"=",
"{",
"}",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"n_workers",
")",
":",
"pair",
"=",
"self",
".",
"results_queue",
".",
"get",
"(",
")",
"docs_of_workers",
"[",
"pair",
"[",
"0",
"]",
"]",
"=",
"pair",
"[",
"1",
"]",
"assert",
"len",
"(",
"docs_of_workers",
")",
"==",
"self",
".",
"n_workers",
"tok",
"=",
"{",
"}",
"for",
"docs",
"in",
"docs_of_workers",
".",
"values",
"(",
")",
":",
"tok",
".",
"update",
"(",
"docs",
")",
"logger",
".",
"info",
"(",
"'applying custom filter function to tokens'",
")",
"new_tok",
"=",
"filter_func",
"(",
"tok",
")",
"require_dictlike",
"(",
"new_tok",
")",
"if",
"set",
"(",
"new_tok",
".",
"keys",
"(",
")",
")",
"!=",
"set",
"(",
"tok",
".",
"keys",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'the document labels and number of documents must stay unchanged during custom filtering'",
")",
"logger",
".",
"debug",
"(",
"'sending task `%s` to all workers'",
"%",
"set_task",
")",
"for",
"w_id",
",",
"docs",
"in",
"docs_of_workers",
".",
"items",
"(",
")",
":",
"new_w_docs",
"=",
"{",
"dl",
":",
"new_tok",
".",
"pop",
"(",
"dl",
")",
"for",
"dl",
"in",
"docs",
"}",
"self",
".",
"tasks_queues",
"[",
"w_id",
"]",
".",
"put",
"(",
"(",
"set_task",
",",
"{",
"set_task_param",
":",
"new_w_docs",
"}",
")",
")",
"[",
"q",
".",
"join",
"(",
")",
"for",
"q",
"in",
"self",
".",
"tasks_queues",
"]",
"return",
"self"
] |
Apply a custom filter function `filter_func` to all tokens or ngrams (if `to_ngrams` is True).
`filter_func` must accept a single parameter: a dictionary of structure `{<doc_label>: <tokens list>}`. It
must return a dictionary with the same structure.
This function can only be run on a single process, hence it could be slow for large corpora.
|
[
"Apply",
"a",
"custom",
"filter",
"function",
"filter_func",
"to",
"all",
"tokens",
"or",
"ngrams",
"(",
"if",
"to_ngrams",
"is",
"True",
")",
".",
"filter_func",
"must",
"accept",
"a",
"single",
"parameter",
":",
"a",
"dictionary",
"of",
"structure",
"{",
"<doc_label",
">",
":",
"<tokens",
"list",
">",
"}",
".",
"It",
"must",
"return",
"a",
"dictionary",
"with",
"the",
"same",
"structure",
"."
] |
python
|
train
| 39.438596 |
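filter_func must map {doc_label: tokens} to the same structure with identical labels, or apply_custom_filter raises ValueError. A compliant sketch that drops short tokens (it would be passed as preproc.apply_custom_filter(drop_short_tokens); names are illustrative):

def drop_short_tokens(docs):
    # Keep every document label; only the token lists may change.
    return {label: [t for t in tokens if len(t) > 2]
            for label, tokens in docs.items()}

docs = {'doc1': ['a', 'the', 'corpus'], 'doc2': ['of', 'words']}
assert drop_short_tokens(docs) == {'doc1': ['the', 'corpus'], 'doc2': ['words']}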
Arello-Mobile/swagger2rst
|
swg2rst/utils/exampilators.py
|
https://github.com/Arello-Mobile/swagger2rst/blob/e519f70701477dcc9f0bb237ee5b8e08e848701b/swg2rst/utils/exampilators.py#L204-L253
|
def get_property_example(cls, property_, nested=None, **kw):
""" Get example for property
:param dict property_:
:param set nested:
:return: example value
"""
paths = kw.get('paths', [])
name = kw.get('name', '')
result = None
if name and paths:
paths = list(map(lambda path: '.'.join((path, name)), paths))
result, path = cls._get_custom_example(paths)
if result is not None and property_['type'] in PRIMITIVE_TYPES:
cls._example_validate(
path, result, property_['type'], property_['type_format'])
return result
if SchemaObjects.contains(property_['type']):
schema = SchemaObjects.get(property_['type'])
if result is not None:
if schema.is_array:
if not isinstance(result, list):
result = [result] * cls.EXAMPLE_ARRAY_ITEMS_COUNT
else:
if isinstance(result, list):
cls.logger.warning(
'Example type mismatch in path {}'.format(schema.ref_path))
else:
result = cls.get_example_by_schema(schema, **kw)
if (not result) and schema.nested_schemas:
for _schema_id in schema.nested_schemas:
_schema = SchemaObjects.get(_schema_id)
if _schema:
if isinstance(_schema, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema, **kw)
elif _schema.nested_schemas:
for _schema__id in _schema.nested_schemas:
_schema_ = SchemaObjects.get(_schema__id)
if isinstance(_schema_, SchemaMapWrapper):
result[_schema.name] = cls.get_example_by_schema(_schema_, **kw)
else:
result = cls.get_example_value_for_primitive_type(
property_['type'],
property_['type_properties'],
property_['type_format'],
**kw
)
return result
|
[
"def",
"get_property_example",
"(",
"cls",
",",
"property_",
",",
"nested",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"paths",
"=",
"kw",
".",
"get",
"(",
"'paths'",
",",
"[",
"]",
")",
"name",
"=",
"kw",
".",
"get",
"(",
"'name'",
",",
"''",
")",
"result",
"=",
"None",
"if",
"name",
"and",
"paths",
":",
"paths",
"=",
"list",
"(",
"map",
"(",
"lambda",
"path",
":",
"'.'",
".",
"join",
"(",
"(",
"path",
",",
"name",
")",
")",
",",
"paths",
")",
")",
"result",
",",
"path",
"=",
"cls",
".",
"_get_custom_example",
"(",
"paths",
")",
"if",
"result",
"is",
"not",
"None",
"and",
"property_",
"[",
"'type'",
"]",
"in",
"PRIMITIVE_TYPES",
":",
"cls",
".",
"_example_validate",
"(",
"path",
",",
"result",
",",
"property_",
"[",
"'type'",
"]",
",",
"property_",
"[",
"'type_format'",
"]",
")",
"return",
"result",
"if",
"SchemaObjects",
".",
"contains",
"(",
"property_",
"[",
"'type'",
"]",
")",
":",
"schema",
"=",
"SchemaObjects",
".",
"get",
"(",
"property_",
"[",
"'type'",
"]",
")",
"if",
"result",
"is",
"not",
"None",
":",
"if",
"schema",
".",
"is_array",
":",
"if",
"not",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"result",
"=",
"[",
"result",
"]",
"*",
"cls",
".",
"EXAMPLE_ARRAY_ITEMS_COUNT",
"else",
":",
"if",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"cls",
".",
"logger",
".",
"warning",
"(",
"'Example type mismatch in path {}'",
".",
"format",
"(",
"schema",
".",
"ref_path",
")",
")",
"else",
":",
"result",
"=",
"cls",
".",
"get_example_by_schema",
"(",
"schema",
",",
"*",
"*",
"kw",
")",
"if",
"(",
"not",
"result",
")",
"and",
"schema",
".",
"nested_schemas",
":",
"for",
"_schema_id",
"in",
"schema",
".",
"nested_schemas",
":",
"_schema",
"=",
"SchemaObjects",
".",
"get",
"(",
"_schema_id",
")",
"if",
"_schema",
":",
"if",
"isinstance",
"(",
"_schema",
",",
"SchemaMapWrapper",
")",
":",
"result",
"[",
"_schema",
".",
"name",
"]",
"=",
"cls",
".",
"get_example_by_schema",
"(",
"_schema",
",",
"*",
"*",
"kw",
")",
"elif",
"_schema",
".",
"nested_schemas",
":",
"for",
"_schema__id",
"in",
"_schema",
".",
"nested_schemas",
":",
"_schema_",
"=",
"SchemaObjects",
".",
"get",
"(",
"_schema__id",
")",
"if",
"isinstance",
"(",
"_schema_",
",",
"SchemaMapWrapper",
")",
":",
"result",
"[",
"_schema",
".",
"name",
"]",
"=",
"cls",
".",
"get_example_by_schema",
"(",
"_schema_",
",",
"*",
"*",
"kw",
")",
"else",
":",
"result",
"=",
"cls",
".",
"get_example_value_for_primitive_type",
"(",
"property_",
"[",
"'type'",
"]",
",",
"property_",
"[",
"'type_properties'",
"]",
",",
"property_",
"[",
"'type_format'",
"]",
",",
"*",
"*",
"kw",
")",
"return",
"result"
] |
Get example for property
:param dict property_:
:param set nested:
:return: example value
|
[
"Get",
"example",
"for",
"property"
] |
python
|
train
| 44.32 |
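The custom-example lookup in get_property_example extends each candidate path with the property name; that one-liner in isolation:

paths = ['definitions.Pet', 'definitions.Dog']
name = 'id'
expanded = list(map(lambda path: '.'.join((path, name)), paths))
assert expanded == ['definitions.Pet.id', 'definitions.Dog.id']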
shi-cong/PYSTUDY
|
PYSTUDY/encryptlib.py
|
https://github.com/shi-cong/PYSTUDY/blob/c8da7128ea18ecaa5849f2066d321e70d6f97f70/PYSTUDY/encryptlib.py#L44-L57
|
def reverse_cipher(message):
"""
Reverse cipher
:param message: string to encrypt
:return: encrypted string
"""
translated = ''
i = len(message) - 1
while i >= 0:
translated = translated + message[i]
i = i - 1
return translated
|
[
"def",
"reverse_cipher",
"(",
"message",
")",
":",
"translated",
"=",
"''",
"i",
"=",
"len",
"(",
"message",
")",
"-",
"1",
"while",
"i",
">=",
"0",
":",
"translated",
"=",
"translated",
"+",
"message",
"[",
"i",
"]",
"i",
"=",
"i",
"-",
"1",
"return",
"translated"
] |
Reverse cipher
:param message: string to encrypt
:return: encrypted string
|
[
"反转加密法",
":",
"param",
"message",
":",
"待加密字符串",
":",
"return",
":",
"被加密字符串"
] |
python
|
train
| 17 |
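The reverse cipher above is equivalent to Python's slice reversal; restated standalone with a quick check:

def reverse_cipher(message):
    # Builds the reversal one character at a time;
    # message[::-1] is the idiomatic equivalent.
    translated = ''
    i = len(message) - 1
    while i >= 0:
        translated = translated + message[i]
        i = i - 1
    return translated

assert reverse_cipher('attack at dawn') == 'nwad ta kcatta'
assert reverse_cipher('abc') == 'abc'[::-1]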
nickmckay/LiPD-utilities
|
Python/lipd/lpd_noaa.py
|
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L638-L655
|
def __put_year_col_first(d):
"""
Always write year column first. Reorder dictionary so that year is first
:param dict d: data
:return dict: Reordered data
"""
if "year" in d:
D = OrderedDict()
# store the year column first
D["year"] = d["year"]
for k,v in d.items():
if k != "year":
# store the other columns
D[k] = v
return D
else:
# year is not found, return data as-is
return d
|
[
"def",
"__put_year_col_first",
"(",
"d",
")",
":",
"if",
"\"year\"",
"in",
"d",
":",
"D",
"=",
"OrderedDict",
"(",
")",
"# store the year column first",
"D",
"[",
"\"year\"",
"]",
"=",
"d",
"[",
"\"year\"",
"]",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"k",
"!=",
"\"year\"",
":",
"# store the other columns",
"D",
"[",
"k",
"]",
"=",
"v",
"return",
"D",
"else",
":",
"# year is not found, return data as-is",
"return",
"d"
] |
Always write year column first. Reorder dictionary so that year is first
:param dict d: data
:return dict: Reordered data
|
[
"Always",
"write",
"year",
"column",
"first",
".",
"Reorder",
"dictionary",
"so",
"that",
"year",
"is",
"first",
":",
"param",
"dict",
"d",
":",
"data",
":",
"return",
"dict",
":",
"Reordered",
"data"
] |
python
|
train
| 31.055556 |
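The private staticmethod above, restated standalone (since Python 3.7 a plain dict would also preserve the insertion order):

from collections import OrderedDict

def put_year_col_first(d):
    # Reorder so "year" leads; other columns keep their relative order.
    if "year" not in d:
        return d
    D = OrderedDict()
    D["year"] = d["year"]
    for k, v in d.items():
        if k != "year":
            D[k] = v
    return D

cols = {"depth": [0, 1], "year": [1900, 1901], "d18O": [3.1, 3.2]}
assert list(put_year_col_first(cols)) == ["year", "depth", "d18O"]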
thomasdelaet/python-velbus
|
velbus/module.py
|
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/module.py#L147-L158
|
def _name_messages_complete(self):
"""
Check if all name messages have been received
"""
for channel in range(1, self.number_of_channels() + 1):
try:
for name_index in range(1, 4):
if not isinstance(self._name_data[channel][name_index], str):
return False
except Exception:
return False
return True
|
[
"def",
"_name_messages_complete",
"(",
"self",
")",
":",
"for",
"channel",
"in",
"range",
"(",
"1",
",",
"self",
".",
"number_of_channels",
"(",
")",
"+",
"1",
")",
":",
"try",
":",
"for",
"name_index",
"in",
"range",
"(",
"1",
",",
"4",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_name_data",
"[",
"channel",
"]",
"[",
"name_index",
"]",
",",
"str",
")",
":",
"return",
"False",
"except",
"Exception",
":",
"return",
"False",
"return",
"True"
] |
Check if all name messages have been received
|
[
"Check",
"if",
"all",
"name",
"messages",
"have",
"been",
"received"
] |
python
|
train
| 35.583333 |
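The completeness check above, restated over plain dicts (the data shape is inferred from the method: string name parts at indices 1 through 3 per channel):

def name_messages_complete(name_data, number_of_channels):
    # Every channel needs string name parts at indices 1..3.
    for channel in range(1, number_of_channels + 1):
        try:
            for name_index in range(1, 4):
                if not isinstance(name_data[channel][name_index], str):
                    return False
        except Exception:
            return False
    return True

partial = {1: {1: 'kit', 2: 'chen', 3: ''}, 2: {1: 'hall'}}
assert not name_messages_complete(partial, 2)  # channel 2 is missing parts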
push-things/django-th
|
th_evernote/evernote_mgr.py
|
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_evernote/evernote_mgr.py#L161-L175
|
def set_note_footer(data, trigger):
"""
handle the footer of the note
"""
footer = ''
if data.get('link'):
provided_by = _('Provided by')
provided_from = _('from')
footer_from = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>"
footer = footer_from.format(
provided_by, trigger.trigger.description, provided_from,
data.get('link'), data.get('link'))
return footer
|
[
"def",
"set_note_footer",
"(",
"data",
",",
"trigger",
")",
":",
"footer",
"=",
"''",
"if",
"data",
".",
"get",
"(",
"'link'",
")",
":",
"provided_by",
"=",
"_",
"(",
"'Provided by'",
")",
"provided_from",
"=",
"_",
"(",
"'from'",
")",
"footer_from",
"=",
"\"<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>\"",
"footer",
"=",
"footer_from",
".",
"format",
"(",
"provided_by",
",",
"trigger",
".",
"trigger",
".",
"description",
",",
"provided_from",
",",
"data",
".",
"get",
"(",
"'link'",
")",
",",
"data",
".",
"get",
"(",
"'link'",
")",
")",
"return",
"footer"
] |
handle the footer of the note
|
[
"handle",
"the",
"footer",
"of",
"the",
"note"
] |
python
|
train
| 32.2 |
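A flattened sketch of set_note_footer with gettext's _ stubbed out (in django-th it comes from django.utils.translation) and the trigger description passed in directly:

_ = lambda s: s  # translation stub

def set_note_footer(data, description):
    # Only notes carrying a link get the attribution footer.
    footer = ''
    if data.get('link'):
        footer = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>".format(
            _('Provided by'), description, _('from'),
            data.get('link'), data.get('link'))
    return footer

assert set_note_footer({}, 'feed') == ''
assert 'href' in set_note_footer({'link': 'https://example.org'}, 'feed')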
gmr/rejected
|
rejected/mcp.py
|
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/mcp.py#L443-L469
|
def poll(self):
"""Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
"""
self.set_state(self.STATE_ACTIVE)
# If we don't have any active consumers, spawn new ones
if not self.total_process_count:
LOGGER.debug('Did not find any active consumers in poll')
return self.check_process_counts()
# Start our data collection dict
self.poll_data = {'timestamp': time.time(), 'processes': list()}
# Iterate through all of the consumers
for proc in list(self.active_processes()):
if proc == multiprocessing.current_process():
continue
# Send the profile signal
os.kill(int(proc.pid), signal.SIGPROF)
self.poll_data['processes'].append(proc.name)
# Check if we need to start more processes
self.check_process_counts()
|
[
"def",
"poll",
"(",
"self",
")",
":",
"self",
".",
"set_state",
"(",
"self",
".",
"STATE_ACTIVE",
")",
"# If we don't have any active consumers, spawn new ones",
"if",
"not",
"self",
".",
"total_process_count",
":",
"LOGGER",
".",
"debug",
"(",
"'Did not find any active consumers in poll'",
")",
"return",
"self",
".",
"check_process_counts",
"(",
")",
"# Start our data collection dict",
"self",
".",
"poll_data",
"=",
"{",
"'timestamp'",
":",
"time",
".",
"time",
"(",
")",
",",
"'processes'",
":",
"list",
"(",
")",
"}",
"# Iterate through all of the consumers",
"for",
"proc",
"in",
"list",
"(",
"self",
".",
"active_processes",
"(",
")",
")",
":",
"if",
"proc",
"==",
"multiprocessing",
".",
"current_process",
"(",
")",
":",
"continue",
"# Send the profile signal",
"os",
".",
"kill",
"(",
"int",
"(",
"proc",
".",
"pid",
")",
",",
"signal",
".",
"SIGPROF",
")",
"self",
".",
"poll_data",
"[",
"'processes'",
"]",
".",
"append",
"(",
"proc",
".",
"name",
")",
"# Check if we need to start more processes",
"self",
".",
"check_process_counts",
"(",
")"
] |
Start the poll process by invoking the get_stats method of the
consumers. If we hit this after another interval without fully
processing, note it with a warning.
|
[
"Start",
"the",
"poll",
"process",
"by",
"invoking",
"the",
"get_stats",
"method",
"of",
"the",
"consumers",
".",
"If",
"we",
"hit",
"this",
"after",
"another",
"interval",
"without",
"fully",
"processing",
"note",
"it",
"with",
"a",
"warning",
"."
] |
python
|
train
| 36.62963 |
python/performance
|
performance/benchmarks/bm_nqueens.py
|
https://github.com/python/performance/blob/2a9524c0a5714e85106671bc61d750e800fe17db/performance/benchmarks/bm_nqueens.py#L9-L30
|
def permutations(iterable, r=None):
"""permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)"""
pool = tuple(iterable)
n = len(pool)
if r is None:
r = n
indices = list(range(n))
cycles = list(range(n - r + 1, n + 1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i + 1:] + indices[i:i + 1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
|
[
"def",
"permutations",
"(",
"iterable",
",",
"r",
"=",
"None",
")",
":",
"pool",
"=",
"tuple",
"(",
"iterable",
")",
"n",
"=",
"len",
"(",
"pool",
")",
"if",
"r",
"is",
"None",
":",
"r",
"=",
"n",
"indices",
"=",
"list",
"(",
"range",
"(",
"n",
")",
")",
"cycles",
"=",
"list",
"(",
"range",
"(",
"n",
"-",
"r",
"+",
"1",
",",
"n",
"+",
"1",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
"yield",
"tuple",
"(",
"pool",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"[",
":",
"r",
"]",
")",
"while",
"n",
":",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"r",
")",
")",
":",
"cycles",
"[",
"i",
"]",
"-=",
"1",
"if",
"cycles",
"[",
"i",
"]",
"==",
"0",
":",
"indices",
"[",
"i",
":",
"]",
"=",
"indices",
"[",
"i",
"+",
"1",
":",
"]",
"+",
"indices",
"[",
"i",
":",
"i",
"+",
"1",
"]",
"cycles",
"[",
"i",
"]",
"=",
"n",
"-",
"i",
"else",
":",
"j",
"=",
"cycles",
"[",
"i",
"]",
"indices",
"[",
"i",
"]",
",",
"indices",
"[",
"-",
"j",
"]",
"=",
"indices",
"[",
"-",
"j",
"]",
",",
"indices",
"[",
"i",
"]",
"yield",
"tuple",
"(",
"pool",
"[",
"i",
"]",
"for",
"i",
"in",
"indices",
"[",
":",
"r",
"]",
")",
"break",
"else",
":",
"return"
] |
permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)
|
[
"permutations",
"(",
"range",
"(",
"3",
")",
"2",
")",
"--",
">",
"(",
"0",
"1",
")",
"(",
"0",
"2",
")",
"(",
"1",
"0",
")",
"(",
"1",
"2",
")",
"(",
"2",
"0",
")",
"(",
"2",
"1",
")"
] |
python
|
test
| 32.954545 |
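The benchmark's generator is the pure-Python equivalent from the itertools documentation, so it agrees with the C implementation; a quick check, assuming the permutations generator above is in scope:

import itertools

assert list(permutations(range(4), 2)) == list(itertools.permutations(range(4), 2))
assert list(permutations('ab')) == [('a', 'b'), ('b', 'a')]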
andrenarchy/krypy
|
krypy/utils.py
|
https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/utils.py#L612-L621
|
def operator(self):
"""Get a ``LinearOperator`` corresponding to apply().
:return: a LinearOperator that calls apply().
"""
# is projection the zero operator?
if self.V.shape[1] == 0:
N = self.V.shape[0]
return ZeroLinearOperator((N, N))
return self._get_operator(self.apply, self.apply_adj)
|
[
"def",
"operator",
"(",
"self",
")",
":",
"# is projection the zero operator?",
"if",
"self",
".",
"V",
".",
"shape",
"[",
"1",
"]",
"==",
"0",
":",
"N",
"=",
"self",
".",
"V",
".",
"shape",
"[",
"0",
"]",
"return",
"ZeroLinearOperator",
"(",
"(",
"N",
",",
"N",
")",
")",
"return",
"self",
".",
"_get_operator",
"(",
"self",
".",
"apply",
",",
"self",
".",
"apply_adj",
")"
] |
Get a ``LinearOperator`` corresponding to apply().
:return: a LinearOperator that calls apply().
|
[
"Get",
"a",
"LinearOperator",
"corresponding",
"to",
"apply",
"()",
"."
] |
python
|
train
| 35.5 |
chrislit/abydos
|
abydos/phonetic/_beider_morse.py
|
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/phonetic/_beider_morse.py#L936-L1025
|
def bmpm(
word,
language_arg=0,
name_mode='gen',
match_mode='approx',
concat=False,
filter_langs=False,
):
"""Return the Beider-Morse Phonetic Matching encoding(s) of a term.
This is a wrapper for :py:meth:`BeiderMorse.encode`.
Parameters
----------
word : str
The word to transform
language_arg : str
The language of the term; supported values include:
- ``any``
- ``arabic``
- ``cyrillic``
- ``czech``
- ``dutch``
- ``english``
- ``french``
- ``german``
- ``greek``
- ``greeklatin``
- ``hebrew``
- ``hungarian``
- ``italian``
- ``latvian``
- ``polish``
- ``portuguese``
- ``romanian``
- ``russian``
- ``spanish``
- ``turkish``
name_mode : str
The name mode of the algorithm:
- ``gen`` -- general (default)
- ``ash`` -- Ashkenazi
- ``sep`` -- Sephardic
match_mode : str
Matching mode: ``approx`` or ``exact``
concat : bool
Concatenation mode
filter_langs : bool
Filter out incompatible languages
Returns
-------
tuple
The Beider-Morse phonetic value(s)
Examples
--------
>>> bmpm('Christopher')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir xristopi xritopir xritopi xristofi xritofir xritofi
tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir
zristofi zritofir zritofi'
>>> bmpm('Niall')
'nial niol'
>>> bmpm('Smith')
'zmit'
>>> bmpm('Schmidt')
'zmit stzmit'
>>> bmpm('Christopher', language_arg='German')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='English')
'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir
xristafir xrQstafir'
>>> bmpm('Christopher', language_arg='German', name_mode='ash')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='German', match_mode='exact')
'xriStopher xriStofer xristopher xristofer'
"""
return BeiderMorse().encode(
word, language_arg, name_mode, match_mode, concat, filter_langs
)
|
[
"def",
"bmpm",
"(",
"word",
",",
"language_arg",
"=",
"0",
",",
"name_mode",
"=",
"'gen'",
",",
"match_mode",
"=",
"'approx'",
",",
"concat",
"=",
"False",
",",
"filter_langs",
"=",
"False",
",",
")",
":",
"return",
"BeiderMorse",
"(",
")",
".",
"encode",
"(",
"word",
",",
"language_arg",
",",
"name_mode",
",",
"match_mode",
",",
"concat",
",",
"filter_langs",
")"
] |
Return the Beider-Morse Phonetic Matching encoding(s) of a term.
This is a wrapper for :py:meth:`BeiderMorse.encode`.
Parameters
----------
word : str
The word to transform
language_arg : str
The language of the term; supported values include:
- ``any``
- ``arabic``
- ``cyrillic``
- ``czech``
- ``dutch``
- ``english``
- ``french``
- ``german``
- ``greek``
- ``greeklatin``
- ``hebrew``
- ``hungarian``
- ``italian``
- ``latvian``
- ``polish``
- ``portuguese``
- ``romanian``
- ``russian``
- ``spanish``
- ``turkish``
name_mode : str
The name mode of the algorithm:
- ``gen`` -- general (default)
- ``ash`` -- Ashkenazi
- ``sep`` -- Sephardic
match_mode : str
Matching mode: ``approx`` or ``exact``
concat : bool
Concatenation mode
filter_langs : bool
Filter out incompatible languages
Returns
-------
tuple
The Beider-Morse phonetic value(s)
Examples
--------
>>> bmpm('Christopher')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir xristopi xritopir xritopi xristofi xritofir xritofi
tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir
zristofi zritofir zritofi'
>>> bmpm('Niall')
'nial niol'
>>> bmpm('Smith')
'zmit'
>>> bmpm('Schmidt')
'zmit stzmit'
>>> bmpm('Christopher', language_arg='German')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='English')
'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir
xristafir xrQstafir'
>>> bmpm('Christopher', language_arg='German', name_mode='ash')
'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir
xristYfir'
>>> bmpm('Christopher', language_arg='German', match_mode='exact')
'xriStopher xriStofer xristopher xristofer'
|
[
"Return",
"the",
"Beider",
"-",
"Morse",
"Phonetic",
"Matching",
"encoding",
"(",
"s",
")",
"of",
"a",
"term",
"."
] |
python
|
valid
| 26.411111 |
johnbywater/eventsourcing
|
eventsourcing/contrib/paxos/composable.py
|
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L360-L408
|
def receive_accepted(self, msg):
'''
Called when an Accepted message is received from an acceptor. Once the final value
is determined, the return value of this method will be a Resolution message containing
the consensual value. Subsequent calls after the resolution is chosen will continue to add
new Acceptors to the final_acceptors set and return Resolution messages.
'''
if self.final_value is not None:
if msg.proposal_id >= self.final_proposal_id and msg.proposal_value == self.final_value:
self.final_acceptors.add(msg.from_uid)
return Resolution(self.network_uid, self.final_value)
last_pn = self.acceptors.get(msg.from_uid)
if last_pn is not None and msg.proposal_id <= last_pn:
return # Old message
self.acceptors[msg.from_uid] = msg.proposal_id
if last_pn is not None:
# String proposal_key, need string keys for JSON.
proposal_key = str(last_pn)
ps = self.proposals[proposal_key]
ps.retain_count -= 1
ps.acceptors.remove(msg.from_uid)
if ps.retain_count == 0:
del self.proposals[proposal_key]
# String proposal_key, need string keys for JSON.
proposal_key = str(msg.proposal_id)
if not proposal_key in self.proposals:
self.proposals[proposal_key] = ProposalStatus(msg.proposal_value)
ps = self.proposals[proposal_key]
assert msg.proposal_value == ps.value, 'Value mismatch for single proposal!'
ps.accept_count += 1
ps.retain_count += 1
ps.acceptors.add(msg.from_uid)
if ps.accept_count == self.quorum_size:
self.final_proposal_id = msg.proposal_id
self.final_value = msg.proposal_value
self.final_acceptors = ps.acceptors
self.proposals = None
self.acceptors = None
return Resolution(self.network_uid, self.final_value)
|
[
"def",
"receive_accepted",
"(",
"self",
",",
"msg",
")",
":",
"if",
"self",
".",
"final_value",
"is",
"not",
"None",
":",
"if",
"msg",
".",
"proposal_id",
">=",
"self",
".",
"final_proposal_id",
"and",
"msg",
".",
"proposal_value",
"==",
"self",
".",
"final_value",
":",
"self",
".",
"final_acceptors",
".",
"add",
"(",
"msg",
".",
"from_uid",
")",
"return",
"Resolution",
"(",
"self",
".",
"network_uid",
",",
"self",
".",
"final_value",
")",
"last_pn",
"=",
"self",
".",
"acceptors",
".",
"get",
"(",
"msg",
".",
"from_uid",
")",
"if",
"last_pn",
"is",
"not",
"None",
"and",
"msg",
".",
"proposal_id",
"<=",
"last_pn",
":",
"return",
"# Old message",
"self",
".",
"acceptors",
"[",
"msg",
".",
"from_uid",
"]",
"=",
"msg",
".",
"proposal_id",
"if",
"last_pn",
"is",
"not",
"None",
":",
"# String proposal_key, need string keys for JSON.",
"proposal_key",
"=",
"str",
"(",
"last_pn",
")",
"ps",
"=",
"self",
".",
"proposals",
"[",
"proposal_key",
"]",
"ps",
".",
"retain_count",
"-=",
"1",
"ps",
".",
"acceptors",
".",
"remove",
"(",
"msg",
".",
"from_uid",
")",
"if",
"ps",
".",
"retain_count",
"==",
"0",
":",
"del",
"self",
".",
"proposals",
"[",
"proposal_key",
"]",
"# String proposal_key, need string keys for JSON.",
"proposal_key",
"=",
"str",
"(",
"msg",
".",
"proposal_id",
")",
"if",
"not",
"proposal_key",
"in",
"self",
".",
"proposals",
":",
"self",
".",
"proposals",
"[",
"proposal_key",
"]",
"=",
"ProposalStatus",
"(",
"msg",
".",
"proposal_value",
")",
"ps",
"=",
"self",
".",
"proposals",
"[",
"proposal_key",
"]",
"assert",
"msg",
".",
"proposal_value",
"==",
"ps",
".",
"value",
",",
"'Value mismatch for single proposal!'",
"ps",
".",
"accept_count",
"+=",
"1",
"ps",
".",
"retain_count",
"+=",
"1",
"ps",
".",
"acceptors",
".",
"add",
"(",
"msg",
".",
"from_uid",
")",
"if",
"ps",
".",
"accept_count",
"==",
"self",
".",
"quorum_size",
":",
"self",
".",
"final_proposal_id",
"=",
"msg",
".",
"proposal_id",
"self",
".",
"final_value",
"=",
"msg",
".",
"proposal_value",
"self",
".",
"final_acceptors",
"=",
"ps",
".",
"acceptors",
"self",
".",
"proposals",
"=",
"None",
"self",
".",
"acceptors",
"=",
"None",
"return",
"Resolution",
"(",
"self",
".",
"network_uid",
",",
"self",
".",
"final_value",
")"
] |
Called when an Accepted message is received from an acceptor. Once the final value
is determined, the return value of this method will be a Resolution message containing
the consensual value. Subsequent calls after the resolution is chosen will continue to add
new Acceptors to the final_acceptors set and return Resolution messages.
|
[
"Called",
"when",
"an",
"Accepted",
"message",
"is",
"received",
"from",
"an",
"acceptor",
".",
"Once",
"the",
"final",
"value",
"is",
"determined",
"the",
"return",
"value",
"of",
"this",
"method",
"will",
"be",
"a",
"Resolution",
"message",
"containing",
"the",
"consentual",
"value",
".",
"Subsequent",
"calls",
"after",
"the",
"resolution",
"is",
"chosen",
"will",
"continue",
"to",
"add",
"new",
"Acceptors",
"to",
"the",
"final_acceptors",
"set",
"and",
"return",
"Resolution",
"messages",
"."
] |
python
|
train
| 40.387755 |
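The quorum rule in receive_accepted above can be isolated into a small self-contained toy: a value resolves once one (proposal_id, value) pair collects quorum_size acceptances. The Accepted tuple and resolve helper here are hypothetical stand-ins, not the library's classes, and the toy deliberately skips the retain-count and stale-message bookkeeping the real method performs.

from collections import namedtuple

Accepted = namedtuple('Accepted', 'from_uid proposal_id value')

def resolve(messages, quorum_size):
    counts = {}
    for msg in messages:
        key = (msg.proposal_id, msg.value)
        counts[key] = counts.get(key, 0) + 1
        if counts[key] == quorum_size:
            return msg.value  # quorum reached: resolution
    return None  # no quorum yet

msgs = [Accepted('a', 1, 'X'), Accepted('b', 1, 'X'), Accepted('c', 1, 'X')]
assert resolve(msgs, quorum_size=2) == 'X'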
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/parallel/client/client.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/client/client.py#L797-L810
|
def _flush_notifications(self):
"""Flush notifications of engine registrations waiting
in ZMQ queue."""
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
while msg is not None:
if self.debug:
pprint(msg)
msg_type = msg['header']['msg_type']
handler = self._notification_handlers.get(msg_type, None)
if handler is None:
raise Exception("Unhandled message type: %s"%msg.msg_type)
else:
handler(msg)
idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
|
[
"def",
"_flush_notifications",
"(",
"self",
")",
":",
"idents",
",",
"msg",
"=",
"self",
".",
"session",
".",
"recv",
"(",
"self",
".",
"_notification_socket",
",",
"mode",
"=",
"zmq",
".",
"NOBLOCK",
")",
"while",
"msg",
"is",
"not",
"None",
":",
"if",
"self",
".",
"debug",
":",
"pprint",
"(",
"msg",
")",
"msg_type",
"=",
"msg",
"[",
"'header'",
"]",
"[",
"'msg_type'",
"]",
"handler",
"=",
"self",
".",
"_notification_handlers",
".",
"get",
"(",
"msg_type",
",",
"None",
")",
"if",
"handler",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Unhandled message type: %s\"",
"%",
"msg",
".",
"msg_type",
")",
"else",
":",
"handler",
"(",
"msg",
")",
"idents",
",",
"msg",
"=",
"self",
".",
"session",
".",
"recv",
"(",
"self",
".",
"_notification_socket",
",",
"mode",
"=",
"zmq",
".",
"NOBLOCK",
")"
] |
Flush notifications of engine registrations waiting
in ZMQ queue.
|
[
"Flush",
"notifications",
"of",
"engine",
"registrations",
"waiting",
"in",
"ZMQ",
"queue",
"."
] |
python
|
test
| 45.5 |
nornir-automation/nornir
|
nornir/core/task.py
|
https://github.com/nornir-automation/nornir/blob/3425c47fd870db896cb80f619bae23bd98d50c74/nornir/core/task.py#L216-L218
|
def failed_hosts(self):
"""Hosts that failed during the execution of the task."""
return {h: r for h, r in self.items() if r.failed}
|
[
"def",
"failed_hosts",
"(",
"self",
")",
":",
"return",
"{",
"h",
":",
"r",
"for",
"h",
",",
"r",
"in",
"self",
".",
"items",
"(",
")",
"if",
"r",
".",
"failed",
"}"
] |
Hosts that failed during the execution of the task.
|
[
"Hosts",
"that",
"failed",
"during",
"the",
"execution",
"of",
"the",
"task",
"."
] |
python
|
train
| 48.666667 |
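A usage sketch for failed_hosts above, assuming a standard nornir setup where it is exposed as a property on the aggregated result; the config path and the no-op task are placeholders for illustration.

from nornir import InitNornir

def noop(task):
    return 'ok'  # stand-in task

nr = InitNornir(config_file='config.yaml')  # path is an assumption
result = nr.run(task=noop)

# Only hosts whose result is marked failed survive the filter
for host, multi_result in result.failed_hosts.items():
    print(host, multi_result.failed)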
royi1000/py-libhdate
|
hdate/date.py
|
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/date.py#L130-L134
|
def _jdn(self):
"""Return the Julian date number for the given date."""
if self._last_updated == "gdate":
return conv.gdate_to_jdn(self.gdate)
return conv.hdate_to_jdn(self.hdate)
|
[
"def",
"_jdn",
"(",
"self",
")",
":",
"if",
"self",
".",
"_last_updated",
"==",
"\"gdate\"",
":",
"return",
"conv",
".",
"gdate_to_jdn",
"(",
"self",
".",
"gdate",
")",
"return",
"conv",
".",
"hdate_to_jdn",
"(",
"self",
".",
"hdate",
")"
] |
Return the Julian date number for the given date.
|
[
"Return",
"the",
"Julian",
"date",
"number",
"for",
"the",
"given",
"date",
"."
] |
python
|
train
| 42.2 |
sckott/pygbif
|
pygbif/occurrences/download.py
|
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L18-L29
|
def _check_environ(variable, value):
"""check if a variable is present in the environmental variables"""
if is_not_none(value):
return value
else:
value = os.environ.get(variable)
if is_none(value):
stop(''.join([variable,
""" not supplied and no entry in environmental
variables"""]))
else:
return value
|
[
"def",
"_check_environ",
"(",
"variable",
",",
"value",
")",
":",
"if",
"is_not_none",
"(",
"value",
")",
":",
"return",
"value",
"else",
":",
"value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"variable",
")",
"if",
"is_none",
"(",
"value",
")",
":",
"stop",
"(",
"''",
".",
"join",
"(",
"[",
"variable",
",",
"\"\"\" not supplied and no entry in environmental\n variables\"\"\"",
"]",
")",
")",
"else",
":",
"return",
"value"
] |
check if a variable is present in the environment variables
|
[
"check",
"if",
"a",
"variable",
"is",
"present",
"in",
"the",
"environmental",
"variables"
] |
python
|
train
| 34.5 |
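The fallback pattern above (explicit value first, then the environment, then a hard stop) is easy to check in isolation; this standalone sketch mirrors it with a plain exception instead of pygbif's stop helper.

import os

def check_environ(variable, value):
    if value is not None:
        return value  # explicit value wins
    value = os.environ.get(variable)
    if value is None:
        raise RuntimeError(
            '%s not supplied and no entry in environment variables' % variable)
    return value

assert check_environ('GBIF_USER', 'alice') == 'alice'  # explicit value wins
# check_environ('GBIF_USER', None) would read os.environ['GBIF_USER'] or raise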
kevinconway/iface
|
iface/checks.py
|
https://github.com/kevinconway/iface/blob/2687f7965eed155b9594a298ffa260a2f9f821f9/iface/checks.py#L9-L25
|
def _ensure_ifaces_tuple(ifaces):
"""Convert to a tuple of interfaces and raise if not interfaces."""
try:
ifaces = tuple(ifaces)
except TypeError:
ifaces = (ifaces,)
for iface in ifaces:
if not _issubclass(iface, ibc.Iface):
raise TypeError('Can only compare against interfaces.')
return ifaces
|
[
"def",
"_ensure_ifaces_tuple",
"(",
"ifaces",
")",
":",
"try",
":",
"ifaces",
"=",
"tuple",
"(",
"ifaces",
")",
"except",
"TypeError",
":",
"ifaces",
"=",
"(",
"ifaces",
",",
")",
"for",
"iface",
"in",
"ifaces",
":",
"if",
"not",
"_issubclass",
"(",
"iface",
",",
"ibc",
".",
"Iface",
")",
":",
"raise",
"TypeError",
"(",
"'Can only compare against interfaces.'",
")",
"return",
"ifaces"
] |
Convert to a tuple of interfaces and raise if not interfaces.
|
[
"Convert",
"to",
"a",
"tuple",
"of",
"interfaces",
"and",
"raise",
"if",
"not",
"interfaces",
"."
] |
python
|
train
| 20.117647 |
PredixDev/predixpy
|
predix/security/uaa.py
|
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/security/uaa.py#L226-L239
|
def logout(self):
"""
Log currently authenticated user out, invalidating any existing tokens.
"""
# Remove token from local cache
# MAINT: need to expire token on server
data = self._read_uaa_cache()
if self.uri in data:
for client in data[self.uri]:
if client['id'] == self.client['id']:
data[self.uri].remove(client)
with open(self._cache_path, 'w') as output:
output.write(json.dumps(data, sort_keys=True, indent=4))
|
[
"def",
"logout",
"(",
"self",
")",
":",
"# Remove token from local cache",
"# MAINT: need to expire token on server",
"data",
"=",
"self",
".",
"_read_uaa_cache",
"(",
")",
"if",
"self",
".",
"uri",
"in",
"data",
":",
"for",
"client",
"in",
"data",
"[",
"self",
".",
"uri",
"]",
":",
"if",
"client",
"[",
"'id'",
"]",
"==",
"self",
".",
"client",
"[",
"'id'",
"]",
":",
"data",
"[",
"self",
".",
"uri",
"]",
".",
"remove",
"(",
"client",
")",
"with",
"open",
"(",
"self",
".",
"_cache_path",
",",
"'w'",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"data",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")",
")"
] |
Log currently authenticated user out, invalidating any existing tokens.
|
[
"Log",
"currently",
"authenticated",
"user",
"out",
"invalidating",
"any",
"existing",
"tokens",
"."
] |
python
|
train
| 37.928571 |
manns/pyspread
|
pyspread/src/lib/xrect.py
|
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/xrect.py#L47-L57
|
def is_bbox_not_intersecting(self, other):
"""Returns False iif bounding boxed of self and other intersect"""
self_x_min, self_x_max, self_y_min, self_y_max = self.get_bbox()
other_x_min, other_x_max, other_y_min, other_y_max = other.get_bbox()
return \
self_x_min > other_x_max or \
other_x_min > self_x_max or \
self_y_min > other_y_max or \
other_y_min > self_y_max
|
[
"def",
"is_bbox_not_intersecting",
"(",
"self",
",",
"other",
")",
":",
"self_x_min",
",",
"self_x_max",
",",
"self_y_min",
",",
"self_y_max",
"=",
"self",
".",
"get_bbox",
"(",
")",
"other_x_min",
",",
"other_x_max",
",",
"other_y_min",
",",
"other_y_max",
"=",
"other",
".",
"get_bbox",
"(",
")",
"return",
"self_x_min",
">",
"other_x_max",
"or",
"other_x_min",
">",
"self_x_max",
"or",
"self_y_min",
">",
"other_y_max",
"or",
"other_y_min",
">",
"self_y_max"
] |
Returns False iff bounding boxes of self and other intersect
|
[
"Returns",
"False",
"iif",
"bounding",
"boxed",
"of",
"self",
"and",
"other",
"intersect"
] |
python
|
train
| 40 |
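The separating-interval test above generalizes cleanly; this standalone sketch applies the same four comparisons to axis-aligned boxes given as (x_min, x_max, y_min, y_max) tuples.

def bbox_not_intersecting(a, b):
    ax0, ax1, ay0, ay1 = a
    bx0, bx1, by0, by1 = b
    # Boxes are disjoint iff they are separated along some axis
    return ax0 > bx1 or bx0 > ax1 or ay0 > by1 or by0 > ay1

assert bbox_not_intersecting((0, 1, 0, 1), (2, 3, 2, 3))       # disjoint
assert not bbox_not_intersecting((0, 2, 0, 2), (1, 3, 1, 3))   # overlapping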
dvdotsenko/jsonrpc.py
|
jsonrpcparts/serializers.py
|
https://github.com/dvdotsenko/jsonrpc.py/blob/19673edd77a9518ac5655bd407f6b93ffbb2cafc/jsonrpcparts/serializers.py#L440-L467
|
def parse_request(cls, request_string):
"""JSONRPC allows for **batch** requests to be communicated
as array of dicts. This method parses out each individual
element in the batch and returns a list of tuples, each
tuple a result of parsing of each item in the batch.
:Returns: | tuple of (results, is_batch_mode_flag)
| where:
| - results is a tuple describing the request
    | - is_batch_mode_flag is a bool indicating whether the
    | request came in batch mode (as an array of requests) or not.
:Raises: RPCParseError, RPCInvalidRequest
"""
try:
batch = cls.json_loads(request_string)
except ValueError as err:
raise errors.RPCParseError("No valid JSON. (%s)" % str(err))
if isinstance(batch, (list, tuple)) and batch:
# batch is true batch.
# list of parsed request objects, is_batch_mode_flag
return [cls._parse_single_request_trap_errors(request) for request in batch], True
elif isinstance(batch, dict):
# `batch` is actually single request object
return [cls._parse_single_request_trap_errors(batch)], False
raise errors.RPCInvalidRequest("Neither a batch array nor a single request object found in the request.")
|
[
"def",
"parse_request",
"(",
"cls",
",",
"request_string",
")",
":",
"try",
":",
"batch",
"=",
"cls",
".",
"json_loads",
"(",
"request_string",
")",
"except",
"ValueError",
"as",
"err",
":",
"raise",
"errors",
".",
"RPCParseError",
"(",
"\"No valid JSON. (%s)\"",
"%",
"str",
"(",
"err",
")",
")",
"if",
"isinstance",
"(",
"batch",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"batch",
":",
"# batch is true batch.",
"# list of parsed request objects, is_batch_mode_flag",
"return",
"[",
"cls",
".",
"_parse_single_request_trap_errors",
"(",
"request",
")",
"for",
"request",
"in",
"batch",
"]",
",",
"True",
"elif",
"isinstance",
"(",
"batch",
",",
"dict",
")",
":",
"# `batch` is actually single request object",
"return",
"[",
"cls",
".",
"_parse_single_request_trap_errors",
"(",
"batch",
")",
"]",
",",
"False",
"raise",
"errors",
".",
"RPCInvalidRequest",
"(",
"\"Neither a batch array nor a single request object found in the request.\"",
")"
] |
JSONRPC allows for **batch** requests to be communicated
as array of dicts. This method parses out each individual
element in the batch and returns a list of tuples, each
tuple a result of parsing of each item in the batch.
:Returns: | tuple of (results, is_batch_mode_flag)
| where:
| - results is a tuple describing the request
| - is_batch_mode_flag is a bool indicating whether the
| request came in batch mode (as an array of requests) or not.
:Raises: RPCParseError, RPCInvalidRequest
|
[
"JSONRPC",
"allows",
"for",
"**",
"batch",
"**",
"requests",
"to",
"be",
"communicated",
"as",
"array",
"of",
"dicts",
".",
"This",
"method",
"parses",
"out",
"each",
"individual",
"element",
"in",
"the",
"batch",
"and",
"returns",
"a",
"list",
"of",
"tuples",
"each",
"tuple",
"a",
"result",
"of",
"parsing",
"of",
"each",
"item",
"in",
"the",
"batch",
"."
] |
python
|
train
| 48.357143 |
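The array-versus-object dispatch in parse_request above is the crux; this stripped-down sketch reproduces just that branch with the standard json module, omitting the per-item validation the real method delegates to _parse_single_request_trap_errors.

import json

def parse(request_string):
    batch = json.loads(request_string)            # may raise ValueError
    if isinstance(batch, (list, tuple)) and batch:
        return list(batch), True                  # batch mode
    if isinstance(batch, dict):
        return [batch], False                     # single request
    raise ValueError('neither a batch array nor a request object')

reqs, is_batch = parse('[{"jsonrpc": "2.0", "method": "ping", "id": 1}]')
assert is_batch and reqs[0]['method'] == 'ping'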
hozn/coilmq
|
coilmq/auth/simple.py
|
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/auth/simple.py#L29-L42
|
def make_simple():
"""
Create a L{SimpleAuthenticator} instance using values read from coilmq configuration.
@return: The configured L{SimpleAuthenticator}
@rtype: L{SimpleAuthenticator}
@raise ConfigError: If there is a configuration error.
"""
authfile = config.get('coilmq', 'auth.simple.file')
if not authfile:
raise ConfigError('Missing configuration parameter: auth.simple.file')
sa = SimpleAuthenticator()
sa.from_configfile(authfile)
return sa
|
[
"def",
"make_simple",
"(",
")",
":",
"authfile",
"=",
"config",
".",
"get",
"(",
"'coilmq'",
",",
"'auth.simple.file'",
")",
"if",
"not",
"authfile",
":",
"raise",
"ConfigError",
"(",
"'Missing configuration parameter: auth.simple.file'",
")",
"sa",
"=",
"SimpleAuthenticator",
"(",
")",
"sa",
".",
"from_configfile",
"(",
"authfile",
")",
"return",
"sa"
] |
Create a L{SimpleAuthenticator} instance using values read from coilmq configuration.
@return: The configured L{SimpleAuthenticator}
@rtype: L{SimpleAuthenticator}
@raise ConfigError: If there is a configuration error.
|
[
"Create",
"a",
"L",
"{",
"SimpleAuthenticator",
"}",
"instance",
"using",
"values",
"read",
"from",
"coilmq",
"configuration",
"."
] |
python
|
train
| 35.071429 |
WhyNotHugo/django-renderpdf
|
django_renderpdf/views.py
|
https://github.com/WhyNotHugo/django-renderpdf/blob/56de11326e61d317b5eb08c340790ef9955778e3/django_renderpdf/views.py#L88-L108
|
def render(self, request, template, context):
"""
Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
    ``html=true`` was set it will return plain HTML.
"""
if self.allow_force_html and self.request.GET.get('html', False):
html = get_template(template).render(context)
return HttpResponse(html)
else:
response = HttpResponse(content_type='application/pdf')
if self.prompt_download:
response['Content-Disposition'] = 'attachment; filename="{}"' \
.format(self.get_download_name())
helpers.render_pdf(
template=template,
file_=response,
url_fetcher=self.url_fetcher,
context=context,
)
return response
|
[
"def",
"render",
"(",
"self",
",",
"request",
",",
"template",
",",
"context",
")",
":",
"if",
"self",
".",
"allow_force_html",
"and",
"self",
".",
"request",
".",
"GET",
".",
"get",
"(",
"'html'",
",",
"False",
")",
":",
"html",
"=",
"get_template",
"(",
"template",
")",
".",
"render",
"(",
"context",
")",
"return",
"HttpResponse",
"(",
"html",
")",
"else",
":",
"response",
"=",
"HttpResponse",
"(",
"content_type",
"=",
"'application/pdf'",
")",
"if",
"self",
".",
"prompt_download",
":",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename=\"{}\"'",
".",
"format",
"(",
"self",
".",
"get_download_name",
"(",
")",
")",
"helpers",
".",
"render_pdf",
"(",
"template",
"=",
"template",
",",
"file_",
"=",
"response",
",",
"url_fetcher",
"=",
"self",
".",
"url_fetcher",
",",
"context",
"=",
"context",
",",
")",
"return",
"response"
] |
Returns a response. By default, this will contain the rendered PDF, but
if both ``allow_force_html`` is ``True`` and the querystring
``html=true`` was set it will return plain HTML.
|
[
"Returns",
"a",
"response",
".",
"By",
"default",
"this",
"will",
"contain",
"the",
"rendered",
"PDF",
"but",
"if",
"both",
"allow_force_html",
"is",
"True",
"and",
"the",
"querystring",
"html",
"=",
"true",
"was",
"set",
"it",
"will",
"return",
"a",
"plain",
"HTML",
"."
] |
python
|
train
| 42.857143 |
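A usage sketch for the render behaviour above, following django-renderpdf's documented PDFView; the template path, download name, and context values are placeholders, and assuming allow_force_html is left enabled, appending ?html=true to the URL returns the raw HTML instead of the PDF.

from django_renderpdf.views import PDFView

class ReceiptView(PDFView):
    template_name = 'receipt.html'   # template path is an assumption
    prompt_download = True
    download_name = 'receipt.pdf'    # consumed by get_download_name()

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context['total'] = 42        # placeholder context
        return context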
spyder-ide/spyder
|
spyder/app/mainwindow.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2741-L2755
|
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
|
[
"def",
"open_file",
"(",
"self",
",",
"fname",
",",
"external",
"=",
"False",
")",
":",
"fname",
"=",
"to_text_string",
"(",
"fname",
")",
"ext",
"=",
"osp",
".",
"splitext",
"(",
"fname",
")",
"[",
"1",
"]",
"if",
"encoding",
".",
"is_text_file",
"(",
"fname",
")",
":",
"self",
".",
"editor",
".",
"load",
"(",
"fname",
")",
"elif",
"self",
".",
"variableexplorer",
"is",
"not",
"None",
"and",
"ext",
"in",
"IMPORT_EXT",
":",
"self",
".",
"variableexplorer",
".",
"import_data",
"(",
"fname",
")",
"elif",
"not",
"external",
":",
"fname",
"=",
"file_uri",
"(",
"fname",
")",
"programs",
".",
"start_file",
"(",
"fname",
")"
] |
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
|
[
"Open",
"filename",
"with",
"the",
"appropriate",
"application",
"Redirect",
"to",
"the",
"right",
"widget",
"(",
"txt",
"-",
">",
"editor",
"spydata",
"-",
">",
"workspace",
"...",
")",
"or",
"open",
"file",
"outside",
"Spyder",
"(",
"if",
"extension",
"is",
"not",
"supported",
")"
] |
python
|
train
| 43.133333 |
ladybug-tools/ladybug
|
ladybug/designday.py
|
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L194-L199
|
def to_json(self):
"""Convert the Design Day to a dictionary."""
return {
'location': self.location.to_json(),
'design_days': [des_d.to_json() for des_d in self.design_days]
}
|
[
"def",
"to_json",
"(",
"self",
")",
":",
"return",
"{",
"'location'",
":",
"self",
".",
"location",
".",
"to_json",
"(",
")",
",",
"'design_days'",
":",
"[",
"des_d",
".",
"to_json",
"(",
")",
"for",
"des_d",
"in",
"self",
".",
"design_days",
"]",
"}"
] |
Convert the Design Day to a dictionary.
|
[
"Convert",
"the",
"Design",
"Day",
"to",
"a",
"dictionary",
"."
] |
python
|
train
| 36.333333 |
psss/did
|
did/plugins/trello.py
|
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/trello.py#L114-L125
|
def board_links_to_ids(self):
""" Convert board links to ids """
resp = self.stats.session.open(
"{0}/members/{1}/boards?{2}".format(
self.stats.url, self.username, urllib.urlencode({
"key": self.key,
"token": self.token,
"fields": "shortLink"})))
boards = json.loads(resp.read())
return [board['id'] for board in boards if self.board_links == [""]
or board['shortLink'] in self.board_links]
|
[
"def",
"board_links_to_ids",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"stats",
".",
"session",
".",
"open",
"(",
"\"{0}/members/{1}/boards?{2}\"",
".",
"format",
"(",
"self",
".",
"stats",
".",
"url",
",",
"self",
".",
"username",
",",
"urllib",
".",
"urlencode",
"(",
"{",
"\"key\"",
":",
"self",
".",
"key",
",",
"\"token\"",
":",
"self",
".",
"token",
",",
"\"fields\"",
":",
"\"shortLink\"",
"}",
")",
")",
")",
"boards",
"=",
"json",
".",
"loads",
"(",
"resp",
".",
"read",
"(",
")",
")",
"return",
"[",
"board",
"[",
"'id'",
"]",
"for",
"board",
"in",
"boards",
"if",
"self",
".",
"board_links",
"==",
"[",
"\"\"",
"]",
"or",
"board",
"[",
"'shortLink'",
"]",
"in",
"self",
".",
"board_links",
"]"
] |
Convert board links to ids
|
[
"Convert",
"board",
"links",
"to",
"ids"
] |
python
|
train
| 43.083333 |
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py#L149-L161
|
def police_priority_map_exceed_map_pri1_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri1_exceed = ET.SubElement(exceed, "map-pri1-exceed")
map_pri1_exceed.text = kwargs.pop('map_pri1_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"police_priority_map_exceed_map_pri1_exceed",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"police_priority_map",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"police-priority-map\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-policer\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"police_priority_map",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"exceed",
"=",
"ET",
".",
"SubElement",
"(",
"police_priority_map",
",",
"\"exceed\"",
")",
"map_pri1_exceed",
"=",
"ET",
".",
"SubElement",
"(",
"exceed",
",",
"\"map-pri1-exceed\"",
")",
"map_pri1_exceed",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'map_pri1_exceed'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
| 49.153846 |
rstoneback/pysatMagVect
|
pysatMagVect/satellite.py
|
https://github.com/rstoneback/pysatMagVect/blob/3fdc87ffbe05be58123f80f880d1237c2f34c7be/pysatMagVect/satellite.py#L171-L342
|
def add_mag_drift_unit_vectors(inst, max_steps=40000, step_size=10.):
"""Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
    Internally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
"""
# vectors are returned in geo/ecef coordinate system
add_mag_drift_unit_vectors_ecef(inst, max_steps=max_steps, step_size=step_size)
# convert them to S/C using transformation supplied by OA
inst['unit_zon_x'], inst['unit_zon_y'], inst['unit_zon_z'] = project_ecef_vector_onto_basis(inst['unit_zon_ecef_x'], inst['unit_zon_ecef_y'], inst['unit_zon_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_fa_x'], inst['unit_fa_y'], inst['unit_fa_z'] = project_ecef_vector_onto_basis(inst['unit_fa_ecef_x'], inst['unit_fa_ecef_y'], inst['unit_fa_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst['unit_mer_x'], inst['unit_mer_y'], inst['unit_mer_z'] = project_ecef_vector_onto_basis(inst['unit_mer_ecef_x'], inst['unit_mer_ecef_y'], inst['unit_mer_ecef_z'],
inst['sc_xhat_x'], inst['sc_xhat_y'], inst['sc_xhat_z'],
inst['sc_yhat_x'], inst['sc_yhat_y'], inst['sc_yhat_z'],
inst['sc_zhat_x'], inst['sc_zhat_y'], inst['sc_zhat_z'])
inst.meta['unit_zon_x'] = { 'long_name':'Zonal direction along IVM-x',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-X component',
'axis': 'Zonal Unit Vector: IVM-X component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_y'] = {'long_name':'Zonal direction along IVM-y',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Y component',
'axis': 'Zonal Unit Vector: IVM-Y component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_zon_z'] = {'long_name':'Zonal direction along IVM-z',
'desc': 'Unit vector for the zonal geomagnetic direction.',
'label': 'Zonal Unit Vector: IVM-Z component',
'axis': 'Zonal Unit Vector: IVM-Z component',
'notes': ('Positive towards the east. Zonal vector is normal to magnetic meridian plane. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_x'] = {'long_name':'Field-aligned direction along IVM-x',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-X component',
'axis': 'Field Aligned Unit Vector: IVM-X component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_y'] = {'long_name':'Field-aligned direction along IVM-y',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Y component',
'axis': 'Field Aligned Unit Vector: IVM-Y component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_fa_z'] = {'long_name':'Field-aligned direction along IVM-z',
'desc': 'Unit vector for the geomagnetic field line direction.',
'label': 'Field Aligned Unit Vector: IVM-Z component',
'axis': 'Field Aligned Unit Vector: IVM-Z component',
'notes': ('Positive along the field, generally northward. Unit vector is along the geomagnetic field. '
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_x'] = {'long_name':'Meridional direction along IVM-x',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-X component',
'axis': 'Meridional Unit Vector: IVM-X component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_y'] = {'long_name':'Meridional direction along IVM-y',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Y component',
'axis': 'Meridional Unit Vector: IVM-Y component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
inst.meta['unit_mer_z'] = {'long_name':'Meridional direction along IVM-z',
'desc': 'Unit vector for the geomagnetic meridional direction.',
'label': 'Meridional Unit Vector: IVM-Z component',
'axis': 'Meridional Unit Vector: IVM-Z component',
'notes': ('Positive is aligned with vertical at '
'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '
'and in the plane of the meridian.'
'The unit vector is expressed in the IVM coordinate system, x - along RAM, '
'z - towards nadir, y - completes the system, generally southward. '
'Calculated using the corresponding unit vector in ECEF and the orientation '
'of the IVM also expressed in ECEF (sc_*hat_*).'),
'scale': 'linear',
'units': '',
'value_min':-1.,
'value_max':1}
return
|
[
"def",
"add_mag_drift_unit_vectors",
"(",
"inst",
",",
"max_steps",
"=",
"40000",
",",
"step_size",
"=",
"10.",
")",
":",
"# vectors are returned in geo/ecef coordinate system",
"add_mag_drift_unit_vectors_ecef",
"(",
"inst",
",",
"max_steps",
"=",
"max_steps",
",",
"step_size",
"=",
"step_size",
")",
"# convert them to S/C using transformation supplied by OA",
"inst",
"[",
"'unit_zon_x'",
"]",
",",
"inst",
"[",
"'unit_zon_y'",
"]",
",",
"inst",
"[",
"'unit_zon_z'",
"]",
"=",
"project_ecef_vector_onto_basis",
"(",
"inst",
"[",
"'unit_zon_ecef_x'",
"]",
",",
"inst",
"[",
"'unit_zon_ecef_y'",
"]",
",",
"inst",
"[",
"'unit_zon_ecef_z'",
"]",
",",
"inst",
"[",
"'sc_xhat_x'",
"]",
",",
"inst",
"[",
"'sc_xhat_y'",
"]",
",",
"inst",
"[",
"'sc_xhat_z'",
"]",
",",
"inst",
"[",
"'sc_yhat_x'",
"]",
",",
"inst",
"[",
"'sc_yhat_y'",
"]",
",",
"inst",
"[",
"'sc_yhat_z'",
"]",
",",
"inst",
"[",
"'sc_zhat_x'",
"]",
",",
"inst",
"[",
"'sc_zhat_y'",
"]",
",",
"inst",
"[",
"'sc_zhat_z'",
"]",
")",
"inst",
"[",
"'unit_fa_x'",
"]",
",",
"inst",
"[",
"'unit_fa_y'",
"]",
",",
"inst",
"[",
"'unit_fa_z'",
"]",
"=",
"project_ecef_vector_onto_basis",
"(",
"inst",
"[",
"'unit_fa_ecef_x'",
"]",
",",
"inst",
"[",
"'unit_fa_ecef_y'",
"]",
",",
"inst",
"[",
"'unit_fa_ecef_z'",
"]",
",",
"inst",
"[",
"'sc_xhat_x'",
"]",
",",
"inst",
"[",
"'sc_xhat_y'",
"]",
",",
"inst",
"[",
"'sc_xhat_z'",
"]",
",",
"inst",
"[",
"'sc_yhat_x'",
"]",
",",
"inst",
"[",
"'sc_yhat_y'",
"]",
",",
"inst",
"[",
"'sc_yhat_z'",
"]",
",",
"inst",
"[",
"'sc_zhat_x'",
"]",
",",
"inst",
"[",
"'sc_zhat_y'",
"]",
",",
"inst",
"[",
"'sc_zhat_z'",
"]",
")",
"inst",
"[",
"'unit_mer_x'",
"]",
",",
"inst",
"[",
"'unit_mer_y'",
"]",
",",
"inst",
"[",
"'unit_mer_z'",
"]",
"=",
"project_ecef_vector_onto_basis",
"(",
"inst",
"[",
"'unit_mer_ecef_x'",
"]",
",",
"inst",
"[",
"'unit_mer_ecef_y'",
"]",
",",
"inst",
"[",
"'unit_mer_ecef_z'",
"]",
",",
"inst",
"[",
"'sc_xhat_x'",
"]",
",",
"inst",
"[",
"'sc_xhat_y'",
"]",
",",
"inst",
"[",
"'sc_xhat_z'",
"]",
",",
"inst",
"[",
"'sc_yhat_x'",
"]",
",",
"inst",
"[",
"'sc_yhat_y'",
"]",
",",
"inst",
"[",
"'sc_yhat_z'",
"]",
",",
"inst",
"[",
"'sc_zhat_x'",
"]",
",",
"inst",
"[",
"'sc_zhat_y'",
"]",
",",
"inst",
"[",
"'sc_zhat_z'",
"]",
")",
"inst",
".",
"meta",
"[",
"'unit_zon_x'",
"]",
"=",
"{",
"'long_name'",
":",
"'Zonal direction along IVM-x'",
",",
"'desc'",
":",
"'Unit vector for the zonal geomagnetic direction.'",
",",
"'label'",
":",
"'Zonal Unit Vector: IVM-X component'",
",",
"'axis'",
":",
"'Zonal Unit Vector: IVM-X component'",
",",
"'notes'",
":",
"(",
"'Positive towards the east. Zonal vector is normal to magnetic meridian plane. '",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_zon_y'",
"]",
"=",
"{",
"'long_name'",
":",
"'Zonal direction along IVM-y'",
",",
"'desc'",
":",
"'Unit vector for the zonal geomagnetic direction.'",
",",
"'label'",
":",
"'Zonal Unit Vector: IVM-Y component'",
",",
"'axis'",
":",
"'Zonal Unit Vector: IVM-Y component'",
",",
"'notes'",
":",
"(",
"'Positive towards the east. Zonal vector is normal to magnetic meridian plane. '",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_zon_z'",
"]",
"=",
"{",
"'long_name'",
":",
"'Zonal direction along IVM-z'",
",",
"'desc'",
":",
"'Unit vector for the zonal geomagnetic direction.'",
",",
"'label'",
":",
"'Zonal Unit Vector: IVM-Z component'",
",",
"'axis'",
":",
"'Zonal Unit Vector: IVM-Z component'",
",",
"'notes'",
":",
"(",
"'Positive towards the east. Zonal vector is normal to magnetic meridian plane. '",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_fa_x'",
"]",
"=",
"{",
"'long_name'",
":",
"'Field-aligned direction along IVM-x'",
",",
"'desc'",
":",
"'Unit vector for the geomagnetic field line direction.'",
",",
"'label'",
":",
"'Field Aligned Unit Vector: IVM-X component'",
",",
"'axis'",
":",
"'Field Aligned Unit Vector: IVM-X component'",
",",
"'notes'",
":",
"(",
"'Positive along the field, generally northward. Unit vector is along the geomagnetic field. '",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_fa_y'",
"]",
"=",
"{",
"'long_name'",
":",
"'Field-aligned direction along IVM-y'",
",",
"'desc'",
":",
"'Unit vector for the geomagnetic field line direction.'",
",",
"'label'",
":",
"'Field Aligned Unit Vector: IVM-Y component'",
",",
"'axis'",
":",
"'Field Aligned Unit Vector: IVM-Y component'",
",",
"'notes'",
":",
"(",
"'Positive along the field, generally northward. Unit vector is along the geomagnetic field. '",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_fa_z'",
"]",
"=",
"{",
"'long_name'",
":",
"'Field-aligned direction along IVM-z'",
",",
"'desc'",
":",
"'Unit vector for the geomagnetic field line direction.'",
",",
"'label'",
":",
"'Field Aligned Unit Vector: IVM-Z component'",
",",
"'axis'",
":",
"'Field Aligned Unit Vector: IVM-Z component'",
",",
"'notes'",
":",
"(",
"'Positive along the field, generally northward. Unit vector is along the geomagnetic field. '",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_mer_x'",
"]",
"=",
"{",
"'long_name'",
":",
"'Meridional direction along IVM-x'",
",",
"'desc'",
":",
"'Unit vector for the geomagnetic meridional direction.'",
",",
"'label'",
":",
"'Meridional Unit Vector: IVM-X component'",
",",
"'axis'",
":",
"'Meridional Unit Vector: IVM-X component'",
",",
"'notes'",
":",
"(",
"'Positive is aligned with vertical at '",
"'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '",
"'and in the plane of the meridian.'",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_mer_y'",
"]",
"=",
"{",
"'long_name'",
":",
"'Meridional direction along IVM-y'",
",",
"'desc'",
":",
"'Unit vector for the geomagnetic meridional direction.'",
",",
"'label'",
":",
"'Meridional Unit Vector: IVM-Y component'",
",",
"'axis'",
":",
"'Meridional Unit Vector: IVM-Y component'",
",",
"'notes'",
":",
"(",
"'Positive is aligned with vertical at '",
"'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '",
"'and in the plane of the meridian.'",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"inst",
".",
"meta",
"[",
"'unit_mer_z'",
"]",
"=",
"{",
"'long_name'",
":",
"'Meridional direction along IVM-z'",
",",
"'desc'",
":",
"'Unit vector for the geomagnetic meridional direction.'",
",",
"'label'",
":",
"'Meridional Unit Vector: IVM-Z component'",
",",
"'axis'",
":",
"'Meridional Unit Vector: IVM-Z component'",
",",
"'notes'",
":",
"(",
"'Positive is aligned with vertical at '",
"'geomagnetic equator. Unit vector is perpendicular to the geomagnetic field '",
"'and in the plane of the meridian.'",
"'The unit vector is expressed in the IVM coordinate system, x - along RAM, '",
"'z - towards nadir, y - completes the system, generally southward. '",
"'Calculated using the corresponding unit vector in ECEF and the orientation '",
"'of the IVM also expressed in ECEF (sc_*hat_*).'",
")",
",",
"'scale'",
":",
"'linear'",
",",
"'units'",
":",
"''",
",",
"'value_min'",
":",
"-",
"1.",
",",
"'value_max'",
":",
"1",
"}",
"return"
] |
Add unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in S/C coordinates.
Internally, routine calls add_mag_drift_unit_vectors_ecef.
See function for input parameter description.
Requires the orientation of the S/C basis vectors in ECEF using naming,
'sc_xhat_x' where *hat (*=x,y,z) is the S/C basis vector and _* (*=x,y,z)
is the ECEF direction.
Parameters
----------
inst : pysat.Instrument object
Instrument object to be modified
max_steps : int
Maximum number of steps taken for field line integration
step_size : float
Maximum step size (km) allowed for field line tracer
Returns
-------
None
Modifies instrument object in place. Adds 'unit_zon_*' where * = x,y,z
'unit_fa_*' and 'unit_mer_*' for zonal, field aligned, and meridional
directions. Note that vector components are expressed in the S/C basis.
|
[
"Add",
"unit",
"vectors",
"expressing",
"the",
"ion",
"drift",
"coordinate",
"system",
"organized",
"by",
"the",
"geomagnetic",
"field",
".",
"Unit",
"vectors",
"are",
"expressed",
"in",
"S",
"/",
"C",
"coordinates",
".",
"Interally",
"routine",
"calls",
"add_mag_drift_unit_vectors_ecef",
".",
"See",
"function",
"for",
"input",
"parameter",
"description",
".",
"Requires",
"the",
"orientation",
"of",
"the",
"S",
"/",
"C",
"basis",
"vectors",
"in",
"ECEF",
"using",
"naming",
"sc_xhat_x",
"where",
"*",
"hat",
"(",
"*",
"=",
"x",
"y",
"z",
")",
"is",
"the",
"S",
"/",
"C",
"basis",
"vector",
"and",
"_",
"*",
"(",
"*",
"=",
"x",
"y",
"z",
")",
"is",
"the",
"ECEF",
"direction",
".",
"Parameters",
"----------",
"inst",
":",
"pysat",
".",
"Instrument",
"object",
"Instrument",
"object",
"to",
"be",
"modified",
"max_steps",
":",
"int",
"Maximum",
"number",
"of",
"steps",
"taken",
"for",
"field",
"line",
"integration",
"step_size",
":",
"float",
"Maximum",
"step",
"size",
"(",
"km",
")",
"allowed",
"for",
"field",
"line",
"tracer",
"Returns",
"-------",
"None",
"Modifies",
"instrument",
"object",
"in",
"place",
".",
"Adds",
"unit_zon_",
"*",
"where",
"*",
"=",
"x",
"y",
"z",
"unit_fa_",
"*",
"and",
"unit_mer_",
"*",
"for",
"zonal",
"field",
"aligned",
"and",
"meridional",
"directions",
".",
"Note",
"that",
"vector",
"components",
"are",
"expressed",
"in",
"the",
"S",
"/",
"C",
"basis",
"."
] |
python
|
train
| 77.936047 |
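The repeated project_ecef_vector_onto_basis calls above reduce to three dot products per vector; this sketch shows the presumed math. The helper's exact implementation is not shown here, so treat this as an assumption, and the function name below is my own.

import numpy as np

def project_onto_basis(v, xhat, yhat, zhat):
    # Each spacecraft component is the ECEF vector dotted with that basis vector
    v = np.asarray(v, dtype=float)
    return np.dot(v, xhat), np.dot(v, yhat), np.dot(v, zhat)

vx, vy, vz = project_onto_basis([0.0, 1.0, 0.0],
                                xhat=[1.0, 0.0, 0.0],
                                yhat=[0.0, 1.0, 0.0],
                                zhat=[0.0, 0.0, 1.0])
assert (vx, vy, vz) == (0.0, 1.0, 0.0)  # identity basis leaves the vector unchanged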
samluescher/django-media-tree
|
media_tree/contrib/views/mixin_base.py
|
https://github.com/samluescher/django-media-tree/blob/3eb6345faaf57e2fbe35ca431d4d133f950f2b5f/media_tree/contrib/views/mixin_base.py#L24-L52
|
def get_view(self, request, view_class, opts=None):
"""
Instantiates and returns the view class that will generate the
actual context for this plugin.
"""
kwargs = {}
if opts:
if not isinstance(opts, dict):
opts = opts.__dict__
else:
opts = {}
if not view_class in VALID_MIXIN_OPTIONS:
valid_options = view_class.__dict__.keys()
for cls in view_class.__bases__:
if cls != object:
valid_options += cls.__dict__.keys()
VALID_MIXIN_OPTIONS[view_class] = valid_options
for key in VALID_MIXIN_OPTIONS[view_class]:
if key in opts:
kwargs[key] = opts[key]
elif hasattr(self, key):
kwargs[key] = getattr(self, key)
view = view_class(**kwargs)
view.request = request
view.kwargs = {}
return view
|
[
"def",
"get_view",
"(",
"self",
",",
"request",
",",
"view_class",
",",
"opts",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"opts",
":",
"if",
"not",
"isinstance",
"(",
"opts",
",",
"dict",
")",
":",
"opts",
"=",
"opts",
".",
"__dict__",
"else",
":",
"opts",
"=",
"{",
"}",
"if",
"not",
"view_class",
"in",
"VALID_MIXIN_OPTIONS",
":",
"valid_options",
"=",
"view_class",
".",
"__dict__",
".",
"keys",
"(",
")",
"for",
"cls",
"in",
"view_class",
".",
"__bases__",
":",
"if",
"cls",
"!=",
"object",
":",
"valid_options",
"+=",
"cls",
".",
"__dict__",
".",
"keys",
"(",
")",
"VALID_MIXIN_OPTIONS",
"[",
"view_class",
"]",
"=",
"valid_options",
"for",
"key",
"in",
"VALID_MIXIN_OPTIONS",
"[",
"view_class",
"]",
":",
"if",
"key",
"in",
"opts",
":",
"kwargs",
"[",
"key",
"]",
"=",
"opts",
"[",
"key",
"]",
"elif",
"hasattr",
"(",
"self",
",",
"key",
")",
":",
"kwargs",
"[",
"key",
"]",
"=",
"getattr",
"(",
"self",
",",
"key",
")",
"view",
"=",
"view_class",
"(",
"*",
"*",
"kwargs",
")",
"view",
".",
"request",
"=",
"request",
"view",
".",
"kwargs",
"=",
"{",
"}",
"return",
"view"
] |
Instantiates and returns the view class that will generate the
actual context for this plugin.
|
[
"Instantiates",
"and",
"returns",
"the",
"view",
"class",
"that",
"will",
"generate",
"the",
"actual",
"context",
"for",
"this",
"plugin",
"."
] |
python
|
train
| 32.172414 |
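The option-filtering idea in get_view above (accept only keys that a view class or its direct bases define) works standalone; this sketch uses hypothetical Base and Detail classes purely for illustration.

def valid_options(view_class):
    names = set(view_class.__dict__)
    for base in view_class.__bases__:
        if base is not object:
            names.update(base.__dict__)
    return names

class Base:
    demo = 1

class Detail(Base):
    extra = 2

opts = {'demo': 10, 'extra': 20, 'unknown': 30}
kwargs = {k: v for k, v in opts.items() if k in valid_options(Detail)}
assert 'unknown' not in kwargs and kwargs == {'demo': 10, 'extra': 20}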
markokr/rarfile
|
rarfile.py
|
https://github.com/markokr/rarfile/blob/2704344e8d7a1658c96c8ed8f449d7ba01bedea3/rarfile.py#L685-L695
|
def setpassword(self, password):
"""Sets the password to use when extracting.
"""
self._password = password
if self._file_parser:
if self._file_parser.has_header_encryption():
self._file_parser = None
if not self._file_parser:
self._parse()
else:
self._file_parser.setpassword(self._password)
|
[
"def",
"setpassword",
"(",
"self",
",",
"password",
")",
":",
"self",
".",
"_password",
"=",
"password",
"if",
"self",
".",
"_file_parser",
":",
"if",
"self",
".",
"_file_parser",
".",
"has_header_encryption",
"(",
")",
":",
"self",
".",
"_file_parser",
"=",
"None",
"if",
"not",
"self",
".",
"_file_parser",
":",
"self",
".",
"_parse",
"(",
")",
"else",
":",
"self",
".",
"_file_parser",
".",
"setpassword",
"(",
"self",
".",
"_password",
")"
] |
Sets the password to use when extracting.
|
[
"Sets",
"the",
"password",
"to",
"use",
"when",
"extracting",
"."
] |
python
|
train
| 34.727273 |
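A usage sketch for setpassword above: set the password before listing or extracting, since for header-encrypted archives the setter discards the parsed state and re-parses with the new password. The archive and output paths are placeholders.

import rarfile

rf = rarfile.RarFile('archive.rar')   # archive path is an assumption
rf.setpassword('secret')
print(rf.namelist())                  # header decryption happens on re-parse
rf.extractall(path='out')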
StackStorm/pybind
|
pybind/nos/v6_0_2f/brocade_nameserver_rpc/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_nameserver_rpc/__init__.py#L189-L213
|
def _set_get_nameserver_detail(self, v, load=False):
"""
Setter method for get_nameserver_detail, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_nameserver_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_nameserver_detail() directly.
YANG Description: A function to display the detailed information of
the devices stored in the Name Server database.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name="get-nameserver-detail", rest_name="get-nameserver-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_nameserver_detail must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name="get-nameserver-detail", rest_name="get-nameserver-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)""",
})
self.__get_nameserver_detail = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_get_nameserver_detail",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"get_nameserver_detail",
".",
"get_nameserver_detail",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"get-nameserver-detail\"",
",",
"rest_name",
"=",
"\"get-nameserver-detail\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'display detail device information.'",
",",
"u'hidden'",
":",
"u'rpccmd'",
",",
"u'actionpoint'",
":",
"u'show_ns_detail'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-nameserver'",
",",
"defining_module",
"=",
"'brocade-nameserver'",
",",
"yang_type",
"=",
"'rpc'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"get_nameserver_detail must be of a type compatible with rpc\"\"\"",
",",
"'defined-type'",
":",
"\"rpc\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=get_nameserver_detail.get_nameserver_detail, is_leaf=True, yang_name=\"get-nameserver-detail\", rest_name=\"get-nameserver-detail\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'display detail device information.', u'hidden': u'rpccmd', u'actionpoint': u'show_ns_detail'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='rpc', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__get_nameserver_detail",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for get_nameserver_detail, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_nameserver_detail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_nameserver_detail() directly.
YANG Description: A function to display the detailed information of
the devices stored in the Name Server database.
|
[
"Setter",
"method",
"for",
"get_nameserver_detail",
"mapped",
"from",
"YANG",
"variable",
"/",
"brocade_nameserver_rpc",
"/",
"get_nameserver_detail",
"(",
"rpc",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_get_nameserver_detail",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_get_nameserver_detail",
"()",
"directly",
"."
] |
python
|
train
| 78.16 |
wiheto/teneto
|
teneto/temporalcommunity/integration.py
|
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/integration.py#L5-L45
|
def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
    for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff
|
[
"def",
"integration",
"(",
"temporalcommunities",
",",
"staticcommunities",
")",
":",
"# make sure the static and temporal communities have the same number of nodes",
"if",
"staticcommunities",
".",
"shape",
"[",
"0",
"]",
"!=",
"temporalcommunities",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Temporal and static communities have different dimensions'",
")",
"alleg",
"=",
"allegiance",
"(",
"temporalcommunities",
")",
"Icoeff",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"staticcommunities",
")",
")",
"# calc integration for each node",
"for",
"i",
",",
"statcom",
"in",
"enumerate",
"(",
"len",
"(",
"staticcommunities",
")",
")",
":",
"Icoeff",
"[",
"i",
"]",
"=",
"np",
".",
"mean",
"(",
"alleg",
"[",
"i",
",",
"staticcommunities",
"!=",
"statcom",
"]",
")",
"return",
"Icoeff"
] |
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
|
[
"Calculates",
"the",
"integration",
"coefficient",
"for",
"each",
"node",
".",
"Measures",
"the",
"average",
"probability",
"that",
"a",
"node",
"is",
"in",
"the",
"same",
"community",
"as",
"nodes",
"from",
"other",
"systems",
"."
] |
python
|
train
| 34.195122 |
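The integration coefficient above is easy to sanity-check on toy data; this sketch re-implements both allegiance (the fraction of time points at which two nodes share a temporal community) and the averaging over other static communities. It is for illustration only, not teneto's API: toy_allegiance is a stand-in for the library's allegiance helper.

import numpy as np

def toy_allegiance(tc):
    # P[i, j] = fraction of time points where nodes i and j share a community
    return np.mean(tc[:, None, :] == tc[None, :, :], axis=2)

tc = np.array([[0, 0, 1],
               [0, 1, 1],
               [1, 1, 0]])            # (node, time) community labels
sc = np.array([0, 0, 1])              # static community per node
P = toy_allegiance(tc)
integ = np.array([np.mean(P[i, sc != sc[i]]) for i in range(len(sc))])
print(integ)                          # integration coefficient per node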
StackStorm/pybind
|
pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/__init__.py#L126-L147
|
def _set_wait_for_bgp(self, v, load=False):
"""
Setter method for wait_for_bgp, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/wait_for_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait_for_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait_for_bgp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name="wait-for-bgp", rest_name="wait-for-bgp", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """wait_for_bgp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name="wait-for-bgp", rest_name="wait-for-bgp", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__wait_for_bgp = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_wait_for_bgp",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"wait_for_bgp",
".",
"wait_for_bgp",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"True",
",",
"yang_name",
"=",
"\"wait-for-bgp\"",
",",
"rest_name",
"=",
"\"wait-for-bgp\"",
",",
"parent",
"=",
"self",
",",
"choice",
"=",
"(",
"u'ch-on-startup'",
",",
"u'ca-on-startup-wfbgp'",
")",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"None",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-isis'",
",",
"defining_module",
"=",
"'brocade-isis'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"wait_for_bgp must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=wait_for_bgp.wait_for_bgp, is_container='container', presence=True, yang_name=\"wait-for-bgp\", rest_name=\"wait-for-bgp\", parent=self, choice=(u'ch-on-startup', u'ca-on-startup-wfbgp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__wait_for_bgp",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for wait_for_bgp, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/wait_for_bgp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_wait_for_bgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_wait_for_bgp() directly.
|
[
"Setter",
"method",
"for",
"wait_for_bgp",
"mapped",
"from",
"YANG",
"variable",
"/",
"routing_system",
"/",
"router",
"/",
"isis",
"/",
"router_isis_cmds_holder",
"/",
"router_isis_attributes",
"/",
"set_overload_bit",
"/",
"on_startup",
"/",
"wait_for_bgp",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_wait_for_bgp",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_wait_for_bgp",
"()",
"directly",
"."
] |
python
|
train
| 77.272727 |
Element-34/py.saunter
|
saunter/matchers.py
|
https://github.com/Element-34/py.saunter/blob/bdc8480b1453e082872c80d3382d42565b8ed9c0/saunter/matchers.py#L100-L114
|
def verify_false(self, expr, msg=None):
"""
Soft assert for whether the condition is false
:params expr: the statement to evaluate
:params msg: (Optional) msg explaining the difference
"""
try:
self.assert_false(expr, msg)
except AssertionError, e:
if msg:
m = "%s:\n%s" % (msg, str(e))
else:
m = str(e)
self.verification_erorrs.append(m)
|
[
"def",
"verify_false",
"(",
"self",
",",
"expr",
",",
"msg",
"=",
"None",
")",
":",
"try",
":",
"self",
".",
"assert_false",
"(",
"expr",
",",
"msg",
")",
"except",
"AssertionError",
",",
"e",
":",
"if",
"msg",
":",
"m",
"=",
"\"%s:\\n%s\"",
"%",
"(",
"msg",
",",
"str",
"(",
"e",
")",
")",
"else",
":",
"m",
"=",
"str",
"(",
"e",
")",
"self",
".",
"verification_erorrs",
".",
"append",
"(",
"m",
")"
] |
Soft assert for whether the condition is false
:params expr: the statement to evaluate
:params msg: (Optional) msg explaining the difference
|
[
"Soft",
"assert",
"for",
"whether",
"the",
"condition",
"is",
"false"
] |
python
|
train
| 30.733333 |
liampauling/betfair
|
betfairlightweight/streaming/cache.py
|
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/cache.py#L223-L253
|
def serialise(self):
"""Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
"""
return {
'marketId': self.market_id,
'totalAvailable': None,
'isMarketDataDelayed': None,
'lastMatchTime': None,
'betDelay': self.market_definition.get('betDelay'),
'version': self.market_definition.get('version'),
'complete': self.market_definition.get('complete'),
'runnersVoidable': self.market_definition.get('runnersVoidable'),
'totalMatched': self.total_matched,
'status': self.market_definition.get('status'),
'bspReconciled': self.market_definition.get('bspReconciled'),
'crossMatching': self.market_definition.get('crossMatching'),
'inplay': self.market_definition.get('inPlay'),
'numberOfWinners': self.market_definition.get('numberOfWinners'),
'numberOfRunners': len(self.market_definition.get('runners')),
'numberOfActiveRunners': self.market_definition.get('numberOfActiveRunners'),
'runners': [
runner.serialise(
self.market_definition_runner_dict[(runner.selection_id, runner.handicap)]
) for runner in self.runners
],
'publishTime': self.publish_time,
'priceLadderDefinition': self.market_definition.get('priceLadderDefinition'),
'keyLineDescription': self.market_definition.get('keyLineDefinition'),
'marketDefinition': self.market_definition, # used in lightweight
}
|
[
"def",
"serialise",
"(",
"self",
")",
":",
"return",
"{",
"'marketId'",
":",
"self",
".",
"market_id",
",",
"'totalAvailable'",
":",
"None",
",",
"'isMarketDataDelayed'",
":",
"None",
",",
"'lastMatchTime'",
":",
"None",
",",
"'betDelay'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'betDelay'",
")",
",",
"'version'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'version'",
")",
",",
"'complete'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'complete'",
")",
",",
"'runnersVoidable'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'runnersVoidable'",
")",
",",
"'totalMatched'",
":",
"self",
".",
"total_matched",
",",
"'status'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'status'",
")",
",",
"'bspReconciled'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'bspReconciled'",
")",
",",
"'crossMatching'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'crossMatching'",
")",
",",
"'inplay'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'inPlay'",
")",
",",
"'numberOfWinners'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'numberOfWinners'",
")",
",",
"'numberOfRunners'",
":",
"len",
"(",
"self",
".",
"market_definition",
".",
"get",
"(",
"'runners'",
")",
")",
",",
"'numberOfActiveRunners'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'numberOfActiveRunners'",
")",
",",
"'runners'",
":",
"[",
"runner",
".",
"serialise",
"(",
"self",
".",
"market_definition_runner_dict",
"[",
"(",
"runner",
".",
"selection_id",
",",
"runner",
".",
"handicap",
")",
"]",
")",
"for",
"runner",
"in",
"self",
".",
"runners",
"]",
",",
"'publishTime'",
":",
"self",
".",
"publish_time",
",",
"'priceLadderDefinition'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'priceLadderDefinition'",
")",
",",
"'keyLineDescription'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'keyLineDefinition'",
")",
",",
"'marketDefinition'",
":",
"self",
".",
"market_definition",
",",
"# used in lightweight",
"}"
] |
Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
|
[
"Creates",
"standard",
"market",
"book",
"json",
"response",
"will",
"error",
"if",
"EX_MARKET_DEF",
"not",
"incl",
"."
] |
python
|
train
| 52.290323 |
user-cont/conu
|
conu/backend/k8s/utils.py
|
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/utils.py#L23-L38
|
def k8s_ports_to_metadata_ports(k8s_ports):
"""
:param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
"""
ports = []
for k8s_port in k8s_ports:
if k8s_port.protocol is not None:
ports.append("%s/%s" % (k8s_port.port, k8s_port.protocol.lower()))
else:
ports.append(str(k8s_port.port))
return ports
|
[
"def",
"k8s_ports_to_metadata_ports",
"(",
"k8s_ports",
")",
":",
"ports",
"=",
"[",
"]",
"for",
"k8s_port",
"in",
"k8s_ports",
":",
"if",
"k8s_port",
".",
"protocol",
"is",
"not",
"None",
":",
"ports",
".",
"append",
"(",
"\"%s/%s\"",
"%",
"(",
"k8s_port",
".",
"port",
",",
"k8s_port",
".",
"protocol",
".",
"lower",
"(",
")",
")",
")",
"else",
":",
"ports",
".",
"append",
"(",
"str",
"(",
"k8s_port",
".",
"port",
")",
")",
"return",
"ports"
] |
:param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
- ['1234/tcp', '8080/udp']
|
[
":",
"param",
"k8s_ports",
":",
"list",
"of",
"V1ServicePort",
":",
"return",
":",
"list",
"of",
"str",
"list",
"of",
"exposed",
"ports",
"example",
":",
"-",
"[",
"1234",
"/",
"tcp",
"8080",
"/",
"udp",
"]"
] |
python
|
train
| 26.9375 |
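For reference, a hedged sketch of the port conversion above; SimplePort is a stand-in for the kubernetes client's V1ServicePort (only the .port and .protocol attributes are assumed).

from collections import namedtuple

SimplePort = namedtuple("SimplePort", ["port", "protocol"])

ports = []
for p in [SimplePort(1234, "TCP"), SimplePort(8080, "UDP"), SimplePort(53, None)]:
    if p.protocol is not None:
        ports.append("%s/%s" % (p.port, p.protocol.lower()))
    else:
        ports.append(str(p.port))
print(ports)  # ['1234/tcp', '8080/udp', '53']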
biolink/ontobio
|
ontobio/sim/api/semsearch.py
|
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/semsearch.py#L204-L212
|
def compare(self,
query_classes: Set,
reference_classes: Set,
method: Optional) -> SimResult:
"""
    Given two lists of entities (classes, individuals)
return their similarity
"""
raise NotImplementedError
|
[
"def",
"compare",
"(",
"self",
",",
"query_classes",
":",
"Set",
",",
"reference_classes",
":",
"Set",
",",
"method",
":",
"Optional",
")",
"->",
"SimResult",
":",
"raise",
"NotImplementedError"
] |
Given two lists of entities (classes, individuals)
return their similarity
|
[
"Given",
"two",
"lists",
"of",
"entites",
"(",
"classes",
"individual",
")",
"return",
"their",
"similarity"
] |
python
|
train
| 31.111111 |
larsyencken/csvdiff
|
csvdiff/__init__.py
|
https://github.com/larsyencken/csvdiff/blob/163dd9da676a8e5f926a935803726340261f03ae/csvdiff/__init__.py#L81-L84
|
def _nice_fieldnames(all_columns, index_columns):
"Indexes on the left, other fields in alphabetical order on the right."
non_index_columns = set(all_columns).difference(index_columns)
return index_columns + sorted(non_index_columns)
|
[
"def",
"_nice_fieldnames",
"(",
"all_columns",
",",
"index_columns",
")",
":",
"non_index_columns",
"=",
"set",
"(",
"all_columns",
")",
".",
"difference",
"(",
"index_columns",
")",
"return",
"index_columns",
"+",
"sorted",
"(",
"non_index_columns",
")"
] |
Indexes on the left, other fields in alphabetical order on the right.
|
[
"Indexes",
"on",
"the",
"left",
"other",
"fields",
"in",
"alphabetical",
"order",
"on",
"the",
"right",
"."
] |
python
|
train
| 60.5 |
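The ordering rule above on concrete data, inlined so it runs standalone: index columns stay on the left, the remaining fields are sorted alphabetically.

all_columns = ["name", "id", "email", "age"]
index_columns = ["id"]
non_index_columns = set(all_columns).difference(index_columns)
print(index_columns + sorted(non_index_columns))  # ['id', 'age', 'email', 'name']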
Erotemic/utool
|
utool/util_dev.py
|
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2499-L2509
|
def execstr_funckw(func):
"""
for doctests kwargs
SeeAlso:
ut.exec_func_src
ut.argparse_funckw
"""
import utool as ut
funckw = ut.get_func_kwargs(func)
return ut.execstr_dict(funckw, explicit=True)
|
[
"def",
"execstr_funckw",
"(",
"func",
")",
":",
"import",
"utool",
"as",
"ut",
"funckw",
"=",
"ut",
".",
"get_func_kwargs",
"(",
"func",
")",
"return",
"ut",
".",
"execstr_dict",
"(",
"funckw",
",",
"explicit",
"=",
"True",
")"
] |
for doctests kwargs
SeeAlso:
ut.exec_func_src
ut.argparse_funckw
|
[
"for",
"doctests",
"kwargs"
] |
python
|
train
| 21.090909 |
geertj/gruvi
|
lib/gruvi/dbus.py
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/dbus.py#L188-L203
|
def parse_dbus_header(header):
"""Parse a D-BUS header. Return the message size."""
if six.indexbytes(header, 0) == ord('l'):
endian = '<'
elif six.indexbytes(header, 0) == ord('B'):
endian = '>'
else:
raise ValueError('illegal endianness')
if not 1 <= six.indexbytes(header, 1) <= 4:
        raise ValueError('illegal message type')
if struct.unpack(endian + 'I', header[8:12])[0] == 0:
raise ValueError('illegal serial number')
harrlen = struct.unpack(endian + 'I', header[12:16])[0]
padlen = (8 - harrlen) % 8
bodylen = struct.unpack(endian + 'I', header[4:8])[0]
return 16 + harrlen + padlen + bodylen
|
[
"def",
"parse_dbus_header",
"(",
"header",
")",
":",
"if",
"six",
".",
"indexbytes",
"(",
"header",
",",
"0",
")",
"==",
"ord",
"(",
"'l'",
")",
":",
"endian",
"=",
"'<'",
"elif",
"six",
".",
"indexbytes",
"(",
"header",
",",
"0",
")",
"==",
"ord",
"(",
"'B'",
")",
":",
"endian",
"=",
"'>'",
"else",
":",
"raise",
"ValueError",
"(",
"'illegal endianness'",
")",
"if",
"not",
"1",
"<=",
"six",
".",
"indexbytes",
"(",
"header",
",",
"1",
")",
"<=",
"4",
":",
"raise",
"ValueError",
"(",
"'illegel message type'",
")",
"if",
"struct",
".",
"unpack",
"(",
"endian",
"+",
"'I'",
",",
"header",
"[",
"8",
":",
"12",
"]",
")",
"[",
"0",
"]",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'illegal serial number'",
")",
"harrlen",
"=",
"struct",
".",
"unpack",
"(",
"endian",
"+",
"'I'",
",",
"header",
"[",
"12",
":",
"16",
"]",
")",
"[",
"0",
"]",
"padlen",
"=",
"(",
"8",
"-",
"harrlen",
")",
"%",
"8",
"bodylen",
"=",
"struct",
".",
"unpack",
"(",
"endian",
"+",
"'I'",
",",
"header",
"[",
"4",
":",
"8",
"]",
")",
"[",
"0",
"]",
"return",
"16",
"+",
"harrlen",
"+",
"padlen",
"+",
"bodylen"
] |
Parse a D-BUS header. Return the message size.
|
[
"Parse",
"a",
"D",
"-",
"BUS",
"header",
".",
"Return",
"the",
"message",
"size",
"."
] |
python
|
train
| 41.375 |
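A sketch of the size computation above on a hand-packed little-endian header; the byte layout (endianness flag, message type, body length at 4:8, serial at 8:12, header-array length at 12:16) is read off the function itself, not the D-Bus spec text.

import struct

bodylen, serial, harrlen = 24, 1, 5
# 'l' endianness, type=1, flags=0, protocol version=1, then three uint32 fields
header = struct.pack('<cBBBIII', b'l', 1, 0, 1, bodylen, serial, harrlen)
padlen = (8 - harrlen) % 8              # header array is padded to an 8-byte boundary
print(16 + harrlen + padlen + bodylen)  # 48, the total message size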
myaooo/pysbrl
|
pysbrl/rule_list.py
|
https://github.com/myaooo/pysbrl/blob/74bba8c6913a7f82e32313108f8c3e025b89d9c7/pysbrl/rule_list.py#L227-L242
|
def from_raw(self, rule_ids, outputs, raw_rules):
"""
A helper function that converts the results returned from C function
:param rule_ids:
:param outputs:
:param raw_rules:
:return:
"""
self._rule_pool = [([], [])] + raw_rules
self._rule_list = []
for i, idx in enumerate(rule_ids):
rule = Rule([Clause(f, c) for f, c in zip(*self._rule_pool[idx])], outputs[i])
self._rule_list.append(rule)
# self._rule_list.append(rule_str2rule(_rule_name, outputs[i]))
self._rule_ids = rule_ids
self._rule_outputs = outputs
|
[
"def",
"from_raw",
"(",
"self",
",",
"rule_ids",
",",
"outputs",
",",
"raw_rules",
")",
":",
"self",
".",
"_rule_pool",
"=",
"[",
"(",
"[",
"]",
",",
"[",
"]",
")",
"]",
"+",
"raw_rules",
"self",
".",
"_rule_list",
"=",
"[",
"]",
"for",
"i",
",",
"idx",
"in",
"enumerate",
"(",
"rule_ids",
")",
":",
"rule",
"=",
"Rule",
"(",
"[",
"Clause",
"(",
"f",
",",
"c",
")",
"for",
"f",
",",
"c",
"in",
"zip",
"(",
"*",
"self",
".",
"_rule_pool",
"[",
"idx",
"]",
")",
"]",
",",
"outputs",
"[",
"i",
"]",
")",
"self",
".",
"_rule_list",
".",
"append",
"(",
"rule",
")",
"# self._rule_list.append(rule_str2rule(_rule_name, outputs[i]))",
"self",
".",
"_rule_ids",
"=",
"rule_ids",
"self",
".",
"_rule_outputs",
"=",
"outputs"
] |
A helper function that converts the results returned from C function
:param rule_ids:
:param outputs:
:param raw_rules:
:return:
|
[
"A",
"helper",
"function",
"that",
"converts",
"the",
"results",
"returned",
"from",
"C",
"function",
":",
"param",
"rule_ids",
":",
":",
"param",
"outputs",
":",
":",
"param",
"raw_rules",
":",
":",
"return",
":"
] |
python
|
train
| 39.1875 |
ssato/python-anyconfig
|
src/anyconfig/utils.py
|
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/utils.py#L157-L176
|
def is_ioinfo(obj, keys=None):
"""
:return: True if given 'obj' is a 'IOInfo' namedtuple object.
>>> assert not is_ioinfo(1)
>>> assert not is_ioinfo("aaa")
>>> assert not is_ioinfo({})
>>> assert not is_ioinfo(('a', 1, {}))
>>> inp = anyconfig.globals.IOInfo("/etc/hosts", "path", "/etc/hosts",
... None, open)
>>> assert is_ioinfo(inp)
"""
if keys is None:
keys = anyconfig.globals.IOI_KEYS
if isinstance(obj, tuple) and getattr(obj, "_asdict", False):
return all(k in obj._asdict() for k in keys)
return False
|
[
"def",
"is_ioinfo",
"(",
"obj",
",",
"keys",
"=",
"None",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"anyconfig",
".",
"globals",
".",
"IOI_KEYS",
"if",
"isinstance",
"(",
"obj",
",",
"tuple",
")",
"and",
"getattr",
"(",
"obj",
",",
"\"_asdict\"",
",",
"False",
")",
":",
"return",
"all",
"(",
"k",
"in",
"obj",
".",
"_asdict",
"(",
")",
"for",
"k",
"in",
"keys",
")",
"return",
"False"
] |
:return: True if given 'obj' is a 'IOInfo' namedtuple object.
>>> assert not is_ioinfo(1)
>>> assert not is_ioinfo("aaa")
>>> assert not is_ioinfo({})
>>> assert not is_ioinfo(('a', 1, {}))
>>> inp = anyconfig.globals.IOInfo("/etc/hosts", "path", "/etc/hosts",
... None, open)
>>> assert is_ioinfo(inp)
|
[
":",
"return",
":",
"True",
"if",
"given",
"obj",
"is",
"a",
"IOInfo",
"namedtuple",
"object",
"."
] |
python
|
train
| 29.8 |
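A standalone illustration of the namedtuple check above, with stand-ins for IOInfo and IOI_KEYS (the real ones live in anyconfig.globals; the field names here are assumptions).

from collections import namedtuple

IOInfo = namedtuple("IOInfo", "src type path processor opener")  # assumed fields
KEYS = IOInfo._fields

def looks_like_ioinfo(obj, keys=KEYS):
    # a namedtuple exposes _asdict(); a plain tuple does not
    return (isinstance(obj, tuple) and getattr(obj, "_asdict", False)
            and all(k in obj._asdict() for k in keys))

inp = IOInfo("/etc/hosts", "path", "/etc/hosts", None, open)
print(looks_like_ioinfo(inp), looks_like_ioinfo(("a", 1, {})))  # True False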
flyingrub/scdl
|
scdl/scdl.py
|
https://github.com/flyingrub/scdl/blob/e833a22dd6676311b72fadd8a1c80f4a06acfad9/scdl/scdl.py#L323-L362
|
def download(user, dl_type, name):
"""
Download user items of dl_type (ie. all, playlists, liked, commented, etc.)
"""
username = user['username']
user_id = user['id']
logger.info(
'Retrieving all {0} of user {1}...'.format(name, username)
)
dl_url = url[dl_type].format(user_id)
logger.debug(dl_url)
resources = client.get_collection(dl_url, token)
del resources[:offset - 1]
logger.debug(resources)
total = len(resources)
logger.info('Retrieved {0} {1}'.format(total, name))
for counter, item in enumerate(resources, offset):
try:
logger.debug(item)
logger.info('{0} n°{1} of {2}'.format(
name.capitalize(), counter, total)
)
if dl_type == 'all':
item_name = item['type'].split('-')[0] # remove the '-repost'
uri = item[item_name]['uri']
parse_url(uri)
elif dl_type == 'playlists':
download_playlist(item)
elif dl_type == 'playlists-liked':
parse_url(item['playlist']['uri'])
elif dl_type == 'commented':
item = get_track_info(item['track_id'])
download_track(item)
else:
download_track(item)
except Exception as e:
logger.exception(e)
logger.info('Downloaded all {0} {1} of user {2}!'.format(
total, name, username)
)
|
[
"def",
"download",
"(",
"user",
",",
"dl_type",
",",
"name",
")",
":",
"username",
"=",
"user",
"[",
"'username'",
"]",
"user_id",
"=",
"user",
"[",
"'id'",
"]",
"logger",
".",
"info",
"(",
"'Retrieving all {0} of user {1}...'",
".",
"format",
"(",
"name",
",",
"username",
")",
")",
"dl_url",
"=",
"url",
"[",
"dl_type",
"]",
".",
"format",
"(",
"user_id",
")",
"logger",
".",
"debug",
"(",
"dl_url",
")",
"resources",
"=",
"client",
".",
"get_collection",
"(",
"dl_url",
",",
"token",
")",
"del",
"resources",
"[",
":",
"offset",
"-",
"1",
"]",
"logger",
".",
"debug",
"(",
"resources",
")",
"total",
"=",
"len",
"(",
"resources",
")",
"logger",
".",
"info",
"(",
"'Retrieved {0} {1}'",
".",
"format",
"(",
"total",
",",
"name",
")",
")",
"for",
"counter",
",",
"item",
"in",
"enumerate",
"(",
"resources",
",",
"offset",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"item",
")",
"logger",
".",
"info",
"(",
"'{0} n°{1} of {2}'.",
"f",
"ormat(",
"",
"name",
".",
"capitalize",
"(",
")",
",",
"counter",
",",
"total",
")",
")",
"if",
"dl_type",
"==",
"'all'",
":",
"item_name",
"=",
"item",
"[",
"'type'",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"# remove the '-repost'",
"uri",
"=",
"item",
"[",
"item_name",
"]",
"[",
"'uri'",
"]",
"parse_url",
"(",
"uri",
")",
"elif",
"dl_type",
"==",
"'playlists'",
":",
"download_playlist",
"(",
"item",
")",
"elif",
"dl_type",
"==",
"'playlists-liked'",
":",
"parse_url",
"(",
"item",
"[",
"'playlist'",
"]",
"[",
"'uri'",
"]",
")",
"elif",
"dl_type",
"==",
"'commented'",
":",
"item",
"=",
"get_track_info",
"(",
"item",
"[",
"'track_id'",
"]",
")",
"download_track",
"(",
"item",
")",
"else",
":",
"download_track",
"(",
"item",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"logger",
".",
"info",
"(",
"'Downloaded all {0} {1} of user {2}!'",
".",
"format",
"(",
"total",
",",
"name",
",",
"username",
")",
")"
] |
Download user items of dl_type (ie. all, playlists, liked, commented, etc.)
|
[
"Download",
"user",
"items",
"of",
"dl_type",
"(",
"ie",
".",
"all",
"playlists",
"liked",
"commented",
"etc",
".",
")"
] |
python
|
train
| 35.85 |
SavinaRoja/OpenAccess_EPUB
|
src/openaccess_epub/utils/epub.py
|
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/epub.py#L163-L191
|
def epub_zip(outdirect):
"""
Zips up the input file directory into an EPUB file.
"""
def recursive_zip(zipf, directory, folder=None):
if folder is None:
folder = ''
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
zipf.write(os.path.join(directory, item),
os.path.join(directory, item))
elif os.path.isdir(os.path.join(directory, item)):
recursive_zip(zipf, os.path.join(directory, item),
os.path.join(folder, item))
log.info('Zipping up the directory {0}'.format(outdirect))
epub_filename = outdirect + '.epub'
epub = zipfile.ZipFile(epub_filename, 'w')
current_dir = os.getcwd()
os.chdir(outdirect)
epub.write('mimetype')
log.info('Recursively zipping META-INF and EPUB')
for item in os.listdir('.'):
if item == 'mimetype':
continue
recursive_zip(epub, item)
os.chdir(current_dir)
epub.close()
|
[
"def",
"epub_zip",
"(",
"outdirect",
")",
":",
"def",
"recursive_zip",
"(",
"zipf",
",",
"directory",
",",
"folder",
"=",
"None",
")",
":",
"if",
"folder",
"is",
"None",
":",
"folder",
"=",
"''",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"item",
")",
")",
":",
"zipf",
".",
"write",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"item",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"item",
")",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"item",
")",
")",
":",
"recursive_zip",
"(",
"zipf",
",",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"item",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"item",
")",
")",
"log",
".",
"info",
"(",
"'Zipping up the directory {0}'",
".",
"format",
"(",
"outdirect",
")",
")",
"epub_filename",
"=",
"outdirect",
"+",
"'.epub'",
"epub",
"=",
"zipfile",
".",
"ZipFile",
"(",
"epub_filename",
",",
"'w'",
")",
"current_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"outdirect",
")",
"epub",
".",
"write",
"(",
"'mimetype'",
")",
"log",
".",
"info",
"(",
"'Recursively zipping META-INF and EPUB'",
")",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"'.'",
")",
":",
"if",
"item",
"==",
"'mimetype'",
":",
"continue",
"recursive_zip",
"(",
"epub",
",",
"item",
")",
"os",
".",
"chdir",
"(",
"current_dir",
")",
"epub",
".",
"close",
"(",
")"
] |
Zips up the input file directory into an EPUB file.
|
[
"Zips",
"up",
"the",
"input",
"file",
"directory",
"into",
"an",
"EPUB",
"file",
"."
] |
python
|
train
| 35.517241 |
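A compact sketch of the EPUB convention the function above encodes: 'mimetype' is written first (the spec additionally wants it uncompressed, hence ZIP_STORED here, a small hardening over the code above). Paths are throwaway temp files.

import zipfile, tempfile, pathlib

root = pathlib.Path(tempfile.mkdtemp())
(root / "mimetype").write_text("application/epub+zip")
(root / "META-INF").mkdir()
(root / "META-INF" / "container.xml").write_text("<container/>")

epub_path = str(root) + ".epub"
with zipfile.ZipFile(epub_path, "w") as epub:
    # first entry, stored uncompressed
    epub.write(root / "mimetype", "mimetype", compress_type=zipfile.ZIP_STORED)
    for p in root.rglob("*"):
        if p.is_file() and p.name != "mimetype":
            epub.write(p, p.relative_to(root))
print(zipfile.ZipFile(epub_path).namelist())  # ['mimetype', 'META-INF/container.xml']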
Rapptz/discord.py
|
discord/client.py
|
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/client.py#L502-L511
|
async def start(self, *args, **kwargs):
"""|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
"""
bot = kwargs.pop('bot', True)
reconnect = kwargs.pop('reconnect', True)
await self.login(*args, bot=bot)
await self.connect(reconnect=reconnect)
|
[
"async",
"def",
"start",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"bot",
"=",
"kwargs",
".",
"pop",
"(",
"'bot'",
",",
"True",
")",
"reconnect",
"=",
"kwargs",
".",
"pop",
"(",
"'reconnect'",
",",
"True",
")",
"await",
"self",
".",
"login",
"(",
"*",
"args",
",",
"bot",
"=",
"bot",
")",
"await",
"self",
".",
"connect",
"(",
"reconnect",
"=",
"reconnect",
")"
] |
|coro|
A shorthand coroutine for :meth:`login` + :meth:`connect`.
|
[
"|coro|"
] |
python
|
train
| 30.6 |
github/octodns
|
octodns/provider/azuredns.py
|
https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/azuredns.py#L450-L467
|
def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar))
|
[
"def",
"_apply_Create",
"(",
"self",
",",
"change",
")",
":",
"ar",
"=",
"_AzureRecord",
"(",
"self",
".",
"_resource_group",
",",
"change",
".",
"new",
")",
"create",
"=",
"self",
".",
"_dns_client",
".",
"record_sets",
".",
"create_or_update",
"create",
"(",
"resource_group_name",
"=",
"ar",
".",
"resource_group",
",",
"zone_name",
"=",
"ar",
".",
"zone_name",
",",
"relative_record_set_name",
"=",
"ar",
".",
"relative_record_set_name",
",",
"record_type",
"=",
"ar",
".",
"record_type",
",",
"parameters",
"=",
"ar",
".",
"params",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'* Success Create/Update: {}'",
".",
"format",
"(",
"ar",
")",
")"
] |
A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
|
[
"A",
"record",
"from",
"change",
"must",
"be",
"created",
"."
] |
python
|
train
| 35.222222 |
numenta/nupic
|
examples/opf/tools/sp_plotter.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/opf/tools/sp_plotter.py#L166-L190
|
def modifyBits(inputVal, maxChanges):
""" Modifies up to maxChanges number of bits in the inputVal
"""
changes = np.random.random_integers(0, maxChanges, 1)[0]
if changes == 0:
return inputVal
inputWidth = len(inputVal)
whatToChange = np.random.random_integers(0, 41, changes)
runningIndex = -1
numModsDone = 0
for i in xrange(inputWidth):
if numModsDone >= changes:
break
if inputVal[i] == 1:
runningIndex += 1
if runningIndex in whatToChange:
if i != 0 and inputVal[i-1] == 0:
inputVal[i-1] = 1
inputVal[i] = 0
return inputVal
|
[
"def",
"modifyBits",
"(",
"inputVal",
",",
"maxChanges",
")",
":",
"changes",
"=",
"np",
".",
"random",
".",
"random_integers",
"(",
"0",
",",
"maxChanges",
",",
"1",
")",
"[",
"0",
"]",
"if",
"changes",
"==",
"0",
":",
"return",
"inputVal",
"inputWidth",
"=",
"len",
"(",
"inputVal",
")",
"whatToChange",
"=",
"np",
".",
"random",
".",
"random_integers",
"(",
"0",
",",
"41",
",",
"changes",
")",
"runningIndex",
"=",
"-",
"1",
"numModsDone",
"=",
"0",
"for",
"i",
"in",
"xrange",
"(",
"inputWidth",
")",
":",
"if",
"numModsDone",
">=",
"changes",
":",
"break",
"if",
"inputVal",
"[",
"i",
"]",
"==",
"1",
":",
"runningIndex",
"+=",
"1",
"if",
"runningIndex",
"in",
"whatToChange",
":",
"if",
"i",
"!=",
"0",
"and",
"inputVal",
"[",
"i",
"-",
"1",
"]",
"==",
"0",
":",
"inputVal",
"[",
"i",
"-",
"1",
"]",
"=",
"1",
"inputVal",
"[",
"i",
"]",
"=",
"0",
"return",
"inputVal"
] |
Modifies up to maxChanges number of bits in the inputVal
|
[
"Modifies",
"up",
"to",
"maxChanges",
"number",
"of",
"bits",
"in",
"the",
"inputVal"
] |
python
|
valid
| 23.48 |
buildbot/buildbot
|
master/setup.py
|
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/setup.py#L42-L48
|
def include(d, e):
"""Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern"""
return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)])
|
[
"def",
"include",
"(",
"d",
",",
"e",
")",
":",
"return",
"(",
"d",
",",
"[",
"f",
"for",
"f",
"in",
"glob",
".",
"glob",
"(",
"'%s/%s'",
"%",
"(",
"d",
",",
"e",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
"]",
")"
] |
Generate a pair of (directory, file-list) for installation.
'd' -- A directory
'e' -- A glob pattern
|
[
"Generate",
"a",
"pair",
"of",
"(",
"directory",
"file",
"-",
"list",
")",
"for",
"installation",
"."
] |
python
|
train
| 30.142857 |
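The glob pairing above exercised against a scratch directory (file names here are throwaway examples, not buildbot's real data files); the resulting pair slots straight into setup(data_files=[...]).

import glob, os, tempfile

d = tempfile.mkdtemp()
for name in ("a.cfg", "b.cfg", "notes.txt"):
    open(os.path.join(d, name), "w").close()

pair = (d, [f for f in glob.glob('%s/%s' % (d, "*.cfg")) if os.path.isfile(f)])
print(len(pair[1]))  # 2 matching files paired with their target directory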
sunlightlabs/django-locksmith
|
locksmith/hub/models.py
|
https://github.com/sunlightlabs/django-locksmith/blob/eef5b7c25404560aaad50b6e622594f89239b74b/locksmith/hub/models.py#L82-L87
|
def mark_for_update(self):
'''
Note that a change has been made so all Statuses need update
'''
self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
push_key.delay(self)
|
[
"def",
"mark_for_update",
"(",
"self",
")",
":",
"self",
".",
"pub_statuses",
".",
"exclude",
"(",
"status",
"=",
"UNPUBLISHED",
")",
".",
"update",
"(",
"status",
"=",
"NEEDS_UPDATE",
")",
"push_key",
".",
"delay",
"(",
"self",
")"
] |
Note that a change has been made so all Statuses need update
|
[
"Note",
"that",
"a",
"change",
"has",
"been",
"made",
"so",
"all",
"Statuses",
"need",
"update"
] |
python
|
train
| 38.166667 |
cnt-dev/cnt.rulebase
|
cnt/rulebase/rules/sentence_segmentation/sentence_segmenter.py
|
https://github.com/cnt-dev/cnt.rulebase/blob/d1c767c356d8ee05b23ec5b04aaac84784ee547c/cnt/rulebase/rules/sentence_segmentation/sentence_segmenter.py#L108-L170
|
def result(self) -> workflow.IntervalGeneratorType:
"""
Generate intervals indicating the valid sentences.
"""
config = cast(SentenceSegementationConfig, self.config)
index = -1
labels = None
while True:
# 1. Find the start of the sentence.
start = -1
while True:
# Check the ``labels`` generated from step (2).
if labels is None:
# https://www.python.org/dev/peps/pep-0479/
try:
index, labels = next(self.index_labels_generator)
except StopIteration:
return
# Check if we found a valid sentence char.
if labels[SentenceValidCharacterLabeler]:
start = index
break
# Trigger next(...) action.
labels = None
index = -1
# 2. Find the ending.
end = -1
try:
while True:
index, labels = next(self.index_labels_generator)
# Detected invalid char.
if config.enable_strict_sentence_charset and \
not labels[SentenceValidCharacterLabeler] and \
not labels[WhitespaceLabeler]:
end = index
break
# Detected sentence ending.
if self._labels_indicate_sentence_ending(labels):
# Consume the ending span.
while True:
index, labels = next(self.index_labels_generator)
is_ending = (self._labels_indicate_sentence_ending(labels) or
(config.extend_ending_with_delimiters and
labels[DelimitersLabeler]))
if not is_ending:
end = index
break
# yeah we found the ending.
break
except StopIteration:
end = len(self.input_sequence)
# Trigger next(...) action.
labels = None
index = -1
yield start, end
|
[
"def",
"result",
"(",
"self",
")",
"->",
"workflow",
".",
"IntervalGeneratorType",
":",
"config",
"=",
"cast",
"(",
"SentenceSegementationConfig",
",",
"self",
".",
"config",
")",
"index",
"=",
"-",
"1",
"labels",
"=",
"None",
"while",
"True",
":",
"# 1. Find the start of the sentence.",
"start",
"=",
"-",
"1",
"while",
"True",
":",
"# Check the ``labels`` generated from step (2).",
"if",
"labels",
"is",
"None",
":",
"# https://www.python.org/dev/peps/pep-0479/",
"try",
":",
"index",
",",
"labels",
"=",
"next",
"(",
"self",
".",
"index_labels_generator",
")",
"except",
"StopIteration",
":",
"return",
"# Check if we found a valid sentence char.",
"if",
"labels",
"[",
"SentenceValidCharacterLabeler",
"]",
":",
"start",
"=",
"index",
"break",
"# Trigger next(...) action.",
"labels",
"=",
"None",
"index",
"=",
"-",
"1",
"# 2. Find the ending.",
"end",
"=",
"-",
"1",
"try",
":",
"while",
"True",
":",
"index",
",",
"labels",
"=",
"next",
"(",
"self",
".",
"index_labels_generator",
")",
"# Detected invalid char.",
"if",
"config",
".",
"enable_strict_sentence_charset",
"and",
"not",
"labels",
"[",
"SentenceValidCharacterLabeler",
"]",
"and",
"not",
"labels",
"[",
"WhitespaceLabeler",
"]",
":",
"end",
"=",
"index",
"break",
"# Detected sentence ending.",
"if",
"self",
".",
"_labels_indicate_sentence_ending",
"(",
"labels",
")",
":",
"# Consume the ending span.",
"while",
"True",
":",
"index",
",",
"labels",
"=",
"next",
"(",
"self",
".",
"index_labels_generator",
")",
"is_ending",
"=",
"(",
"self",
".",
"_labels_indicate_sentence_ending",
"(",
"labels",
")",
"or",
"(",
"config",
".",
"extend_ending_with_delimiters",
"and",
"labels",
"[",
"DelimitersLabeler",
"]",
")",
")",
"if",
"not",
"is_ending",
":",
"end",
"=",
"index",
"break",
"# yeah we found the ending.",
"break",
"except",
"StopIteration",
":",
"end",
"=",
"len",
"(",
"self",
".",
"input_sequence",
")",
"# Trigger next(...) action.",
"labels",
"=",
"None",
"index",
"=",
"-",
"1",
"yield",
"start",
",",
"end"
] |
Generate intervals indicating the valid sentences.
|
[
"Generate",
"intervals",
"indicating",
"the",
"valid",
"sentences",
"."
] |
python
|
train
| 37.206349 |
treycucco/bidon
|
bidon/db/access/model_access.py
|
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/model_access.py#L29-L34
|
def find_models(self, constructor, constraints=None, *, columns=None, order_by=None,
limiting=None, table_name=None):
"""Specialization of DataAccess.find_all that returns models instead of cursor objects."""
return self._find_models(
constructor, table_name or constructor.table_name, constraints, columns=columns,
order_by=order_by, limiting=limiting)
|
[
"def",
"find_models",
"(",
"self",
",",
"constructor",
",",
"constraints",
"=",
"None",
",",
"*",
",",
"columns",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"limiting",
"=",
"None",
",",
"table_name",
"=",
"None",
")",
":",
"return",
"self",
".",
"_find_models",
"(",
"constructor",
",",
"table_name",
"or",
"constructor",
".",
"table_name",
",",
"constraints",
",",
"columns",
"=",
"columns",
",",
"order_by",
"=",
"order_by",
",",
"limiting",
"=",
"limiting",
")"
] |
Specialization of DataAccess.find_all that returns models instead of cursor objects.
|
[
"Specialization",
"of",
"DataAccess",
".",
"find_all",
"that",
"returns",
"models",
"instead",
"of",
"cursor",
"objects",
"."
] |
python
|
train
| 64.333333 |
hasgeek/coaster
|
coaster/utils/misc.py
|
https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/utils/misc.py#L401-L437
|
def format_currency(value, decimals=2):
"""
Return a number suitably formatted for display as currency, with
thousands separated by commas and up to two decimal points.
>>> format_currency(1000)
'1,000'
>>> format_currency(100)
'100'
>>> format_currency(999.95)
'999.95'
>>> format_currency(99.95)
'99.95'
>>> format_currency(100000)
'100,000'
>>> format_currency(1000.00)
'1,000'
>>> format_currency(1000.41)
'1,000.41'
>>> format_currency(23.21, decimals=3)
'23.210'
>>> format_currency(1000, decimals=3)
'1,000'
>>> format_currency(123456789.123456789)
'123,456,789.12'
"""
number, decimal = ((u'%%.%df' % decimals) % value).split(u'.')
parts = []
while len(number) > 3:
part, number = number[-3:], number[:-3]
parts.append(part)
parts.append(number)
parts.reverse()
if int(decimal) == 0:
return u','.join(parts)
else:
return u','.join(parts) + u'.' + decimal
|
[
"def",
"format_currency",
"(",
"value",
",",
"decimals",
"=",
"2",
")",
":",
"number",
",",
"decimal",
"=",
"(",
"(",
"u'%%.%df'",
"%",
"decimals",
")",
"%",
"value",
")",
".",
"split",
"(",
"u'.'",
")",
"parts",
"=",
"[",
"]",
"while",
"len",
"(",
"number",
")",
">",
"3",
":",
"part",
",",
"number",
"=",
"number",
"[",
"-",
"3",
":",
"]",
",",
"number",
"[",
":",
"-",
"3",
"]",
"parts",
".",
"append",
"(",
"part",
")",
"parts",
".",
"append",
"(",
"number",
")",
"parts",
".",
"reverse",
"(",
")",
"if",
"int",
"(",
"decimal",
")",
"==",
"0",
":",
"return",
"u','",
".",
"join",
"(",
"parts",
")",
"else",
":",
"return",
"u','",
".",
"join",
"(",
"parts",
")",
"+",
"u'.'",
"+",
"decimal"
] |
Return a number suitably formatted for display as currency, with
thousands separated by commas and up to two decimal points.
>>> format_currency(1000)
'1,000'
>>> format_currency(100)
'100'
>>> format_currency(999.95)
'999.95'
>>> format_currency(99.95)
'99.95'
>>> format_currency(100000)
'100,000'
>>> format_currency(1000.00)
'1,000'
>>> format_currency(1000.41)
'1,000.41'
>>> format_currency(23.21, decimals=3)
'23.210'
>>> format_currency(1000, decimals=3)
'1,000'
>>> format_currency(123456789.123456789)
'123,456,789.12'
|
[
"Return",
"a",
"number",
"suitably",
"formatted",
"for",
"display",
"as",
"currency",
"with",
"thousands",
"separated",
"by",
"commas",
"and",
"up",
"to",
"two",
"decimal",
"points",
"."
] |
python
|
train
| 26.567568 |
datosgobar/pydatajson
|
pydatajson/search.py
|
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/search.py#L101-L180
|
def get_distributions(catalog, filter_in=None, filter_out=None,
meta_field=None, exclude_meta_fields=None,
only_time_series=False):
"""Devuelve lista de distribuciones del catálogo o de uno de sus metadatos.
Args:
catalog (dict, str or DataJson): Representación externa/interna de un
catálogo. Una representación _externa_ es un path local o una
URL remota a un archivo con la metadata de un catálogo, en
formato JSON o XLSX. La representación _interna_ de un catálogo
es un diccionario. Ejemplos: http://datos.gob.ar/data.json,
http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
"/energia/catalog.xlsx".
filter_in (dict): Devuelve los distribuciones cuyos atributos
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los distribuciones que pertenezcan a un dataset
de ese publisher_name.
filter_out (dict): Devuelve los distribuciones cuyos atributos no
coinciden con los pasados en este diccionario. Ejemplo::
{
"dataset": {
"publisher": {"name": "Ministerio de Ambiente"}
}
}
Sólo se devolverán los distribuciones que no pertenezcan a un
dataset de ese publisher_name.
meta_field (str): Nombre de un metadato de Distribution. En lugar de
devolver los objetos completos Distribution, devuelve una lista de
valores para ese metadato presentes en el catálogo.
exclude_meta_fields (list): Metadatos de Distribution que se quieren
excluir de los objetos Distribution devueltos.
only_time_series (bool): Si es verdadero, sólo devuelve distribuciones
que sean distribuciones de series de tiempo.
"""
filter_in = filter_in or {}
filter_out = filter_out or {}
catalog = read_catalog_obj(catalog)
distributions = []
for dataset in get_datasets(catalog, filter_in, filter_out):
for distribution in dataset.get("distribution", []):
            # add the dataset id
distribution["dataset_identifier"] = dataset["identifier"]
distributions.append(distribution)
filtered_distributions = [
distribution for distribution in distributions if
_filter_dictionary(distribution, filter_in.get("distribution"),
filter_out.get("distribution"))
]
    # apply the special filters
if only_time_series:
filtered_distributions = [distribution for distribution in
filtered_distributions if
distribution_has_time_index(distribution)]
if meta_field:
return [distribution[meta_field]
for distribution in filtered_distributions
if meta_field in distribution]
if exclude_meta_fields:
meta_filtered_distributions = []
for distribution in filtered_distributions:
distribution_meta_filtered = distribution.copy()
for excluded_meta_field in exclude_meta_fields:
distribution_meta_filtered.pop(excluded_meta_field, None)
meta_filtered_distributions.append(distribution_meta_filtered)
return meta_filtered_distributions
else:
return filtered_distributions
|
[
"def",
"get_distributions",
"(",
"catalog",
",",
"filter_in",
"=",
"None",
",",
"filter_out",
"=",
"None",
",",
"meta_field",
"=",
"None",
",",
"exclude_meta_fields",
"=",
"None",
",",
"only_time_series",
"=",
"False",
")",
":",
"filter_in",
"=",
"filter_in",
"or",
"{",
"}",
"filter_out",
"=",
"filter_out",
"or",
"{",
"}",
"catalog",
"=",
"read_catalog_obj",
"(",
"catalog",
")",
"distributions",
"=",
"[",
"]",
"for",
"dataset",
"in",
"get_datasets",
"(",
"catalog",
",",
"filter_in",
",",
"filter_out",
")",
":",
"for",
"distribution",
"in",
"dataset",
".",
"get",
"(",
"\"distribution\"",
",",
"[",
"]",
")",
":",
"# agrega el id del dataset",
"distribution",
"[",
"\"dataset_identifier\"",
"]",
"=",
"dataset",
"[",
"\"identifier\"",
"]",
"distributions",
".",
"append",
"(",
"distribution",
")",
"filtered_distributions",
"=",
"[",
"distribution",
"for",
"distribution",
"in",
"distributions",
"if",
"_filter_dictionary",
"(",
"distribution",
",",
"filter_in",
".",
"get",
"(",
"\"distribution\"",
")",
",",
"filter_out",
".",
"get",
"(",
"\"distribution\"",
")",
")",
"]",
"# realiza filtros especiales",
"if",
"only_time_series",
":",
"filtered_distributions",
"=",
"[",
"distribution",
"for",
"distribution",
"in",
"filtered_distributions",
"if",
"distribution_has_time_index",
"(",
"distribution",
")",
"]",
"if",
"meta_field",
":",
"return",
"[",
"distribution",
"[",
"meta_field",
"]",
"for",
"distribution",
"in",
"filtered_distributions",
"if",
"meta_field",
"in",
"distribution",
"]",
"if",
"exclude_meta_fields",
":",
"meta_filtered_distributions",
"=",
"[",
"]",
"for",
"distribution",
"in",
"filtered_distributions",
":",
"distribution_meta_filtered",
"=",
"distribution",
".",
"copy",
"(",
")",
"for",
"excluded_meta_field",
"in",
"exclude_meta_fields",
":",
"distribution_meta_filtered",
".",
"pop",
"(",
"excluded_meta_field",
",",
"None",
")",
"meta_filtered_distributions",
".",
"append",
"(",
"distribution_meta_filtered",
")",
"return",
"meta_filtered_distributions",
"else",
":",
"return",
"filtered_distributions"
] |
Returns the list of distributions in the catalog or of one of their metadata fields.
Args:
    catalog (dict, str or DataJson): External/internal representation of a
        catalog. An _external_ representation is a local path or a
        remote URL to a file holding a catalog's metadata, in JSON or
        XLSX format. The _internal_ representation of a catalog is a
        dictionary. Examples: http://datos.gob.ar/data.json,
        http://www.ign.gob.ar/descargas/geodatos/catalog.xlsx,
        "/energia/catalog.xlsx".
    filter_in (dict): Returns the distributions whose attributes match
        the ones passed in this dictionary. Example::
        {
            "dataset": {
                "publisher": {"name": "Ministerio de Ambiente"}
            }
        }
        Only the distributions that belong to a dataset with that
        publisher_name will be returned.
    filter_out (dict): Returns the distributions whose attributes do
        not match the ones passed in this dictionary. Example::
        {
            "dataset": {
                "publisher": {"name": "Ministerio de Ambiente"}
            }
        }
        Only the distributions that do not belong to a dataset with
        that publisher_name will be returned.
    meta_field (str): Name of a Distribution metadata field. Instead
        of returning complete Distribution objects, returns the list
        of values of that metadata field present in the catalog.
    exclude_meta_fields (list): Distribution metadata fields to
        exclude from the returned Distribution objects.
    only_time_series (bool): If true, only returns distributions that
        are time-series distributions.
|
[
"Devuelve",
"lista",
"de",
"distribuciones",
"del",
"catálogo",
"o",
"de",
"uno",
"de",
"sus",
"metadatos",
"."
] |
python
|
train
| 44.4625 |
shoebot/shoebot
|
lib/sbaudio/__init__.py
|
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/sbaudio/__init__.py#L55-L63
|
def scaled_fft(fft, scale=1.0):
"""
Produces a nicer graph, I'm not sure if this is correct
"""
data = np.zeros(len(fft))
for i, v in enumerate(fft):
data[i] = scale * (i * v) / NUM_SAMPLES
return data
|
[
"def",
"scaled_fft",
"(",
"fft",
",",
"scale",
"=",
"1.0",
")",
":",
"data",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"fft",
")",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"fft",
")",
":",
"data",
"[",
"i",
"]",
"=",
"scale",
"*",
"(",
"i",
"*",
"v",
")",
"/",
"NUM_SAMPLES",
"return",
"data"
] |
Produces a nicer graph, I'm not sure if this is correct
|
[
"Produces",
"a",
"nicer",
"graph",
"I",
"m",
"not",
"sure",
"if",
"this",
"is",
"correct"
] |
python
|
valid
| 25.111111 |
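A toy run of the scaling above; NUM_SAMPLES is a module-level constant in the source, pinned here to the signal length so the sketch is self-contained.

import numpy as np

NUM_SAMPLES = 8
signal = np.sin(2 * np.pi * np.arange(NUM_SAMPLES) / NUM_SAMPLES)  # one full cycle
fft = np.abs(np.fft.rfft(signal))
data = np.zeros(len(fft))
for i, v in enumerate(fft):
    data[i] = 1.0 * (i * v) / NUM_SAMPLES  # each bin weighted by its index
print(data.round(3))  # energy concentrated in bin 1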
tensorpack/tensorpack
|
tensorpack/dataflow/format.py
|
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/dataflow/format.py#L167-L202
|
def CaffeLMDB(lmdb_path, shuffle=True, keys=None):
"""
Read a Caffe LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Returns:
a :class:`LMDBDataDecoder` instance.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
"""
cpb = get_caffe_pb()
lmdb_data = LMDBData(lmdb_path, shuffle, keys)
def decoder(k, v):
try:
datum = cpb.Datum()
datum.ParseFromString(v)
img = np.fromstring(datum.data, dtype=np.uint8)
img = img.reshape(datum.channels, datum.height, datum.width)
except Exception:
log_once("Cannot read key {}".format(k), 'warn')
return None
return [img.transpose(1, 2, 0), datum.label]
logger.warn("Caffe LMDB format doesn't store jpeg-compressed images, \
it's not recommended due to its inferior performance.")
return LMDBDataDecoder(lmdb_data, decoder)
|
[
"def",
"CaffeLMDB",
"(",
"lmdb_path",
",",
"shuffle",
"=",
"True",
",",
"keys",
"=",
"None",
")",
":",
"cpb",
"=",
"get_caffe_pb",
"(",
")",
"lmdb_data",
"=",
"LMDBData",
"(",
"lmdb_path",
",",
"shuffle",
",",
"keys",
")",
"def",
"decoder",
"(",
"k",
",",
"v",
")",
":",
"try",
":",
"datum",
"=",
"cpb",
".",
"Datum",
"(",
")",
"datum",
".",
"ParseFromString",
"(",
"v",
")",
"img",
"=",
"np",
".",
"fromstring",
"(",
"datum",
".",
"data",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"img",
"=",
"img",
".",
"reshape",
"(",
"datum",
".",
"channels",
",",
"datum",
".",
"height",
",",
"datum",
".",
"width",
")",
"except",
"Exception",
":",
"log_once",
"(",
"\"Cannot read key {}\"",
".",
"format",
"(",
"k",
")",
",",
"'warn'",
")",
"return",
"None",
"return",
"[",
"img",
".",
"transpose",
"(",
"1",
",",
"2",
",",
"0",
")",
",",
"datum",
".",
"label",
"]",
"logger",
".",
"warn",
"(",
"\"Caffe LMDB format doesn't store jpeg-compressed images, \\\n it's not recommended due to its inferior performance.\"",
")",
"return",
"LMDBDataDecoder",
"(",
"lmdb_data",
",",
"decoder",
")"
] |
Read a Caffe LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Returns:
a :class:`LMDBDataDecoder` instance.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
|
[
"Read",
"a",
"Caffe",
"LMDB",
"file",
"where",
"each",
"value",
"contains",
"a",
"caffe",
".",
"Datum",
"protobuf",
".",
"Produces",
"datapoints",
"of",
"the",
"format",
":",
"[",
"HWC",
"image",
"label",
"]",
"."
] |
python
|
train
| 33 |
jmgilman/Neolib
|
neolib/pyamf/util/pure.py
|
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/util/pure.py#L116-L123
|
def write(self, s, size=None):
"""
Writes the content of the specified C{s} into this buffer.
@param s: Raw bytes
"""
self._buffer.write(s)
self._len_changed = True
|
[
"def",
"write",
"(",
"self",
",",
"s",
",",
"size",
"=",
"None",
")",
":",
"self",
".",
"_buffer",
".",
"write",
"(",
"s",
")",
"self",
".",
"_len_changed",
"=",
"True"
] |
Writes the content of the specified C{s} into this buffer.
@param s: Raw bytes
|
[
"Writes",
"the",
"content",
"of",
"the",
"specified",
"C",
"{",
"s",
"}",
"into",
"this",
"buffer",
"."
] |
python
|
train
| 25.75 |
astrocatalogs/astrocats
|
astrocats/catalog/gitter.py
|
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/gitter.py#L30-L56
|
def get_sha(path=None, log=None, short=False, timeout=None):
"""Use `git rev-parse HEAD <REPO>` to get current SHA.
"""
# git_command = "git rev-parse HEAD {}".format(repo_name).split()
# git_command = "git rev-parse HEAD".split()
git_command = ["git", "rev-parse"]
if short:
git_command.append("--short")
git_command.append("HEAD")
kwargs = {}
if path is not None:
kwargs['cwd'] = path
if timeout is not None:
kwargs['timeout'] = timeout
if log is not None:
log.debug("{} {}".format(git_command, str(kwargs)))
sha = subprocess.check_output(git_command, **kwargs)
try:
sha = sha.decode('ascii').strip()
except:
if log is not None:
log.debug("decode of '{}' failed".format(sha))
return sha
|
[
"def",
"get_sha",
"(",
"path",
"=",
"None",
",",
"log",
"=",
"None",
",",
"short",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"# git_command = \"git rev-parse HEAD {}\".format(repo_name).split()",
"# git_command = \"git rev-parse HEAD\".split()",
"git_command",
"=",
"[",
"\"git\"",
",",
"\"rev-parse\"",
"]",
"if",
"short",
":",
"git_command",
".",
"append",
"(",
"\"--short\"",
")",
"git_command",
".",
"append",
"(",
"\"HEAD\"",
")",
"kwargs",
"=",
"{",
"}",
"if",
"path",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'cwd'",
"]",
"=",
"path",
"if",
"timeout",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'timeout'",
"]",
"=",
"timeout",
"if",
"log",
"is",
"not",
"None",
":",
"log",
".",
"debug",
"(",
"\"{} {}\"",
".",
"format",
"(",
"git_command",
",",
"str",
"(",
"kwargs",
")",
")",
")",
"sha",
"=",
"subprocess",
".",
"check_output",
"(",
"git_command",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"sha",
"=",
"sha",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
"except",
":",
"if",
"log",
"is",
"not",
"None",
":",
"log",
".",
"debug",
"(",
"\"decode of '{}' failed\"",
".",
"format",
"(",
"sha",
")",
")",
"return",
"sha"
] |
Use `git rev-parse HEAD <REPO>` to get current SHA.
|
[
"Use",
"git",
"rev",
"-",
"parse",
"HEAD",
"<REPO",
">",
"to",
"get",
"current",
"SHA",
"."
] |
python
|
train
| 29.111111 |
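The one-off shell equivalent of what get_sha() runs; this only succeeds when executed inside a git checkout, and the arguments mirror the short=True path above.

import subprocess

sha = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
                              cwd=".", timeout=10)
print(sha.decode("ascii").strip())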
gwastro/pycbc
|
pycbc/transforms.py
|
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L107-L161
|
def from_config(cls, cp, section, outputs, skip_opts=None,
additional_opts=None):
"""Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
"""
tag = outputs
if skip_opts is None:
skip_opts = []
if additional_opts is None:
additional_opts = {}
else:
additional_opts = additional_opts.copy()
outputs = set(outputs.split(VARARGS_DELIM))
special_args = ['name'] + skip_opts + additional_opts.keys()
# get any extra arguments to pass to init
extra_args = {}
for opt in cp.options("-".join([section, tag])):
if opt in special_args:
continue
# check if option can be cast as a float
val = cp.get_opt_tag(section, opt, tag)
try:
val = float(val)
except ValueError:
pass
# add option
extra_args.update({opt:val})
extra_args.update(additional_opts)
out = cls(**extra_args)
# check that the outputs matches
if outputs-out.outputs != set() or out.outputs-outputs != set():
raise ValueError("outputs of class do not match outputs specified "
"in section")
return out
|
[
"def",
"from_config",
"(",
"cls",
",",
"cp",
",",
"section",
",",
"outputs",
",",
"skip_opts",
"=",
"None",
",",
"additional_opts",
"=",
"None",
")",
":",
"tag",
"=",
"outputs",
"if",
"skip_opts",
"is",
"None",
":",
"skip_opts",
"=",
"[",
"]",
"if",
"additional_opts",
"is",
"None",
":",
"additional_opts",
"=",
"{",
"}",
"else",
":",
"additional_opts",
"=",
"additional_opts",
".",
"copy",
"(",
")",
"outputs",
"=",
"set",
"(",
"outputs",
".",
"split",
"(",
"VARARGS_DELIM",
")",
")",
"special_args",
"=",
"[",
"'name'",
"]",
"+",
"skip_opts",
"+",
"additional_opts",
".",
"keys",
"(",
")",
"# get any extra arguments to pass to init",
"extra_args",
"=",
"{",
"}",
"for",
"opt",
"in",
"cp",
".",
"options",
"(",
"\"-\"",
".",
"join",
"(",
"[",
"section",
",",
"tag",
"]",
")",
")",
":",
"if",
"opt",
"in",
"special_args",
":",
"continue",
"# check if option can be cast as a float",
"val",
"=",
"cp",
".",
"get_opt_tag",
"(",
"section",
",",
"opt",
",",
"tag",
")",
"try",
":",
"val",
"=",
"float",
"(",
"val",
")",
"except",
"ValueError",
":",
"pass",
"# add option",
"extra_args",
".",
"update",
"(",
"{",
"opt",
":",
"val",
"}",
")",
"extra_args",
".",
"update",
"(",
"additional_opts",
")",
"out",
"=",
"cls",
"(",
"*",
"*",
"extra_args",
")",
"# check that the outputs matches",
"if",
"outputs",
"-",
"out",
".",
"outputs",
"!=",
"set",
"(",
")",
"or",
"out",
".",
"outputs",
"-",
"outputs",
"!=",
"set",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"outputs of class do not match outputs specified \"",
"\"in section\"",
")",
"return",
"out"
] |
Initializes a transform from the given section.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
A parsed configuration file that contains the transform options.
section : str
Name of the section in the configuration file.
outputs : str
The names of the parameters that are output by this transformation,
separated by `VARARGS_DELIM`. These must appear in the "tag" part
of the section header.
skip_opts : list, optional
Do not read options in the given list.
additional_opts : dict, optional
Any additional arguments to pass to the class. If an option is
provided that also exists in the config file, the value provided
will be used instead of being read from the file.
Returns
-------
cls
An instance of the class.
|
[
"Initializes",
"a",
"transform",
"from",
"the",
"given",
"section",
"."
] |
python
|
train
| 38.818182 |
twidi/py-dataql
|
dataql/parsers/generic.py
|
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/generic.py#L373-L399
|
def visit_field(self, _, children):
"""A simple field.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
Returns
-------
.resources.Field
An instance of ``.resources.Field`` with the correct name.
Example
-------
>>> DataQLParser(r'foo', default_rule='FIELD').data
<Field[foo] />
>>> DataQLParser(r'foo(1)', default_rule='FIELD').data
<Field[foo] .foo(1) />
>>> DataQLParser(r'foo.bar()', default_rule='FIELD').data
<Field[foo] .foo.bar() />
"""
filters = children[0]
return self.Field(getattr(filters[0], 'name', None), filters=filters)
|
[
"def",
"visit_field",
"(",
"self",
",",
"_",
",",
"children",
")",
":",
"filters",
"=",
"children",
"[",
"0",
"]",
"return",
"self",
".",
"Field",
"(",
"getattr",
"(",
"filters",
"[",
"0",
"]",
",",
"'name'",
",",
"None",
")",
",",
"filters",
"=",
"filters",
")"
] |
A simple field.
Arguments
---------
_ (node) : parsimonious.nodes.Node.
children : list
- 0: for ``FILTERS``: list of instances of ``.resources.Field``.
Returns
-------
.resources.Field
An instance of ``.resources.Field`` with the correct name.
Example
-------
>>> DataQLParser(r'foo', default_rule='FIELD').data
<Field[foo] />
>>> DataQLParser(r'foo(1)', default_rule='FIELD').data
<Field[foo] .foo(1) />
>>> DataQLParser(r'foo.bar()', default_rule='FIELD').data
<Field[foo] .foo.bar() />
|
[
"A",
"simple",
"field",
"."
] |
python
|
train
| 28.851852 |
petebachant/PXL
|
pxl/timeseries.py
|
https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/timeseries.py#L91-L96
|
def average_over_area(q, x, y):
"""Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule"""
area = (np.max(x) - np.min(x))*(np.max(y) - np.min(y))
integral = np.trapz(np.trapz(q, y, axis=0), x)
return integral/area
|
[
"def",
"average_over_area",
"(",
"q",
",",
"x",
",",
"y",
")",
":",
"area",
"=",
"(",
"np",
".",
"max",
"(",
"x",
")",
"-",
"np",
".",
"min",
"(",
"x",
")",
")",
"*",
"(",
"np",
".",
"max",
"(",
"y",
")",
"-",
"np",
".",
"min",
"(",
"y",
")",
")",
"integral",
"=",
"np",
".",
"trapz",
"(",
"np",
".",
"trapz",
"(",
"q",
",",
"y",
",",
"axis",
"=",
"0",
")",
",",
"x",
")",
"return",
"integral",
"/",
"area"
] |
Averages a quantity `q` over a rectangular area given a 2D array and
the x and y vectors for sample locations, using the trapezoidal rule
|
[
"Averages",
"a",
"quantity",
"q",
"over",
"a",
"rectangular",
"area",
"given",
"a",
"2D",
"array",
"and",
"the",
"x",
"and",
"y",
"vectors",
"for",
"sample",
"locations",
"using",
"the",
"trapezoidal",
"rule"
] |
python
|
train
| 53 |
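A quick numerical check of the trapezoidal average above, using a plane whose exact mean over the rectangle is known; the grid sizes are arbitrary:

    import numpy as np

    x = np.linspace(0.0, 2.0, 50)        # sample locations along x
    y = np.linspace(0.0, 1.0, 40)        # sample locations along y
    xx, yy = np.meshgrid(x, y)           # q is indexed (y, x), so axis=0 runs over y
    q = xx + yy                          # mean of x + y over [0,2]x[0,1] is 1.5
    print(average_over_area(q, x, y))    # -> 1.5 (trapezoidal rule is exact for a plane)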
Stufinite/djangoApiDec
|
djangoApiDec/djangoApiDec.py
|
https://github.com/Stufinite/djangoApiDec/blob/8b2d5776b3413b1b850df12a92f30526c05c0a46/djangoApiDec/djangoApiDec.py#L38-L54
|
def queryString_required(strList):
""" An decorator checking whether queryString key is valid or not
Args:
strList: allowed queryString keys
Returns:
if it contains an invalid queryString key, it will raise an exception.
"""
def _dec(function):
@wraps(function)
def _wrap(request, *args, **kwargs):
for i in strList:
if i not in request.GET:
raise Http404("api does not exist")
return function(request, *args, **kwargs)
return _wrap
return _dec
|
[
"def",
"queryString_required",
"(",
"strList",
")",
":",
"def",
"_dec",
"(",
"function",
")",
":",
"@",
"wraps",
"(",
"function",
")",
"def",
"_wrap",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"i",
"in",
"strList",
":",
"if",
"i",
"not",
"in",
"request",
".",
"GET",
":",
"raise",
"Http404",
"(",
"\"api does not exist\"",
")",
"return",
"function",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrap",
"return",
"_dec"
] |
A decorator checking whether queryString key is valid or not
Args:
strList: allowed queryString keys
Returns:
if it contains an invalid queryString key, it will raise an exception.
|
[
"An",
"decorator",
"checking",
"whether",
"queryString",
"key",
"is",
"valid",
"or",
"not",
"Args",
":",
"str",
":",
"allowed",
"queryString",
"key"
] |
python
|
valid
| 26.235294 |
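A usage sketch for the decorator above; the view and key names are made up. Requests missing either key are rejected with the 404 before the view body runs:

    from django.http import JsonResponse

    @queryString_required(['year', 'semester'])
    def course_list(request):
        # request.GET is guaranteed to contain both keys here
        return JsonResponse({'year': request.GET['year'],
                             'semester': request.GET['semester']})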
aiogram/aiogram
|
aiogram/utils/payload.py
|
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/utils/payload.py#L30-L43
|
def _normalize(obj):
"""
Normalize dicts and lists
:param obj:
:return: normalized object
"""
if isinstance(obj, list):
return [_normalize(item) for item in obj]
elif isinstance(obj, dict):
return {k: _normalize(v) for k, v in obj.items() if v is not None}
elif hasattr(obj, 'to_python'):
return obj.to_python()
return obj
|
[
"def",
"_normalize",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"return",
"[",
"_normalize",
"(",
"item",
")",
"for",
"item",
"in",
"obj",
"]",
"elif",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"return",
"{",
"k",
":",
"_normalize",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"elif",
"hasattr",
"(",
"obj",
",",
"'to_python'",
")",
":",
"return",
"obj",
".",
"to_python",
"(",
")",
"return",
"obj"
] |
Normalize dicts and lists
:param obj:
:return: normalized object
|
[
"Normalize",
"dicts",
"and",
"lists"
] |
python
|
train
| 26.428571 |
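What the recursive normalization above does to a mixed payload. The Point class here is illustrative; in aiogram the to_python hook comes from its own serializable types:

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

        def to_python(self):
            return {'x': self.x, 'y': self.y}

    payload = {'chat_id': 42, 'marker': Point(1, 2), 'comment': None}
    print(_normalize(payload))
    # -> {'chat_id': 42, 'marker': {'x': 1, 'y': 2}}   (None values dropped)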
guysv/txkernel
|
txkernel/connection.py
|
https://github.com/guysv/txkernel/blob/a0aa1591df347732264f594bb13bc10d8aaf0f23/txkernel/connection.py#L42-L50
|
def generate(cls, partial_props=None):
"""
Generate new connection file props from
defaults
"""
partial_props = partial_props or {}
props = partial_props.copy()
props.update(cls.DEFAULT_PROPERTIES)
return cls(props)
|
[
"def",
"generate",
"(",
"cls",
",",
"partial_props",
"=",
"None",
")",
":",
"partial_props",
"=",
"partial_props",
"or",
"{",
"}",
"props",
"=",
"partial_props",
".",
"copy",
"(",
")",
"props",
".",
"update",
"(",
"cls",
".",
"DEFAULT_PROPERTIES",
")",
"return",
"cls",
"(",
"props",
")"
] |
Generate new connection file props from
defaults
|
[
"Generate",
"new",
"connection",
"file",
"props",
"from",
"defaults"
] |
python
|
train
| 30.111111 |
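One behavior of the classmethod above is easy to miss: because props.update(cls.DEFAULT_PROPERTIES) runs after the copy, a default always wins over a key supplied in partial_props, so partial values only survive for keys that have no default. A sketch with hypothetical values:

    DEFAULT_PROPERTIES = {'ip': '127.0.0.1', 'transport': 'tcp'}
    partial = {'ip': '0.0.0.0', 'kernel_name': 'txkernel'}

    props = partial.copy()
    props.update(DEFAULT_PROPERTIES)   # same order as in generate()
    # props == {'ip': '127.0.0.1', 'transport': 'tcp', 'kernel_name': 'txkernel'}
    # the supplied ip was overwritten by the default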
pschmitt/pykeepass
|
pykeepass/kdbx_parsing/common.py
|
https://github.com/pschmitt/pykeepass/blob/85da3630d6e410b2a10d3e711cd69308b51d401d/pykeepass/kdbx_parsing/common.py#L101-L144
|
def compute_key_composite(password=None, keyfile=None):
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest()
|
[
"def",
"compute_key_composite",
"(",
"password",
"=",
"None",
",",
"keyfile",
"=",
"None",
")",
":",
"# hash the password",
"if",
"password",
":",
"password_composite",
"=",
"hashlib",
".",
"sha256",
"(",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"digest",
"(",
")",
"else",
":",
"password_composite",
"=",
"b''",
"# hash the keyfile",
"if",
"keyfile",
":",
"# try to read XML keyfile",
"try",
":",
"with",
"open",
"(",
"keyfile",
",",
"'r'",
")",
"as",
"f",
":",
"tree",
"=",
"etree",
".",
"parse",
"(",
"f",
")",
".",
"getroot",
"(",
")",
"keyfile_composite",
"=",
"base64",
".",
"b64decode",
"(",
"tree",
".",
"find",
"(",
"'Key/Data'",
")",
".",
"text",
")",
"# otherwise, try to read plain keyfile",
"except",
"(",
"etree",
".",
"XMLSyntaxError",
",",
"UnicodeDecodeError",
")",
":",
"try",
":",
"with",
"open",
"(",
"keyfile",
",",
"'rb'",
")",
"as",
"f",
":",
"key",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"int",
"(",
"key",
",",
"16",
")",
"is_hex",
"=",
"True",
"except",
"ValueError",
":",
"is_hex",
"=",
"False",
"# if the length is 32 bytes we assume it is the key",
"if",
"len",
"(",
"key",
")",
"==",
"32",
":",
"keyfile_composite",
"=",
"key",
"# if the length is 64 bytes we assume the key is hex encoded",
"elif",
"len",
"(",
"key",
")",
"==",
"64",
"and",
"is_hex",
":",
"keyfile_composite",
"=",
"codecs",
".",
"decode",
"(",
"key",
",",
"'hex'",
")",
"# anything else may be a file to hash for the key",
"else",
":",
"keyfile_composite",
"=",
"hashlib",
".",
"sha256",
"(",
"key",
")",
".",
"digest",
"(",
")",
"except",
":",
"raise",
"IOError",
"(",
"'Could not read keyfile'",
")",
"else",
":",
"keyfile_composite",
"=",
"b''",
"# create composite key from password and keyfile composites",
"return",
"hashlib",
".",
"sha256",
"(",
"password_composite",
"+",
"keyfile_composite",
")",
".",
"digest",
"(",
")"
] |
Compute composite key.
Used in header verification and payload decryption.
|
[
"Compute",
"composite",
"key",
".",
"Used",
"in",
"header",
"verification",
"and",
"payload",
"decryption",
"."
] |
python
|
train
| 38.568182 |
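The composite above reduces to sha256(sha256(password) + keyfile_material), which can be sanity-checked directly; the password and path below are illustrative:

    import hashlib

    # password only: the keyfile component is empty
    k = compute_key_composite(password='s3cret')
    assert k == hashlib.sha256(
        hashlib.sha256(b's3cret').digest()).digest()

    # password plus a raw 32-byte keyfile (neither XML nor hex): the
    # file's bytes are used verbatim as the keyfile component
    k2 = compute_key_composite(password='s3cret', keyfile='/path/to/key.bin')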
tomduck/pandoc-tablenos
|
pandoc_tablenos.py
|
https://github.com/tomduck/pandoc-tablenos/blob/b3c7b6a259eec5fb7c8420033d05b32640f1f266/pandoc_tablenos.py#L89-L107
|
def attach_attrs_table(key, value, fmt, meta):
"""Extracts attributes and attaches them to element."""
# We can't use attach_attrs_factory() because Table is a block-level element
if key in ['Table']:
assert len(value) == 5
caption = value[0] # caption, align, x, head, body
# Set n to the index where the attributes start
n = 0
while n < len(caption) and not \
(caption[n]['t'] == 'Str' and caption[n]['c'].startswith('{')):
n += 1
try:
attrs = extract_attrs(caption, n)
value.insert(0, attrs)
except (ValueError, IndexError):
pass
|
[
"def",
"attach_attrs_table",
"(",
"key",
",",
"value",
",",
"fmt",
",",
"meta",
")",
":",
"# We can't use attach_attrs_factory() because Table is a block-level element",
"if",
"key",
"in",
"[",
"'Table'",
"]",
":",
"assert",
"len",
"(",
"value",
")",
"==",
"5",
"caption",
"=",
"value",
"[",
"0",
"]",
"# caption, align, x, head, body",
"# Set n to the index where the attributes start",
"n",
"=",
"0",
"while",
"n",
"<",
"len",
"(",
"caption",
")",
"and",
"not",
"(",
"caption",
"[",
"n",
"]",
"[",
"'t'",
"]",
"==",
"'Str'",
"and",
"caption",
"[",
"n",
"]",
"[",
"'c'",
"]",
".",
"startswith",
"(",
"'{'",
")",
")",
":",
"n",
"+=",
"1",
"try",
":",
"attrs",
"=",
"extract_attrs",
"(",
"caption",
",",
"n",
")",
"value",
".",
"insert",
"(",
"0",
",",
"attrs",
")",
"except",
"(",
"ValueError",
",",
"IndexError",
")",
":",
"pass"
] |
Extracts attributes and attaches them to element.
|
[
"Extracts",
"attributes",
"and",
"attaches",
"them",
"to",
"element",
"."
] |
python
|
train
| 33.894737 |
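The function above has the (key, value, format, meta) signature that pandoc JSON filters expect, so a minimal standalone driver might look like the sketch below; pandoc-tablenos itself wires it into a larger filter chain:

    from pandocfilters import toJSONFilter

    if __name__ == '__main__':
        # walks the pandoc AST; a caption ending in something like
        # "{#tbl:results}" has its attributes extracted and prepended
        # to the Table's value list
        toJSONFilter(attach_attrs_table)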
wbond/vat_moss-python
|
vat_moss/exchange_rates.py
|
https://github.com/wbond/vat_moss-python/blob/5089dcf036eb2e9abc58e78186fd46b522a50620/vat_moss/exchange_rates.py#L26-L158
|
def fetch():
"""
Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
    A 2-element tuple of (date, rates): the ECB reference date as a
    string, plus a dict with string keys that are currency codes and
    values that are Decimals of the exchange rate with the base (1.0000)
    being the Euro (EUR). The following currencies are included, based
    on this library being built for EU and Norway VAT, plus USD for
    the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD
"""
response = urlopen('https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml')
_, params = cgi.parse_header(response.headers['Content-Type'])
if 'charset' in params:
encoding = params['charset']
else:
encoding = 'utf-8'
return_xml = response.read().decode(encoding)
# Example return data
#
# <gesmes:Envelope xmlns:gesmes="http://www.gesmes.org/xml/2002-08-01" xmlns="http://www.ecb.int/vocabulary/2002-08-01/eurofxref">
# <gesmes:subject>Reference rates</gesmes:subject>
# <gesmes:Sender>
# <gesmes:name>European Central Bank</gesmes:name>
# </gesmes:Sender>
# <Cube>
# <Cube time="2015-01-09">
# <Cube currency="USD" rate="1.1813"/>
# <Cube currency="JPY" rate="140.81"/>
# <Cube currency="BGN" rate="1.9558"/>
# <Cube currency="CZK" rate="28.062"/>
# <Cube currency="DKK" rate="7.4393"/>
# <Cube currency="GBP" rate="0.77990"/>
# <Cube currency="HUF" rate="317.39"/>
# <Cube currency="PLN" rate="4.2699"/>
# <Cube currency="RON" rate="4.4892"/>
# <Cube currency="SEK" rate="9.4883"/>
# <Cube currency="CHF" rate="1.2010"/>
# <Cube currency="NOK" rate="9.0605"/>
# <Cube currency="HRK" rate="7.6780"/>
# <Cube currency="RUB" rate="72.8910"/>
# <Cube currency="TRY" rate="2.7154"/>
# <Cube currency="AUD" rate="1.4506"/>
# <Cube currency="BRL" rate="3.1389"/>
# <Cube currency="CAD" rate="1.3963"/>
# <Cube currency="CNY" rate="7.3321"/>
# <Cube currency="HKD" rate="9.1593"/>
# <Cube currency="IDR" rate="14925.34"/>
# <Cube currency="ILS" rate="4.6614"/>
# <Cube currency="INR" rate="73.6233"/>
# <Cube currency="KRW" rate="1290.29"/>
# <Cube currency="MXN" rate="17.3190"/>
# <Cube currency="MYR" rate="4.2054"/>
# <Cube currency="NZD" rate="1.5115"/>
# <Cube currency="PHP" rate="53.090"/>
# <Cube currency="SGD" rate="1.5789"/>
# <Cube currency="THB" rate="38.846"/>
# <Cube currency="ZAR" rate="13.6655"/>
# </Cube>
# </Cube>
# </gesmes:Envelope>
# If we don't explicitly recode to UTF-8, ElementTree stupidly uses
# ascii on Python 2.7
envelope = ElementTree.fromstring(return_xml.encode('utf-8'))
namespaces = {
'gesmes': 'http://www.gesmes.org/xml/2002-08-01',
'eurofxref': 'http://www.ecb.int/vocabulary/2002-08-01/eurofxref'
}
date_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube[@time]', namespaces)
if not date_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube time=""> tag in ECB XML')
date = date_elements[0].get('time')
if not isinstance(date, str_cls):
date = date.decode('utf-8')
currency_elements = envelope.findall('./eurofxref:Cube/eurofxref:Cube/eurofxref:Cube[@currency][@rate]', namespaces)
if not currency_elements:
# Fail loudly if the XML seems to have changed
raise WebServiceError('Unable to find <Cube currency="" rate=""> tags in ECB XML')
rates = {
'EUR': Decimal('1.0000')
}
applicable_currencies = {
'BGN': True,
'CZK': True,
'DKK': True,
'EUR': True,
'GBP': True,
'HRK': True,
'HUF': True,
'NOK': True,
'PLN': True,
'RON': True,
'SEK': True,
'USD': True
}
for currency_element in currency_elements:
code = currency_element.attrib.get('currency')
if code not in applicable_currencies:
continue
rate = currency_element.attrib.get('rate')
rates[code] = Decimal(rate)
return (date, rates)
|
[
"def",
"fetch",
"(",
")",
":",
"response",
"=",
"urlopen",
"(",
"'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml'",
")",
"_",
",",
"params",
"=",
"cgi",
".",
"parse_header",
"(",
"response",
".",
"headers",
"[",
"'Content-Type'",
"]",
")",
"if",
"'charset'",
"in",
"params",
":",
"encoding",
"=",
"params",
"[",
"'charset'",
"]",
"else",
":",
"encoding",
"=",
"'utf-8'",
"return_xml",
"=",
"response",
".",
"read",
"(",
")",
".",
"decode",
"(",
"encoding",
")",
"# Example return data",
"#",
"# <gesmes:Envelope xmlns:gesmes=\"http://www.gesmes.org/xml/2002-08-01\" xmlns=\"http://www.ecb.int/vocabulary/2002-08-01/eurofxref\">",
"# <gesmes:subject>Reference rates</gesmes:subject>",
"# <gesmes:Sender>",
"# <gesmes:name>European Central Bank</gesmes:name>",
"# </gesmes:Sender>",
"# <Cube>",
"# <Cube time=\"2015-01-09\">",
"# <Cube currency=\"USD\" rate=\"1.1813\"/>",
"# <Cube currency=\"JPY\" rate=\"140.81\"/>",
"# <Cube currency=\"BGN\" rate=\"1.9558\"/>",
"# <Cube currency=\"CZK\" rate=\"28.062\"/>",
"# <Cube currency=\"DKK\" rate=\"7.4393\"/>",
"# <Cube currency=\"GBP\" rate=\"0.77990\"/>",
"# <Cube currency=\"HUF\" rate=\"317.39\"/>",
"# <Cube currency=\"PLN\" rate=\"4.2699\"/>",
"# <Cube currency=\"RON\" rate=\"4.4892\"/>",
"# <Cube currency=\"SEK\" rate=\"9.4883\"/>",
"# <Cube currency=\"CHF\" rate=\"1.2010\"/>",
"# <Cube currency=\"NOK\" rate=\"9.0605\"/>",
"# <Cube currency=\"HRK\" rate=\"7.6780\"/>",
"# <Cube currency=\"RUB\" rate=\"72.8910\"/>",
"# <Cube currency=\"TRY\" rate=\"2.7154\"/>",
"# <Cube currency=\"AUD\" rate=\"1.4506\"/>",
"# <Cube currency=\"BRL\" rate=\"3.1389\"/>",
"# <Cube currency=\"CAD\" rate=\"1.3963\"/>",
"# <Cube currency=\"CNY\" rate=\"7.3321\"/>",
"# <Cube currency=\"HKD\" rate=\"9.1593\"/>",
"# <Cube currency=\"IDR\" rate=\"14925.34\"/>",
"# <Cube currency=\"ILS\" rate=\"4.6614\"/>",
"# <Cube currency=\"INR\" rate=\"73.6233\"/>",
"# <Cube currency=\"KRW\" rate=\"1290.29\"/>",
"# <Cube currency=\"MXN\" rate=\"17.3190\"/>",
"# <Cube currency=\"MYR\" rate=\"4.2054\"/>",
"# <Cube currency=\"NZD\" rate=\"1.5115\"/>",
"# <Cube currency=\"PHP\" rate=\"53.090\"/>",
"# <Cube currency=\"SGD\" rate=\"1.5789\"/>",
"# <Cube currency=\"THB\" rate=\"38.846\"/>",
"# <Cube currency=\"ZAR\" rate=\"13.6655\"/>",
"# </Cube>",
"# </Cube>",
"# </gesmes:Envelope>",
"# If we don't explicitly recode to UTF-8, ElementTree stupidly uses",
"# ascii on Python 2.7",
"envelope",
"=",
"ElementTree",
".",
"fromstring",
"(",
"return_xml",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"namespaces",
"=",
"{",
"'gesmes'",
":",
"'http://www.gesmes.org/xml/2002-08-01'",
",",
"'eurofxref'",
":",
"'http://www.ecb.int/vocabulary/2002-08-01/eurofxref'",
"}",
"date_elements",
"=",
"envelope",
".",
"findall",
"(",
"'./eurofxref:Cube/eurofxref:Cube[@time]'",
",",
"namespaces",
")",
"if",
"not",
"date_elements",
":",
"# Fail loudly if the XML seems to have changed",
"raise",
"WebServiceError",
"(",
"'Unable to find <Cube time=\"\"> tag in ECB XML'",
")",
"date",
"=",
"date_elements",
"[",
"0",
"]",
".",
"get",
"(",
"'time'",
")",
"if",
"not",
"isinstance",
"(",
"date",
",",
"str_cls",
")",
":",
"date",
"=",
"date",
".",
"decode",
"(",
"'utf-8'",
")",
"currency_elements",
"=",
"envelope",
".",
"findall",
"(",
"'./eurofxref:Cube/eurofxref:Cube/eurofxref:Cube[@currency][@rate]'",
",",
"namespaces",
")",
"if",
"not",
"currency_elements",
":",
"# Fail loudly if the XML seems to have changed",
"raise",
"WebServiceError",
"(",
"'Unable to find <Cube currency=\"\" rate=\"\"> tags in ECB XML'",
")",
"rates",
"=",
"{",
"'EUR'",
":",
"Decimal",
"(",
"'1.0000'",
")",
"}",
"applicable_currenties",
"=",
"{",
"'BGN'",
":",
"True",
",",
"'CZK'",
":",
"True",
",",
"'DKK'",
":",
"True",
",",
"'EUR'",
":",
"True",
",",
"'GBP'",
":",
"True",
",",
"'HRK'",
":",
"True",
",",
"'HUF'",
":",
"True",
",",
"'NOK'",
":",
"True",
",",
"'PLN'",
":",
"True",
",",
"'RON'",
":",
"True",
",",
"'SEK'",
":",
"True",
",",
"'USD'",
":",
"True",
"}",
"for",
"currency_element",
"in",
"currency_elements",
":",
"code",
"=",
"currency_element",
".",
"attrib",
".",
"get",
"(",
"'currency'",
")",
"if",
"code",
"not",
"in",
"applicable_currenties",
":",
"continue",
"rate",
"=",
"currency_element",
".",
"attrib",
".",
"get",
"(",
"'rate'",
")",
"rates",
"[",
"code",
"]",
"=",
"Decimal",
"(",
"rate",
")",
"return",
"(",
"date",
",",
"rates",
")"
] |
Fetches the latest exchange rate info from the European Central Bank. These
rates need to be used for displaying invoices since some countries require
local currency be quoted. Also useful to store the GBP rate of the VAT
collected at time of purchase to prevent fluctuations in exchange rates from
significantly altering the amount of tax due the HMRC (if you are using them
for VAT MOSS).
:return:
    A 2-element tuple of (date, rates): the ECB reference date as a
    string, plus a dict with string keys that are currency codes and
    values that are Decimals of the exchange rate with the base (1.0000)
    being the Euro (EUR). The following currencies are included, based
    on this library being built for EU and Norway VAT, plus USD for
    the author:
- BGN
- CZK
- DKK
- EUR
- GBP
- HUF
- HRK
- NOK
- PLN
- RON
- SEK
- USD
|
[
"Fetches",
"the",
"latest",
"exchange",
"rate",
"info",
"from",
"the",
"European",
"Central",
"Bank",
".",
"These",
"rates",
"need",
"to",
"be",
"used",
"for",
"displaying",
"invoices",
"since",
"some",
"countries",
"require",
"local",
"currency",
"be",
"quoted",
".",
"Also",
"useful",
"to",
"store",
"the",
"GBP",
"rate",
"of",
"the",
"VAT",
"collected",
"at",
"time",
"of",
"purchase",
"to",
"prevent",
"fluctuations",
"in",
"exchange",
"rates",
"from",
"significantly",
"altering",
"the",
"amount",
"of",
"tax",
"due",
"the",
"HMRC",
"(",
"if",
"you",
"are",
"using",
"them",
"for",
"VAT",
"MOSS",
")",
"."
] |
python
|
train
| 36.849624 |
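A short sketch of consuming the (date, rates) pair returned above; cross rates fall out of the shared EUR base:

    from decimal import Decimal

    date, rates = fetch()
    print(date)              # e.g. '2015-01-09'
    print(rates['GBP'])      # Decimal quote of EUR -> GBP

    # convert an EUR amount into another listed currency
    amount_nok = (Decimal('49.00') * rates['NOK']).quantize(Decimal('0.01'))

    # cross rate between two non-EUR currencies via the EUR base
    usd_per_gbp = rates['USD'] / rates['GBP']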
SKA-ScienceDataProcessor/integration-prototype
|
sip/tango_control/tango_processing_block/app/delete_devices.py
|
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_processing_block/app/delete_devices.py#L11-L24
|
def delete_pb_devices():
"""Delete PBs devices from the Tango database."""
parser = argparse.ArgumentParser(description='Delete PB devices.')
parser.add_argument('num_pb', type=int,
                    help='Number of PB devices to delete.')
args = parser.parse_args()
log = logging.getLogger('sip.tango_control.subarray')
tango_db = Database()
log.info("Deleting PB devices:")
for index in range(args.num_pb):
name = 'sip_sdp/pb/{:05d}'.format(index)
log.info("\t%s", name)
tango_db.delete_device(name)
|
[
"def",
"delete_pb_devices",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Register PB devices.'",
")",
"parser",
".",
"add_argument",
"(",
"'num_pb'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Number of PBs devices to register.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'sip.tango_control.subarray'",
")",
"tango_db",
"=",
"Database",
"(",
")",
"log",
".",
"info",
"(",
"\"Deleting PB devices:\"",
")",
"for",
"index",
"in",
"range",
"(",
"args",
".",
"num_pb",
")",
":",
"name",
"=",
"'sip_sdp/pb/{:05d}'",
".",
"format",
"(",
"index",
")",
"log",
".",
"info",
"(",
"\"\\t%s\"",
",",
"name",
")",
"tango_db",
".",
"delete_device",
"(",
"name",
")"
] |
Delete PB devices from the Tango database.
|
[
"Delete",
"PBs",
"devices",
"from",
"the",
"Tango",
"database",
"."
] |
python
|
train
| 39.714286 |
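The script above is a small argparse CLI, so invocation is a one-liner; it needs a reachable Tango database, and the device count is the only argument:

    # delete the first 16 processing-block devices,
    # i.e. sip_sdp/pb/00000 through sip_sdp/pb/00015
    #
    #   $ python delete_devices.py 16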