repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 class) | partition (string, 3 classes)
---|---|---|---|---|---|---|---|---
Kappa-Dev/KaSim | python/kappy/kappa_common.py | https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L72-L77 | def toJSON(self):
"""Get a json dict of the attributes of this object."""
return {"id": self.id,
"compile": self.compile,
"position": self.position,
"version": self.version} | [
"def",
"toJSON",
"(",
"self",
")",
":",
"return",
"{",
"\"id\"",
":",
"self",
".",
"id",
",",
"\"compile\"",
":",
"self",
".",
"compile",
",",
"\"position\"",
":",
"self",
".",
"position",
",",
"\"version\"",
":",
"self",
".",
"version",
"}"
] | Get a json dict of the attributes of this object. | [
"Get",
"a",
"json",
"dict",
"of",
"the",
"attributes",
"of",
"this",
"object",
"."
] | python | valid |
HumanCellAtlas/cloud-blobstore | cloud_blobstore/s3.py | https://github.com/HumanCellAtlas/cloud-blobstore/blob/b8a60e8e8c0da0e39dda084cb467a34cd2d1ef0a/cloud_blobstore/s3.py#L295-L308 | def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime:
"""
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
"""
# An S3 object's creation date is stored in its LastModified field which stores the
# most recent value between the two.
return self.get_last_modified_date(bucket, key) | [
"def",
"get_creation_date",
"(",
"self",
",",
"bucket",
":",
"str",
",",
"key",
":",
"str",
",",
")",
"->",
"datetime",
":",
"# An S3 object's creation date is stored in its LastModified field which stores the",
"# most recent value between the two.",
"return",
"self",
".",
"get_last_modified_date",
"(",
"bucket",
",",
"key",
")"
] | Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date | [
"Retrieves",
"the",
"creation",
"date",
"for",
"a",
"given",
"key",
"in",
"a",
"given",
"bucket",
".",
":",
"param",
"bucket",
":",
"the",
"bucket",
"the",
"object",
"resides",
"in",
".",
":",
"param",
"key",
":",
"the",
"key",
"of",
"the",
"object",
"for",
"which",
"the",
"creation",
"date",
"is",
"being",
"retrieved",
".",
":",
"return",
":",
"the",
"creation",
"date"
] | python | train |
shoebot/shoebot | lib/graph/__init__.py | https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/graph/__init__.py#L551-L554 | def nodes_by_category(self, category):
""" Returns nodes with the given category attribute.
"""
return [n for n in self.nodes if n.category == category] | [
"def",
"nodes_by_category",
"(",
"self",
",",
"category",
")",
":",
"return",
"[",
"n",
"for",
"n",
"in",
"self",
".",
"nodes",
"if",
"n",
".",
"category",
"==",
"category",
"]"
] | Returns nodes with the given category attribute. | [
"Returns",
"nodes",
"with",
"the",
"given",
"category",
"attribute",
"."
] | python | valid |
VikParuchuri/percept | percept/utils/input.py | https://github.com/VikParuchuri/percept/blob/90304ba82053e2a9ad2bacaab3479403d3923bcf/percept/utils/input.py#L1-L11 | def import_from_string(import_string):
"""
Import a class from a string
import_string - string path to module to import using dot notation (foo.bar)
"""
import_split = import_string.split(".")
import_class = import_split[-1]
module_path = ".".join(import_split[:-1])
mod = __import__(module_path, fromlist=[import_class])
klass = getattr(mod, import_class)
return klass | [
"def",
"import_from_string",
"(",
"import_string",
")",
":",
"import_split",
"=",
"import_string",
".",
"split",
"(",
"\".\"",
")",
"import_class",
"=",
"import_split",
"[",
"-",
"1",
"]",
"module_path",
"=",
"\".\"",
".",
"join",
"(",
"import_split",
"[",
":",
"-",
"1",
"]",
")",
"mod",
"=",
"__import__",
"(",
"module_path",
",",
"fromlist",
"=",
"[",
"import_class",
"]",
")",
"klass",
"=",
"getattr",
"(",
"mod",
",",
"import_class",
")",
"return",
"klass"
] | Import a class from a string
import_string - string path to module to import using dot notation (foo.bar) | [
"Import",
"a",
"class",
"from",
"a",
"string",
"import_string",
"-",
"string",
"path",
"to",
"module",
"to",
"import",
"using",
"dot",
"notation",
"(",
"foo",
".",
"bar",
")"
] | python | train |
MisterWil/abodepy | abodepy/automation.py | https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/automation.py#L42-L54 | def trigger(self, only_manual=True):
"""Trigger a quick-action automation."""
if not self.is_quick_action and only_manual:
raise AbodeException((ERROR.TRIGGER_NON_QUICKACTION))
url = CONST.AUTOMATION_APPLY_URL
url = url.replace(
'$AUTOMATIONID$', self.automation_id)
self._abode.send_request(
method="put", url=url, data=self._automation)
return True | [
"def",
"trigger",
"(",
"self",
",",
"only_manual",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"is_quick_action",
"and",
"only_manual",
":",
"raise",
"AbodeException",
"(",
"(",
"ERROR",
".",
"TRIGGER_NON_QUICKACTION",
")",
")",
"url",
"=",
"CONST",
".",
"AUTOMATION_APPLY_URL",
"url",
"=",
"url",
".",
"replace",
"(",
"'$AUTOMATIONID$'",
",",
"self",
".",
"automation_id",
")",
"self",
".",
"_abode",
".",
"send_request",
"(",
"method",
"=",
"\"put\"",
",",
"url",
"=",
"url",
",",
"data",
"=",
"self",
".",
"_automation",
")",
"return",
"True"
] | Trigger a quick-action automation. | [
"Trigger",
"a",
"quick",
"-",
"action",
"automation",
"."
] | python | train |
bokeh/bokeh | bokeh/model.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/model.py#L734-L777 | def _to_json_like(self, include_defaults):
''' Returns a dictionary of the attributes of this object, in
a layout corresponding to what BokehJS expects at unmarshalling time.
This method does not convert "Bokeh types" into "plain JSON types,"
for example each child Model will still be a Model, rather
than turning into a reference, numpy isn't handled, etc.
That's what "json like" means.
This method should be considered "private" or "protected",
for use internal to Bokeh; use ``to_json()`` instead because
it gives you only plain JSON-compatible types.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default.
'''
all_attrs = self.properties_with_values(include_defaults=include_defaults)
# If __subtype__ is defined, then this model may introduce properties
# that don't exist on __view_model__ in bokehjs. Don't serialize such
# properties.
subtype = getattr(self.__class__, "__subtype__", None)
if subtype is not None and subtype != self.__class__.__view_model__:
attrs = {}
for attr, value in all_attrs.items():
if attr in self.__class__.__dict__:
continue
else:
attrs[attr] = value
else:
attrs = all_attrs
for (k, v) in attrs.items():
# we can't serialize Infinity, we send it as None and
# the other side has to fix it up. This transformation
# can't be in our json_encoder because the json
# module checks for inf before it calls the custom
# encoder.
if isinstance(v, float) and v == float('inf'):
attrs[k] = None
return attrs | [
"def",
"_to_json_like",
"(",
"self",
",",
"include_defaults",
")",
":",
"all_attrs",
"=",
"self",
".",
"properties_with_values",
"(",
"include_defaults",
"=",
"include_defaults",
")",
"# If __subtype__ is defined, then this model may introduce properties",
"# that don't exist on __view_model__ in bokehjs. Don't serialize such",
"# properties.",
"subtype",
"=",
"getattr",
"(",
"self",
".",
"__class__",
",",
"\"__subtype__\"",
",",
"None",
")",
"if",
"subtype",
"is",
"not",
"None",
"and",
"subtype",
"!=",
"self",
".",
"__class__",
".",
"__view_model__",
":",
"attrs",
"=",
"{",
"}",
"for",
"attr",
",",
"value",
"in",
"all_attrs",
".",
"items",
"(",
")",
":",
"if",
"attr",
"in",
"self",
".",
"__class__",
".",
"__dict__",
":",
"continue",
"else",
":",
"attrs",
"[",
"attr",
"]",
"=",
"value",
"else",
":",
"attrs",
"=",
"all_attrs",
"for",
"(",
"k",
",",
"v",
")",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"# we can't serialize Infinity, we send it as None and",
"# the other side has to fix it up. This transformation",
"# can't be in our json_encoder because the json",
"# module checks for inf before it calls the custom",
"# encoder.",
"if",
"isinstance",
"(",
"v",
",",
"float",
")",
"and",
"v",
"==",
"float",
"(",
"'inf'",
")",
":",
"attrs",
"[",
"k",
"]",
"=",
"None",
"return",
"attrs"
] | Returns a dictionary of the attributes of this object, in
a layout corresponding to what BokehJS expects at unmarshalling time.
This method does not convert "Bokeh types" into "plain JSON types,"
for example each child Model will still be a Model, rather
than turning into a reference, numpy isn't handled, etc.
That's what "json like" means.
This method should be considered "private" or "protected",
for use internal to Bokeh; use ``to_json()`` instead because
it gives you only plain JSON-compatible types.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default. | [
"Returns",
"a",
"dictionary",
"of",
"the",
"attributes",
"of",
"this",
"object",
"in",
"a",
"layout",
"corresponding",
"to",
"what",
"BokehJS",
"expects",
"at",
"unmarshalling",
"time",
"."
] | python | train |
annoviko/pyclustering | pyclustering/cluster/ga_maths.py | https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/ga_maths.py#L60-L66 | def get_centres(chromosomes, data, count_clusters):
"""!
"""
centres = ga_math.calc_centers(chromosomes, data, count_clusters)
return centres | [
"def",
"get_centres",
"(",
"chromosomes",
",",
"data",
",",
"count_clusters",
")",
":",
"centres",
"=",
"ga_math",
".",
"calc_centers",
"(",
"chromosomes",
",",
"data",
",",
"count_clusters",
")",
"return",
"centres"
] | ! | [
"!"
] | python | valid |
hearsaycorp/normalize | normalize/property/meta.py | https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/property/meta.py#L36-L113 | def has(selfie, self, args, kwargs):
"""This is called 'has' but is called indirectly. Each Property sub-class
is installed with this function which replaces their __new__.
It is called 'has', because it runs during property declaration, processes
the arguments and is responsible for returning an appropriate Property
subclass. As such it is identical to the 'has' function in Perl's Moose.
The API does not use the word, but the semantics are the same.
It is responsible for picking which sub-class of 'self' to invoke.
Unlike Moose, it will not dynamically create property types; if a type
does not exist it will be a hard error.
This function should *only* be concerned with picking the appropriate
object type, because unlike in Perl, python cannot re-bless objects from
one class to another.
"""
if args:
raise exc.PositionalArgumentsProhibited()
extra_traits = set(kwargs.pop('traits', tuple()))
safe_unless_ro = self.__safe_unless_ro__ or any(
x in kwargs for x in ("required", "isa", "check")
)
# detect initializer arguments only supported by a subclass and add
# them to extra_traits
for argname in kwargs:
if argname not in self.all_duckwargs:
# initializer does not support this arg. Do any subclasses?
implies_traits = set()
for traits, proptype in DUCKWARGS[argname]:
if isinstance(proptype, type(self)):
implies_traits.add(traits)
if proptype.__safe_unless_ro__:
safe_unless_ro = True
if len(implies_traits) > 1:
raise exc.AmbiguousPropertyTraitArg(
trait_arg=argname,
could_be=" ".join(
sorted(x.__name__ for x in implies_traits)
),
matched_traits=implies_traits,
)
elif not implies_traits:
raise exc.PropertyArgumentNotKnown(
badkwarg=argname,
badkwarg_value=kwargs[argname],
proptypename=self.__name__,
proptype=self,
)
else:
extra_traits.update(list(implies_traits)[0])
all_traits = set(self.traits) | extra_traits
if "unsafe" in all_traits and "safe" not in all_traits:
all_traits.remove("unsafe")
elif "ro" not in all_traits and safe_unless_ro:
all_traits.add("safe")
if "v1" not in all_traits:
if 'default' in kwargs and looks_like_v1_none(kwargs['default']):
all_traits.add("v1")
if 'safe' not in all_traits:
all_traits.add("safe")
trait_set_key = tuple(sorted(all_traits))
if trait_set_key not in PROPERTY_TYPES:
create_property_type_from_traits(trait_set_key)
property_type = PROPERTY_TYPES[trait_set_key]
if not isinstance(property_type, type(self)):
raise exc.PropertyTypeMismatch(
selected=type(property_type).__name__,
base=type(self).__name__,
)
return super(selfie, self).__new__(property_type) | [
"def",
"has",
"(",
"selfie",
",",
"self",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"args",
":",
"raise",
"exc",
".",
"PositionalArgumentsProhibited",
"(",
")",
"extra_traits",
"=",
"set",
"(",
"kwargs",
".",
"pop",
"(",
"'traits'",
",",
"tuple",
"(",
")",
")",
")",
"safe_unless_ro",
"=",
"self",
".",
"__safe_unless_ro__",
"or",
"any",
"(",
"x",
"in",
"kwargs",
"for",
"x",
"in",
"(",
"\"required\"",
",",
"\"isa\"",
",",
"\"check\"",
")",
")",
"# detect initializer arguments only supported by a subclass and add",
"# them to extra_traits",
"for",
"argname",
"in",
"kwargs",
":",
"if",
"argname",
"not",
"in",
"self",
".",
"all_duckwargs",
":",
"# initializer does not support this arg. Do any subclasses?",
"implies_traits",
"=",
"set",
"(",
")",
"for",
"traits",
",",
"proptype",
"in",
"DUCKWARGS",
"[",
"argname",
"]",
":",
"if",
"isinstance",
"(",
"proptype",
",",
"type",
"(",
"self",
")",
")",
":",
"implies_traits",
".",
"add",
"(",
"traits",
")",
"if",
"proptype",
".",
"__safe_unless_ro__",
":",
"safe_unless_ro",
"=",
"True",
"if",
"len",
"(",
"implies_traits",
")",
">",
"1",
":",
"raise",
"exc",
".",
"AmbiguousPropertyTraitArg",
"(",
"trait_arg",
"=",
"argname",
",",
"could_be",
"=",
"\" \"",
".",
"join",
"(",
"sorted",
"(",
"x",
".",
"__name__",
"for",
"x",
"in",
"implies_traits",
")",
")",
",",
"matched_traits",
"=",
"implies_traits",
",",
")",
"elif",
"not",
"implies_traits",
":",
"raise",
"exc",
".",
"PropertyArgumentNotKnown",
"(",
"badkwarg",
"=",
"argname",
",",
"badkwarg_value",
"=",
"kwargs",
"[",
"argname",
"]",
",",
"proptypename",
"=",
"self",
".",
"__name__",
",",
"proptype",
"=",
"self",
",",
")",
"else",
":",
"extra_traits",
".",
"update",
"(",
"list",
"(",
"implies_traits",
")",
"[",
"0",
"]",
")",
"all_traits",
"=",
"set",
"(",
"self",
".",
"traits",
")",
"|",
"extra_traits",
"if",
"\"unsafe\"",
"in",
"all_traits",
"and",
"\"safe\"",
"not",
"in",
"all_traits",
":",
"all_traits",
".",
"remove",
"(",
"\"unsafe\"",
")",
"elif",
"\"ro\"",
"not",
"in",
"all_traits",
"and",
"safe_unless_ro",
":",
"all_traits",
".",
"add",
"(",
"\"safe\"",
")",
"if",
"\"v1\"",
"not",
"in",
"all_traits",
":",
"if",
"'default'",
"in",
"kwargs",
"and",
"looks_like_v1_none",
"(",
"kwargs",
"[",
"'default'",
"]",
")",
":",
"all_traits",
".",
"add",
"(",
"\"v1\"",
")",
"if",
"'safe'",
"not",
"in",
"all_traits",
":",
"all_traits",
".",
"add",
"(",
"\"safe\"",
")",
"trait_set_key",
"=",
"tuple",
"(",
"sorted",
"(",
"all_traits",
")",
")",
"if",
"trait_set_key",
"not",
"in",
"PROPERTY_TYPES",
":",
"create_property_type_from_traits",
"(",
"trait_set_key",
")",
"property_type",
"=",
"PROPERTY_TYPES",
"[",
"trait_set_key",
"]",
"if",
"not",
"isinstance",
"(",
"property_type",
",",
"type",
"(",
"self",
")",
")",
":",
"raise",
"exc",
".",
"PropertyTypeMismatch",
"(",
"selected",
"=",
"type",
"(",
"property_type",
")",
".",
"__name__",
",",
"base",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
",",
")",
"return",
"super",
"(",
"selfie",
",",
"self",
")",
".",
"__new__",
"(",
"property_type",
")"
] | This is called 'has' but is called indirectly. Each Property sub-class
is installed with this function which replaces their __new__.
It is called 'has', because it runs during property declaration, processes
the arguments and is responsible for returning an appropriate Property
subclass. As such it is identical to the 'has' function in Perl's Moose.
The API does not use the word, but the semantics are the same.
It is responsible for picking which sub-class of 'self' to invoke.
Unlike Moose, it will not dynamically create property types; if a type
does not exist it will be a hard error.
This function should *only* be concerned with picking the appropriate
object type, because unlike in Perl, python cannot re-bless objects from
one class to another. | [
"This",
"is",
"called",
"has",
"but",
"is",
"called",
"indirectly",
".",
"Each",
"Property",
"sub",
"-",
"class",
"is",
"installed",
"with",
"this",
"function",
"which",
"replaces",
"their",
"__new__",
"."
] | python | train |
cocagne/txdbus | txdbus/client.py | https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/txdbus/client.py#L570-L575 | def _onMethodTimeout(self, serial, d):
"""
Called when a remote method invocation timeout occurs
"""
del self._pendingCalls[serial]
d.errback(error.TimeOut('Method call timed out')) | [
"def",
"_onMethodTimeout",
"(",
"self",
",",
"serial",
",",
"d",
")",
":",
"del",
"self",
".",
"_pendingCalls",
"[",
"serial",
"]",
"d",
".",
"errback",
"(",
"error",
".",
"TimeOut",
"(",
"'Method call timed out'",
")",
")"
] | Called when a remote method invocation timeout occurs | [
"Called",
"when",
"a",
"remote",
"method",
"invocation",
"timeout",
"occurs"
] | python | train |
zeaphoo/reston | reston/core/dvm.py | https://github.com/zeaphoo/reston/blob/96502487b2259572df55237c9526f92627465088/reston/core/dvm.py#L7938-L7948 | def get_fields(self):
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def:
for j in i.get_fields():
l.append(j)
return l | [
"def",
"get_fields",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
".",
"classes",
".",
"class_def",
":",
"for",
"j",
"in",
"i",
".",
"get_fields",
"(",
")",
":",
"l",
".",
"append",
"(",
"j",
")",
"return",
"l"
] | Return all field objects
:rtype: a list of :class:`EncodedField` objects | [
"Return",
"all",
"field",
"objects"
] | python | train |
jhuapl-boss/intern | intern/remote/boss/remote.py | https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/remote/boss/remote.py#L794-L809 | def create_metadata(self, resource, keys_vals):
"""
Associates new key-value pairs with the given resource.
Will attempt to add all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to assign to
given resource.
Raises:
HTTPErrorList on failure.
"""
self.metadata_service.set_auth(self._token_metadata)
self.metadata_service.create(resource, keys_vals) | [
"def",
"create_metadata",
"(",
"self",
",",
"resource",
",",
"keys_vals",
")",
":",
"self",
".",
"metadata_service",
".",
"set_auth",
"(",
"self",
".",
"_token_metadata",
")",
"self",
".",
"metadata_service",
".",
"create",
"(",
"resource",
",",
"keys_vals",
")"
] | Associates new key-value pairs with the given resource.
Will attempt to add all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to assign to
given resource.
Raises:
HTTPErrorList on failure. | [
"Associates",
"new",
"key",
"-",
"value",
"pairs",
"with",
"the",
"given",
"resource",
"."
] | python | train |
soravux/scoop | scoop/launch/workerLaunch.py | https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/launch/workerLaunch.py#L80-L106 | def _WorkerCommand_environment(self):
"""Return list of shell commands to prepare the environment for
bootstrap."""
worker = self.workersArguments
c = []
if worker.prolog:
c.extend([
"source",
worker.prolog,
"&&",
])
if worker.pythonPath and not self.isLocal():
# Tried to make it compliant to all shell variants.
c.extend([
"env",
"PYTHONPATH={0}:$PYTHONPATH".format(worker.pythonPath),
])
elif worker.pythonPath and self.isLocal():
# Tried to make it compliant to all shell variants.
c.extend([
"env",
"PYTHONPATH={0}:{1}".format(
worker.pythonPath,
os.environ.get("PYTHONPATH", ""),
),
])
return c | [
"def",
"_WorkerCommand_environment",
"(",
"self",
")",
":",
"worker",
"=",
"self",
".",
"workersArguments",
"c",
"=",
"[",
"]",
"if",
"worker",
".",
"prolog",
":",
"c",
".",
"extend",
"(",
"[",
"\"source\"",
",",
"worker",
".",
"prolog",
",",
"\"&&\"",
",",
"]",
")",
"if",
"worker",
".",
"pythonPath",
"and",
"not",
"self",
".",
"isLocal",
"(",
")",
":",
"# Tried to make it compliant to all shell variants.",
"c",
".",
"extend",
"(",
"[",
"\"env\"",
",",
"\"PYTHONPATH={0}:$PYTHONPATH\"",
".",
"format",
"(",
"worker",
".",
"pythonPath",
")",
",",
"]",
")",
"elif",
"worker",
".",
"pythonPath",
"and",
"self",
".",
"isLocal",
"(",
")",
":",
"# Tried to make it compliant to all shell variants.",
"c",
".",
"extend",
"(",
"[",
"\"env\"",
",",
"\"PYTHONPATH={0}:{1}\"",
".",
"format",
"(",
"worker",
".",
"pythonPath",
",",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYTHONPATH\"",
",",
"\"\"",
")",
",",
")",
",",
"]",
")",
"return",
"c"
] | Return list of shell commands to prepare the environment for
bootstrap. | [
"Return",
"list",
"of",
"shell",
"commands",
"to",
"prepare",
"the",
"environment",
"for",
"bootstrap",
"."
] | python | train |
square/connect-python-sdk | squareconnect/models/order_fulfillment_pickup_details.py | https://github.com/square/connect-python-sdk/blob/adc1d09e817986cdc607391580f71d6b48ed4066/squareconnect/models/order_fulfillment_pickup_details.py#L456-L470 | def cancel_reason(self, cancel_reason):
"""
Sets the cancel_reason of this OrderFulfillmentPickupDetails.
A description of why the pickup was canceled. Max length is 100 characters.
:param cancel_reason: The cancel_reason of this OrderFulfillmentPickupDetails.
:type: str
"""
if cancel_reason is None:
raise ValueError("Invalid value for `cancel_reason`, must not be `None`")
if len(cancel_reason) > 100:
raise ValueError("Invalid value for `cancel_reason`, length must be less than `100`")
self._cancel_reason = cancel_reason | [
"def",
"cancel_reason",
"(",
"self",
",",
"cancel_reason",
")",
":",
"if",
"cancel_reason",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `cancel_reason`, must not be `None`\"",
")",
"if",
"len",
"(",
"cancel_reason",
")",
">",
"100",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `cancel_reason`, length must be less than `100`\"",
")",
"self",
".",
"_cancel_reason",
"=",
"cancel_reason"
] | Sets the cancel_reason of this OrderFulfillmentPickupDetails.
A description of why the pickup was canceled. Max length is 100 characters.
:param cancel_reason: The cancel_reason of this OrderFulfillmentPickupDetails.
:type: str | [
"Sets",
"the",
"cancel_reason",
"of",
"this",
"OrderFulfillmentPickupDetails",
".",
"A",
"description",
"of",
"why",
"the",
"pickup",
"was",
"canceled",
".",
"Max",
"length",
"is",
"100",
"characters",
"."
] | python | train |
common-workflow-language/cwltool | cwltool/docker_id.py | https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/docker_id.py#L109-L120 | def docker_machine_id(): # type: () -> Tuple[Optional[int], Optional[int]]
"""
Asks docker-machine for active machine and gets the UID of the docker user
inside the vm
:return: tuple (UID, GID), or (None, None) if error (e.g. docker-machine not present or stopped)
"""
machine_name = docker_machine_name()
if not machine_name:
return (None, None)
uid = cmd_output_to_int(['docker-machine', 'ssh', machine_name, "id -u"])
gid = cmd_output_to_int(['docker-machine', 'ssh', machine_name, "id -g"])
return (uid, gid) | [
"def",
"docker_machine_id",
"(",
")",
":",
"# type: () -> Tuple[Optional[int], Optional[int]]",
"machine_name",
"=",
"docker_machine_name",
"(",
")",
"if",
"not",
"machine_name",
":",
"return",
"(",
"None",
",",
"None",
")",
"uid",
"=",
"cmd_output_to_int",
"(",
"[",
"'docker-machine'",
",",
"'ssh'",
",",
"machine_name",
",",
"\"id -u\"",
"]",
")",
"gid",
"=",
"cmd_output_to_int",
"(",
"[",
"'docker-machine'",
",",
"'ssh'",
",",
"machine_name",
",",
"\"id -g\"",
"]",
")",
"return",
"(",
"uid",
",",
"gid",
")"
] | Asks docker-machine for active machine and gets the UID of the docker user
inside the vm
:return: tuple (UID, GID), or (None, None) if error (e.g. docker-machine not present or stopped) | [
"Asks",
"docker",
"-",
"machine",
"for",
"active",
"machine",
"and",
"gets",
"the",
"UID",
"of",
"the",
"docker",
"user",
"inside",
"the",
"vm",
":",
"return",
":",
"tuple",
"(",
"UID",
"GID",
")",
"or",
"(",
"None",
"None",
")",
"if",
"error",
"(",
"e",
".",
"g",
".",
"docker",
"-",
"machine",
"not",
"present",
"or",
"stopped",
")"
] | python | train |
a1ezzz/wasp-general | wasp_general/cache.py | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/cache.py#L299-L331 | def put(self, result, decorated_function, *args, **kwargs):
""" :meth:`WCacheStorage.put` method implementation
"""
self.__check(decorated_function, *args, **kwargs)
ref = weakref.ref(args[0])
if decorated_function not in self._storage:
cache_entry = self._cache_record_cls.create(result, decorated_function, *args, **kwargs)
self._storage[decorated_function] = [{'instance': ref, 'result': cache_entry}]
else:
instance_found = False
for i in self._storage[decorated_function]:
if i['instance']() == args[0]:
cache_entry = i['result']
cache_entry.update(result, *args, **kwargs)
instance_found = True
break
if instance_found is False:
cache_entry = self._cache_record_cls.create(result, decorated_function, *args, **kwargs)
self._storage[decorated_function].append({'instance': ref, 'result': cache_entry})
def finalize_ref():
if decorated_function in self._storage:
fn_list = self._storage[decorated_function]
if len(fn_list) == 1 and fn_list[0]['instance'] == ref:
del self._storage[decorated_function]
for i in range(len(fn_list)):
if fn_list[i]['instance'] == ref:
fn_list.pop(i)
return
weakref.finalize(args[0], finalize_ref) | [
"def",
"put",
"(",
"self",
",",
"result",
",",
"decorated_function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"__check",
"(",
"decorated_function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"ref",
"=",
"weakref",
".",
"ref",
"(",
"args",
"[",
"0",
"]",
")",
"if",
"decorated_function",
"not",
"in",
"self",
".",
"_storage",
":",
"cache_entry",
"=",
"self",
".",
"_cache_record_cls",
".",
"create",
"(",
"result",
",",
"decorated_function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_storage",
"[",
"decorated_function",
"]",
"=",
"[",
"{",
"'instance'",
":",
"ref",
",",
"'result'",
":",
"cache_entry",
"}",
"]",
"else",
":",
"instance_found",
"=",
"False",
"for",
"i",
"in",
"self",
".",
"_storage",
"[",
"decorated_function",
"]",
":",
"if",
"i",
"[",
"'instance'",
"]",
"(",
")",
"==",
"args",
"[",
"0",
"]",
":",
"cache_entry",
"=",
"i",
"[",
"'result'",
"]",
"cache_entry",
".",
"update",
"(",
"result",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"instance_found",
"=",
"True",
"break",
"if",
"instance_found",
"is",
"False",
":",
"cache_entry",
"=",
"self",
".",
"_cache_record_cls",
".",
"create",
"(",
"result",
",",
"decorated_function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_storage",
"[",
"decorated_function",
"]",
".",
"append",
"(",
"{",
"'instance'",
":",
"ref",
",",
"'result'",
":",
"cache_entry",
"}",
")",
"def",
"finalize_ref",
"(",
")",
":",
"if",
"decorated_function",
"in",
"self",
".",
"_storage",
":",
"fn_list",
"=",
"self",
".",
"_storage",
"[",
"decorated_function",
"]",
"if",
"len",
"(",
"fn_list",
")",
"==",
"1",
"and",
"fn_list",
"[",
"0",
"]",
"[",
"'instance'",
"]",
"==",
"ref",
":",
"del",
"self",
".",
"_storage",
"[",
"decorated_function",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"fn_list",
")",
")",
":",
"if",
"fn_list",
"[",
"i",
"]",
"[",
"'instance'",
"]",
"==",
"ref",
":",
"fn_list",
".",
"pop",
"(",
"i",
")",
"return",
"weakref",
".",
"finalize",
"(",
"args",
"[",
"0",
"]",
",",
"finalize_ref",
")"
] | :meth:`WCacheStorage.put` method implementation | [
":",
"meth",
":",
"WCacheStorage",
".",
"put",
"method",
"implementation"
] | python | train |
emilydolson/avida-spatial-tools | avidaspatial/utils.py | https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/utils.py#L195-L214 | def convert_world_to_phenotype(world):
"""
Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings
"""
if set(world.resources) != set(world.tasks):
print("Warning: world phenotypes don't correspond to phenotypes")
if set(world.resources).issubset(set(world.tasks)):
conversion_func = function_with_args(res_set_to_phenotype, world.tasks)
else:
conversion_func = \
function_with_args(res_set_to_phenotype, world.resources)
grid = agg_grid(deepcopy(world), conversion_func)
return grid | [
"def",
"convert_world_to_phenotype",
"(",
"world",
")",
":",
"if",
"set",
"(",
"world",
".",
"resources",
")",
"!=",
"set",
"(",
"world",
".",
"tasks",
")",
":",
"print",
"(",
"\"Warning: world phenotypes don't correspond to phenotypes\"",
")",
"if",
"set",
"(",
"world",
".",
"resources",
")",
".",
"issubset",
"(",
"set",
"(",
"world",
".",
"tasks",
")",
")",
":",
"conversion_func",
"=",
"function_with_args",
"(",
"res_set_to_phenotype",
",",
"world",
".",
"tasks",
")",
"else",
":",
"conversion_func",
"=",
"function_with_args",
"(",
"res_set_to_phenotype",
",",
"world",
".",
"resources",
")",
"grid",
"=",
"agg_grid",
"(",
"deepcopy",
"(",
"world",
")",
",",
"conversion_func",
")",
"return",
"grid"
] | Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings | [
"Converts",
"sets",
"indicating",
"the",
"resources",
"present",
"in",
"a",
"single",
"cell",
"to",
"binary",
"strings",
"(",
"bit",
"order",
"is",
"based",
"on",
"the",
"order",
"of",
"resources",
"in",
"world",
".",
"resources",
")",
"."
] | python | train |
kdeldycke/maildir-deduplicate | maildir_deduplicate/mail.py | https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/maildir_deduplicate/mail.py#L117-L125 | def subject(self):
""" Normalized subject.
Only used for debugging and human-friendly logging.
"""
# Fetch subject from first message.
subject = self.message.get('Subject', '')
subject, _ = re.subn(r'\s+', ' ', subject)
return subject | [
"def",
"subject",
"(",
"self",
")",
":",
"# Fetch subject from first message.",
"subject",
"=",
"self",
".",
"message",
".",
"get",
"(",
"'Subject'",
",",
"''",
")",
"subject",
",",
"_",
"=",
"re",
".",
"subn",
"(",
"r'\\s+'",
",",
"' '",
",",
"subject",
")",
"return",
"subject"
] | Normalized subject.
Only used for debugging and human-friendly logging. | [
"Normalized",
"subject",
"."
] | python | train |
theiviaxx/Frog | frog/views/piece.py | https://github.com/theiviaxx/Frog/blob/a9475463a8eed1323fe3ef5d51f9751fb1dc9edd/frog/views/piece.py#L57-L67 | def image(request, obj_id):
"""Handles a request based on method and calls the appropriate function"""
obj = Image.objects.get(pk=obj_id)
if request.method == 'POST':
return post(request, obj)
elif request.method == 'PUT':
getPutData(request)
return put(request, obj)
elif request.method == 'DELETE':
getPutData(request)
return delete(request, obj) | [
"def",
"image",
"(",
"request",
",",
"obj_id",
")",
":",
"obj",
"=",
"Image",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"obj_id",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"return",
"post",
"(",
"request",
",",
"obj",
")",
"elif",
"request",
".",
"method",
"==",
"'PUT'",
":",
"getPutData",
"(",
"request",
")",
"return",
"put",
"(",
"request",
",",
"obj",
")",
"elif",
"request",
".",
"method",
"==",
"'DELETE'",
":",
"getPutData",
"(",
"request",
")",
"return",
"delete",
"(",
"request",
",",
"obj",
")"
] | Handles a request based on method and calls the appropriate function | [
"Handles",
"a",
"request",
"based",
"on",
"method",
"and",
"calls",
"the",
"appropriate",
"function"
] | python | train |
jf-parent/brome | brome/core/proxy_driver.py | https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/core/proxy_driver.py#L208-L303 | def find_all(self, selector, **kwargs):
"""Return all the elements found with a selector
Args:
selector (str): the selector used to find the element
Kwargs:
wait_until_present (bool) default configurable via
proxy_driver:wait_until_present_before_find
wait_until_visible (bool) default configurable via
proxy_driver:wait_until_visible_before_find
raise_exception (bool) default configurable via
proxy_driver:raise_exception
Returns:
empty list if no element was found
proxy_element_list when element are found
Raises:
this function might raise an exception depending on the
raise_exception kwargs
or
the config proxy_driver:raise_exception
"""
self.debug_log("Finding elements with selector: %s" % selector)
raise_exception = kwargs.get(
'raise_exception',
BROME_CONFIG['proxy_driver']['raise_exception']
)
self.debug_log("effective raise_exception: %s" % raise_exception)
wait_until_present = kwargs.get(
'wait_until_present',
BROME_CONFIG['proxy_driver']['wait_until_present_before_find']
)
self.debug_log(
"effective wait_until_present: %s" % wait_until_present
)
wait_until_visible = kwargs.get(
'wait_until_visible',
BROME_CONFIG['proxy_driver']['wait_until_visible_before_find']
)
self.debug_log(
"effective wait_until_visible: %s" % wait_until_visible
)
_selector = Selector(self, selector)
found = False
if wait_until_visible:
# we don't raise exception here otherwise none visible
# element will always raise exception
# TODO find a good way to make it configurable
found = self.wait_until_visible(selector, raise_exception=False)
if wait_until_present and not found:
found = self.wait_until_present(
selector,
raise_exception=raise_exception
)
if not found:
self.debug_log("find_all (%s): No element found" % _selector)
return []
try:
elements = getattr(
self._driver,
_selector.find_function
)(_selector.get_selector())
except exceptions.NoSuchElementException:
self.debug_log("find_all (%s): No element found" % _selector)
self.print_javascript_error()
if raise_exception:
raise exceptions.NoSuchElementException(_selector)
else:
return []
if type(elements) == list:
if len(elements):
self.debug_log("find_all (%s): Element found" % _selector)
return ProxyElementList(elements, _selector, self)
else:
msg = "find_all (%s): No element found" % _selector
self.debug_log(msg)
self.print_javascript_error()
if raise_exception:
raise exceptions.NoSuchElementException(msg)
else:
return []
else:
self.debug_log("find_all (%s): Element found" % _selector)
return [ProxyElement(elements, _selector, self)] | [
"def",
"find_all",
"(",
"self",
",",
"selector",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"debug_log",
"(",
"\"Finding elements with selector: %s\"",
"%",
"selector",
")",
"raise_exception",
"=",
"kwargs",
".",
"get",
"(",
"'raise_exception'",
",",
"BROME_CONFIG",
"[",
"'proxy_driver'",
"]",
"[",
"'raise_exception'",
"]",
")",
"self",
".",
"debug_log",
"(",
"\"effective raise_exception: %s\"",
"%",
"raise_exception",
")",
"wait_until_present",
"=",
"kwargs",
".",
"get",
"(",
"'wait_until_present'",
",",
"BROME_CONFIG",
"[",
"'proxy_driver'",
"]",
"[",
"'wait_until_present_before_find'",
"]",
")",
"self",
".",
"debug_log",
"(",
"\"effective wait_until_present: %s\"",
"%",
"wait_until_present",
")",
"wait_until_visible",
"=",
"kwargs",
".",
"get",
"(",
"'wait_until_visible'",
",",
"BROME_CONFIG",
"[",
"'proxy_driver'",
"]",
"[",
"'wait_until_visible_before_find'",
"]",
")",
"self",
".",
"debug_log",
"(",
"\"effective wait_until_visible: %s\"",
"%",
"wait_until_visible",
")",
"_selector",
"=",
"Selector",
"(",
"self",
",",
"selector",
")",
"found",
"=",
"False",
"if",
"wait_until_visible",
":",
"# we don't raise exception here otherwise none visible",
"# element will always raise exception",
"# TODO find a good way to make it configurable",
"found",
"=",
"self",
".",
"wait_until_visible",
"(",
"selector",
",",
"raise_exception",
"=",
"False",
")",
"if",
"wait_until_present",
"and",
"not",
"found",
":",
"found",
"=",
"self",
".",
"wait_until_present",
"(",
"selector",
",",
"raise_exception",
"=",
"raise_exception",
")",
"if",
"not",
"found",
":",
"self",
".",
"debug_log",
"(",
"\"find_all (%s): No element found\"",
"%",
"_selector",
")",
"return",
"[",
"]",
"try",
":",
"elements",
"=",
"getattr",
"(",
"self",
".",
"_driver",
",",
"_selector",
".",
"find_function",
")",
"(",
"_selector",
".",
"get_selector",
"(",
")",
")",
"except",
"exceptions",
".",
"NoSuchElementException",
":",
"self",
".",
"debug_log",
"(",
"\"find_all (%s): No element found\"",
"%",
"_selector",
")",
"self",
".",
"print_javascript_error",
"(",
")",
"if",
"raise_exception",
":",
"raise",
"exceptions",
".",
"NoSuchElementException",
"(",
"_selector",
")",
"else",
":",
"return",
"[",
"]",
"if",
"type",
"(",
"elements",
")",
"==",
"list",
":",
"if",
"len",
"(",
"elements",
")",
":",
"self",
".",
"debug_log",
"(",
"\"find_all (%s): Element found\"",
"%",
"_selector",
")",
"return",
"ProxyElementList",
"(",
"elements",
",",
"_selector",
",",
"self",
")",
"else",
":",
"msg",
"=",
"\"find_all (%s): No element found\"",
"%",
"_selector",
"self",
".",
"debug_log",
"(",
"msg",
")",
"self",
".",
"print_javascript_error",
"(",
")",
"if",
"raise_exception",
":",
"raise",
"exceptions",
".",
"NoSuchElementException",
"(",
"msg",
")",
"else",
":",
"return",
"[",
"]",
"else",
":",
"self",
".",
"debug_log",
"(",
"\"find_all (%s): Element found\"",
"%",
"_selector",
")",
"return",
"[",
"ProxyElement",
"(",
"elements",
",",
"_selector",
",",
"self",
")",
"]"
] | Return all the elements found with a selector
Args:
selector (str): the selector used to find the element
Kwargs:
wait_until_present (bool) default configurable via
proxy_driver:wait_until_present_before_find
wait_until_visible (bool) default configurable via
proxy_driver:wait_until_visible_before_find
raise_exception (bool) default configurable via
proxy_driver:raise_exception
Returns:
empty list if no element was found
proxy_element_list when element are found
Raises:
this function might raise an exception depending on the
raise_exception kwargs
or
the config proxy_driver:raise_exception | [
"Return",
"all",
"the",
"elements",
"found",
"with",
"a",
"selector"
] | python | train |
stevearc/dynamo3 | dynamo3/connection.py | https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L781-L811 | def batch_get(self, tablename, keys, attributes=None, alias=None,
consistent=False, return_capacity=None):
"""
Perform a batch get of many items in a table
Parameters
----------
tablename : str
Name of the table to fetch from
keys : list or iterable
List or iterable of primary key dicts that specify the hash key and
the optional range key of each item to fetch
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
"""
keys = [self.dynamizer.encode_keys(k) for k in keys]
return_capacity = self._default_capacity(return_capacity)
ret = GetResultSet(self, tablename, keys,
consistent=consistent, attributes=attributes,
alias=alias, return_capacity=return_capacity)
return ret | [
"def",
"batch_get",
"(",
"self",
",",
"tablename",
",",
"keys",
",",
"attributes",
"=",
"None",
",",
"alias",
"=",
"None",
",",
"consistent",
"=",
"False",
",",
"return_capacity",
"=",
"None",
")",
":",
"keys",
"=",
"[",
"self",
".",
"dynamizer",
".",
"encode_keys",
"(",
"k",
")",
"for",
"k",
"in",
"keys",
"]",
"return_capacity",
"=",
"self",
".",
"_default_capacity",
"(",
"return_capacity",
")",
"ret",
"=",
"GetResultSet",
"(",
"self",
",",
"tablename",
",",
"keys",
",",
"consistent",
"=",
"consistent",
",",
"attributes",
"=",
"attributes",
",",
"alias",
"=",
"alias",
",",
"return_capacity",
"=",
"return_capacity",
")",
"return",
"ret"
] | Perform a batch get of many items in a table
Parameters
----------
tablename : str
Name of the table to fetch from
keys : list or iterable
List or iterable of primary key dicts that specify the hash key and
the optional range key of each item to fetch
attributes : str or list, optional
See docs for ProjectionExpression. If list, it will be joined by
commas.
alias : dict, optional
See docs for ExpressionAttributeNames
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE) | [
"Perform",
"a",
"batch",
"get",
"of",
"many",
"items",
"in",
"a",
"table"
] | python | train |
googleapis/google-cloud-python | bigtable/google/cloud/bigtable/column_family.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/column_family.py#L242-L251 | def to_pb(self):
"""Converts the column family to a protobuf.
:rtype: :class:`.table_v2_pb2.ColumnFamily`
:returns: The converted current object.
"""
if self.gc_rule is None:
return table_v2_pb2.ColumnFamily()
else:
return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) | [
"def",
"to_pb",
"(",
"self",
")",
":",
"if",
"self",
".",
"gc_rule",
"is",
"None",
":",
"return",
"table_v2_pb2",
".",
"ColumnFamily",
"(",
")",
"else",
":",
"return",
"table_v2_pb2",
".",
"ColumnFamily",
"(",
"gc_rule",
"=",
"self",
".",
"gc_rule",
".",
"to_pb",
"(",
")",
")"
] | Converts the column family to a protobuf.
:rtype: :class:`.table_v2_pb2.ColumnFamily`
:returns: The converted current object. | [
"Converts",
"the",
"column",
"family",
"to",
"a",
"protobuf",
"."
] | python | train |
limix/glimix-core | glimix_core/cov/_linear.py | https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/cov/_linear.py#L79-L89 | def value(self):
"""
Covariance matrix.
Returns
-------
K : ndarray
s⋅XXᵀ.
"""
X = self.X
return self.scale * (X @ X.T) | [
"def",
"value",
"(",
"self",
")",
":",
"X",
"=",
"self",
".",
"X",
"return",
"self",
".",
"scale",
"*",
"(",
"X",
"@",
"X",
".",
"T",
")"
] | Covariance matrix.
Returns
-------
K : ndarray
s⋅XXᵀ. | [
"Covariance",
"matrix",
"."
] | python | valid |
amaas-fintech/amaas-core-sdk-python | amaascore/assets/asset.py | https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/assets/asset.py#L91-L97 | def maturity_date(self, value):
"""
The date on which the asset matures and no longer holds value
:param value:
:return:
"""
self._maturity_date = parse(value).date() if isinstance(value, type_check) else value | [
"def",
"maturity_date",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_maturity_date",
"=",
"parse",
"(",
"value",
")",
".",
"date",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"type_check",
")",
"else",
"value"
] | The date on which the asset matures and no longer holds value
:param value:
:return: | [
"The",
"date",
"on",
"which",
"the",
"asset",
"matures",
"and",
"no",
"longer",
"holds",
"value",
":",
"param",
"value",
":",
":",
"return",
":"
] | python | train |
nfcpy/nfcpy | src/nfc/snep/client.py | https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/snep/client.py#L223-L252 | def put(self, ndef_message, timeout=1.0):
"""Send an NDEF message to the server. Temporarily connects to
the default SNEP server if the client is not yet connected.
.. deprecated:: 0.13
Use :meth:`put_records` or :meth:`put_octets`.
"""
if not self.socket:
try:
self.connect('urn:nfc:sn:snep')
except nfc.llcp.ConnectRefused:
return False
else:
self.release_connection = True
else:
self.release_connection = False
try:
ndef_msgsize = struct.pack('>L', len(str(ndef_message)))
snep_request = b'\x10\x02' + ndef_msgsize + str(ndef_message)
if send_request(self.socket, snep_request, self.send_miu):
response = recv_response(self.socket, 0, timeout)
if response is not None:
if response[1] != 0x81:
raise SnepError(response[1])
return True
return False
finally:
if self.release_connection:
self.close() | [
"def",
"put",
"(",
"self",
",",
"ndef_message",
",",
"timeout",
"=",
"1.0",
")",
":",
"if",
"not",
"self",
".",
"socket",
":",
"try",
":",
"self",
".",
"connect",
"(",
"'urn:nfc:sn:snep'",
")",
"except",
"nfc",
".",
"llcp",
".",
"ConnectRefused",
":",
"return",
"False",
"else",
":",
"self",
".",
"release_connection",
"=",
"True",
"else",
":",
"self",
".",
"release_connection",
"=",
"False",
"try",
":",
"ndef_msgsize",
"=",
"struct",
".",
"pack",
"(",
"'>L'",
",",
"len",
"(",
"str",
"(",
"ndef_message",
")",
")",
")",
"snep_request",
"=",
"b'\\x10\\x02'",
"+",
"ndef_msgsize",
"+",
"str",
"(",
"ndef_message",
")",
"if",
"send_request",
"(",
"self",
".",
"socket",
",",
"snep_request",
",",
"self",
".",
"send_miu",
")",
":",
"response",
"=",
"recv_response",
"(",
"self",
".",
"socket",
",",
"0",
",",
"timeout",
")",
"if",
"response",
"is",
"not",
"None",
":",
"if",
"response",
"[",
"1",
"]",
"!=",
"0x81",
":",
"raise",
"SnepError",
"(",
"response",
"[",
"1",
"]",
")",
"return",
"True",
"return",
"False",
"finally",
":",
"if",
"self",
".",
"release_connection",
":",
"self",
".",
"close",
"(",
")"
] | Send an NDEF message to the server. Temporarily connects to
the default SNEP server if the client is not yet connected.
.. deprecated:: 0.13
Use :meth:`put_records` or :meth:`put_octets`. | [
"Send",
"an",
"NDEF",
"message",
"to",
"the",
"server",
".",
"Temporarily",
"connects",
"to",
"the",
"default",
"SNEP",
"server",
"if",
"the",
"client",
"is",
"not",
"yet",
"connected",
"."
] | python | train |
SuperCowPowers/workbench | workbench/utils/pcap_streamer.py | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/utils/pcap_streamer.py#L83-L98 | def store_file(self, filename):
''' Store a file into workbench '''
# Spin up workbench
self.workbench = zerorpc.Client(timeout=300, heartbeat=60)
self.workbench.connect("tcp://127.0.0.1:4242")
# Open the file and send it to workbench
storage_name = "streaming_pcap" + str(self.pcap_index)
print filename, storage_name
with open(filename,'rb') as f:
self.workbench.store_sample(f.read(), storage_name, 'pcap')
self.pcap_index += 1
# Close workbench client
self.workbench.close() | [
"def",
"store_file",
"(",
"self",
",",
"filename",
")",
":",
"# Spin up workbench",
"self",
".",
"workbench",
"=",
"zerorpc",
".",
"Client",
"(",
"timeout",
"=",
"300",
",",
"heartbeat",
"=",
"60",
")",
"self",
".",
"workbench",
".",
"connect",
"(",
"\"tcp://127.0.0.1:4242\"",
")",
"# Open the file and send it to workbench",
"storage_name",
"=",
"\"streaming_pcap\"",
"+",
"str",
"(",
"self",
".",
"pcap_index",
")",
"print",
"filename",
",",
"storage_name",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"self",
".",
"workbench",
".",
"store_sample",
"(",
"f",
".",
"read",
"(",
")",
",",
"storage_name",
",",
"'pcap'",
")",
"self",
".",
"pcap_index",
"+=",
"1",
"# Close workbench client",
"self",
".",
"workbench",
".",
"close",
"(",
")"
] | Store a file into workbench | [
"Store",
"a",
"file",
"into",
"workbench"
] | python | train |
pytroll/satpy | satpy/composites/__init__.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/composites/__init__.py#L114-L143 | def load_compositors(self, sensor_names):
"""Load all compositor configs for the provided sensors.
Args:
sensor_names (list of strings): Sensor names that have matching
``sensor_name.yaml`` config files.
Returns:
(comps, mods): Where `comps` is a dictionary:
sensor_name -> composite ID -> compositor object
And `mods` is a dictionary:
sensor_name -> modifier name -> (modifier class,
modifiers options)
Note that these dictionaries are copies of those cached in
this object.
"""
comps = {}
mods = {}
for sensor_name in sensor_names:
if sensor_name not in self.compositors:
self.load_sensor_composites(sensor_name)
if sensor_name in self.compositors:
comps[sensor_name] = DatasetDict(
self.compositors[sensor_name].copy())
mods[sensor_name] = self.modifiers[sensor_name].copy()
return comps, mods | [
"def",
"load_compositors",
"(",
"self",
",",
"sensor_names",
")",
":",
"comps",
"=",
"{",
"}",
"mods",
"=",
"{",
"}",
"for",
"sensor_name",
"in",
"sensor_names",
":",
"if",
"sensor_name",
"not",
"in",
"self",
".",
"compositors",
":",
"self",
".",
"load_sensor_composites",
"(",
"sensor_name",
")",
"if",
"sensor_name",
"in",
"self",
".",
"compositors",
":",
"comps",
"[",
"sensor_name",
"]",
"=",
"DatasetDict",
"(",
"self",
".",
"compositors",
"[",
"sensor_name",
"]",
".",
"copy",
"(",
")",
")",
"mods",
"[",
"sensor_name",
"]",
"=",
"self",
".",
"modifiers",
"[",
"sensor_name",
"]",
".",
"copy",
"(",
")",
"return",
"comps",
",",
"mods"
] | Load all compositor configs for the provided sensors.
Args:
sensor_names (list of strings): Sensor names that have matching
``sensor_name.yaml`` config files.
Returns:
(comps, mods): Where `comps` is a dictionary:
sensor_name -> composite ID -> compositor object
And `mods` is a dictionary:
sensor_name -> modifier name -> (modifier class,
modifiers options)
Note that these dictionaries are copies of those cached in
this object. | [
"Load",
"all",
"compositor",
"configs",
"for",
"the",
"provided",
"sensors",
"."
] | python | train |
MikaSoftware/django-starterkit | starterkit/auth/backends.py | https://github.com/MikaSoftware/django-starterkit/blob/b82c4cb56ab8ec0b46136e9efcc3d6481fca1eeb/starterkit/auth/backends.py#L27-L38 | def authenticate(self, username=None, password=None, **kwargs):
"""Allow users to log in with their email address or username."""
try:
# Try to fetch the user by searching the username or email field
user = get_user_model().objects.filter(Q(username=username)|Q(email=username))[0]
if check_password(password, user.password):
return user
else:
return None
except Exception as e:
# No user was found, return None - triggers default login failed
return None | [
"def",
"authenticate",
"(",
"self",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"# Try to fetch the user by searching the username or email field",
"user",
"=",
"get_user_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"Q",
"(",
"username",
"=",
"username",
")",
"|",
"Q",
"(",
"email",
"=",
"username",
")",
")",
"[",
"0",
"]",
"if",
"check_password",
"(",
"password",
",",
"user",
".",
"password",
")",
":",
"return",
"user",
"else",
":",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"# No user was found, return None - triggers default login failed",
"return",
"None"
] | Allow users to log in with their email address or username. | [
"Allow",
"users",
"to",
"log",
"in",
"with",
"their",
"email",
"address",
"or",
"username",
"."
] | python | train |
aparo/pyes | pyes/orm/queryset.py | https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/orm/queryset.py#L427-L435 | def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field.
"""
self._insert(objs, batch_size=batch_size, return_id=False, force_insert=True)
self.refresh() | [
"def",
"bulk_create",
"(",
"self",
",",
"objs",
",",
"batch_size",
"=",
"None",
")",
":",
"self",
".",
"_insert",
"(",
"objs",
",",
"batch_size",
"=",
"batch_size",
",",
"return_id",
"=",
"False",
",",
"force_insert",
"=",
"True",
")",
"self",
".",
"refresh",
"(",
")"
] | Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field. | [
"Inserts",
"each",
"of",
"the",
"instances",
"into",
"the",
"database",
".",
"This",
"does",
"*",
"not",
"*",
"call",
"save",
"()",
"on",
"each",
"of",
"the",
"instances",
"does",
"not",
"send",
"any",
"pre",
"/",
"post",
"save",
"signals",
"and",
"does",
"not",
"set",
"the",
"primary",
"key",
"attribute",
"if",
"it",
"is",
"an",
"autoincrement",
"field",
"."
] | python | train |
nerdvegas/rez | src/rez/serialise.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/serialise.py#L369-L395 | def load_yaml(stream, **kwargs):
"""Load yaml-formatted data from a stream.
Args:
stream (file-like object).
Returns:
dict.
"""
# if there's an error parsing the yaml, and you pass yaml.load a string,
# it will print lines of context, but will print "<string>" instead of a
# filename; if you pass a stream, it will print the filename, but no lines
# of context.
# Get the best of both worlds, by passing it a string, then replacing
# "<string>" with the filename if there's an error...
content = stream.read()
try:
return yaml.load(content) or {}
except Exception, e:
if stream.name and stream.name != '<string>':
for mark_name in 'context_mark', 'problem_mark':
mark = getattr(e, mark_name, None)
if mark is None:
continue
if getattr(mark, 'name') == '<string>':
mark.name = stream.name
raise e | [
"def",
"load_yaml",
"(",
"stream",
",",
"*",
"*",
"kwargs",
")",
":",
"# if there's an error parsing the yaml, and you pass yaml.load a string,",
"# it will print lines of context, but will print \"<string>\" instead of a",
"# filename; if you pass a stream, it will print the filename, but no lines",
"# of context.",
"# Get the best of both worlds, by passing it a string, then replacing",
"# \"<string>\" with the filename if there's an error...",
"content",
"=",
"stream",
".",
"read",
"(",
")",
"try",
":",
"return",
"yaml",
".",
"load",
"(",
"content",
")",
"or",
"{",
"}",
"except",
"Exception",
",",
"e",
":",
"if",
"stream",
".",
"name",
"and",
"stream",
".",
"name",
"!=",
"'<string>'",
":",
"for",
"mark_name",
"in",
"'context_mark'",
",",
"'problem_mark'",
":",
"mark",
"=",
"getattr",
"(",
"e",
",",
"mark_name",
",",
"None",
")",
"if",
"mark",
"is",
"None",
":",
"continue",
"if",
"getattr",
"(",
"mark",
",",
"'name'",
")",
"==",
"'<string>'",
":",
"mark",
".",
"name",
"=",
"stream",
".",
"name",
"raise",
"e"
] | Load yaml-formatted data from a stream.
Args:
stream (file-like object).
Returns:
dict. | [
"Load",
"yaml",
"-",
"formatted",
"data",
"from",
"a",
"stream",
"."
] | python | train |
cloud-custodian/cloud-custodian | c7n/filters/offhours.py | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/filters/offhours.py#L448-L463 | def get_tag_value(self, i):
"""Get the resource's tag value specifying its schedule."""
# Look for the tag, Normalize tag key and tag value
found = False
for t in i.get('Tags', ()):
if t['Key'].lower() == self.tag_key:
found = t['Value']
break
if found is False:
return False
# enforce utf8, or do translate tables via unicode ord mapping
value = found.lower().encode('utf8').decode('utf8')
# Some folks seem to be interpreting the docs quote marks as
# literal for values.
value = value.strip("'").strip('"')
return value | [
"def",
"get_tag_value",
"(",
"self",
",",
"i",
")",
":",
"# Look for the tag, Normalize tag key and tag value",
"found",
"=",
"False",
"for",
"t",
"in",
"i",
".",
"get",
"(",
"'Tags'",
",",
"(",
")",
")",
":",
"if",
"t",
"[",
"'Key'",
"]",
".",
"lower",
"(",
")",
"==",
"self",
".",
"tag_key",
":",
"found",
"=",
"t",
"[",
"'Value'",
"]",
"break",
"if",
"found",
"is",
"False",
":",
"return",
"False",
"# enforce utf8, or do translate tables via unicode ord mapping",
"value",
"=",
"found",
".",
"lower",
"(",
")",
".",
"encode",
"(",
"'utf8'",
")",
".",
"decode",
"(",
"'utf8'",
")",
"# Some folks seem to be interpreting the docs quote marks as",
"# literal for values.",
"value",
"=",
"value",
".",
"strip",
"(",
"\"'\"",
")",
".",
"strip",
"(",
"'\"'",
")",
"return",
"value"
] | Get the resource's tag value specifying its schedule. | [
"Get",
"the",
"resource",
"s",
"tag",
"value",
"specifying",
"its",
"schedule",
"."
] | python | train |
QInfer/python-qinfer | src/qinfer/domains.py | https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/domains.py#L640-L657 | def values(self):
"""
Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
# This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565
partition_array = np.empty((self.n_members, self.n_elements), dtype=int)
masks = np.identity(self.n_elements, dtype=int)
for i, c in enumerate(combinations_with_replacement(masks, self.n_meas)):
partition_array[i,:] = sum(c)
# Convert to dtype before returning
return self.from_regular_array(partition_array) | [
"def",
"values",
"(",
"self",
")",
":",
"# This code comes from Jared Goguen at http://stackoverflow.com/a/37712597/1082565",
"partition_array",
"=",
"np",
".",
"empty",
"(",
"(",
"self",
".",
"n_members",
",",
"self",
".",
"n_elements",
")",
",",
"dtype",
"=",
"int",
")",
"masks",
"=",
"np",
".",
"identity",
"(",
"self",
".",
"n_elements",
",",
"dtype",
"=",
"int",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"combinations_with_replacement",
"(",
"masks",
",",
"self",
".",
"n_meas",
")",
")",
":",
"partition_array",
"[",
"i",
",",
":",
"]",
"=",
"sum",
"(",
"c",
")",
"# Convert to dtype before returning",
"return",
"self",
".",
"from_regular_array",
"(",
"partition_array",
")"
] | Returns an `np.array` of type `self.dtype` containing
some values from the domain.
For domains where ``is_finite`` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray` | [
"Returns",
"an",
"np",
".",
"array",
"of",
"type",
"self",
".",
"dtype",
"containing",
"some",
"values",
"from",
"the",
"domain",
".",
"For",
"domains",
"where",
"is_finite",
"is",
"True",
"all",
"elements",
"of",
"the",
"domain",
"will",
"be",
"yielded",
"exactly",
"once",
"."
] | python | train |
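The enumeration used in the values record above — summing identity-matrix rows drawn with combinations_with_replacement — can be reproduced standalone; the sizes below are arbitrary stand-ins for n_elements and n_meas:

import numpy as np
from itertools import combinations_with_replacement

n_elements, n_meas = 3, 2                      # 3 outcome bins, 2 repetitions
masks = np.identity(n_elements, dtype=int)
rows = [sum(c) for c in combinations_with_replacement(masks, n_meas)]
partitions = np.vstack(rows)                   # each row sums to n_meas
print(partitions)
# [[2 0 0]
#  [1 1 0]
#  [1 0 1]
#  [0 2 0]
#  [0 1 1]
#  [0 0 2]]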
pyapi-gitlab/pyapi-gitlab | gitlab/__init__.py | https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/__init__.py#L63-L74 | def get_project(self, project):
"""
Get info for a project identified by id or namespace/project_name
:param project: The ID or URL-encoded path of the project
:return: Dictionary containing the Project
:raise: HttpError: If invalid response returned
"""
project = format_string(project)
return self.get(
'/projects/{project}'.format(project=project)) | [
"def",
"get_project",
"(",
"self",
",",
"project",
")",
":",
"project",
"=",
"format_string",
"(",
"project",
")",
"return",
"self",
".",
"get",
"(",
"'/projects/{project}'",
".",
"format",
"(",
"project",
"=",
"project",
")",
")"
] | Get info for a project identified by id or namespace/project_name
:param project: The ID or URL-encoded path of the project
:return: Dictionary containing the Project
:raise: HttpError: If invalid response returned | [
"Get",
"info",
"for",
"a",
"project",
"identified",
"by",
"id",
"or",
"namespace",
"/",
"project_name"
] | python | train |
gregmuellegger/django-superform | django_superform/fields.py | https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L301-L310 | def allow_blank(self, form, name):
"""
Allow blank determines if the form might be completely empty. If it's
empty it will result in a None as the saved value for the ForeignKey.
"""
if self.blank is not None:
return self.blank
model = form._meta.model
field = model._meta.get_field(self.get_field_name(form, name))
return field.blank | [
"def",
"allow_blank",
"(",
"self",
",",
"form",
",",
"name",
")",
":",
"if",
"self",
".",
"blank",
"is",
"not",
"None",
":",
"return",
"self",
".",
"blank",
"model",
"=",
"form",
".",
"_meta",
".",
"model",
"field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"self",
".",
"get_field_name",
"(",
"form",
",",
"name",
")",
")",
"return",
"field",
".",
"blank"
] | Allow blank determines if the form might be completely empty. If it's
empty it will result in a None as the saved value for the ForeignKey. | [
"Allow",
"blank",
"determines",
"if",
"the",
"form",
"might",
"be",
"completely",
"empty",
".",
"If",
"it",
"s",
"empty",
"it",
"will",
"result",
"in",
"a",
"None",
"as",
"the",
"saved",
"value",
"for",
"the",
"ForeignKey",
"."
] | python | train |
mabuchilab/QNET | src/qnet/algebra/pattern_matching/__init__.py | https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/pattern_matching/__init__.py#L604-L606 | def from_expr(cls, expr):
"""Instantiate proto-expression from the given Expression"""
return cls(expr.args, expr.kwargs, cls=expr.__class__) | [
"def",
"from_expr",
"(",
"cls",
",",
"expr",
")",
":",
"return",
"cls",
"(",
"expr",
".",
"args",
",",
"expr",
".",
"kwargs",
",",
"cls",
"=",
"expr",
".",
"__class__",
")"
] | Instantiate proto-expression from the given Expression | [
"Instantiate",
"proto",
"-",
"expression",
"from",
"the",
"given",
"Expression"
] | python | train |
gem/oq-engine | openquake/commands/abort.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/abort.py#L27-L53 | def abort(job_id):
"""
Abort the given job
"""
job = logs.dbcmd('get_job', job_id) # job_id can be negative
if job is None:
print('There is no job %d' % job_id)
return
elif job.status not in ('executing', 'running'):
print('Job %d is %s' % (job.id, job.status))
return
name = 'oq-job-%d' % job.id
for p in psutil.process_iter():
if p.name() == name:
try:
os.kill(p.pid, signal.SIGTERM)
logs.dbcmd('set_status', job.id, 'aborted')
print('Job %d aborted' % job.id)
except Exception as exc:
print(exc)
break
else: # no break
# set job as failed if it is set as 'executing' or 'running' in the db
# but the corresponding process is not running anymore
logs.dbcmd('set_status', job.id, 'failed')
print('Unable to find a process for job %d,'
' setting it as failed' % job.id) | [
"def",
"abort",
"(",
"job_id",
")",
":",
"job",
"=",
"logs",
".",
"dbcmd",
"(",
"'get_job'",
",",
"job_id",
")",
"# job_id can be negative",
"if",
"job",
"is",
"None",
":",
"print",
"(",
"'There is no job %d'",
"%",
"job_id",
")",
"return",
"elif",
"job",
".",
"status",
"not",
"in",
"(",
"'executing'",
",",
"'running'",
")",
":",
"print",
"(",
"'Job %d is %s'",
"%",
"(",
"job",
".",
"id",
",",
"job",
".",
"status",
")",
")",
"return",
"name",
"=",
"'oq-job-%d'",
"%",
"job",
".",
"id",
"for",
"p",
"in",
"psutil",
".",
"process_iter",
"(",
")",
":",
"if",
"p",
".",
"name",
"(",
")",
"==",
"name",
":",
"try",
":",
"os",
".",
"kill",
"(",
"p",
".",
"pid",
",",
"signal",
".",
"SIGTERM",
")",
"logs",
".",
"dbcmd",
"(",
"'set_status'",
",",
"job",
".",
"id",
",",
"'aborted'",
")",
"print",
"(",
"'Job %d aborted'",
"%",
"job",
".",
"id",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"exc",
")",
"break",
"else",
":",
"# no break",
"# set job as failed if it is set as 'executing' or 'running' in the db",
"# but the corresponding process is not running anymore",
"logs",
".",
"dbcmd",
"(",
"'set_status'",
",",
"job",
".",
"id",
",",
"'failed'",
")",
"print",
"(",
"'Unable to find a process for job %d,'",
"' setting it as failed'",
"%",
"job",
".",
"id",
")"
] | Abort the given job | [
"Abort",
"the",
"given",
"job"
] | python | train |
rbw/pysnow | pysnow/url_builder.py | https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/url_builder.py#L23-L36 | def validate_path(path):
"""Validates the provided path
:param path: path to validate (string)
:raise:
:InvalidUsage: If validation fails.
"""
if not isinstance(path, six.string_types) or not re.match('^/(?:[._a-zA-Z0-9-]/?)+[^/]$', path):
raise InvalidUsage(
"Path validation failed - Expected: '/<component>[/component], got: %s" % path
)
return True | [
"def",
"validate_path",
"(",
"path",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"six",
".",
"string_types",
")",
"or",
"not",
"re",
".",
"match",
"(",
"'^/(?:[._a-zA-Z0-9-]/?)+[^/]$'",
",",
"path",
")",
":",
"raise",
"InvalidUsage",
"(",
"\"Path validation failed - Expected: '/<component>[/component], got: %s\"",
"%",
"path",
")",
"return",
"True"
] | Validates the provided path
:param path: path to validate (string)
:raise:
:InvalidUsage: If validation fails. | [
"Validates",
"the",
"provided",
"path"
] | python | train |
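A short usage sketch for the validate_path helper above, assuming the pysnow package is installed and importable under the module path shown in the record:

from pysnow.url_builder import validate_path

print(validate_path('/table/incident'))    # True: leading slash, no trailing slash
try:
    validate_path('table/incident')        # rejected: missing the leading slash
except Exception as exc:                   # pysnow raises InvalidUsage here
    print(exc)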
albahnsen/CostSensitiveClassification | costcla/models/cost_tree.py | https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L185-L242 | def _calculate_gain(self, cost_base, y_true, X, cost_mat, split):
""" Private function to calculate the gain in cost of using split in the
current node.
Parameters
----------
cost_base : float
Cost of the naive prediction
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represent the costs of: false positives, false negatives,
true positives and true negatives, for each example.
split : tuple of len = 2
split[0] = feature to split = j
split[1] = where to split = l
Returns
-------
tuple(gain : float, left node prediction : int)
"""
# Check if cost_base == 0, then no gain is possible
#TODO: This must be check in _best_split
if cost_base == 0.0:
return 0.0, int(np.sign(y_true.mean() - 0.5) == 1) # In case cost_b==0 and pi_1!=(0,1)
j, l = split
filter_Xl = (X[:, j] <= l)
filter_Xr = ~filter_Xl
n_samples, n_features = X.shape
# Check if one of the leafs is empty
#TODO: This must be check in _best_split
if np.nonzero(filter_Xl)[0].shape[0] in [0, n_samples]: # One leaf is empty
return 0.0, 0.0
# Split X in Xl and Xr according to rule split
Xl_cost, Xl_pred, _ = self._node_cost(y_true[filter_Xl], cost_mat[filter_Xl, :])
Xr_cost, _, _ = self._node_cost(y_true[filter_Xr], cost_mat[filter_Xr, :])
if self.criterion_weight:
n_samples_Xl = np.nonzero(filter_Xl)[0].shape[0]
Xl_w = n_samples_Xl * 1.0 / n_samples
Xr_w = 1 - Xl_w
gain = round((cost_base - (Xl_w * Xl_cost + Xr_w * Xr_cost)) / cost_base, 6)
else:
gain = round((cost_base - (Xl_cost + Xr_cost)) / cost_base, 6)
return gain, Xl_pred | [
"def",
"_calculate_gain",
"(",
"self",
",",
"cost_base",
",",
"y_true",
",",
"X",
",",
"cost_mat",
",",
"split",
")",
":",
"# Check if cost_base == 0, then no gain is possible",
"#TODO: This must be check in _best_split",
"if",
"cost_base",
"==",
"0.0",
":",
"return",
"0.0",
",",
"int",
"(",
"np",
".",
"sign",
"(",
"y_true",
".",
"mean",
"(",
")",
"-",
"0.5",
")",
"==",
"1",
")",
"# In case cost_b==0 and pi_1!=(0,1)",
"j",
",",
"l",
"=",
"split",
"filter_Xl",
"=",
"(",
"X",
"[",
":",
",",
"j",
"]",
"<=",
"l",
")",
"filter_Xr",
"=",
"~",
"filter_Xl",
"n_samples",
",",
"n_features",
"=",
"X",
".",
"shape",
"# Check if one of the leafs is empty",
"#TODO: This must be check in _best_split",
"if",
"np",
".",
"nonzero",
"(",
"filter_Xl",
")",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"in",
"[",
"0",
",",
"n_samples",
"]",
":",
"# One leaft is empty",
"return",
"0.0",
",",
"0.0",
"# Split X in Xl and Xr according to rule split",
"Xl_cost",
",",
"Xl_pred",
",",
"_",
"=",
"self",
".",
"_node_cost",
"(",
"y_true",
"[",
"filter_Xl",
"]",
",",
"cost_mat",
"[",
"filter_Xl",
",",
":",
"]",
")",
"Xr_cost",
",",
"_",
",",
"_",
"=",
"self",
".",
"_node_cost",
"(",
"y_true",
"[",
"filter_Xr",
"]",
",",
"cost_mat",
"[",
"filter_Xr",
",",
":",
"]",
")",
"if",
"self",
".",
"criterion_weight",
":",
"n_samples_Xl",
"=",
"np",
".",
"nonzero",
"(",
"filter_Xl",
")",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
"Xl_w",
"=",
"n_samples_Xl",
"*",
"1.0",
"/",
"n_samples",
"Xr_w",
"=",
"1",
"-",
"Xl_w",
"gain",
"=",
"round",
"(",
"(",
"cost_base",
"-",
"(",
"Xl_w",
"*",
"Xl_cost",
"+",
"Xr_w",
"*",
"Xr_cost",
")",
")",
"/",
"cost_base",
",",
"6",
")",
"else",
":",
"gain",
"=",
"round",
"(",
"(",
"cost_base",
"-",
"(",
"Xl_cost",
"+",
"Xr_cost",
")",
")",
"/",
"cost_base",
",",
"6",
")",
"return",
"gain",
",",
"Xl_pred"
] | Private function to calculate the gain in cost of using split in the
current node.
Parameters
----------
cost_base : float
Cost of the naive prediction
y_true : array indicator matrix
Ground truth (correct) labels.
X : array-like of shape = [n_samples, n_features]
The input samples.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represent the costs of: false positives, false negatives,
true positives and true negatives, for each example.
split : tuple of len = 2
split[0] = feature to split = j
split[1] = where to split = l
Returns
-------
tuple(gain : float, left node prediction : int) | [
"Private",
"function",
"to",
"calculate",
"the",
"gain",
"in",
"cost",
"of",
"using",
"split",
"in",
"the",
"current",
"node",
"."
] | python | train |
saltstack/salt | salt/modules/npm.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/npm.py#L364-L406 | def cache_list(path=None, runas=None, env=None):
'''
List NPM cached packages.
If no path for a specific package is provided this will list all the cached packages.
path
The cache subpath to list, or None to list the entire cache
runas
The user to run NPM with
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' npm.cache_list
'''
env = env or {}
if runas:
uid = salt.utils.user.get_uid(runas)
if uid:
env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ['npm', 'cache', 'ls']
if path:
cmd.append(path)
cmd = ' '.join(cmd)
result = __salt__['cmd.run_all'](
cmd, cwd=None, runas=runas, env=env, python_shell=True, ignore_retcode=True)
if result['retcode'] != 0 and result['stderr']:
raise CommandExecutionError(result['stderr'])
return result['stdout'] | [
"def",
"cache_list",
"(",
"path",
"=",
"None",
",",
"runas",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"env",
"=",
"env",
"or",
"{",
"}",
"if",
"runas",
":",
"uid",
"=",
"salt",
".",
"utils",
".",
"user",
".",
"get_uid",
"(",
"runas",
")",
"if",
"uid",
":",
"env",
".",
"update",
"(",
"{",
"'SUDO_UID'",
":",
"uid",
",",
"'SUDO_USER'",
":",
"''",
"}",
")",
"cmd",
"=",
"[",
"'npm'",
",",
"'cache'",
",",
"'ls'",
"]",
"if",
"path",
":",
"cmd",
".",
"append",
"(",
"path",
")",
"cmd",
"=",
"' '",
".",
"join",
"(",
"cmd",
")",
"result",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"cwd",
"=",
"None",
",",
"runas",
"=",
"runas",
",",
"env",
"=",
"env",
",",
"python_shell",
"=",
"True",
",",
"ignore_retcode",
"=",
"True",
")",
"if",
"result",
"[",
"'retcode'",
"]",
"!=",
"0",
"and",
"result",
"[",
"'stderr'",
"]",
":",
"raise",
"CommandExecutionError",
"(",
"result",
"[",
"'stderr'",
"]",
")",
"return",
"result",
"[",
"'stdout'",
"]"
] | List NPM cached packages.
If no path for a specific package is provided this will list all the cached packages.
path
The cache subpath to list, or None to list the entire cache
runas
The user to run NPM with
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
CLI Example:
.. code-block:: bash
salt '*' npm.cache_list | [
"List",
"NPM",
"cached",
"packages",
"."
] | python | train |
jay-johnson/antinex-client | antinex_client/log/setup_logging.py | https://github.com/jay-johnson/antinex-client/blob/850ba2a2fe21c836e071def618dcecc9caf5d59c/antinex_client/log/setup_logging.py#L6-L59 | def setup_logging(
default_level=logging.INFO,
default_path="{}/logging.json".format(
os.getenv(
"LOG_DIR",
os.path.dirname(os.path.realpath(__file__)))),
env_key="LOG_CFG",
config_name=None):
"""setup_logging
Setup logging configuration
:param default_level: level to log
:param default_path: path to config (optional)
:param env_key: path to config in this env var
:param config_name: filename for config
"""
path = default_path
file_name = default_path.split("/")[-1]
if config_name:
file_name = config_name
path = "{}/{}".format(
"/".join(default_path.split("/")[:-1]),
file_name)
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, "rt") as f:
config = json.load(f)
logging.config.dictConfig(config)
return
else:
cwd_path = os.getcwd() + "/antinex_client/log/{}".format(
file_name)
if os.path.exists(cwd_path):
with open(cwd_path, "rt") as f:
config = json.load(f)
logging.config.dictConfig(config)
return
rels_path = os.getcwd() + "/../log/{}".format(
file_name)
if os.path.exists(rels_path):
with open(rels_path, "rt") as f:
config = json.load(f)
logging.config.dictConfig(config)
return
else:
logging.basicConfig(level=default_level)
return | [
"def",
"setup_logging",
"(",
"default_level",
"=",
"logging",
".",
"INFO",
",",
"default_path",
"=",
"\"{}/logging.json\"",
".",
"format",
"(",
"os",
".",
"getenv",
"(",
"\"LOG_DIR\"",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
")",
",",
"env_key",
"=",
"\"LOG_CFG\"",
",",
"config_name",
"=",
"None",
")",
":",
"path",
"=",
"default_path",
"file_name",
"=",
"default_path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"if",
"config_name",
":",
"file_name",
"=",
"config_name",
"path",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"\"/\"",
".",
"join",
"(",
"default_path",
".",
"split",
"(",
"\"/\"",
")",
"[",
":",
"-",
"1",
"]",
")",
",",
"file_name",
")",
"value",
"=",
"os",
".",
"getenv",
"(",
"env_key",
",",
"None",
")",
"if",
"value",
":",
"path",
"=",
"value",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"rt\"",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"config",
")",
"return",
"else",
":",
"cwd_path",
"=",
"os",
".",
"getcwd",
"(",
")",
"+",
"\"/antinex_client/log/{}\"",
".",
"format",
"(",
"file_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cwd_path",
")",
":",
"with",
"open",
"(",
"cwd_path",
",",
"\"rt\"",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"config",
")",
"return",
"rels_path",
"=",
"os",
".",
"getcwd",
"(",
")",
"+",
"\"/../log/{}\"",
".",
"format",
"(",
"file_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"rels_path",
")",
":",
"with",
"open",
"(",
"rels_path",
",",
"\"rt\"",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"load",
"(",
"f",
")",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"config",
")",
"return",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"default_level",
")",
"return"
] | setup_logging
Setup logging configuration
:param default_level: level to log
:param default_path: path to config (optional)
:param env_key: path to config in this env var
:param config_name: filename for config | [
"setup_logging"
] | python | train |
a10networks/a10-neutron-lbaas | a10_neutron_lbaas/neutron_ext/extensions/a10Certificate.py | https://github.com/a10networks/a10-neutron-lbaas/blob/ff834c295c8019874ca4b209d864367e40cc9881/a10_neutron_lbaas/neutron_ext/extensions/a10Certificate.py#L78-L88 | def get_resources(cls):
"""Returns external resources."""
my_plurals = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attributes.PLURALS.update(my_plurals)
attr_map = RESOURCE_ATTRIBUTE_MAP
ext_resources = resource_helper.build_resource_info(my_plurals,
attr_map,
constants.A10_CERTIFICATE)
return ext_resources | [
"def",
"get_resources",
"(",
"cls",
")",
":",
"my_plurals",
"=",
"resource_helper",
".",
"build_plural_mappings",
"(",
"{",
"}",
",",
"RESOURCE_ATTRIBUTE_MAP",
")",
"attributes",
".",
"PLURALS",
".",
"update",
"(",
"my_plurals",
")",
"attr_map",
"=",
"RESOURCE_ATTRIBUTE_MAP",
"ext_resources",
"=",
"resource_helper",
".",
"build_resource_info",
"(",
"my_plurals",
",",
"attr_map",
",",
"constants",
".",
"A10_CERTIFICATE",
")",
"return",
"ext_resources"
] | Returns external resources. | [
"Returns",
"external",
"resources",
"."
] | python | train |
inveniosoftware/invenio-oauth2server | invenio_oauth2server/views/server.py | https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/views/server.py#L108-L116 | def errors():
"""Error view in case of invalid oauth requests."""
from oauthlib.oauth2.rfc6749.errors import raise_from_error
try:
error = None
raise_from_error(request.values.get('error'), params=dict())
except OAuth2Error as raised:
error = raised
return render_template('invenio_oauth2server/errors.html', error=error) | [
"def",
"errors",
"(",
")",
":",
"from",
"oauthlib",
".",
"oauth2",
".",
"rfc6749",
".",
"errors",
"import",
"raise_from_error",
"try",
":",
"error",
"=",
"None",
"raise_from_error",
"(",
"request",
".",
"values",
".",
"get",
"(",
"'error'",
")",
",",
"params",
"=",
"dict",
"(",
")",
")",
"except",
"OAuth2Error",
"as",
"raised",
":",
"error",
"=",
"raised",
"return",
"render_template",
"(",
"'invenio_oauth2server/errors.html'",
",",
"error",
"=",
"error",
")"
] | Error view in case of invalid oauth requests. | [
"Error",
"view",
"in",
"case",
"of",
"invalid",
"oauth",
"requests",
"."
] | python | train |
libnano/primer3-py | primer3/bindings.py | https://github.com/libnano/primer3-py/blob/0901c0ef3ac17afd69329d23db71136c00bcb635/primer3/bindings.py#L70-L97 | def calcHairpin(seq, mv_conc=50.0, dv_conc=0.0, dntp_conc=0.8, dna_conc=50.0,
temp_c=37, max_loop=30):
''' Calculate the hairpin formation thermodynamics of a DNA sequence.
**Note that the maximum length of `seq` is 60 bp.** This is a cap suggested
by the Primer3 team as the longest reasonable sequence length for which
a two-state NN model produces reliable results (see primer3/src/libnano/thal.h:50).
Args:
seq (str): DNA sequence to analyze for hairpin formation
mv_conc (float/int, optional): Monovalent cation conc. (mM)
dv_conc (float/int, optional): Divalent cation conc. (mM)
dntp_conc (float/int, optional): dNTP conc. (mM)
dna_conc (float/int, optional): DNA conc. (nM)
temp_c (int, optional): Simulation temperature for dG (Celsius)
max_loop(int, optional): Maximum size of loops in the structure
Returns:
A `ThermoResult` object with thermodynamic characteristics of the
hairpin formation.
Raises:
``RuntimeError``
'''
_setThermoArgs(**locals())
return _THERMO_ANALYSIS.calcHairpin(seq).checkExc() | [
"def",
"calcHairpin",
"(",
"seq",
",",
"mv_conc",
"=",
"50.0",
",",
"dv_conc",
"=",
"0.0",
",",
"dntp_conc",
"=",
"0.8",
",",
"dna_conc",
"=",
"50.0",
",",
"temp_c",
"=",
"37",
",",
"max_loop",
"=",
"30",
")",
":",
"_setThermoArgs",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"return",
"_THERMO_ANALYSIS",
".",
"calcHairpin",
"(",
"seq",
")",
".",
"checkExc",
"(",
")"
] | Calculate the hairpin formation thermodynamics of a DNA sequence.
**Note that the maximum length of `seq` is 60 bp.** This is a cap suggested
by the Primer3 team as the longest reasonable sequence length for which
a two-state NN model produces reliable results (see primer3/src/libnano/thal.h:50).
Args:
seq (str): DNA sequence to analyze for hairpin formation
mv_conc (float/int, optional): Monovalent cation conc. (mM)
dv_conc (float/int, optional): Divalent cation conc. (mM)
dntp_conc (float/int, optional): dNTP conc. (mM)
dna_conc (float/int, optional): DNA conc. (nM)
temp_c (int, optional): Simulation temperature for dG (Celsius)
max_loop(int, optional): Maximum size of loops in the structure
Returns:
A `ThermoResult` object with thermodynamic characteristics of the
hairpin formation.
Raises:
``RuntimeError`` | [
"Calculate",
"the",
"hairpin",
"formation",
"thermodynamics",
"of",
"a",
"DNA",
"sequence",
"."
] | python | train |
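A minimal usage sketch for the calcHairpin binding above, assuming primer3-py is installed; the oligo sequence and salt conditions are arbitrary illustrations:

import primer3

result = primer3.calcHairpin('CCCCCATCCGATCAGGGGG', mv_conc=50.0, dv_conc=1.5,
                             dntp_conc=0.6, dna_conc=50.0, temp_c=37)
# The returned ThermoResult carries the folding verdict and thermodynamic values.
print(result.structure_found, result.tm, result.dg)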
sys-git/certifiable | certifiable/operators.py | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/operators.py#L58-L83 | def NAND(*args, **kwargs):
"""
ALL args must raise an exception when called overall.
Raise the specified exception on failure OR the first exception.
:params iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise.
"""
errors = []
for arg in args:
try:
arg()
except CertifierError as e:
errors.append(e)
if (len(errors) != len(args)) and len(args) > 1:
exc = kwargs.get(
'exc',
CertifierValueError('Expecting no certified values'),
)
if exc is not None:
raise exc | [
"def",
"NAND",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"errors",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"try",
":",
"arg",
"(",
")",
"except",
"CertifierError",
"as",
"e",
":",
"errors",
".",
"append",
"(",
"e",
")",
"if",
"(",
"len",
"(",
"errors",
")",
"!=",
"len",
"(",
"args",
")",
")",
"and",
"len",
"(",
"args",
")",
">",
"1",
":",
"exc",
"=",
"kwargs",
".",
"get",
"(",
"'exc'",
",",
"CertifierValueError",
"(",
"'Expecting no certified values'",
")",
",",
")",
"if",
"exc",
"is",
"not",
"None",
":",
"raise",
"exc"
] | ALL args must raise an exception when called overall.
Raise the specified exception on failure OR the first exception.
:params iterable[Certifier] args:
The certifiers to call
:param callable kwargs['exc']:
Callable that accepts the unexpectedly raised exception as argument and returns an
exception to raise. | [
"ALL",
"args",
"must",
"raise",
"an",
"exception",
"when",
"called",
"overall",
".",
"Raise",
"the",
"specified",
"exception",
"on",
"failure",
"OR",
"the",
"first",
"exception",
"."
] | python | train |
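A small sketch of the NAND semantics above; the two certifier callables are hypothetical stand-ins, and CertifierValueError is imported from the same operators module that references it:

from certifiable.operators import NAND, CertifierValueError

def failing():
    raise CertifierValueError('value rejected')   # a certifier that fails

def passing():
    pass                                          # a certifier that passes

NAND(failing, failing)        # fine: every certifier raised
try:
    NAND(failing, passing)    # one certifier passed, so NAND itself raises
except CertifierValueError as exc:
    print(exc)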
bitesofcode/projexui | projexui/widgets/xorbbrowserwidget/xorbquerywidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbbrowserwidget/xorbquerywidget.py#L609-L629 | def query( self ):
"""
Returns the query this widget is representing from the tree widget.
:return <Query> || <QueryCompound> || None
"""
# build a query if not searching all
q = Q()
operator = 'and'
for i in range(self.uiQueryTREE.topLevelItemCount()):
item = self.uiQueryTREE.topLevelItem(i)
if ( isinstance(item, XQueryItem) ):
if ( operator == 'and' ):
q &= item.query()
else:
q |= item.query()
else:
operator = nativestring(item.text(0))
return q | [
"def",
"query",
"(",
"self",
")",
":",
"# build a query if not searching all\r",
"q",
"=",
"Q",
"(",
")",
"operator",
"=",
"'and'",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"uiQueryTREE",
".",
"topLevelItemCount",
"(",
")",
")",
":",
"item",
"=",
"self",
".",
"uiQueryTREE",
".",
"topLevelItem",
"(",
"i",
")",
"if",
"(",
"isinstance",
"(",
"item",
",",
"XQueryItem",
")",
")",
":",
"if",
"(",
"operator",
"==",
"'and'",
")",
":",
"q",
"&=",
"item",
".",
"query",
"(",
")",
"else",
":",
"q",
"|=",
"item",
".",
"query",
"(",
")",
"else",
":",
"operator",
"=",
"nativestring",
"(",
"item",
".",
"text",
"(",
"0",
")",
")",
"return",
"q"
] | Returns the query this widget is representing from the tree widget.
:return <Query> || <QueryCompound> || None | [
"Returns",
"the",
"query",
"this",
"widget",
"is",
"representing",
"from",
"the",
"tree",
"widget",
".",
":",
"return",
"<Query",
">",
"||",
"<QueryCompound",
">",
"||",
"None"
] | python | train |
PMEAL/OpenPNM | openpnm/models/physics/capillary_pressure.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/physics/capillary_pressure.py#L279-L340 | def purcell_bidirectional(target, r_toroid,
num_points=1e2,
surface_tension='pore.surface_tension',
contact_angle='pore.contact_angle',
throat_diameter='throat.diameter',
pore_diameter='pore.diameter'):
r"""
Computes the throat capillary entry pressure assuming the throat is a
toroid. Makes use of the toroidal meniscus model with mode touch.
This model accounts for meniscus protrusion into adjacent pores and
touching solid features.
It is bidirectional because the connected pores generally have different
sizes and this determines how far the meniscus can protrude.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
r_toroid : float or array_like
The radius of the toroid surrounding the pore
num_points : float (Default 100)
The number of divisions to make along the profile length to assess the
meniscus properties in order to find the touch length.
surface_tension : dict key (string)
The dictionary key containing the surface tension values to be used.
If a pore property is given, it is interpolated to a throat list.
contact_angle : dict key (string)
The dictionary key containing the contact angle values to be used.
If a pore property is given, it is interpolated to a throat list.
throat_diameter : dict key (string)
The dictionary key containing the throat diameter values to be used.
pore_diameter : dict key (string)
The dictionary key containing the pore diameter values to be used.
Notes
"""
network = target.project.network
conns = network['throat.conns']
values = {}
for p in range(2):
network['throat.temp_diameter'] = network[pore_diameter][conns[:, p]]
key = 'throat.touch_pore_'+str(p)
target.add_model(propname=key,
model=pm.meniscus.toroidal,
mode='touch',
r_toroid=r_toroid,
num_points=num_points,
throat_diameter=throat_diameter,
surface_tension=surface_tension,
contact_angle=contact_angle,
touch_length='throat.temp_diameter')
values[p] = target[key]
target.remove_model(key)
del network['throat.temp_diameter']
return np.vstack((values[0], values[1])).T | [
"def",
"purcell_bidirectional",
"(",
"target",
",",
"r_toroid",
",",
"num_points",
"=",
"1e2",
",",
"surface_tension",
"=",
"'pore.surface_tension'",
",",
"contact_angle",
"=",
"'pore.contact_angle'",
",",
"throat_diameter",
"=",
"'throat.diameter'",
",",
"pore_diameter",
"=",
"'pore.diameter'",
")",
":",
"network",
"=",
"target",
".",
"project",
".",
"network",
"conns",
"=",
"network",
"[",
"'throat.conns'",
"]",
"values",
"=",
"{",
"}",
"for",
"p",
"in",
"range",
"(",
"2",
")",
":",
"network",
"[",
"'throat.temp_diameter'",
"]",
"=",
"network",
"[",
"pore_diameter",
"]",
"[",
"conns",
"[",
":",
",",
"p",
"]",
"]",
"key",
"=",
"'throat.touch_pore_'",
"+",
"str",
"(",
"p",
")",
"target",
".",
"add_model",
"(",
"propname",
"=",
"key",
",",
"model",
"=",
"pm",
".",
"meniscus",
".",
"toroidal",
",",
"mode",
"=",
"'touch'",
",",
"r_toroid",
"=",
"r_toroid",
",",
"num_points",
"=",
"num_points",
",",
"throat_diameter",
"=",
"throat_diameter",
",",
"surface_tension",
"=",
"surface_tension",
",",
"contact_angle",
"=",
"contact_angle",
",",
"touch_length",
"=",
"'throat.temp_diameter'",
")",
"values",
"[",
"p",
"]",
"=",
"target",
"[",
"key",
"]",
"target",
".",
"remove_model",
"(",
"key",
")",
"del",
"network",
"[",
"'throat.temp_diameter'",
"]",
"return",
"np",
".",
"vstack",
"(",
"(",
"values",
"[",
"0",
"]",
",",
"values",
"[",
"1",
"]",
")",
")",
".",
"T"
] | r"""
Computes the throat capillary entry pressure assuming the throat is a
toroid. Makes use of the toroidal meniscus model with mode touch.
This model accounts for meniscus protrusion into adjacent pores and
touching solid features.
It is bidirectional because the connected pores generally have different
sizes and this determines how far the meniscus can protrude.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
r_toroid : float or array_like
The radius of the toroid surrounding the pore
num_points : float (Default 100)
The number of divisions to make along the profile length to assess the
meniscus properties in order to find the touch length.
surface_tension : dict key (string)
The dictionary key containing the surface tension values to be used.
If a pore property is given, it is interpolated to a throat list.
contact_angle : dict key (string)
The dictionary key containing the contact angle values to be used.
If a pore property is given, it is interpolated to a throat list.
throat_diameter : dict key (string)
The dictionary key containing the throat diameter values to be used.
pore_diameter : dict key (string)
The dictionary key containing the pore diameter values to be used.
Notes | [
"r",
"Computes",
"the",
"throat",
"capillary",
"entry",
"pressure",
"assuming",
"the",
"throat",
"is",
"a",
"toroid",
".",
"Makes",
"use",
"of",
"the",
"toroidal",
"meniscus",
"model",
"with",
"mode",
"touch",
".",
"This",
"model",
"accounts",
"for",
"mensicus",
"protrusion",
"into",
"adjacent",
"pores",
"and",
"touching",
"solid",
"features",
".",
"It",
"is",
"bidirectional",
"becauase",
"the",
"connected",
"pores",
"generally",
"have",
"different",
"sizes",
"and",
"this",
"determines",
"how",
"far",
"the",
"meniscus",
"can",
"protrude",
"."
] | python | train |
mcash/merchant-api-python-sdk | mcash/mapi_client/mapi_client.py | https://github.com/mcash/merchant-api-python-sdk/blob/ebe8734126790354b71077aca519ff263235944e/mcash/mapi_client/mapi_client.py#L777-L784 | def upload_receipt(self, url, data):
"""Upload a receipt to the give url
:param url:
:param data:
:return:
"""
return self.upload_attachment(url=url, data=data, mime_type='application/vnd.mcash.receipt.v1+json') | [
"def",
"upload_receipt",
"(",
"self",
",",
"url",
",",
"data",
")",
":",
"return",
"self",
".",
"upload_attachment",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"data",
",",
"mime_type",
"=",
"'application/vnd.mcash.receipt.v1+json'",
")"
] | Upload a receipt to the given url
:param url:
:param data:
:return: | [
"Upload",
"a",
"receipt",
"to",
"the",
"give",
"url"
] | python | train |
pybel/pybel | src/pybel/canonicalize.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/canonicalize.py#L244-L273 | def _to_bel_lines_footer(graph) -> Iterable[str]:
"""Iterate the lines of a BEL graph's corresponding BEL script's footer.
:param pybel.BELGraph graph: A BEL graph
"""
unqualified_edges_to_serialize = [
(u, v, d)
for u, v, d in graph.edges(data=True)
if d[RELATION] in UNQUALIFIED_EDGES and EVIDENCE not in d
]
isolated_nodes_to_serialize = [
node
for node in graph
if not graph.pred[node] and not graph.succ[node]
]
if unqualified_edges_to_serialize or isolated_nodes_to_serialize:
yield '###############################################\n'
yield 'SET Citation = {"PubMed","Added by PyBEL","29048466"}'
yield 'SET SupportingText = "{}"'.format(PYBEL_AUTOEVIDENCE)
for u, v, data in unqualified_edges_to_serialize:
yield '{} {} {}'.format(u.as_bel(), data[RELATION], v.as_bel())
for node in isolated_nodes_to_serialize:
yield node.as_bel()
yield 'UNSET SupportingText'
yield 'UNSET Citation' | [
"def",
"_to_bel_lines_footer",
"(",
"graph",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"unqualified_edges_to_serialize",
"=",
"[",
"(",
"u",
",",
"v",
",",
"d",
")",
"for",
"u",
",",
"v",
",",
"d",
"in",
"graph",
".",
"edges",
"(",
"data",
"=",
"True",
")",
"if",
"d",
"[",
"RELATION",
"]",
"in",
"UNQUALIFIED_EDGES",
"and",
"EVIDENCE",
"not",
"in",
"d",
"]",
"isolated_nodes_to_serialize",
"=",
"[",
"node",
"for",
"node",
"in",
"graph",
"if",
"not",
"graph",
".",
"pred",
"[",
"node",
"]",
"and",
"not",
"graph",
".",
"succ",
"[",
"node",
"]",
"]",
"if",
"unqualified_edges_to_serialize",
"or",
"isolated_nodes_to_serialize",
":",
"yield",
"'###############################################\\n'",
"yield",
"'SET Citation = {\"PubMed\",\"Added by PyBEL\",\"29048466\"}'",
"yield",
"'SET SupportingText = \"{}\"'",
".",
"format",
"(",
"PYBEL_AUTOEVIDENCE",
")",
"for",
"u",
",",
"v",
",",
"data",
"in",
"unqualified_edges_to_serialize",
":",
"yield",
"'{} {} {}'",
".",
"format",
"(",
"u",
".",
"as_bel",
"(",
")",
",",
"data",
"[",
"RELATION",
"]",
",",
"v",
".",
"as_bel",
"(",
")",
")",
"for",
"node",
"in",
"isolated_nodes_to_serialize",
":",
"yield",
"node",
".",
"as_bel",
"(",
")",
"yield",
"'UNSET SupportingText'",
"yield",
"'UNSET Citation'"
] | Iterate the lines of a BEL graph's corresponding BEL script's footer.
:param pybel.BELGraph graph: A BEL graph | [
"Iterate",
"the",
"lines",
"of",
"a",
"BEL",
"graph",
"s",
"corresponding",
"BEL",
"script",
"s",
"footer",
"."
] | python | train |
quantopian/zipline | zipline/pipeline/factors/factor.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L285-L308 | def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_doc(func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc | [
"def",
"function_application",
"(",
"func",
")",
":",
"if",
"func",
"not",
"in",
"NUMEXPR_MATH_FUNCS",
":",
"raise",
"ValueError",
"(",
"\"Unsupported mathematical function '%s'\"",
"%",
"func",
")",
"@",
"with_doc",
"(",
"func",
")",
"@",
"with_name",
"(",
"func",
")",
"def",
"mathfunc",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"NumericalExpression",
")",
":",
"return",
"NumExprFactor",
"(",
"\"{func}({expr})\"",
".",
"format",
"(",
"func",
"=",
"func",
",",
"expr",
"=",
"self",
".",
"_expr",
")",
",",
"self",
".",
"inputs",
",",
"dtype",
"=",
"float64_dtype",
",",
")",
"else",
":",
"return",
"NumExprFactor",
"(",
"\"{func}(x_0)\"",
".",
"format",
"(",
"func",
"=",
"func",
")",
",",
"(",
"self",
",",
")",
",",
"dtype",
"=",
"float64_dtype",
",",
")",
"return",
"mathfunc"
] | Factory function for producing function application methods for Factor
subclasses. | [
"Factory",
"function",
"for",
"producing",
"function",
"application",
"methods",
"for",
"Factor",
"subclasses",
"."
] | python | train |
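A sketch of how the factory in the record above is consumed; zipline attaches the returned methods to Factor itself, which is only hinted at here, and the unsupported name in the last call is deliberately bogus:

from zipline.pipeline.factors.factor import function_application

log_method = function_application('log')     # NumExpr-backed method for Factor
exp_method = function_application('exp')
try:
    function_application('not_a_math_func')  # names outside NUMEXPR_MATH_FUNCS fail
except ValueError as exc:
    print(exc)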
dmlc/gluon-nlp | scripts/parsing/common/utils.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/parsing/common/utils.py#L56-L144 | def update(self, current, values=[], exact=[], strict=[]):
"""
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for cells in exact:
k, v, w = cells[0], cells[1], 4
if len(cells) == 3:
w = cells[2]
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1, w]
for k, v in strict:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = v
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = 0 if self.target == 0 or math.isnan(self.target) else int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = 0 if self.target == 0 else float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if type(self.sum_values[k]) is list:
info += (' - %s: %.' + str(self.sum_values[k][2]) + 'f') % (
k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + "\n") | [
"def",
"update",
"(",
"self",
",",
"current",
",",
"values",
"=",
"[",
"]",
",",
"exact",
"=",
"[",
"]",
",",
"strict",
"=",
"[",
"]",
")",
":",
"for",
"k",
",",
"v",
"in",
"values",
":",
"if",
"k",
"not",
"in",
"self",
".",
"sum_values",
":",
"self",
".",
"sum_values",
"[",
"k",
"]",
"=",
"[",
"v",
"*",
"(",
"current",
"-",
"self",
".",
"seen_so_far",
")",
",",
"current",
"-",
"self",
".",
"seen_so_far",
"]",
"self",
".",
"unique_values",
".",
"append",
"(",
"k",
")",
"else",
":",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"0",
"]",
"+=",
"v",
"*",
"(",
"current",
"-",
"self",
".",
"seen_so_far",
")",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"1",
"]",
"+=",
"(",
"current",
"-",
"self",
".",
"seen_so_far",
")",
"for",
"cells",
"in",
"exact",
":",
"k",
",",
"v",
",",
"w",
"=",
"cells",
"[",
"0",
"]",
",",
"cells",
"[",
"1",
"]",
",",
"4",
"if",
"len",
"(",
"cells",
")",
"==",
"3",
":",
"w",
"=",
"cells",
"[",
"2",
"]",
"if",
"k",
"not",
"in",
"self",
".",
"sum_values",
":",
"self",
".",
"unique_values",
".",
"append",
"(",
"k",
")",
"self",
".",
"sum_values",
"[",
"k",
"]",
"=",
"[",
"v",
",",
"1",
",",
"w",
"]",
"for",
"k",
",",
"v",
"in",
"strict",
":",
"if",
"k",
"not",
"in",
"self",
".",
"sum_values",
":",
"self",
".",
"unique_values",
".",
"append",
"(",
"k",
")",
"self",
".",
"sum_values",
"[",
"k",
"]",
"=",
"v",
"self",
".",
"seen_so_far",
"=",
"current",
"now",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"verbose",
"==",
"1",
":",
"prev_total_width",
"=",
"self",
".",
"total_width",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\b\"",
"*",
"prev_total_width",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"numdigits",
"=",
"0",
"if",
"self",
".",
"target",
"==",
"0",
"or",
"math",
".",
"isnan",
"(",
"self",
".",
"target",
")",
"else",
"int",
"(",
"np",
".",
"floor",
"(",
"np",
".",
"log10",
"(",
"self",
".",
"target",
")",
")",
")",
"+",
"1",
"barstr",
"=",
"'%%%dd/%%%dd ['",
"%",
"(",
"numdigits",
",",
"numdigits",
")",
"bar",
"=",
"barstr",
"%",
"(",
"current",
",",
"self",
".",
"target",
")",
"prog",
"=",
"0",
"if",
"self",
".",
"target",
"==",
"0",
"else",
"float",
"(",
"current",
")",
"/",
"self",
".",
"target",
"prog_width",
"=",
"int",
"(",
"self",
".",
"width",
"*",
"prog",
")",
"if",
"prog_width",
">",
"0",
":",
"bar",
"+=",
"(",
"'='",
"*",
"(",
"prog_width",
"-",
"1",
")",
")",
"if",
"current",
"<",
"self",
".",
"target",
":",
"bar",
"+=",
"'>'",
"else",
":",
"bar",
"+=",
"'='",
"bar",
"+=",
"(",
"'.'",
"*",
"(",
"self",
".",
"width",
"-",
"prog_width",
")",
")",
"bar",
"+=",
"']'",
"sys",
".",
"stdout",
".",
"write",
"(",
"bar",
")",
"self",
".",
"total_width",
"=",
"len",
"(",
"bar",
")",
"if",
"current",
":",
"time_per_unit",
"=",
"(",
"now",
"-",
"self",
".",
"start",
")",
"/",
"current",
"else",
":",
"time_per_unit",
"=",
"0",
"eta",
"=",
"time_per_unit",
"*",
"(",
"self",
".",
"target",
"-",
"current",
")",
"info",
"=",
"''",
"if",
"current",
"<",
"self",
".",
"target",
":",
"info",
"+=",
"' - ETA: %ds'",
"%",
"eta",
"else",
":",
"info",
"+=",
"' - %ds'",
"%",
"(",
"now",
"-",
"self",
".",
"start",
")",
"for",
"k",
"in",
"self",
".",
"unique_values",
":",
"if",
"type",
"(",
"self",
".",
"sum_values",
"[",
"k",
"]",
")",
"is",
"list",
":",
"info",
"+=",
"(",
"' - %s: %.'",
"+",
"str",
"(",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"2",
"]",
")",
"+",
"'f'",
")",
"%",
"(",
"k",
",",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"0",
"]",
"/",
"max",
"(",
"1",
",",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"1",
"]",
")",
")",
"else",
":",
"info",
"+=",
"' - %s: %s'",
"%",
"(",
"k",
",",
"self",
".",
"sum_values",
"[",
"k",
"]",
")",
"self",
".",
"total_width",
"+=",
"len",
"(",
"info",
")",
"if",
"prev_total_width",
">",
"self",
".",
"total_width",
":",
"info",
"+=",
"(",
"(",
"prev_total_width",
"-",
"self",
".",
"total_width",
")",
"*",
"\" \"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"info",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"current",
">=",
"self",
".",
"target",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
")",
"if",
"self",
".",
"verbose",
"==",
"2",
":",
"if",
"current",
">=",
"self",
".",
"target",
":",
"info",
"=",
"'%ds'",
"%",
"(",
"now",
"-",
"self",
".",
"start",
")",
"for",
"k",
"in",
"self",
".",
"unique_values",
":",
"info",
"+=",
"' - %s: %.4f'",
"%",
"(",
"k",
",",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"0",
"]",
"/",
"max",
"(",
"1",
",",
"self",
".",
"sum_values",
"[",
"k",
"]",
"[",
"1",
"]",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"info",
"+",
"\"\\n\"",
")"
] | Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly. | [
"Updates",
"the",
"progress",
"bar",
".",
"#",
"Arguments",
"current",
":",
"Index",
"of",
"current",
"step",
".",
"values",
":",
"List",
"of",
"tuples",
"(",
"name",
"value_for_last_step",
")",
".",
"The",
"progress",
"bar",
"will",
"display",
"averages",
"for",
"these",
"values",
".",
"exact",
":",
"List",
"of",
"tuples",
"(",
"name",
"value_for_last_step",
")",
".",
"The",
"progress",
"bar",
"will",
"display",
"these",
"values",
"directly",
"."
] | python | train |
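A sketch of the call pattern for the update method above (a Keras-style progress bar); the import path mirrors the record and assumes the gluon-nlp scripts directory is on sys.path, and the constructor signature Progbar(target) is an assumption:

from scripts.parsing.common.utils import Progbar

progbar = Progbar(100)                              # 100 total steps
for step in range(1, 101):
    loss = 1.0 / step
    progbar.update(step,
                   values=[('loss', loss)],         # displayed as a running average
                   exact=[('lr', 0.001, 6)])        # displayed verbatim, 6 decimals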
tensorflow/tensor2tensor | tensor2tensor/models/research/transformer_symshard.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_symshard.py#L227-L339 | def _layer_stack(mp,
inputs,
self_attention_bias,
layers,
hparams,
encoder_output=None,
encoder_decoder_attention_bias=None):
"""A stack of layers.
Args:
mp: a Parallelism object
inputs: a list of Tensors
self_attention_bias: list of bias Tensor for self-attention
(see common_attention.attention_bias())
layers: a string
hparams: hyperparameters for model
encoder_output: optional list of tensors
encoder_decoder_attention_bias: optional list of tensors
Returns:
y: a list of Tensors
"""
layers = layers.strip(",").split(",")
# scaled_dot_product_attention_with_projections uses a 3d attention bias
# (no heads), where multihead_attention uses 4d attention bias.
self_attention_bias_3d = mp(tf.squeeze, self_attention_bias, 1)
if encoder_decoder_attention_bias is not None:
encoder_decoder_attention_bias_3d = mp(
tf.squeeze, encoder_decoder_attention_bias, 1)
relu_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "relu_dropout_broadcast_dims", "")))
mix_size = int(hparams.mix_fraction * hparams.hidden_size)
accumulator = inputs
x = inputs
for layer_num, layer_type in enumerate(layers):
with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
tf.logging.info("%s_%d" % (layer_type, layer_num))
if layer_type == "a":
# accumulate
accumulator = mp(tf.add, x, accumulator)
x = accumulator
elif layer_type == "n":
# normalize
x = mp(common_layers.apply_norm,
x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)
elif layer_type == "d":
# dropout
x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
elif layer_type == "m":
if mix_size > 0:
# mix across shards
def _split(t):
return tuple(tf.split(
t, [mix_size, hparams.hidden_size - mix_size], 2))
to_mix, to_keep = mp(_split, x)
mixed = expert_utils.all_reduce_ring(to_mix, mp)
mixed = mp(tf.multiply, mixed, mp.n ** -0.5)
x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep)
elif layer_type == "att":
# single-head attention
q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
name="q_transform")
x = mp(
common_attention.scaled_dot_product_attention_simple,
q, x, x, self_attention_bias_3d)
x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
name="o_transform")
elif layer_type == "enc-att":
# single-head attention over encoder
q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
name="q_transform")
assert encoder_output is not None
x = mp(
common_attention.scaled_dot_product_attention_simple,
q, encoder_output, encoder_output,
encoder_decoder_attention_bias_3d)
x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False,
name="o_transform")
elif layer_type == "multihead-att":
# multi-head attention
x = mp(
common_attention.multihead_attention,
x,
None,
self_attention_bias, # bias
hparams.multihead_attention_key_channels or hparams.hidden_size,
hparams.multihead_attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.multihead_attention_num_heads,
hparams.attention_dropout)
elif layer_type == "enc-multihead-att":
# multi-head attention
x = mp(
common_attention.multihead_attention,
x,
encoder_output,
encoder_decoder_attention_bias, # bias
hparams.multihead_attention_key_channels or hparams.hidden_size,
hparams.multihead_attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.multihead_attention_num_heads,
hparams.attention_dropout)
elif layer_type == "ffn":
x = mp(
common_layers.dense_relu_dense, x,
hparams.filter_size, hparams.hidden_size,
dropout=hparams.relu_dropout,
dropout_broadcast_dims=[relu_dropout_broadcast_dims] * mp.n)
else:
assert False, "unknown sublayer %s" % layer_type
return x | [
"def",
"_layer_stack",
"(",
"mp",
",",
"inputs",
",",
"self_attention_bias",
",",
"layers",
",",
"hparams",
",",
"encoder_output",
"=",
"None",
",",
"encoder_decoder_attention_bias",
"=",
"None",
")",
":",
"layers",
"=",
"layers",
".",
"strip",
"(",
"\",\"",
")",
".",
"split",
"(",
"\",\"",
")",
"# scaled_dot_product_attention_with_projections uses a 3d attention bias",
"# (no heads), where multihead_attention uses 4d attention bias.",
"self_attention_bias_3d",
"=",
"mp",
"(",
"tf",
".",
"squeeze",
",",
"self_attention_bias",
",",
"1",
")",
"if",
"encoder_decoder_attention_bias",
"is",
"not",
"None",
":",
"encoder_decoder_attention_bias_3d",
"=",
"mp",
"(",
"tf",
".",
"squeeze",
",",
"encoder_decoder_attention_bias",
",",
"1",
")",
"relu_dropout_broadcast_dims",
"=",
"(",
"common_layers",
".",
"comma_separated_string_to_integer_list",
"(",
"getattr",
"(",
"hparams",
",",
"\"relu_dropout_broadcast_dims\"",
",",
"\"\"",
")",
")",
")",
"mix_size",
"=",
"int",
"(",
"hparams",
".",
"mix_fraction",
"*",
"hparams",
".",
"hidden_size",
")",
"accumulator",
"=",
"inputs",
"x",
"=",
"inputs",
"for",
"layer_num",
",",
"layer_type",
"in",
"enumerate",
"(",
"layers",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"%s_%d\"",
"%",
"(",
"layer_type",
",",
"layer_num",
")",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"%s_%d\"",
"%",
"(",
"layer_type",
",",
"layer_num",
")",
")",
"if",
"layer_type",
"==",
"\"a\"",
":",
"# accumulate",
"accumulator",
"=",
"mp",
"(",
"tf",
".",
"add",
",",
"x",
",",
"accumulator",
")",
"x",
"=",
"accumulator",
"elif",
"layer_type",
"==",
"\"n\"",
":",
"# normalize",
"x",
"=",
"mp",
"(",
"common_layers",
".",
"apply_norm",
",",
"x",
",",
"hparams",
".",
"norm_type",
",",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"norm_epsilon",
")",
"elif",
"layer_type",
"==",
"\"d\"",
":",
"# dropout",
"x",
"=",
"mp",
"(",
"tf",
".",
"nn",
".",
"dropout",
",",
"x",
",",
"1.0",
"-",
"hparams",
".",
"layer_prepostprocess_dropout",
")",
"elif",
"layer_type",
"==",
"\"m\"",
":",
"if",
"mix_size",
">",
"0",
":",
"# mix across shards",
"def",
"_split",
"(",
"t",
")",
":",
"return",
"tuple",
"(",
"tf",
".",
"split",
"(",
"t",
",",
"[",
"mix_size",
",",
"hparams",
".",
"hidden_size",
"-",
"mix_size",
"]",
",",
"2",
")",
")",
"to_mix",
",",
"to_keep",
"=",
"mp",
"(",
"_split",
",",
"x",
")",
"mixed",
"=",
"expert_utils",
".",
"all_reduce_ring",
"(",
"to_mix",
",",
"mp",
")",
"mixed",
"=",
"mp",
"(",
"tf",
".",
"multiply",
",",
"mixed",
",",
"mp",
".",
"n",
"**",
"-",
"0.5",
")",
"x",
"=",
"mp",
"(",
"lambda",
"a",
",",
"b",
":",
"tf",
".",
"concat",
"(",
"[",
"a",
",",
"b",
"]",
",",
"2",
")",
",",
"mixed",
",",
"to_keep",
")",
"elif",
"layer_type",
"==",
"\"att\"",
":",
"# single-head attention",
"q",
"=",
"mp",
"(",
"tf",
".",
"layers",
".",
"dense",
",",
"x",
",",
"hparams",
".",
"hidden_size",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"q_transform\"",
")",
"x",
"=",
"mp",
"(",
"common_attention",
".",
"scaled_dot_product_attention_simple",
",",
"q",
",",
"x",
",",
"x",
",",
"self_attention_bias_3d",
")",
"x",
"=",
"mp",
"(",
"tf",
".",
"layers",
".",
"dense",
",",
"x",
",",
"hparams",
".",
"hidden_size",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"o_transform\"",
")",
"elif",
"layer_type",
"==",
"\"enc-att\"",
":",
"# single-head attention over encoder",
"q",
"=",
"mp",
"(",
"tf",
".",
"layers",
".",
"dense",
",",
"x",
",",
"hparams",
".",
"hidden_size",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"q_transform\"",
")",
"assert",
"encoder_output",
"is",
"not",
"None",
"x",
"=",
"mp",
"(",
"common_attention",
".",
"scaled_dot_product_attention_simple",
",",
"q",
",",
"encoder_output",
",",
"encoder_output",
",",
"encoder_decoder_attention_bias_3d",
")",
"x",
"=",
"mp",
"(",
"tf",
".",
"layers",
".",
"dense",
",",
"x",
",",
"hparams",
".",
"hidden_size",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"o_transform\"",
")",
"elif",
"layer_type",
"==",
"\"multihead-att\"",
":",
"# multi-head attention",
"x",
"=",
"mp",
"(",
"common_attention",
".",
"multihead_attention",
",",
"x",
",",
"None",
",",
"self_attention_bias",
",",
"# bias",
"hparams",
".",
"multihead_attention_key_channels",
"or",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"multihead_attention_value_channels",
"or",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"multihead_attention_num_heads",
",",
"hparams",
".",
"attention_dropout",
")",
"elif",
"layer_type",
"==",
"\"enc-multihead-att\"",
":",
"# multi-head attention",
"x",
"=",
"mp",
"(",
"common_attention",
".",
"multihead_attention",
",",
"x",
",",
"encoder_output",
",",
"encoder_decoder_attention_bias",
",",
"# bias",
"hparams",
".",
"multihead_attention_key_channels",
"or",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"multihead_attention_value_channels",
"or",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"multihead_attention_num_heads",
",",
"hparams",
".",
"attention_dropout",
")",
"elif",
"layer_type",
"==",
"\"ffn\"",
":",
"x",
"=",
"mp",
"(",
"common_layers",
".",
"dense_relu_dense",
",",
"x",
",",
"hparams",
".",
"filter_size",
",",
"hparams",
".",
"hidden_size",
",",
"dropout",
"=",
"hparams",
".",
"relu_dropout",
",",
"dropout_broadcast_dims",
"=",
"[",
"relu_dropout_broadcast_dims",
"]",
"*",
"mp",
".",
"n",
")",
"else",
":",
"assert",
"False",
",",
"\"unknown sublayer %s\"",
"%",
"layer_type",
"return",
"x"
] | A stack of layers.
Args:
mp: a Parallelism object
inputs: a list of Tensors
self_attention_bias: list of bias Tensor for self-attention
(see common_attention.attention_bias())
layers: a string
hparams: hyperparameters for model
encoder_output: optional list of tensors
encoder_decoder_attention_bias: optional list of tensors
Returns:
y: a list of Tensors | [
"A",
"stack",
"of",
"layers",
"."
] | python | train |
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1463-L1485 | def change_normal_host_check_interval(self, host, check_interval):
"""Modify host check interval
Format of the line that triggers function call::
CHANGE_NORMAL_HOST_CHECK_INTERVAL;<host_name>;<check_interval>
:param host: host to edit
:type host: alignak.objects.host.Host
:param check_interval: new value to set
:type check_interval:
:return: None
"""
host.modified_attributes |= DICT_MODATTR["MODATTR_NORMAL_CHECK_INTERVAL"].value
old_interval = host.check_interval
host.check_interval = check_interval
# If there were no regular checks (interval=0), then schedule
# a check immediately.
if old_interval == 0 and host.checks_enabled:
host.schedule(self.daemon.hosts, self.daemon.services,
self.daemon.timeperiods, self.daemon.macromodulations,
self.daemon.checkmodulations, self.daemon.checks,
force=False, force_time=int(time.time()))
self.send_an_element(host.get_update_status_brok()) | [
"def",
"change_normal_host_check_interval",
"(",
"self",
",",
"host",
",",
"check_interval",
")",
":",
"host",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_NORMAL_CHECK_INTERVAL\"",
"]",
".",
"value",
"old_interval",
"=",
"host",
".",
"check_interval",
"host",
".",
"check_interval",
"=",
"check_interval",
"# If there were no regular checks (interval=0), then schedule",
"# a check immediately.",
"if",
"old_interval",
"==",
"0",
"and",
"host",
".",
"checks_enabled",
":",
"host",
".",
"schedule",
"(",
"self",
".",
"daemon",
".",
"hosts",
",",
"self",
".",
"daemon",
".",
"services",
",",
"self",
".",
"daemon",
".",
"timeperiods",
",",
"self",
".",
"daemon",
".",
"macromodulations",
",",
"self",
".",
"daemon",
".",
"checkmodulations",
",",
"self",
".",
"daemon",
".",
"checks",
",",
"force",
"=",
"False",
",",
"force_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"self",
".",
"send_an_element",
"(",
"host",
".",
"get_update_status_brok",
"(",
")",
")"
] | Modify host check interval
Format of the line that triggers function call::
CHANGE_NORMAL_HOST_CHECK_INTERVAL;<host_name>;<check_interval>
:param host: host to edit
:type host: alignak.objects.host.Host
:param check_interval: new value to set
:type check_interval:
:return: None | [
"Modify",
"host",
"check",
"interval",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | python | train |
bcbio/bcbio-nextgen | bcbio/variation/annotation.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/annotation.py#L17-L46 | def get_gatk_annotations(config, include_depth=True, include_baseqranksum=True,
gatk_input=True):
"""Retrieve annotations to use for GATK VariantAnnotator.
If include_depth is false, we'll skip annotating DP. Since GATK downsamples
this will undercount on high depth sequencing and the standard outputs
from the original callers may be preferable.
BaseQRankSum can cause issues with some MuTect2 and other runs, so we
provide option to skip it.
"""
broad_runner = broad.runner_from_config(config)
anns = ["MappingQualityRankSumTest", "MappingQualityZero",
"QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"]
if include_baseqranksum:
anns += ["BaseQualityRankSumTest"]
# Some annotations not working correctly with external datasets and GATK 3
if gatk_input or broad_runner.gatk_type() == "gatk4":
anns += ["FisherStrand"]
if broad_runner.gatk_type() == "gatk4":
anns += ["MappingQuality"]
else:
anns += ["GCContent", "HaplotypeScore", "HomopolymerRun"]
if include_depth:
anns += ["DepthPerAlleleBySample"]
if broad_runner.gatk_type() in ["restricted", "gatk4"]:
anns += ["Coverage"]
else:
anns += ["DepthOfCoverage"]
return anns | [
"def",
"get_gatk_annotations",
"(",
"config",
",",
"include_depth",
"=",
"True",
",",
"include_baseqranksum",
"=",
"True",
",",
"gatk_input",
"=",
"True",
")",
":",
"broad_runner",
"=",
"broad",
".",
"runner_from_config",
"(",
"config",
")",
"anns",
"=",
"[",
"\"MappingQualityRankSumTest\"",
",",
"\"MappingQualityZero\"",
",",
"\"QualByDepth\"",
",",
"\"ReadPosRankSumTest\"",
",",
"\"RMSMappingQuality\"",
"]",
"if",
"include_baseqranksum",
":",
"anns",
"+=",
"[",
"\"BaseQualityRankSumTest\"",
"]",
"# Some annotations not working correctly with external datasets and GATK 3",
"if",
"gatk_input",
"or",
"broad_runner",
".",
"gatk_type",
"(",
")",
"==",
"\"gatk4\"",
":",
"anns",
"+=",
"[",
"\"FisherStrand\"",
"]",
"if",
"broad_runner",
".",
"gatk_type",
"(",
")",
"==",
"\"gatk4\"",
":",
"anns",
"+=",
"[",
"\"MappingQuality\"",
"]",
"else",
":",
"anns",
"+=",
"[",
"\"GCContent\"",
",",
"\"HaplotypeScore\"",
",",
"\"HomopolymerRun\"",
"]",
"if",
"include_depth",
":",
"anns",
"+=",
"[",
"\"DepthPerAlleleBySample\"",
"]",
"if",
"broad_runner",
".",
"gatk_type",
"(",
")",
"in",
"[",
"\"restricted\"",
",",
"\"gatk4\"",
"]",
":",
"anns",
"+=",
"[",
"\"Coverage\"",
"]",
"else",
":",
"anns",
"+=",
"[",
"\"DepthOfCoverage\"",
"]",
"return",
"anns"
] | Retrieve annotations to use for GATK VariantAnnotator.
If include_depth is false, we'll skip annotating DP. Since GATK downsamples
this will undercount on high depth sequencing and the standard outputs
from the original callers may be preferable.
BaseQRankSum can cause issues with some MuTect2 and other runs, so we
provide option to skip it. | [
"Retrieve",
"annotations",
"to",
"use",
"for",
"GATK",
"VariantAnnotator",
"."
] | python | train |
nutechsoftware/alarmdecoder | alarmdecoder/messages/lrr/message.py | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/messages/lrr/message.py#L59-L94 | def _parse_message(self, data):
"""
Parses the raw message from the device.
:param data: message data to parse
:type data: string
:raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
"""
try:
_, values = data.split(':')
values = values.split(',')
# Handle older-format events
if len(values) <= 3:
self.event_data, self.partition, self.event_type = values
self.version = 1
# Newer-format events
else:
self.event_data, self.partition, self.event_type, self.report_code = values
self.version = 2
event_type_data = self.event_type.split('_')
self.event_prefix = event_type_data[0] # Ex: CID
self.event_source = get_event_source(self.event_prefix) # Ex: LRR_EVENT_TYPE.CID
self.event_status = int(event_type_data[1][0]) # Ex: 1 or 3
self.event_code = int(event_type_data[1][1:], 16) # Ex: 0x100 = Medical
# replace last 2 digits of event_code with report_code, if applicable.
if not self.skip_report_override and self.report_code not in ['00', 'ff']:
self.event_code = int(event_type_data[1][1] + self.report_code, 16)
self.event_description = get_event_description(self.event_source, self.event_code)
except ValueError:
raise InvalidMessageError('Received invalid message: {0}'.format(data)) | [
"def",
"_parse_message",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"_",
",",
"values",
"=",
"data",
".",
"split",
"(",
"':'",
")",
"values",
"=",
"values",
".",
"split",
"(",
"','",
")",
"# Handle older-format events",
"if",
"len",
"(",
"values",
")",
"<=",
"3",
":",
"self",
".",
"event_data",
",",
"self",
".",
"partition",
",",
"self",
".",
"event_type",
"=",
"values",
"self",
".",
"version",
"=",
"1",
"# Newer-format events",
"else",
":",
"self",
".",
"event_data",
",",
"self",
".",
"partition",
",",
"self",
".",
"event_type",
",",
"self",
".",
"report_code",
"=",
"values",
"self",
".",
"version",
"=",
"2",
"event_type_data",
"=",
"self",
".",
"event_type",
".",
"split",
"(",
"'_'",
")",
"self",
".",
"event_prefix",
"=",
"event_type_data",
"[",
"0",
"]",
"# Ex: CID",
"self",
".",
"event_source",
"=",
"get_event_source",
"(",
"self",
".",
"event_prefix",
")",
"# Ex: LRR_EVENT_TYPE.CID",
"self",
".",
"event_status",
"=",
"int",
"(",
"event_type_data",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"# Ex: 1 or 3",
"self",
".",
"event_code",
"=",
"int",
"(",
"event_type_data",
"[",
"1",
"]",
"[",
"1",
":",
"]",
",",
"16",
")",
"# Ex: 0x100 = Medical",
"# replace last 2 digits of event_code with report_code, if applicable.",
"if",
"not",
"self",
".",
"skip_report_override",
"and",
"self",
".",
"report_code",
"not",
"in",
"[",
"'00'",
",",
"'ff'",
"]",
":",
"self",
".",
"event_code",
"=",
"int",
"(",
"event_type_data",
"[",
"1",
"]",
"[",
"1",
"]",
"+",
"self",
".",
"report_code",
",",
"16",
")",
"self",
".",
"event_description",
"=",
"get_event_description",
"(",
"self",
".",
"event_source",
",",
"self",
".",
"event_code",
")",
"except",
"ValueError",
":",
"raise",
"InvalidMessageError",
"(",
"'Received invalid message: {0}'",
".",
"format",
"(",
"data",
")",
")"
] | Parses the raw message from the device.
:param data: message data to parse
:type data: string
:raises: :py:class:`~alarmdecoder.util.InvalidMessageError` | [
"Parses",
"the",
"raw",
"message",
"from",
"the",
"device",
"."
] | python | train |
rigetti/pyquil | pyquil/gates.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/gates.py#L290-L304 | def CPHASE00(angle, control, target):
"""Produces a controlled-phase gate that phases the ``|00>`` state::
CPHASE00(phi) = diag([exp(1j * phi), 1, 1, 1])
This gate applies to two qubit arguments to produce the variant of the controlled phase
instruction that affects the state 00.
:param angle: The input phase angle to apply when both qubits are in the ``|0>`` state.
:param control: Qubit 1.
:param target: Qubit 2.
:returns: A Gate object.
"""
qubits = [unpack_qubit(q) for q in (control, target)]
return Gate(name="CPHASE00", params=[angle], qubits=qubits) | [
"def",
"CPHASE00",
"(",
"angle",
",",
"control",
",",
"target",
")",
":",
"qubits",
"=",
"[",
"unpack_qubit",
"(",
"q",
")",
"for",
"q",
"in",
"(",
"control",
",",
"target",
")",
"]",
"return",
"Gate",
"(",
"name",
"=",
"\"CPHASE00\"",
",",
"params",
"=",
"[",
"angle",
"]",
",",
"qubits",
"=",
"qubits",
")"
] | Produces a controlled-phase gate that phases the ``|00>`` state::
CPHASE00(phi) = diag([exp(1j * phi), 1, 1, 1])
This gate applies to two qubit arguments to produce the variant of the controlled phase
instruction that affects the state 00.
:param angle: The input phase angle to apply when both qubits are in the ``|0>`` state.
:param control: Qubit 1.
:param target: Qubit 2.
:returns: A Gate object. | [
"Produces",
"a",
"controlled",
"-",
"phase",
"gate",
"that",
"phases",
"the",
"|00",
">",
"state",
"::"
] | python | train |
ggravlingen/pytradfri | examples/debug_info.py | https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/examples/debug_info.py#L115-L125 | def print_lamps():
"""Print all lamp devices as JSON"""
print("Printing information about all lamps paired to the Gateway")
lights = [dev for dev in devices if dev.has_light_control]
if len(lights) == 0:
exit(bold("No lamps paired"))
container = []
for l in lights:
container.append(l.raw)
print(jsonify(container)) | [
"def",
"print_lamps",
"(",
")",
":",
"print",
"(",
"\"Printing information about all lamps paired to the Gateway\"",
")",
"lights",
"=",
"[",
"dev",
"for",
"dev",
"in",
"devices",
"if",
"dev",
".",
"has_light_control",
"]",
"if",
"len",
"(",
"lights",
")",
"==",
"0",
":",
"exit",
"(",
"bold",
"(",
"\"No lamps paired\"",
")",
")",
"container",
"=",
"[",
"]",
"for",
"l",
"in",
"lights",
":",
"container",
".",
"append",
"(",
"l",
".",
"raw",
")",
"print",
"(",
"jsonify",
"(",
"container",
")",
")"
] | Print all lamp devices as JSON | [
"Print",
"all",
"lamp",
"devices",
"as",
"JSON"
] | python | train |
jobovy/galpy | galpy/orbit/Orbit.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/Orbit.py#L2287-L2319 | def ra(self,*args,**kwargs):
"""
NAME:
ra
PURPOSE:
return the right ascension
INPUT:
t - (optional) time at which to get ra (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
(default=Object-wide default; can be Quantity)
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
ra(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU)
"""
out= self._orb.ra(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | [
"def",
"ra",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"self",
".",
"_orb",
".",
"ra",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"out",
")",
"==",
"1",
":",
"return",
"out",
"[",
"0",
"]",
"else",
":",
"return",
"out"
] | NAME:
ra
PURPOSE:
return the right ascension
INPUT:
t - (optional) time at which to get ra (can be Quantity)
obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity)
(default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer
(default=Object-wide default; can be Quantity)
Y is ignored and always assumed to be zero
ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)
OUTPUT:
ra(t) in deg
HISTORY:
2011-02-23 - Written - Bovy (NYU) | [
"NAME",
":"
] | python | train |
dopefishh/pympi | pympi/Praat.py | https://github.com/dopefishh/pympi/blob/79c747cde45b5ba203ed93154d8c123ac9c3ef56/pympi/Praat.py#L401-L408 | def get_intervals(self, sort=False):
"""Give all the intervals or points.
:param bool sort: Flag for yielding the intervals or points sorted.
:yields: All the intervals
"""
for i in sorted(self.intervals) if sort else self.intervals:
yield i | [
"def",
"get_intervals",
"(",
"self",
",",
"sort",
"=",
"False",
")",
":",
"for",
"i",
"in",
"sorted",
"(",
"self",
".",
"intervals",
")",
"if",
"sort",
"else",
"self",
".",
"intervals",
":",
"yield",
"i"
] | Give all the intervals or points.
:param bool sort: Flag for yielding the intervals or points sorted.
:yields: All the intervals | [
"Give",
"all",
"the",
"intervals",
"or",
"points",
"."
] | python | test |
doconix/django-mako-plus | django_mako_plus/provider/__init__.py | https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/provider/__init__.py#L35-L55 | def template_obj_links(request, template_obj, context=None, group=None):
'''
Returns the HTML for the given provider group, using a template object.
This method should not normally be used (use links() instead). The use of
this method is when provider need to be called from regular python code instead
of from within a rendering template environment.
'''
# the template_obj can be a MakoTemplateAdapter or a Mako Template
# if our DMP-defined MakoTemplateAdapter, switch to the embedded Mako Template
template_obj = getattr(template_obj, 'mako_template', template_obj)
# create a mako context so it seems like we are inside a render
context_dict = {
'request': request,
}
if isinstance(context, Context):
for d in context:
context_dict.update(d)
elif context is not None:
context_dict.update(context)
mako_context = create_mako_context(template_obj, **context_dict)
return links(mako_context['self'], group=group) | [
"def",
"template_obj_links",
"(",
"request",
",",
"template_obj",
",",
"context",
"=",
"None",
",",
"group",
"=",
"None",
")",
":",
"# the template_obj can be a MakoTemplateAdapter or a Mako Template",
"# if our DMP-defined MakoTemplateAdapter, switch to the embedded Mako Template",
"template_obj",
"=",
"getattr",
"(",
"template_obj",
",",
"'mako_template'",
",",
"template_obj",
")",
"# create a mako context so it seems like we are inside a render",
"context_dict",
"=",
"{",
"'request'",
":",
"request",
",",
"}",
"if",
"isinstance",
"(",
"context",
",",
"Context",
")",
":",
"for",
"d",
"in",
"context",
":",
"context_dict",
".",
"update",
"(",
"d",
")",
"elif",
"context",
"is",
"not",
"None",
":",
"context_dict",
".",
"update",
"(",
"context",
")",
"mako_context",
"=",
"create_mako_context",
"(",
"template_obj",
",",
"*",
"*",
"context_dict",
")",
"return",
"links",
"(",
"mako_context",
"[",
"'self'",
"]",
",",
"group",
"=",
"group",
")"
] | Returns the HTML for the given provider group, using a template object.
This method should not normally be used (use links() instead). The use of
this method is when provider need to be called from regular python code instead
of from within a rendering template environment. | [
"Returns",
"the",
"HTML",
"for",
"the",
"given",
"provider",
"group",
"using",
"a",
"template",
"object",
".",
"This",
"method",
"should",
"not",
"normally",
"be",
"used",
"(",
"use",
"links",
"()",
"instead",
")",
".",
"The",
"use",
"of",
"this",
"method",
"is",
"when",
"provider",
"need",
"to",
"be",
"called",
"from",
"regular",
"python",
"code",
"instead",
"of",
"from",
"within",
"a",
"rendering",
"template",
"environment",
"."
] | python | train |
joshspeagle/dynesty | dynesty/sampler.py | https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/sampler.py#L482-L759 | def sample(self, maxiter=None, maxcall=None, dlogz=0.01,
logl_max=np.inf, save_bounds=True, save_samples=True):
"""
**The main nested sampling loop.** Iteratively replace the worst live
point with a sample drawn uniformly from the prior until the
provided stopping criteria are reached. Instantiates a generator
that will be called by the user.
Parameters
----------
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop
earlier if termination condition is reached. Default is
`sys.maxsize` (no limit).
dlogz : float, optional
Iteration will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. Default is `0.01`.
logl_max : float, optional
Iteration will stop when the sampled ln(likelihood) exceeds the
threshold set by `logl_max`. Default is no bound (`np.inf`).
save_bounds : bool, optional
Whether or not to save past distributions used to bound
the live points internally. Default is `True`.
save_samples : bool, optional
Whether or not to save past samples from the nested sampling run
(along with other ancillary quantities) internally.
Default is `True`.
Returns
-------
worst : int
Index of the live point with the worst likelihood. This is our
new dead point sample.
ustar : `~numpy.ndarray` with shape (npdim,)
Position of the sample.
vstar : `~numpy.ndarray` with shape (ndim,)
Transformed position of the sample.
loglstar : float
Ln(likelihood) of the sample.
logvol : float
Ln(prior volume) within the sample.
logwt : float
Ln(weight) of the sample.
logz : float
Cumulative ln(evidence) up to the sample (inclusive).
logzvar : float
Estimated cumulative variance on `logz` (inclusive).
h : float
Cumulative information up to the sample (inclusive).
nc : int
Number of likelihood calls performed before the new
live point was accepted.
worst_it : int
Iteration when the live (now dead) point was originally proposed.
boundidx : int
Index of the bound the dead point was originally drawn from.
bounditer : int
Index of the bound being used at the current iteration.
eff : float
The cumulative sampling efficiency (in percent).
delta_logz : float
The estimated remaining evidence expressed as the ln(ratio) of the
current evidence.
"""
# Initialize quantities.
if maxcall is None:
maxcall = sys.maxsize
if maxiter is None:
maxiter = sys.maxsize
self.save_samples = save_samples
self.save_bounds = save_bounds
ncall = 0
# Check whether we're starting fresh or continuing a previous run.
if self.it == 1:
# Initialize values for nested sampling loop.
h = 0. # information, initially *0.*
logz = -1.e300 # ln(evidence), initially *0.*
logzvar = 0. # var[ln(evidence)], initially *0.*
logvol = 0. # initially contains the whole prior (volume=1.)
loglstar = -1.e300 # initial ln(likelihood)
delta_logz = 1.e300 # ln(ratio) of total/current evidence
# Check if we should initialize a different bounding distribution
# instead of using the unit cube.
pointvol = 1. / self.nlive
if self._beyond_unit_bound(loglstar):
bound = self.update(pointvol)
if self.save_bounds:
self.bound.append(bound)
self.nbound += 1
self.since_update = 0
else:
# Remove live points (if added) from previous run.
if self.added_live:
self._remove_live_points()
# Get final state from previous run.
h = self.saved_h[-1] # information
logz = self.saved_logz[-1] # ln(evidence)
logzvar = self.saved_logzvar[-1] # var[ln(evidence)]
logvol = self.saved_logvol[-1] # ln(volume)
loglstar = min(self.live_logl) # ln(likelihood)
delta_logz = np.logaddexp(logz, np.max(self.live_logl) +
logvol) - logz # log-evidence ratio
# The main nested sampling loop.
for it in range(sys.maxsize):
# Stopping criterion 1: current number of iterations
# exceeds `maxiter`.
if it > maxiter:
# If dumping past states, save only the required quantities.
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 2: current number of `loglikelihood`
# calls exceeds `maxcall`.
if ncall > maxcall:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 3: estimated (fractional) remaining evidence
# lies below some threshold set by `dlogz`.
logz_remain = np.max(self.live_logl) + logvol
delta_logz = np.logaddexp(logz, logz_remain) - logz
if dlogz is not None:
if delta_logz < dlogz:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Stopping criterion 4: last dead point exceeded the upper
# `logl_max` bound.
if loglstar > logl_max:
if not self.save_samples:
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_logvol.append(logvol)
self.saved_logl.append(loglstar)
break
# Expected ln(volume) shrinkage.
logvol -= self.dlv
# After `update_interval` interations have passed *and* we meet
# the criteria for moving beyond sampling from the unit cube,
# update the bound using the current set of live points.
ucheck = self.since_update >= self.update_interval
bcheck = self._beyond_unit_bound(loglstar)
if ucheck and bcheck:
pointvol = math.exp(logvol) / self.nlive
bound = self.update(pointvol)
if self.save_bounds:
self.bound.append(bound)
self.nbound += 1
self.since_update = 0
# Locate the "live" point with the lowest `logl`.
worst = np.argmin(self.live_logl) # index
worst_it = self.live_it[worst] # when point was proposed
boundidx = self.live_bound[worst] # associated bound index
# Set our new worst likelihood constraint.
ustar = np.array(self.live_u[worst]) # unit cube position
vstar = np.array(self.live_v[worst]) # transformed position
loglstar_new = self.live_logl[worst] # new likelihood
# Set our new weight using quadratic estimates (trapezoid rule).
logdvol = logsumexp(a=[logvol + self.dlv, logvol],
b=[0.5, -0.5]) # ln(dvol)
logwt = np.logaddexp(loglstar_new, loglstar) + logdvol # ln(wt)
# Sample a new live point from within the likelihood constraint
# `logl > loglstar` using the bounding distribution and sampling
# method from our sampler.
u, v, logl, nc = self._new_point(loglstar_new, logvol)
ncall += nc
self.ncall += nc
self.since_update += nc
# Update evidence `logz` and information `h`.
logz_new = np.logaddexp(logz, logwt)
lzterm = (math.exp(loglstar - logz_new) * loglstar +
math.exp(loglstar_new - logz_new) * loglstar_new)
h_new = (math.exp(logdvol) * lzterm +
math.exp(logz - logz_new) * (h + logz) -
logz_new)
dh = h_new - h
h = h_new
logz = logz_new
logzvar += dh * self.dlv
loglstar = loglstar_new
# Compute bound index at the current iteration.
if self._beyond_unit_bound(loglstar):
bounditer = self.nbound - 1
else:
bounditer = 0
# Save the worst live point. It is now a "dead" point.
if self.save_samples:
self.saved_id.append(worst)
self.saved_u.append(ustar)
self.saved_v.append(vstar)
self.saved_logl.append(loglstar)
self.saved_logvol.append(logvol)
self.saved_logwt.append(logwt)
self.saved_logz.append(logz)
self.saved_logzvar.append(logzvar)
self.saved_h.append(h)
self.saved_nc.append(nc)
self.saved_boundidx.append(boundidx)
self.saved_it.append(worst_it)
self.saved_bounditer.append(bounditer)
self.saved_scale.append(self.scale)
# Update the live point (previously our "worst" point).
self.live_u[worst] = u
self.live_v[worst] = v
self.live_logl[worst] = logl
self.live_bound[worst] = bounditer
self.live_it[worst] = self.it
# Compute our sampling efficiency.
self.eff = 100. * self.it / self.ncall
# Increment total number of iterations.
self.it += 1
# Return dead point and ancillary quantities.
yield (worst, ustar, vstar, loglstar, logvol, logwt,
logz, logzvar, h, nc, worst_it, boundidx, bounditer,
self.eff, delta_logz) | [
"def",
"sample",
"(",
"self",
",",
"maxiter",
"=",
"None",
",",
"maxcall",
"=",
"None",
",",
"dlogz",
"=",
"0.01",
",",
"logl_max",
"=",
"np",
".",
"inf",
",",
"save_bounds",
"=",
"True",
",",
"save_samples",
"=",
"True",
")",
":",
"# Initialize quantities.",
"if",
"maxcall",
"is",
"None",
":",
"maxcall",
"=",
"sys",
".",
"maxsize",
"if",
"maxiter",
"is",
"None",
":",
"maxiter",
"=",
"sys",
".",
"maxsize",
"self",
".",
"save_samples",
"=",
"save_samples",
"self",
".",
"save_bounds",
"=",
"save_bounds",
"ncall",
"=",
"0",
"# Check whether we're starting fresh or continuing a previous run.",
"if",
"self",
".",
"it",
"==",
"1",
":",
"# Initialize values for nested sampling loop.",
"h",
"=",
"0.",
"# information, initially *0.*",
"logz",
"=",
"-",
"1.e300",
"# ln(evidence), initially *0.*",
"logzvar",
"=",
"0.",
"# var[ln(evidence)], initially *0.*",
"logvol",
"=",
"0.",
"# initially contains the whole prior (volume=1.)",
"loglstar",
"=",
"-",
"1.e300",
"# initial ln(likelihood)",
"delta_logz",
"=",
"1.e300",
"# ln(ratio) of total/current evidence",
"# Check if we should initialize a different bounding distribution",
"# instead of using the unit cube.",
"pointvol",
"=",
"1.",
"/",
"self",
".",
"nlive",
"if",
"self",
".",
"_beyond_unit_bound",
"(",
"loglstar",
")",
":",
"bound",
"=",
"self",
".",
"update",
"(",
"pointvol",
")",
"if",
"self",
".",
"save_bounds",
":",
"self",
".",
"bound",
".",
"append",
"(",
"bound",
")",
"self",
".",
"nbound",
"+=",
"1",
"self",
".",
"since_update",
"=",
"0",
"else",
":",
"# Remove live points (if added) from previous run.",
"if",
"self",
".",
"added_live",
":",
"self",
".",
"_remove_live_points",
"(",
")",
"# Get final state from previous run.",
"h",
"=",
"self",
".",
"saved_h",
"[",
"-",
"1",
"]",
"# information",
"logz",
"=",
"self",
".",
"saved_logz",
"[",
"-",
"1",
"]",
"# ln(evidence)",
"logzvar",
"=",
"self",
".",
"saved_logzvar",
"[",
"-",
"1",
"]",
"# var[ln(evidence)]",
"logvol",
"=",
"self",
".",
"saved_logvol",
"[",
"-",
"1",
"]",
"# ln(volume)",
"loglstar",
"=",
"min",
"(",
"self",
".",
"live_logl",
")",
"# ln(likelihood)",
"delta_logz",
"=",
"np",
".",
"logaddexp",
"(",
"logz",
",",
"np",
".",
"max",
"(",
"self",
".",
"live_logl",
")",
"+",
"logvol",
")",
"-",
"logz",
"# log-evidence ratio",
"# The main nested sampling loop.",
"for",
"it",
"in",
"range",
"(",
"sys",
".",
"maxsize",
")",
":",
"# Stopping criterion 1: current number of iterations",
"# exceeds `maxiter`.",
"if",
"it",
">",
"maxiter",
":",
"# If dumping past states, save only the required quantities.",
"if",
"not",
"self",
".",
"save_samples",
":",
"self",
".",
"saved_logz",
".",
"append",
"(",
"logz",
")",
"self",
".",
"saved_logzvar",
".",
"append",
"(",
"logzvar",
")",
"self",
".",
"saved_h",
".",
"append",
"(",
"h",
")",
"self",
".",
"saved_logvol",
".",
"append",
"(",
"logvol",
")",
"self",
".",
"saved_logl",
".",
"append",
"(",
"loglstar",
")",
"break",
"# Stopping criterion 2: current number of `loglikelihood`",
"# calls exceeds `maxcall`.",
"if",
"ncall",
">",
"maxcall",
":",
"if",
"not",
"self",
".",
"save_samples",
":",
"self",
".",
"saved_logz",
".",
"append",
"(",
"logz",
")",
"self",
".",
"saved_logzvar",
".",
"append",
"(",
"logzvar",
")",
"self",
".",
"saved_h",
".",
"append",
"(",
"h",
")",
"self",
".",
"saved_logvol",
".",
"append",
"(",
"logvol",
")",
"self",
".",
"saved_logl",
".",
"append",
"(",
"loglstar",
")",
"break",
"# Stopping criterion 3: estimated (fractional) remaining evidence",
"# lies below some threshold set by `dlogz`.",
"logz_remain",
"=",
"np",
".",
"max",
"(",
"self",
".",
"live_logl",
")",
"+",
"logvol",
"delta_logz",
"=",
"np",
".",
"logaddexp",
"(",
"logz",
",",
"logz_remain",
")",
"-",
"logz",
"if",
"dlogz",
"is",
"not",
"None",
":",
"if",
"delta_logz",
"<",
"dlogz",
":",
"if",
"not",
"self",
".",
"save_samples",
":",
"self",
".",
"saved_logz",
".",
"append",
"(",
"logz",
")",
"self",
".",
"saved_logzvar",
".",
"append",
"(",
"logzvar",
")",
"self",
".",
"saved_h",
".",
"append",
"(",
"h",
")",
"self",
".",
"saved_logvol",
".",
"append",
"(",
"logvol",
")",
"self",
".",
"saved_logl",
".",
"append",
"(",
"loglstar",
")",
"break",
"# Stopping criterion 4: last dead point exceeded the upper",
"# `logl_max` bound.",
"if",
"loglstar",
">",
"logl_max",
":",
"if",
"not",
"self",
".",
"save_samples",
":",
"self",
".",
"saved_logz",
".",
"append",
"(",
"logz",
")",
"self",
".",
"saved_logzvar",
".",
"append",
"(",
"logzvar",
")",
"self",
".",
"saved_h",
".",
"append",
"(",
"h",
")",
"self",
".",
"saved_logvol",
".",
"append",
"(",
"logvol",
")",
"self",
".",
"saved_logl",
".",
"append",
"(",
"loglstar",
")",
"break",
"# Expected ln(volume) shrinkage.",
"logvol",
"-=",
"self",
".",
"dlv",
"# After `update_interval` interations have passed *and* we meet",
"# the criteria for moving beyond sampling from the unit cube,",
"# update the bound using the current set of live points.",
"ucheck",
"=",
"self",
".",
"since_update",
">=",
"self",
".",
"update_interval",
"bcheck",
"=",
"self",
".",
"_beyond_unit_bound",
"(",
"loglstar",
")",
"if",
"ucheck",
"and",
"bcheck",
":",
"pointvol",
"=",
"math",
".",
"exp",
"(",
"logvol",
")",
"/",
"self",
".",
"nlive",
"bound",
"=",
"self",
".",
"update",
"(",
"pointvol",
")",
"if",
"self",
".",
"save_bounds",
":",
"self",
".",
"bound",
".",
"append",
"(",
"bound",
")",
"self",
".",
"nbound",
"+=",
"1",
"self",
".",
"since_update",
"=",
"0",
"# Locate the \"live\" point with the lowest `logl`.",
"worst",
"=",
"np",
".",
"argmin",
"(",
"self",
".",
"live_logl",
")",
"# index",
"worst_it",
"=",
"self",
".",
"live_it",
"[",
"worst",
"]",
"# when point was proposed",
"boundidx",
"=",
"self",
".",
"live_bound",
"[",
"worst",
"]",
"# associated bound index",
"# Set our new worst likelihood constraint.",
"ustar",
"=",
"np",
".",
"array",
"(",
"self",
".",
"live_u",
"[",
"worst",
"]",
")",
"# unit cube position",
"vstar",
"=",
"np",
".",
"array",
"(",
"self",
".",
"live_v",
"[",
"worst",
"]",
")",
"# transformed position",
"loglstar_new",
"=",
"self",
".",
"live_logl",
"[",
"worst",
"]",
"# new likelihood",
"# Set our new weight using quadratic estimates (trapezoid rule).",
"logdvol",
"=",
"logsumexp",
"(",
"a",
"=",
"[",
"logvol",
"+",
"self",
".",
"dlv",
",",
"logvol",
"]",
",",
"b",
"=",
"[",
"0.5",
",",
"-",
"0.5",
"]",
")",
"# ln(dvol)",
"logwt",
"=",
"np",
".",
"logaddexp",
"(",
"loglstar_new",
",",
"loglstar",
")",
"+",
"logdvol",
"# ln(wt)",
"# Sample a new live point from within the likelihood constraint",
"# `logl > loglstar` using the bounding distribution and sampling",
"# method from our sampler.",
"u",
",",
"v",
",",
"logl",
",",
"nc",
"=",
"self",
".",
"_new_point",
"(",
"loglstar_new",
",",
"logvol",
")",
"ncall",
"+=",
"nc",
"self",
".",
"ncall",
"+=",
"nc",
"self",
".",
"since_update",
"+=",
"nc",
"# Update evidence `logz` and information `h`.",
"logz_new",
"=",
"np",
".",
"logaddexp",
"(",
"logz",
",",
"logwt",
")",
"lzterm",
"=",
"(",
"math",
".",
"exp",
"(",
"loglstar",
"-",
"logz_new",
")",
"*",
"loglstar",
"+",
"math",
".",
"exp",
"(",
"loglstar_new",
"-",
"logz_new",
")",
"*",
"loglstar_new",
")",
"h_new",
"=",
"(",
"math",
".",
"exp",
"(",
"logdvol",
")",
"*",
"lzterm",
"+",
"math",
".",
"exp",
"(",
"logz",
"-",
"logz_new",
")",
"*",
"(",
"h",
"+",
"logz",
")",
"-",
"logz_new",
")",
"dh",
"=",
"h_new",
"-",
"h",
"h",
"=",
"h_new",
"logz",
"=",
"logz_new",
"logzvar",
"+=",
"dh",
"*",
"self",
".",
"dlv",
"loglstar",
"=",
"loglstar_new",
"# Compute bound index at the current iteration.",
"if",
"self",
".",
"_beyond_unit_bound",
"(",
"loglstar",
")",
":",
"bounditer",
"=",
"self",
".",
"nbound",
"-",
"1",
"else",
":",
"bounditer",
"=",
"0",
"# Save the worst live point. It is now a \"dead\" point.",
"if",
"self",
".",
"save_samples",
":",
"self",
".",
"saved_id",
".",
"append",
"(",
"worst",
")",
"self",
".",
"saved_u",
".",
"append",
"(",
"ustar",
")",
"self",
".",
"saved_v",
".",
"append",
"(",
"vstar",
")",
"self",
".",
"saved_logl",
".",
"append",
"(",
"loglstar",
")",
"self",
".",
"saved_logvol",
".",
"append",
"(",
"logvol",
")",
"self",
".",
"saved_logwt",
".",
"append",
"(",
"logwt",
")",
"self",
".",
"saved_logz",
".",
"append",
"(",
"logz",
")",
"self",
".",
"saved_logzvar",
".",
"append",
"(",
"logzvar",
")",
"self",
".",
"saved_h",
".",
"append",
"(",
"h",
")",
"self",
".",
"saved_nc",
".",
"append",
"(",
"nc",
")",
"self",
".",
"saved_boundidx",
".",
"append",
"(",
"boundidx",
")",
"self",
".",
"saved_it",
".",
"append",
"(",
"worst_it",
")",
"self",
".",
"saved_bounditer",
".",
"append",
"(",
"bounditer",
")",
"self",
".",
"saved_scale",
".",
"append",
"(",
"self",
".",
"scale",
")",
"# Update the live point (previously our \"worst\" point).",
"self",
".",
"live_u",
"[",
"worst",
"]",
"=",
"u",
"self",
".",
"live_v",
"[",
"worst",
"]",
"=",
"v",
"self",
".",
"live_logl",
"[",
"worst",
"]",
"=",
"logl",
"self",
".",
"live_bound",
"[",
"worst",
"]",
"=",
"bounditer",
"self",
".",
"live_it",
"[",
"worst",
"]",
"=",
"self",
".",
"it",
"# Compute our sampling efficiency.",
"self",
".",
"eff",
"=",
"100.",
"*",
"self",
".",
"it",
"/",
"self",
".",
"ncall",
"# Increment total number of iterations.",
"self",
".",
"it",
"+=",
"1",
"# Return dead point and ancillary quantities.",
"yield",
"(",
"worst",
",",
"ustar",
",",
"vstar",
",",
"loglstar",
",",
"logvol",
",",
"logwt",
",",
"logz",
",",
"logzvar",
",",
"h",
",",
"nc",
",",
"worst_it",
",",
"boundidx",
",",
"bounditer",
",",
"self",
".",
"eff",
",",
"delta_logz",
")"
] | **The main nested sampling loop.** Iteratively replace the worst live
point with a sample drawn uniformly from the prior until the
provided stopping criteria are reached. Instantiates a generator
that will be called by the user.
Parameters
----------
maxiter : int, optional
Maximum number of iterations. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations. Iteration may stop
earlier if termination condition is reached. Default is
`sys.maxsize` (no limit).
dlogz : float, optional
Iteration will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. Default is `0.01`.
logl_max : float, optional
Iteration will stop when the sampled ln(likelihood) exceeds the
threshold set by `logl_max`. Default is no bound (`np.inf`).
save_bounds : bool, optional
Whether or not to save past distributions used to bound
the live points internally. Default is `True`.
save_samples : bool, optional
Whether or not to save past samples from the nested sampling run
(along with other ancillary quantities) internally.
Default is `True`.
Returns
-------
worst : int
Index of the live point with the worst likelihood. This is our
new dead point sample.
ustar : `~numpy.ndarray` with shape (npdim,)
Position of the sample.
vstar : `~numpy.ndarray` with shape (ndim,)
Transformed position of the sample.
loglstar : float
Ln(likelihood) of the sample.
logvol : float
Ln(prior volume) within the sample.
logwt : float
Ln(weight) of the sample.
logz : float
Cumulative ln(evidence) up to the sample (inclusive).
logzvar : float
Estimated cumulative variance on `logz` (inclusive).
h : float
Cumulative information up to the sample (inclusive).
nc : int
Number of likelihood calls performed before the new
live point was accepted.
worst_it : int
Iteration when the live (now dead) point was originally proposed.
boundidx : int
Index of the bound the dead point was originally drawn from.
bounditer : int
Index of the bound being used at the current iteration.
eff : float
The cumulative sampling efficiency (in percent).
delta_logz : float
The estimated remaining evidence expressed as the ln(ratio) of the
current evidence. | [
"**",
"The",
"main",
"nested",
"sampling",
"loop",
".",
"**",
"Iteratively",
"replace",
"the",
"worst",
"live",
"point",
"with",
"a",
"sample",
"drawn",
"uniformly",
"from",
"the",
"prior",
"until",
"the",
"provided",
"stopping",
"criteria",
"are",
"reached",
".",
"Instantiates",
"a",
"generator",
"that",
"will",
"be",
"called",
"by",
"the",
"user",
"."
] | python | train |
secure-systems-lab/securesystemslib | securesystemslib/ed25519_keys.py | https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/ed25519_keys.py#L289-L404 | def verify_signature(public_key, scheme, signature, data, use_pynacl=False):
"""
<Purpose>
Determine whether the private key corresponding to 'public_key' produced
'signature'. verify_signature() will use the public key, the 'scheme' and
'sig', and 'data' arguments to complete the verification.
>>> public, private = generate_public_and_private()
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'ed25519'
>>> signature, scheme = \
create_signature(public, private, data, scheme)
>>> verify_signature(public, scheme, signature, data, use_pynacl=False)
True
>>> verify_signature(public, scheme, signature, data, use_pynacl=True)
True
>>> bad_data = b'The sly brown fox jumps over the lazy dog'
>>> bad_signature, scheme = \
create_signature(public, private, bad_data, scheme)
>>> verify_signature(public, scheme, bad_signature, data, use_pynacl=False)
False
<Arguments>
public_key:
The public key is a 32-byte string.
scheme:
'ed25519' signature scheme used by either the pure python
implementation (i.e., ed25519.py) or PyNacl (i.e., 'nacl').
signature:
The signature is a 64-byte string.
data:
Data object used by securesystemslib.ed25519_keys.create_signature() to
generate 'signature'. 'data' is needed here to verify the signature.
use_pynacl:
True, if the ed25519 signature should be verified by PyNaCl. False,
if the signature should be verified with the pure Python implementation
of ed25519 (slower).
<Exceptions>
securesystemslib.exceptions.UnsupportedAlgorithmError. Raised if the
signature scheme 'scheme' is not one supported by
securesystemslib.ed25519_keys.create_signature().
securesystemslib.exceptions.FormatError. Raised if the arguments are
improperly formatted.
<Side Effects>
securesystemslib._vendor.ed25519.ed25519.checkvalid() called to do the
actual verification. nacl.signing.VerifyKey.verify() called if
'use_pynacl' is True.
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
# Does 'public_key' have the correct format?
# This check will ensure 'public_key' conforms to
# 'securesystemslib.formats.ED25519PUBLIC_SCHEMA', which must have length 32
# bytes. Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ED25519PUBLIC_SCHEMA.check_match(public_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ED25519_SIG_SCHEMA.check_match(scheme)
# Is 'signature' properly formatted?
securesystemslib.formats.ED25519SIGNATURE_SCHEMA.check_match(signature)
# Is 'use_pynacl' properly formatted?
securesystemslib.formats.BOOLEAN_SCHEMA.check_match(use_pynacl)
# Verify 'signature'. Before returning the Boolean result, ensure 'ed25519'
# was used as the signature scheme. Raise
# 'securesystemslib.exceptions.UnsupportedLibraryError' if 'use_pynacl' is
# True but 'nacl' is unavailable.
public = public_key
valid_signature = False
if scheme in _SUPPORTED_ED25519_SIGNING_SCHEMES:
if use_pynacl:
try:
nacl_verify_key = nacl.signing.VerifyKey(public)
nacl_message = nacl_verify_key.verify(data, signature)
valid_signature = True
# The unit tests expect PyNaCl to be installed.
except NameError: # pragma: no cover
raise securesystemslib.exceptions.UnsupportedLibraryError('The PyNaCl'
' library and/or its dependencies unavailable.')
except nacl.exceptions.BadSignatureError:
pass
# Verify 'ed25519' signature with the pure Python implementation.
else:
try:
securesystemslib._vendor.ed25519.ed25519.checkvalid(signature,
data, public)
valid_signature = True
# The pure Python implementation raises 'Exception' if 'signature' is
# invalid.
except Exception as e:
pass
# This is a defensive check for a valid 'scheme', which should have already
# been validated in the ED25519_SIG_SCHEMA.check_match(scheme) above.
else: #pragma: no cover
message = 'Unsupported ed25519 signature scheme: ' + repr(scheme) + '.\n' + \
'Supported schemes: ' + repr(_SUPPORTED_ED25519_SIGNING_SCHEMES) + '.'
raise securesystemslib.exceptions.UnsupportedAlgorithmError(message)
return valid_signature | [
"def",
"verify_signature",
"(",
"public_key",
",",
"scheme",
",",
"signature",
",",
"data",
",",
"use_pynacl",
"=",
"False",
")",
":",
"# Does 'public_key' have the correct format?",
"# This check will ensure 'public_key' conforms to",
"# 'securesystemslib.formats.ED25519PUBLIC_SCHEMA', which must have length 32",
"# bytes. Raise 'securesystemslib.exceptions.FormatError' if the check fails.",
"securesystemslib",
".",
"formats",
".",
"ED25519PUBLIC_SCHEMA",
".",
"check_match",
"(",
"public_key",
")",
"# Is 'scheme' properly formatted?",
"securesystemslib",
".",
"formats",
".",
"ED25519_SIG_SCHEMA",
".",
"check_match",
"(",
"scheme",
")",
"# Is 'signature' properly formatted?",
"securesystemslib",
".",
"formats",
".",
"ED25519SIGNATURE_SCHEMA",
".",
"check_match",
"(",
"signature",
")",
"# Is 'use_pynacl' properly formatted?",
"securesystemslib",
".",
"formats",
".",
"BOOLEAN_SCHEMA",
".",
"check_match",
"(",
"use_pynacl",
")",
"# Verify 'signature'. Before returning the Boolean result, ensure 'ed25519'",
"# was used as the signature scheme. Raise",
"# 'securesystemslib.exceptions.UnsupportedLibraryError' if 'use_pynacl' is",
"# True but 'nacl' is unavailable.",
"public",
"=",
"public_key",
"valid_signature",
"=",
"False",
"if",
"scheme",
"in",
"_SUPPORTED_ED25519_SIGNING_SCHEMES",
":",
"if",
"use_pynacl",
":",
"try",
":",
"nacl_verify_key",
"=",
"nacl",
".",
"signing",
".",
"VerifyKey",
"(",
"public",
")",
"nacl_message",
"=",
"nacl_verify_key",
".",
"verify",
"(",
"data",
",",
"signature",
")",
"valid_signature",
"=",
"True",
"# The unit tests expect PyNaCl to be installed.",
"except",
"NameError",
":",
"# pragma: no cover",
"raise",
"securesystemslib",
".",
"exceptions",
".",
"UnsupportedLibraryError",
"(",
"'The PyNaCl'",
"' library and/or its dependencies unavailable.'",
")",
"except",
"nacl",
".",
"exceptions",
".",
"BadSignatureError",
":",
"pass",
"# Verify 'ed25519' signature with the pure Python implementation.",
"else",
":",
"try",
":",
"securesystemslib",
".",
"_vendor",
".",
"ed25519",
".",
"ed25519",
".",
"checkvalid",
"(",
"signature",
",",
"data",
",",
"public",
")",
"valid_signature",
"=",
"True",
"# The pure Python implementation raises 'Exception' if 'signature' is",
"# invalid.",
"except",
"Exception",
"as",
"e",
":",
"pass",
"# This is a defensive check for a valid 'scheme', which should have already",
"# been validated in the ED25519_SIG_SCHEMA.check_match(scheme) above.",
"else",
":",
"#pragma: no cover",
"message",
"=",
"'Unsupported ed25519 signature scheme: '",
"+",
"repr",
"(",
"scheme",
")",
"+",
"'.\\n'",
"+",
"'Supported schemes: '",
"+",
"repr",
"(",
"_SUPPORTED_ED25519_SIGNING_SCHEMES",
")",
"+",
"'.'",
"raise",
"securesystemslib",
".",
"exceptions",
".",
"UnsupportedAlgorithmError",
"(",
"message",
")",
"return",
"valid_signature"
] | <Purpose>
Determine whether the private key corresponding to 'public_key' produced
'signature'. verify_signature() will use the public key, the 'scheme' and
'sig', and 'data' arguments to complete the verification.
>>> public, private = generate_public_and_private()
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> scheme = 'ed25519'
>>> signature, scheme = \
create_signature(public, private, data, scheme)
>>> verify_signature(public, scheme, signature, data, use_pynacl=False)
True
>>> verify_signature(public, scheme, signature, data, use_pynacl=True)
True
>>> bad_data = b'The sly brown fox jumps over the lazy dog'
>>> bad_signature, scheme = \
create_signature(public, private, bad_data, scheme)
>>> verify_signature(public, scheme, bad_signature, data, use_pynacl=False)
False
<Arguments>
public_key:
The public key is a 32-byte string.
scheme:
'ed25519' signature scheme used by either the pure python
implementation (i.e., ed25519.py) or PyNacl (i.e., 'nacl').
signature:
The signature is a 64-byte string.
data:
Data object used by securesystemslib.ed25519_keys.create_signature() to
generate 'signature'. 'data' is needed here to verify the signature.
use_pynacl:
True, if the ed25519 signature should be verified by PyNaCl. False,
if the signature should be verified with the pure Python implementation
of ed25519 (slower).
<Exceptions>
securesystemslib.exceptions.UnsupportedAlgorithmError. Raised if the
signature scheme 'scheme' is not one supported by
securesystemslib.ed25519_keys.create_signature().
securesystemslib.exceptions.FormatError. Raised if the arguments are
improperly formatted.
<Side Effects>
securesystemslib._vendor.ed25519.ed25519.checkvalid() called to do the
actual verification. nacl.signing.VerifyKey.verify() called if
'use_pynacl' is True.
<Returns>
Boolean. True if the signature is valid, False otherwise. | [
"<Purpose",
">",
"Determine",
"whether",
"the",
"private",
"key",
"corresponding",
"to",
"public_key",
"produced",
"signature",
".",
"verify_signature",
"()",
"will",
"use",
"the",
"public",
"key",
"the",
"scheme",
"and",
"sig",
"and",
"data",
"arguments",
"to",
"complete",
"the",
"verification",
"."
] | python | train |
tensorflow/datasets | tensorflow_datasets/core/utils/py_utils.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/utils/py_utils.py#L276-L280 | def reraise(additional_msg):
"""Reraise an exception with an additional message."""
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = str(exc_value) + "\n" + additional_msg
six.reraise(exc_type, exc_type(msg), exc_traceback) | [
"def",
"reraise",
"(",
"additional_msg",
")",
":",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"msg",
"=",
"str",
"(",
"exc_value",
")",
"+",
"\"\\n\"",
"+",
"additional_msg",
"six",
".",
"reraise",
"(",
"exc_type",
",",
"exc_type",
"(",
"msg",
")",
",",
"exc_traceback",
")"
] | Reraise an exception with an additional message. | [
"Reraise",
"an",
"exception",
"with",
"an",
"additional",
"message",
"."
] | python | train |
mikedh/trimesh | trimesh/scene/scene.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/scene.py#L77-L164 | def add_geometry(self,
geometry,
node_name=None,
geom_name=None,
parent_node_name=None,
transform=None):
"""
Add a geometry to the scene.
If the mesh has multiple transforms defined in its
metadata, they will all be copied into the
TransformForest of the current scene automatically.
Parameters
----------
geometry : Trimesh, Path2D, Path3D PointCloud or list
Geometry to initially add to the scene
base_frame : str or hashable
Name of base frame
metadata : dict
Any metadata about the scene
graph : TransformForest or None
A passed transform graph to use
Returns
----------
node_name : str
Name of node in self.graph
"""
if geometry is None:
return
# PointCloud objects will look like a sequence
elif util.is_sequence(geometry):
# if passed a sequence add all elements
for value in geometry:
self.add_geometry(
geometry=value,
node_name=node_name,
geom_name=geom_name,
parent_node_name=parent_node_name,
transform=transform,
)
return
elif isinstance(geometry, dict):
# if someone passed us a dict of geometry
for key, value in geometry.items():
self.add_geometry(value, geom_name=key)
return
# get or create a name to reference the geometry by
if geom_name is not None:
# if name is passed use it
name = geom_name
elif 'name' in geometry.metadata:
# if name is in metadata use it
name = geometry.metadata['name']
elif 'file_name' in geometry.metadata:
name = geometry.metadata['file_name']
else:
# try to create a simple name
name = 'geometry_' + str(len(self.geometry))
# if its already taken add a unique random string to it
if name in self.geometry:
name += ':' + util.unique_id().upper()
# save the geometry reference
self.geometry[name] = geometry
# create a unique node name if not passed
if node_name is None:
# a random unique identifier
unique = util.unique_id(increment=len(self.geometry))
# geometry name + UUID
node_name = name + '_' + unique.upper()
if transform is None:
# create an identity transform from parent_node
transform = np.eye(4)
self.graph.update(frame_to=node_name,
frame_from=parent_node_name,
matrix=transform,
geometry=name,
geometry_flags={'visible': True})
return node_name | [
"def",
"add_geometry",
"(",
"self",
",",
"geometry",
",",
"node_name",
"=",
"None",
",",
"geom_name",
"=",
"None",
",",
"parent_node_name",
"=",
"None",
",",
"transform",
"=",
"None",
")",
":",
"if",
"geometry",
"is",
"None",
":",
"return",
"# PointCloud objects will look like a sequence",
"elif",
"util",
".",
"is_sequence",
"(",
"geometry",
")",
":",
"# if passed a sequence add all elements",
"for",
"value",
"in",
"geometry",
":",
"self",
".",
"add_geometry",
"(",
"geometry",
"=",
"value",
",",
"node_name",
"=",
"node_name",
",",
"geom_name",
"=",
"geom_name",
",",
"parent_node_name",
"=",
"parent_node_name",
",",
"transform",
"=",
"transform",
",",
")",
"return",
"elif",
"isinstance",
"(",
"geometry",
",",
"dict",
")",
":",
"# if someone passed us a dict of geometry",
"for",
"key",
",",
"value",
"in",
"geometry",
".",
"items",
"(",
")",
":",
"self",
".",
"add_geometry",
"(",
"value",
",",
"geom_name",
"=",
"key",
")",
"return",
"# get or create a name to reference the geometry by",
"if",
"geom_name",
"is",
"not",
"None",
":",
"# if name is passed use it",
"name",
"=",
"geom_name",
"elif",
"'name'",
"in",
"geometry",
".",
"metadata",
":",
"# if name is in metadata use it",
"name",
"=",
"geometry",
".",
"metadata",
"[",
"'name'",
"]",
"elif",
"'file_name'",
"in",
"geometry",
".",
"metadata",
":",
"name",
"=",
"geometry",
".",
"metadata",
"[",
"'file_name'",
"]",
"else",
":",
"# try to create a simple name",
"name",
"=",
"'geometry_'",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"geometry",
")",
")",
"# if its already taken add a unique random string to it",
"if",
"name",
"in",
"self",
".",
"geometry",
":",
"name",
"+=",
"':'",
"+",
"util",
".",
"unique_id",
"(",
")",
".",
"upper",
"(",
")",
"# save the geometry reference",
"self",
".",
"geometry",
"[",
"name",
"]",
"=",
"geometry",
"# create a unique node name if not passed",
"if",
"node_name",
"is",
"None",
":",
"# a random unique identifier",
"unique",
"=",
"util",
".",
"unique_id",
"(",
"increment",
"=",
"len",
"(",
"self",
".",
"geometry",
")",
")",
"# geometry name + UUID",
"node_name",
"=",
"name",
"+",
"'_'",
"+",
"unique",
".",
"upper",
"(",
")",
"if",
"transform",
"is",
"None",
":",
"# create an identity transform from parent_node",
"transform",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"self",
".",
"graph",
".",
"update",
"(",
"frame_to",
"=",
"node_name",
",",
"frame_from",
"=",
"parent_node_name",
",",
"matrix",
"=",
"transform",
",",
"geometry",
"=",
"name",
",",
"geometry_flags",
"=",
"{",
"'visible'",
":",
"True",
"}",
")",
"return",
"node_name"
] | Add a geometry to the scene.
If the mesh has multiple transforms defined in its
metadata, they will all be copied into the
TransformForest of the current scene automatically.
Parameters
----------
geometry : Trimesh, Path2D, Path3D PointCloud or list
Geometry to initially add to the scene
base_frame : str or hashable
Name of base frame
metadata : dict
Any metadata about the scene
graph : TransformForest or None
A passed transform graph to use
Returns
----------
node_name : str
Name of node in self.graph | [
"Add",
"a",
"geometry",
"to",
"the",
"scene",
"."
] | python | train |
deepmipt/DeepPavlov | deeppavlov/settings.py | https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/settings.py#L24-L35 | def main():
"""DeepPavlov console configuration utility."""
args = parser.parse_args()
path = get_settings_path()
if args.default:
if populate_settings_dir(force=True):
print(f'Populated {path} with default settings files')
else:
print(f'{path} is already a default settings directory')
else:
print(f'Current DeepPavlov settings path: {path}') | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"path",
"=",
"get_settings_path",
"(",
")",
"if",
"args",
".",
"default",
":",
"if",
"populate_settings_dir",
"(",
"force",
"=",
"True",
")",
":",
"print",
"(",
"f'Populated {path} with default settings files'",
")",
"else",
":",
"print",
"(",
"f'{path} is already a default settings directory'",
")",
"else",
":",
"print",
"(",
"f'Current DeepPavlov settings path: {path}'",
")"
] | DeepPavlov console configuration utility. | [
"DeepPavlov",
"console",
"configuration",
"utility",
"."
] | python | test |
jbarlow83/OCRmyPDF | src/ocrmypdf/__main__.py | https://github.com/jbarlow83/OCRmyPDF/blob/79c84eefa353632a3d7ccddbd398c6678c1c1777/src/ocrmypdf/__main__.py#L759-L819 | def do_ruffus_exception(ruffus_five_tuple, options, log):
"""Replace the elaborate ruffus stack trace with a user friendly
description of the error message that occurred."""
exit_code = None
_task_name, _job_name, exc_name, exc_value, exc_stack = ruffus_five_tuple
if isinstance(exc_name, type):
# ruffus is full of mystery... sometimes (probably when the process
# group leader is killed) exc_name is the class object of the exception,
# rather than a str. So reach into the object and get its name.
exc_name = exc_name.__name__
if exc_name.startswith('ocrmypdf.exceptions.'):
base_exc_name = exc_name.replace('ocrmypdf.exceptions.', '')
exc_class = getattr(ocrmypdf_exceptions, base_exc_name)
exit_code = getattr(exc_class, 'exit_code', ExitCode.other_error)
try:
if isinstance(exc_value, exc_class):
exc_msg = str(exc_value)
elif isinstance(exc_value, str):
exc_msg = exc_value
else:
exc_msg = str(exc_class())
except Exception:
exc_msg = "Unknown"
if exc_name in ('builtins.SystemExit', 'SystemExit'):
match = re.search(r"\.(.+?)\)", exc_value)
exit_code_name = match.groups()[0]
exit_code = getattr(ExitCode, exit_code_name, 'other_error')
elif exc_name == 'ruffus.ruffus_exceptions.MissingInputFileError':
log.error(cleanup_ruffus_error_message(exc_value))
exit_code = ExitCode.input_file
elif exc_name in ('builtins.KeyboardInterrupt', 'KeyboardInterrupt'):
# We have to print in this case because the log daemon might be toast
print("Interrupted by user", file=sys.stderr)
exit_code = ExitCode.ctrl_c
elif exc_name == 'subprocess.CalledProcessError':
# It's up to the subprocess handler to report something useful
msg = "Error occurred while running this command:"
log.error(msg + '\n' + exc_value)
exit_code = ExitCode.child_process_error
elif exc_name.startswith('ocrmypdf.exceptions.'):
if exc_msg:
log.error(exc_msg)
elif exc_name == 'PIL.Image.DecompressionBombError':
msg = cleanup_ruffus_error_message(exc_value)
msg += (
"\nUse the --max-image-mpixels argument to set increase the "
"maximum number of megapixels to accept."
)
log.error(msg)
exit_code = ExitCode.input_file
if exit_code is not None:
return exit_code
if not options.verbose:
log.error(exc_stack)
return ExitCode.other_error | [
"def",
"do_ruffus_exception",
"(",
"ruffus_five_tuple",
",",
"options",
",",
"log",
")",
":",
"exit_code",
"=",
"None",
"_task_name",
",",
"_job_name",
",",
"exc_name",
",",
"exc_value",
",",
"exc_stack",
"=",
"ruffus_five_tuple",
"if",
"isinstance",
"(",
"exc_name",
",",
"type",
")",
":",
"# ruffus is full of mystery... sometimes (probably when the process",
"# group leader is killed) exc_name is the class object of the exception,",
"# rather than a str. So reach into the object and get its name.",
"exc_name",
"=",
"exc_name",
".",
"__name__",
"if",
"exc_name",
".",
"startswith",
"(",
"'ocrmypdf.exceptions.'",
")",
":",
"base_exc_name",
"=",
"exc_name",
".",
"replace",
"(",
"'ocrmypdf.exceptions.'",
",",
"''",
")",
"exc_class",
"=",
"getattr",
"(",
"ocrmypdf_exceptions",
",",
"base_exc_name",
")",
"exit_code",
"=",
"getattr",
"(",
"exc_class",
",",
"'exit_code'",
",",
"ExitCode",
".",
"other_error",
")",
"try",
":",
"if",
"isinstance",
"(",
"exc_value",
",",
"exc_class",
")",
":",
"exc_msg",
"=",
"str",
"(",
"exc_value",
")",
"elif",
"isinstance",
"(",
"exc_value",
",",
"str",
")",
":",
"exc_msg",
"=",
"exc_value",
"else",
":",
"exc_msg",
"=",
"str",
"(",
"exc_class",
"(",
")",
")",
"except",
"Exception",
":",
"exc_msg",
"=",
"\"Unknown\"",
"if",
"exc_name",
"in",
"(",
"'builtins.SystemExit'",
",",
"'SystemExit'",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r\"\\.(.+?)\\)\"",
",",
"exc_value",
")",
"exit_code_name",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"exit_code",
"=",
"getattr",
"(",
"ExitCode",
",",
"exit_code_name",
",",
"'other_error'",
")",
"elif",
"exc_name",
"==",
"'ruffus.ruffus_exceptions.MissingInputFileError'",
":",
"log",
".",
"error",
"(",
"cleanup_ruffus_error_message",
"(",
"exc_value",
")",
")",
"exit_code",
"=",
"ExitCode",
".",
"input_file",
"elif",
"exc_name",
"in",
"(",
"'builtins.KeyboardInterrupt'",
",",
"'KeyboardInterrupt'",
")",
":",
"# We have to print in this case because the log daemon might be toast",
"print",
"(",
"\"Interrupted by user\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit_code",
"=",
"ExitCode",
".",
"ctrl_c",
"elif",
"exc_name",
"==",
"'subprocess.CalledProcessError'",
":",
"# It's up to the subprocess handler to report something useful",
"msg",
"=",
"\"Error occurred while running this command:\"",
"log",
".",
"error",
"(",
"msg",
"+",
"'\\n'",
"+",
"exc_value",
")",
"exit_code",
"=",
"ExitCode",
".",
"child_process_error",
"elif",
"exc_name",
".",
"startswith",
"(",
"'ocrmypdf.exceptions.'",
")",
":",
"if",
"exc_msg",
":",
"log",
".",
"error",
"(",
"exc_msg",
")",
"elif",
"exc_name",
"==",
"'PIL.Image.DecompressionBombError'",
":",
"msg",
"=",
"cleanup_ruffus_error_message",
"(",
"exc_value",
")",
"msg",
"+=",
"(",
"\"\\nUse the --max-image-mpixels argument to set increase the \"",
"\"maximum number of megapixels to accept.\"",
")",
"log",
".",
"error",
"(",
"msg",
")",
"exit_code",
"=",
"ExitCode",
".",
"input_file",
"if",
"exit_code",
"is",
"not",
"None",
":",
"return",
"exit_code",
"if",
"not",
"options",
".",
"verbose",
":",
"log",
".",
"error",
"(",
"exc_stack",
")",
"return",
"ExitCode",
".",
"other_error"
] | Replace the elaborate ruffus stack trace with a user friendly
description of the error message that occurred. | [
"Replace",
"the",
"elaborate",
"ruffus",
"stack",
"trace",
"with",
"a",
"user",
"friendly",
"description",
"of",
"the",
"error",
"message",
"that",
"occurred",
"."
] | python | train |
davidcarboni/Flask-B3 | b3/__init__.py | https://github.com/davidcarboni/Flask-B3/blob/55092cb1070568aeecfd2c07c5ad6122e15ca345/b3/__init__.py#L42-L85 | def start_span(request_headers=None):
"""Collects incoming B3 headers and sets up values for this request as needed.
The collected/computed values are stored on the application context g using the defined http header names as keys.
:param request_headers: Incoming request headers can be passed explicitly.
If not passed, Flask request.headers will be used. This enables you to pass this function to Flask.before_request().
"""
global debug
try:
headers = request_headers if request_headers else request.headers
except RuntimeError:
# We're probably working outside the Application Context at this point, likely on startup:
# https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context
# We return a dict of empty values so the expected keys are present.
headers = {}
trace_id = headers.get(b3_trace_id)
parent_span_id = headers.get(b3_parent_span_id)
span_id = headers.get(b3_span_id)
sampled = headers.get(b3_sampled)
flags = headers.get(b3_flags)
root_span = not trace_id
# Collect (or generate) a trace ID
setattr(g, b3_trace_id, trace_id or _generate_identifier())
# Parent span, if present
setattr(g, b3_parent_span_id, parent_span_id)
# Collect (or set) the span ID
setattr(g, b3_span_id, span_id or g.get(b3_trace_id))
# Collect the "sampled" flag, if present
# We'll propagate the sampled value unchanged if it's set.
# We're not currently recording traces to Zipkin, so if it's present, follow the standard and propagate it,
# otherwise it's better to leave it out, rather than make it "0".
# This allows downstream services to make a decision if they need to.
setattr(g, b3_sampled, sampled)
# Set or update the debug setting
# We'll set it to "1" if debug=True, otherwise we'll propagate it if present.
setattr(g, b3_flags, "1" if debug else flags)
_info("Server receive. Starting span" if trace_id else "Root span")
_log.debug("Resolved B3 values: {values}".format(values=values())) | [
"def",
"start_span",
"(",
"request_headers",
"=",
"None",
")",
":",
"global",
"debug",
"try",
":",
"headers",
"=",
"request_headers",
"if",
"request_headers",
"else",
"request",
".",
"headers",
"except",
"RuntimeError",
":",
"# We're probably working outside the Application Context at this point, likely on startup:",
"# https://stackoverflow.com/questions/31444036/runtimeerror-working-outside-of-application-context",
"# We return a dict of empty values so the expected keys are present.",
"headers",
"=",
"{",
"}",
"trace_id",
"=",
"headers",
".",
"get",
"(",
"b3_trace_id",
")",
"parent_span_id",
"=",
"headers",
".",
"get",
"(",
"b3_parent_span_id",
")",
"span_id",
"=",
"headers",
".",
"get",
"(",
"b3_span_id",
")",
"sampled",
"=",
"headers",
".",
"get",
"(",
"b3_sampled",
")",
"flags",
"=",
"headers",
".",
"get",
"(",
"b3_flags",
")",
"root_span",
"=",
"not",
"trace_id",
"# Collect (or generate) a trace ID",
"setattr",
"(",
"g",
",",
"b3_trace_id",
",",
"trace_id",
"or",
"_generate_identifier",
"(",
")",
")",
"# Parent span, if present",
"setattr",
"(",
"g",
",",
"b3_parent_span_id",
",",
"parent_span_id",
")",
"# Collect (or set) the span ID",
"setattr",
"(",
"g",
",",
"b3_span_id",
",",
"span_id",
"or",
"g",
".",
"get",
"(",
"b3_trace_id",
")",
")",
"# Collect the \"sampled\" flag, if present",
"# We'll propagate the sampled value unchanged if it's set.",
"# We're not currently recording traces to Zipkin, so if it's present, follow the standard and propagate it,",
"# otherwise it's better to leave it out, rather than make it \"0\".",
"# This allows downstream services to make a decision if they need to.",
"setattr",
"(",
"g",
",",
"b3_sampled",
",",
"sampled",
")",
"# Set or update the debug setting",
"# We'll set it to \"1\" if debug=True, otherwise we'll propagate it if present.",
"setattr",
"(",
"g",
",",
"b3_flags",
",",
"\"1\"",
"if",
"debug",
"else",
"flags",
")",
"_info",
"(",
"\"Server receive. Starting span\"",
"if",
"trace_id",
"else",
"\"Root span\"",
")",
"_log",
".",
"debug",
"(",
"\"Resolved B3 values: {values}\"",
".",
"format",
"(",
"values",
"=",
"values",
"(",
")",
")",
")"
] | Collects incoming B3 headers and sets up values for this request as needed.
The collected/computed values are stored on the application context g using the defined http header names as keys.
:param request_headers: Incoming request headers can be passed explicitly.
If not passed, Flask request.headers will be used. This enables you to pass this function to Flask.before_request(). | [
"Collects",
"incoming",
"B3",
"headers",
"and",
"sets",
"up",
"values",
"for",
"this",
"request",
"as",
"needed",
".",
"The",
"collected",
"/",
"computed",
"values",
"are",
"stored",
"on",
"the",
"application",
"context",
"g",
"using",
"the",
"defined",
"http",
"header",
"names",
"as",
"keys",
".",
":",
"param",
"request_headers",
":",
"Incoming",
"request",
"headers",
"can",
"be",
"passed",
"explicitly",
".",
"If",
"not",
"passed",
"Flask",
"request",
".",
"headers",
"will",
"be",
"used",
".",
"This",
"enables",
"you",
"to",
"pass",
"this",
"function",
"to",
"Flask",
".",
"before_request",
"()",
"."
] | python | train |
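The docstring above points out that start_span can be handed straight to Flask.before_request(). A minimal wiring sketch, assuming the package is importable as b3 (matching the b3/__init__.py path shown); the route is illustrative only::

    from flask import Flask
    import b3

    app = Flask(__name__)
    app.before_request(b3.start_span)   # collect or generate B3 values per request

    @app.route("/ping")
    def ping():
        # flask.g now carries the resolved trace/span identifiers for this request
        return "pong"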
Leeps-Lab/otree-redwood | otree_redwood/models.py | https://github.com/Leeps-Lab/otree-redwood/blob/59212f61a256ef77e0a9ed392ff497ea83ee6245/otree_redwood/models.py#L255-L268 | def _on_decisions_event(self, event=None, **kwargs):
"""Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel.
"""
if not self.ran_ready_function:
logger.warning('ignoring decision from {} before when_all_players_ready: {}'.format(event.participant.code, event.value))
return
with track('_on_decisions_event'):
self.group_decisions[event.participant.code] = event.value
self._group_decisions_updated = True
self.save(update_fields=['group_decisions', '_group_decisions_updated'])
if not self.num_subperiods() and not self.rate_limit():
self.send('group_decisions', self.group_decisions) | [
"def",
"_on_decisions_event",
"(",
"self",
",",
"event",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"ran_ready_function",
":",
"logger",
".",
"warning",
"(",
"'ignoring decision from {} before when_all_players_ready: {}'",
".",
"format",
"(",
"event",
".",
"participant",
".",
"code",
",",
"event",
".",
"value",
")",
")",
"return",
"with",
"track",
"(",
"'_on_decisions_event'",
")",
":",
"self",
".",
"group_decisions",
"[",
"event",
".",
"participant",
".",
"code",
"]",
"=",
"event",
".",
"value",
"self",
".",
"_group_decisions_updated",
"=",
"True",
"self",
".",
"save",
"(",
"update_fields",
"=",
"[",
"'group_decisions'",
",",
"'_group_decisions_updated'",
"]",
")",
"if",
"not",
"self",
".",
"num_subperiods",
"(",
")",
"and",
"not",
"self",
".",
"rate_limit",
"(",
")",
":",
"self",
".",
"send",
"(",
"'group_decisions'",
",",
"self",
".",
"group_decisions",
")"
] | Called when an Event is received on the decisions channel. Saves
the value in group_decisions. If num_subperiods is None, immediately
broadcasts the event back out on the group_decisions channel. | [
"Called",
"when",
"an",
"Event",
"is",
"received",
"on",
"the",
"decisions",
"channel",
".",
"Saves",
"the",
"value",
"in",
"group_decisions",
".",
"If",
"num_subperiods",
"is",
"None",
"immediately",
"broadcasts",
"the",
"event",
"back",
"out",
"on",
"the",
"group_decisions",
"channel",
"."
] | python | train |
gem/oq-engine | openquake/calculators/export/risk.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/risk.py#L131-L148 | def export_avg_losses(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
dskey = ekey[0]
oq = dstore['oqparam']
dt = oq.loss_dt()
name, value, tags = _get_data(dstore, dskey, oq.hazard_stats().items())
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
assets = get_assets(dstore)
for tag, values in zip(tags, value.transpose(1, 0, 2)):
dest = dstore.build_fname(name, tag, 'csv')
array = numpy.zeros(len(values), dt)
for l, lt in enumerate(dt.names):
array[lt] = values[:, l]
writer.save(compose_arrays(assets, array), dest)
return writer.getsaved() | [
"def",
"export_avg_losses",
"(",
"ekey",
",",
"dstore",
")",
":",
"dskey",
"=",
"ekey",
"[",
"0",
"]",
"oq",
"=",
"dstore",
"[",
"'oqparam'",
"]",
"dt",
"=",
"oq",
".",
"loss_dt",
"(",
")",
"name",
",",
"value",
",",
"tags",
"=",
"_get_data",
"(",
"dstore",
",",
"dskey",
",",
"oq",
".",
"hazard_stats",
"(",
")",
".",
"items",
"(",
")",
")",
"writer",
"=",
"writers",
".",
"CsvWriter",
"(",
"fmt",
"=",
"writers",
".",
"FIVEDIGITS",
")",
"assets",
"=",
"get_assets",
"(",
"dstore",
")",
"for",
"tag",
",",
"values",
"in",
"zip",
"(",
"tags",
",",
"value",
".",
"transpose",
"(",
"1",
",",
"0",
",",
"2",
")",
")",
":",
"dest",
"=",
"dstore",
".",
"build_fname",
"(",
"name",
",",
"tag",
",",
"'csv'",
")",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"values",
")",
",",
"dt",
")",
"for",
"l",
",",
"lt",
"in",
"enumerate",
"(",
"dt",
".",
"names",
")",
":",
"array",
"[",
"lt",
"]",
"=",
"values",
"[",
":",
",",
"l",
"]",
"writer",
".",
"save",
"(",
"compose_arrays",
"(",
"assets",
",",
"array",
")",
",",
"dest",
")",
"return",
"writer",
".",
"getsaved",
"(",
")"
] | :param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object | [
":",
"param",
"ekey",
":",
"export",
"key",
"i",
".",
"e",
".",
"a",
"pair",
"(",
"datastore",
"key",
"fmt",
")",
":",
"param",
"dstore",
":",
"datastore",
"object"
] | python | train |
quantum5/2048 | _2048/game.py | https://github.com/quantum5/2048/blob/93ada2e3026eaf154e1bbee943d0500c9253e66f/_2048/game.py#L197-L211 | def _draw_button(self, overlay, text, location):
"""Draws a button on the won and lost overlays, and return its hitbox."""
label = self.button_font.render(text, True, (119, 110, 101))
w, h = label.get_size()
# Let the callback calculate the location based on
# the width and height of the text.
x, y = location(w, h)
# Draw a box with some border space.
pygame.draw.rect(overlay, (238, 228, 218), (x - 5, y - 5, w + 10, h + 10))
overlay.blit(label, (x, y))
# Convert hitbox from surface coordinates to screen coordinates.
x += self.origin[0] - 5
y += self.origin[1] - 5
# Return the hitbox.
return x - 5, y - 5, x + w + 10, y + h + 10 | [
"def",
"_draw_button",
"(",
"self",
",",
"overlay",
",",
"text",
",",
"location",
")",
":",
"label",
"=",
"self",
".",
"button_font",
".",
"render",
"(",
"text",
",",
"True",
",",
"(",
"119",
",",
"110",
",",
"101",
")",
")",
"w",
",",
"h",
"=",
"label",
".",
"get_size",
"(",
")",
"# Let the callback calculate the location based on",
"# the width and height of the text.",
"x",
",",
"y",
"=",
"location",
"(",
"w",
",",
"h",
")",
"# Draw a box with some border space.",
"pygame",
".",
"draw",
".",
"rect",
"(",
"overlay",
",",
"(",
"238",
",",
"228",
",",
"218",
")",
",",
"(",
"x",
"-",
"5",
",",
"y",
"-",
"5",
",",
"w",
"+",
"10",
",",
"h",
"+",
"10",
")",
")",
"overlay",
".",
"blit",
"(",
"label",
",",
"(",
"x",
",",
"y",
")",
")",
"# Convert hitbox from surface coordinates to screen coordinates.",
"x",
"+=",
"self",
".",
"origin",
"[",
"0",
"]",
"-",
"5",
"y",
"+=",
"self",
".",
"origin",
"[",
"1",
"]",
"-",
"5",
"# Return the hitbox.",
"return",
"x",
"-",
"5",
",",
"y",
"-",
"5",
",",
"x",
"+",
"w",
"+",
"10",
",",
"y",
"+",
"h",
"+",
"10"
] | Draws a button on the won and lost overlays, and returns its hitbox. | [
"Draws",
"a",
"button",
"on",
"the",
"won",
"and",
"lost",
"overlays",
"and",
"return",
"its",
"hitbox",
"."
] | python | train |
MouseLand/rastermap | rastermap/isorec.py | https://github.com/MouseLand/rastermap/blob/eee7a46db80b6e33207543778e11618d0fed08a6/rastermap/isorec.py#L363-L413 | def fit(self, X=None, u=None, s = None):
"""Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode
"""
X = X.copy()
X -= X.mean(axis=0)
if self.mode is 'parallel':
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
#X -= X.mean(axis=-1)[:,np.newaxis]
if ((u is None)):
# compute svd and keep iPC's of data
nmin = min([X.shape[0], X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
u = u * sv
NN, self.nPC = u.shape
# first smooth in Y (if n_Y > 0)
self.u = u
if self.mode is 'parallel':
NN = Xall.shape[1]
X = np.zeros((2, NN, u.shape[1]), 'float64')
for j in range(2):
Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
X[j] = Xall[j] @ self.v
nclust = self.n_X
if self.n_components==1 and init_sort.ndim==1:
uinit = uinit[:,np.newaxis]
# now sort in X
Y = self._map(u.copy(), self.n_components)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
"=",
"None",
",",
"u",
"=",
"None",
",",
"s",
"=",
"None",
")",
":",
"X",
"=",
"X",
".",
"copy",
"(",
")",
"X",
"-=",
"X",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"if",
"self",
".",
"mode",
"is",
"'parallel'",
":",
"Xall",
"=",
"X",
".",
"copy",
"(",
")",
"X",
"=",
"np",
".",
"reshape",
"(",
"Xall",
".",
"copy",
"(",
")",
",",
"(",
"-",
"1",
",",
"Xall",
".",
"shape",
"[",
"-",
"1",
"]",
")",
")",
"#X -= X.mean(axis=-1)[:,np.newaxis]",
"if",
"(",
"(",
"u",
"is",
"None",
")",
")",
":",
"# compute svd and keep iPC's of data",
"nmin",
"=",
"min",
"(",
"[",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"X",
".",
"shape",
"[",
"1",
"]",
"]",
")",
"nmin",
"=",
"np",
".",
"minimum",
"(",
"nmin",
"-",
"1",
",",
"self",
".",
"nPC",
")",
"u",
",",
"sv",
",",
"v",
"=",
"svdecon",
"(",
"np",
".",
"float64",
"(",
"X",
")",
",",
"k",
"=",
"nmin",
")",
"u",
"=",
"u",
"*",
"sv",
"NN",
",",
"self",
".",
"nPC",
"=",
"u",
".",
"shape",
"# first smooth in Y (if n_Y > 0)",
"self",
".",
"u",
"=",
"u",
"if",
"self",
".",
"mode",
"is",
"'parallel'",
":",
"NN",
"=",
"Xall",
".",
"shape",
"[",
"1",
"]",
"X",
"=",
"np",
".",
"zeros",
"(",
"(",
"2",
",",
"NN",
",",
"u",
".",
"shape",
"[",
"1",
"]",
")",
",",
"'float64'",
")",
"for",
"j",
"in",
"range",
"(",
"2",
")",
":",
"Xall",
"[",
"j",
"]",
"-=",
"Xall",
"[",
"j",
"]",
".",
"mean",
"(",
"axis",
"=",
"-",
"1",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"X",
"[",
"j",
"]",
"=",
"Xall",
"[",
"j",
"]",
"@",
"self",
".",
"v",
"nclust",
"=",
"self",
".",
"n_X",
"if",
"self",
".",
"n_components",
"==",
"1",
"and",
"init_sort",
".",
"ndim",
"==",
"1",
":",
"uinit",
"=",
"uinit",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"# now sort in X",
"Y",
"=",
"self",
".",
"_map",
"(",
"u",
".",
"copy",
"(",
")",
",",
"self",
".",
"n_components",
")",
"return",
"self"
] | Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode | [
"Fit",
"X",
"into",
"an",
"embedded",
"space",
".",
"Inputs",
"----------",
"X",
":",
"array",
"shape",
"(",
"n_samples",
"n_features",
")",
"u",
"s",
"v",
":",
"svd",
"decomposition",
"of",
"X",
"(",
"optional",
")"
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/nose/plugins/attrib.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/attrib.py#L126-L137 | def get_method_attr(method, cls, attr_name, default = False):
"""Look up an attribute on a method/ function.
If the attribute isn't found there, looking it up in the
method's class, if any.
"""
Missing = object()
value = getattr(method, attr_name, Missing)
if value is Missing and cls is not None:
value = getattr(cls, attr_name, Missing)
if value is Missing:
return default
return value | [
"def",
"get_method_attr",
"(",
"method",
",",
"cls",
",",
"attr_name",
",",
"default",
"=",
"False",
")",
":",
"Missing",
"=",
"object",
"(",
")",
"value",
"=",
"getattr",
"(",
"method",
",",
"attr_name",
",",
"Missing",
")",
"if",
"value",
"is",
"Missing",
"and",
"cls",
"is",
"not",
"None",
":",
"value",
"=",
"getattr",
"(",
"cls",
",",
"attr_name",
",",
"Missing",
")",
"if",
"value",
"is",
"Missing",
":",
"return",
"default",
"return",
"value"
] | Look up an attribute on a method/function.
If the attribute isn't found there, look it up in the
method's class, if any. | [
"Look",
"up",
"an",
"attribute",
"on",
"a",
"method",
"/",
"function",
".",
"If",
"the",
"attribute",
"isn",
"t",
"found",
"there",
"looking",
"it",
"up",
"in",
"the",
"method",
"s",
"class",
"if",
"any",
"."
] | python | test |
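A small illustration of the lookup order described above: the attribute is read from the function first, then from the class, then the default is returned. The class and attribute names are invented for the example::

    def test_fast(self):
        pass
    test_fast.fast = True              # attribute set on the function itself

    class MyTest(object):
        slow = True                    # attribute set on the class
        test_fast = test_fast

    get_method_attr(MyTest.test_fast, MyTest, 'fast')    # True, found on the function
    get_method_attr(MyTest.test_fast, MyTest, 'slow')    # True, falls back to the class
    get_method_attr(MyTest.test_fast, MyTest, 'other')   # False, the default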
saltstack/salt | salt/modules/xapi_virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xapi_virt.py#L200-L240 | def vm_info(vm_=None):
'''
Return detailed information about the vms.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_info
'''
with _get_xapi_session() as xapi:
def _info(vm_):
vm_rec = _get_record_by_label(xapi, 'VM', vm_)
if vm_rec is False:
return False
vm_metrics_rec = _get_metrics_record(xapi, 'VM', vm_rec)
return {'cpu': vm_metrics_rec['VCPUs_number'],
'maxCPU': _get_val(vm_rec, ['VCPUs_max']),
'cputime': vm_metrics_rec['VCPUs_utilisation'],
'disks': get_disks(vm_),
'nics': get_nics(vm_),
'maxMem': int(_get_val(vm_rec, ['memory_dynamic_max'])),
'mem': int(vm_metrics_rec['memory_actual']),
'state': _get_val(vm_rec, ['power_state'])
}
info = {}
if vm_:
ret = _info(vm_)
if ret is not None:
info[vm_] = ret
else:
for vm_ in list_domains():
ret = _info(vm_)
if ret is not None:
info[vm_] = _info(vm_)
return info | [
"def",
"vm_info",
"(",
"vm_",
"=",
"None",
")",
":",
"with",
"_get_xapi_session",
"(",
")",
"as",
"xapi",
":",
"def",
"_info",
"(",
"vm_",
")",
":",
"vm_rec",
"=",
"_get_record_by_label",
"(",
"xapi",
",",
"'VM'",
",",
"vm_",
")",
"if",
"vm_rec",
"is",
"False",
":",
"return",
"False",
"vm_metrics_rec",
"=",
"_get_metrics_record",
"(",
"xapi",
",",
"'VM'",
",",
"vm_rec",
")",
"return",
"{",
"'cpu'",
":",
"vm_metrics_rec",
"[",
"'VCPUs_number'",
"]",
",",
"'maxCPU'",
":",
"_get_val",
"(",
"vm_rec",
",",
"[",
"'VCPUs_max'",
"]",
")",
",",
"'cputime'",
":",
"vm_metrics_rec",
"[",
"'VCPUs_utilisation'",
"]",
",",
"'disks'",
":",
"get_disks",
"(",
"vm_",
")",
",",
"'nics'",
":",
"get_nics",
"(",
"vm_",
")",
",",
"'maxMem'",
":",
"int",
"(",
"_get_val",
"(",
"vm_rec",
",",
"[",
"'memory_dynamic_max'",
"]",
")",
")",
",",
"'mem'",
":",
"int",
"(",
"vm_metrics_rec",
"[",
"'memory_actual'",
"]",
")",
",",
"'state'",
":",
"_get_val",
"(",
"vm_rec",
",",
"[",
"'power_state'",
"]",
")",
"}",
"info",
"=",
"{",
"}",
"if",
"vm_",
":",
"ret",
"=",
"_info",
"(",
"vm_",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"info",
"[",
"vm_",
"]",
"=",
"ret",
"else",
":",
"for",
"vm_",
"in",
"list_domains",
"(",
")",
":",
"ret",
"=",
"_info",
"(",
"vm_",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"info",
"[",
"vm_",
"]",
"=",
"_info",
"(",
"vm_",
")",
"return",
"info"
] | Return detailed information about the vms.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_info | [
"Return",
"detailed",
"information",
"about",
"the",
"vms",
"."
] | python | train |
henzk/ape | ape/__init__.py | https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/__init__.py#L198-L205 | def superimpose(self, module):
"""
superimpose a task module on registered tasks
:param module: ape tasks module that is superimposed on available ape tasks
:return: None
"""
featuremonkey.compose(module, self._tasks)
self._tasks.FEATURE_SELECTION.append(module.__name__) | [
"def",
"superimpose",
"(",
"self",
",",
"module",
")",
":",
"featuremonkey",
".",
"compose",
"(",
"module",
",",
"self",
".",
"_tasks",
")",
"self",
".",
"_tasks",
".",
"FEATURE_SELECTION",
".",
"append",
"(",
"module",
".",
"__name__",
")"
] | superimpose a task module on registered tasks
:param module: ape tasks module that is superimposed on available ape tasks
:return: None | [
"superimpose",
"a",
"task",
"module",
"on",
"registered",
"tasks",
":",
"param",
"module",
":",
"ape",
"tasks",
"module",
"that",
"is",
"superimposed",
"on",
"available",
"ape",
"tasks",
":",
"return",
":",
"None"
] | python | train |
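A hedged sketch of driving the composition step above; `tasks` stands for whatever object exposes superimpose (its construction is not shown in this record), and my_feature.tasks is a hypothetical feature module::

    from my_feature import tasks as my_feature_tasks   # hypothetical feature module

    # refine the currently registered ape tasks with the feature's task module
    tasks.superimpose(my_feature_tasks)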
sublee/etc | etc/adapters/mock.py | https://github.com/sublee/etc/blob/f2be64604da5af0d7739cfacf36f55712f0fc5cb/etc/adapters/mock.py#L87-L101 | def canonicalize(self, include_nodes=True, sorted=False):
"""Generates a canonical :class:`etc.Node` object from this mock node.
"""
node_class = Directory if self.dir else Value
kwargs = {attr: getattr(self, attr) for attr in node_class.__slots__}
if self.dir:
if include_nodes:
nodes = [node.canonicalize() for node in
six.viewvalues(kwargs['nodes'])]
if sorted:
nodes.sort(key=lambda n: n.key)
kwargs['nodes'] = nodes
else:
kwargs['nodes'] = []
return node_class(**kwargs) | [
"def",
"canonicalize",
"(",
"self",
",",
"include_nodes",
"=",
"True",
",",
"sorted",
"=",
"False",
")",
":",
"node_class",
"=",
"Directory",
"if",
"self",
".",
"dir",
"else",
"Value",
"kwargs",
"=",
"{",
"attr",
":",
"getattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"node_class",
".",
"__slots__",
"}",
"if",
"self",
".",
"dir",
":",
"if",
"include_nodes",
":",
"nodes",
"=",
"[",
"node",
".",
"canonicalize",
"(",
")",
"for",
"node",
"in",
"six",
".",
"viewvalues",
"(",
"kwargs",
"[",
"'nodes'",
"]",
")",
"]",
"if",
"sorted",
":",
"nodes",
".",
"sort",
"(",
"key",
"=",
"lambda",
"n",
":",
"n",
".",
"key",
")",
"kwargs",
"[",
"'nodes'",
"]",
"=",
"nodes",
"else",
":",
"kwargs",
"[",
"'nodes'",
"]",
"=",
"[",
"]",
"return",
"node_class",
"(",
"*",
"*",
"kwargs",
")"
] | Generates a canonical :class:`etc.Node` object from this mock node. | [
"Generates",
"a",
"canonical",
":",
"class",
":",
"etc",
".",
"Node",
"object",
"from",
"this",
"mock",
"node",
"."
] | python | train |
wummel/linkchecker | linkcheck/strformat.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/strformat.py#L297-L312 | def limit (s, length=72):
"""If the length of the string exceeds the given limit, it will be cut
off and three dots will be appended.
@param s: the string to limit
@type s: string
@param length: maximum length
@type length: non-negative integer
@return: limited string, at most length+3 characters long
"""
assert length >= 0, "length limit must be a non-negative integer"
if not s or len(s) <= length:
return s
if length == 0:
return ""
return "%s..." % s[:length] | [
"def",
"limit",
"(",
"s",
",",
"length",
"=",
"72",
")",
":",
"assert",
"length",
">=",
"0",
",",
"\"length limit must be a non-negative integer\"",
"if",
"not",
"s",
"or",
"len",
"(",
"s",
")",
"<=",
"length",
":",
"return",
"s",
"if",
"length",
"==",
"0",
":",
"return",
"\"\"",
"return",
"\"%s...\"",
"%",
"s",
"[",
":",
"length",
"]"
] | If the length of the string exceeds the given limit, it will be cut
off and three dots will be appended.
@param s: the string to limit
@type s: string
@param length: maximum length
@type length: non-negative integer
@return: limited string, at most length+3 characters long | [
"If",
"the",
"length",
"of",
"the",
"string",
"exceeds",
"the",
"given",
"limit",
"it",
"will",
"be",
"cut",
"off",
"and",
"three",
"dots",
"will",
"be",
"appended",
"."
] | python | train |
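A quick illustration of the truncation rule above; the ellipsis is only appended when the input really exceeds the limit::

    limit("hello world", 5)   # -> 'hello...'
    limit("hello", 72)        # -> 'hello' (within the limit, returned unchanged)
    limit("hello", 0)         # -> ''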
tensorflow/tensor2tensor | tensor2tensor/layers/common_layers.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L4118-L4151 | def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = layers().InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True | [
"def",
"build",
"(",
"self",
",",
"input_shape",
"=",
"None",
")",
":",
"input_shape",
"=",
"tf",
".",
"TensorShape",
"(",
"input_shape",
")",
".",
"as_list",
"(",
")",
"self",
".",
"input_spec",
"=",
"layers",
"(",
")",
".",
"InputSpec",
"(",
"shape",
"=",
"input_shape",
")",
"if",
"not",
"self",
".",
"layer",
".",
"built",
":",
"self",
".",
"layer",
".",
"build",
"(",
"input_shape",
")",
"self",
".",
"layer",
".",
"built",
"=",
"False",
"if",
"not",
"hasattr",
"(",
"self",
".",
"layer",
",",
"\"kernel\"",
")",
":",
"raise",
"ValueError",
"(",
"\"`WeightNorm` must wrap a layer that\"",
"\" contains a `kernel` for weights\"",
")",
"# The kernel's filter or unit dimension is -1",
"self",
".",
"layer_depth",
"=",
"int",
"(",
"self",
".",
"layer",
".",
"kernel",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"self",
".",
"norm_axes",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"layer",
".",
"kernel",
".",
"shape",
".",
"ndims",
"-",
"1",
")",
")",
"self",
".",
"layer",
".",
"v",
"=",
"self",
".",
"layer",
".",
"kernel",
"self",
".",
"layer",
".",
"g",
"=",
"self",
".",
"layer",
".",
"add_variable",
"(",
"name",
"=",
"\"g\"",
",",
"shape",
"=",
"(",
"self",
".",
"layer_depth",
",",
")",
",",
"initializer",
"=",
"tf",
".",
"ones_initializer",
",",
"dtype",
"=",
"self",
".",
"layer",
".",
"kernel",
".",
"dtype",
",",
"trainable",
"=",
"True",
")",
"# with ops.control_dependencies([self.layer.g.assign(",
"# self._init_norm(self.layer.v))]):",
"# self._compute_weights()",
"self",
".",
"_compute_weights",
"(",
")",
"self",
".",
"layer",
".",
"built",
"=",
"True",
"super",
"(",
"WeightNorm",
",",
"self",
")",
".",
"build",
"(",
")",
"self",
".",
"built",
"=",
"True"
] | Build `Layer`. | [
"Build",
"Layer",
"."
] | python | train |
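A sketch of how a wrapper like this is normally applied, wrapping an inner layer that owns a kernel. The only thing assumed about the constructor is that the wrapped layer is its first argument::

    import tensorflow as tf

    inputs = tf.random_normal([8, 128])    # dummy batch
    dense = tf.layers.Dense(units=64)      # inner layer that will own a `kernel`
    wn_dense = WeightNorm(dense)           # assumed: wrapped layer passed as first argument
    outputs = wn_dense(inputs)             # calling the wrapper triggers build(), creating v and g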
treycucco/bidon | bidon/experimental/transfer_tracker.py | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/experimental/transfer_tracker.py#L54-L82 | def setup(self):
"""Creates the default relations and transfers tables. The SQL used may not work on all
databases. (It was written for SQLite3)
"""
cmds = [
"""
create table if not exists relations (
id integer not null primary key,
name text not null unique,
completed_at datetime
);
""",
"""
create table if not exists transfers (
relation_id integer not null references relations (id) on delete cascade,
old_id text not null,
new_id text,
primary key (relation_id, old_id)
);
""",
"""
create index if not exists transfers_relation_id_idx on transfers (relation_id);
"""
]
for cmd in cmds:
self.data_access.execute(cmd)
self.data_access.commit()
return self | [
"def",
"setup",
"(",
"self",
")",
":",
"cmds",
"=",
"[",
"\"\"\"\n create table if not exists relations (\n id integer not null primary key,\n name text not null unique,\n completed_at datetime\n );\n \"\"\"",
",",
"\"\"\"\n create table if not exists transfers (\n relation_id integer not null references relations (id) on delete cascade,\n old_id text not null,\n new_id text,\n primary key (relation_id, old_id)\n );\n \"\"\"",
",",
"\"\"\"\n create index if not exists transfers_relation_id_idx on transfers (relation_id);\n \"\"\"",
"]",
"for",
"cmd",
"in",
"cmds",
":",
"self",
".",
"data_access",
".",
"execute",
"(",
"cmd",
")",
"self",
".",
"data_access",
".",
"commit",
"(",
")",
"return",
"self"
] | Creates the default relations and transfers tables. The SQL used may not work on all
databases. (It was written for SQLite3) | [
"Creates",
"the",
"default",
"relations",
"and",
"transfers",
"tables",
".",
"The",
"SQL",
"used",
"may",
"not",
"work",
"on",
"all",
"databases",
".",
"(",
"It",
"was",
"written",
"for",
"SQLite3",
")"
] | python | train |
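A hedged usage sketch; TransferTracker is assumed to be the class this method belongs to (the file is transfer_tracker.py), and data_access any bidon data-access object wired to a SQLite connection::

    tracker = TransferTracker(data_access)   # assumed constructor signature
    tracker.setup()                          # creates the relations/transfers tables if missing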
Alveo/pyalveo | pyalveo/pyalveo.py | https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L1317-L1339 | def add_to_item_list_by_name(self, item_urls, item_list_name):
""" Instruct the server to add the given items to the specified
Item List (which will be created if it does not already exist)
:type item_urls: List or ItemGroup
:param item_urls: List of URLs for the items to add,
or an ItemGroup object
:type item_list_name: String
:param item_list_name: name of the item list to retrieve
:rtype: String
:returns: the server success message, if successful
:raises: APIError if the request was not successful
"""
url_name = urlencode((('name', item_list_name),))
request_url = '/item_lists?' + url_name
data = json.dumps({'items': list(item_urls)})
resp = self.api_request(request_url, method='POST', data=data)
return self.__check_success(resp) | [
"def",
"add_to_item_list_by_name",
"(",
"self",
",",
"item_urls",
",",
"item_list_name",
")",
":",
"url_name",
"=",
"urlencode",
"(",
"(",
"(",
"'name'",
",",
"item_list_name",
")",
",",
")",
")",
"request_url",
"=",
"'/item_lists?'",
"+",
"url_name",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'items'",
":",
"list",
"(",
"item_urls",
")",
"}",
")",
"resp",
"=",
"self",
".",
"api_request",
"(",
"request_url",
",",
"method",
"=",
"'POST'",
",",
"data",
"=",
"data",
")",
"return",
"self",
".",
"__check_success",
"(",
"resp",
")"
] | Instruct the server to add the given items to the specified
Item List (which will be created if it does not already exist)
:type item_urls: List or ItemGroup
:param item_urls: List of URLs for the items to add,
or an ItemGroup object
:type item_list_name: String
:param item_list_name: name of the item list to retrieve
:rtype: String
:returns: the server success message, if successful
:raises: APIError if the request was not successful | [
"Instruct",
"the",
"server",
"to",
"add",
"the",
"given",
"items",
"to",
"the",
"specified",
"Item",
"List",
"(",
"which",
"will",
"be",
"created",
"if",
"it",
"does",
"not",
"already",
"exist",
")"
] | python | train |
minhhoit/yacms | yacms/core/views.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/views.py#L224-L231 | def server_error(request, template_name="errors/500.html"):
"""
Mimics Django's error handler but adds ``STATIC_URL`` to the
context.
"""
context = {"STATIC_URL": settings.STATIC_URL}
t = get_template(template_name)
return HttpResponseServerError(t.render(context, request)) | [
"def",
"server_error",
"(",
"request",
",",
"template_name",
"=",
"\"errors/500.html\"",
")",
":",
"context",
"=",
"{",
"\"STATIC_URL\"",
":",
"settings",
".",
"STATIC_URL",
"}",
"t",
"=",
"get_template",
"(",
"template_name",
")",
"return",
"HttpResponseServerError",
"(",
"t",
".",
"render",
"(",
"context",
",",
"request",
")",
")"
] | Mimics Django's error handler but adds ``STATIC_URL`` to the
context. | [
"Mimics",
"Django",
"s",
"error",
"handler",
"but",
"adds",
"STATIC_URL",
"to",
"the",
"context",
"."
] | python | train |
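The view follows Django's convention for custom 500 handlers, so a project would normally point handler500 at it from the root URLconf. The dotted path below is inferred from the file path shown above::

    # urls.py
    handler500 = "yacms.core.views.server_error"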
bastibe/SoundFile | soundfile.py | https://github.com/bastibe/SoundFile/blob/161e930da9c9ea76579b6ee18a131e10bca8a605/soundfile.py#L1472-L1480 | def _format_info(format_int, format_flag=_snd.SFC_GET_FORMAT_INFO):
"""Return the ID and short description of a given format."""
format_info = _ffi.new("SF_FORMAT_INFO*")
format_info.format = format_int
_snd.sf_command(_ffi.NULL, format_flag, format_info,
_ffi.sizeof("SF_FORMAT_INFO"))
name = format_info.name
return (_format_str(format_info.format),
_ffi.string(name).decode('utf-8', 'replace') if name else "") | [
"def",
"_format_info",
"(",
"format_int",
",",
"format_flag",
"=",
"_snd",
".",
"SFC_GET_FORMAT_INFO",
")",
":",
"format_info",
"=",
"_ffi",
".",
"new",
"(",
"\"SF_FORMAT_INFO*\"",
")",
"format_info",
".",
"format",
"=",
"format_int",
"_snd",
".",
"sf_command",
"(",
"_ffi",
".",
"NULL",
",",
"format_flag",
",",
"format_info",
",",
"_ffi",
".",
"sizeof",
"(",
"\"SF_FORMAT_INFO\"",
")",
")",
"name",
"=",
"format_info",
".",
"name",
"return",
"(",
"_format_str",
"(",
"format_info",
".",
"format",
")",
",",
"_ffi",
".",
"string",
"(",
"name",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"'replace'",
")",
"if",
"name",
"else",
"\"\"",
")"
] | Return the ID and short description of a given format. | [
"Return",
"the",
"ID",
"and",
"short",
"description",
"of",
"a",
"given",
"format",
"."
] | python | train |
danilobellini/audiolazy | examples/save_and_memoize_synth.py | https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/examples/save_and_memoize_synth.py#L92-L110 | def new_note_track(env, synth):
"""
Audio track with the frequencies.
Parameters
----------
env:
Envelope Stream (which imposes the duration).
synth:
One-argument function that receives a frequency (in rad/sample) and
returns a Stream instance (a synthesized note).
Returns
-------
Endless Stream instance that joins synthesized notes.
"""
list_env = list(env)
return chain.from_iterable(synth(freq) * list_env for freq in freq_gen()) | [
"def",
"new_note_track",
"(",
"env",
",",
"synth",
")",
":",
"list_env",
"=",
"list",
"(",
"env",
")",
"return",
"chain",
".",
"from_iterable",
"(",
"synth",
"(",
"freq",
")",
"*",
"list_env",
"for",
"freq",
"in",
"freq_gen",
"(",
")",
")"
] | Audio track with the frequencies.
Parameters
----------
env:
Envelope Stream (which imposes the duration).
synth:
One-argument function that receives a frequency (in rad/sample) and
returns a Stream instance (a synthesized note).
Returns
-------
Endless Stream instance that joins synthesized notes. | [
"Audio",
"track",
"with",
"the",
"frequencies",
"."
] | python | train |
michael-lazar/rtv | rtv/packages/praw/objects.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/objects.py#L994-L1010 | def get_upvoted(self, *args, **kwargs):
"""Return a listing of the Submissions the user has upvoted.
:returns: get_content generator of Submission items.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
As a default, this listing is only accessible by the user, thereby
requiring either user/pswd authentication or OAuth authentication
with the 'history' scope. Users may choose to make their voting record
public by changing a user preference. In this case, no authentication
will be needed to access this listing.
"""
kwargs['_use_oauth'] = self.reddit_session.is_oauth_session()
return _get_redditor_listing('upvoted')(self, *args, **kwargs) | [
"def",
"get_upvoted",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_use_oauth'",
"]",
"=",
"self",
".",
"reddit_session",
".",
"is_oauth_session",
"(",
")",
"return",
"_get_redditor_listing",
"(",
"'upvoted'",
")",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return a listing of the Submissions the user has upvoted.
:returns: get_content generator of Submission items.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
As a default, this listing is only accessible by the user, thereby
requiring either user/pswd authentication or OAuth authentication
with the 'history' scope. Users may choose to make their voting record
public by changing a user preference. In this case, no authentication
will be needed to access this listing. | [
"Return",
"a",
"listing",
"of",
"the",
"Submissions",
"the",
"user",
"has",
"upvoted",
"."
] | python | train |
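A short, hedged example of consuming this listing; it assumes the method lives on a Redditor object obtained from an authenticated session, which is what the surrounding objects.py suggests::

    user = reddit_session.get_redditor("some_user")   # assumed Redditor lookup helper
    for submission in user.get_upvoted(limit=10):
        print(submission)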
chinapnr/fishbase | fishbase/fish_data.py | https://github.com/chinapnr/fishbase/blob/23c5147a6bc0d8ed36409e55352ffb2c5b0edc82/fishbase/fish_data.py#L197-L281 | def get_zone_info(cls, area_str, match_type='EXACT', result_type='LIST'):
"""
Given a string containing province, city or district information, return the corresponding area code(s);
:param:
* area_str: (string) the area to look up: province, city or district information, e.g. 北京市
* match_type: (string) match mode; the default 'EXACT' means exact matching, 'FUZZY' selects fuzzy matching
* result_type: (string) result type; the default 'LIST' returns a list, 'SINGLE_STR' returns only the first area code as a string
:returns:
* depending on result_type the return value is a list or a single string; the list holds tuples such as [('110000', '北京市')], where the first element is the area code
and the second element is the corresponding area name; at most 20 results are returned.
Example::
from fishbase.fish_data import *
print('--- fish_data get_zone_info demo ---')
result = IdCard.get_zone_info(area_str='北京市')
print(result)
# fuzzy query
result = IdCard.get_zone_info(area_str='西安市', match_type='FUZZY')
print(result)
result0 = []
for i in result:
result0.append(i[0])
print('---西安市---')
print(len(result0))
print(result0)
# fuzzy query, result type set to SINGLE_STR
result = IdCard.get_zone_info(area_str='西安市', match_type='FUZZY', result_type='SINGLE_STR')
print(result)
# fuzzy query with SINGLE_STR: note the difference between 西安市 and 西安
result = IdCard.get_zone_info(area_str='西安', match_type='FUZZY', result_type='SINGLE_STR')
print(result)
print('---')
Output::
--- fish_data get_zone_info demo ---
[('110000', '北京市')]
130522198407316471 True
---西安市---
11
['610100', '610101', '610102', '610103', '610104', '610111', '610112', '610113', '610114', '610115',
'610116']
610100
220403
---
"""
values = []
if match_type == 'EXACT':
values = sqlite_query('fish_data.sqlite',
'select zone, areanote from cn_idcard where areanote = :area', {"area": area_str})
if match_type == 'FUZZY':
values = sqlite_query('fish_data.sqlite',
'select zone, areanote from cn_idcard where areanote like :area',
{"area": '%' + area_str + '%'})
# result_type 结果数量判断处理
if result_type == 'LIST':
# 如果返回记录多,大于 20 项,只返回前面 20 个结果
if len(values) > 20:
values = values[0:20]
return values
if result_type == 'SINGLE_STR':
if len(values) == 0:
return ''
if len(values) > 0:
value_str = values[0][0]
return value_str | [
"def",
"get_zone_info",
"(",
"cls",
",",
"area_str",
",",
"match_type",
"=",
"'EXACT'",
",",
"result_type",
"=",
"'LIST'",
")",
":",
"values",
"=",
"[",
"]",
"if",
"match_type",
"==",
"'EXACT'",
":",
"values",
"=",
"sqlite_query",
"(",
"'fish_data.sqlite'",
",",
"'select zone, areanote from cn_idcard where areanote = :area'",
",",
"{",
"\"area\"",
":",
"area_str",
"}",
")",
"if",
"match_type",
"==",
"'FUZZY'",
":",
"values",
"=",
"sqlite_query",
"(",
"'fish_data.sqlite'",
",",
"'select zone, areanote from cn_idcard where areanote like :area'",
",",
"{",
"\"area\"",
":",
"'%'",
"+",
"area_str",
"+",
"'%'",
"}",
")",
"# result_type 结果数量判断处理",
"if",
"result_type",
"==",
"'LIST'",
":",
"# 如果返回记录多,大于 20 项,只返回前面 20 个结果",
"if",
"len",
"(",
"values",
")",
">",
"20",
":",
"values",
"=",
"values",
"[",
"0",
":",
"20",
"]",
"return",
"values",
"if",
"result_type",
"==",
"'SINGLE_STR'",
":",
"if",
"len",
"(",
"values",
")",
"==",
"0",
":",
"return",
"''",
"if",
"len",
"(",
"values",
")",
">",
"0",
":",
"value_str",
"=",
"values",
"[",
"0",
"]",
"[",
"0",
"]",
"return",
"value_str"
] | Given a string containing province, city or district information, return the corresponding area code(s);
:param:
* area_str: (string) the area to look up: province, city or district information, e.g. 北京市
* match_type: (string) match mode; the default 'EXACT' means exact matching, 'FUZZY' selects fuzzy matching
* result_type: (string) result type; the default 'LIST' returns a list, 'SINGLE_STR' returns only the first area code as a string
:returns:
* depending on result_type the return value is a list or a single string; the list holds tuples such as [('110000', '北京市')], where the first element is the area code
and the second element is the corresponding area name; at most 20 results are returned.
Example::
from fishbase.fish_data import *
print('--- fish_data get_zone_info demo ---')
result = IdCard.get_zone_info(area_str='北京市')
print(result)
# fuzzy query
result = IdCard.get_zone_info(area_str='西安市', match_type='FUZZY')
print(result)
result0 = []
for i in result:
result0.append(i[0])
print('---西安市---')
print(len(result0))
print(result0)
# fuzzy query, result type set to SINGLE_STR
result = IdCard.get_zone_info(area_str='西安市', match_type='FUZZY', result_type='SINGLE_STR')
print(result)
# fuzzy query with SINGLE_STR: note the difference between 西安市 and 西安
result = IdCard.get_zone_info(area_str='西安', match_type='FUZZY', result_type='SINGLE_STR')
print(result)
print('---')
Output::
--- fish_data get_zone_info demo ---
[('110000', '北京市')]
130522198407316471 True
---西安市---
11
['610100', '610101', '610102', '610103', '610104', '610111', '610112', '610113', '610114', '610115',
'610116']
610100
220403
--- | [
"输入包含省份、城市、地区信息的内容,返回地区编号;"
] | python | train |
pyokagan/pyglreg | glreg.py | https://github.com/pyokagan/pyglreg/blob/68fa5a6c6cee8667879840fbbcc7d30f52852915/glreg.py#L533-L552 | def get_requires(self, api=None, profile=None, support=None):
"""Returns filtered list of Require objects in this registry
:param str api: Return Require objects with this api name or None to
return all Require objects.
:param str profile: Return Require objects with this profile or None
to return all Require objects.
:param str support: Return Require objects with this extension support
string or None to return all Require objects.
:return: list of Require objects
"""
out = []
for ft in self.get_features(api):
out.extend(ft.get_requires(profile))
for ext in self.extensions.values():
# Filter extension support
if support and support not in ext.supported:
continue
out.extend(ext.get_requires(api, profile))
return out | [
"def",
"get_requires",
"(",
"self",
",",
"api",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"support",
"=",
"None",
")",
":",
"out",
"=",
"[",
"]",
"for",
"ft",
"in",
"self",
".",
"get_features",
"(",
"api",
")",
":",
"out",
".",
"extend",
"(",
"ft",
".",
"get_requires",
"(",
"profile",
")",
")",
"for",
"ext",
"in",
"self",
".",
"extensions",
".",
"values",
"(",
")",
":",
"# Filter extension support",
"if",
"support",
"and",
"support",
"not",
"in",
"ext",
".",
"supported",
":",
"continue",
"out",
".",
"extend",
"(",
"ext",
".",
"get_requires",
"(",
"api",
",",
"profile",
")",
")",
"return",
"out"
] | Returns filtered list of Require objects in this registry
:param str api: Return Require objects with this api name or None to
return all Require objects.
:param str profile: Return Require objects with this profile or None
to return all Require objects.
:param str support: Return Require objects with this extension support
string or None to return all Require objects.
:return: list of Require objects | [
"Returns",
"filtered",
"list",
"of",
"Require",
"objects",
"in",
"this",
"registry"
] | python | train |
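A hedged sketch of filtering with this method; `registry` stands for an already-parsed Registry instance, and how it is loaded from gl.xml is assumed rather than shown::

    # everything an OpenGL core-profile context is required to support
    requires = registry.get_requires(api='gl', profile='core')
    for req in requires:
        print(req)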
openid/JWTConnect-Python-CryptoJWT | src/cryptojwt/jwk/rsa.py | https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/jwk/rsa.py#L244-L259 | def cmp_private_numbers(pn1, pn2):
"""
Compare 2 sets of private numbers. This is for comparing 2
private RSA keys.
:param pn1: The set of values belonging to the 1st key
:param pn2: The set of values belonging to the 2nd key
:return: True if the sets are the same, otherwise False.
"""
if not cmp_public_numbers(pn1.public_numbers, pn2.public_numbers):
return False
for param in ['d', 'p', 'q']:
if getattr(pn1, param) != getattr(pn2, param):
return False
return True | [
"def",
"cmp_private_numbers",
"(",
"pn1",
",",
"pn2",
")",
":",
"if",
"not",
"cmp_public_numbers",
"(",
"pn1",
".",
"public_numbers",
",",
"pn2",
".",
"public_numbers",
")",
":",
"return",
"False",
"for",
"param",
"in",
"[",
"'d'",
",",
"'p'",
",",
"'q'",
"]",
":",
"if",
"getattr",
"(",
"pn1",
",",
"param",
")",
"!=",
"getattr",
"(",
"pn2",
",",
"param",
")",
":",
"return",
"False",
"return",
"True"
] | Compare 2 sets of private numbers. This is for comparing 2
private RSA keys.
:param pn1: The set of values belonging to the 1st key
:param pn2: The set of values belonging to the 2nd key
:return: True if the sets are the same, otherwise False. | [
"Compare",
"2",
"sets",
"of",
"private",
"numbers",
".",
"This",
"is",
"for",
"comparing",
"2",
"private",
"RSA",
"keys",
"."
] | python | train |
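A small self-check using the cryptography package, which is where RSA private-number objects carrying public_numbers, d, p and q normally come from; the key size is arbitrary::

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.asymmetric import rsa

    key_a = rsa.generate_private_key(65537, 2048, default_backend())
    key_b = rsa.generate_private_key(65537, 2048, default_backend())

    cmp_private_numbers(key_a.private_numbers(), key_a.private_numbers())  # True: same key
    cmp_private_numbers(key_a.private_numbers(), key_b.private_numbers())  # False: different keys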
opencobra/cobrapy | cobra/manipulation/delete.py | https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/manipulation/delete.py#L36-L56 | def prune_unused_reactions(cobra_model):
"""Remove reactions with no assigned metabolites, returns pruned model
Parameters
----------
cobra_model: class:`~cobra.core.Model.Model` object
the model to remove unused reactions from
Returns
-------
output_model: class:`~cobra.core.Model.Model` object
input model with unused reactions removed
reactions_to_prune: list of class:`~cobra.core.reaction.Reaction`
list of reactions that were removed
"""
output_model = cobra_model.copy()
reactions_to_prune = [r for r in output_model.reactions
if len(r.metabolites) == 0]
output_model.remove_reactions(reactions_to_prune)
return output_model, reactions_to_prune | [
"def",
"prune_unused_reactions",
"(",
"cobra_model",
")",
":",
"output_model",
"=",
"cobra_model",
".",
"copy",
"(",
")",
"reactions_to_prune",
"=",
"[",
"r",
"for",
"r",
"in",
"output_model",
".",
"reactions",
"if",
"len",
"(",
"r",
".",
"metabolites",
")",
"==",
"0",
"]",
"output_model",
".",
"remove_reactions",
"(",
"reactions_to_prune",
")",
"return",
"output_model",
",",
"reactions_to_prune"
] | Remove reactions with no assigned metabolites, returns pruned model
Parameters
----------
cobra_model: class:`~cobra.core.Model.Model` object
the model to remove unused reactions from
Returns
-------
output_model: class:`~cobra.core.Model.Model` object
input model with unused reactions removed
reactions_to_prune: list of class:`~cobra.core.reaction.Reaction`
list of reactions that were removed | [
"Remove",
"reactions",
"with",
"no",
"assigned",
"metabolites",
"returns",
"pruned",
"model"
] | python | valid |
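Typical use, following the docstring: the function copies the model rather than mutating it, so both the pruned copy and the removed reactions come back. The SBML file name is only an example::

    import cobra

    model = cobra.io.read_sbml_model("e_coli_core.xml")
    pruned_model, removed = prune_unused_reactions(model)
    print(len(removed), "reactions had no metabolites")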
ethereum/lahja | lahja/endpoint.py | https://github.com/ethereum/lahja/blob/e3993c5892232887a11800ed3e66332febcee96b/lahja/endpoint.py#L188-L196 | async def start_serving(self,
connection_config: ConnectionConfig,
loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
"""
Start serving this :class:`~lahja.endpoint.Endpoint` so that it can receive events. Await
until the :class:`~lahja.endpoint.Endpoint` is ready.
"""
self.start_serving_nowait(connection_config, loop)
await self.wait_until_serving() | [
"async",
"def",
"start_serving",
"(",
"self",
",",
"connection_config",
":",
"ConnectionConfig",
",",
"loop",
":",
"Optional",
"[",
"asyncio",
".",
"AbstractEventLoop",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"start_serving_nowait",
"(",
"connection_config",
",",
"loop",
")",
"await",
"self",
".",
"wait_until_serving",
"(",
")"
] | Start serving this :class:`~lahja.endpoint.Endpoint` so that it can receive events. Await
until the :class:`~lahja.endpoint.Endpoint` is ready. | [
"Start",
"serving",
"this",
":",
"class",
":",
"~lahja",
".",
"endpoint",
".",
"Endpoint",
"so",
"that",
"it",
"can",
"receive",
"events",
".",
"Await",
"until",
"the",
":",
"class",
":",
"~lahja",
".",
"endpoint",
".",
"Endpoint",
"is",
"ready",
"."
] | python | train |
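A hedged async sketch of bringing an endpoint up. ConnectionConfig.from_name is assumed to be the usual way a named IPC path is built in this library, the imports are assumed to resolve from the package root, and the endpoint name is arbitrary::

    import asyncio
    from lahja import Endpoint, ConnectionConfig

    async def main():
        endpoint = Endpoint()
        await endpoint.start_serving(ConnectionConfig.from_name("demo-endpoint"))
        # ... connect_to_endpoints / broadcast / subscribe here ...
        endpoint.stop()

    asyncio.get_event_loop().run_until_complete(main())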
UCBerkeleySETI/blimpy | blimpy/calib_utils/fluxcal.py | https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/calib_utils/fluxcal.py#L92-L137 | def integrate_calib(name,chan_per_coarse,fullstokes=False,**kwargs):
'''
Folds Stokes I noise diode data and integrates along coarse channels
Parameters
----------
name : str
Path to noise diode filterbank file
chan_per_coarse : int
Number of frequency bins per coarse channel
fullstokes : boolean
Use fullstokes=True if data is in IQUV format or just Stokes I, use fullstokes=False if
it is in cross_pols format
'''
#Load data
obs = Waterfall(name,max_load=150)
data = obs.data
#If the data has cross_pols format calculate Stokes I
if fullstokes==False and data.shape[1]>1:
data = data[:,0,:]+data[:,1,:]
data = np.expand_dims(data,axis=1)
#If the data has IQUV format get Stokes I
if fullstokes==True:
data = data[:,0,:]
data = np.expand_dims(data,axis=1)
tsamp = obs.header['tsamp']
#Calculate ON and OFF values
OFF,ON = foldcal(data,tsamp,**kwargs)
freqs = obs.populate_freqs()
#Find ON and OFF spectra by coarse channel
ON_int = integrate_chans(ON,freqs,chan_per_coarse)
OFF_int = integrate_chans(OFF,freqs,chan_per_coarse)
#If "ON" is actually "OFF" switch them
if np.sum(ON_int)<np.sum(OFF_int):
temp = ON_int
ON_int = OFF_int
OFF_int = temp
#Return coarse channel spectrum of OFF and ON
return OFF_int,ON_int | [
"def",
"integrate_calib",
"(",
"name",
",",
"chan_per_coarse",
",",
"fullstokes",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"#Load data",
"obs",
"=",
"Waterfall",
"(",
"name",
",",
"max_load",
"=",
"150",
")",
"data",
"=",
"obs",
".",
"data",
"#If the data has cross_pols format calculate Stokes I",
"if",
"fullstokes",
"==",
"False",
"and",
"data",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"data",
"=",
"data",
"[",
":",
",",
"0",
",",
":",
"]",
"+",
"data",
"[",
":",
",",
"1",
",",
":",
"]",
"data",
"=",
"np",
".",
"expand_dims",
"(",
"data",
",",
"axis",
"=",
"1",
")",
"#If the data has IQUV format get Stokes I",
"if",
"fullstokes",
"==",
"True",
":",
"data",
"=",
"data",
"[",
":",
",",
"0",
",",
":",
"]",
"data",
"=",
"np",
".",
"expand_dims",
"(",
"data",
",",
"axis",
"=",
"1",
")",
"tsamp",
"=",
"obs",
".",
"header",
"[",
"'tsamp'",
"]",
"#Calculate ON and OFF values",
"OFF",
",",
"ON",
"=",
"foldcal",
"(",
"data",
",",
"tsamp",
",",
"*",
"*",
"kwargs",
")",
"freqs",
"=",
"obs",
".",
"populate_freqs",
"(",
")",
"#Find ON and OFF spectra by coarse channel",
"ON_int",
"=",
"integrate_chans",
"(",
"ON",
",",
"freqs",
",",
"chan_per_coarse",
")",
"OFF_int",
"=",
"integrate_chans",
"(",
"OFF",
",",
"freqs",
",",
"chan_per_coarse",
")",
"#If \"ON\" is actually \"OFF\" switch them",
"if",
"np",
".",
"sum",
"(",
"ON_int",
")",
"<",
"np",
".",
"sum",
"(",
"OFF_int",
")",
":",
"temp",
"=",
"ON_int",
"ON_int",
"=",
"OFF_int",
"OFF_int",
"=",
"temp",
"#Return coarse channel spectrum of OFF and ON",
"return",
"OFF_int",
",",
"ON_int"
] | Folds Stokes I noise diode data and integrates along coarse channels
Parameters
----------
name : str
Path to noise diode filterbank file
chan_per_coarse : int
Number of frequency bins per coarse channel
fullstokes : boolean
Use fullstokes=True if data is in IQUV format or just Stokes I, use fullstokes=False if
it is in cross_pols format | [
"Folds",
"Stokes",
"I",
"noise",
"diode",
"data",
"and",
"integrates",
"along",
"coarse",
"channels"
] | python | test |
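A minimal call sketch for the helper above; the filterbank file name and the channels-per-coarse value are placeholders that depend on the actual observation::

    # coarse-channel OFF/ON spectra from a noise-diode observation in cross_pols format
    OFF_spec, ON_spec = integrate_calib("diode_obs.fil", chan_per_coarse=8, fullstokes=False)
    print(OFF_spec.shape, ON_spec.shape)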
erdc/RAPIDpy | RAPIDpy/postprocess/goodness_of_fit.py | https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/postprocess/goodness_of_fit.py#L21-L32 | def filter_nan(s, o):
"""
this function removes the data from simulated and observed data
wherever the observed data contains nan
this is used by all other functions, otherwise they will produce nan as
output
"""
data = np.array([s.flatten(), o.flatten()])
data = np.transpose(data)
data = data[~np.isnan(data).any(1)]
return data[:, 0], data[:, 1] | [
"def",
"filter_nan",
"(",
"s",
",",
"o",
")",
":",
"data",
"=",
"np",
".",
"array",
"(",
"[",
"s",
".",
"flatten",
"(",
")",
",",
"o",
".",
"flatten",
"(",
")",
"]",
")",
"data",
"=",
"np",
".",
"transpose",
"(",
"data",
")",
"data",
"=",
"data",
"[",
"~",
"np",
".",
"isnan",
"(",
"data",
")",
".",
"any",
"(",
"1",
")",
"]",
"return",
"data",
"[",
":",
",",
"0",
"]",
",",
"data",
"[",
":",
",",
"1",
"]"
] | this function removes the data from simulated and observed data
wherever the observed data contains nan
this is used by all other functions, otherwise they will produce nan as
output | [
"this",
"functions",
"removed",
"the",
"data",
"from",
"simulated",
"and",
"observed",
"data",
"whereever",
"the",
"observed",
"data",
"contains",
"nan"
] | python | train |
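A tiny numeric check of the masking behaviour; a pair is dropped whenever either value in the row is NaN, because the mask is applied across both columns::

    import numpy as np

    sim = np.array([1.0, 2.0, 3.0, 4.0])
    obs = np.array([1.1, np.nan, 2.9, 4.2])
    s, o = filter_nan(sim, obs)
    # s -> array([1., 3., 4.]),  o -> array([1.1, 2.9, 4.2])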
wbond/oscrypto | oscrypto/_openssl/_libcrypto.py | https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_openssl/_libcrypto.py#L57-L85 | def handle_openssl_error(result, exception_class=None):
"""
Checks if an error occurred, and if so throws an OSError containing the
last OpenSSL error message
:param result:
An integer result code - 1 or greater indicates success
:param exception_class:
The exception class to use for the exception if an error occurred
:raises:
OSError - when an OpenSSL error occurs
"""
if result > 0:
return
if exception_class is None:
exception_class = OSError
error_num = libcrypto.ERR_get_error()
buffer = buffer_from_bytes(120)
libcrypto.ERR_error_string(error_num, buffer)
# Since we are dealing with a string, it is NULL terminated
error_string = byte_string_from_buffer(buffer)
raise exception_class(_try_decode(error_string)) | [
"def",
"handle_openssl_error",
"(",
"result",
",",
"exception_class",
"=",
"None",
")",
":",
"if",
"result",
">",
"0",
":",
"return",
"if",
"exception_class",
"is",
"None",
":",
"exception_class",
"=",
"OSError",
"error_num",
"=",
"libcrypto",
".",
"ERR_get_error",
"(",
")",
"buffer",
"=",
"buffer_from_bytes",
"(",
"120",
")",
"libcrypto",
".",
"ERR_error_string",
"(",
"error_num",
",",
"buffer",
")",
"# Since we are dealing with a string, it is NULL terminated",
"error_string",
"=",
"byte_string_from_buffer",
"(",
"buffer",
")",
"raise",
"exception_class",
"(",
"_try_decode",
"(",
"error_string",
")",
")"
] | Checks if an error occurred, and if so throws an OSError containing the
last OpenSSL error message
:param result:
An integer result code - 1 or greater indicates success
:param exception_class:
The exception class to use for the exception if an error occurred
:raises:
OSError - when an OpenSSL error occurs | [
"Checks",
"if",
"an",
"error",
"occured",
"and",
"if",
"so",
"throws",
"an",
"OSError",
"containing",
"the",
"last",
"OpenSSL",
"error",
"message"
] | python | valid |
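A hedged sketch of the calling pattern used throughout this backend: run a libcrypto call that reports success as a positive return code, then hand the code to the checker. RAND_bytes is used purely as an illustrative binding here::

    buf = buffer_from_bytes(16)
    result = libcrypto.RAND_bytes(buf, 16)   # 1 on success
    handle_openssl_error(result)             # raises OSError with the last OpenSSL error string on failure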
rhgrant10/Groupy | groupy/utils.py | https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/utils.py#L89-L104 | def find(self, objects):
"""Find exactly one match in the list of objects.
:param objects: objects to filter
:type objects: :class:`list`
:return: the one matching object
:raises groupy.exceptions.NoMatchesError: if no objects match
:raises groupy.exceptions.MultipleMatchesError: if multiple objects match
"""
matches = list(self.__call__(objects))
if not matches:
raise exceptions.NoMatchesError(objects, self.tests)
elif len(matches) > 1:
raise exceptions.MultipleMatchesError(objects, self.tests,
matches=matches)
return matches[0] | [
"def",
"find",
"(",
"self",
",",
"objects",
")",
":",
"matches",
"=",
"list",
"(",
"self",
".",
"__call__",
"(",
"objects",
")",
")",
"if",
"not",
"matches",
":",
"raise",
"exceptions",
".",
"NoMatchesError",
"(",
"objects",
",",
"self",
".",
"tests",
")",
"elif",
"len",
"(",
"matches",
")",
">",
"1",
":",
"raise",
"exceptions",
".",
"MultipleMatchesError",
"(",
"objects",
",",
"self",
".",
"tests",
",",
"matches",
"=",
"matches",
")",
"return",
"matches",
"[",
"0",
"]"
] | Find exactly one match in the list of objects.
:param objects: objects to filter
:type objects: :class:`list`
:return: the one matching object
:raises groupy.exceptions.NoMatchesError: if no objects match
:raises groupy.exceptions.MultipleMatchesError: if multiple objects match | [
"Find",
"exactly",
"one",
"match",
"in",
"the",
"list",
"of",
"objects",
"."
] | python | train |
broadinstitute/fiss | firecloud/api.py | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/api.py#L918-L935 | def update_repository_config_acl(namespace, config, snapshot_id, acl_updates):
"""Set configuration permissions.
The configuration should exist in the methods repository.
Args:
namespace (str): Configuration namespace
config (str): Configuration name
snapshot_id (int): snapshot_id of the method
acl_updates (list(dict)): List of access control updates
Swagger:
https://api.firecloud.org/#!/Method_Repository/setConfigACL
"""
uri = "configurations/{0}/{1}/{2}/permissions".format(namespace,
config, snapshot_id)
return __post(uri, json=acl_updates) | [
"def",
"update_repository_config_acl",
"(",
"namespace",
",",
"config",
",",
"snapshot_id",
",",
"acl_updates",
")",
":",
"uri",
"=",
"\"configurations/{0}/{1}/{2}/permissions\"",
".",
"format",
"(",
"namespace",
",",
"config",
",",
"snapshot_id",
")",
"return",
"__post",
"(",
"uri",
",",
"json",
"=",
"acl_updates",
")"
] | Set configuration permissions.
The configuration should exist in the methods repository.
Args:
namespace (str): Configuration namespace
config (str): Configuration name
snapshot_id (int): snapshot_id of the method
acl_updates (list(dict)): List of access control updates
Swagger:
https://api.firecloud.org/#!/Method_Repository/setConfigACL | [
"Set",
"configuration",
"permissions",
"."
] | python | train |
saltstack/salt | salt/states/logadm.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/logadm.py#L117-L168 | def remove(name, log_file=None):
'''
Remove a log from the logadm configuration
name : string
entryname
log_file : string
(optional) log file path
.. note::
If log_file is specified it will be used instead of the entry name.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# retrieve all log configuration
config = __salt__['logadm.list_conf']()
# figure out log_file and name
if not log_file:
if name.startswith('/'):
log_file = name
name = None
else:
for log in config:
if 'entryname' in config[log] and config[log]['entryname'] == name:
log_file = config[log]['log_file']
break
if not name:
for log in config:
if 'log_file' in config[log] and config[log]['log_file'] == log_file:
if 'entryname' in config[log]:
name = config[log]['entryname']
break
# remove log if needed
if log_file in config:
res = __salt__['logadm.remove'](name if name else log_file)
ret['result'] = 'Error' not in res
if ret['result']:
ret['comment'] = 'Configuration for {} removed.'.format(log_file)
ret['changes'][log_file] = None
else:
ret['comment'] = res['Error']
else:
ret['result'] = True
ret['comment'] = 'No configuration for {} present.'.format(log_file)
return ret | [
"def",
"remove",
"(",
"name",
",",
"log_file",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"# retrieve all log configuration",
"config",
"=",
"__salt__",
"[",
"'logadm.list_conf'",
"]",
"(",
")",
"# figure out log_file and name",
"if",
"not",
"log_file",
":",
"if",
"name",
".",
"startswith",
"(",
"'/'",
")",
":",
"log_file",
"=",
"name",
"name",
"=",
"None",
"else",
":",
"for",
"log",
"in",
"config",
":",
"if",
"'entryname'",
"in",
"config",
"[",
"log",
"]",
"and",
"config",
"[",
"log",
"]",
"[",
"'entryname'",
"]",
"==",
"name",
":",
"log_file",
"=",
"config",
"[",
"log",
"]",
"[",
"'log_file'",
"]",
"break",
"if",
"not",
"name",
":",
"for",
"log",
"in",
"config",
":",
"if",
"'log_file'",
"in",
"config",
"[",
"log",
"]",
"and",
"config",
"[",
"log",
"]",
"[",
"'log_file'",
"]",
"==",
"log_file",
":",
"if",
"'entryname'",
"in",
"config",
"[",
"log",
"]",
":",
"name",
"=",
"config",
"[",
"log",
"]",
"[",
"'entryname'",
"]",
"break",
"# remove log if needed",
"if",
"log_file",
"in",
"config",
":",
"res",
"=",
"__salt__",
"[",
"'logadm.remove'",
"]",
"(",
"name",
"if",
"name",
"else",
"log_file",
")",
"ret",
"[",
"'result'",
"]",
"=",
"'Error'",
"not",
"in",
"res",
"if",
"ret",
"[",
"'result'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Configuration for {} removed.'",
".",
"format",
"(",
"log_file",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"log_file",
"]",
"=",
"None",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"res",
"[",
"'Error'",
"]",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'No configuration for {} present.'",
".",
"format",
"(",
"log_file",
")",
"return",
"ret"
] | Remove a log from the logadm configuration
name : string
entryname
log_file : string
(optional) log file path
.. note::
If log_file is specified it will be used instead of the entry name. | [
"Remove",
"a",
"log",
"from",
"the",
"logadm",
"configuration"
] | python | train |
Duke-GCB/DukeDSClient | ddsc/versioncheck.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/versioncheck.py#L18-L31 | def get_pypi_version():
"""
Returns the version info from pypi for this app.
"""
try:
response = requests.get(PYPI_URL, timeout=HALF_SECOND_TIMEOUT)
response.raise_for_status()
data = response.json()
version_str = data["info"]["version"]
return _parse_version_str(version_str)
except requests.exceptions.ConnectionError:
raise VersionException(UNABLE_TO_ACCESS_PYPI + " Failed to connect.")
except requests.exceptions.Timeout:
raise VersionException(UNABLE_TO_ACCESS_PYPI + " Timeout") | [
"def",
"get_pypi_version",
"(",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"PYPI_URL",
",",
"timeout",
"=",
"HALF_SECOND_TIMEOUT",
")",
"response",
".",
"raise_for_status",
"(",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"version_str",
"=",
"data",
"[",
"\"info\"",
"]",
"[",
"\"version\"",
"]",
"return",
"_parse_version_str",
"(",
"version_str",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
":",
"raise",
"VersionException",
"(",
"UNABLE_TO_ACCESS_PYPI",
"+",
"\" Failed to connect.\"",
")",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
":",
"raise",
"VersionException",
"(",
"UNABLE_TO_ACCESS_PYPI",
"+",
"\" Timeout\"",
")"
] | Returns the version info from pypi for this app. | [
"Returns",
"the",
"version",
"info",
"from",
"pypi",
"for",
"this",
"app",
"."
] | python | train |
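A hedged usage sketch for get_pypi_version in the row above; the import path follows the file path in the row, and VersionException is assumed to be importable from the same module since the function raises it:

from ddsc.versioncheck import get_pypi_version, VersionException

try:
    latest = get_pypi_version()  # parsed version taken from the PyPI JSON metadata
    print("Latest release on PyPI:", latest)
except VersionException as err:
    # Connection failures and timeouts are re-raised as VersionException
    print("Could not reach PyPI:", err)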
learningequality/ricecooker | ricecooker/utils/jsontrees.py | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/jsontrees.py#L48-L56 | def read_tree_from_json(srcpath):
"""
Load ricecooker json tree data from json file at `srcpath`.
"""
with open(srcpath) as infile:
json_tree = json.load(infile)
if json_tree is None:
raise ValueError('Could not find ricecooker json tree')
return json_tree | [
"def",
"read_tree_from_json",
"(",
"srcpath",
")",
":",
"with",
"open",
"(",
"srcpath",
")",
"as",
"infile",
":",
"json_tree",
"=",
"json",
".",
"load",
"(",
"infile",
")",
"if",
"json_tree",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Could not find ricecooker json tree'",
")",
"return",
"json_tree"
] | Load ricecooker json tree data from json file at `srcpath`. | [
"Load",
"ricecooker",
"json",
"tree",
"data",
"from",
"json",
"file",
"at",
"srcpath",
"."
] | python | train |
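A small usage sketch for read_tree_from_json in the row above; the file name is hypothetical and only illustrates the call:

from ricecooker.utils.jsontrees import read_tree_from_json

json_tree = read_tree_from_json('chefdata/ricecooker_json_tree.json')  # hypothetical path
# Returns the plain dict parsed from the JSON file; raises ValueError if the file holds null
print(list(json_tree.keys()))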
vtkiorg/vtki | vtki/common.py | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L907-L920 | def update(self, data):
"""
Update this dictionary with th key-value pairs from a given
dictionary
"""
if not isinstance(data, dict):
raise TypeError('Data to update must be in a dictionary.')
for k, v in data.items():
arr = np.array(v)
try:
self[k] = arr
except TypeError:
logging.warning("Values under key ({}) not supported by VTK".format(k))
return | [
"def",
"update",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'Data to update must be in a dictionary.'",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"arr",
"=",
"np",
".",
"array",
"(",
"v",
")",
"try",
":",
"self",
"[",
"k",
"]",
"=",
"arr",
"except",
"TypeError",
":",
"logging",
".",
"warning",
"(",
"\"Values under key ({}) not supported by VTK\"",
".",
"format",
"(",
"k",
")",
")",
"return"
] | Update this dictionary with th key-value pairs from a given
dictionary | [
"Update",
"this",
"dictionary",
"with",
"th",
"key",
"-",
"value",
"pairs",
"from",
"a",
"given",
"dictionary"
] | python | train |
RJT1990/pyflux | pyflux/ssm/dynlin.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/dynlin.py#L115-L138 | def _ss_matrices(self, beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q, H : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(self.z_no-1)
H = np.identity(1)*self.latent_variables.z_list[0].prior.transform(beta[0])
Z = self.X
R = np.identity(self.z_no-1)
Q = np.identity(self.z_no-1)
for i in range(0,self.z_no-1):
Q[i][i] = self.latent_variables.z_list[i+1].prior.transform(beta[i+1])
return T, Z, R, Q, H | [
"def",
"_ss_matrices",
"(",
"self",
",",
"beta",
")",
":",
"T",
"=",
"np",
".",
"identity",
"(",
"self",
".",
"z_no",
"-",
"1",
")",
"H",
"=",
"np",
".",
"identity",
"(",
"1",
")",
"*",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"beta",
"[",
"0",
"]",
")",
"Z",
"=",
"self",
".",
"X",
"R",
"=",
"np",
".",
"identity",
"(",
"self",
".",
"z_no",
"-",
"1",
")",
"Q",
"=",
"np",
".",
"identity",
"(",
"self",
".",
"z_no",
"-",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"z_no",
"-",
"1",
")",
":",
"Q",
"[",
"i",
"]",
"[",
"i",
"]",
"=",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"i",
"+",
"1",
"]",
".",
"prior",
".",
"transform",
"(",
"beta",
"[",
"i",
"+",
"1",
"]",
")",
"return",
"T",
",",
"Z",
",",
"R",
",",
"Q",
",",
"H"
] | Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q, H : np.array
State space matrices used in KFS algorithm | [
"Creates",
"the",
"state",
"space",
"matrices",
"required"
] | python | train |
apache/incubator-mxnet | python/mxnet/recordio.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/recordio.py#L358-L391 | def pack(header, s):
"""Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s)
"""
header = IRHeader(*header)
if isinstance(header.label, numbers.Number):
header = header._replace(flag=0)
else:
label = np.asarray(header.label, dtype=np.float32)
header = header._replace(flag=label.size, label=0)
s = label.tostring() + s
s = struct.pack(_IR_FORMAT, *header) + s
return s | [
"def",
"pack",
"(",
"header",
",",
"s",
")",
":",
"header",
"=",
"IRHeader",
"(",
"*",
"header",
")",
"if",
"isinstance",
"(",
"header",
".",
"label",
",",
"numbers",
".",
"Number",
")",
":",
"header",
"=",
"header",
".",
"_replace",
"(",
"flag",
"=",
"0",
")",
"else",
":",
"label",
"=",
"np",
".",
"asarray",
"(",
"header",
".",
"label",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"header",
"=",
"header",
".",
"_replace",
"(",
"flag",
"=",
"label",
".",
"size",
",",
"label",
"=",
"0",
")",
"s",
"=",
"label",
".",
"tostring",
"(",
")",
"+",
"s",
"s",
"=",
"struct",
".",
"pack",
"(",
"_IR_FORMAT",
",",
"*",
"header",
")",
"+",
"s",
"return",
"s"
] | Pack a string into MXImageRecord.
Parameters
----------
header : IRHeader
Header of the image record.
``header.label`` can be a number or an array. See more detail in ``IRHeader``.
s : str
Raw image string to be packed.
Returns
-------
s : str
The packed string.
Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
... s = file.read()
>>> packed_s = mx.recordio.pack(header, s) | [
"Pack",
"a",
"string",
"into",
"MXImageRecord",
"."
] | python | train |
saltstack/salt | salt/states/nexus.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/nexus.py#L27-L110 | def downloaded(name, artifact, target_dir='/tmp', target_file=None):
'''
Ensures that the artifact from nexus exists at given location. If it doesn't exist, then
it will be downloaded. If it already exists then the checksum of existing file is checked
against checksum in nexus. If it is different then the step will fail.
artifact
Details of the artifact to be downloaded from nexus. Various options are:
- nexus_url: URL of the nexus instance
- repository: Repository in nexus
- artifact_id: Artifact ID
- group_id: Group ID
- packaging: Packaging
- classifier: Classifier
- version: Version
One of the following:
- Version to download
- ``latest`` - Download the latest release of this artifact
- ``latest_snapshot`` - Download the latest snapshot for this artifact
- username: nexus username
- password: nexus password
target_dir
Directory where the artifact should be downloaded. By default it is downloaded to /tmp directory.
target_file
Target file to download artifact to. By default file name is resolved by nexus.
An example to download an artifact to a specific file:
.. code-block:: yaml
jboss_module_downloaded:
nexus.downloaded:
- artifact:
nexus_url: http://nexus.intranet.example.com/repository
repository: 'libs-release-local'
artifact_id: 'module'
group_id: 'com.company.module'
packaging: 'jar'
classifier: 'sources'
version: '1.0'
- target_file: /opt/jboss7/modules/com/company/lib/module.jar
Download artifact to the folder (automatically resolves file name):
.. code-block:: yaml
maven_artifact_downloaded:
nexus.downloaded:
- artifact:
nexus_url: http://nexus.intranet.example.com/repository
repository: 'maven-releases'
artifact_id: 'module'
group_id: 'com.company.module'
packaging: 'zip'
classifier: 'dist'
version: '1.0'
- target_dir: /opt/maven/modules/com/company/release
'''
log.debug(" ======================== STATE: nexus.downloaded (name: %s) ", name)
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
try:
fetch_result = __fetch_from_nexus(artifact, target_dir, target_file)
except Exception as exc:
ret['result'] = False
ret['comment'] = six.text_type(exc)
return ret
log.debug("fetch_result=%s", fetch_result)
ret['result'] = fetch_result['status']
ret['comment'] = fetch_result['comment']
ret['changes'] = fetch_result['changes']
log.debug("ret=%s", ret)
return ret | [
"def",
"downloaded",
"(",
"name",
",",
"artifact",
",",
"target_dir",
"=",
"'/tmp'",
",",
"target_file",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\" ======================== STATE: nexus.downloaded (name: %s) \"",
",",
"name",
")",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"''",
"}",
"try",
":",
"fetch_result",
"=",
"__fetch_from_nexus",
"(",
"artifact",
",",
"target_dir",
",",
"target_file",
")",
"except",
"Exception",
"as",
"exc",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"six",
".",
"text_type",
"(",
"exc",
")",
"return",
"ret",
"log",
".",
"debug",
"(",
"\"fetch_result=%s\"",
",",
"fetch_result",
")",
"ret",
"[",
"'result'",
"]",
"=",
"fetch_result",
"[",
"'status'",
"]",
"ret",
"[",
"'comment'",
"]",
"=",
"fetch_result",
"[",
"'comment'",
"]",
"ret",
"[",
"'changes'",
"]",
"=",
"fetch_result",
"[",
"'changes'",
"]",
"log",
".",
"debug",
"(",
"\"ret=%s\"",
",",
"ret",
")",
"return",
"ret"
] | Ensures that the artifact from nexus exists at given location. If it doesn't exist, then
it will be downloaded. If it already exists then the checksum of existing file is checked
against checksum in nexus. If it is different then the step will fail.
artifact
Details of the artifact to be downloaded from nexus. Various options are:
- nexus_url: URL of the nexus instance
- repository: Repository in nexus
- artifact_id: Artifact ID
- group_id: Group ID
- packaging: Packaging
- classifier: Classifier
- version: Version
One of the following:
- Version to download
- ``latest`` - Download the latest release of this artifact
- ``latest_snapshot`` - Download the latest snapshot for this artifact
- username: nexus username
- password: nexus password
target_dir
Directory where the artifact should be downloaded. By default it is downloaded to /tmp directory.
target_file
Target file to download artifact to. By default file name is resolved by nexus.
An example to download an artifact to a specific file:
.. code-block:: yaml
jboss_module_downloaded:
nexus.downloaded:
- artifact:
nexus_url: http://nexus.intranet.example.com/repository
repository: 'libs-release-local'
artifact_id: 'module'
group_id: 'com.company.module'
packaging: 'jar'
classifier: 'sources'
version: '1.0'
- target_file: /opt/jboss7/modules/com/company/lib/module.jar
Download artifact to the folder (automatically resolves file name):
.. code-block:: yaml
maven_artifact_downloaded:
nexus.downloaded:
- artifact:
nexus_url: http://nexus.intranet.example.com/repository
repository: 'maven-releases'
artifact_id: 'module'
group_id: 'com.company.module'
packaging: 'zip'
classifier: 'dist'
version: '1.0'
- target_dir: /opt/maven/modules/com/company/release | [
"Ensures",
"that",
"the",
"artifact",
"from",
"nexus",
"exists",
"at",
"given",
"location",
".",
"If",
"it",
"doesn",
"t",
"exist",
"then",
"it",
"will",
"be",
"downloaded",
".",
"If",
"it",
"already",
"exists",
"then",
"the",
"checksum",
"of",
"existing",
"file",
"is",
"checked",
"against",
"checksum",
"in",
"nexus",
".",
"If",
"it",
"is",
"different",
"then",
"the",
"step",
"will",
"fail",
"."
] | python | train |
hishnash/djangochannelsrestframework | djangochannelsrestframework/consumers.py | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/consumers.py#L104-L123 | async def handle_exception(self, exc: Exception, action: str, request_id):
"""
Handle any exception that occurs, by sending an appropriate message
"""
if isinstance(exc, APIException):
await self.reply(
action=action,
errors=self._format_errors(exc.detail),
status=exc.status_code,
request_id=request_id
)
elif exc == Http404 or isinstance(exc, Http404):
await self.reply(
action=action,
errors=self._format_errors('Not found'),
status=404,
request_id=request_id
)
else:
raise exc | [
"async",
"def",
"handle_exception",
"(",
"self",
",",
"exc",
":",
"Exception",
",",
"action",
":",
"str",
",",
"request_id",
")",
":",
"if",
"isinstance",
"(",
"exc",
",",
"APIException",
")",
":",
"await",
"self",
".",
"reply",
"(",
"action",
"=",
"action",
",",
"errors",
"=",
"self",
".",
"_format_errors",
"(",
"exc",
".",
"detail",
")",
",",
"status",
"=",
"exc",
".",
"status_code",
",",
"request_id",
"=",
"request_id",
")",
"elif",
"exc",
"==",
"Http404",
"or",
"isinstance",
"(",
"exc",
",",
"Http404",
")",
":",
"await",
"self",
".",
"reply",
"(",
"action",
"=",
"action",
",",
"errors",
"=",
"self",
".",
"_format_errors",
"(",
"'Not found'",
")",
",",
"status",
"=",
"404",
",",
"request_id",
"=",
"request_id",
")",
"else",
":",
"raise",
"exc"
] | Handle any exception that occurs, by sending an appropriate message | [
"Handle",
"any",
"exception",
"that",
"occurs",
"by",
"sending",
"an",
"appropriate",
"message"
] | python | train |
dschreij/python-mediadecoder | mediadecoder/decoder.py | https://github.com/dschreij/python-mediadecoder/blob/f01b02d790f2abc52d9792e43076cf4cb7d3ce51/mediadecoder/decoder.py#L345-L357 | def __calculate_audio_frames(self):
""" Aligns audio with video.
This should be called for instance after a seeking operation or resuming
from a pause. """
if self.audioformat is None:
return
start_frame = self.clock.current_frame
totalsize = int(self.clip.audio.fps*self.clip.audio.duration)
self.audio_times = list(range(0, totalsize,
self.audioformat['buffersize'])) + [totalsize]
# Remove audio segments up to the starting frame
del(self.audio_times[0:start_frame]) | [
"def",
"__calculate_audio_frames",
"(",
"self",
")",
":",
"if",
"self",
".",
"audioformat",
"is",
"None",
":",
"return",
"start_frame",
"=",
"self",
".",
"clock",
".",
"current_frame",
"totalsize",
"=",
"int",
"(",
"self",
".",
"clip",
".",
"audio",
".",
"fps",
"*",
"self",
".",
"clip",
".",
"audio",
".",
"duration",
")",
"self",
".",
"audio_times",
"=",
"list",
"(",
"range",
"(",
"0",
",",
"totalsize",
",",
"self",
".",
"audioformat",
"[",
"'buffersize'",
"]",
")",
")",
"+",
"[",
"totalsize",
"]",
"# Remove audio segments up to the starting frame",
"del",
"(",
"self",
".",
"audio_times",
"[",
"0",
":",
"start_frame",
"]",
")"
] | Aligns audio with video.
This should be called for instance after a seeking operation or resuming
from a pause. | [
"Aligns",
"audio",
"with",
"video",
".",
"This",
"should",
"be",
"called",
"for",
"instance",
"after",
"a",
"seeking",
"operation",
"or",
"resuming",
"from",
"a",
"pause",
"."
] | python | train |