repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses 1 value) | partition (stringclasses 3 values) |
---|---|---|---|---|---|---|---|---|
barrust/mediawiki | mediawiki/mediawikipage.py | https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L552-L578 | def _handle_redirect(self, redirect, preload, query, page):
""" handle redirect """
if redirect:
redirects = query["redirects"][0]
if "normalized" in query:
normalized = query["normalized"][0]
if normalized["from"] != self.title:
raise MediaWikiException(ODD_ERROR_MESSAGE)
from_title = normalized["to"]
else:
if not getattr(self, "title", None):
self.title = redirects["from"]
delattr(self, "pageid")
from_title = self.title
if redirects["from"] != from_title:
raise MediaWikiException(ODD_ERROR_MESSAGE)
# change the title and reload the whole object
self.__init__(
self.mediawiki,
title=redirects["to"],
redirect=redirect,
preload=preload,
)
else:
raise RedirectError(getattr(self, "title", page["title"])) | [
"def",
"_handle_redirect",
"(",
"self",
",",
"redirect",
",",
"preload",
",",
"query",
",",
"page",
")",
":",
"if",
"redirect",
":",
"redirects",
"=",
"query",
"[",
"\"redirects\"",
"]",
"[",
"0",
"]",
"if",
"\"normalized\"",
"in",
"query",
":",
"normalized",
"=",
"query",
"[",
"\"normalized\"",
"]",
"[",
"0",
"]",
"if",
"normalized",
"[",
"\"from\"",
"]",
"!=",
"self",
".",
"title",
":",
"raise",
"MediaWikiException",
"(",
"ODD_ERROR_MESSAGE",
")",
"from_title",
"=",
"normalized",
"[",
"\"to\"",
"]",
"else",
":",
"if",
"not",
"getattr",
"(",
"self",
",",
"\"title\"",
",",
"None",
")",
":",
"self",
".",
"title",
"=",
"redirects",
"[",
"\"from\"",
"]",
"delattr",
"(",
"self",
",",
"\"pageid\"",
")",
"from_title",
"=",
"self",
".",
"title",
"if",
"redirects",
"[",
"\"from\"",
"]",
"!=",
"from_title",
":",
"raise",
"MediaWikiException",
"(",
"ODD_ERROR_MESSAGE",
")",
"# change the title and reload the whole object",
"self",
".",
"__init__",
"(",
"self",
".",
"mediawiki",
",",
"title",
"=",
"redirects",
"[",
"\"to\"",
"]",
",",
"redirect",
"=",
"redirect",
",",
"preload",
"=",
"preload",
",",
")",
"else",
":",
"raise",
"RedirectError",
"(",
"getattr",
"(",
"self",
",",
"\"title\"",
",",
"page",
"[",
"\"title\"",
"]",
")",
")"
] | handle redirect | [
"handle",
"redirect"
] | python | train |
productml/blurr | blurr/core/base.py | https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/base.py#L147-L154 | def _needs_evaluation(self) -> bool:
"""
Returns True when:
1. Where clause is not specified
2. Where WHERE clause is specified and it evaluates to True
Returns false if a where clause is specified and it evaluates to False
"""
return self._schema.when is None or self._schema.when.evaluate(self._evaluation_context) | [
"def",
"_needs_evaluation",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"self",
".",
"_schema",
".",
"when",
"is",
"None",
"or",
"self",
".",
"_schema",
".",
"when",
".",
"evaluate",
"(",
"self",
".",
"_evaluation_context",
")"
] | Returns True when:
1. Where clause is not specified
2. Where WHERE clause is specified and it evaluates to True
Returns false if a where clause is specified and it evaluates to False | [
"Returns",
"True",
"when",
":",
"1",
".",
"Where",
"clause",
"is",
"not",
"specified",
"2",
".",
"Where",
"WHERE",
"clause",
"is",
"specified",
"and",
"it",
"evaluates",
"to",
"True",
"Returns",
"false",
"if",
"a",
"where",
"clause",
"is",
"specified",
"and",
"it",
"evaluates",
"to",
"False"
] | python | train |
apache/incubator-mxnet | tools/diagnose.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/diagnose.py#L33-L48 | def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Diagnose script for checking the current system.')
choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network']
for choice in choices:
parser.add_argument('--' + choice, default=1, type=int,
help='Diagnose {}.'.format(choice))
parser.add_argument('--region', default='', type=str,
help="Additional sites in which region(s) to test. \
Specify 'cn' for example to test mirror sites in China.")
parser.add_argument('--timeout', default=10, type=int,
help="Connection test timeout threshold, 0 to disable.")
args = parser.parse_args()
return args | [
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
"description",
"=",
"'Diagnose script for checking the current system.'",
")",
"choices",
"=",
"[",
"'python'",
",",
"'pip'",
",",
"'mxnet'",
",",
"'os'",
",",
"'hardware'",
",",
"'network'",
"]",
"for",
"choice",
"in",
"choices",
":",
"parser",
".",
"add_argument",
"(",
"'--'",
"+",
"choice",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'Diagnose {}.'",
".",
"format",
"(",
"choice",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--region'",
",",
"default",
"=",
"''",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Additional sites in which region(s) to test. \\\n Specify 'cn' for example to test mirror sites in China.\"",
")",
"parser",
".",
"add_argument",
"(",
"'--timeout'",
",",
"default",
"=",
"10",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Connection test timeout threshold, 0 to disable.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] | Parse arguments. | [
"Parse",
"arguments",
"."
] | python | train |
SKA-ScienceDataProcessor/integration-prototype | sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/_config_db_redis.py#L152-L177 | def _load_dict_hierarchical(self, db_key: str) -> dict:
"""Load a dictionary stored hierarchically at db_key."""
db_keys = self._db.keys(pattern=db_key + '*')
my_dict = {}
for _db_key in db_keys:
if self._db.type(_db_key) == 'list':
db_values = self._db.lrange(_db_key, 0, -1)
for i, value in enumerate(db_values):
try:
db_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
else: # self._db.type == 'hash'
db_values = self._db.hgetall(_db_key)
for _key, _value in db_values.items():
try:
db_values[_key] = ast.literal_eval(_value)
except SyntaxError:
pass
except ValueError:
pass
my_dict = self._build_dict(my_dict, _db_key.split(':'),
db_values)
return my_dict[db_key] | [
"def",
"_load_dict_hierarchical",
"(",
"self",
",",
"db_key",
":",
"str",
")",
"->",
"dict",
":",
"db_keys",
"=",
"self",
".",
"_db",
".",
"keys",
"(",
"pattern",
"=",
"db_key",
"+",
"'*'",
")",
"my_dict",
"=",
"{",
"}",
"for",
"_db_key",
"in",
"db_keys",
":",
"if",
"self",
".",
"_db",
".",
"type",
"(",
"_db_key",
")",
"==",
"'list'",
":",
"db_values",
"=",
"self",
".",
"_db",
".",
"lrange",
"(",
"_db_key",
",",
"0",
",",
"-",
"1",
")",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"db_values",
")",
":",
"try",
":",
"db_values",
"[",
"i",
"]",
"=",
"ast",
".",
"literal_eval",
"(",
"value",
")",
"except",
"SyntaxError",
":",
"pass",
"except",
"ValueError",
":",
"pass",
"else",
":",
"# self._db.type == 'hash'",
"db_values",
"=",
"self",
".",
"_db",
".",
"hgetall",
"(",
"_db_key",
")",
"for",
"_key",
",",
"_value",
"in",
"db_values",
".",
"items",
"(",
")",
":",
"try",
":",
"db_values",
"[",
"_key",
"]",
"=",
"ast",
".",
"literal_eval",
"(",
"_value",
")",
"except",
"SyntaxError",
":",
"pass",
"except",
"ValueError",
":",
"pass",
"my_dict",
"=",
"self",
".",
"_build_dict",
"(",
"my_dict",
",",
"_db_key",
".",
"split",
"(",
"':'",
")",
",",
"db_values",
")",
"return",
"my_dict",
"[",
"db_key",
"]"
] | Load a dictionary stored hierarchically at db_key. | [
"Load",
"a",
"dictionary",
"stored",
"hierarchically",
"at",
"db_key",
"."
] | python | train |
pantsbuild/pex | pex/package.py | https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/package.py#L107-L125 | def split_fragment(cls, fragment):
"""A heuristic used to split a string into version name/fragment:
>>> SourcePackage.split_fragment('pysolr-2.1.0-beta')
('pysolr', '2.1.0-beta')
>>> SourcePackage.split_fragment('cElementTree-1.0.5-20051216')
('cElementTree', '1.0.5-20051216')
>>> SourcePackage.split_fragment('pil-1.1.7b1-20090412')
('pil', '1.1.7b1-20090412')
>>> SourcePackage.split_fragment('django-plugin-2-2.3')
('django-plugin-2', '2.3')
"""
def likely_version_component(enumerated_fragment):
return sum(bool(v and v[0].isdigit()) for v in enumerated_fragment[1].split('.'))
fragments = fragment.split('-')
if len(fragments) == 1:
return fragment, ''
max_index, _ = max(enumerate(fragments), key=likely_version_component)
return '-'.join(fragments[0:max_index]), '-'.join(fragments[max_index:]) | [
"def",
"split_fragment",
"(",
"cls",
",",
"fragment",
")",
":",
"def",
"likely_version_component",
"(",
"enumerated_fragment",
")",
":",
"return",
"sum",
"(",
"bool",
"(",
"v",
"and",
"v",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
")",
"for",
"v",
"in",
"enumerated_fragment",
"[",
"1",
"]",
".",
"split",
"(",
"'.'",
")",
")",
"fragments",
"=",
"fragment",
".",
"split",
"(",
"'-'",
")",
"if",
"len",
"(",
"fragments",
")",
"==",
"1",
":",
"return",
"fragment",
",",
"''",
"max_index",
",",
"_",
"=",
"max",
"(",
"enumerate",
"(",
"fragments",
")",
",",
"key",
"=",
"likely_version_component",
")",
"return",
"'-'",
".",
"join",
"(",
"fragments",
"[",
"0",
":",
"max_index",
"]",
")",
",",
"'-'",
".",
"join",
"(",
"fragments",
"[",
"max_index",
":",
"]",
")"
] | A heuristic used to split a string into version name/fragment:
>>> SourcePackage.split_fragment('pysolr-2.1.0-beta')
('pysolr', '2.1.0-beta')
>>> SourcePackage.split_fragment('cElementTree-1.0.5-20051216')
('cElementTree', '1.0.5-20051216')
>>> SourcePackage.split_fragment('pil-1.1.7b1-20090412')
('pil', '1.1.7b1-20090412')
>>> SourcePackage.split_fragment('django-plugin-2-2.3')
('django-plugin-2', '2.3') | [
"A",
"heuristic",
"used",
"to",
"split",
"a",
"string",
"into",
"version",
"name",
"/",
"fragment",
":"
] | python | train |
cons3rt/pycons3rt | pycons3rt/deployment.py | https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/deployment.py#L527-L534 | def set_deployment_run_name(self):
"""Sets the deployment run name from deployment properties
:return: None
"""
log = logging.getLogger(self.cls_logger + '.set_deployment_run_name')
self.deployment_run_name = self.get_value('cons3rt.deploymentRun.name')
log.info('Found deployment run name: {n}'.format(n=self.deployment_run_name)) | [
"def",
"set_deployment_run_name",
"(",
"self",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"self",
".",
"cls_logger",
"+",
"'.set_deployment_run_name'",
")",
"self",
".",
"deployment_run_name",
"=",
"self",
".",
"get_value",
"(",
"'cons3rt.deploymentRun.name'",
")",
"log",
".",
"info",
"(",
"'Found deployment run name: {n}'",
".",
"format",
"(",
"n",
"=",
"self",
".",
"deployment_run_name",
")",
")"
] | Sets the deployment run name from deployment properties
:return: None | [
"Sets",
"the",
"deployment",
"run",
"name",
"from",
"deployment",
"properties"
] | python | train |
merll/docker-fabric | dockerfabric/utils/users.py | https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/utils/users.py#L101-L122 | def get_or_create_group(groupname, gid_preset, system=False, id_dependent=True):
"""
Returns the id for the given group, and creates it first in case it does not exist.
:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
:rtype: int
"""
gid = get_group_id(groupname)
if gid is None:
create_group(groupname, gid_preset, system)
return gid_preset
elif id_dependent and gid != gid_preset:
error("Present group id '{0}' does not match the required id of the environment '{1}'.".format(gid, gid_preset))
return gid | [
"def",
"get_or_create_group",
"(",
"groupname",
",",
"gid_preset",
",",
"system",
"=",
"False",
",",
"id_dependent",
"=",
"True",
")",
":",
"gid",
"=",
"get_group_id",
"(",
"groupname",
")",
"if",
"gid",
"is",
"None",
":",
"create_group",
"(",
"groupname",
",",
"gid_preset",
",",
"system",
")",
"return",
"gid_preset",
"elif",
"id_dependent",
"and",
"gid",
"!=",
"gid_preset",
":",
"error",
"(",
"\"Present group id '{0}' does not match the required id of the environment '{1}'.\"",
".",
"format",
"(",
"gid",
",",
"gid_preset",
")",
")",
"return",
"gid"
] | Returns the id for the given group, and creates it first in case it does not exist.
:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
:rtype: int | [
"Returns",
"the",
"id",
"for",
"the",
"given",
"group",
"and",
"creates",
"it",
"first",
"in",
"case",
"it",
"does",
"not",
"exist",
"."
] | python | train |
kata198/AdvancedHTMLParser | AdvancedHTMLParser/Parser.py | https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L527-L539 | def containsUid(self, uid):
'''
Check if #uid is found anywhere within this element tree
@param uid <uuid.UUID> - Uid
@return <bool> - If #uid is found within this tree
'''
for rootNode in self.getRootNodes():
if rootNode.containsUid(uid):
return True
return False | [
"def",
"containsUid",
"(",
"self",
",",
"uid",
")",
":",
"for",
"rootNode",
"in",
"self",
".",
"getRootNodes",
"(",
")",
":",
"if",
"rootNode",
".",
"containsUid",
"(",
"uid",
")",
":",
"return",
"True",
"return",
"False"
] | Check if #uid is found anywhere within this element tree
@param uid <uuid.UUID> - Uid
@return <bool> - If #uid is found within this tree | [
"Check",
"if",
"#uid",
"is",
"found",
"anywhere",
"within",
"this",
"element",
"tree"
] | python | train |
SpotlightData/preprocessing | preprocessing/text.py | https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L75-L98 | def correct_spelling(text_string):
'''
Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument
'''
if text_string is None or text_string == "":
return ""
elif isinstance(text_string, str):
word_list = text_string.split()
spellchecked_word_list = []
for word in word_list:
spellchecked_word_list.append(spellcheck.correct_word(word))
return " ".join(spellchecked_word_list)
else:
raise InputError("none type or string not passed as an argument") | [
"def",
"correct_spelling",
"(",
"text_string",
")",
":",
"if",
"text_string",
"is",
"None",
"or",
"text_string",
"==",
"\"\"",
":",
"return",
"\"\"",
"elif",
"isinstance",
"(",
"text_string",
",",
"str",
")",
":",
"word_list",
"=",
"text_string",
".",
"split",
"(",
")",
"spellchecked_word_list",
"=",
"[",
"]",
"for",
"word",
"in",
"word_list",
":",
"spellchecked_word_list",
".",
"append",
"(",
"spellcheck",
".",
"correct_word",
"(",
"word",
")",
")",
"return",
"\" \"",
".",
"join",
"(",
"spellchecked_word_list",
")",
"else",
":",
"raise",
"InputError",
"(",
"\"none type or string not passed as an argument\"",
")"
] | Splits string and converts words not found within a pre-built dictionary to their
most likely actual word based on a relative probability dictionary. Returns edited
string as type str.
Keyword argument:
- text_string: string instance
Exceptions raised:
- InputError: occurs should a string or NoneType not be passed as an argument | [
"Splits",
"string",
"and",
"converts",
"words",
"not",
"found",
"within",
"a",
"pre",
"-",
"built",
"dictionary",
"to",
"their",
"most",
"likely",
"actual",
"word",
"based",
"on",
"a",
"relative",
"probability",
"dictionary",
".",
"Returns",
"edited",
"string",
"as",
"type",
"str",
"."
] | python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_notifications.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_notifications.py#L256-L265 | def netconf_confirmed_commit_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_confirmed_commit = ET.SubElement(config, "netconf-confirmed-commit", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
session_id = ET.SubElement(netconf_confirmed_commit, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"netconf_confirmed_commit_session_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"netconf_confirmed_commit",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"netconf-confirmed-commit\"",
",",
"xmlns",
"=",
"\"urn:ietf:params:xml:ns:yang:ietf-netconf-notifications\"",
")",
"session_id",
"=",
"ET",
".",
"SubElement",
"(",
"netconf_confirmed_commit",
",",
"\"session-id\"",
")",
"session_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'session_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
boriel/zxbasic | asmparse.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L377-L383 | def set_org(self, value, lineno):
""" Sets a new ORG value
"""
if value < 0 or value > MAX_MEM:
error(lineno, "Memory ORG out of range [0 .. 65535]. Current value: %i" % value)
self.index = self.ORG = value | [
"def",
"set_org",
"(",
"self",
",",
"value",
",",
"lineno",
")",
":",
"if",
"value",
"<",
"0",
"or",
"value",
">",
"MAX_MEM",
":",
"error",
"(",
"lineno",
",",
"\"Memory ORG out of range [0 .. 65535]. Current value: %i\"",
"%",
"value",
")",
"self",
".",
"index",
"=",
"self",
".",
"ORG",
"=",
"value"
] | Sets a new ORG value | [
"Sets",
"a",
"new",
"ORG",
"value"
] | python | train |
twilio/twilio-python | twilio/rest/studio/v1/flow/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/studio/v1/flow/__init__.py#L239-L248 | def engagements(self):
"""
Access the engagements
:returns: twilio.rest.studio.v1.flow.engagement.EngagementList
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementList
"""
if self._engagements is None:
self._engagements = EngagementList(self._version, flow_sid=self._solution['sid'], )
return self._engagements | [
"def",
"engagements",
"(",
"self",
")",
":",
"if",
"self",
".",
"_engagements",
"is",
"None",
":",
"self",
".",
"_engagements",
"=",
"EngagementList",
"(",
"self",
".",
"_version",
",",
"flow_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_engagements"
] | Access the engagements
:returns: twilio.rest.studio.v1.flow.engagement.EngagementList
:rtype: twilio.rest.studio.v1.flow.engagement.EngagementList | [
"Access",
"the",
"engagements"
] | python | train |
pkgw/pwkit | pwkit/__init__.py | https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/__init__.py#L205-L211 | def copy(self):
"""Return a shallow copy of this object.
"""
new = self.__class__()
new.__dict__ = dict(self.__dict__)
return new | [
"def",
"copy",
"(",
"self",
")",
":",
"new",
"=",
"self",
".",
"__class__",
"(",
")",
"new",
".",
"__dict__",
"=",
"dict",
"(",
"self",
".",
"__dict__",
")",
"return",
"new"
] | Return a shallow copy of this object. | [
"Return",
"a",
"shallow",
"copy",
"of",
"this",
"object",
"."
] | python | train |
BenDoan/perform | perform.py | https://github.com/BenDoan/perform/blob/3434c5c68fb7661d74f03404c71bb5fbebe1900f/perform.py#L157-L164 | def refresh_listing():
"""Refreshes the list of programs attached to the perform module from
the path"""
for program in get_programs():
if re.match(r'^[a-zA-Z_][a-zA-Z_0-9]*$', program) is not None:
globals()[program] = partial(_run_program, program)
globals()["_"] = _underscore_run_program | [
"def",
"refresh_listing",
"(",
")",
":",
"for",
"program",
"in",
"get_programs",
"(",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'^[a-zA-Z_][a-zA-Z_0-9]*$'",
",",
"program",
")",
"is",
"not",
"None",
":",
"globals",
"(",
")",
"[",
"program",
"]",
"=",
"partial",
"(",
"_run_program",
",",
"program",
")",
"globals",
"(",
")",
"[",
"\"_\"",
"]",
"=",
"_underscore_run_program"
] | Refreshes the list of programs attached to the perform module from
the path | [
"Refreshes",
"the",
"list",
"of",
"programs",
"attached",
"to",
"the",
"perform",
"module",
"from",
"the",
"path"
] | python | train |
ebu/PlugIt | plugit_proxy/views.py | https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L654-L679 | def get_current_orga(request, hproject, availableOrga):
"""Return the current orga to use"""
# If nothing available return 404
if len(availableOrga) == 0:
raise Http404
# Find the current orga
currentOrgaId = request.session.get('plugit-orgapk-' + str(hproject.pk), None)
# If we don't have a current one select the first available
if currentOrgaId is None:
(tmpOrga, _) = availableOrga[0]
currentOrgaId = tmpOrga.pk
else:
# If the current Orga is not among the available ones reset to the first one
availableOrgaIds = [o.pk for (o, r) in availableOrga]
if currentOrgaId not in availableOrgaIds:
(tmpOrga, _) = availableOrga[0]
currentOrgaId = tmpOrga.pk
from organizations.models import Organization
realCurrentOrga = get_object_or_404(Organization, pk=currentOrgaId)
return realCurrentOrga | [
"def",
"get_current_orga",
"(",
"request",
",",
"hproject",
",",
"availableOrga",
")",
":",
"# If nothing available return 404",
"if",
"len",
"(",
"availableOrga",
")",
"==",
"0",
":",
"raise",
"Http404",
"# Find the current orga",
"currentOrgaId",
"=",
"request",
".",
"session",
".",
"get",
"(",
"'plugit-orgapk-'",
"+",
"str",
"(",
"hproject",
".",
"pk",
")",
",",
"None",
")",
"# If we don't have a current one select the first available",
"if",
"currentOrgaId",
"is",
"None",
":",
"(",
"tmpOrga",
",",
"_",
")",
"=",
"availableOrga",
"[",
"0",
"]",
"currentOrgaId",
"=",
"tmpOrga",
".",
"pk",
"else",
":",
"# If the current Orga is not among the available ones reset to the first one",
"availableOrgaIds",
"=",
"[",
"o",
".",
"pk",
"for",
"(",
"o",
",",
"r",
")",
"in",
"availableOrga",
"]",
"if",
"currentOrgaId",
"not",
"in",
"availableOrgaIds",
":",
"(",
"tmpOrga",
",",
"_",
")",
"=",
"availableOrga",
"[",
"0",
"]",
"currentOrgaId",
"=",
"tmpOrga",
".",
"pk",
"from",
"organizations",
".",
"models",
"import",
"Organization",
"realCurrentOrga",
"=",
"get_object_or_404",
"(",
"Organization",
",",
"pk",
"=",
"currentOrgaId",
")",
"return",
"realCurrentOrga"
] | Return the current orga to use | [
"Return",
"the",
"current",
"orga",
"to",
"use"
] | python | train |
Rapptz/discord.py | discord/ext/commands/core.py | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/core.py#L277-L284 | def update(self, **kwargs):
"""Updates :class:`Command` instance with updated attribute.
This works similarly to the :func:`.command` decorator in terms
of parameters in that they are passed to the :class:`Command` or
subclass constructors, sans the name and callback.
"""
self.__init__(self.callback, **dict(self.__original_kwargs__, **kwargs)) | [
"def",
"update",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"__init__",
"(",
"self",
".",
"callback",
",",
"*",
"*",
"dict",
"(",
"self",
".",
"__original_kwargs__",
",",
"*",
"*",
"kwargs",
")",
")"
] | Updates :class:`Command` instance with updated attribute.
This works similarly to the :func:`.command` decorator in terms
of parameters in that they are passed to the :class:`Command` or
subclass constructors, sans the name and callback. | [
"Updates",
":",
"class",
":",
"Command",
"instance",
"with",
"updated",
"attribute",
"."
] | python | train |
bdastur/rex | rex.py | https://github.com/bdastur/rex/blob/e45173aa93f05a1d2ee65746e6f6cc6d829daf60/rex.py#L338-L380 | def parse_tabular_string(search_string,
header_keys,
delimiter=None,
merge_list=None):
'''
Given a string in a tabular format, parse it and return a
dictionary
@args:
search_string: This is a string in tabular format (e.g.: output of df
command)
header_keys: This is a list of strings for the headers.
delimiter(optional): Default is None, which translates to spaces
merge_list(optional): In some cases 2 fields need to be merged as they
are one value.
'''
first_line = True
parsed_results = []
for line in search_string.splitlines():
if first_line:
first_line = False
else:
result = {}
row = line.split()
if merge_list:
for mergeset in merge_list:
fidx = mergeset[0]
lidx = mergeset[1]
try:
row[fidx] = "_".join(row[fidx:(lidx+1)])
row.remove(row[lidx])
except IndexError:
pass
if len(row) != len(header_keys):
print "Incorrect fields len "
continue
key_count = 0
for column in row:
result[header_keys[key_count]] = column
key_count += 1
parsed_results.append(result)
return parsed_results | [
"def",
"parse_tabular_string",
"(",
"search_string",
",",
"header_keys",
",",
"delimiter",
"=",
"None",
",",
"merge_list",
"=",
"None",
")",
":",
"first_line",
"=",
"True",
"parsed_results",
"=",
"[",
"]",
"for",
"line",
"in",
"search_string",
".",
"splitlines",
"(",
")",
":",
"if",
"first_line",
":",
"first_line",
"=",
"False",
"else",
":",
"result",
"=",
"{",
"}",
"row",
"=",
"line",
".",
"split",
"(",
")",
"if",
"merge_list",
":",
"for",
"mergeset",
"in",
"merge_list",
":",
"fidx",
"=",
"mergeset",
"[",
"0",
"]",
"lidx",
"=",
"mergeset",
"[",
"1",
"]",
"try",
":",
"row",
"[",
"fidx",
"]",
"=",
"\"_\"",
".",
"join",
"(",
"row",
"[",
"fidx",
":",
"(",
"lidx",
"+",
"1",
")",
"]",
")",
"row",
".",
"remove",
"(",
"row",
"[",
"lidx",
"]",
")",
"except",
"IndexError",
":",
"pass",
"if",
"len",
"(",
"row",
")",
"!=",
"len",
"(",
"header_keys",
")",
":",
"print",
"\"Incorrect fields len \"",
"continue",
"key_count",
"=",
"0",
"for",
"column",
"in",
"row",
":",
"result",
"[",
"header_keys",
"[",
"key_count",
"]",
"]",
"=",
"column",
"key_count",
"+=",
"1",
"parsed_results",
".",
"append",
"(",
"result",
")",
"return",
"parsed_results"
] | Given a string in a tabular format, parse it and return a
dictionary
@args:
search_string: This is a string in tabular format (e.g.: output of df
command)
header_keys: This is a list of strings for the headers.
delimiter(optional): Default is None, which translates to spaces
merge_list(optional): In some cases 2 fields need to be merged as they
are one value. | [
"Given",
"a",
"string",
"in",
"a",
"tabular",
"format",
"parse",
"it",
"and",
"return",
"a",
"dictionary"
] | python | train |
nerox8664/pytorch2keras | pytorch2keras/normalization_layers.py | https://github.com/nerox8664/pytorch2keras/blob/750eaf747323580e6732d0c5ba9f2f39cb096764/pytorch2keras/normalization_layers.py#L115-L138 | def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert dropout.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting dropout ...')
if names == 'short':
tf_name = 'DO' + random_string(6)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
dropout = keras.layers.Dropout(rate=params['ratio'], name=tf_name)
layers[scope_name] = dropout(layers[inputs[0]]) | [
"def",
"convert_dropout",
"(",
"params",
",",
"w_name",
",",
"scope_name",
",",
"inputs",
",",
"layers",
",",
"weights",
",",
"names",
")",
":",
"print",
"(",
"'Converting dropout ...'",
")",
"if",
"names",
"==",
"'short'",
":",
"tf_name",
"=",
"'DO'",
"+",
"random_string",
"(",
"6",
")",
"elif",
"names",
"==",
"'keep'",
":",
"tf_name",
"=",
"w_name",
"else",
":",
"tf_name",
"=",
"w_name",
"+",
"str",
"(",
"random",
".",
"random",
"(",
")",
")",
"dropout",
"=",
"keras",
".",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"params",
"[",
"'ratio'",
"]",
",",
"name",
"=",
"tf_name",
")",
"layers",
"[",
"scope_name",
"]",
"=",
"dropout",
"(",
"layers",
"[",
"inputs",
"[",
"0",
"]",
"]",
")"
] | Convert dropout.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | [
"Convert",
"dropout",
"."
] | python | valid |
saulpw/visidata | visidata/vdtui.py | https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L608-L616 | def callHook(self, hookname, *args, **kwargs):
'Call all functions registered with `addHook` for the given hookname.'
r = []
for f in self.hooks[hookname]:
try:
r.append(f(*args, **kwargs))
except Exception as e:
exceptionCaught(e)
return r | [
"def",
"callHook",
"(",
"self",
",",
"hookname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"[",
"]",
"for",
"f",
"in",
"self",
".",
"hooks",
"[",
"hookname",
"]",
":",
"try",
":",
"r",
".",
"append",
"(",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"exceptionCaught",
"(",
"e",
")",
"return",
"r"
] | Call all functions registered with `addHook` for the given hookname. | [
"Call",
"all",
"functions",
"registered",
"with",
"addHook",
"for",
"the",
"given",
"hookname",
"."
] | python | train |
google/prettytensor | prettytensor/pretty_tensor_sparse_methods.py | https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_sparse_methods.py#L22-L61 | def to_dense_one_hot(labels, class_count):
"""Converts a vector that specified one-hot per batch into a dense version.
Args:
labels: The labels input.
class_count: The number of classes as an int.
Returns:
One dense vector for each item in the batch.
Raises:
ValueError: If labels is not rank 1.
TypeError: If class_count is not an integer or labels is not an integer
Tensor.
"""
if not isinstance(class_count, tf.compat.integral_types):
raise TypeError('class_count must be an integer type.')
if labels.dtype.base_dtype not in (tf.int32, tf.int64):
raise TypeError('Labels must be an integer: %s' % labels.dtype)
if labels.get_shape().ndims != 1:
raise ValueError('Labels must be a rank 1 tensor: %s' % labels.get_shape())
dtype = labels.dtype.base_dtype
class_tensor = tf.convert_to_tensor(
class_count, dtype=dtype, name='class_count')
# Extract the batch from the shape so this is batch independent.
batch = tf.gather(tf.shape(labels), 0)
count = tf.expand_dims(tf.range(0, limit=batch), 1)
labels = tf.expand_dims(labels, 1)
batch = tf.gather(tf.shape(labels), 0)
if dtype != tf.int32:
count = tf.cast(count, dtype)
batch = tf.cast(batch, dtype)
result = tf.sparse_to_dense(
tf.concat([count, labels], 1),
tf.concat([tf.expand_dims(batch, 0), tf.expand_dims(class_tensor, 0)], 0),
1.0, 0.0)
result.set_shape([labels.get_shape().dims[0], class_count])
return result | [
"def",
"to_dense_one_hot",
"(",
"labels",
",",
"class_count",
")",
":",
"if",
"not",
"isinstance",
"(",
"class_count",
",",
"tf",
".",
"compat",
".",
"integral_types",
")",
":",
"raise",
"TypeError",
"(",
"'class_count must be an integer type.'",
")",
"if",
"labels",
".",
"dtype",
".",
"base_dtype",
"not",
"in",
"(",
"tf",
".",
"int32",
",",
"tf",
".",
"int64",
")",
":",
"raise",
"TypeError",
"(",
"'Labels must be an integer: %s'",
"%",
"labels",
".",
"dtype",
")",
"if",
"labels",
".",
"get_shape",
"(",
")",
".",
"ndims",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Labels must be a rank 1 tensor: %s'",
"%",
"labels",
".",
"get_shape",
"(",
")",
")",
"dtype",
"=",
"labels",
".",
"dtype",
".",
"base_dtype",
"class_tensor",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"class_count",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"'class_count'",
")",
"# Extract the batch from the shape so this is batch independent.",
"batch",
"=",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"labels",
")",
",",
"0",
")",
"count",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"range",
"(",
"0",
",",
"limit",
"=",
"batch",
")",
",",
"1",
")",
"labels",
"=",
"tf",
".",
"expand_dims",
"(",
"labels",
",",
"1",
")",
"batch",
"=",
"tf",
".",
"gather",
"(",
"tf",
".",
"shape",
"(",
"labels",
")",
",",
"0",
")",
"if",
"dtype",
"!=",
"tf",
".",
"int32",
":",
"count",
"=",
"tf",
".",
"cast",
"(",
"count",
",",
"dtype",
")",
"batch",
"=",
"tf",
".",
"cast",
"(",
"batch",
",",
"dtype",
")",
"result",
"=",
"tf",
".",
"sparse_to_dense",
"(",
"tf",
".",
"concat",
"(",
"[",
"count",
",",
"labels",
"]",
",",
"1",
")",
",",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"expand_dims",
"(",
"batch",
",",
"0",
")",
",",
"tf",
".",
"expand_dims",
"(",
"class_tensor",
",",
"0",
")",
"]",
",",
"0",
")",
",",
"1.0",
",",
"0.0",
")",
"result",
".",
"set_shape",
"(",
"[",
"labels",
".",
"get_shape",
"(",
")",
".",
"dims",
"[",
"0",
"]",
",",
"class_count",
"]",
")",
"return",
"result"
] | Converts a vector that specified one-hot per batch into a dense version.
Args:
labels: The labels input.
class_count: The number of classes as an int.
Returns:
One dense vector for each item in the batch.
Raises:
ValueError: If labels is not rank 1.
TypeError: If class_count is not an integer or labels is not an integer
Tensor. | [
"Converts",
"a",
"vector",
"that",
"specified",
"one",
"-",
"hot",
"per",
"batch",
"into",
"a",
"dense",
"version",
"."
] | python | train |
QuantEcon/QuantEcon.py | quantecon/robustlq.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/robustlq.py#L359-L402 | def evaluate_F(self, F):
"""
Given a fixed policy F, with the interpretation :math:`u = -F x`, this
function computes the matrix :math:`P_F` and constant :math:`d_F`
associated with discounted cost :math:`J_F(x) = x' P_F x + d_F`
Parameters
----------
F : array_like(float, ndim=2)
The policy function, a k x n array
Returns
-------
P_F : array_like(float, ndim=2)
Matrix for discounted cost
d_F : scalar(float)
Constant for discounted cost
K_F : array_like(float, ndim=2)
Worst case policy
O_F : array_like(float, ndim=2)
Matrix for discounted entropy
o_F : scalar(float)
Constant for discounted entropy
"""
# == Simplify names == #
Q, R, A, B, C = self.Q, self.R, self.A, self.B, self.C
beta, theta = self.beta, self.theta
# == Solve for policies and costs using agent 2's problem == #
K_F, P_F = self.F_to_K(F)
I = np.identity(self.j)
H = inv(I - C.T.dot(P_F.dot(C)) / theta)
d_F = log(det(H))
# == Compute O_F and o_F == #
sig = -1.0 / theta
AO = sqrt(beta) * (A - dot(B, F) + dot(C, K_F))
O_F = solve_discrete_lyapunov(AO.T, beta * dot(K_F.T, K_F))
ho = (trace(H - 1) - d_F) / 2.0
tr = trace(dot(O_F, C.dot(H.dot(C.T))))
o_F = (ho + beta * tr) / (1 - beta)
return K_F, P_F, d_F, O_F, o_F | [
"def",
"evaluate_F",
"(",
"self",
",",
"F",
")",
":",
"# == Simplify names == #",
"Q",
",",
"R",
",",
"A",
",",
"B",
",",
"C",
"=",
"self",
".",
"Q",
",",
"self",
".",
"R",
",",
"self",
".",
"A",
",",
"self",
".",
"B",
",",
"self",
".",
"C",
"beta",
",",
"theta",
"=",
"self",
".",
"beta",
",",
"self",
".",
"theta",
"# == Solve for policies and costs using agent 2's problem == #",
"K_F",
",",
"P_F",
"=",
"self",
".",
"F_to_K",
"(",
"F",
")",
"I",
"=",
"np",
".",
"identity",
"(",
"self",
".",
"j",
")",
"H",
"=",
"inv",
"(",
"I",
"-",
"C",
".",
"T",
".",
"dot",
"(",
"P_F",
".",
"dot",
"(",
"C",
")",
")",
"/",
"theta",
")",
"d_F",
"=",
"log",
"(",
"det",
"(",
"H",
")",
")",
"# == Compute O_F and o_F == #",
"sig",
"=",
"-",
"1.0",
"/",
"theta",
"AO",
"=",
"sqrt",
"(",
"beta",
")",
"*",
"(",
"A",
"-",
"dot",
"(",
"B",
",",
"F",
")",
"+",
"dot",
"(",
"C",
",",
"K_F",
")",
")",
"O_F",
"=",
"solve_discrete_lyapunov",
"(",
"AO",
".",
"T",
",",
"beta",
"*",
"dot",
"(",
"K_F",
".",
"T",
",",
"K_F",
")",
")",
"ho",
"=",
"(",
"trace",
"(",
"H",
"-",
"1",
")",
"-",
"d_F",
")",
"/",
"2.0",
"tr",
"=",
"trace",
"(",
"dot",
"(",
"O_F",
",",
"C",
".",
"dot",
"(",
"H",
".",
"dot",
"(",
"C",
".",
"T",
")",
")",
")",
")",
"o_F",
"=",
"(",
"ho",
"+",
"beta",
"*",
"tr",
")",
"/",
"(",
"1",
"-",
"beta",
")",
"return",
"K_F",
",",
"P_F",
",",
"d_F",
",",
"O_F",
",",
"o_F"
] | Given a fixed policy F, with the interpretation :math:`u = -F x`, this
function computes the matrix :math:`P_F` and constant :math:`d_F`
associated with discounted cost :math:`J_F(x) = x' P_F x + d_F`
Parameters
----------
F : array_like(float, ndim=2)
The policy function, a k x n array
Returns
-------
P_F : array_like(float, ndim=2)
Matrix for discounted cost
d_F : scalar(float)
Constant for discounted cost
K_F : array_like(float, ndim=2)
Worst case policy
O_F : array_like(float, ndim=2)
Matrix for discounted entropy
o_F : scalar(float)
Constant for discounted entropy | [
"Given",
"a",
"fixed",
"policy",
"F",
"with",
"the",
"interpretation",
":",
"math",
":",
"u",
"=",
"-",
"F",
"x",
"this",
"function",
"computes",
"the",
"matrix",
":",
"math",
":",
"P_F",
"and",
"constant",
":",
"math",
":",
"d_F",
"associated",
"with",
"discounted",
"cost",
":",
"math",
":",
"J_F",
"(",
"x",
")",
"=",
"x",
"P_F",
"x",
"+",
"d_F"
] | python | train |
inspirehep/refextract | refextract/documents/pdf.py | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/documents/pdf.py#L434-L449 | def replace_undesirable_characters(line):
"""
Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced.
"""
# These are separate because we want a particular order
for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS:
line = line.replace(bad_string, replacement)
for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS):
line = line.replace(bad_char, replacement)
return line | [
"def",
"replace_undesirable_characters",
"(",
"line",
")",
":",
"# These are separate because we want a particular order",
"for",
"bad_string",
",",
"replacement",
"in",
"UNDESIRABLE_STRING_REPLACEMENTS",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"bad_string",
",",
"replacement",
")",
"for",
"bad_char",
",",
"replacement",
"in",
"iteritems",
"(",
"UNDESIRABLE_CHAR_REPLACEMENTS",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"bad_char",
",",
"replacement",
")",
"return",
"line"
] | Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced. | [
"Replace",
"certain",
"bad",
"characters",
"in",
"a",
"text",
"line",
"."
] | python | train |
thunder-project/thunder | thunder/series/series.py | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L292-L348 | def select(self, crit):
"""
Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices.
"""
import types
# handle lists, strings, and ints
if not isinstance(crit, types.FunctionType):
# set("foo") -> {"f", "o"}; wrap in list to prevent:
if isinstance(crit, string_types):
critlist = set([crit])
else:
try:
critlist = set(crit)
except TypeError:
# typically means crit is not an iterable type; for instance, crit is an int
critlist = set([crit])
crit = lambda x: x in critlist
# if only one index, return it directly or throw an error
index = self.index
if size(index) == 1:
if crit(index[0]):
return self
else:
raise Exception('No indices found matching criterion')
# determine new index and check the result
newindex = [i for i in index if crit(i)]
if len(newindex) == 0:
raise Exception('No indices found matching criterion')
if array(newindex == index).all():
return self
# use fast logical indexing to get the new values
subinds = where([crit(i) for i in index])
new = self.map(lambda x: x[subinds], index=newindex)
# if singleton, need to check whether it's an array or a scalar/int
# if array, recompute a new set of indices
if len(newindex) == 1:
new = new.map(lambda x: x[0], index=newindex)
val = new.first()
if size(val) == 1:
newindex = [newindex[0]]
else:
newindex = arange(0, size(val))
new._index = newindex
return new | [
"def",
"select",
"(",
"self",
",",
"crit",
")",
":",
"import",
"types",
"# handle lists, strings, and ints",
"if",
"not",
"isinstance",
"(",
"crit",
",",
"types",
".",
"FunctionType",
")",
":",
"# set(\"foo\") -> {\"f\", \"o\"}; wrap in list to prevent:",
"if",
"isinstance",
"(",
"crit",
",",
"string_types",
")",
":",
"critlist",
"=",
"set",
"(",
"[",
"crit",
"]",
")",
"else",
":",
"try",
":",
"critlist",
"=",
"set",
"(",
"crit",
")",
"except",
"TypeError",
":",
"# typically means crit is not an iterable type; for instance, crit is an int",
"critlist",
"=",
"set",
"(",
"[",
"crit",
"]",
")",
"crit",
"=",
"lambda",
"x",
":",
"x",
"in",
"critlist",
"# if only one index, return it directly or throw an error",
"index",
"=",
"self",
".",
"index",
"if",
"size",
"(",
"index",
")",
"==",
"1",
":",
"if",
"crit",
"(",
"index",
"[",
"0",
"]",
")",
":",
"return",
"self",
"else",
":",
"raise",
"Exception",
"(",
"'No indices found matching criterion'",
")",
"# determine new index and check the result",
"newindex",
"=",
"[",
"i",
"for",
"i",
"in",
"index",
"if",
"crit",
"(",
"i",
")",
"]",
"if",
"len",
"(",
"newindex",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'No indices found matching criterion'",
")",
"if",
"array",
"(",
"newindex",
"==",
"index",
")",
".",
"all",
"(",
")",
":",
"return",
"self",
"# use fast logical indexing to get the new values",
"subinds",
"=",
"where",
"(",
"[",
"crit",
"(",
"i",
")",
"for",
"i",
"in",
"index",
"]",
")",
"new",
"=",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"subinds",
"]",
",",
"index",
"=",
"newindex",
")",
"# if singleton, need to check whether it's an array or a scalar/int",
"# if array, recompute a new set of indices",
"if",
"len",
"(",
"newindex",
")",
"==",
"1",
":",
"new",
"=",
"new",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
",",
"index",
"=",
"newindex",
")",
"val",
"=",
"new",
".",
"first",
"(",
")",
"if",
"size",
"(",
"val",
")",
"==",
"1",
":",
"newindex",
"=",
"[",
"newindex",
"[",
"0",
"]",
"]",
"else",
":",
"newindex",
"=",
"arange",
"(",
"0",
",",
"size",
"(",
"val",
")",
")",
"new",
".",
"_index",
"=",
"newindex",
"return",
"new"
] | Select subset of values that match a given index criterion.
Parameters
----------
crit : function, list, str, int
Criterion function to map to indices, specific index value,
or list of indices. | [
"Select",
"subset",
"of",
"values",
"that",
"match",
"a",
"given",
"index",
"criterion",
"."
] | python | train |
klavinslab/coral | coral/analysis/_sequencing/sanger.py | https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_sequencing/sanger.py#L72-L80 | def nonmatches(self):
'''Report mismatches, indels, and coverage.'''
# For every result, keep a dictionary of mismatches, insertions, and
# deletions
report = []
for result in self.aligned_results:
report.append(self._analyze_single(self.aligned_reference, result))
return report | [
"def",
"nonmatches",
"(",
"self",
")",
":",
"# For every result, keep a dictionary of mismatches, insertions, and",
"# deletions",
"report",
"=",
"[",
"]",
"for",
"result",
"in",
"self",
".",
"aligned_results",
":",
"report",
".",
"append",
"(",
"self",
".",
"_analyze_single",
"(",
"self",
".",
"aligned_reference",
",",
"result",
")",
")",
"return",
"report"
] | Report mismatches, indels, and coverage. | [
"Report",
"mismatches",
"indels",
"and",
"coverage",
"."
] | python | train |
gatkin/declxml | declxml.py | https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L476-L499 | def dictionary(
element_name, # type: Text
children, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for dictionary values.
:param element_name: Name of the XML element containing the dictionary value. Can also be
specified using supported XPath syntax.
:param children: List of declxml processor objects for processing the children
contained within the dictionary.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Dictionary(element_name, children, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | [
"def",
"dictionary",
"(",
"element_name",
",",
"# type: Text",
"children",
",",
"# type: List[Processor]",
"required",
"=",
"True",
",",
"# type: bool",
"alias",
"=",
"None",
",",
"# type: Optional[Text]",
"hooks",
"=",
"None",
"# type: Optional[Hooks]",
")",
":",
"# type: (...) -> RootProcessor",
"processor",
"=",
"_Dictionary",
"(",
"element_name",
",",
"children",
",",
"required",
",",
"alias",
")",
"return",
"_processor_wrap_if_hooks",
"(",
"processor",
",",
"hooks",
")"
] | Create a processor for dictionary values.
:param element_name: Name of the XML element containing the dictionary value. Can also be
specified using supported XPath syntax.
:param children: List of declxml processor objects for processing the children
contained within the dictionary.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param hooks: A Hooks object.
:return: A declxml processor object. | [
"Create",
"a",
"processor",
"for",
"dictionary",
"values",
"."
] | python | train |
spacetelescope/drizzlepac | drizzlepac/imgclasses.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imgclasses.py#L1613-L1672 | def _estimate_2dhist_shift(imgxy, refxy, searchrad=3.0):
""" Create a 2D matrix-histogram which contains the delta between each
XY position and each UV position. Then estimate initial offset
between catalogs.
"""
print("Computing initial guess for X and Y shifts...")
# create ZP matrix
zpmat = _xy_2dhist(imgxy, refxy, r=searchrad)
nonzeros = np.count_nonzero(zpmat)
if nonzeros == 0:
# no matches within search radius. Return (0, 0):
print("WARNING: No matches found within a search radius of {:g} "
"pixels.".format(searchrad))
return 0.0, 0.0, 0, 0, zpmat, False
elif nonzeros == 1:
# only one non-zero bin:
yp, xp = np.unravel_index(np.argmax(zpmat), zpmat.shape)
maxval = int(np.ceil(zpmat[yp, xp]))
xp -= searchrad
yp -= searchrad
print("Found initial X and Y shifts of {:.4g}, {:.4g} "
"based on a single non-zero bin and {} matches"
.format(xp, yp, maxval))
return xp, yp, maxval, maxval, zpmat, True
(xp, yp), fit_status, fit_sl = _find_peak(zpmat, peak_fit_box=5,
mask=zpmat > 0)
if fit_status.startswith('ERROR'):
print("WARNING: No valid shift found within a search radius of {:g} "
"pixels.".format(searchrad))
maxval = int(np.ceil(zpmat.max()))
return 0.0, 0.0, maxval, maxval, zpmat, False
xp -= searchrad
yp -= searchrad
if fit_status == 'WARNING:EDGE':
print(
"WARNING: Found peak in the 2D histogram lies at the edge of "
"the histogram. Try increasing 'searchrad' for improved results."
)
# Attempt to estimate "significance of detection":
maxval = zpmat.max()
zpmat_mask = (zpmat > 0) & (zpmat < maxval)
if np.any(zpmat_mask):
bkg = zpmat[zpmat_mask].mean()
sig = maxval / np.sqrt(bkg)
flux = int(zpmat[fit_sl].sum())
print("Found initial X and Y shifts of {:.4g}, {:.4g} "
"with significance of {:.4g} and {:d} matches"
.format(xp, yp, sig, flux))
return xp, yp, int(np.ceil(maxval)), flux, zpmat, True | [
"def",
"_estimate_2dhist_shift",
"(",
"imgxy",
",",
"refxy",
",",
"searchrad",
"=",
"3.0",
")",
":",
"print",
"(",
"\"Computing initial guess for X and Y shifts...\"",
")",
"# create ZP matrix",
"zpmat",
"=",
"_xy_2dhist",
"(",
"imgxy",
",",
"refxy",
",",
"r",
"=",
"searchrad",
")",
"nonzeros",
"=",
"np",
".",
"count_nonzero",
"(",
"zpmat",
")",
"if",
"nonzeros",
"==",
"0",
":",
"# no matches within search radius. Return (0, 0):",
"print",
"(",
"\"WARNING: No matches found within a search radius of {:g} \"",
"\"pixels.\"",
".",
"format",
"(",
"searchrad",
")",
")",
"return",
"0.0",
",",
"0.0",
",",
"0",
",",
"0",
",",
"zpmat",
",",
"False",
"elif",
"nonzeros",
"==",
"1",
":",
"# only one non-zero bin:",
"yp",
",",
"xp",
"=",
"np",
".",
"unravel_index",
"(",
"np",
".",
"argmax",
"(",
"zpmat",
")",
",",
"zpmat",
".",
"shape",
")",
"maxval",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"zpmat",
"[",
"yp",
",",
"xp",
"]",
")",
")",
"xp",
"-=",
"searchrad",
"yp",
"-=",
"searchrad",
"print",
"(",
"\"Found initial X and Y shifts of {:.4g}, {:.4g} \"",
"\"based on a single non-zero bin and {} matches\"",
".",
"format",
"(",
"xp",
",",
"yp",
",",
"maxval",
")",
")",
"return",
"xp",
",",
"yp",
",",
"maxval",
",",
"maxval",
",",
"zpmat",
",",
"True",
"(",
"xp",
",",
"yp",
")",
",",
"fit_status",
",",
"fit_sl",
"=",
"_find_peak",
"(",
"zpmat",
",",
"peak_fit_box",
"=",
"5",
",",
"mask",
"=",
"zpmat",
">",
"0",
")",
"if",
"fit_status",
".",
"startswith",
"(",
"'ERROR'",
")",
":",
"print",
"(",
"\"WARNING: No valid shift found within a search radius of {:g} \"",
"\"pixels.\"",
".",
"format",
"(",
"searchrad",
")",
")",
"maxval",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"zpmat",
".",
"max",
"(",
")",
")",
")",
"return",
"0.0",
",",
"0.0",
",",
"maxval",
",",
"maxval",
",",
"zpmat",
",",
"False",
"xp",
"-=",
"searchrad",
"yp",
"-=",
"searchrad",
"if",
"fit_status",
"==",
"'WARNING:EDGE'",
":",
"print",
"(",
"\"WARNING: Found peak in the 2D histogram lies at the edge of \"",
"\"the histogram. Try increasing 'searchrad' for improved results.\"",
")",
"# Attempt to estimate \"significance of detection\":",
"maxval",
"=",
"zpmat",
".",
"max",
"(",
")",
"zpmat_mask",
"=",
"(",
"zpmat",
">",
"0",
")",
"&",
"(",
"zpmat",
"<",
"maxval",
")",
"if",
"np",
".",
"any",
"(",
"zpmat_mask",
")",
":",
"bkg",
"=",
"zpmat",
"[",
"zpmat_mask",
"]",
".",
"mean",
"(",
")",
"sig",
"=",
"maxval",
"/",
"np",
".",
"sqrt",
"(",
"bkg",
")",
"flux",
"=",
"int",
"(",
"zpmat",
"[",
"fit_sl",
"]",
".",
"sum",
"(",
")",
")",
"print",
"(",
"\"Found initial X and Y shifts of {:.4g}, {:.4g} \"",
"\"with significance of {:.4g} and {:d} matches\"",
".",
"format",
"(",
"xp",
",",
"yp",
",",
"sig",
",",
"flux",
")",
")",
"return",
"xp",
",",
"yp",
",",
"int",
"(",
"np",
".",
"ceil",
"(",
"maxval",
")",
")",
",",
"flux",
",",
"zpmat",
",",
"True"
] | Create a 2D matrix-histogram which contains the delta between each
XY position and each UV position. Then estimate initial offset
between catalogs. | [
"Create",
"a",
"2D",
"matrix",
"-",
"histogram",
"which",
"contains",
"the",
"delta",
"between",
"each",
"XY",
"position",
"and",
"each",
"UV",
"position",
".",
"Then",
"estimate",
"initial",
"offset",
"between",
"catalogs",
"."
] | python | train |
ebu/PlugIt | examples/simple_service_proxy_mode/server.py | https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/examples/simple_service_proxy_mode/server.py#L10-L21 | def get_ebuio_headers(request):
"""Return a dict with ebuio headers"""
retour = {}
for (key, value) in request.headers:
if key.startswith('X-Plugit-'):
key = key[9:]
retour[key] = value
return retour | [
"def",
"get_ebuio_headers",
"(",
"request",
")",
":",
"retour",
"=",
"{",
"}",
"for",
"(",
"key",
",",
"value",
")",
"in",
"request",
".",
"headers",
":",
"if",
"key",
".",
"startswith",
"(",
"'X-Plugit-'",
")",
":",
"key",
"=",
"key",
"[",
"9",
":",
"]",
"retour",
"[",
"key",
"]",
"=",
"value",
"return",
"retour"
] | Return a dict with ebuio headers | [
"Return",
"a",
"dict",
"with",
"ebuio",
"headers"
] | python | train |
phoebe-project/phoebe2 | phoebe/frontend/bundle.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L2040-L2048 | def remove_envelope(self, component=None, **kwargs):
"""
[NOT SUPPORTED]
[NOT IMPLEMENTED]
Shortcut to :meth:`remove_component` but with kind='envelope'
"""
kwargs.setdefault('kind', 'envelope')
return self.remove_component(component, **kwargs) | [
"def",
"remove_envelope",
"(",
"self",
",",
"component",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'kind'",
",",
"'envelope'",
")",
"return",
"self",
".",
"remove_component",
"(",
"component",
",",
"*",
"*",
"kwargs",
")"
] | [NOT SUPPORTED]
[NOT IMPLEMENTED]
Shortcut to :meth:`remove_component` but with kind='envelope' | [
"[",
"NOT",
"SUPPORTED",
"]",
"[",
"NOT",
"IMPLEMENTED",
"]"
] | python | train |
cqparts/cqparts | src/cqparts/codec/gltf.py | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/gltf.py#L187-L195 | def add_poly_index(self, i, j, k):
"""
Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer.
"""
self.idx_data.write(
struct.pack(self.idx_fmt, i) +
struct.pack(self.idx_fmt, j) +
struct.pack(self.idx_fmt, k)
) | [
"def",
"add_poly_index",
"(",
"self",
",",
"i",
",",
"j",
",",
"k",
")",
":",
"self",
".",
"idx_data",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"self",
".",
"idx_fmt",
",",
"i",
")",
"+",
"struct",
".",
"pack",
"(",
"self",
".",
"idx_fmt",
",",
"j",
")",
"+",
"struct",
".",
"pack",
"(",
"self",
".",
"idx_fmt",
",",
"k",
")",
")"
] | Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer. | [
"Add",
"3",
"SCALAR",
"of",
"uint",
"to",
"the",
"idx_data",
"buffer",
"."
] | python | train |
data-8/datascience | datascience/tables.py | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2831-L2843 | def _varargs_labels_as_list(label_list):
"""Return a list of labels for a list of labels or singleton list of list
of labels."""
if len(label_list) == 0:
return []
elif not _is_non_string_iterable(label_list[0]):
# Assume everything is a label. If not, it'll be caught later.
return label_list
elif len(label_list) == 1:
return label_list[0]
else:
raise ValueError("Labels {} contain more than list.".format(label_list),
"Pass just one list of labels.") | [
"def",
"_varargs_labels_as_list",
"(",
"label_list",
")",
":",
"if",
"len",
"(",
"label_list",
")",
"==",
"0",
":",
"return",
"[",
"]",
"elif",
"not",
"_is_non_string_iterable",
"(",
"label_list",
"[",
"0",
"]",
")",
":",
"# Assume everything is a label. If not, it'll be caught later.",
"return",
"label_list",
"elif",
"len",
"(",
"label_list",
")",
"==",
"1",
":",
"return",
"label_list",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Labels {} contain more than list.\"",
".",
"format",
"(",
"label_list",
")",
",",
"\"Pass just one list of labels.\"",
")"
] | Return a list of labels for a list of labels or singleton list of list
of labels. | [
"Return",
"a",
"list",
"of",
"labels",
"for",
"a",
"list",
"of",
"labels",
"or",
"singleton",
"list",
"of",
"list",
"of",
"labels",
"."
] | python | train |
3DLIRIOUS/MeshLabXML | meshlabxml/remesh.py | https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/remesh.py#L437-L486 | def curvature_flipping(script, angle_threshold=1.0, curve_type=0,
selected=False):
""" Use the points and normals to build a surface using the Poisson
Surface reconstruction approach.
Args:
script: the FilterScript object or script filename to write
the filter to.
angle_threshold (float): To avoid excessive flipping/swapping we
consider only couple of faces with a significant diedral angle
(e.g. greater than the indicated threshold).
curve_type (int): Choose a metric to compute surface curvature on vertices
H = mean curv, K = gaussian curv, A = area per vertex
1: Mean curvature = H
2: Norm squared mean curvature = (H * H) / A
3: Absolute curvature:
if(K >= 0) return 2 * H
else return 2 * sqrt(H ^ 2 - A * K)
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ''.join([
' <filter name="Curvature flipping optimization">\n',
' <Param name="selection" ',
'value="{}" '.format(str(selected).lower()),
'description="Update selection" ',
'type="RichBool" ',
'/>\n',
' <Param name="pthreshold" ',
'value="{}" '.format(angle_threshold),
'description="Angle Thr (deg)" ',
'type="RichFloat" ',
'/>\n',
' <Param name="curvtype" ',
'value="{:d}" '.format(curve_type),
'description="Curvature metric" ',
'enum_val0="mean" ',
'enum_val1="norm squared" ',
'enum_val2="absolute" ',
'enum_cardinality="3" ',
'type="RichEnum" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | [
"def",
"curvature_flipping",
"(",
"script",
",",
"angle_threshold",
"=",
"1.0",
",",
"curve_type",
"=",
"0",
",",
"selected",
"=",
"False",
")",
":",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Curvature flipping optimization\">\\n'",
",",
"' <Param name=\"selection\" '",
",",
"'value=\"{}\" '",
".",
"format",
"(",
"str",
"(",
"selected",
")",
".",
"lower",
"(",
")",
")",
",",
"'description=\"Update selection\" '",
",",
"'type=\"RichBool\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"pthreshold\" '",
",",
"'value=\"{}\" '",
".",
"format",
"(",
"angle_threshold",
")",
",",
"'description=\"Angle Thr (deg)\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"curvtype\" '",
",",
"'value=\"{:d}\" '",
".",
"format",
"(",
"curve_type",
")",
",",
"'description=\"Curvature metric\" '",
",",
"'enum_val0=\"mean\" '",
",",
"'enum_val1=\"norm squared\" '",
",",
"'enum_val2=\"absolute\" '",
",",
"'enum_cardinality=\"3\" '",
",",
"'type=\"RichEnum\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"return",
"None"
] | Optimize the mesh by flipping edges according to a curvature metric.
Args:
script: the FilterScript object or script filename to write
the filter to.
angle_threshold (float): To avoid excessive flipping/swapping we
consider only couple of faces with a significant diedral angle
(e.g. greater than the indicated threshold).
curve_type (int): Choose a metric to compute surface curvature on vertices
H = mean curv, K = gaussian curv, A = area per vertex
1: Mean curvature = H
2: Norm squared mean curvature = (H * H) / A
3: Absolute curvature:
if(K >= 0) return 2 * H
else return 2 * sqrt(H ^ 2 - A * K)
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA | [
"Use",
"the",
"points",
"and",
"normals",
"to",
"build",
"a",
"surface",
"using",
"the",
"Poisson",
"Surface",
"reconstruction",
"approach",
"."
] | python | test |
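The function is a thin XML writer; a sketch of the same Param-assembly pattern with a hypothetical rich_param helper (the real module emits the string through util.write_filter instead of returning it):

def rich_param(name, value, description, type_):
    # Hypothetical helper: one <Param/> element per filter argument.
    return ('    <Param name="{0}" value="{1}" description="{2}" '
            'type="{3}" />\n').format(name, value, description, type_)

filter_xml = ''.join([
    '  <filter name="Curvature flipping optimization">\n',
    rich_param('pthreshold', 1.0, 'Angle Thr (deg)', 'RichFloat'),
    rich_param('curvtype', 0, 'Curvature metric', 'RichEnum'),
    '  </filter>\n',
])
assert filter_xml.count('<Param') == 2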
sernst/cauldron | cauldron/cli/__init__.py | https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/__init__.py#L53-L63 | def reformat(source: str) -> str:
"""
Formats the source string to strip newlines on both ends and dedents the
the entire string
:param source:
The string to reformat
"""
value = source if source else ''
return dedent(value.strip('\n')).strip() | [
"def",
"reformat",
"(",
"source",
":",
"str",
")",
"->",
"str",
":",
"value",
"=",
"source",
"if",
"source",
"else",
"''",
"return",
"dedent",
"(",
"value",
".",
"strip",
"(",
"'\\n'",
")",
")",
".",
"strip",
"(",
")"
] | Formats the source string to strip newlines on both ends and dedents the
the entire string
:param source:
The string to reformat | [
"Formats",
"the",
"source",
"string",
"to",
"strip",
"newlines",
"on",
"both",
"ends",
"and",
"dedents",
"the",
"the",
"entire",
"string"
] | python | train |
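A quick check of what reformat does: surrounding blank lines go first, then the common indentation is removed:

from textwrap import dedent

def reformat(source):
    value = source if source else ''
    return dedent(value.strip('\n')).strip()

raw = '\n    first line\n      second, nested\n'
assert reformat(raw) == 'first line\n  second, nested'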
ratt-ru/PyMORESANE | pymoresane/iuwt.py | https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt.py#L17-L41 | def iuwt_decomposition(in1, scale_count, scale_adjust=0, mode='ser', core_count=2, store_smoothed=False,
store_on_gpu=False):
"""
This function serves as a handler for the different implementations of the IUWT decomposition. It allows the
different methods to be used almost interchangeably.
INPUTS:
in1 (no default): Array on which the decomposition is to be performed.
scale_count (no default): Maximum scale to be considered.
scale_adjust (default=0): Adjustment to scale value if first scales are of no interest.
mode (default='ser'): Implementation of the IUWT to be used - 'ser', 'mp' or 'gpu'.
core_count (default=2): Additional option for multiprocessing - specifies core count.
store_smoothed (default=False): Boolean specifier for whether the smoothed image is stored or not.
store_on_gpu (default=False): Boolean specifier for whether the decomposition is stored on the gpu or not.
OUTPUTS:
Returns the decomposition with the additional smoothed coefficients if specified.
"""
if mode=='ser':
return ser_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed)
elif mode=='mp':
return mp_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed, core_count)
elif mode=='gpu':
return gpu_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed, store_on_gpu) | [
"def",
"iuwt_decomposition",
"(",
"in1",
",",
"scale_count",
",",
"scale_adjust",
"=",
"0",
",",
"mode",
"=",
"'ser'",
",",
"core_count",
"=",
"2",
",",
"store_smoothed",
"=",
"False",
",",
"store_on_gpu",
"=",
"False",
")",
":",
"if",
"mode",
"==",
"'ser'",
":",
"return",
"ser_iuwt_decomposition",
"(",
"in1",
",",
"scale_count",
",",
"scale_adjust",
",",
"store_smoothed",
")",
"elif",
"mode",
"==",
"'mp'",
":",
"return",
"mp_iuwt_decomposition",
"(",
"in1",
",",
"scale_count",
",",
"scale_adjust",
",",
"store_smoothed",
",",
"core_count",
")",
"elif",
"mode",
"==",
"'gpu'",
":",
"return",
"gpu_iuwt_decomposition",
"(",
"in1",
",",
"scale_count",
",",
"scale_adjust",
",",
"store_smoothed",
",",
"store_on_gpu",
")"
] | This function serves as a handler for the different implementations of the IUWT decomposition. It allows the
different methods to be used almost interchangeably.
INPUTS:
in1 (no default): Array on which the decomposition is to be performed.
scale_count (no default): Maximum scale to be considered.
scale_adjust (default=0): Adjustment to scale value if first scales are of no interest.
mode (default='ser'): Implementation of the IUWT to be used - 'ser', 'mp' or 'gpu'.
core_count (default=1): Additional option for multiprocessing - specifies core count.
store_smoothed (default=False): Boolean specifier for whether the smoothed image is stored or not.
store_on_gpu (default=False): Boolean specifier for whether the decomposition is stored on the gpu or not.
OUTPUTS:
Returns the decomposition with the additional smoothed coefficients if specified. | [
"This",
"function",
"serves",
"as",
"a",
"handler",
"for",
"the",
"different",
"implementations",
"of",
"the",
"IUWT",
"decomposition",
".",
"It",
"allows",
"the",
"different",
"methods",
"to",
"be",
"used",
"almost",
"interchangeably",
"."
] | python | train |
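Note that an unrecognized mode falls off the if/elif chain and silently returns None. A sketch of the same dispatch with stub backends and an explicit failure instead (the *_decompose functions are stand-ins, not the real implementations):

def ser_decompose(data, scales):
    return ('ser', data, scales)          # stub backend

def mp_decompose(data, scales, cores):
    return ('mp', data, scales, cores)    # stub backend

def decompose(data, scales, mode='ser', core_count=2):
    if mode == 'ser':
        return ser_decompose(data, scales)
    elif mode == 'mp':
        return mp_decompose(data, scales, core_count)
    raise ValueError('unknown mode: {0!r}'.format(mode))

assert decompose([1, 2, 3], 2)[0] == 'ser'
assert decompose([1, 2, 3], 2, mode='mp')[3] == 2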
libyal/dtfabric | dtfabric/runtime/data_maps.py | https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L1584-L1686 | def _CompositeMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
"""Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
context_state = getattr(context, 'state', {})
attribute_index = context_state.get('attribute_index', 0)
mapped_values = context_state.get('mapped_values', None)
subcontext = context_state.get('context', None)
if not mapped_values:
mapped_values = self._structure_values_class()
if not subcontext:
subcontext = DataTypeMapContext(values={
type(mapped_values).__name__: mapped_values})
members_data_size = 0
for attribute_index in range(attribute_index, self._number_of_attributes):
attribute_name = self._attribute_names[attribute_index]
data_type_map = self._data_type_maps[attribute_index]
member_definition = self._data_type_definition.members[attribute_index]
condition = getattr(member_definition, 'condition', None)
if condition:
namespace = dict(subcontext.values)
# Make sure __builtins__ contains an empty dictionary.
namespace['__builtins__'] = {}
try:
condition_result = eval(condition, namespace) # pylint: disable=eval-used
except Exception as exception:
raise errors.MappingError(
'Unable to evaluate condition with error: {0!s}'.format(
exception))
if not isinstance(condition_result, bool):
raise errors.MappingError(
'Condition does not result in a boolean value')
if not condition_result:
continue
if isinstance(member_definition, data_types.PaddingDefinition):
_, byte_size = divmod(
members_data_size, member_definition.alignment_size)
if byte_size > 0:
byte_size = member_definition.alignment_size - byte_size
data_type_map.byte_size = byte_size
try:
value = data_type_map.MapByteStream(
byte_stream, byte_offset=byte_offset, context=subcontext)
setattr(mapped_values, attribute_name, value)
except errors.ByteStreamTooSmallError as exception:
context_state['attribute_index'] = attribute_index
context_state['context'] = subcontext
context_state['mapped_values'] = mapped_values
raise errors.ByteStreamTooSmallError(exception)
except Exception as exception:
raise errors.MappingError(exception)
supported_values = getattr(member_definition, 'values', None)
if supported_values and value not in supported_values:
raise errors.MappingError(
'Value: {0!s} not in supported values: {1:s}'.format(
value, ', '.join([
'{0!s}'.format(value) for value in supported_values])))
byte_offset += subcontext.byte_size
members_data_size += subcontext.byte_size
if attribute_index != (self._number_of_attributes - 1):
context_state['attribute_index'] = attribute_index
context_state['context'] = subcontext
context_state['mapped_values'] = mapped_values
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: missing attribute: {2:d}').format(
self._data_type_definition.name, byte_offset, attribute_index)
raise errors.ByteStreamTooSmallError(error_string)
if context:
context.byte_size = members_data_size
context.state = {}
return mapped_values | [
"def",
"_CompositeMapByteStream",
"(",
"self",
",",
"byte_stream",
",",
"byte_offset",
"=",
"0",
",",
"context",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"context_state",
"=",
"getattr",
"(",
"context",
",",
"'state'",
",",
"{",
"}",
")",
"attribute_index",
"=",
"context_state",
".",
"get",
"(",
"'attribute_index'",
",",
"0",
")",
"mapped_values",
"=",
"context_state",
".",
"get",
"(",
"'mapped_values'",
",",
"None",
")",
"subcontext",
"=",
"context_state",
".",
"get",
"(",
"'context'",
",",
"None",
")",
"if",
"not",
"mapped_values",
":",
"mapped_values",
"=",
"self",
".",
"_structure_values_class",
"(",
")",
"if",
"not",
"subcontext",
":",
"subcontext",
"=",
"DataTypeMapContext",
"(",
"values",
"=",
"{",
"type",
"(",
"mapped_values",
")",
".",
"__name__",
":",
"mapped_values",
"}",
")",
"members_data_size",
"=",
"0",
"for",
"attribute_index",
"in",
"range",
"(",
"attribute_index",
",",
"self",
".",
"_number_of_attributes",
")",
":",
"attribute_name",
"=",
"self",
".",
"_attribute_names",
"[",
"attribute_index",
"]",
"data_type_map",
"=",
"self",
".",
"_data_type_maps",
"[",
"attribute_index",
"]",
"member_definition",
"=",
"self",
".",
"_data_type_definition",
".",
"members",
"[",
"attribute_index",
"]",
"condition",
"=",
"getattr",
"(",
"member_definition",
",",
"'condition'",
",",
"None",
")",
"if",
"condition",
":",
"namespace",
"=",
"dict",
"(",
"subcontext",
".",
"values",
")",
"# Make sure __builtins__ contains an empty dictionary.",
"namespace",
"[",
"'__builtins__'",
"]",
"=",
"{",
"}",
"try",
":",
"condition_result",
"=",
"eval",
"(",
"condition",
",",
"namespace",
")",
"# pylint: disable=eval-used",
"except",
"Exception",
"as",
"exception",
":",
"raise",
"errors",
".",
"MappingError",
"(",
"'Unable to evaluate condition with error: {0!s}'",
".",
"format",
"(",
"exception",
")",
")",
"if",
"not",
"isinstance",
"(",
"condition_result",
",",
"bool",
")",
":",
"raise",
"errors",
".",
"MappingError",
"(",
"'Condition does not result in a boolean value'",
")",
"if",
"not",
"condition_result",
":",
"continue",
"if",
"isinstance",
"(",
"member_definition",
",",
"data_types",
".",
"PaddingDefinition",
")",
":",
"_",
",",
"byte_size",
"=",
"divmod",
"(",
"members_data_size",
",",
"member_definition",
".",
"alignment_size",
")",
"if",
"byte_size",
">",
"0",
":",
"byte_size",
"=",
"member_definition",
".",
"alignment_size",
"-",
"byte_size",
"data_type_map",
".",
"byte_size",
"=",
"byte_size",
"try",
":",
"value",
"=",
"data_type_map",
".",
"MapByteStream",
"(",
"byte_stream",
",",
"byte_offset",
"=",
"byte_offset",
",",
"context",
"=",
"subcontext",
")",
"setattr",
"(",
"mapped_values",
",",
"attribute_name",
",",
"value",
")",
"except",
"errors",
".",
"ByteStreamTooSmallError",
"as",
"exception",
":",
"context_state",
"[",
"'attribute_index'",
"]",
"=",
"attribute_index",
"context_state",
"[",
"'context'",
"]",
"=",
"subcontext",
"context_state",
"[",
"'mapped_values'",
"]",
"=",
"mapped_values",
"raise",
"errors",
".",
"ByteStreamTooSmallError",
"(",
"exception",
")",
"except",
"Exception",
"as",
"exception",
":",
"raise",
"errors",
".",
"MappingError",
"(",
"exception",
")",
"supported_values",
"=",
"getattr",
"(",
"member_definition",
",",
"'values'",
",",
"None",
")",
"if",
"supported_values",
"and",
"value",
"not",
"in",
"supported_values",
":",
"raise",
"errors",
".",
"MappingError",
"(",
"'Value: {0!s} not in supported values: {1:s}'",
".",
"format",
"(",
"value",
",",
"', '",
".",
"join",
"(",
"[",
"'{0!s}'",
".",
"format",
"(",
"value",
")",
"for",
"value",
"in",
"supported_values",
"]",
")",
")",
")",
"byte_offset",
"+=",
"subcontext",
".",
"byte_size",
"members_data_size",
"+=",
"subcontext",
".",
"byte_size",
"if",
"attribute_index",
"!=",
"(",
"self",
".",
"_number_of_attributes",
"-",
"1",
")",
":",
"context_state",
"[",
"'attribute_index'",
"]",
"=",
"attribute_index",
"context_state",
"[",
"'context'",
"]",
"=",
"subcontext",
"context_state",
"[",
"'mapped_values'",
"]",
"=",
"mapped_values",
"error_string",
"=",
"(",
"'Unable to read: {0:s} from byte stream at offset: {1:d} '",
"'with error: missing attribute: {2:d}'",
")",
".",
"format",
"(",
"self",
".",
"_data_type_definition",
".",
"name",
",",
"byte_offset",
",",
"attribute_index",
")",
"raise",
"errors",
".",
"ByteStreamTooSmallError",
"(",
"error_string",
")",
"if",
"context",
":",
"context",
".",
"byte_size",
"=",
"members_data_size",
"context",
".",
"state",
"=",
"{",
"}",
"return",
"mapped_values"
] | Maps a sequence of composite data types on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
object: mapped value.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream. | [
"Maps",
"a",
"sequence",
"of",
"composite",
"data",
"types",
"on",
"a",
"byte",
"stream",
"."
] | python | train |
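The member-condition step evaluates a Python expression against the values mapped so far, with __builtins__ replaced by an empty dict; a minimal sketch of that idea (note this restricts, but does not fully sandbox, eval):

mapped_values = {'format_version': 2, 'header_size': 24}
namespace = dict(mapped_values)
namespace['__builtins__'] = {}    # no open(), __import__(), etc.

assert eval('format_version >= 2', namespace) is True

try:
    eval('open("/etc/passwd")', namespace)
except NameError:
    pass    # builtins are unavailable inside conditions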
saltstack/salt | salt/cli/support/collector.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/support/collector.py#L421-L434 | def _cleanup(self):
'''
Cleanup if crash/exception
:return:
'''
if (hasattr(self, 'config')
and self.config.get('support_archive')
and os.path.exists(self.config['support_archive'])):
self.out.warning('Terminated earlier, cleaning up')
try:
os.unlink(self.config['support_archive'])
except Exception as err:
log.debug(err)
self.out.error('{} while cleaning up.'.format(err)) | [
"def",
"_cleanup",
"(",
"self",
")",
":",
"if",
"(",
"hasattr",
"(",
"self",
",",
"'config'",
")",
"and",
"self",
".",
"config",
".",
"get",
"(",
"'support_archive'",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"config",
"[",
"'support_archive'",
"]",
")",
")",
":",
"self",
".",
"out",
".",
"warning",
"(",
"'Terminated earlier, cleaning up'",
")",
"try",
":",
"os",
".",
"unlink",
"(",
"self",
".",
"config",
"[",
"'support_archive'",
"]",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"debug",
"(",
"err",
")",
"self",
".",
"out",
".",
"error",
"(",
"'{} while cleaning up.'",
".",
"format",
"(",
"err",
")",
")"
] | Cleanup if crash/exception
:return: | [
"Cleanup",
"if",
"crash",
"/",
"exception",
":",
"return",
":"
] | python | train |
msmbuilder/msmbuilder | msmbuilder/msm/core.py | https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/core.py#L311-L356 | def _solve_ratemat_eigensystem(theta, k, n):
"""Find the dominant eigenpairs of a reversible rate matrix (master
equation)
Parameters
----------
theta : ndarray, shape=(n_params,)
The free parameters of the rate matrix
k : int
The number of eigenpairs to find
n : int
The number of states
Notes
-----
Normalize the left (:math:`\phi`) and right (:math:`\psi`) eigenfunctions
according to the following criteria.
* The first left eigenvector, \phi_1, _is_ the stationary
distribution, and thus should be normalized to sum to 1.
* The left-right eigenpairs should be biorthonormal:
<\phi_i, \psi_j> = \delta_{ij}
* The left eigenvectors should satisfy
<\phi_i, \phi_i>_{\mu^{-1}} = 1
* The right eigenvectors should satisfy <\psi_i, \psi_i>_{\mu} = 1
Returns
-------
eigvals : np.ndarray, shape=(k,)
The largest `k` eigenvalues
lv : np.ndarray, shape=(n_states, k)
The normalized left eigenvectors (:math:`\phi`) of the rate matrix.
rv : np.ndarray, shape=(n_states, k)
The normalized right eigenvectors (:math:`\psi`) of the rate matrix.
"""
S = np.zeros((n, n))
pi = np.exp(theta[-n:])
pi = pi / pi.sum()
_ratematrix.build_ratemat(theta, n, S, which='S')
u, lv, rv = map(np.asarray, _ratematrix.eig_K(S, n, pi, 'S'))
order = np.argsort(-u)
u = u[order[:k]]
lv = lv[:, order[:k]]
rv = rv[:, order[:k]]
return _normalize_eigensystem(u, lv, rv) | [
"def",
"_solve_ratemat_eigensystem",
"(",
"theta",
",",
"k",
",",
"n",
")",
":",
"S",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"pi",
"=",
"np",
".",
"exp",
"(",
"theta",
"[",
"-",
"n",
":",
"]",
")",
"pi",
"=",
"pi",
"/",
"pi",
".",
"sum",
"(",
")",
"_ratematrix",
".",
"build_ratemat",
"(",
"theta",
",",
"n",
",",
"S",
",",
"which",
"=",
"'S'",
")",
"u",
",",
"lv",
",",
"rv",
"=",
"map",
"(",
"np",
".",
"asarray",
",",
"_ratematrix",
".",
"eig_K",
"(",
"S",
",",
"n",
",",
"pi",
",",
"'S'",
")",
")",
"order",
"=",
"np",
".",
"argsort",
"(",
"-",
"u",
")",
"u",
"=",
"u",
"[",
"order",
"[",
":",
"k",
"]",
"]",
"lv",
"=",
"lv",
"[",
":",
",",
"order",
"[",
":",
"k",
"]",
"]",
"rv",
"=",
"rv",
"[",
":",
",",
"order",
"[",
":",
"k",
"]",
"]",
"return",
"_normalize_eigensystem",
"(",
"u",
",",
"lv",
",",
"rv",
")"
] | Find the dominant eigenpairs of a reversible rate matrix (master
equation)
Parameters
----------
theta : ndarray, shape=(n_params,)
The free parameters of the rate matrix
k : int
The number of eigenpairs to find
n : int
The number of states
Notes
-----
Normalize the left (:math:`\phi`) and right (:math:`\psi`) eigenfunctions
according to the following criteria.
* The first left eigenvector, \phi_1, _is_ the stationary
distribution, and thus should be normalized to sum to 1.
* The left-right eigenpairs should be biorthonormal:
<\phi_i, \psi_j> = \delta_{ij}
* The left eigenvectors should satisfy
<\phi_i, \phi_i>_{\mu^{-1}} = 1
* The right eigenvectors should satisfy <\psi_i, \psi_i>_{\mu} = 1
Returns
-------
eigvals : np.ndarray, shape=(k,)
The largest `k` eigenvalues
lv : np.ndarray, shape=(n_states, k)
The normalized left eigenvectors (:math:`\phi`) of the rate matrix.
rv : np.ndarray, shape=(n_states, k)
The normalized right eigenvectors (:math:`\psi`) of the rate matrix. | [
"Find",
"the",
"dominant",
"eigenpairs",
"of",
"a",
"reversible",
"rate",
"matrix",
"(",
"master",
"equation",
")"
] | python | train |
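The ordering step is generic linear algebra; a sketch of "sort eigenpairs by descending eigenvalue and keep the top k" on a plain symmetric matrix, with numpy only (no _ratematrix, no normalization):

import numpy as np

A = np.diag([-3.0, 0.0, -1.0])      # toy spectrum: eigenvalues 0 > -1 > -3
u, vecs = np.linalg.eigh(A)
k = 2
order = np.argsort(-u)              # indices by descending eigenvalue
u, vecs = u[order[:k]], vecs[:, order[:k]]
assert np.allclose(u, [0.0, -1.0])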
mitsei/dlkit | dlkit/json_/osid/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/objects.py#L1425-L1440 | def set_sequestered(self, sequestered):
"""Sets the sequestered flag.
arg: sequestered (boolean): the new sequestered flag
raise: InvalidArgument - ``sequestered`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if sequestered is None:
raise errors.NullArgument()
if self.get_sequestered_metadata().is_read_only():
raise errors.NoAccess()
if not isinstance(sequestered, bool):
raise errors.InvalidArgument()
self._my_map['sequestered'] = sequestered | [
"def",
"set_sequestered",
"(",
"self",
",",
"sequestered",
")",
":",
"if",
"sequestered",
"is",
"None",
":",
"raise",
"errors",
".",
"NullArgument",
"(",
")",
"if",
"self",
".",
"get_sequestered_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"if",
"not",
"isinstance",
"(",
"sequestered",
",",
"bool",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
")",
"self",
".",
"_my_map",
"[",
"'sequestered'",
"]",
"=",
"sequestered"
] | Sets the sequestered flag.
arg: sequestered (boolean): the new sequestered flag
raise: InvalidArgument - ``sequestered`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | [
"Sets",
"the",
"sequestered",
"flag",
"."
] | python | train |
dustinmm80/healthy | pylint_runner.py | https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/pylint_runner.py#L22-L44 | def score(package_path):
"""
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
"""
python_files = find_files(package_path, '*.py')
total_counter = Counter()
for python_file in python_files:
output = run_pylint(python_file)
counter = parse_pylint_output(output)
total_counter += counter
score_value = 0
for stat, count in total_counter.items():
score_value += SCORING_VALUES[stat] * count
return score_value / 5 | [
"def",
"score",
"(",
"package_path",
")",
":",
"python_files",
"=",
"find_files",
"(",
"package_path",
",",
"'*.py'",
")",
"total_counter",
"=",
"Counter",
"(",
")",
"for",
"python_file",
"in",
"python_files",
":",
"output",
"=",
"run_pylint",
"(",
"python_file",
")",
"counter",
"=",
"parse_pylint_output",
"(",
"output",
")",
"total_counter",
"+=",
"counter",
"score_value",
"=",
"0",
"for",
"count",
",",
"stat",
"in",
"enumerate",
"(",
"total_counter",
")",
":",
"score_value",
"+=",
"SCORING_VALUES",
"[",
"stat",
"]",
"*",
"count",
"return",
"score_value",
"/",
"5"
] | Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: the numeric score | [
"Runs",
"pylint",
"on",
"a",
"package",
"and",
"returns",
"a",
"score",
"Lower",
"score",
"is",
"better"
] | python | train |
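With SCORING_VALUES assumed to map pylint stat names to severity weights, the aggregation is a weighted sum over the merged Counter; a standalone sketch:

from collections import Counter

SCORING_VALUES = {'error': 5, 'warning': 1}     # assumed weights

def weighted(counter):
    return sum(SCORING_VALUES[stat] * n for stat, n in counter.items()) / 5

assert weighted(Counter({'error': 2, 'warning': 3})) == (5 * 2 + 1 * 3) / 5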
bitesofcode/projexui | projexui/widgets/xganttwidget/xganttscene.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttscene.py#L224-L250 | def drawBackground(self, painter, rect):
"""
Draws the background for this scene.
:param painter | <QPainter>
rect | <QRect>
"""
if self._dirty:
self.rebuild()
# draw the alternating rects
gantt = self.ganttWidget()
# draw the alternating rects
painter.setPen(Qt.NoPen)
painter.setBrush(gantt.alternateBrush())
for rect in self._alternateRects:
painter.drawRect(rect)
# draw the weekends
painter.setBrush(gantt.weekendBrush())
for rect in self._weekendRects:
painter.drawRect(rect)
# draw the default background
painter.setPen(gantt.gridPen())
painter.drawLines(self._hlines + self._vlines) | [
"def",
"drawBackground",
"(",
"self",
",",
"painter",
",",
"rect",
")",
":",
"if",
"self",
".",
"_dirty",
":",
"self",
".",
"rebuild",
"(",
")",
"# draw the alternating rects\r",
"gantt",
"=",
"self",
".",
"ganttWidget",
"(",
")",
"# draw the alternating rects\r",
"painter",
".",
"setPen",
"(",
"Qt",
".",
"NoPen",
")",
"painter",
".",
"setBrush",
"(",
"gantt",
".",
"alternateBrush",
"(",
")",
")",
"for",
"rect",
"in",
"self",
".",
"_alternateRects",
":",
"painter",
".",
"drawRect",
"(",
"rect",
")",
"# draw the weekends\r",
"painter",
".",
"setBrush",
"(",
"gantt",
".",
"weekendBrush",
"(",
")",
")",
"for",
"rect",
"in",
"self",
".",
"_weekendRects",
":",
"painter",
".",
"drawRect",
"(",
"rect",
")",
"# draw the default background\r",
"painter",
".",
"setPen",
"(",
"gantt",
".",
"gridPen",
"(",
")",
")",
"painter",
".",
"drawLines",
"(",
"self",
".",
"_hlines",
"+",
"self",
".",
"_vlines",
")"
] | Draws the background for this scene.
:param painter | <QPainter>
rect | <QRect> | [
"Draws",
"the",
"background",
"for",
"this",
"scene",
".",
":",
"param",
"painter",
"|",
"<QPainter",
">",
"rect",
"|",
"<QRect",
">"
] | python | train |
polyaxon/polyaxon | polyaxon/k8s_events_handlers/tasks/statuses.py | https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/k8s_events_handlers/tasks/statuses.py#L71-L100 | def k8s_events_handle_job_statuses(self: 'celery_app.task', payload: Dict) -> None:
"""Project jobs statuses"""
details = payload['details']
job_uuid = details['labels']['job_uuid']
job_name = details['labels']['job_name']
project_name = details['labels'].get('project_name')
logger.debug('handling events status for job %s', job_name)
try:
job = Job.objects.get(uuid=job_uuid)
except Job.DoesNotExist:
logger.debug('Job `%s` does not exist', job_name)
return
try:
job.project
except Project.DoesNotExist:
logger.debug('Project for job `%s` does not exist', project_name)
return
# Set the new status
try:
set_node_scheduling(job, details['node_name'])
job.set_status(status=payload['status'],
message=payload['message'],
traceback=payload.get('traceback'),
details=details)
except IntegrityError:
# Due to concurrency this could happen, we just retry it
self.retry(countdown=Intervals.EXPERIMENTS_SCHEDULER) | [
"def",
"k8s_events_handle_job_statuses",
"(",
"self",
":",
"'celery_app.task'",
",",
"payload",
":",
"Dict",
")",
"->",
"None",
":",
"details",
"=",
"payload",
"[",
"'details'",
"]",
"job_uuid",
"=",
"details",
"[",
"'labels'",
"]",
"[",
"'job_uuid'",
"]",
"job_name",
"=",
"details",
"[",
"'labels'",
"]",
"[",
"'job_name'",
"]",
"project_name",
"=",
"details",
"[",
"'labels'",
"]",
".",
"get",
"(",
"'project_name'",
")",
"logger",
".",
"debug",
"(",
"'handling events status for job %s'",
",",
"job_name",
")",
"try",
":",
"job",
"=",
"Job",
".",
"objects",
".",
"get",
"(",
"uuid",
"=",
"job_uuid",
")",
"except",
"Job",
".",
"DoesNotExist",
":",
"logger",
".",
"debug",
"(",
"'Job `%s` does not exist'",
",",
"job_name",
")",
"return",
"try",
":",
"job",
".",
"project",
"except",
"Project",
".",
"DoesNotExist",
":",
"logger",
".",
"debug",
"(",
"'Project for job `%s` does not exist'",
",",
"project_name",
")",
"return",
"# Set the new status",
"try",
":",
"set_node_scheduling",
"(",
"job",
",",
"details",
"[",
"'node_name'",
"]",
")",
"job",
".",
"set_status",
"(",
"status",
"=",
"payload",
"[",
"'status'",
"]",
",",
"message",
"=",
"payload",
"[",
"'message'",
"]",
",",
"traceback",
"=",
"payload",
".",
"get",
"(",
"'traceback'",
")",
",",
"details",
"=",
"details",
")",
"except",
"IntegrityError",
":",
"# Due to concurrency this could happen, we just retry it",
"self",
".",
"retry",
"(",
"countdown",
"=",
"Intervals",
".",
"EXPERIMENTS_SCHEDULER",
")"
] | Project jobs statuses | [
"Project",
"jobs",
"statuses"
] | python | train |
jaraco/tempora | tempora/__init__.py | https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L467-L481 | def divide_timedelta(td1, td2):
"""
Get the ratio of two timedeltas
>>> one_day = datetime.timedelta(days=1)
>>> one_hour = datetime.timedelta(hours=1)
>>> divide_timedelta(one_hour, one_day) == 1 / 24
True
"""
try:
return td1 / td2
except TypeError:
# Python 3.2 gets division
# http://bugs.python.org/issue2706
return td1.total_seconds() / td2.total_seconds() | [
"def",
"divide_timedelta",
"(",
"td1",
",",
"td2",
")",
":",
"try",
":",
"return",
"td1",
"/",
"td2",
"except",
"TypeError",
":",
"# Python 3.2 gets division",
"# http://bugs.python.org/issue2706",
"return",
"td1",
".",
"total_seconds",
"(",
")",
"/",
"td2",
".",
"total_seconds",
"(",
")"
] | Get the ratio of two timedeltas
>>> one_day = datetime.timedelta(days=1)
>>> one_hour = datetime.timedelta(hours=1)
>>> divide_timedelta(one_hour, one_day) == 1 / 24
True | [
"Get",
"the",
"ratio",
"of",
"two",
"timedeltas"
] | python | valid |
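Usage is just a ratio, handy for progress fractions; the except branch only matters on interpreters that predate timedelta division:

import datetime

elapsed = datetime.timedelta(minutes=45)
total = datetime.timedelta(hours=2)
assert elapsed / total == 0.375
assert elapsed.total_seconds() / total.total_seconds() == 0.375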
sorgerlab/indra | indra/preassembler/ontology_mapper.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/ontology_mapper.py#L45-L74 | def map_statements(self):
"""Run the ontology mapping on the statements."""
for stmt in self.statements:
for agent in stmt.agent_list():
if agent is None:
continue
all_mappings = []
for db_name, db_id in agent.db_refs.items():
if isinstance(db_id, list):
db_id = db_id[0][0]
mappings = self._map_id(db_name, db_id)
all_mappings += mappings
for map_db_name, map_db_id, score, orig_db_name in all_mappings:
if map_db_name in agent.db_refs:
continue
if self.scored:
# If the original one is a scored grounding,
# we take that score and multiply it with the mapping
# score. Otherwise we assume the original score is 1.
try:
orig_score = agent.db_refs[orig_db_name][0][1]
except Exception:
orig_score = 1.0
agent.db_refs[map_db_name] = \
[(map_db_id, score * orig_score)]
else:
if map_db_name in ('UN', 'HUME'):
agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
else:
agent.db_refs[map_db_name] = map_db_id | [
"def",
"map_statements",
"(",
"self",
")",
":",
"for",
"stmt",
"in",
"self",
".",
"statements",
":",
"for",
"agent",
"in",
"stmt",
".",
"agent_list",
"(",
")",
":",
"if",
"agent",
"is",
"None",
":",
"continue",
"all_mappings",
"=",
"[",
"]",
"for",
"db_name",
",",
"db_id",
"in",
"agent",
".",
"db_refs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"db_id",
",",
"list",
")",
":",
"db_id",
"=",
"db_id",
"[",
"0",
"]",
"[",
"0",
"]",
"mappings",
"=",
"self",
".",
"_map_id",
"(",
"db_name",
",",
"db_id",
")",
"all_mappings",
"+=",
"mappings",
"for",
"map_db_name",
",",
"map_db_id",
",",
"score",
",",
"orig_db_name",
"in",
"all_mappings",
":",
"if",
"map_db_name",
"in",
"agent",
".",
"db_refs",
":",
"continue",
"if",
"self",
".",
"scored",
":",
"# If the original one is a scored grounding,",
"# we take that score and multiply it with the mapping",
"# score. Otherwise we assume the original score is 1.",
"try",
":",
"orig_score",
"=",
"agent",
".",
"db_refs",
"[",
"orig_db_name",
"]",
"[",
"0",
"]",
"[",
"1",
"]",
"except",
"Exception",
":",
"orig_score",
"=",
"1.0",
"agent",
".",
"db_refs",
"[",
"map_db_name",
"]",
"=",
"[",
"(",
"map_db_id",
",",
"score",
"*",
"orig_score",
")",
"]",
"else",
":",
"if",
"map_db_name",
"in",
"(",
"'UN'",
",",
"'HUME'",
")",
":",
"agent",
".",
"db_refs",
"[",
"map_db_name",
"]",
"=",
"[",
"(",
"map_db_id",
",",
"1.0",
")",
"]",
"else",
":",
"agent",
".",
"db_refs",
"[",
"map_db_name",
"]",
"=",
"map_db_id"
] | Run the ontology mapping on the statements. | [
"Run",
"the",
"ontology",
"mapping",
"on",
"the",
"statements",
"."
] | python | train |
google/grr | grr/client/grr_response_client/client_actions/file_finder.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/file_finder.py#L28-L55 | def FileFinderOSFromClient(args):
"""This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
`StatEntry` instances.
"""
stat_cache = filesystem.StatCache()
opts = args.action.stat
for path in GetExpandedPaths(args):
try:
content_conditions = conditions.ContentCondition.Parse(args.conditions)
for content_condition in content_conditions:
with io.open(path, "rb") as fd:
result = list(content_condition.Search(fd))
if not result:
raise _SkipFileException()
# TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.
stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links))
stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=opts.collect_ext_attrs)
yield stat_entry
except _SkipFileException:
pass | [
"def",
"FileFinderOSFromClient",
"(",
"args",
")",
":",
"stat_cache",
"=",
"filesystem",
".",
"StatCache",
"(",
")",
"opts",
"=",
"args",
".",
"action",
".",
"stat",
"for",
"path",
"in",
"GetExpandedPaths",
"(",
"args",
")",
":",
"try",
":",
"content_conditions",
"=",
"conditions",
".",
"ContentCondition",
".",
"Parse",
"(",
"args",
".",
"conditions",
")",
"for",
"content_condition",
"in",
"content_conditions",
":",
"with",
"io",
".",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"fd",
":",
"result",
"=",
"list",
"(",
"content_condition",
".",
"Search",
"(",
"fd",
")",
")",
"if",
"not",
"result",
":",
"raise",
"_SkipFileException",
"(",
")",
"# TODO: `opts.resolve_links` has type `RDFBool`, not `bool`.",
"stat",
"=",
"stat_cache",
".",
"Get",
"(",
"path",
",",
"follow_symlink",
"=",
"bool",
"(",
"opts",
".",
"resolve_links",
")",
")",
"stat_entry",
"=",
"client_utils",
".",
"StatEntryFromStatPathSpec",
"(",
"stat",
",",
"ext_attrs",
"=",
"opts",
".",
"collect_ext_attrs",
")",
"yield",
"stat_entry",
"except",
"_SkipFileException",
":",
"pass"
] | This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
`StatEntry` instances. | [
"This",
"function",
"expands",
"paths",
"from",
"the",
"args",
"and",
"returns",
"related",
"stat",
"entries",
"."
] | python | train |
jilljenn/tryalgo | tryalgo/three_partition.py | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/three_partition.py#L8-L23 | def three_partition(x):
"""partition a set of integers in 3 parts of same total value
:param x: table of non-negative values
:returns: triplet of the integers encoding the sets, or None otherwise
:complexity: :math:`O(2^{2n})`
"""
f = [0] * (1 << len(x))
for i in range(len(x)):
for S in range(1 << i):
f[S | (1 << i)] = f[S] + x[i]
for A in range(1 << len(x)):
for B in range(1 << len(x)):
if A & B == 0 and f[A] == f[B] and 3 * f[A] == f[-1]:
return (A, B, ((1 << len(x)) - 1) ^ A ^ B)
return None | [
"def",
"three_partition",
"(",
"x",
")",
":",
"f",
"=",
"[",
"0",
"]",
"*",
"(",
"1",
"<<",
"len",
"(",
"x",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x",
")",
")",
":",
"for",
"S",
"in",
"range",
"(",
"1",
"<<",
"i",
")",
":",
"f",
"[",
"S",
"|",
"(",
"1",
"<<",
"i",
")",
"]",
"=",
"f",
"[",
"S",
"]",
"+",
"x",
"[",
"i",
"]",
"for",
"A",
"in",
"range",
"(",
"1",
"<<",
"len",
"(",
"x",
")",
")",
":",
"for",
"B",
"in",
"range",
"(",
"1",
"<<",
"len",
"(",
"x",
")",
")",
":",
"if",
"A",
"&",
"B",
"==",
"0",
"and",
"f",
"[",
"A",
"]",
"==",
"f",
"[",
"B",
"]",
"and",
"3",
"*",
"f",
"[",
"A",
"]",
"==",
"f",
"[",
"-",
"1",
"]",
":",
"return",
"(",
"A",
",",
"B",
",",
"(",
"(",
"1",
"<<",
"len",
"(",
"x",
")",
")",
"-",
"1",
")",
"^",
"A",
"^",
"B",
")",
"return",
"None"
] | partition a set of integers in 3 parts of same total value
:param x: table of non-negative values
:returns: triplet of the integers encoding the sets, or None otherwise
:complexity: :math:`O(2^{2n})` | [
"partition",
"a",
"set",
"of",
"integers",
"in",
"3",
"parts",
"of",
"same",
"total",
"value"
] | python | train |
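Usage sketch, with three_partition as defined above: each returned integer is a bitmask over indices of x, so membership can be checked bit by bit (total here is 45, so every part must sum to 15):

x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
A, B, C = three_partition(x)
for mask in (A, B, C):
    part = [x[i] for i in range(len(x)) if mask >> i & 1]
    assert sum(part) == 15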
RJT1990/pyflux | pyflux/ssm/llm.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/llm.py#L179-L262 | def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
nsims = kwargs.get('nsims', 200)
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
# Retrieve data, dates and (transformed) latent variables
if self.latent_variables.estimation_method in ['M-H']:
lower_final = 0
upper_final = 0
plot_values_final = 0
date_index = self.shift_dates(h)
plot_index = date_index[-h-past_values:]
for i in range(nsims):
t_params = self.draw_latent_variables(nsims=1).T[0]
a, P = self._forecast_model(t_params, h)
plot_values = a[0][-h-past_values:]
forecasted_values = a[0][-h:]
lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
lower_final += np.append(plot_values[-h-1], lower)
upper_final += np.append(plot_values[-h-1], upper)
plot_values_final += plot_values
plot_values_final = plot_values_final / nsims
lower_final = lower_final / nsims
upper_final = upper_final / nsims
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower_final, upper_final, alpha=0.2)
plt.plot(plot_index, plot_values_final)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
else:
a, P = self._forecast_model(self.latent_variables.get_z_values(),h)
date_index = self.shift_dates(h)
plot_values = a[0][-h-past_values:]
forecasted_values = a[0][-h:]
lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
lower = np.append(plot_values[-h-1],lower)
upper = np.append(plot_values[-h-1],upper)
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | [
"def",
"plot_predict",
"(",
"self",
",",
"h",
"=",
"5",
",",
"past_values",
"=",
"20",
",",
"intervals",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"seaborn",
"as",
"sns",
"figsize",
"=",
"kwargs",
".",
"get",
"(",
"'figsize'",
",",
"(",
"10",
",",
"7",
")",
")",
"nsims",
"=",
"kwargs",
".",
"get",
"(",
"'nsims'",
",",
"200",
")",
"if",
"self",
".",
"latent_variables",
".",
"estimated",
"is",
"False",
":",
"raise",
"Exception",
"(",
"\"No latent variables estimated!\"",
")",
"else",
":",
"# Retrieve data, dates and (transformed) latent variables ",
"if",
"self",
".",
"latent_variables",
".",
"estimation_method",
"in",
"[",
"'M-H'",
"]",
":",
"lower_final",
"=",
"0",
"upper_final",
"=",
"0",
"plot_values_final",
"=",
"0",
"date_index",
"=",
"self",
".",
"shift_dates",
"(",
"h",
")",
"plot_index",
"=",
"date_index",
"[",
"-",
"h",
"-",
"past_values",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"nsims",
")",
":",
"t_params",
"=",
"self",
".",
"draw_latent_variables",
"(",
"nsims",
"=",
"1",
")",
".",
"T",
"[",
"0",
"]",
"a",
",",
"P",
"=",
"self",
".",
"_forecast_model",
"(",
"t_params",
",",
"h",
")",
"plot_values",
"=",
"a",
"[",
"0",
"]",
"[",
"-",
"h",
"-",
"past_values",
":",
"]",
"forecasted_values",
"=",
"a",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"lower",
"=",
"forecasted_values",
"-",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"t_params",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"upper",
"=",
"forecasted_values",
"+",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"t_params",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"lower_final",
"+=",
"np",
".",
"append",
"(",
"plot_values",
"[",
"-",
"h",
"-",
"1",
"]",
",",
"lower",
")",
"upper_final",
"+=",
"np",
".",
"append",
"(",
"plot_values",
"[",
"-",
"h",
"-",
"1",
"]",
",",
"upper",
")",
"plot_values_final",
"+=",
"plot_values",
"plot_values_final",
"=",
"plot_values_final",
"/",
"nsims",
"lower_final",
"=",
"lower_final",
"/",
"nsims",
"upper_final",
"=",
"upper_final",
"/",
"nsims",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"if",
"intervals",
"==",
"True",
":",
"plt",
".",
"fill_between",
"(",
"date_index",
"[",
"-",
"h",
"-",
"1",
":",
"]",
",",
"lower_final",
",",
"upper_final",
",",
"alpha",
"=",
"0.2",
")",
"plt",
".",
"plot",
"(",
"plot_index",
",",
"plot_values_final",
")",
"plt",
".",
"title",
"(",
"\"Forecast for \"",
"+",
"self",
".",
"data_name",
")",
"plt",
".",
"xlabel",
"(",
"\"Time\"",
")",
"plt",
".",
"ylabel",
"(",
"self",
".",
"data_name",
")",
"plt",
".",
"show",
"(",
")",
"else",
":",
"a",
",",
"P",
"=",
"self",
".",
"_forecast_model",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
",",
"h",
")",
"date_index",
"=",
"self",
".",
"shift_dates",
"(",
"h",
")",
"plot_values",
"=",
"a",
"[",
"0",
"]",
"[",
"-",
"h",
"-",
"past_values",
":",
"]",
"forecasted_values",
"=",
"a",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"lower",
"=",
"forecasted_values",
"-",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"upper",
"=",
"forecasted_values",
"+",
"1.96",
"*",
"np",
".",
"power",
"(",
"P",
"[",
"0",
"]",
"[",
"0",
"]",
"[",
"-",
"h",
":",
"]",
"+",
"self",
".",
"latent_variables",
".",
"z_list",
"[",
"0",
"]",
".",
"prior",
".",
"transform",
"(",
"self",
".",
"latent_variables",
".",
"get_z_values",
"(",
")",
"[",
"0",
"]",
")",
",",
"0.5",
")",
"lower",
"=",
"np",
".",
"append",
"(",
"plot_values",
"[",
"-",
"h",
"-",
"1",
"]",
",",
"lower",
")",
"upper",
"=",
"np",
".",
"append",
"(",
"plot_values",
"[",
"-",
"h",
"-",
"1",
"]",
",",
"upper",
")",
"plot_index",
"=",
"date_index",
"[",
"-",
"h",
"-",
"past_values",
":",
"]",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"if",
"intervals",
"==",
"True",
":",
"plt",
".",
"fill_between",
"(",
"date_index",
"[",
"-",
"h",
"-",
"1",
":",
"]",
",",
"lower",
",",
"upper",
",",
"alpha",
"=",
"0.2",
")",
"plt",
".",
"plot",
"(",
"plot_index",
",",
"plot_values",
")",
"plt",
".",
"title",
"(",
"\"Forecast for \"",
"+",
"self",
".",
"data_name",
")",
"plt",
".",
"xlabel",
"(",
"\"Time\"",
")",
"plt",
".",
"ylabel",
"(",
"self",
".",
"data_name",
")",
"plt",
".",
"show",
"(",
")"
] | Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show prediction intervals for the forecast?
Returns
----------
- Plot of the forecast | [
"Makes",
"forecast",
"with",
"the",
"estimated",
"model"
] | python | train |
edeposit/edeposit.amqp.ftp | src/edeposit/amqp/ftp/decoders/validator.py | https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/decoders/validator.py#L158-L169 | def process(self, key, val):
"""
Try to look for `key` in all required and optional fields. If found,
set the `val`.
"""
for field in self.fields:
if field.check(key, val):
return
for field in self.optional:
if field.check(key, val):
return | [
"def",
"process",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"for",
"field",
"in",
"self",
".",
"fields",
":",
"if",
"field",
".",
"check",
"(",
"key",
",",
"val",
")",
":",
"return",
"for",
"field",
"in",
"self",
".",
"optional",
":",
"if",
"field",
".",
"check",
"(",
"key",
",",
"val",
")",
":",
"return"
] | Try to look for `key` in all required and optional fields. If found,
set the `val`. | [
"Try",
"to",
"look",
"for",
"key",
"in",
"all",
"required",
"and",
"optional",
"fields",
".",
"If",
"found",
"set",
"the",
"val",
"."
] | python | train |
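A self-contained sketch of the first-match dispatch: each field object claims the keys it recognizes, and process() stops at the first acceptor (this Field class is hypothetical, not the real validator type):

class Field(object):
    def __init__(self, name):
        self.name, self.value = name, None

    def check(self, key, val):
        # Claim the pair when the key matches this field's name.
        if key == self.name:
            self.value = val
            return True
        return False

fields = [Field('title'), Field('author')]
optional = [Field('note')]

def process(key, val):
    for field in fields + optional:
        if field.check(key, val):
            return

process('note', 'first edition')
assert optional[0].value == 'first edition'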
gwww/elkm1 | elkm1_lib/areas.py | https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/areas.py#L20-L22 | def arm(self, level, code):
"""(Helper) Arm system at specified level (away, vacation, etc)"""
self._elk.send(al_encode(level, self._index, code)) | [
"def",
"arm",
"(",
"self",
",",
"level",
",",
"code",
")",
":",
"self",
".",
"_elk",
".",
"send",
"(",
"al_encode",
"(",
"level",
",",
"self",
".",
"_index",
",",
"code",
")",
")"
] | (Helper) Arm system at specified level (away, vacation, etc) | [
"(",
"Helper",
")",
"Arm",
"system",
"at",
"specified",
"level",
"(",
"away",
"vacation",
"etc",
")"
] | python | train |
mattja/nsim | nsim/analyses1/epochs.py | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L187-L273 | def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if variability is None:
variability = ts.variability_fp(plot=False)
orig_ndim = ts.ndim
if ts.ndim == 1:
ts = ts[:, np.newaxis]
if variability.ndim == 1:
variability = variability[:, np.newaxis, np.newaxis]
elif variability.ndim == 2:
variability = variability[:, np.newaxis, :]
channels = ts.shape[1]
n = len(ts)
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
fs = 1.0 / dt
allchannels_epochs = []
for i in range(channels):
v = variability[:, i, :]
v = np.nanmean(v, axis=1) # mean of q different variability measures
# then smooth the variability with a low-pass filter
nonnan_ix = np.nonzero(~np.isnan(v))[0]
nonnans = slice(nonnan_ix.min(), nonnan_ix.max())
crit_freq = 1.0 # Hz
b, a = signal.butter(3, 2.0 * crit_freq / fs)
#v[nonnans] = signal.filtfilt(b, a, v[nonnans])
v[nonnan_ix] = signal.filtfilt(b, a, v[nonnan_ix])
# find all local minima of the variability not exceeding the threshold
m = v[1:-1]
l = v[0:-2]
r = v[2:]
minima = np.nonzero(~np.isnan(m) & ~np.isnan(l) & ~np.isnan(r) &
(m <= threshold) & (m-l < 0) & (r-m > 0))[0] + 1
if len(minima) == 0:
print(u'Channel %d: no epochs found using threshold %g' % (
i, threshold))
allchannels_epochs.append([])
else:
# Sort the list of minima by ascending variability
minima = minima[np.argsort(v[minima])]
epochs = []
for m in minima:
# Check this minimum is not inside an existing epoch
overlap = False
for e in epochs:
if m >= e[0] and m <= e[1]:
overlap = True
break
if not overlap:
# Get largest subthreshold interval surrounding the minimum
startix = m - 1
endix = m + 1
for startix in range(m - 1, 0, -1):
if np.isnan(v[startix]) or v[startix] > threshold:
startix += 1
break
for endix in range(m + 1, len(v), 1):
if np.isnan(v[endix]) or v[endix] > threshold:
break
if (endix - startix) * dt >= minlength:
epochs.append((startix, endix))
allchannels_epochs.append(epochs)
if plot:
_plot_variability(ts, variability, threshold, allchannels_epochs)
if orig_ndim == 1:
allchannels_epochs = allchannels_epochs[0]
return (variability, allchannels_epochs) | [
"def",
"epochs",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"plot",
"=",
"True",
")",
":",
"if",
"variability",
"is",
"None",
":",
"variability",
"=",
"ts",
".",
"variability_fp",
"(",
"plot",
"=",
"False",
")",
"orig_ndim",
"=",
"ts",
".",
"ndim",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"if",
"variability",
".",
"ndim",
"is",
"1",
":",
"variability",
"=",
"variability",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"elif",
"variability",
".",
"ndim",
"is",
"2",
":",
"variability",
"=",
"variability",
"[",
":",
",",
"np",
".",
"newaxis",
",",
":",
"]",
"channels",
"=",
"ts",
".",
"shape",
"[",
"1",
"]",
"n",
"=",
"len",
"(",
"ts",
")",
"dt",
"=",
"(",
"1.0",
"*",
"ts",
".",
"tspan",
"[",
"-",
"1",
"]",
"-",
"ts",
".",
"tspan",
"[",
"0",
"]",
")",
"/",
"(",
"n",
"-",
"1",
")",
"fs",
"=",
"1.0",
"/",
"dt",
"allchannels_epochs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"channels",
")",
":",
"v",
"=",
"variability",
"[",
":",
",",
"i",
",",
":",
"]",
"v",
"=",
"np",
".",
"nanmean",
"(",
"v",
",",
"axis",
"=",
"1",
")",
"# mean of q different variability measures",
"# then smooth the variability with a low-pass filter",
"nonnan_ix",
"=",
"np",
".",
"nonzero",
"(",
"~",
"np",
".",
"isnan",
"(",
"v",
")",
")",
"[",
"0",
"]",
"nonnans",
"=",
"slice",
"(",
"nonnan_ix",
".",
"min",
"(",
")",
",",
"nonnan_ix",
".",
"max",
"(",
")",
")",
"crit_freq",
"=",
"1.0",
"# Hz",
"b",
",",
"a",
"=",
"signal",
".",
"butter",
"(",
"3",
",",
"2.0",
"*",
"crit_freq",
"/",
"fs",
")",
"#v[nonnans] = signal.filtfilt(b, a, v[nonnans])",
"v",
"[",
"nonnan_ix",
"]",
"=",
"signal",
".",
"filtfilt",
"(",
"b",
",",
"a",
",",
"v",
"[",
"nonnan_ix",
"]",
")",
"# find all local minima of the variability not exceeding the threshold",
"m",
"=",
"v",
"[",
"1",
":",
"-",
"1",
"]",
"l",
"=",
"v",
"[",
"0",
":",
"-",
"2",
"]",
"r",
"=",
"v",
"[",
"2",
":",
"]",
"minima",
"=",
"np",
".",
"nonzero",
"(",
"~",
"np",
".",
"isnan",
"(",
"m",
")",
"&",
"~",
"np",
".",
"isnan",
"(",
"l",
")",
"&",
"~",
"np",
".",
"isnan",
"(",
"r",
")",
"&",
"(",
"m",
"<=",
"threshold",
")",
"&",
"(",
"m",
"-",
"l",
"<",
"0",
")",
"&",
"(",
"r",
"-",
"m",
">",
"0",
")",
")",
"[",
"0",
"]",
"+",
"1",
"if",
"len",
"(",
"minima",
")",
"is",
"0",
":",
"print",
"(",
"u'Channel %d: no epochs found using threshold %g'",
"%",
"(",
"i",
",",
"threshold",
")",
")",
"allchannels_epochs",
".",
"append",
"(",
"[",
"]",
")",
"else",
":",
"# Sort the list of minima by ascending variability",
"minima",
"=",
"minima",
"[",
"np",
".",
"argsort",
"(",
"v",
"[",
"minima",
"]",
")",
"]",
"epochs",
"=",
"[",
"]",
"for",
"m",
"in",
"minima",
":",
"# Check this minimum is not inside an existing epoch",
"overlap",
"=",
"False",
"for",
"e",
"in",
"epochs",
":",
"if",
"m",
">=",
"e",
"[",
"0",
"]",
"and",
"m",
"<=",
"e",
"[",
"1",
"]",
":",
"overlap",
"=",
"True",
"break",
"if",
"not",
"overlap",
":",
"# Get largest subthreshold interval surrounding the minimum",
"startix",
"=",
"m",
"-",
"1",
"endix",
"=",
"m",
"+",
"1",
"for",
"startix",
"in",
"range",
"(",
"m",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"if",
"np",
".",
"isnan",
"(",
"v",
"[",
"startix",
"]",
")",
"or",
"v",
"[",
"startix",
"]",
">",
"threshold",
":",
"startix",
"+=",
"1",
"break",
"for",
"endix",
"in",
"range",
"(",
"m",
"+",
"1",
",",
"len",
"(",
"v",
")",
",",
"1",
")",
":",
"if",
"np",
".",
"isnan",
"(",
"v",
"[",
"endix",
"]",
")",
"or",
"v",
"[",
"endix",
"]",
">",
"threshold",
":",
"break",
"if",
"(",
"endix",
"-",
"startix",
")",
"*",
"dt",
">=",
"minlength",
":",
"epochs",
".",
"append",
"(",
"(",
"startix",
",",
"endix",
")",
")",
"allchannels_epochs",
".",
"append",
"(",
"epochs",
")",
"if",
"plot",
":",
"_plot_variability",
"(",
"ts",
",",
"variability",
",",
"threshold",
",",
"allchannels_epochs",
")",
"if",
"orig_ndim",
"is",
"1",
":",
"allchannels_epochs",
"=",
"allchannels_epochs",
"[",
"0",
"]",
"return",
"(",
"variability",
",",
"allchannels_epochs",
")"
] | Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | [
"Identify",
"stationary",
"epochs",
"within",
"a",
"time",
"series",
"based",
"on",
"a",
"continuous",
"measure",
"of",
"variability",
".",
"Epochs",
"are",
"defined",
"to",
"contain",
"the",
"points",
"of",
"minimal",
"variability",
"and",
"to",
"extend",
"as",
"wide",
"as",
"possible",
"with",
"variability",
"not",
"exceeding",
"the",
"threshold",
"."
] | python | train |
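The epoch seeds come from a vectorized local-minimum test; an isolated sketch of just that step (NaN handling dropped for brevity):

import numpy as np

v = np.array([0.9, 0.2, 0.5, 0.1, 0.8])     # smoothed variability
threshold = 0.3
m, l, r = v[1:-1], v[:-2], v[2:]
minima = np.nonzero((m <= threshold) & (m - l < 0) & (r - m > 0))[0] + 1
assert minima.tolist() == [1, 3]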
koriakin/binflakes | binflakes/types/word.py | https://github.com/koriakin/binflakes/blob/f059cecadf1c605802a713c62375b5bd5606d53f/binflakes/types/word.py#L328-L333 | def sge(self, other):
"""Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger or equal.
"""
self._check_match(other)
return self.to_sint() >= other.to_sint() | [
"def",
"sge",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_check_match",
"(",
"other",
")",
"return",
"self",
".",
"to_sint",
"(",
")",
">=",
"other",
".",
"to_sint",
"(",
")"
] | Compares two equal-sized BinWords, treating them as signed
integers, and returning True if the first is bigger or equal. | [
"Compares",
"two",
"equal",
"-",
"sized",
"BinWords",
"treating",
"them",
"as",
"signed",
"integers",
"and",
"returning",
"True",
"if",
"the",
"first",
"is",
"bigger",
"or",
"equal",
"."
] | python | train |
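sge reduces to >= on two's-complement reinterpretations; a sketch with a hypothetical to_sint for fixed-width words (BinWord's own method is assumed to behave the same way):

def to_sint(value, width):
    # Two's-complement reinterpretation of an unsigned word.
    return value - (1 << width) if value >> (width - 1) else value

assert to_sint(0xFF, 8) == -1
assert to_sint(0x01, 8) >= to_sint(0xFF, 8)   # unsigned 1 < 255, signed 1 >= -1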
gem/oq-engine | openquake/hazardlib/gsim/campbell_bozorgnia_2014.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/campbell_bozorgnia_2014.py#L161-L166 | def _get_geometric_attenuation_term(self, C, mag, rrup):
"""
Returns the geometric attenuation term defined in equation 3
"""
return (C["c5"] + C["c6"] * mag) * np.log(np.sqrt((rrup ** 2.) +
(C["c7"] ** 2.))) | [
"def",
"_get_geometric_attenuation_term",
"(",
"self",
",",
"C",
",",
"mag",
",",
"rrup",
")",
":",
"return",
"(",
"C",
"[",
"\"c5\"",
"]",
"+",
"C",
"[",
"\"c6\"",
"]",
"*",
"mag",
")",
"*",
"np",
".",
"log",
"(",
"np",
".",
"sqrt",
"(",
"(",
"rrup",
"**",
"2.",
")",
"+",
"(",
"C",
"[",
"\"c7\"",
"]",
"**",
"2.",
")",
")",
")"
] | Returns the geometric attenuation term defined in equation 3 | [
"Returns",
"the",
"geometric",
"attenuation",
"term",
"defined",
"in",
"equation",
"3"
] | python | train |
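Numerically, equation 3 is f = (c5 + c6 * M) * ln(sqrt(Rrup**2 + c7**2)); a sketch with illustrative coefficient values (not the published coefficient table):

import numpy as np

C = {'c5': -2.1, 'c6': 0.17, 'c7': 6.8}        # illustrative values only
mag, rrup = 6.0, np.array([10.0, 50.0, 200.0])
term = (C['c5'] + C['c6'] * mag) * np.log(np.sqrt(rrup ** 2 + C['c7'] ** 2))
assert term.shape == rrup.shape and term[0] > term[-1]   # decays with distance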
pyQode/pyqode.core | pyqode/core/widgets/tabs.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/tabs.py#L349-L366 | def removeTab(self, index):
"""
Removes tab at index ``index``.
This method emits tab_closed for the removed tab.
:param index: index of the tab to remove.
"""
widget = self.widget(index)
try:
self._widgets.remove(widget)
except ValueError:
pass
self.tab_closed.emit(widget)
self._del_code_edit(widget)
QTabWidget.removeTab(self, index)
if widget == self._current:
self._current = None | [
"def",
"removeTab",
"(",
"self",
",",
"index",
")",
":",
"widget",
"=",
"self",
".",
"widget",
"(",
"index",
")",
"try",
":",
"self",
".",
"_widgets",
".",
"remove",
"(",
"widget",
")",
"except",
"ValueError",
":",
"pass",
"self",
".",
"tab_closed",
".",
"emit",
"(",
"widget",
")",
"self",
".",
"_del_code_edit",
"(",
"widget",
")",
"QTabWidget",
".",
"removeTab",
"(",
"self",
",",
"index",
")",
"if",
"widget",
"==",
"self",
".",
"_current",
":",
"self",
".",
"_current",
"=",
"None"
] | Removes tab at index ``index``.
This method will emit tab_closed for the removed tab.
:param index: index of the tab to remove. | [
"Removes",
"tab",
"at",
"index",
"index",
"."
] | python | train |
manns/pyspread | pyspread/src/lib/_grid_cairo_renderer.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L1051-L1061 | def get_right_key_rect(self):
"""Returns tuple key rect of right cell"""
key_right = self.row, self.col + 1, self.tab
border_width_right = \
float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0
rect_right = (self.x+self.width, self.y,
border_width_right, self.height)
return key_right, rect_right | [
"def",
"get_right_key_rect",
"(",
"self",
")",
":",
"key_right",
"=",
"self",
".",
"row",
",",
"self",
".",
"col",
"+",
"1",
",",
"self",
".",
"tab",
"border_width_right",
"=",
"float",
"(",
"self",
".",
"cell_attributes",
"[",
"self",
".",
"key",
"]",
"[",
"\"borderwidth_right\"",
"]",
")",
"/",
"2.0",
"rect_right",
"=",
"(",
"self",
".",
"x",
"+",
"self",
".",
"width",
",",
"self",
".",
"y",
",",
"border_width_right",
",",
"self",
".",
"height",
")",
"return",
"key_right",
",",
"rect_right"
] | Returns tuple key rect of right cell | [
"Returns",
"tuple",
"key",
"rect",
"of",
"right",
"cell"
] | python | train |
thunder-project/thunder | thunder/blocks/blocks.py | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/blocks/blocks.py#L75-L87 | def toimages(self):
"""
Convert blocks to images.
"""
from thunder.images.images import Images
if self.mode == 'spark':
values = self.values.values_to_keys((0,)).unchunk()
if self.mode == 'local':
values = self.values.unchunk()
return Images(values) | [
"def",
"toimages",
"(",
"self",
")",
":",
"from",
"thunder",
".",
"images",
".",
"images",
"import",
"Images",
"if",
"self",
".",
"mode",
"==",
"'spark'",
":",
"values",
"=",
"self",
".",
"values",
".",
"values_to_keys",
"(",
"(",
"0",
",",
")",
")",
".",
"unchunk",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"'local'",
":",
"values",
"=",
"self",
".",
"values",
".",
"unchunk",
"(",
")",
"return",
"Images",
"(",
"values",
")"
] | Convert blocks to images. | [
"Convert",
"blocks",
"to",
"images",
"."
] | python | train |
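A round-trip sketch for `toimages` above: chunk an Images object into Blocks, then invert. The `fromrandom` helper and the `toblocks` signature are assumed from typical Thunder usage; treat them as illustrative.

import thunder as td  # assumed available

imgs = td.images.fromrandom(shape=(4, 16, 16))  # 4 random 16x16 images
blocks = imgs.toblocks((8, 8))                  # chunk each image into 8x8 blocks
restored = blocks.toimages()                    # inverse of the chunking
print(restored.shape)                           # (4, 16, 16)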
bovee/Aston | aston/spectra/isotopes.py | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/spectra/isotopes.py#L34-L74 | def delta13c_craig(r45sam, r46sam, d13cstd, r45std, r46std,
ks='Craig', d18ostd=23.5):
"""
Algorithm from Craig 1957.
From the original Craig paper, we can set up a pair of equations
and solve for d13C and d18O simultaneously:
d45 * r45 = r13 * d13 + 0.5 * r17 * d18
d46 = r13 * ((r17**2 + r17 - r18) / a) * d13
+ (1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a)) * d18
where a = r18 + r13 * r17 and b = 1 + r13 + r17
"""
# the constants for the calculations
# originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5
k = delta13c_constants()[ks]
# TODO: not clear why need to multiply by 2?
r13, r18 = k['S13'], 2 * k['S18']
r17 = 2 * (k['K'] * k['S18'] ** k['A'])
a = (r18 + r13 * r17) * (1. + r13 + r17)
# the coefficients for the calculations
eqn_mat = np.array([[r13, 0.5 * r17],
[r13 * ((r17 ** 2 + r17 - r18) / a),
1 - 0.5 * r17 * ((r13 ** 2 + r13 - r18) / a)]])
# precalculate the d45 and d46 of the standard versus PDB
r45d45std = (eqn_mat[0, 0] * d13cstd + eqn_mat[0, 1] * d18ostd)
d46std = eqn_mat[1, 0] * d13cstd + eqn_mat[1, 1] * d18ostd
# calculate the d45 and d46 of our sample versus PDB
# in r45d45, r45 of PDB = r13 + r17 of PDB
r45d45 = 1000. * (r45sam / r45std - 1.) * \
(r13 + r17 + 0.001 * r45d45std) + r45d45std
d46 = 1000. * (r46sam / r46std - 1.) * (1. + 0.001 * d46std) + d46std
# solve the system of equations
x = np.linalg.solve(eqn_mat, np.array([r45d45, d46]))
return x[0] | [
"def",
"delta13c_craig",
"(",
"r45sam",
",",
"r46sam",
",",
"d13cstd",
",",
"r45std",
",",
"r46std",
",",
"ks",
"=",
"'Craig'",
",",
"d18ostd",
"=",
"23.5",
")",
":",
"# the constants for the calculations",
"# originally r13, r17, r18 = 1123.72e-5, 759.9e-6, 415.8e-5",
"k",
"=",
"delta13c_constants",
"(",
")",
"[",
"ks",
"]",
"# TODO: not clear why need to multiply by 2?",
"r13",
",",
"r18",
"=",
"k",
"[",
"'S13'",
"]",
",",
"2",
"*",
"k",
"[",
"'S18'",
"]",
"r17",
"=",
"2",
"*",
"(",
"k",
"[",
"'K'",
"]",
"*",
"k",
"[",
"'S18'",
"]",
"**",
"k",
"[",
"'A'",
"]",
")",
"a",
"=",
"(",
"r18",
"+",
"r13",
"*",
"r17",
")",
"*",
"(",
"1.",
"+",
"r13",
"+",
"r17",
")",
"# the coefficients for the calculations",
"eqn_mat",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"r13",
",",
"0.5",
"*",
"r17",
"]",
",",
"[",
"r13",
"*",
"(",
"(",
"r17",
"**",
"2",
"+",
"r17",
"-",
"r18",
")",
"/",
"a",
")",
",",
"1",
"-",
"0.5",
"*",
"r17",
"*",
"(",
"(",
"r13",
"**",
"2",
"+",
"r13",
"-",
"r18",
")",
"/",
"a",
")",
"]",
"]",
")",
"# precalculate the d45 and d46 of the standard versus PDB",
"r45d45std",
"=",
"(",
"eqn_mat",
"[",
"0",
",",
"0",
"]",
"*",
"d13cstd",
"+",
"eqn_mat",
"[",
"0",
",",
"1",
"]",
"*",
"d18ostd",
")",
"d46std",
"=",
"eqn_mat",
"[",
"1",
",",
"0",
"]",
"*",
"d13cstd",
"+",
"eqn_mat",
"[",
"1",
",",
"1",
"]",
"*",
"d18ostd",
"# calculate the d45 and d46 of our sample versus PDB",
"# in r45d45, r45 of PDB = r13 + r17 of PDB",
"r45d45",
"=",
"1000.",
"*",
"(",
"r45sam",
"/",
"r45std",
"-",
"1.",
")",
"*",
"(",
"r13",
"+",
"r17",
"+",
"0.001",
"*",
"r45d45std",
")",
"+",
"r45d45std",
"d46",
"=",
"1000.",
"*",
"(",
"r46sam",
"/",
"r46std",
"-",
"1.",
")",
"*",
"(",
"1.",
"+",
"0.001",
"*",
"d46std",
")",
"+",
"d46std",
"# solve the system of equations",
"x",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"eqn_mat",
",",
"np",
".",
"array",
"(",
"[",
"r45d45",
",",
"d46",
"]",
")",
")",
"return",
"x",
"[",
"0",
"]"
] | Algorithm from Craig 1957.
From the original Craig paper, we can set up a pair of equations
and solve for d13C and d18O simultaneously:
d45 * r45 = r13 * d13 + 0.5 * r17 * d18
d46 = r13 * ((r17**2 + r17 - r18) / a) * d13
+ (1 - 0.5 * r17 * ((r13**2 + r13 - r18) / a)) * d18
where a = r18 + r13 * r17 and b = 1 + r13 + r17 | [
"Algorithm",
"from",
"Craig",
"1957",
"."
] | python | train |
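The heart of the routine above is a 2x2 linear solve: build `eqn_mat` from the ratio constants, form the measured (r45*d45, d46) pair, and solve for (d13C, d18O). A stripped-down sketch using the constants quoted in the entry's own comment; the measured pair is hypothetical:

import numpy as np

r13, r17, r18 = 1123.72e-5, 2 * 759.9e-6, 2 * 415.8e-5  # from the comment above
a = (r18 + r13 * r17) * (1. + r13 + r17)
eqn_mat = np.array([
    [r13, 0.5 * r17],
    [r13 * ((r17 ** 2 + r17 - r18) / a),
     1 - 0.5 * r17 * ((r13 ** 2 + r13 - r18) / a)],
])
measured = np.array([0.01, -0.5])  # hypothetical (r45 * d45, d46) of a sample vs PDB
d13c, d18o = np.linalg.solve(eqn_mat, measured)
print(d13c, d18o)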
newville/asteval | asteval/asteval.py | https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L535-L539 | def on_slice(self, node): # ():('lower', 'upper', 'step')
"""Simple slice."""
return slice(self.run(node.lower),
self.run(node.upper),
self.run(node.step)) | [
"def",
"on_slice",
"(",
"self",
",",
"node",
")",
":",
"# ():('lower', 'upper', 'step')",
"return",
"slice",
"(",
"self",
".",
"run",
"(",
"node",
".",
"lower",
")",
",",
"self",
".",
"run",
"(",
"node",
".",
"upper",
")",
",",
"self",
".",
"run",
"(",
"node",
".",
"step",
")",
")"
] | Simple slice. | [
"Simple",
"slice",
"."
] | python | train |
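The handler above maps an AST slice node straight onto Python's built-in `slice`; `x[1:10:2]` and `x[slice(1, 10, 2)]` are identical, which is all the interpreter needs:

x = list(range(12))
print(x[1:10:2])                 # [1, 3, 5, 7, 9]
print(x[slice(1, 10, 2)])        # identical result
print(x[slice(None, 4, None)])   # omitted bounds behave like x[:4]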
Devoxin/Lavalink.py | lavalink/Client.py | https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/lavalink/Client.py#L173-L177 | def destroy(self):
""" Destroys the Lavalink client. """
self.ws.destroy()
self.bot.remove_listener(self.on_socket_response)
self.hooks.clear() | [
"def",
"destroy",
"(",
"self",
")",
":",
"self",
".",
"ws",
".",
"destroy",
"(",
")",
"self",
".",
"bot",
".",
"remove_listener",
"(",
"self",
".",
"on_socket_response",
")",
"self",
".",
"hooks",
".",
"clear",
"(",
")"
] | Destroys the Lavalink client. | [
"Destroys",
"the",
"Lavalink",
"client",
"."
] | python | valid |
b3j0f/schema | b3j0f/schema/lang/python.py | https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/python.py#L303-L462 | def _getparams_rtype(cls, function):
"""Get function params from input function and rtype.
:return: OrderedDict, rtype, vargs and kwargs.
:rtype: tuple
"""
try:
args, vargs, kwargs, default = getargspec(function)
except TypeError:
args, vargs, kwargs, default = (), (), (), ()
indexlen = len(args) - (0 if default is None else len(default))
params = OrderedDict()
for index, arg in enumerate(args):
pkwargs = {
'name': arg,
'mandatory': True
} # param kwargs
if index >= indexlen: # has default value
value = default[index - indexlen]
pkwargs['default'] = value
pkwargs['ref'] = None if value is None else data2schema(value)
pkwargs['mandatory'] = False
params[arg] = pkwargs
rtype = None
# parse docstring
if function.__doc__ is not None and not isbuiltin(function):
scope = get_function_globals(function)
for match in cls._REC.findall(function.__doc__):
if rtype is None:
rrtype = match[4].strip() or None
if rrtype:
rtypes = rrtype.split(',')
schemas = []
for rtype_ in rtypes:
rtype_ = rtype_.strip()
islist = False
try:
lkrtype = lookup(rtype_, scope=scope)
except ImportError:
islist = True
try:
if rtype_[-1] == 's':
lkrtype = lookup(
rtype_[:-1], scope=scope
)
elif rtype_.startswith('list of '):
lkrtype = lookup(
rtype_[8:], scope=scope
)
else:
raise
except ImportError:
msg = 'rtype "{0}" ({1}) from {2} not found.'
raise ImportError(
msg.format(rtype_, rrtype, function)
)
try:
schemacls = datatype2schemacls(lkrtype)
except TypeError:
schemacls = ParamTypeSchema(type=lkrtype)
rschema = schemacls()
if islist:
rschema = ArraySchema(itemtype=rschema)
schemas.append(rschema)
if len(rtypes) > 1:
rtype = OneOfSchema(schemas=schemas, nullable=True)
else:
rtype = schemas[0]
continue
pname = (match[1] or match[2]).strip()
if pname and pname in params:
ptype = (match[0] or match[3]).strip()
ptypes = ptype.split(',')
schemas = []
for ptype in ptypes:
ptype = ptype.strip()
islist = False
try:
lkptype = lookup(ptype, scope=scope)
except ImportError:
islist = True
try:
if ptype[-1] == 's':
lkptype = lookup(ptype[:-1], scope=scope)
elif ptype.startswith('list of '):
lkptype = lookup(ptype[8:], scope=scope)
else:
raise
except ImportError:
msg = 'Error on ptype "{0}" ({1}) from {2} not found.'
raise ImportError(
msg.format(pname, ptype, function)
)
try:
schemacls = datatype2schemacls(lkptype)
except TypeError:
schemacls = ParamTypeSchema(type=lkptype)
pschema = schemacls()
if islist:
pschema = ArraySchema(itemtype=pschema)
schemas.append(pschema)
if len(ptypes) > 1:
pschema = OneOfSchema(schemas=schemas, nullable=True)
else:
pschema = schemas[0]
params[pname]['ref'] = pschema
return params, rtype, vargs, kwargs | [
"def",
"_getparams_rtype",
"(",
"cls",
",",
"function",
")",
":",
"try",
":",
"args",
",",
"vargs",
",",
"kwargs",
",",
"default",
"=",
"getargspec",
"(",
"function",
")",
"except",
"TypeError",
":",
"args",
",",
"vargs",
",",
"kwargs",
",",
"default",
"=",
"(",
")",
",",
"(",
")",
",",
"(",
")",
",",
"(",
")",
"indexlen",
"=",
"len",
"(",
"args",
")",
"-",
"(",
"0",
"if",
"default",
"is",
"None",
"else",
"len",
"(",
"default",
")",
")",
"params",
"=",
"OrderedDict",
"(",
")",
"for",
"index",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"pkwargs",
"=",
"{",
"'name'",
":",
"arg",
",",
"'mandatory'",
":",
"True",
"}",
"# param kwargs",
"if",
"index",
">=",
"indexlen",
":",
"# has default value",
"value",
"=",
"default",
"[",
"index",
"-",
"indexlen",
"]",
"pkwargs",
"[",
"'default'",
"]",
"=",
"value",
"pkwargs",
"[",
"'ref'",
"]",
"=",
"None",
"if",
"value",
"is",
"None",
"else",
"data2schema",
"(",
"value",
")",
"pkwargs",
"[",
"'mandatory'",
"]",
"=",
"False",
"params",
"[",
"arg",
"]",
"=",
"pkwargs",
"rtype",
"=",
"None",
"# parse docstring",
"if",
"function",
".",
"__doc__",
"is",
"not",
"None",
"and",
"not",
"isbuiltin",
"(",
"function",
")",
":",
"scope",
"=",
"get_function_globals",
"(",
"function",
")",
"for",
"match",
"in",
"cls",
".",
"_REC",
".",
"findall",
"(",
"function",
".",
"__doc__",
")",
":",
"if",
"rtype",
"is",
"None",
":",
"rrtype",
"=",
"match",
"[",
"4",
"]",
".",
"strip",
"(",
")",
"or",
"None",
"if",
"rrtype",
":",
"rtypes",
"=",
"rrtype",
".",
"split",
"(",
"','",
")",
"schemas",
"=",
"[",
"]",
"for",
"rtype_",
"in",
"rtypes",
":",
"rtype_",
"=",
"rtype_",
".",
"strip",
"(",
")",
"islist",
"=",
"False",
"try",
":",
"lkrtype",
"=",
"lookup",
"(",
"rtype_",
",",
"scope",
"=",
"scope",
")",
"except",
"ImportError",
":",
"islist",
"=",
"True",
"try",
":",
"if",
"rtype_",
"[",
"-",
"1",
"]",
"==",
"'s'",
":",
"lkrtype",
"=",
"lookup",
"(",
"rtype_",
"[",
":",
"-",
"1",
"]",
",",
"scope",
"=",
"scope",
")",
"elif",
"rtype_",
".",
"startswith",
"(",
"'list of '",
")",
":",
"lkrtype",
"=",
"lookup",
"(",
"rtype_",
"[",
"8",
":",
"]",
",",
"scope",
"=",
"scope",
")",
"else",
":",
"raise",
"except",
"ImportError",
":",
"msg",
"=",
"'rtype \"{0}\" ({1}) from {2} not found.'",
"raise",
"ImportError",
"(",
"msg",
".",
"format",
"(",
"rtype_",
",",
"rrtype",
",",
"function",
")",
")",
"try",
":",
"schemacls",
"=",
"datatype2schemacls",
"(",
"lkrtype",
")",
"except",
"TypeError",
":",
"schemacls",
"=",
"ParamTypeSchema",
"(",
"type",
"=",
"lkrtype",
")",
"rschema",
"=",
"schemacls",
"(",
")",
"if",
"islist",
":",
"rschema",
"=",
"ArraySchema",
"(",
"itemtype",
"=",
"rschema",
")",
"schemas",
".",
"append",
"(",
"rschema",
")",
"if",
"len",
"(",
"rtypes",
")",
">",
"1",
":",
"rtype",
"=",
"OneOfSchema",
"(",
"schemas",
"=",
"schemas",
",",
"nullable",
"=",
"True",
")",
"else",
":",
"rtype",
"=",
"schemas",
"[",
"0",
"]",
"continue",
"pname",
"=",
"(",
"match",
"[",
"1",
"]",
"or",
"match",
"[",
"2",
"]",
")",
".",
"strip",
"(",
")",
"if",
"pname",
"and",
"pname",
"in",
"params",
":",
"ptype",
"=",
"(",
"match",
"[",
"0",
"]",
"or",
"match",
"[",
"3",
"]",
")",
".",
"strip",
"(",
")",
"ptypes",
"=",
"ptype",
".",
"split",
"(",
"','",
")",
"schemas",
"=",
"[",
"]",
"for",
"ptype",
"in",
"ptypes",
":",
"ptype",
"=",
"ptype",
".",
"strip",
"(",
")",
"islist",
"=",
"False",
"try",
":",
"lkptype",
"=",
"lookup",
"(",
"ptype",
",",
"scope",
"=",
"scope",
")",
"except",
"ImportError",
":",
"islist",
"=",
"True",
"try",
":",
"if",
"ptype",
"[",
"-",
"1",
"]",
"==",
"'s'",
":",
"lkptype",
"=",
"lookup",
"(",
"ptype",
"[",
":",
"-",
"1",
"]",
",",
"scope",
"=",
"scope",
")",
"elif",
"ptype",
".",
"startswith",
"(",
"'list of '",
")",
":",
"lkptype",
"=",
"lookup",
"(",
"ptype",
"[",
"8",
":",
"]",
",",
"scope",
"=",
"scope",
")",
"else",
":",
"raise",
"except",
"ImportError",
":",
"msg",
"=",
"'Error on ptype \"{0}\" ({1}) from {2} not found.'",
"raise",
"ImportError",
"(",
"msg",
".",
"format",
"(",
"pname",
",",
"ptype",
",",
"function",
")",
")",
"try",
":",
"schemacls",
"=",
"datatype2schemacls",
"(",
"lkptype",
")",
"except",
"TypeError",
":",
"schemacls",
"=",
"ParamTypeSchema",
"(",
"type",
"=",
"lkptype",
")",
"pschema",
"=",
"schemacls",
"(",
")",
"if",
"islist",
":",
"pschema",
"=",
"ArraySchema",
"(",
"itemtype",
"=",
"pschema",
")",
"schemas",
".",
"append",
"(",
"pschema",
")",
"if",
"len",
"(",
"ptypes",
")",
">",
"1",
":",
"pschema",
"=",
"OneOfSchema",
"(",
"schemas",
"=",
"schemas",
",",
"nullable",
"=",
"True",
")",
"else",
":",
"pschema",
"=",
"schemas",
"[",
"0",
"]",
"params",
"[",
"pname",
"]",
"[",
"'ref'",
"]",
"=",
"pschema",
"return",
"params",
",",
"rtype",
",",
"vargs",
",",
"kwargs"
] | Get function params from input function and rtype.
:return: OrderedDict, rtype, vargs and kwargs.
:rtype: tuple | [
"Get",
"function",
"params",
"from",
"input",
"function",
"and",
"rtype",
"."
] | python | train |
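The regex-driven parser above reads epydoc/Sphinx-style fields out of a docstring. A hypothetical input function in that convention, with the extraction one would expect (names and the summary line are illustrative only):

def sample(count, label='x'):
    """Process some items.

    :param count: how many items to process.
    :type count: int
    :param label: an optional tag.
    :type label: str
    :rtype: bool
    """
    return True

# Expected result, conceptually:
#   params['count'] -> {'mandatory': True, 'ref': <int schema>}
#   params['label'] -> {'mandatory': False, 'default': 'x', 'ref': <str schema>}
#   rtype -> <bool schema>; vargs and kwargs -> None (no *args/**kwargs present)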
pywbem/pywbem | attic/cim_provider2.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider2.py#L562-L590 | def MI_createInstance(self,
env,
instance):
# pylint: disable=invalid-name
"""Create a CIM instance, and return its instance name
Implements the WBEM operation CreateInstance in terms
of the set_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_createInstance called...')
rval = None
'''
ch = env.get_cimom_handle()
cimClass = ch.GetClass(instance.classname,
instance.path.namespace,
LocalOnly=False,
IncludeQualifiers=True)
'''
# CIMOM has already filled in default property values for
# props with default values, if values not supplied by client.
rval = self.set_instance(env=env,
instance=instance,
modify_existing=False)
logger.log_debug('CIMProvider2 MI_createInstance returning')
return rval.path | [
"def",
"MI_createInstance",
"(",
"self",
",",
"env",
",",
"instance",
")",
":",
"# pylint: disable=invalid-name",
"logger",
"=",
"env",
".",
"get_logger",
"(",
")",
"logger",
".",
"log_debug",
"(",
"'CIMProvider2 MI_createInstance called...'",
")",
"rval",
"=",
"None",
"'''\n ch = env.get_cimom_handle()\n cimClass = ch.GetClass(instance.classname,\n instance.path.namespace,\n LocalOnly=False,\n IncludeQualifiers=True)\n '''",
"# CIMOM has already filled in default property values for",
"# props with default values, if values not supplied by client.",
"rval",
"=",
"self",
".",
"set_instance",
"(",
"env",
"=",
"env",
",",
"instance",
"=",
"instance",
",",
"modify_existing",
"=",
"False",
")",
"logger",
".",
"log_debug",
"(",
"'CIMProvider2 MI_createInstance returning'",
")",
"return",
"rval",
".",
"path"
] | Create a CIM instance, and return its instance name.
Implements the WBEM operation CreateInstance in terms
of the set_instance method. A derived class will not normally
override this method. | [
"Create",
"a",
"CIM",
"instance",
"and",
"return",
"its",
"instance",
"name"
] | python | train |
kalbhor/MusicNow | musicnow/repair.py | https://github.com/kalbhor/MusicNow/blob/12ff1ed2ea2bb7dbbfd925d7998b3ea1e20de291/musicnow/repair.py#L106-L130 | def get_lyrics_genius(song_title):
'''
Scrapes the lyrics from Genius.com
'''
base_url = "http://api.genius.com"
headers = {'Authorization': 'Bearer %s' %(GENIUS_KEY)}
search_url = base_url + "/search"
data = {'q': song_title}
response = requests.get(search_url, data=data, headers=headers)
json = response.json()
song_api_path = json["response"]["hits"][0]["result"]["api_path"]
song_url = base_url + song_api_path
response = requests.get(song_url, headers=headers)
json = response.json()
path = json["response"]["song"]["path"]
page_url = "http://genius.com" + path
page = requests.get(page_url)
soup = BeautifulSoup(page.text, "html.parser")
div = soup.find('div',{'class': 'song_body-lyrics'})
lyrics = div.find('p').getText()
return lyrics | [
"def",
"get_lyrics_genius",
"(",
"song_title",
")",
":",
"base_url",
"=",
"\"http://api.genius.com\"",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Bearer %s'",
"%",
"(",
"GENIUS_KEY",
")",
"}",
"search_url",
"=",
"base_url",
"+",
"\"/search\"",
"data",
"=",
"{",
"'q'",
":",
"song_title",
"}",
"response",
"=",
"requests",
".",
"get",
"(",
"search_url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
"json",
"=",
"response",
".",
"json",
"(",
")",
"song_api_path",
"=",
"json",
"[",
"\"response\"",
"]",
"[",
"\"hits\"",
"]",
"[",
"0",
"]",
"[",
"\"result\"",
"]",
"[",
"\"api_path\"",
"]",
"song_url",
"=",
"base_url",
"+",
"song_api_path",
"response",
"=",
"requests",
".",
"get",
"(",
"song_url",
",",
"headers",
"=",
"headers",
")",
"json",
"=",
"response",
".",
"json",
"(",
")",
"path",
"=",
"json",
"[",
"\"response\"",
"]",
"[",
"\"song\"",
"]",
"[",
"\"path\"",
"]",
"page_url",
"=",
"\"http://genius.com\"",
"+",
"path",
"page",
"=",
"requests",
".",
"get",
"(",
"page_url",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"page",
".",
"text",
",",
"\"html.parser\"",
")",
"div",
"=",
"soup",
".",
"find",
"(",
"'div'",
",",
"{",
"'class'",
":",
"'song_body-lyrics'",
"}",
")",
"lyrics",
"=",
"div",
".",
"find",
"(",
"'p'",
")",
".",
"getText",
"(",
")",
"return",
"lyrics"
] | Scrapes the lyrics from Genius.com | [
"Scrapes",
"the",
"lyrics",
"from",
"Genius",
".",
"com"
] | python | train |
xolox/python-coloredlogs | coloredlogs/demo.py | https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/coloredlogs/demo.py#L29-L49 | def demonstrate_colored_logging():
"""Interactively demonstrate the :mod:`coloredlogs` package."""
# Determine the available logging levels and order them by numeric value.
decorated_levels = []
defined_levels = coloredlogs.find_defined_levels()
normalizer = coloredlogs.NameNormalizer()
for name, level in defined_levels.items():
if name != 'NOTSET':
item = (level, normalizer.normalize_name(name))
if item not in decorated_levels:
decorated_levels.append(item)
ordered_levels = sorted(decorated_levels)
# Initialize colored output to the terminal, default to the most
# verbose logging level but enable the user to customize it.
coloredlogs.install(level=os.environ.get('COLOREDLOGS_LOG_LEVEL', ordered_levels[0][1]))
# Print an example log message at each of the defined levels.
for level, name in ordered_levels:
log_method = getattr(logger, name, None)
if log_method:
log_method("message with level %s (%i)", name, level)
time.sleep(DEMO_DELAY) | [
"def",
"demonstrate_colored_logging",
"(",
")",
":",
"# Determine the available logging levels and order them by numeric value.",
"decorated_levels",
"=",
"[",
"]",
"defined_levels",
"=",
"coloredlogs",
".",
"find_defined_levels",
"(",
")",
"normalizer",
"=",
"coloredlogs",
".",
"NameNormalizer",
"(",
")",
"for",
"name",
",",
"level",
"in",
"defined_levels",
".",
"items",
"(",
")",
":",
"if",
"name",
"!=",
"'NOTSET'",
":",
"item",
"=",
"(",
"level",
",",
"normalizer",
".",
"normalize_name",
"(",
"name",
")",
")",
"if",
"item",
"not",
"in",
"decorated_levels",
":",
"decorated_levels",
".",
"append",
"(",
"item",
")",
"ordered_levels",
"=",
"sorted",
"(",
"decorated_levels",
")",
"# Initialize colored output to the terminal, default to the most",
"# verbose logging level but enable the user the customize it.",
"coloredlogs",
".",
"install",
"(",
"level",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'COLOREDLOGS_LOG_LEVEL'",
",",
"ordered_levels",
"[",
"0",
"]",
"[",
"1",
"]",
")",
")",
"# Print some examples with different timestamps.",
"for",
"level",
",",
"name",
"in",
"ordered_levels",
":",
"log_method",
"=",
"getattr",
"(",
"logger",
",",
"name",
",",
"None",
")",
"if",
"log_method",
":",
"log_method",
"(",
"\"message with level %s (%i)\"",
",",
"name",
",",
"level",
")",
"time",
".",
"sleep",
"(",
"DEMO_DELAY",
")"
] | Interactively demonstrate the :mod:`coloredlogs` package. | [
"Interactively",
"demonstrate",
"the",
":",
"mod",
":",
"coloredlogs",
"package",
"."
] | python | train |
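The demo above drives the public `coloredlogs.install()` entry point; minimal standalone usage looks like this (the `COLOREDLOGS_LOG_LEVEL` override is the demo's own convention, not the library's):

import logging
import coloredlogs

logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')  # install a colored handler on the root logger
logger.debug('message with level DEBUG (10)')
logger.warning('message with level WARNING (30)')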
inveniosoftware/invenio-i18n | invenio_i18n/ext.py | https://github.com/inveniosoftware/invenio-i18n/blob/3119bb7db3369b8ae0aecce5d7d7c79f807e2763/invenio_i18n/ext.py#L32-L46 | def get_lazystring_encoder(app):
"""Return a JSONEncoder for handling lazy strings from Babel.
Installed on Flask application by default by :class:`InvenioI18N`.
"""
from speaklater import _LazyString
class JSONEncoder(app.json_encoder):
def default(self, o):
if isinstance(o, _LazyString):
return text_type(o)
return super(JSONEncoder, self).default(o)
return JSONEncoder | [
"def",
"get_lazystring_encoder",
"(",
"app",
")",
":",
"from",
"speaklater",
"import",
"_LazyString",
"class",
"JSONEncoder",
"(",
"app",
".",
"json_encoder",
")",
":",
"def",
"default",
"(",
"self",
",",
"o",
")",
":",
"if",
"isinstance",
"(",
"o",
",",
"_LazyString",
")",
":",
"return",
"text_type",
"(",
"o",
")",
"return",
"super",
"(",
"JSONEncoder",
",",
"self",
")",
".",
"default",
"(",
"o",
")",
"return",
"JSONEncoder"
] | Return a JSONEncoder for handling lazy strings from Babel.
Installed on Flask application by default by :class:`InvenioI18N`. | [
"Return",
"a",
"JSONEncoder",
"for",
"handling",
"lazy",
"strings",
"from",
"Babel",
"."
] | python | train |
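What the factory above buys you: serializing Babel/speaklater lazy strings through Flask's JSON machinery without a TypeError. A sketch of the same idea against the stock `json` module (`make_lazy_string` is speaklater's usual constructor; the wiring is illustrative):

import json
from speaklater import make_lazy_string

lazy = make_lazy_string(lambda: 'Hello')

class Encoder(json.JSONEncoder):
    def default(self, o):
        return str(o)  # same coercion as text_type(o) in the entry above

# json.dumps({'msg': lazy}) would raise TypeError with the stock encoder
print(json.dumps({'msg': lazy}, cls=Encoder))  # {"msg": "Hello"}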
twisted/mantissa | xmantissa/publicweb.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/publicweb.py#L841-L858 | def fromRequest(cls, store, request):
"""
Return a L{LoginPage} which will present the user with a login prompt.
@type store: L{Store}
@param store: A I{site} store.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request which encountered a need for
authentication. This will be effectively re-issued after login
succeeds.
@return: A L{LoginPage} which will process the remaining segments.
"""
location = URL.fromRequest(request)
segments = location.pathList(unquote=True, copy=False)
segments.append(request.postpath[0])
return cls(store, segments, request.args) | [
"def",
"fromRequest",
"(",
"cls",
",",
"store",
",",
"request",
")",
":",
"location",
"=",
"URL",
".",
"fromRequest",
"(",
"request",
")",
"segments",
"=",
"location",
".",
"pathList",
"(",
"unquote",
"=",
"True",
",",
"copy",
"=",
"False",
")",
"segments",
".",
"append",
"(",
"request",
".",
"postpath",
"[",
"0",
"]",
")",
"return",
"cls",
"(",
"store",
",",
"segments",
",",
"request",
".",
"args",
")"
] | Return a L{LoginPage} which will present the user with a login prompt.
@type store: L{Store}
@param store: A I{site} store.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request which encountered a need for
authentication. This will be effectively re-issued after login
succeeds.
@return: A L{LoginPage} which will process the remaining segments. | [
"Return",
"a",
"L",
"{",
"LoginPage",
"}",
"which",
"will",
"present",
"the",
"user",
"with",
"a",
"login",
"prompt",
"."
] | python | train |
tamasgal/km3pipe | km3pipe/math.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L494-L536 | def intersect_3d(p1, p2):
"""Find the closes point for a given set of lines in 3D.
Parameters
----------
p1 : (M, N) array_like
Starting points
p2 : (M, N) array_like
End points.
Returns
-------
x : (N,) ndarray
Least-squares solution - the closest point of the intersections.
Raises
------
numpy.linalg.LinAlgError
If computation does not converge.
"""
v = p2 - p1
normed_v = unit_vector(v)
nx = normed_v[:, 0]
ny = normed_v[:, 1]
nz = normed_v[:, 2]
xx = np.sum(nx**2 - 1)
yy = np.sum(ny**2 - 1)
zz = np.sum(nz**2 - 1)
xy = np.sum(nx * ny)
xz = np.sum(nx * nz)
yz = np.sum(ny * nz)
M = np.array([(xx, xy, xz), (xy, yy, yz), (xz, yz, zz)])
x = np.sum(
p1[:, 0] * (nx**2 - 1) + p1[:, 1] * (nx * ny) + p1[:, 2] * (nx * nz)
)
y = np.sum(
p1[:, 0] * (nx * ny) + p1[:, 1] * (ny * ny - 1) + p1[:, 2] * (ny * nz)
)
z = np.sum(
p1[:, 0] * (nx * nz) + p1[:, 1] * (ny * nz) + p1[:, 2] * (nz**2 - 1)
)
return np.linalg.lstsq(M, np.array((x, y, z)), rcond=None)[0] | [
"def",
"intersect_3d",
"(",
"p1",
",",
"p2",
")",
":",
"v",
"=",
"p2",
"-",
"p1",
"normed_v",
"=",
"unit_vector",
"(",
"v",
")",
"nx",
"=",
"normed_v",
"[",
":",
",",
"0",
"]",
"ny",
"=",
"normed_v",
"[",
":",
",",
"1",
"]",
"nz",
"=",
"normed_v",
"[",
":",
",",
"2",
"]",
"xx",
"=",
"np",
".",
"sum",
"(",
"nx",
"**",
"2",
"-",
"1",
")",
"yy",
"=",
"np",
".",
"sum",
"(",
"ny",
"**",
"2",
"-",
"1",
")",
"zz",
"=",
"np",
".",
"sum",
"(",
"nz",
"**",
"2",
"-",
"1",
")",
"xy",
"=",
"np",
".",
"sum",
"(",
"nx",
"*",
"ny",
")",
"xz",
"=",
"np",
".",
"sum",
"(",
"nx",
"*",
"nz",
")",
"yz",
"=",
"np",
".",
"sum",
"(",
"ny",
"*",
"nz",
")",
"M",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"xx",
",",
"xy",
",",
"xz",
")",
",",
"(",
"xy",
",",
"yy",
",",
"yz",
")",
",",
"(",
"xz",
",",
"yz",
",",
"zz",
")",
"]",
")",
"x",
"=",
"np",
".",
"sum",
"(",
"p1",
"[",
":",
",",
"0",
"]",
"*",
"(",
"nx",
"**",
"2",
"-",
"1",
")",
"+",
"p1",
"[",
":",
",",
"1",
"]",
"*",
"(",
"nx",
"*",
"ny",
")",
"+",
"p1",
"[",
":",
",",
"2",
"]",
"*",
"(",
"nx",
"*",
"nz",
")",
")",
"y",
"=",
"np",
".",
"sum",
"(",
"p1",
"[",
":",
",",
"0",
"]",
"*",
"(",
"nx",
"*",
"ny",
")",
"+",
"p1",
"[",
":",
",",
"1",
"]",
"*",
"(",
"ny",
"*",
"ny",
"-",
"1",
")",
"+",
"p1",
"[",
":",
",",
"2",
"]",
"*",
"(",
"ny",
"*",
"nz",
")",
")",
"z",
"=",
"np",
".",
"sum",
"(",
"p1",
"[",
":",
",",
"0",
"]",
"*",
"(",
"nx",
"*",
"nz",
")",
"+",
"p1",
"[",
":",
",",
"1",
"]",
"*",
"(",
"ny",
"*",
"nz",
")",
"+",
"p1",
"[",
":",
",",
"2",
"]",
"*",
"(",
"nz",
"**",
"2",
"-",
"1",
")",
")",
"return",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"M",
",",
"np",
".",
"array",
"(",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
",",
"rcond",
"=",
"None",
")",
"[",
"0",
"]"
] | Find the closest point for a given set of lines in 3D.
Parameters
----------
p1 : (M, N) array_like
Starting points
p2 : (M, N) array_like
End points.
Returns
-------
x : (N,) ndarray
Least-squares solution - the closest point of the intersections.
Raises
------
numpy.linalg.LinAlgError
If computation does not converge. | [
"Find",
"the",
"closes",
"point",
"for",
"a",
"given",
"set",
"of",
"lines",
"in",
"3D",
"."
] | python | train |
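A quick sanity check for `intersect_3d` above: lines that genuinely cross should return the crossing point. Both arguments are (n_lines, 3) arrays of start and end points; the import path follows the entry's module location.

import numpy as np
from km3pipe.math import intersect_3d

# two lines through the origin: one along x, one along y
p1 = np.array([[-1.0, 0.0, 0.0],
               [0.0, -1.0, 0.0]])  # starting points
p2 = np.array([[1.0, 0.0, 0.0],
               [0.0, 1.0, 0.0]])   # end points
print(intersect_3d(p1, p2))        # ~[0. 0. 0.], the common intersection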
hfaran/Tornado-JSON | tornado_json/schema.py | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/schema.py#L82-L201 | def validate(input_schema=None, output_schema=None,
input_example=None, output_example=None,
validator_cls=None,
format_checker=None, on_empty_404=False,
use_defaults=False):
"""Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (if schema type is object). Example:
{
'published': {'type': 'bool', 'default': False}
}
self.body will contain the 'published' key with value False if none
comes from the request; this also works with nested schemas.
"""
@container
def _validate(rh_method):
"""Decorator for RequestHandler schema validation
This decorator:
- Validates request body against input schema of the method
- Calls the ``rh_method`` and gets output from it
- Validates output against output schema of the method
- Calls ``JSendMixin.success`` to write the validated output
:type rh_method: function
:param rh_method: The RequestHandler method to be decorated
:returns: The decorated method
:raises ValidationError: If input is invalid as per the schema
or malformed
:raises TypeError: If the output is invalid as per the schema
or malformed
:raises APIError: If the output is a falsy value and
on_empty_404 is True, an HTTP 404 error is returned
"""
@wraps(rh_method)
@tornado.gen.coroutine
def _wrapper(self, *args, **kwargs):
# In case the specified input_schema is ``None``, we
# don't json.loads the input, but just set it to ``None``
# instead.
if input_schema is not None:
# Attempt to json.loads the input
try:
# TODO: Assuming UTF-8 encoding for all requests,
# find a nice way of determining this from charset
# in headers if provided
encoding = "UTF-8"
input_ = json.loads(self.request.body.decode(encoding))
except ValueError as e:
raise jsonschema.ValidationError(
"Input is malformed; could not decode JSON object."
)
if use_defaults:
input_ = input_schema_clean(input_, input_schema)
# Validate the received input
jsonschema.validate(
input_,
input_schema,
cls=validator_cls,
format_checker=format_checker
)
else:
input_ = None
# A json.loads'd version of self.request["body"] is now available
# as self.body
setattr(self, "body", input_)
# Call the requesthandler method
output = rh_method(self, *args, **kwargs)
# If the rh_method returned a Future a la `raise Return(value)`
# we grab the output.
if is_future(output):
output = yield output
# if output is empty, auto return the error 404.
if not output and on_empty_404:
raise APIError(404, "Resource not found.")
if output_schema is not None:
# We wrap output in an object before validating in case
# output is a string (and ergo not a validatable JSON object)
try:
jsonschema.validate(
{"result": output},
{
"type": "object",
"properties": {
"result": output_schema
},
"required": ["result"]
}
)
except jsonschema.ValidationError as e:
# We essentially re-raise this as a TypeError because
# we don't want this error data passed back to the client
# because it's a fault on our end. The client should
# only see a 500 - Internal Server Error.
raise TypeError(str(e))
# If no ValidationError has been raised up until here, we write
# back output
self.success(output)
setattr(_wrapper, "input_schema", input_schema)
setattr(_wrapper, "output_schema", output_schema)
setattr(_wrapper, "input_example", input_example)
setattr(_wrapper, "output_example", output_example)
return _wrapper
return _validate | [
"def",
"validate",
"(",
"input_schema",
"=",
"None",
",",
"output_schema",
"=",
"None",
",",
"input_example",
"=",
"None",
",",
"output_example",
"=",
"None",
",",
"validator_cls",
"=",
"None",
",",
"format_checker",
"=",
"None",
",",
"on_empty_404",
"=",
"False",
",",
"use_defaults",
"=",
"False",
")",
":",
"@",
"container",
"def",
"_validate",
"(",
"rh_method",
")",
":",
"\"\"\"Decorator for RequestHandler schema validation\n\n This decorator:\n\n - Validates request body against input schema of the method\n - Calls the ``rh_method`` and gets output from it\n - Validates output against output schema of the method\n - Calls ``JSendMixin.success`` to write the validated output\n\n :type rh_method: function\n :param rh_method: The RequestHandler method to be decorated\n :returns: The decorated method\n :raises ValidationError: If input is invalid as per the schema\n or malformed\n :raises TypeError: If the output is invalid as per the schema\n or malformed\n :raises APIError: If the output is a falsy value and\n on_empty_404 is True, an HTTP 404 error is returned\n \"\"\"",
"@",
"wraps",
"(",
"rh_method",
")",
"@",
"tornado",
".",
"gen",
".",
"coroutine",
"def",
"_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# In case the specified input_schema is ``None``, we",
"# don't json.loads the input, but just set it to ``None``",
"# instead.",
"if",
"input_schema",
"is",
"not",
"None",
":",
"# Attempt to json.loads the input",
"try",
":",
"# TODO: Assuming UTF-8 encoding for all requests,",
"# find a nice way of determining this from charset",
"# in headers if provided",
"encoding",
"=",
"\"UTF-8\"",
"input_",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
".",
"decode",
"(",
"encoding",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"jsonschema",
".",
"ValidationError",
"(",
"\"Input is malformed; could not decode JSON object.\"",
")",
"if",
"use_defaults",
":",
"input_",
"=",
"input_schema_clean",
"(",
"input_",
",",
"input_schema",
")",
"# Validate the received input",
"jsonschema",
".",
"validate",
"(",
"input_",
",",
"input_schema",
",",
"cls",
"=",
"validator_cls",
",",
"format_checker",
"=",
"format_checker",
")",
"else",
":",
"input_",
"=",
"None",
"# A json.loads'd version of self.request[\"body\"] is now available",
"# as self.body",
"setattr",
"(",
"self",
",",
"\"body\"",
",",
"input_",
")",
"# Call the requesthandler method",
"output",
"=",
"rh_method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# If the rh_method returned a Future a la `raise Return(value)`",
"# we grab the output.",
"if",
"is_future",
"(",
"output",
")",
":",
"output",
"=",
"yield",
"output",
"# if output is empty, auto return the error 404.",
"if",
"not",
"output",
"and",
"on_empty_404",
":",
"raise",
"APIError",
"(",
"404",
",",
"\"Resource not found.\"",
")",
"if",
"output_schema",
"is",
"not",
"None",
":",
"# We wrap output in an object before validating in case",
"# output is a string (and ergo not a validatable JSON object)",
"try",
":",
"jsonschema",
".",
"validate",
"(",
"{",
"\"result\"",
":",
"output",
"}",
",",
"{",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"result\"",
":",
"output_schema",
"}",
",",
"\"required\"",
":",
"[",
"\"result\"",
"]",
"}",
")",
"except",
"jsonschema",
".",
"ValidationError",
"as",
"e",
":",
"# We essentially re-raise this as a TypeError because",
"# we don't want this error data passed back to the client",
"# because it's a fault on our end. The client should",
"# only see a 500 - Internal Server Error.",
"raise",
"TypeError",
"(",
"str",
"(",
"e",
")",
")",
"# If no ValidationError has been raised up until here, we write",
"# back output",
"self",
".",
"success",
"(",
"output",
")",
"setattr",
"(",
"_wrapper",
",",
"\"input_schema\"",
",",
"input_schema",
")",
"setattr",
"(",
"_wrapper",
",",
"\"output_schema\"",
",",
"output_schema",
")",
"setattr",
"(",
"_wrapper",
",",
"\"input_example\"",
",",
"input_example",
")",
"setattr",
"(",
"_wrapper",
",",
"\"output_example\"",
",",
"output_example",
")",
"return",
"_wrapper",
"return",
"_validate"
] | Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (if schema type is object). Example:
{
'published': {'type': 'bool', 'default': False}
}
self.body will contain the 'published' key with value False if none
comes from the request; this also works with nested schemas. | [
"Parameterized",
"decorator",
"for",
"schema",
"validation"
] | python | train |
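Typical decorator usage on a handler method; the `APIHandler` import location reflects tornado_json's usual layout but treat it as an assumption:

from tornado_json import schema
from tornado_json.requesthandlers import APIHandler  # assumed location

class GreetingHandler(APIHandler):
    @schema.validate(
        input_schema={
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        },
        output_schema={"type": "string"},
    )
    def post(self):
        # the validated request body is available as self.body
        return "Hello, {}!".format(self.body["name"])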
pjuren/pyokit | src/pyokit/interface/cli.py | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/interface/cli.py#L231-L242 | def getOption(self, name):
"""
Get the Option object associated with the given name.
:param name: the name of the option to retrieve; can be short or long name.
:raise InterfaceException: if the named option doesn't exist.
"""
name = name.strip()
for o in self.options:
if o.short == name or o.long == name:
return o
raise InterfaceException("No such option: " + name) | [
"def",
"getOption",
"(",
"self",
",",
"name",
")",
":",
"name",
"=",
"name",
".",
"strip",
"(",
")",
"for",
"o",
"in",
"self",
".",
"options",
":",
"if",
"o",
".",
"short",
"==",
"name",
"or",
"o",
".",
"long",
"==",
"name",
":",
"return",
"o",
"raise",
"InterfaceException",
"(",
"\"No such option: \"",
"+",
"name",
")"
] | Get the Option object associated with the given name.
:param name: the name of the option to retrieve; can be short or long name.
:raise InterfaceException: if the named option doesn't exist. | [
"Get",
"the",
"the",
"Option",
"object",
"associated",
"with",
"the",
"given",
"name",
"."
] | python | train |
MaT1g3R/option | option/result.py | https://github.com/MaT1g3R/option/blob/37c954e6e74273d48649b3236bc881a1286107d6/option/result.py#L255-L276 | def unwrap_or(self, optb: T) -> T:
"""
Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommended to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2
"""
return cast(T, self._val) if self._is_ok else optb | [
"def",
"unwrap_or",
"(",
"self",
",",
"optb",
":",
"T",
")",
"->",
"T",
":",
"return",
"cast",
"(",
"T",
",",
"self",
".",
"_val",
")",
"if",
"self",
".",
"_is_ok",
"else",
"optb"
] | Returns the success value in the :class:`Result` or ``optb``.
Args:
optb: The default return value.
Returns:
The success value in the :class:`Result` if it is a
:meth:`Result.Ok` value, otherwise ``optb``.
Notes:
If you wish to use a result of a function call as the default,
it is recommended to use :meth:`unwrap_or_else` instead.
Examples:
>>> Ok(1).unwrap_or(2)
1
>>> Err(1).unwrap_or(2)
2 | [
"Returns",
"the",
"success",
"value",
"in",
"the",
":",
"class",
":",
"Result",
"or",
"optb",
"."
] | python | train |
ipazc/mtcnn | mtcnn/mtcnn.py | https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L549-L613 | def __stage3(self, img, total_boxes, stage_status: StageStatus):
"""
Third stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:
"""
num_boxes = total_boxes.shape[0]
if num_boxes == 0:
return total_boxes, np.empty(shape=(0,))
total_boxes = np.fix(total_boxes).astype(np.int32)
status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),
width=stage_status.width, height=stage_status.height)
tempimg = np.zeros((48, 48, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))
tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \
img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)
else:
return np.empty(shape=(0,)), np.empty(shape=(0,))
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = self.__onet.feed(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > self.__steps_threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))
pick = self.__nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points | [
"def",
"__stage3",
"(",
"self",
",",
"img",
",",
"total_boxes",
",",
"stage_status",
":",
"StageStatus",
")",
":",
"num_boxes",
"=",
"total_boxes",
".",
"shape",
"[",
"0",
"]",
"if",
"num_boxes",
"==",
"0",
":",
"return",
"total_boxes",
",",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"0",
",",
")",
")",
"total_boxes",
"=",
"np",
".",
"fix",
"(",
"total_boxes",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"status",
"=",
"StageStatus",
"(",
"self",
".",
"__pad",
"(",
"total_boxes",
".",
"copy",
"(",
")",
",",
"stage_status",
".",
"width",
",",
"stage_status",
".",
"height",
")",
",",
"width",
"=",
"stage_status",
".",
"width",
",",
"height",
"=",
"stage_status",
".",
"height",
")",
"tempimg",
"=",
"np",
".",
"zeros",
"(",
"(",
"48",
",",
"48",
",",
"3",
",",
"num_boxes",
")",
")",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"num_boxes",
")",
":",
"tmp",
"=",
"np",
".",
"zeros",
"(",
"(",
"int",
"(",
"status",
".",
"tmph",
"[",
"k",
"]",
")",
",",
"int",
"(",
"status",
".",
"tmpw",
"[",
"k",
"]",
")",
",",
"3",
")",
")",
"tmp",
"[",
"status",
".",
"dy",
"[",
"k",
"]",
"-",
"1",
":",
"status",
".",
"edy",
"[",
"k",
"]",
",",
"status",
".",
"dx",
"[",
"k",
"]",
"-",
"1",
":",
"status",
".",
"edx",
"[",
"k",
"]",
",",
":",
"]",
"=",
"img",
"[",
"status",
".",
"y",
"[",
"k",
"]",
"-",
"1",
":",
"status",
".",
"ey",
"[",
"k",
"]",
",",
"status",
".",
"x",
"[",
"k",
"]",
"-",
"1",
":",
"status",
".",
"ex",
"[",
"k",
"]",
",",
":",
"]",
"if",
"tmp",
".",
"shape",
"[",
"0",
"]",
">",
"0",
"and",
"tmp",
".",
"shape",
"[",
"1",
"]",
">",
"0",
"or",
"tmp",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
"and",
"tmp",
".",
"shape",
"[",
"1",
"]",
"==",
"0",
":",
"tempimg",
"[",
":",
",",
":",
",",
":",
",",
"k",
"]",
"=",
"cv2",
".",
"resize",
"(",
"tmp",
",",
"(",
"48",
",",
"48",
")",
",",
"interpolation",
"=",
"cv2",
".",
"INTER_AREA",
")",
"else",
":",
"return",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"0",
",",
")",
")",
",",
"np",
".",
"empty",
"(",
"shape",
"=",
"(",
"0",
",",
")",
")",
"tempimg",
"=",
"(",
"tempimg",
"-",
"127.5",
")",
"*",
"0.0078125",
"tempimg1",
"=",
"np",
".",
"transpose",
"(",
"tempimg",
",",
"(",
"3",
",",
"1",
",",
"0",
",",
"2",
")",
")",
"out",
"=",
"self",
".",
"__onet",
".",
"feed",
"(",
"tempimg1",
")",
"out0",
"=",
"np",
".",
"transpose",
"(",
"out",
"[",
"0",
"]",
")",
"out1",
"=",
"np",
".",
"transpose",
"(",
"out",
"[",
"1",
"]",
")",
"out2",
"=",
"np",
".",
"transpose",
"(",
"out",
"[",
"2",
"]",
")",
"score",
"=",
"out2",
"[",
"1",
",",
":",
"]",
"points",
"=",
"out1",
"ipass",
"=",
"np",
".",
"where",
"(",
"score",
">",
"self",
".",
"__steps_threshold",
"[",
"2",
"]",
")",
"points",
"=",
"points",
"[",
":",
",",
"ipass",
"[",
"0",
"]",
"]",
"total_boxes",
"=",
"np",
".",
"hstack",
"(",
"[",
"total_boxes",
"[",
"ipass",
"[",
"0",
"]",
",",
"0",
":",
"4",
"]",
".",
"copy",
"(",
")",
",",
"np",
".",
"expand_dims",
"(",
"score",
"[",
"ipass",
"]",
".",
"copy",
"(",
")",
",",
"1",
")",
"]",
")",
"mv",
"=",
"out0",
"[",
":",
",",
"ipass",
"[",
"0",
"]",
"]",
"w",
"=",
"total_boxes",
"[",
":",
",",
"2",
"]",
"-",
"total_boxes",
"[",
":",
",",
"0",
"]",
"+",
"1",
"h",
"=",
"total_boxes",
"[",
":",
",",
"3",
"]",
"-",
"total_boxes",
"[",
":",
",",
"1",
"]",
"+",
"1",
"points",
"[",
"0",
":",
"5",
",",
":",
"]",
"=",
"np",
".",
"tile",
"(",
"w",
",",
"(",
"5",
",",
"1",
")",
")",
"*",
"points",
"[",
"0",
":",
"5",
",",
":",
"]",
"+",
"np",
".",
"tile",
"(",
"total_boxes",
"[",
":",
",",
"0",
"]",
",",
"(",
"5",
",",
"1",
")",
")",
"-",
"1",
"points",
"[",
"5",
":",
"10",
",",
":",
"]",
"=",
"np",
".",
"tile",
"(",
"h",
",",
"(",
"5",
",",
"1",
")",
")",
"*",
"points",
"[",
"5",
":",
"10",
",",
":",
"]",
"+",
"np",
".",
"tile",
"(",
"total_boxes",
"[",
":",
",",
"1",
"]",
",",
"(",
"5",
",",
"1",
")",
")",
"-",
"1",
"if",
"total_boxes",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"total_boxes",
"=",
"self",
".",
"__bbreg",
"(",
"total_boxes",
".",
"copy",
"(",
")",
",",
"np",
".",
"transpose",
"(",
"mv",
")",
")",
"pick",
"=",
"self",
".",
"__nms",
"(",
"total_boxes",
".",
"copy",
"(",
")",
",",
"0.7",
",",
"'Min'",
")",
"total_boxes",
"=",
"total_boxes",
"[",
"pick",
",",
":",
"]",
"points",
"=",
"points",
"[",
":",
",",
"pick",
"]",
"return",
"total_boxes",
",",
"points"
] | Third stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return: | [
"Third",
"stage",
"of",
"the",
"MTCNN",
"."
] | python | train |
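Stage 3 is internal; the public entry point runs all three stages. Typical usage (result keys follow the mtcnn README; the image path is a placeholder):

import cv2
from mtcnn.mtcnn import MTCNN

img = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2RGB)  # placeholder path
detector = MTCNN()
for face in detector.detect_faces(img):
    print(face["box"], face["confidence"])  # bounding box and score
    print(face["keypoints"]["left_eye"])    # 5-point landmarks come from stage 3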
NaPs/Kolekto | kolekto/profiles/__init__.py | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/profiles/__init__.py#L19-L32 | def load_commands(self, parser):
""" Load commands of this profile.
:param parser: argparse parser on which to add commands
"""
entrypoints = self._get_entrypoints()
already_loaded = set()
for entrypoint in entrypoints:
if entrypoint.name not in already_loaded:
command_class = entrypoint.load()
command_class(entrypoint.name, self, parser).prepare()
already_loaded.add(entrypoint.name) | [
"def",
"load_commands",
"(",
"self",
",",
"parser",
")",
":",
"entrypoints",
"=",
"self",
".",
"_get_entrypoints",
"(",
")",
"already_loaded",
"=",
"set",
"(",
")",
"for",
"entrypoint",
"in",
"entrypoints",
":",
"if",
"entrypoint",
".",
"name",
"not",
"in",
"already_loaded",
":",
"command_class",
"=",
"entrypoint",
".",
"load",
"(",
")",
"command_class",
"(",
"entrypoint",
".",
"name",
",",
"self",
",",
"parser",
")",
".",
"prepare",
"(",
")",
"already_loaded",
".",
"add",
"(",
"entrypoint",
".",
"name",
")"
] | Load commands of this profile.
:param parser: argparse parser on which to add commands | [
"Load",
"commands",
"of",
"this",
"profile",
"."
] | python | train |
google/grr | grr/core/grr_response_core/lib/rdfvalues/paths.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/paths.py#L281-L284 | def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value) | [
"def",
"Validate",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"RECURSION_REGEX",
".",
"findall",
"(",
"self",
".",
"_value",
")",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Only one ** is permitted per path: %s.\"",
"%",
"self",
".",
"_value",
")"
] | GlobExpression is valid. | [
"GlobExpression",
"is",
"valid",
"."
] | python | train |
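The validation above boils down to counting `**` occurrences. The same check in isolation; `RECURSION_REGEX` itself is defined elsewhere in the class, so the pattern below is a plausible stand-in:

import re

RECURSION_REGEX = re.compile(r"\*\*")  # stand-in for the class attribute

def validate_glob(value):
    if len(RECURSION_REGEX.findall(value)) > 1:
        raise ValueError("Only one ** is permitted per path: %s." % value)

validate_glob("/home/**/*.txt")   # fine: a single **
validate_glob("/home/**/a/**/b")  # raises ValueError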
NiklasRosenstein-Python/nr-deprecated | nr/path.py | https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/path.py#L246-L254 | def rmvsuffix(subject):
"""
Remove the suffix from *subject*.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
subject = subject[:index]
return subject | [
"def",
"rmvsuffix",
"(",
"subject",
")",
":",
"index",
"=",
"subject",
".",
"rfind",
"(",
"'.'",
")",
"if",
"index",
">",
"subject",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
".",
"rfind",
"(",
"'/'",
")",
":",
"subject",
"=",
"subject",
"[",
":",
"index",
"]",
"return",
"subject"
] | Remove the suffix from *subject*. | [
"Remove",
"the",
"suffix",
"from",
"*",
"subject",
"*",
"."
] | python | train |
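The rfind('/') comparison above is what keeps dots in directory names safe: only a dot in the final path component counts as a suffix (import path assumed from the entry).

from nr.path import rmvsuffix  # assumed import

rmvsuffix('archive/data.tar')    # -> 'archive/data'
rmvsuffix('v1.2/readme')         # -> 'v1.2/readme' (dot is in the directory, kept)
rmvsuffix('C:\\tmp\\notes.txt')  # -> 'C:\\tmp\\notes' (backslashes normalized first)
rmvsuffix('no_suffix')           # -> 'no_suffix'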
tanghaibao/jcvi | jcvi/apps/biomart.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/biomart.py#L297-L324 | def bed(args):
"""
%prog bed genes.ids
Get gene bed from Phytozome. `genes.ids` contains the list of genes you want
to pull from Phytozome. Write output to .bed file.
"""
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
ids = set(x.strip() for x in open(idsfile))
data = get_bed_from_phytozome(list(ids))
pf = idsfile.rsplit(".", 1)[0]
bedfile = pf + ".bed"
fw = open(bedfile, "w")
for i, row in enumerate(data):
row = row.strip()
if row == "":
continue
print(row, file=fw)
logging.debug("A total of {0} records written to `{1}`.".format(i + 1, bedfile)) | [
"def",
"bed",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"bed",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"idsfile",
",",
"=",
"args",
"ids",
"=",
"set",
"(",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"open",
"(",
"idsfile",
")",
")",
"data",
"=",
"get_bed_from_phytozome",
"(",
"list",
"(",
"ids",
")",
")",
"pf",
"=",
"idsfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"bedfile",
"=",
"pf",
"+",
"\".bed\"",
"fw",
"=",
"open",
"(",
"bedfile",
",",
"\"w\"",
")",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"data",
")",
":",
"row",
"=",
"row",
".",
"strip",
"(",
")",
"if",
"row",
"==",
"\"\"",
":",
"continue",
"print",
"(",
"row",
",",
"file",
"=",
"fw",
")",
"logging",
".",
"debug",
"(",
"\"A total of {0} records written to `{1}`.\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"bedfile",
")",
")"
] | %prog bed genes.ids
Get gene bed from Phytozome. `genes.ids` contains the list of genes you want
to pull from Phytozome. Write output to .bed file. | [
"%prog",
"bed",
"genes",
".",
"ids"
] | python | train |
fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/interactive.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L2012-L2050 | def do_register(self, arg):
"""
[~thread] r - print the value of all registers
[~thread] r <register> - print the value of a register
[~thread] r <register>=<value> - change the value of a register
[~thread] register - print the value of all registers
[~thread] register <register> - print the value of a register
[~thread] register <register>=<value> - change the value of a register
"""
arg = arg.strip()
if not arg:
self.print_current_location()
else:
equ = arg.find('=')
if equ >= 0:
register = arg[:equ].strip()
value = arg[equ+1:].strip()
if not value:
value = '0'
self.change_register(register, value)
else:
value = self.input_register(arg)
if value is None:
raise CmdError("unknown register: %s" % arg)
try:
label = None
thread = self.get_thread_from_prefix()
process = thread.get_process()
module = process.get_module_at_address(value)
if module:
label = module.get_label_at_address(value)
except RuntimeError:
label = None
reg = arg.upper()
val = HexDump.address(value)
if label:
print("%s: %s (%s)" % (reg, val, label))
else:
print("%s: %s" % (reg, val)) | [
"def",
"do_register",
"(",
"self",
",",
"arg",
")",
":",
"arg",
"=",
"arg",
".",
"strip",
"(",
")",
"if",
"not",
"arg",
":",
"self",
".",
"print_current_location",
"(",
")",
"else",
":",
"equ",
"=",
"arg",
".",
"find",
"(",
"'='",
")",
"if",
"equ",
">=",
"0",
":",
"register",
"=",
"arg",
"[",
":",
"equ",
"]",
".",
"strip",
"(",
")",
"value",
"=",
"arg",
"[",
"equ",
"+",
"1",
":",
"]",
".",
"strip",
"(",
")",
"if",
"not",
"value",
":",
"value",
"=",
"'0'",
"self",
".",
"change_register",
"(",
"register",
",",
"value",
")",
"else",
":",
"value",
"=",
"self",
".",
"input_register",
"(",
"arg",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"CmdError",
"(",
"\"unknown register: %s\"",
"%",
"arg",
")",
"try",
":",
"label",
"=",
"None",
"thread",
"=",
"self",
".",
"get_thread_from_prefix",
"(",
")",
"process",
"=",
"thread",
".",
"get_process",
"(",
")",
"module",
"=",
"process",
".",
"get_module_at_address",
"(",
"value",
")",
"if",
"module",
":",
"label",
"=",
"module",
".",
"get_label_at_address",
"(",
"value",
")",
"except",
"RuntimeError",
":",
"label",
"=",
"None",
"reg",
"=",
"arg",
".",
"upper",
"(",
")",
"val",
"=",
"HexDump",
".",
"address",
"(",
"value",
")",
"if",
"label",
":",
"print",
"(",
"\"%s: %s (%s)\"",
"%",
"(",
"reg",
",",
"val",
",",
"label",
")",
")",
"else",
":",
"print",
"(",
"\"%s: %s\"",
"%",
"(",
"reg",
",",
"val",
")",
")"
] | [~thread] r - print the value of all registers
[~thread] r <register> - print the value of a register
[~thread] r <register>=<value> - change the value of a register
[~thread] register - print the value of all registers
[~thread] register <register> - print the value of a register
[~thread] register <register>=<value> - change the value of a register
"[",
"~thread",
"]",
"r",
"-",
"print",
"(",
"the",
"value",
"of",
"all",
"registers",
"[",
"~thread",
"]",
"r",
"<register",
">",
"-",
"print",
"(",
"the",
"value",
"of",
"a",
"register",
"[",
"~thread",
"]",
"r",
"<register",
">",
"=",
"<value",
">",
"-",
"change",
"the",
"value",
"of",
"a",
"register",
"[",
"~thread",
"]",
"register",
"-",
"print",
"(",
"the",
"value",
"of",
"all",
"registers",
"[",
"~thread",
"]",
"register",
"<register",
">",
"-",
"print",
"(",
"the",
"value",
"of",
"a",
"register",
"[",
"~thread",
"]",
"register",
"<register",
">",
"=",
"<value",
">",
"-",
"change",
"the",
"value",
"of",
"a",
"register"
] | python | train |
sbuss/pypercube | pypercube/metric.py | https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/metric.py#L25-L48 | def from_json(cls, json_obj):
"""Build a MetricResponse from JSON.
:param json_obj: JSON data representing a Cube Metric.
:type json_obj: `String` or `json`
:throws: `InvalidMetricError` when any of {type,time,data} fields are
not present in json_obj.
"""
if isinstance(json_obj, str):
json_obj = json.loads(json_obj)
time = None
value = None
if cls.TIME_FIELD_NAME in json_obj:
time = json_obj[cls.TIME_FIELD_NAME]
else:
raise InvalidMetricError("{field} must be present!".format(
field=cls.TIME_FIELD_NAME))
if cls.VALUE_FIELD_NAME in json_obj:
value = json_obj[cls.VALUE_FIELD_NAME]
return cls(time, value) | [
"def",
"from_json",
"(",
"cls",
",",
"json_obj",
")",
":",
"if",
"isinstance",
"(",
"json_obj",
",",
"str",
")",
":",
"json_obj",
"=",
"json",
".",
"loads",
"(",
"json_obj",
")",
"time",
"=",
"None",
"value",
"=",
"None",
"if",
"cls",
".",
"TIME_FIELD_NAME",
"in",
"json_obj",
":",
"time",
"=",
"json_obj",
"[",
"cls",
".",
"TIME_FIELD_NAME",
"]",
"else",
":",
"raise",
"InvalidMetricError",
"(",
"\"{field} must be present!\"",
".",
"format",
"(",
"field",
"=",
"cls",
".",
"TIME_FIELD_NAME",
")",
")",
"if",
"cls",
".",
"VALUE_FIELD_NAME",
"in",
"json_obj",
":",
"value",
"=",
"json_obj",
"[",
"cls",
".",
"VALUE_FIELD_NAME",
"]",
"return",
"cls",
"(",
"time",
",",
"value",
")"
] | Build a MetricResponse from JSON.
:param json_obj: JSON data representing a Cube Metric.
:type json_obj: `String` or `json`
:throws: `InvalidMetricError` when the time field is
not present in json_obj. | [
"Build",
"a",
"MetricResponse",
"from",
"JSON",
"."
] | python | train |
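Assuming the enclosing class is the `MetricResponse` named in the docstring, with TIME_FIELD_NAME/VALUE_FIELD_NAME of "time"/"value" (all assumptions here), the classmethod accepts either a JSON string or an already-parsed dict:

# hypothetical: class and field names assumed, not verified against pypercube
m = MetricResponse.from_json('{"time": "2013-02-01T00:00:00Z", "value": 42}')
m = MetricResponse.from_json({"time": "2013-02-01T00:00:00Z"})  # value stays None
MetricResponse.from_json({"value": 3})  # raises InvalidMetricError: time is required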
HttpRunner/HttpRunner | httprunner/client.py | https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/client.py#L64-L126 | def get_req_resp_record(self, resp_obj):
""" get request and response info from Response() object.
"""
def log_print(req_resp_dict, r_type):
msg = "\n================== {} details ==================\n".format(r_type)
for key, value in req_resp_dict[r_type].items():
msg += "{:<16} : {}\n".format(key, repr(value))
logger.log_debug(msg)
req_resp_dict = {
"request": {},
"response": {}
}
# record actual request info
req_resp_dict["request"]["url"] = resp_obj.request.url
req_resp_dict["request"]["method"] = resp_obj.request.method
req_resp_dict["request"]["headers"] = dict(resp_obj.request.headers)
request_body = resp_obj.request.body
if request_body:
request_content_type = lower_dict_keys(
req_resp_dict["request"]["headers"]
).get("content-type")
if request_content_type and "multipart/form-data" in request_content_type:
# upload file type
req_resp_dict["request"]["body"] = "upload file stream (OMITTED)"
else:
req_resp_dict["request"]["body"] = request_body
# log request details in debug mode
log_print(req_resp_dict, "request")
# record response info
req_resp_dict["response"]["ok"] = resp_obj.ok
req_resp_dict["response"]["url"] = resp_obj.url
req_resp_dict["response"]["status_code"] = resp_obj.status_code
req_resp_dict["response"]["reason"] = resp_obj.reason
req_resp_dict["response"]["cookies"] = resp_obj.cookies or {}
req_resp_dict["response"]["encoding"] = resp_obj.encoding
resp_headers = dict(resp_obj.headers)
req_resp_dict["response"]["headers"] = resp_headers
lower_resp_headers = lower_dict_keys(resp_headers)
content_type = lower_resp_headers.get("content-type", "")
req_resp_dict["response"]["content_type"] = content_type
if "image" in content_type:
# response is image type, record bytes content only
req_resp_dict["response"]["content"] = resp_obj.content
else:
try:
# try to record json data
req_resp_dict["response"]["json"] = resp_obj.json()
except ValueError:
# only record at most 512 text characters
resp_text = resp_obj.text
req_resp_dict["response"]["text"] = omit_long_data(resp_text)
# log response details in debug mode
log_print(req_resp_dict, "response")
return req_resp_dict | [
"def",
"get_req_resp_record",
"(",
"self",
",",
"resp_obj",
")",
":",
"def",
"log_print",
"(",
"req_resp_dict",
",",
"r_type",
")",
":",
"msg",
"=",
"\"\\n================== {} details ==================\\n\"",
".",
"format",
"(",
"r_type",
")",
"for",
"key",
",",
"value",
"in",
"req_resp_dict",
"[",
"r_type",
"]",
".",
"items",
"(",
")",
":",
"msg",
"+=",
"\"{:<16} : {}\\n\"",
".",
"format",
"(",
"key",
",",
"repr",
"(",
"value",
")",
")",
"logger",
".",
"log_debug",
"(",
"msg",
")",
"req_resp_dict",
"=",
"{",
"\"request\"",
":",
"{",
"}",
",",
"\"response\"",
":",
"{",
"}",
"}",
"# record actual request info",
"req_resp_dict",
"[",
"\"request\"",
"]",
"[",
"\"url\"",
"]",
"=",
"resp_obj",
".",
"request",
".",
"url",
"req_resp_dict",
"[",
"\"request\"",
"]",
"[",
"\"method\"",
"]",
"=",
"resp_obj",
".",
"request",
".",
"method",
"req_resp_dict",
"[",
"\"request\"",
"]",
"[",
"\"headers\"",
"]",
"=",
"dict",
"(",
"resp_obj",
".",
"request",
".",
"headers",
")",
"request_body",
"=",
"resp_obj",
".",
"request",
".",
"body",
"if",
"request_body",
":",
"request_content_type",
"=",
"lower_dict_keys",
"(",
"req_resp_dict",
"[",
"\"request\"",
"]",
"[",
"\"headers\"",
"]",
")",
".",
"get",
"(",
"\"content-type\"",
")",
"if",
"request_content_type",
"and",
"\"multipart/form-data\"",
"in",
"request_content_type",
":",
"# upload file type",
"req_resp_dict",
"[",
"\"request\"",
"]",
"[",
"\"body\"",
"]",
"=",
"\"upload file stream (OMITTED)\"",
"else",
":",
"req_resp_dict",
"[",
"\"request\"",
"]",
"[",
"\"body\"",
"]",
"=",
"request_body",
"# log request details in debug mode",
"log_print",
"(",
"req_resp_dict",
",",
"\"request\"",
")",
"# record response info",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"ok\"",
"]",
"=",
"resp_obj",
".",
"ok",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"url\"",
"]",
"=",
"resp_obj",
".",
"url",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"status_code\"",
"]",
"=",
"resp_obj",
".",
"status_code",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"reason\"",
"]",
"=",
"resp_obj",
".",
"reason",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"cookies\"",
"]",
"=",
"resp_obj",
".",
"cookies",
"or",
"{",
"}",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"encoding\"",
"]",
"=",
"resp_obj",
".",
"encoding",
"resp_headers",
"=",
"dict",
"(",
"resp_obj",
".",
"headers",
")",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"headers\"",
"]",
"=",
"resp_headers",
"lower_resp_headers",
"=",
"lower_dict_keys",
"(",
"resp_headers",
")",
"content_type",
"=",
"lower_resp_headers",
".",
"get",
"(",
"\"content-type\"",
",",
"\"\"",
")",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"content_type\"",
"]",
"=",
"content_type",
"if",
"\"image\"",
"in",
"content_type",
":",
"# response is image type, record bytes content only",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"content\"",
"]",
"=",
"resp_obj",
".",
"content",
"else",
":",
"try",
":",
"# try to record json data",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"json\"",
"]",
"=",
"resp_obj",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"# only record at most 512 text charactors",
"resp_text",
"=",
"resp_obj",
".",
"text",
"req_resp_dict",
"[",
"\"response\"",
"]",
"[",
"\"text\"",
"]",
"=",
"omit_long_data",
"(",
"resp_text",
")",
"# log response details in debug mode",
"log_print",
"(",
"req_resp_dict",
",",
"\"response\"",
")",
"return",
"req_resp_dict"
] | get request and response info from Response() object. | [
"get",
"request",
"and",
"response",
"info",
"from",
"Response",
"()",
"object",
"."
] | python | train |
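The method above depends on a lower_dict_keys helper for case-insensitive header lookups. A minimal sketch of that pattern, assuming the helper simply lower-cases the keys (consistent with how the record uses it):

def lower_dict_keys(d):
    # assumed helper: normalize header names for case-insensitive lookup
    return {k.lower(): v for k, v in d.items()}

headers = {"Content-Type": "application/json; charset=utf-8"}
content_type = lower_dict_keys(headers).get("content-type", "")
print("json" in content_type)  # True, so the response would be recorded as JSON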
RI-imaging/qpsphere | qpsphere/models/mod_rytov.py | https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/models/mod_rytov.py#L108-L238 | def sphere_prop_fslice_bessel(radius, sphere_index, medium_index,
wavelength=550e-9, pixel_size=1e-7,
grid_size=(80, 80), lD=0, approx="rytov",
zeropad=5, oversample=1
):
"""Compute the projection of a disc using the Fourier slice theorem
and the Bessel function of the first kind of order 1.
Parameters
----------
radius: float
Radius of the sphere [m]
sphere_index: float
Refractive index of the sphere
medium_index: float
Refractive index of the surrounding medium
wavelength: float
Vacuum wavelength of the imaging light [m]
pixel_size: float
Pixel size [m]
grid_size: tuple of floats
Resulting image size in x and y [px]
center: tuple of floats
Center position in image coordinates [px]
lD: float
The axial distance [m] from the center of the sphere
at which the field is computed.
approx: str
Which approximation to use (either "born" or "rytov")
zeropad: int
Zero-padding factor
oversample: int
Oversampling factor
"""
assert approx in ["born", "rytov"]
assert oversample > 0
assert zeropad > 0
assert isinstance(oversample, int)
assert isinstance(zeropad, int)
# convert everything to pixels
radius /= pixel_size
wavelength /= pixel_size
lD /= pixel_size
# apply over-sampling and zero-padding
wavelength *= oversample
opad_size = np.array(grid_size) * zeropad * oversample
assert (int(s) == s for s in opad_size), "grid_size must be integer type"
opad_size = np.array(np.round(opad_size), dtype=int)
grid_size = np.array(np.round(grid_size), dtype=int)
kx = 2 * np.pi * \
np.fft.ifftshift(np.fft.fftfreq(opad_size[0])).reshape(-1, 1)
ky = 2 * np.pi * \
np.fft.ifftshift(np.fft.fftfreq(opad_size[1])).reshape(1, -1)
km = 2 * np.pi * medium_index / wavelength
filter_klp = (kx**2 + ky**2 < km**2)
kz = np.sqrt((km**2 - kx**2 - ky**2) * filter_klp) - km
r = np.sqrt((kx**2 + ky**2 + kz**2) * filter_klp) / (2 * np.pi)
comp_id = r != 0
F = np.zeros_like(r)
F[comp_id] = spspec.spherical_jn(1, r[comp_id] * radius * np.pi * 2) \
* radius**2 / r[comp_id] * 2
# center has analytical value
center_fft = np.where(np.abs(kx) + np.abs(ky) + np.abs(kz) == 0)
F[center_fft] = 4 / 3 * np.pi * radius**3
# object amplitude
F *= km**2 * ((sphere_index / medium_index)**2 - 1)
# prefactor A
M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
# division factor
A = -2j * km * M * np.exp(-1j * km * M * lD)
# rotate phase by half a pixel so the ifft is centered in real space
if grid_size[0] % 2:
doffx = 0
else:
doffx = .5
if grid_size[1] % 2:
doffy = 0
else:
doffy = .5
transl = np.exp(1j * ((doffx) * kx + (doffy) * ky))
valid = F != 0
Fconv = np.zeros((opad_size[0], opad_size[1]), dtype=complex)
Fconv[valid] = F[valid] / A[valid] * transl[valid]
p = np.fft.ifftn(np.fft.fftshift(Fconv))
p = np.fft.ifftshift(p)
if oversample > 1:
p = p[::oversample, ::oversample]
if zeropad > 1:
# Slice
a0, a1 = np.array(np.floor(opad_size / 2), dtype=int) // oversample
b0, b1 = np.array(np.floor(grid_size / 2), dtype=int)
if grid_size[0] % 2 != 0:
of0 = 1
a0 += 1
else:
of0 = 0
if grid_size[1] % 2 != 0:
of1 = 1
a1 += 1
else:
of1 = 0
# remove zero-padding
p = p[a0 - b0:a0 + b0 + of0, a1 - b1:a1 + b1 + of1]
if approx == "born":
# norm = (u0 + ub)/u0
# norm = 1 + ub/u0
return 1 + p / np.exp(1j * km * lD)
elif approx == "rytov":
# norm = (u0 + ur)/u0
# ur = u0 ( exp(ub/u0) -1 )
# norm = ( u0 + u0 *(exp(ub/u0)-1) )/u0
# norm = exp(ub/u0)
return np.exp(p / np.exp(1j * km * lD)) | [
"def",
"sphere_prop_fslice_bessel",
"(",
"radius",
",",
"sphere_index",
",",
"medium_index",
",",
"wavelength",
"=",
"550e-9",
",",
"pixel_size",
"=",
"1e-7",
",",
"grid_size",
"=",
"(",
"80",
",",
"80",
")",
",",
"lD",
"=",
"0",
",",
"approx",
"=",
"\"rytov\"",
",",
"zeropad",
"=",
"5",
",",
"oversample",
"=",
"1",
")",
":",
"assert",
"approx",
"in",
"[",
"\"born\"",
",",
"\"rytov\"",
"]",
"assert",
"oversample",
">",
"0",
"assert",
"zeropad",
">",
"0",
"assert",
"isinstance",
"(",
"oversample",
",",
"int",
")",
"assert",
"isinstance",
"(",
"zeropad",
",",
"int",
")",
"# convert everything to pixels",
"radius",
"/=",
"pixel_size",
"wavelength",
"/=",
"pixel_size",
"lD",
"/=",
"pixel_size",
"# apply over-sampling and zero-padding",
"wavelength",
"*=",
"oversample",
"opad_size",
"=",
"np",
".",
"array",
"(",
"grid_size",
")",
"*",
"zeropad",
"*",
"oversample",
"assert",
"(",
"int",
"(",
"s",
")",
"==",
"s",
"for",
"s",
"in",
"opad_size",
")",
",",
"\"grid_size must be integer type\"",
"opad_size",
"=",
"np",
".",
"array",
"(",
"np",
".",
"round",
"(",
"opad_size",
")",
",",
"dtype",
"=",
"int",
")",
"grid_size",
"=",
"np",
".",
"array",
"(",
"np",
".",
"round",
"(",
"grid_size",
")",
",",
"dtype",
"=",
"int",
")",
"kx",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"opad_size",
"[",
"0",
"]",
")",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"ky",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"opad_size",
"[",
"1",
"]",
")",
")",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"km",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"medium_index",
"/",
"wavelength",
"filter_klp",
"=",
"(",
"kx",
"**",
"2",
"+",
"ky",
"**",
"2",
"<",
"km",
"**",
"2",
")",
"kz",
"=",
"np",
".",
"sqrt",
"(",
"(",
"km",
"**",
"2",
"-",
"kx",
"**",
"2",
"-",
"ky",
"**",
"2",
")",
"*",
"filter_klp",
")",
"-",
"km",
"r",
"=",
"np",
".",
"sqrt",
"(",
"(",
"kx",
"**",
"2",
"+",
"ky",
"**",
"2",
"+",
"kz",
"**",
"2",
")",
"*",
"filter_klp",
")",
"/",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"comp_id",
"=",
"r",
"!=",
"0",
"F",
"=",
"np",
".",
"zeros_like",
"(",
"r",
")",
"F",
"[",
"comp_id",
"]",
"=",
"spspec",
".",
"spherical_jn",
"(",
"1",
",",
"r",
"[",
"comp_id",
"]",
"*",
"radius",
"*",
"np",
".",
"pi",
"*",
"2",
")",
"*",
"radius",
"**",
"2",
"/",
"r",
"[",
"comp_id",
"]",
"*",
"2",
"# center has analytical value",
"center_fft",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"kx",
")",
"+",
"np",
".",
"abs",
"(",
"ky",
")",
"+",
"np",
".",
"abs",
"(",
"kz",
")",
"==",
"0",
")",
"F",
"[",
"center_fft",
"]",
"=",
"4",
"/",
"3",
"*",
"np",
".",
"pi",
"*",
"radius",
"**",
"3",
"# object amplitude",
"F",
"*=",
"km",
"**",
"2",
"*",
"(",
"(",
"sphere_index",
"/",
"medium_index",
")",
"**",
"2",
"-",
"1",
")",
"# prefactor A",
"M",
"=",
"1.",
"/",
"km",
"*",
"np",
".",
"sqrt",
"(",
"(",
"km",
"**",
"2",
"-",
"kx",
"**",
"2",
"-",
"ky",
"**",
"2",
")",
"*",
"filter_klp",
")",
"# division factor",
"A",
"=",
"-",
"2j",
"*",
"km",
"*",
"M",
"*",
"np",
".",
"exp",
"(",
"-",
"1j",
"*",
"km",
"*",
"M",
"*",
"lD",
")",
"# rotate phase by half a pixel so the ifft is centered in real space",
"if",
"grid_size",
"[",
"0",
"]",
"%",
"2",
":",
"doffx",
"=",
"0",
"else",
":",
"doffx",
"=",
".5",
"if",
"grid_size",
"[",
"1",
"]",
"%",
"2",
":",
"doffy",
"=",
"0",
"else",
":",
"doffy",
"=",
".5",
"transl",
"=",
"np",
".",
"exp",
"(",
"1j",
"*",
"(",
"(",
"doffx",
")",
"*",
"kx",
"+",
"(",
"doffy",
")",
"*",
"ky",
")",
")",
"valid",
"=",
"F",
"!=",
"0",
"Fconv",
"=",
"np",
".",
"zeros",
"(",
"(",
"opad_size",
"[",
"0",
"]",
",",
"opad_size",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"complex",
")",
"Fconv",
"[",
"valid",
"]",
"=",
"F",
"[",
"valid",
"]",
"/",
"A",
"[",
"valid",
"]",
"*",
"transl",
"[",
"valid",
"]",
"p",
"=",
"np",
".",
"fft",
".",
"ifftn",
"(",
"np",
".",
"fft",
".",
"fftshift",
"(",
"Fconv",
")",
")",
"p",
"=",
"np",
".",
"fft",
".",
"ifftshift",
"(",
"p",
")",
"if",
"oversample",
">",
"1",
":",
"p",
"=",
"p",
"[",
":",
":",
"oversample",
",",
":",
":",
"oversample",
"]",
"if",
"zeropad",
">",
"1",
":",
"# Slice",
"a0",
",",
"a1",
"=",
"np",
".",
"array",
"(",
"np",
".",
"floor",
"(",
"opad_size",
"/",
"2",
")",
",",
"dtype",
"=",
"int",
")",
"//",
"oversample",
"b0",
",",
"b1",
"=",
"np",
".",
"array",
"(",
"np",
".",
"floor",
"(",
"grid_size",
"/",
"2",
")",
",",
"dtype",
"=",
"int",
")",
"if",
"grid_size",
"[",
"0",
"]",
"%",
"2",
"!=",
"0",
":",
"of0",
"=",
"1",
"a0",
"+=",
"1",
"else",
":",
"of0",
"=",
"0",
"if",
"grid_size",
"[",
"1",
"]",
"%",
"2",
"!=",
"0",
":",
"of1",
"=",
"1",
"a1",
"+=",
"1",
"else",
":",
"of1",
"=",
"0",
"# remove zero-padding",
"p",
"=",
"p",
"[",
"a0",
"-",
"b0",
":",
"a0",
"+",
"b0",
"+",
"of0",
",",
"a1",
"-",
"b1",
":",
"a1",
"+",
"b1",
"+",
"of1",
"]",
"if",
"approx",
"==",
"\"born\"",
":",
"# norm = (u0 + ub)/u0",
"# norm = 1 + ub/u0",
"return",
"1",
"+",
"p",
"/",
"np",
".",
"exp",
"(",
"1j",
"*",
"km",
"*",
"lD",
")",
"elif",
"approx",
"==",
"\"rytov\"",
":",
"# norm = (u0 + ur)/u0",
"# ur = u0 ( exp(ub/u0) -1 )",
"# norm = ( u0 + u0 *(exp(ub/u0)-1) )/u0",
"# norm = exp(ub/u0)",
"return",
"np",
".",
"exp",
"(",
"p",
"/",
"np",
".",
"exp",
"(",
"1j",
"*",
"km",
"*",
"lD",
")",
")"
] | Compute the projection of a disc using the Fourier slice theorem
and the Bessel function of the first kind of order 1.
Parameters
----------
radius: float
Radius of the sphere [m]
sphere_index: float
Refractive index of the sphere
medium_index: float
Refractive index of the surrounding medium
wavelength: float
Vacuum wavelength of the imaging light [m]
pixel_size: float
Pixel size [m]
grid_size: tuple of floats
Resulting image size in x and y [px]
center: tuple of floats
Center position in image coordinates [px]
lD: float
The axial distance [m] from the center of the sphere
at which the field is computed.
approx: str
Which approximation to use (either "born" or "rytov")
zeropad: int
Zero-padding factor
oversample: int
Oversampling factor | [
"Compute",
"the",
"projection",
"of",
"a",
"disc",
"using",
"the",
"Fourier",
"slice",
"theorem",
"and",
"the",
"Bessel",
"function",
"of",
"the",
"first",
"kind",
"of",
"order",
"1",
"."
] | python | train |
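A usage sketch for the projection function above, assuming numpy and scipy are installed and sphere_prop_fslice_bessel is in scope; the optical parameters below are illustrative, not taken from the record:

import numpy as np

# hypothetical call: a 5 um sphere imaged at 550 nm on an 80x80 grid
field = sphere_prop_fslice_bessel(radius=5e-6, sphere_index=1.38,
                                  medium_index=1.335, wavelength=550e-9,
                                  pixel_size=1e-7, grid_size=(80, 80),
                                  approx="rytov")
print(field.shape)       # (80, 80), complex-valued normalized field
phase = np.angle(field)  # the phase delay carries the sphere's signature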
keon/algorithms | algorithms/sort/top_sort.py | https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/sort/top_sort.py#L26-L66 | def top_sort(graph):
""" Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V)
"""
order, enter, state = [], set(graph), {}
def is_ready(node):
lst = graph.get(node, ())
if len(lst) == 0:
return True
for k in lst:
sk = state.get(k, None)
if sk == GRAY:
raise ValueError("cycle")
if sk != BLACK:
return False
return True
while enter:
node = enter.pop()
stack = []
while True:
state[node] = GRAY
stack.append(node)
for k in graph.get(node, ()):
sk = state.get(k, None)
if sk == GRAY:
raise ValueError("cycle")
if sk == BLACK:
continue
enter.discard(k)
stack.append(k)
while stack and is_ready(stack[-1]):
node = stack.pop()
order.append(node)
state[node] = BLACK
if len(stack) == 0:
break
node = stack.pop()
return order | [
"def",
"top_sort",
"(",
"graph",
")",
":",
"order",
",",
"enter",
",",
"state",
"=",
"[",
"]",
",",
"set",
"(",
"graph",
")",
",",
"{",
"}",
"def",
"is_ready",
"(",
"node",
")",
":",
"lst",
"=",
"graph",
".",
"get",
"(",
"node",
",",
"(",
")",
")",
"if",
"len",
"(",
"lst",
")",
"==",
"0",
":",
"return",
"True",
"for",
"k",
"in",
"lst",
":",
"sk",
"=",
"state",
".",
"get",
"(",
"k",
",",
"None",
")",
"if",
"sk",
"==",
"GRAY",
":",
"raise",
"ValueError",
"(",
"\"cycle\"",
")",
"if",
"sk",
"!=",
"BLACK",
":",
"return",
"False",
"return",
"True",
"while",
"enter",
":",
"node",
"=",
"enter",
".",
"pop",
"(",
")",
"stack",
"=",
"[",
"]",
"while",
"True",
":",
"state",
"[",
"node",
"]",
"=",
"GRAY",
"stack",
".",
"append",
"(",
"node",
")",
"for",
"k",
"in",
"graph",
".",
"get",
"(",
"node",
",",
"(",
")",
")",
":",
"sk",
"=",
"state",
".",
"get",
"(",
"k",
",",
"None",
")",
"if",
"sk",
"==",
"GRAY",
":",
"raise",
"ValueError",
"(",
"\"cycle\"",
")",
"if",
"sk",
"==",
"BLACK",
":",
"continue",
"enter",
".",
"discard",
"(",
"k",
")",
"stack",
".",
"append",
"(",
"k",
")",
"while",
"stack",
"and",
"is_ready",
"(",
"stack",
"[",
"-",
"1",
"]",
")",
":",
"node",
"=",
"stack",
".",
"pop",
"(",
")",
"order",
".",
"append",
"(",
"node",
")",
"state",
"[",
"node",
"]",
"=",
"BLACK",
"if",
"len",
"(",
"stack",
")",
"==",
"0",
":",
"break",
"node",
"=",
"stack",
".",
"pop",
"(",
")",
"return",
"order"
] | Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V) | [
"Time",
"complexity",
"is",
"the",
"same",
"as",
"DFS",
"which",
"is",
"O",
"(",
"V",
"+",
"E",
")",
"Space",
"complexity",
":",
"O",
"(",
"V",
")"
] | python | train |
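A usage sketch, assuming the record's top_sort is pasted into the same module; GRAY and BLACK are the module-level sentinels the function references, so illustrative values are defined first:

GRAY, BLACK = 0, 1  # assumed sentinel values

# each node maps to the nodes it depends on
graph = {"a": ["b", "c"], "b": ["d"], "c": ["d"], "d": []}
print(top_sort(graph))  # dependencies come first, e.g. ['d', 'b', 'c', 'a']

# a cyclic graph raises ValueError("cycle"):
# top_sort({"x": ["y"], "y": ["x"]})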
dj-stripe/dj-stripe | djstripe/models/core.py | https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/core.py#L303-L321 | def refund(self, amount=None, reason=None):
"""
Initiate a refund. If amount is not provided, then this will be a full refund.
:param amount: A positive decimal amount representing how much of this charge
to refund. Can only refund up to the unrefunded amount remaining of the charge.
:type amount: Decimal
:param reason: String indicating the reason for the refund. If set, possible values
are ``duplicate``, ``fraudulent``, and ``requested_by_customer``. Specifying
``fraudulent`` as the reason when you believe the charge to be fraudulent will
help Stripe improve their fraud detection algorithms.
:return: Stripe charge object
:rtype: dict
"""
charge_obj = self.api_retrieve().refund(
amount=self._calculate_refund_amount(amount=amount), reason=reason
)
return self.__class__.sync_from_stripe_data(charge_obj) | [
"def",
"refund",
"(",
"self",
",",
"amount",
"=",
"None",
",",
"reason",
"=",
"None",
")",
":",
"charge_obj",
"=",
"self",
".",
"api_retrieve",
"(",
")",
".",
"refund",
"(",
"amount",
"=",
"self",
".",
"_calculate_refund_amount",
"(",
"amount",
"=",
"amount",
")",
",",
"reason",
"=",
"reason",
")",
"return",
"self",
".",
"__class__",
".",
"sync_from_stripe_data",
"(",
"charge_obj",
")"
] | Initiate a refund. If amount is not provided, then this will be a full refund.
:param amount: A positive decimal amount representing how much of this charge
to refund. Can only refund up to the unrefunded amount remaining of the charge.
:type amount: Decimal
:param reason: String indicating the reason for the refund. If set, possible values
are ``duplicate``, ``fraudulent``, and ``requested_by_customer``. Specifying
``fraudulent`` as the reason when you believe the charge to be fraudulent will
help Stripe improve their fraud detection algorithms.
:return: Stripe charge object
:rtype: dict | [
"Initiate",
"a",
"refund",
".",
"If",
"amount",
"is",
"not",
"provided",
"then",
"this",
"will",
"be",
"a",
"full",
"refund",
"."
] | python | train |
Aluriak/bubble-tools | bubbletools/utils.py | https://github.com/Aluriak/bubble-tools/blob/f014f4a1986abefc80dc418feaa05ed258c2221a/bubbletools/utils.py#L19-L22 | def infer_format(filename:str) -> str:
"""Return extension identifying format of given filename"""
_, ext = os.path.splitext(filename)
return ext | [
"def",
"infer_format",
"(",
"filename",
":",
"str",
")",
"->",
"str",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"return",
"ext"
] | Return extension identifying format of given filename | [
"Return",
"extension",
"identifying",
"format",
"of",
"given",
"filename"
] | python | train |
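A self-contained usage sketch of the helper above:

import os

def infer_format(filename: str) -> str:
    """Return extension identifying format of given filename"""
    _, ext = os.path.splitext(filename)
    return ext

print(infer_format("graph.bbl"))       # '.bbl'
print(infer_format("archive.tar.gz"))  # '.gz' -- splitext keeps only the last suffix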
ArangoDB-Community/pyArango | pyArango/collection.py | https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L498-L528 | def bulkSave(self, docs, onDuplicate="error", **params) :
"""Parameter docs must be either an iterrable of documents or dictionnaries.
This function will return the number of documents, created and updated, and will raise an UpdateError exception if there's at least one error.
params are any parameters from arango's documentation"""
payload = []
for d in docs :
if type(d) is dict :
payload.append(json.dumps(d, default=str))
else :
try:
payload.append(d.toJson())
except Exception as e:
payload.append(json.dumps(d.getStore(), default=str))
payload = '\n'.join(payload)
params["type"] = "documents"
params["onDuplicate"] = onDuplicate
params["collection"] = self.name
URL = "%s/import" % self.database.URL
r = self.connection.session.post(URL, params = params, data = payload)
data = r.json()
if (r.status_code == 201) and "error" not in data :
return True
else :
if data["errors"] > 0 :
raise UpdateError("%d documents could not be created" % data["errors"], data)
return data["updated"] + data["created"] | [
"def",
"bulkSave",
"(",
"self",
",",
"docs",
",",
"onDuplicate",
"=",
"\"error\"",
",",
"*",
"*",
"params",
")",
":",
"payload",
"=",
"[",
"]",
"for",
"d",
"in",
"docs",
":",
"if",
"type",
"(",
"d",
")",
"is",
"dict",
":",
"payload",
".",
"append",
"(",
"json",
".",
"dumps",
"(",
"d",
",",
"default",
"=",
"str",
")",
")",
"else",
":",
"try",
":",
"payload",
".",
"append",
"(",
"d",
".",
"toJson",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"payload",
".",
"append",
"(",
"json",
".",
"dumps",
"(",
"d",
".",
"getStore",
"(",
")",
",",
"default",
"=",
"str",
")",
")",
"payload",
"=",
"'\\n'",
".",
"join",
"(",
"payload",
")",
"params",
"[",
"\"type\"",
"]",
"=",
"\"documents\"",
"params",
"[",
"\"onDuplicate\"",
"]",
"=",
"onDuplicate",
"params",
"[",
"\"collection\"",
"]",
"=",
"self",
".",
"name",
"URL",
"=",
"\"%s/import\"",
"%",
"self",
".",
"database",
".",
"URL",
"r",
"=",
"self",
".",
"connection",
".",
"session",
".",
"post",
"(",
"URL",
",",
"params",
"=",
"params",
",",
"data",
"=",
"payload",
")",
"data",
"=",
"r",
".",
"json",
"(",
")",
"if",
"(",
"r",
".",
"status_code",
"==",
"201",
")",
"and",
"\"error\"",
"not",
"in",
"data",
":",
"return",
"True",
"else",
":",
"if",
"data",
"[",
"\"errors\"",
"]",
">",
"0",
":",
"raise",
"UpdateError",
"(",
"\"%d documents could not be created\"",
"%",
"data",
"[",
"\"errors\"",
"]",
",",
"data",
")",
"return",
"data",
"[",
"\"updated\"",
"]",
"+",
"data",
"[",
"\"created\"",
"]"
] | Parameter docs must be either an iterable of documents or dictionaries.
This function will return the number of documents created and updated, and will raise an UpdateError exception if there's at least one error.
params are any parameters from arango's documentation | [
"Parameter",
"docs",
"must",
"be",
"either",
"an",
"iterrable",
"of",
"documents",
"or",
"dictionnaries",
".",
"This",
"function",
"will",
"return",
"the",
"number",
"of",
"documents",
"created",
"and",
"updated",
"and",
"will",
"raise",
"an",
"UpdateError",
"exception",
"if",
"there",
"s",
"at",
"least",
"one",
"error",
".",
"params",
"are",
"any",
"parameters",
"from",
"arango",
"s",
"documentation"
] | python | train |
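bulkSave builds a newline-delimited JSON payload for ArangoDB's import endpoint; a minimal sketch of that payload construction (document contents are illustrative):

import json

docs = [{"name": "Alice"}, {"name": "Bob"}]
payload = '\n'.join(json.dumps(d, default=str) for d in docs)
print(payload)
# {"name": "Alice"}
# {"name": "Bob"}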
gem/oq-engine | openquake/hazardlib/sourcewriter.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourcewriter.py#L442-L467 | def build_rupture_node(rupt, probs_occur):
"""
:param rupt: a hazardlib rupture object
:param probs_occur: a list of floats with sum 1
"""
s = sum(probs_occur)
if abs(s - 1) > pmf.PRECISION:
raise ValueError('The sum of %s is not 1: %s' % (probs_occur, s))
h = rupt.hypocenter
hp_dict = dict(lon=h.longitude, lat=h.latitude, depth=h.depth)
rupt_nodes = [Node('magnitude', {}, rupt.mag),
Node('rake', {}, rupt.rake),
Node('hypocenter', hp_dict)]
rupt_nodes.extend(rupt.surface.surface_nodes)
geom = rupt.surface.surface_nodes[0].tag
if len(rupt.surface.surface_nodes) > 1:
name = 'multiPlanesRupture'
elif geom == 'planarSurface':
name = 'singlePlaneRupture'
elif geom == 'simpleFaultGeometry':
name = 'simpleFaultRupture'
elif geom == 'complexFaultGeometry':
name = 'complexFaultRupture'
elif geom == 'griddedSurface':
name = 'griddedRupture'
return Node(name, {'probs_occur': probs_occur}, nodes=rupt_nodes) | [
"def",
"build_rupture_node",
"(",
"rupt",
",",
"probs_occur",
")",
":",
"s",
"=",
"sum",
"(",
"probs_occur",
")",
"if",
"abs",
"(",
"s",
"-",
"1",
")",
">",
"pmf",
".",
"PRECISION",
":",
"raise",
"ValueError",
"(",
"'The sum of %s is not 1: %s'",
"%",
"(",
"probs_occur",
",",
"s",
")",
")",
"h",
"=",
"rupt",
".",
"hypocenter",
"hp_dict",
"=",
"dict",
"(",
"lon",
"=",
"h",
".",
"longitude",
",",
"lat",
"=",
"h",
".",
"latitude",
",",
"depth",
"=",
"h",
".",
"depth",
")",
"rupt_nodes",
"=",
"[",
"Node",
"(",
"'magnitude'",
",",
"{",
"}",
",",
"rupt",
".",
"mag",
")",
",",
"Node",
"(",
"'rake'",
",",
"{",
"}",
",",
"rupt",
".",
"rake",
")",
",",
"Node",
"(",
"'hypocenter'",
",",
"hp_dict",
")",
"]",
"rupt_nodes",
".",
"extend",
"(",
"rupt",
".",
"surface",
".",
"surface_nodes",
")",
"geom",
"=",
"rupt",
".",
"surface",
".",
"surface_nodes",
"[",
"0",
"]",
".",
"tag",
"if",
"len",
"(",
"rupt",
".",
"surface",
".",
"surface_nodes",
")",
">",
"1",
":",
"name",
"=",
"'multiPlanesRupture'",
"elif",
"geom",
"==",
"'planarSurface'",
":",
"name",
"=",
"'singlePlaneRupture'",
"elif",
"geom",
"==",
"'simpleFaultGeometry'",
":",
"name",
"=",
"'simpleFaultRupture'",
"elif",
"geom",
"==",
"'complexFaultGeometry'",
":",
"name",
"=",
"'complexFaultRupture'",
"elif",
"geom",
"==",
"'griddedSurface'",
":",
"name",
"=",
"'griddedRupture'",
"return",
"Node",
"(",
"name",
",",
"{",
"'probs_occur'",
":",
"probs_occur",
"}",
",",
"nodes",
"=",
"rupt_nodes",
")"
] | :param rupt: a hazardlib rupture object
:param probs_occur: a list of floats with sum 1 | [
":",
"param",
"rupt",
":",
"a",
"hazardlib",
"rupture",
"object",
":",
"param",
"probs_occur",
":",
"a",
"list",
"of",
"floats",
"with",
"sum",
"1"
] | python | train |
FNNDSC/pfmisc | pfmisc/C_snode.py | https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L644-L660 | def paths_update(self, al_branchNodes):
"""
Add each node in <al_branchNodes> to the self.ml_cwd and
append the combined list to ml_allPaths. This method is
typically not called by a user, but by other methods in
this module.
Returns the list of all paths.
"""
for node in al_branchNodes:
#print "appending %s" % node
l_pwd = self.l_cwd[:]
l_pwd.append(node)
#print "l_pwd: %s" % l_pwd
#print "ml_cwd: %s" % self.ml_cwd
self.l_allPaths.append(l_pwd)
return self.l_allPaths | [
"def",
"paths_update",
"(",
"self",
",",
"al_branchNodes",
")",
":",
"for",
"node",
"in",
"al_branchNodes",
":",
"#print \"appending %s\" % node",
"l_pwd",
"=",
"self",
".",
"l_cwd",
"[",
":",
"]",
"l_pwd",
".",
"append",
"(",
"node",
")",
"#print \"l_pwd: %s\" % l_pwd",
"#print \"ml_cwd: %s\" % self.ml_cwd",
"self",
".",
"l_allPaths",
".",
"append",
"(",
"l_pwd",
")",
"return",
"self",
".",
"l_allPaths"
] | Add each node in <al_branchNodes> to the self.ml_cwd and
append the combined list to ml_allPaths. This method is
typically not called by a user, but by other methods in
this module.
Returns the list of all paths. | [
"Add",
"each",
"node",
"in",
"<al_branchNodes",
">",
"to",
"the",
"self",
".",
"ml_cwd",
"and",
"append",
"the",
"combined",
"list",
"to",
"ml_allPaths",
".",
"This",
"method",
"is",
"typically",
"not",
"called",
"by",
"a",
"user",
"but",
"by",
"other",
"methods",
"in",
"this",
"module",
"."
] | python | train |
rohankapoorcom/zm-py | zoneminder/monitor.py | https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L41-L49 | def get_time_period(value):
"""Get the corresponding TimePeriod from the value.
Example values: 'all', 'hour', 'day', 'week', or 'month'.
"""
for time_period in TimePeriod:
if time_period.period == value:
return time_period
raise ValueError('{} is not a valid TimePeriod'.format(value)) | [
"def",
"get_time_period",
"(",
"value",
")",
":",
"for",
"time_period",
"in",
"TimePeriod",
":",
"if",
"time_period",
".",
"period",
"==",
"value",
":",
"return",
"time_period",
"raise",
"ValueError",
"(",
"'{} is not a valid TimePeriod'",
".",
"format",
"(",
"value",
")",
")"
] | Get the corresponding TimePeriod from the value.
Example values: 'all', 'hour', 'day', 'week', or 'month'. | [
"Get",
"the",
"corresponding",
"TimePeriod",
"from",
"the",
"value",
"."
] | python | train |
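A minimal sketch, assuming TimePeriod is an enum whose members expose a .period string; the members below are illustrative assumptions, not the library's actual set:

from enum import Enum

class TimePeriod(Enum):
    ALL = "all"
    HOUR = "hour"
    DAY = "day"

    @property
    def period(self):
        return self.value

def get_time_period(value):
    """Get the corresponding TimePeriod from the value."""
    for time_period in TimePeriod:
        if time_period.period == value:
            return time_period
    raise ValueError('{} is not a valid TimePeriod'.format(value))

print(get_time_period("day"))  # TimePeriod.DAY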
Unidata/MetPy | metpy/interpolate/grid.py | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/interpolate/grid.py#L67-L84 | def get_xy_range(bbox):
r"""Return x and y ranges in meters based on bounding box.
bbox: dictionary
dictionary containing coordinates for corners of study area
Returns
-------
x_range: float
Range in meters in x dimension.
y_range: float
Range in meters in y dimension.
"""
x_range = bbox['east'] - bbox['west']
y_range = bbox['north'] - bbox['south']
return x_range, y_range | [
"def",
"get_xy_range",
"(",
"bbox",
")",
":",
"x_range",
"=",
"bbox",
"[",
"'east'",
"]",
"-",
"bbox",
"[",
"'west'",
"]",
"y_range",
"=",
"bbox",
"[",
"'north'",
"]",
"-",
"bbox",
"[",
"'south'",
"]",
"return",
"x_range",
",",
"y_range"
] | r"""Return x and y ranges in meters based on bounding box.
bbox: dictionary
dictionary containing coordinates for corners of study area
Returns
-------
x_range: float
Range in meters in x dimension.
y_range: float
Range in meters in y dimension. | [
"r",
"Return",
"x",
"and",
"y",
"ranges",
"in",
"meters",
"based",
"on",
"bounding",
"box",
"."
] | python | train |
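A self-contained usage sketch with an illustrative bounding box in meters:

def get_xy_range(bbox):
    x_range = bbox['east'] - bbox['west']
    y_range = bbox['north'] - bbox['south']
    return x_range, y_range

bbox = {'west': 0.0, 'east': 5000.0, 'south': 0.0, 'north': 3000.0}
print(get_xy_range(bbox))  # (5000.0, 3000.0)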
dwavesystems/dwave-cloud-client | dwave/cloud/config.py | https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/config.py#L239-L304 | def get_configfile_paths(system=True, user=True, local=True, only_existing=True):
"""Return a list of local configuration file paths.
Search paths for configuration files on the local system
are based on homebase_ and depend on operating system; for example, for Linux systems
these might include ``dwave.conf`` in the current working directory (CWD),
user-local ``.config/dwave/``, and system-wide ``/etc/dwave/``.
.. _homebase: https://github.com/dwavesystems/homebase
Args:
system (boolean, default=True):
Search for system-wide configuration files.
user (boolean, default=True):
Search for user-local configuration files.
local (boolean, default=True):
Search for local configuration files (in CWD).
only_existing (boolean, default=True):
Return only paths for files that exist on the local system.
Returns:
list[str]:
List of configuration file paths.
Examples:
This example displays all paths to configuration files on a Windows system
running Python 2.7 and then finds the single existing configuration file.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
[u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
'.\\dwave.conf']
>>> # Find existing files
>>> dc.config.get_configfile_paths() # doctest: +SKIP
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
"""
candidates = []
# system-wide has the lowest priority, `/etc/dwave/dwave.conf`
if system:
candidates.extend(homebase.site_config_dir_list(
app_author=CONF_AUTHOR, app_name=CONF_APP,
use_virtualenv=False, create=False))
# user-local will override it, `~/.config/dwave/dwave.conf`
if user:
candidates.append(homebase.user_config_dir(
app_author=CONF_AUTHOR, app_name=CONF_APP, roaming=False,
use_virtualenv=False, create=False))
# highest priority (overrides all): `./dwave.conf`
if local:
candidates.append(".")
paths = [os.path.join(base, CONF_FILENAME) for base in candidates]
if only_existing:
paths = list(filter(os.path.exists, paths))
return paths | [
"def",
"get_configfile_paths",
"(",
"system",
"=",
"True",
",",
"user",
"=",
"True",
",",
"local",
"=",
"True",
",",
"only_existing",
"=",
"True",
")",
":",
"candidates",
"=",
"[",
"]",
"# system-wide has the lowest priority, `/etc/dwave/dwave.conf`",
"if",
"system",
":",
"candidates",
".",
"extend",
"(",
"homebase",
".",
"site_config_dir_list",
"(",
"app_author",
"=",
"CONF_AUTHOR",
",",
"app_name",
"=",
"CONF_APP",
",",
"use_virtualenv",
"=",
"False",
",",
"create",
"=",
"False",
")",
")",
"# user-local will override it, `~/.config/dwave/dwave.conf`",
"if",
"user",
":",
"candidates",
".",
"append",
"(",
"homebase",
".",
"user_config_dir",
"(",
"app_author",
"=",
"CONF_AUTHOR",
",",
"app_name",
"=",
"CONF_APP",
",",
"roaming",
"=",
"False",
",",
"use_virtualenv",
"=",
"False",
",",
"create",
"=",
"False",
")",
")",
"# highest priority (overrides all): `./dwave.conf`",
"if",
"local",
":",
"candidates",
".",
"append",
"(",
"\".\"",
")",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"CONF_FILENAME",
")",
"for",
"base",
"in",
"candidates",
"]",
"if",
"only_existing",
":",
"paths",
"=",
"list",
"(",
"filter",
"(",
"os",
".",
"path",
".",
"exists",
",",
"paths",
")",
")",
"return",
"paths"
] | Return a list of local configuration file paths.
Search paths for configuration files on the local system
are based on homebase_ and depend on operating system; for example, for Linux systems
these might include ``dwave.conf`` in the current working directory (CWD),
user-local ``.config/dwave/``, and system-wide ``/etc/dwave/``.
.. _homebase: https://github.com/dwavesystems/homebase
Args:
system (boolean, default=True):
Search for system-wide configuration files.
user (boolean, default=True):
Search for user-local configuration files.
local (boolean, default=True):
Search for local configuration files (in CWD).
only_existing (boolean, default=True):
Return only paths for files that exist on the local system.
Returns:
list[str]:
List of configuration file paths.
Examples:
This example displays all paths to configuration files on a Windows system
running Python 2.7 and then finds the single existing configuration file.
>>> import dwave.cloud as dc
>>> # Display paths
>>> dc.config.get_configfile_paths(only_existing=False) # doctest: +SKIP
[u'C:\\ProgramData\\dwavesystem\\dwave\\dwave.conf',
u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf',
'.\\dwave.conf']
>>> # Find existing files
>>> dc.config.get_configfile_paths() # doctest: +SKIP
[u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf'] | [
"Return",
"a",
"list",
"of",
"local",
"configuration",
"file",
"paths",
"."
] | python | train |
atdt/afraid | afraid/__init__.py | https://github.com/atdt/afraid/blob/d74b2d4e41ed14e420da2793a89bef5d9b26ea26/afraid/__init__.py#L106-L114 | def update_continuously(records, update_interval=600):
"""Update `records` every `update_interval` seconds"""
while True:
for record in records:
try:
record.update()
except (ApiError, RequestException):
pass
time.sleep(update_interval) | [
"def",
"update_continuously",
"(",
"records",
",",
"update_interval",
"=",
"600",
")",
":",
"while",
"True",
":",
"for",
"record",
"in",
"records",
":",
"try",
":",
"record",
".",
"update",
"(",
")",
"except",
"(",
"ApiError",
",",
"RequestException",
")",
":",
"pass",
"time",
".",
"sleep",
"(",
"update_interval",
")"
] | Update `records` every `update_interval` seconds | [
"Update",
"records",
"every",
"update_interval",
"seconds"
] | python | train |
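A usage sketch: any object exposing an update() method works as a record. The call itself is left commented out because the loop never returns:

class FakeRecord:
    # illustrative stand-in for the DNS record objects the function expects
    def update(self):
        print("record refreshed")

records = [FakeRecord()]
# update_continuously(records, update_interval=600)  # loops forever; Ctrl+C to stop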
geophysics-ubonn/crtomo_tools | lib/crtomo/eitManager.py | https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/eitManager.py#L220-L271 | def load_data_crt_files(self, data_dict):
"""Load sEIT data from .ctr files (volt.dat files readable by CRTomo,
produced by CRMod)
Parameters
----------
data_dict : dict
Data files that are imported. See example down below
Examples
--------
>>> import glob
data_files = {}
data_files['frequencies'] = 'data/frequencies.dat'
files = sorted(glob.glob('data/volt_*.crt'))
data_files['crt'] = files
"""
if isinstance(data_dict, str):
raise Exception('Parameter must be a dict!')
frequency_data = data_dict['frequencies']
if isinstance(frequency_data, str):
frequencies = np.loadtxt(data_dict['frequencies'])
else:
# if this is not a string, assume it to be the data
frequencies = frequency_data
if frequencies.size != len(data_dict['crt']):
raise Exception(
'number of frequencies does not match the number of data files'
)
self._init_frequencies(frequencies)
for frequency, filename in zip(frequencies, data_dict['crt']):
subdata = np.atleast_2d(np.loadtxt(filename, skiprows=1))
if subdata.size == 0:
continue
# extract configurations
A = (subdata[:, 0] / 1e4).astype(int)
B = (subdata[:, 0] % 1e4).astype(int)
M = (subdata[:, 1] / 1e4).astype(int)
N = (subdata[:, 1] % 1e4).astype(int)
ABMN = np.vstack((A, B, M, N)).T
magnitudes = subdata[:, 2]
phases = subdata[:, 3]
self.tds[frequency].configs.add_to_configs(ABMN)
self.tds[frequency].register_measurements(magnitudes, phases) | [
"def",
"load_data_crt_files",
"(",
"self",
",",
"data_dict",
")",
":",
"if",
"isinstance",
"(",
"data_dict",
",",
"str",
")",
":",
"raise",
"Exception",
"(",
"'Parameter must be a dict!'",
")",
"frequency_data",
"=",
"data_dict",
"[",
"'frequencies'",
"]",
"if",
"isinstance",
"(",
"frequency_data",
",",
"str",
")",
":",
"frequencies",
"=",
"np",
".",
"loadtxt",
"(",
"data_dict",
"[",
"'frequencies'",
"]",
")",
"else",
":",
"# if this is not a string, assume it to be the data",
"frequencies",
"=",
"frequency_data",
"if",
"frequencies",
".",
"size",
"!=",
"len",
"(",
"data_dict",
"[",
"'crt'",
"]",
")",
":",
"raise",
"Exception",
"(",
"'number of frequencies does not match the number of data files'",
")",
"self",
".",
"_init_frequencies",
"(",
"frequencies",
")",
"for",
"frequency",
",",
"filename",
"in",
"zip",
"(",
"frequencies",
",",
"data_dict",
"[",
"'crt'",
"]",
")",
":",
"subdata",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"loadtxt",
"(",
"filename",
",",
"skiprows",
"=",
"1",
")",
")",
"if",
"subdata",
".",
"size",
"==",
"0",
":",
"continue",
"# extract configurations",
"A",
"=",
"(",
"subdata",
"[",
":",
",",
"0",
"]",
"/",
"1e4",
")",
".",
"astype",
"(",
"int",
")",
"B",
"=",
"(",
"subdata",
"[",
":",
",",
"0",
"]",
"%",
"1e4",
")",
".",
"astype",
"(",
"int",
")",
"M",
"=",
"(",
"subdata",
"[",
":",
",",
"1",
"]",
"/",
"1e4",
")",
".",
"astype",
"(",
"int",
")",
"N",
"=",
"(",
"subdata",
"[",
":",
",",
"1",
"]",
"%",
"1e4",
")",
".",
"astype",
"(",
"int",
")",
"ABMN",
"=",
"np",
".",
"vstack",
"(",
"(",
"A",
",",
"B",
",",
"M",
",",
"N",
")",
")",
".",
"T",
"magnitudes",
"=",
"subdata",
"[",
":",
",",
"2",
"]",
"phases",
"=",
"subdata",
"[",
":",
",",
"3",
"]",
"self",
".",
"tds",
"[",
"frequency",
"]",
".",
"configs",
".",
"add_to_configs",
"(",
"ABMN",
")",
"self",
".",
"tds",
"[",
"frequency",
"]",
".",
"register_measurements",
"(",
"magnitudes",
",",
"phases",
")"
] | Load sEIT data from .ctr files (volt.dat files readable by CRTomo,
produced by CRMod)
Parameters
----------
data_dict : dict
Data files that are imported. See example down below
Examples
--------
>>> import glob
data_files = {}
data_files['frequencies'] = 'data/frequencies.dat'
files = sorted(glob.glob('data/volt_*.crt'))
data_files['crt'] = files | [
"Load",
"sEIT",
"data",
"from",
".",
"ctr",
"files",
"(",
"volt",
".",
"dat",
"files",
"readable",
"by",
"CRTomo",
"produced",
"by",
"CRMod",
")"
] | python | train |
pycontribs/pyrax | pyrax/clouddatabases.py | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddatabases.py#L649-L653 | def revoke_user_access(self, db_names, strict=True):
"""
Revokes access to the databases listed in `db_names` for the user.
"""
return self.manager.revoke_user_access(self, db_names, strict=strict) | [
"def",
"revoke_user_access",
"(",
"self",
",",
"db_names",
",",
"strict",
"=",
"True",
")",
":",
"return",
"self",
".",
"manager",
".",
"revoke_user_access",
"(",
"self",
",",
"db_names",
",",
"strict",
"=",
"strict",
")"
] | Revokes access to the databases listed in `db_names` for the user. | [
"Revokes",
"access",
"to",
"the",
"databases",
"listed",
"in",
"db_names",
"for",
"the",
"user",
"."
] | python | train |
tensorpack/tensorpack | tensorpack/tfutils/varmanip.py | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/varmanip.py#L18-L35 | def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
"""
Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable
"""
name = varname
if varname_prefix is not None \
and name.startswith(varname_prefix):
name = name[len(varname_prefix) + 1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name | [
"def",
"get_savename_from_varname",
"(",
"varname",
",",
"varname_prefix",
"=",
"None",
",",
"savename_prefix",
"=",
"None",
")",
":",
"name",
"=",
"varname",
"if",
"varname_prefix",
"is",
"not",
"None",
"and",
"name",
".",
"startswith",
"(",
"varname_prefix",
")",
":",
"name",
"=",
"name",
"[",
"len",
"(",
"varname_prefix",
")",
"+",
"1",
":",
"]",
"if",
"savename_prefix",
"is",
"not",
"None",
":",
"name",
"=",
"savename_prefix",
"+",
"'/'",
"+",
"name",
"return",
"name"
] | Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable | [
"Args",
":",
"varname",
"(",
"str",
")",
":",
"a",
"variable",
"name",
"in",
"the",
"graph",
"varname_prefix",
"(",
"str",
")",
":",
"an",
"optional",
"prefix",
"that",
"may",
"need",
"to",
"be",
"removed",
"in",
"varname",
"savename_prefix",
"(",
"str",
")",
":",
"an",
"optional",
"prefix",
"to",
"append",
"to",
"all",
"savename",
"Returns",
":",
"str",
":",
"the",
"name",
"used",
"to",
"save",
"the",
"variable"
] | python | train |
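A self-contained sketch of the prefix rewriting performed above:

def get_savename_from_varname(varname, varname_prefix=None, savename_prefix=None):
    name = varname
    if varname_prefix is not None and name.startswith(varname_prefix):
        name = name[len(varname_prefix) + 1:]
    if savename_prefix is not None:
        name = savename_prefix + '/' + name
    return name

print(get_savename_from_varname("tower0/conv1/W", varname_prefix="tower0"))
# conv1/W
print(get_savename_from_varname("conv1/W", savename_prefix="backbone"))
# backbone/conv1/W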
wbond/asn1crypto | asn1crypto/core.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L4671-L4690 | def set(self, value):
"""
Sets the value of the object
:param value:
A unicode string or a datetime.datetime object
:raises:
ValueError - when an invalid value is passed
"""
if isinstance(value, datetime):
value = value.strftime('%y%m%d%H%M%SZ')
if _PY2:
value = value.decode('ascii')
AbstractString.set(self, value)
# Set it to None and let the class take care of converting the next
# time that .native is called
self._native = None | [
"def",
"set",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
")",
":",
"value",
"=",
"value",
".",
"strftime",
"(",
"'%y%m%d%H%M%SZ'",
")",
"if",
"_PY2",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'ascii'",
")",
"AbstractString",
".",
"set",
"(",
"self",
",",
"value",
")",
"# Set it to None and let the class take care of converting the next",
"# time that .native is called",
"self",
".",
"_native",
"=",
"None"
] | Sets the value of the object
:param value:
A unicode string or a datetime.datetime object
:raises:
ValueError - when an invalid value is passed | [
"Sets",
"the",
"value",
"of",
"the",
"object"
] | python | train |
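A usage sketch, assuming asn1crypto is installed; UTCTime is taken here to be one of the classes carrying this set() behavior (its strftime format matches the record), which is an assumption about the surrounding module:

from datetime import datetime
from asn1crypto.core import UTCTime

t = UTCTime()
t.set(datetime(2024, 1, 2, 3, 4, 5))
print(t.native)  # the datetime is parsed back lazily on first .native access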
wonambi-python/wonambi | wonambi/widgets/notes.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1078-L1089 | def set_quality_index(self):
"""Set the current signal quality in combobox."""
window_start = self.parent.value('window_start')
window_length = self.parent.value('window_length')
qual = self.annot.get_stage_for_epoch(window_start, window_length,
attr='quality')
#lg.info('winstart: ' + str(window_start) + ', quality: ' + str(qual))
if qual is None:
self.idx_quality.setCurrentIndex(-1)
else:
self.idx_quality.setCurrentIndex(QUALIFIERS.index(qual)) | [
"def",
"set_quality_index",
"(",
"self",
")",
":",
"window_start",
"=",
"self",
".",
"parent",
".",
"value",
"(",
"'window_start'",
")",
"window_length",
"=",
"self",
".",
"parent",
".",
"value",
"(",
"'window_length'",
")",
"qual",
"=",
"self",
".",
"annot",
".",
"get_stage_for_epoch",
"(",
"window_start",
",",
"window_length",
",",
"attr",
"=",
"'quality'",
")",
"#lg.info('winstart: ' + str(window_start) + ', quality: ' + str(qual))",
"if",
"qual",
"is",
"None",
":",
"self",
".",
"idx_quality",
".",
"setCurrentIndex",
"(",
"-",
"1",
")",
"else",
":",
"self",
".",
"idx_quality",
".",
"setCurrentIndex",
"(",
"QUALIFIERS",
".",
"index",
"(",
"qual",
")",
")"
] | Set the current signal quality in combobox. | [
"Set",
"the",
"current",
"signal",
"quality",
"in",
"combobox",
"."
] | python | train |
OSSOS/MOP | src/ossos/core/ossos/plant.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/plant.py#L114-L124 | def next(self):
"""
:return: a set of values that can be used for a planted object builder.
"""
# x y mag pix rate angle ''/h rate id
# 912.48 991.06 22.01 57.32 -45.23 10.60 0
self._n -= 1
if self._n < 0:
raise StopIteration()
return {'x': self.x(), 'y': self.y(), 'mag': self.mag(), 'sky_rate': self.rate(), 'angle': self.angle(),
'id': self.id} | [
"def",
"next",
"(",
"self",
")",
":",
"# x y mag pix rate angle ''/h rate id",
"# 912.48 991.06 22.01 57.32 -45.23 10.60 0",
"self",
".",
"_n",
"-=",
"1",
"if",
"self",
".",
"_n",
"<",
"0",
":",
"raise",
"StopIteration",
"(",
")",
"return",
"{",
"'x'",
":",
"self",
".",
"x",
"(",
")",
",",
"'y'",
":",
"self",
".",
"y",
"(",
")",
",",
"'mag'",
":",
"self",
".",
"mag",
"(",
")",
",",
"'sky_rate'",
":",
"self",
".",
"rate",
"(",
")",
",",
"'angle'",
":",
"self",
".",
"angle",
"(",
")",
",",
"'id'",
":",
"self",
".",
"id",
"}"
] | :return: a set of values that can be used for a planted object builder. | [
":",
"return",
":",
"a",
"set",
"of",
"values",
"that",
"can",
"be",
"used",
"for",
"an",
"planted",
"object",
"builder",
"."
] | python | train |
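The record implements the Python 2 iterator protocol (a next() method that raises StopIteration when exhausted); a minimal Python 3 sketch of the same countdown pattern:

class Planter:
    def __init__(self, n):
        self._n = n

    def __iter__(self):
        return self

    def __next__(self):  # Python 3 spelling of the record's next()
        self._n -= 1
        if self._n < 0:
            raise StopIteration()
        return {"id": self._n}

print(list(Planter(3)))  # [{'id': 2}, {'id': 1}, {'id': 0}]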
chaoss/grimoirelab-sirmordred | sirmordred/eclipse_projects_lib.py | https://github.com/chaoss/grimoirelab-sirmordred/blob/d6ac94d28d707fae23170064d078f1edf937d13e/sirmordred/eclipse_projects_lib.py#L134-L149 | def compose_github(projects, data):
""" Compose projects.json for github
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with github
"""
for p in [project for project in data if len(data[project]['github_repos']) > 0]:
if 'github' not in projects[p]:
projects[p]['github'] = []
urls = [url['url'] for url in data[p]['github_repos'] if
url['url'] not in projects[p]['github']]
projects[p]['github'] += urls
return projects | [
"def",
"compose_github",
"(",
"projects",
",",
"data",
")",
":",
"for",
"p",
"in",
"[",
"project",
"for",
"project",
"in",
"data",
"if",
"len",
"(",
"data",
"[",
"project",
"]",
"[",
"'github_repos'",
"]",
")",
">",
"0",
"]",
":",
"if",
"'github'",
"not",
"in",
"projects",
"[",
"p",
"]",
":",
"projects",
"[",
"p",
"]",
"[",
"'github'",
"]",
"=",
"[",
"]",
"urls",
"=",
"[",
"url",
"[",
"'url'",
"]",
"for",
"url",
"in",
"data",
"[",
"p",
"]",
"[",
"'github_repos'",
"]",
"if",
"url",
"[",
"'url'",
"]",
"not",
"in",
"projects",
"[",
"p",
"]",
"[",
"'github'",
"]",
"]",
"projects",
"[",
"p",
"]",
"[",
"'github'",
"]",
"+=",
"urls",
"return",
"projects"
] | Compose projects.json for github
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with github | [
"Compose",
"projects",
".",
"json",
"for",
"github"
] | python | valid |
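A self-contained sketch of the function above with illustrative project data:

def compose_github(projects, data):
    for p in [project for project in data if len(data[project]['github_repos']) > 0]:
        if 'github' not in projects[p]:
            projects[p]['github'] = []
        urls = [url['url'] for url in data[p]['github_repos']
                if url['url'] not in projects[p]['github']]
        projects[p]['github'] += urls
    return projects

projects = {"p1": {}}
data = {"p1": {"github_repos": [{"url": "https://github.com/org/repo"}]}}
print(compose_github(projects, data))
# {'p1': {'github': ['https://github.com/org/repo']}}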
SoCo/SoCo | soco/data_structures.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/data_structures.py#L591-L617 | def to_dict(self, remove_nones=False):
"""Return the dict representation of the instance.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representation of the `DidlObject`.
"""
content = {}
# Get the value of each attribute listed in _translation, and add it
# to the content dict
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
# also add parent_id, item_id, restricted, title and resources because
# they are not listed in _translation
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = [resource.to_dict(remove_nones=remove_nones)
for resource in self.resources]
content['desc'] = self.desc
return content | [
"def",
"to_dict",
"(",
"self",
",",
"remove_nones",
"=",
"False",
")",
":",
"content",
"=",
"{",
"}",
"# Get the value of each attribute listed in _translation, and add it",
"# to the content dict",
"for",
"key",
"in",
"self",
".",
"_translation",
":",
"if",
"hasattr",
"(",
"self",
",",
"key",
")",
":",
"content",
"[",
"key",
"]",
"=",
"getattr",
"(",
"self",
",",
"key",
")",
"# also add parent_id, item_id, restricted, title and resources because",
"# they are not listed in _translation",
"content",
"[",
"'parent_id'",
"]",
"=",
"self",
".",
"parent_id",
"content",
"[",
"'item_id'",
"]",
"=",
"self",
".",
"item_id",
"content",
"[",
"'restricted'",
"]",
"=",
"self",
".",
"restricted",
"content",
"[",
"'title'",
"]",
"=",
"self",
".",
"title",
"if",
"self",
".",
"resources",
"!=",
"[",
"]",
":",
"content",
"[",
"'resources'",
"]",
"=",
"[",
"resource",
".",
"to_dict",
"(",
"remove_nones",
"=",
"remove_nones",
")",
"for",
"resource",
"in",
"self",
".",
"resources",
"]",
"content",
"[",
"'desc'",
"]",
"=",
"self",
".",
"desc",
"return",
"content"
] | Return the dict representation of the instance.
Args:
remove_nones (bool, optional): Optionally remove dictionary
elements when their value is `None`.
Returns:
dict: a dict representation of the `DidlObject`. | [
"Return",
"the",
"dict",
"representation",
"of",
"the",
"instance",
"."
] | python | train |
benmontet/f3 | f3/photometry.py | https://github.com/benmontet/f3/blob/b2e1dc250e4e3e884a54c501cd35cf02d5b8719e/f3/photometry.py#L301-L329 | def do_photometry(self):
"""
Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data
in each orientation. This function is called by other functions and generally the user will not need
to interact with it directly.
"""
std_f = np.zeros(4)
data_save = np.zeros_like(self.postcard)
self.obs_flux = np.zeros_like(self.reference_flux)
for i in range(4):
g = np.where(self.qs == i)[0]
wh = np.where(self.times[g] > 54947)
data_save[g] = np.roll(self.postcard[g], int(self.roll_best[i,0]), axis=1)
data_save[g] = np.roll(data_save[g], int(self.roll_best[i,1]), axis=2)
self.target_flux_pixels = data_save[:,self.targets == 1]
self.target_flux = np.sum(self.target_flux_pixels, axis=1)
self.obs_flux[g] = self.target_flux[g] / self.reference_flux[g]
self.obs_flux[g] /= np.median(self.obs_flux[g[wh]])
fitline = np.polyfit(self.times[g][wh], self.obs_flux[g][wh], 1)
std_f[i] = np.max([np.std(self.obs_flux[g][wh]/(fitline[0]*self.times[g][wh]+fitline[1])), 0.001])
self.flux_uncert = std_f | [
"def",
"do_photometry",
"(",
"self",
")",
":",
"std_f",
"=",
"np",
".",
"zeros",
"(",
"4",
")",
"data_save",
"=",
"np",
".",
"zeros_like",
"(",
"self",
".",
"postcard",
")",
"self",
".",
"obs_flux",
"=",
"np",
".",
"zeros_like",
"(",
"self",
".",
"reference_flux",
")",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"g",
"=",
"np",
".",
"where",
"(",
"self",
".",
"qs",
"==",
"i",
")",
"[",
"0",
"]",
"wh",
"=",
"np",
".",
"where",
"(",
"self",
".",
"times",
"[",
"g",
"]",
">",
"54947",
")",
"data_save",
"[",
"g",
"]",
"=",
"np",
".",
"roll",
"(",
"self",
".",
"postcard",
"[",
"g",
"]",
",",
"int",
"(",
"self",
".",
"roll_best",
"[",
"i",
",",
"0",
"]",
")",
",",
"axis",
"=",
"1",
")",
"data_save",
"[",
"g",
"]",
"=",
"np",
".",
"roll",
"(",
"data_save",
"[",
"g",
"]",
",",
"int",
"(",
"self",
".",
"roll_best",
"[",
"i",
",",
"1",
"]",
")",
",",
"axis",
"=",
"2",
")",
"self",
".",
"target_flux_pixels",
"=",
"data_save",
"[",
":",
",",
"self",
".",
"targets",
"==",
"1",
"]",
"self",
".",
"target_flux",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"target_flux_pixels",
",",
"axis",
"=",
"1",
")",
"self",
".",
"obs_flux",
"[",
"g",
"]",
"=",
"self",
".",
"target_flux",
"[",
"g",
"]",
"/",
"self",
".",
"reference_flux",
"[",
"g",
"]",
"self",
".",
"obs_flux",
"[",
"g",
"]",
"/=",
"np",
".",
"median",
"(",
"self",
".",
"obs_flux",
"[",
"g",
"[",
"wh",
"]",
"]",
")",
"fitline",
"=",
"np",
".",
"polyfit",
"(",
"self",
".",
"times",
"[",
"g",
"]",
"[",
"wh",
"]",
",",
"self",
".",
"obs_flux",
"[",
"g",
"]",
"[",
"wh",
"]",
",",
"1",
")",
"std_f",
"[",
"i",
"]",
"=",
"np",
".",
"max",
"(",
"[",
"np",
".",
"std",
"(",
"self",
".",
"obs_flux",
"[",
"g",
"]",
"[",
"wh",
"]",
"/",
"(",
"fitline",
"[",
"0",
"]",
"*",
"self",
".",
"times",
"[",
"g",
"]",
"[",
"wh",
"]",
"+",
"fitline",
"[",
"1",
"]",
")",
")",
",",
"0.001",
"]",
")",
"self",
".",
"flux_uncert",
"=",
"std_f"
] | Does photometry and estimates uncertainties by calculating the scatter around a linear fit to the data
in each orientation. This function is called by other functions and generally the user will not need
to interact with it directly. | [
"Does",
"photometry",
"and",
"estimates",
"uncertainties",
"by",
"calculating",
"the",
"scatter",
"around",
"a",
"linear",
"fit",
"to",
"the",
"data",
"in",
"each",
"orientation",
".",
"This",
"function",
"is",
"called",
"by",
"other",
"functions",
"and",
"generally",
"the",
"user",
"will",
"not",
"need",
"to",
"interact",
"with",
"it",
"directly",
"."
] | python | valid |
sosy-lab/benchexec | benchexec/util.py | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/util.py#L84-L95 | def is_code(filename):
"""
This function returns True if a line of the file contains bracket '{'.
"""
with open(filename, "r") as file:
for line in file:
# ignore comments and empty lines
if not is_comment(line) \
and '{' in line: # <-- simple indicator for code
if '${' not in line: # <-- ${abc} variable to substitute
return True
return False | [
"def",
"is_code",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"file",
":",
"for",
"line",
"in",
"file",
":",
"# ignore comments and empty lines",
"if",
"not",
"is_comment",
"(",
"line",
")",
"and",
"'{'",
"in",
"line",
":",
"# <-- simple indicator for code",
"if",
"'${'",
"not",
"in",
"line",
":",
"# <-- ${abc} variable to substitute",
"return",
"True",
"return",
"False"
] | This function returns True if a line of the file contains bracket '{'. | [
"This",
"function",
"returns",
"True",
"if",
"a",
"line",
"of",
"the",
"file",
"contains",
"bracket",
"{",
"."
] | python | train |
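A runnable sketch of is_code; the record does not show is_comment(), so the version below is an assumed stand-in:

import os
import tempfile

def is_comment(line):
    # assumed helper: treat blank lines and //- or #-prefixed lines as comments
    stripped = line.strip()
    return stripped == "" or stripped.startswith("//") or stripped.startswith("#")

def is_code(filename):
    with open(filename, "r") as file:
        for line in file:
            if not is_comment(line) and '{' in line:  # simple indicator for code
                if '${' not in line:  # ${abc} variable to substitute
                    return True
    return False

with tempfile.NamedTemporaryFile("w", suffix=".c", delete=False) as f:
    f.write("int main() {\n    return 0;\n}\n")
print(is_code(f.name))  # True
os.unlink(f.name)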
pypa/pipenv | pipenv/patched/notpip/_vendor/ipaddress.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/ipaddress.py#L480-L502 | def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented | [
"def",
"get_mixed_type_key",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"_BaseNetwork",
")",
":",
"return",
"obj",
".",
"_get_networks_key",
"(",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"_BaseAddress",
")",
":",
"return",
"obj",
".",
"_get_address_key",
"(",
")",
"return",
"NotImplemented"
] | Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key. | [
"Return",
"a",
"key",
"suitable",
"for",
"sorting",
"between",
"networks",
"and",
"addresses",
"."
] | python | train |
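The docstring already states the intended use: pass this function as the key= argument to sorted() when a list mixes address and network objects. A short usage sketch via the standard-library ipaddress module, which ships the same helper as this vendored copy:

import ipaddress

mixed = [
    ipaddress.ip_network("192.0.2.0/24"),
    ipaddress.ip_address("192.0.2.1"),
    ipaddress.ip_address("192.0.2.0"),
]
# without the key function, this sort would raise TypeError
for obj in sorted(mixed, key=ipaddress.get_mixed_type_key):
    print(obj)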
readbeyond/aeneas | aeneas/tools/run_sd.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/tools/run_sd.py#L171-L203 | def print_result(self, audio_len, start, end):
"""
Print result of SD.
:param audio_len: the length of the entire audio file, in seconds
:type audio_len: float
:param start: the start position of the spoken text
:type start: float
:param end: the end position of the spoken text
:type end: float
"""
msg = []
zero = 0
head_len = start
text_len = end - start
tail_len = audio_len - end
msg.append(u"")
msg.append(u"Head: %.3f %.3f (%.3f)" % (zero, start, head_len))
msg.append(u"Text: %.3f %.3f (%.3f)" % (start, end, text_len))
msg.append(u"Tail: %.3f %.3f (%.3f)" % (end, audio_len, tail_len))
msg.append(u"")
zero_h = gf.time_to_hhmmssmmm(0)
start_h = gf.time_to_hhmmssmmm(start)
end_h = gf.time_to_hhmmssmmm(end)
audio_len_h = gf.time_to_hhmmssmmm(audio_len)
head_len_h = gf.time_to_hhmmssmmm(head_len)
text_len_h = gf.time_to_hhmmssmmm(text_len)
tail_len_h = gf.time_to_hhmmssmmm(tail_len)
msg.append("Head: %s %s (%s)" % (zero_h, start_h, head_len_h))
msg.append("Text: %s %s (%s)" % (start_h, end_h, text_len_h))
msg.append("Tail: %s %s (%s)" % (end_h, audio_len_h, tail_len_h))
msg.append(u"")
self.print_info(u"\n".join(msg)) | [
"def",
"print_result",
"(",
"self",
",",
"audio_len",
",",
"start",
",",
"end",
")",
":",
"msg",
"=",
"[",
"]",
"zero",
"=",
"0",
"head_len",
"=",
"start",
"text_len",
"=",
"end",
"-",
"start",
"tail_len",
"=",
"audio_len",
"-",
"end",
"msg",
".",
"append",
"(",
"u\"\"",
")",
"msg",
".",
"append",
"(",
"u\"Head: %.3f %.3f (%.3f)\"",
"%",
"(",
"zero",
",",
"start",
",",
"head_len",
")",
")",
"msg",
".",
"append",
"(",
"u\"Text: %.3f %.3f (%.3f)\"",
"%",
"(",
"start",
",",
"end",
",",
"text_len",
")",
")",
"msg",
".",
"append",
"(",
"u\"Tail: %.3f %.3f (%.3f)\"",
"%",
"(",
"end",
",",
"audio_len",
",",
"tail_len",
")",
")",
"msg",
".",
"append",
"(",
"u\"\"",
")",
"zero_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"0",
")",
"start_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"start",
")",
"end_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"end",
")",
"audio_len_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"audio_len",
")",
"head_len_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"head_len",
")",
"text_len_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"text_len",
")",
"tail_len_h",
"=",
"gf",
".",
"time_to_hhmmssmmm",
"(",
"tail_len",
")",
"msg",
".",
"append",
"(",
"\"Head: %s %s (%s)\"",
"%",
"(",
"zero_h",
",",
"start_h",
",",
"head_len_h",
")",
")",
"msg",
".",
"append",
"(",
"\"Text: %s %s (%s)\"",
"%",
"(",
"start_h",
",",
"end_h",
",",
"text_len_h",
")",
")",
"msg",
".",
"append",
"(",
"\"Tail: %s %s (%s)\"",
"%",
"(",
"end_h",
",",
"audio_len_h",
",",
"tail_len_h",
")",
")",
"msg",
".",
"append",
"(",
"u\"\"",
")",
"self",
".",
"print_info",
"(",
"u\"\\n\"",
".",
"join",
"(",
"msg",
")",
")"
] | Print result of SD.
:param audio_len: the length of the entire audio file, in seconds
:type audio_len: float
:param start: the start position of the spoken text
:type start: float
:param end: the end position of the spoken text
:type end: float | [
"Print",
"result",
"of",
"SD",
"."
] | python | train |
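The method above is mostly bookkeeping: head spans [0, start], text spans [start, end], tail spans [end, audio_len], each printed in seconds and again in HH:MM:SS.mmm. A sketch of that arithmetic with a stand-in formatter for aeneas' gf.time_to_hhmmssmmm (the real helper lives in aeneas' global functions; this reimplementation is an assumption):

def hhmmssmmm(seconds):
    # format seconds as HH:MM:SS.mmm
    ms = int(round(seconds * 1000))
    h, ms = divmod(ms, 3600000)
    m, ms = divmod(ms, 60000)
    s, ms = divmod(ms, 1000)
    return "%02d:%02d:%02d.%03d" % (h, m, s, ms)

audio_len, start, end = 53.240, 0.480, 52.680
print("Head: %s %s (%s)" % (hhmmssmmm(0), hhmmssmmm(start), hhmmssmmm(start)))
print("Text: %s %s (%s)" % (hhmmssmmm(start), hhmmssmmm(end), hhmmssmmm(end - start)))
print("Tail: %s %s (%s)" % (hhmmssmmm(end), hhmmssmmm(audio_len), hhmmssmmm(audio_len - end)))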
DLR-RM/RAFCON | source/rafcon/gui/controllers/state_machines_editor.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_machines_editor.py#L134-L141 | def register_view(self, view):
"""Called when the View was registered"""
super(StateMachinesEditorController, self).register_view(view)
self.view['notebook'].connect('switch-page', self.on_switch_page)
# Add all already open state machines
for state_machine in self.model.state_machines.values():
self.add_graphical_state_machine_editor(state_machine) | [
"def",
"register_view",
"(",
"self",
",",
"view",
")",
":",
"super",
"(",
"StateMachinesEditorController",
",",
"self",
")",
".",
"register_view",
"(",
"view",
")",
"self",
".",
"view",
"[",
"'notebook'",
"]",
".",
"connect",
"(",
"'switch-page'",
",",
"self",
".",
"on_switch_page",
")",
"# Add all already open state machines",
"for",
"state_machine",
"in",
"self",
".",
"model",
".",
"state_machines",
".",
"values",
"(",
")",
":",
"self",
".",
"add_graphical_state_machine_editor",
"(",
"state_machine",
")"
] | Called when the View was registered | [
"Called",
"when",
"the",
"View",
"was",
"registered"
] | python | train |
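Stripped of the controller machinery, the pattern here is a plain GTK signal hookup: connect the notebook's 'switch-page' signal to a handler, then populate tabs from the already-open models. A minimal PyGObject sketch of just that signal wiring (GTK 3 assumed; this is not RAFCON code):

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

def on_switch_page(notebook, page, page_num):
    # GtkNotebook's 'switch-page' handler receives the page widget and index
    print("switched to tab", page_num)

notebook = Gtk.Notebook()
notebook.connect("switch-page", on_switch_page)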
TomAugspurger/engarde | docs/sphinxext/ipython_directive.py | https://github.com/TomAugspurger/engarde/blob/e7ea040cf0d20aee7ca4375b8c27caa2d9e43945/docs/sphinxext/ipython_directive.py#L386-L499 | def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
# based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive) | [
"def",
"process_input",
"(",
"self",
",",
"data",
",",
"input_prompt",
",",
"lineno",
")",
":",
"decorator",
",",
"input",
",",
"rest",
"=",
"data",
"image_file",
"=",
"None",
"image_directive",
"=",
"None",
"is_verbatim",
"=",
"decorator",
"==",
"'@verbatim'",
"or",
"self",
".",
"is_verbatim",
"is_doctest",
"=",
"(",
"decorator",
"is",
"not",
"None",
"and",
"decorator",
".",
"startswith",
"(",
"'@doctest'",
")",
")",
"or",
"self",
".",
"is_doctest",
"is_suppress",
"=",
"decorator",
"==",
"'@suppress'",
"or",
"self",
".",
"is_suppress",
"is_okexcept",
"=",
"decorator",
"==",
"'@okexcept'",
"or",
"self",
".",
"is_okexcept",
"is_okwarning",
"=",
"decorator",
"==",
"'@okwarning'",
"or",
"self",
".",
"is_okwarning",
"is_savefig",
"=",
"decorator",
"is",
"not",
"None",
"and",
"decorator",
".",
"startswith",
"(",
"'@savefig'",
")",
"# set the encodings to be used by DecodingStringIO",
"# to convert the execution output into unicode if",
"# needed. this attrib is set by IpythonDirective.run()",
"# based on the specified block options, defaulting to ['ut",
"self",
".",
"cout",
".",
"set_encodings",
"(",
"self",
".",
"output_encoding",
")",
"input_lines",
"=",
"input",
".",
"split",
"(",
"'\\n'",
")",
"if",
"len",
"(",
"input_lines",
")",
">",
"1",
":",
"if",
"input_lines",
"[",
"-",
"1",
"]",
"!=",
"\"\"",
":",
"input_lines",
".",
"append",
"(",
"''",
")",
"# make sure there's a blank line",
"# so splitter buffer gets reset",
"continuation",
"=",
"' %s:'",
"%",
"''",
".",
"join",
"(",
"[",
"'.'",
"]",
"*",
"(",
"len",
"(",
"str",
"(",
"lineno",
")",
")",
"+",
"2",
")",
")",
"if",
"is_savefig",
":",
"image_file",
",",
"image_directive",
"=",
"self",
".",
"process_image",
"(",
"decorator",
")",
"ret",
"=",
"[",
"]",
"is_semicolon",
"=",
"False",
"# Hold the execution count, if requested to do so.",
"if",
"is_suppress",
"and",
"self",
".",
"hold_count",
":",
"store_history",
"=",
"False",
"else",
":",
"store_history",
"=",
"True",
"# Note: catch_warnings is not thread safe",
"with",
"warnings",
".",
"catch_warnings",
"(",
"record",
"=",
"True",
")",
"as",
"ws",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"input_lines",
")",
":",
"if",
"line",
".",
"endswith",
"(",
"';'",
")",
":",
"is_semicolon",
"=",
"True",
"if",
"i",
"==",
"0",
":",
"# process the first input line",
"if",
"is_verbatim",
":",
"self",
".",
"process_input_line",
"(",
"''",
")",
"self",
".",
"IP",
".",
"execution_count",
"+=",
"1",
"# increment it anyway",
"else",
":",
"# only submit the line in non-verbatim mode",
"self",
".",
"process_input_line",
"(",
"line",
",",
"store_history",
"=",
"store_history",
")",
"formatted_line",
"=",
"'%s %s'",
"%",
"(",
"input_prompt",
",",
"line",
")",
"else",
":",
"# process a continuation line",
"if",
"not",
"is_verbatim",
":",
"self",
".",
"process_input_line",
"(",
"line",
",",
"store_history",
"=",
"store_history",
")",
"formatted_line",
"=",
"'%s %s'",
"%",
"(",
"continuation",
",",
"line",
")",
"if",
"not",
"is_suppress",
":",
"ret",
".",
"append",
"(",
"formatted_line",
")",
"if",
"not",
"is_suppress",
"and",
"len",
"(",
"rest",
".",
"strip",
"(",
")",
")",
"and",
"is_verbatim",
":",
"# the \"rest\" is the standard output of the",
"# input, which needs to be added in",
"# verbatim mode",
"ret",
".",
"append",
"(",
"rest",
")",
"self",
".",
"cout",
".",
"seek",
"(",
"0",
")",
"output",
"=",
"self",
".",
"cout",
".",
"read",
"(",
")",
"if",
"not",
"is_suppress",
"and",
"not",
"is_semicolon",
":",
"ret",
".",
"append",
"(",
"output",
")",
"elif",
"is_semicolon",
":",
"# get spacing right",
"ret",
".",
"append",
"(",
"''",
")",
"# context information",
"filename",
"=",
"self",
".",
"state",
".",
"document",
".",
"current_source",
"lineno",
"=",
"self",
".",
"state",
".",
"document",
".",
"current_line",
"# output any exceptions raised during execution to stdout",
"# unless :okexcept: has been specified.",
"if",
"not",
"is_okexcept",
"and",
"\"Traceback\"",
"in",
"output",
":",
"s",
"=",
"\"\\nException in %s at block ending on line %s\\n\"",
"%",
"(",
"filename",
",",
"lineno",
")",
"s",
"+=",
"\"Specify :okexcept: as an option in the ipython:: block to suppress this message\\n\"",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n\\n>>>'",
"+",
"(",
"'-'",
"*",
"73",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"s",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"output",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'<<<'",
"+",
"(",
"'-'",
"*",
"73",
")",
"+",
"'\\n\\n'",
")",
"# output any warning raised during execution to stdout",
"# unless :okwarning: has been specified.",
"if",
"not",
"is_okwarning",
":",
"for",
"w",
"in",
"ws",
":",
"s",
"=",
"\"\\nWarning in %s at block ending on line %s\\n\"",
"%",
"(",
"filename",
",",
"lineno",
")",
"s",
"+=",
"\"Specify :okwarning: as an option in the ipython:: block to suppress this message\\n\"",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n\\n>>>'",
"+",
"(",
"'-'",
"*",
"73",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"s",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'-'",
"*",
"76",
"+",
"'\\n'",
")",
"s",
"=",
"warnings",
".",
"formatwarning",
"(",
"w",
".",
"message",
",",
"w",
".",
"category",
",",
"w",
".",
"filename",
",",
"w",
".",
"lineno",
",",
"w",
".",
"line",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"s",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'<<<'",
"+",
"(",
"'-'",
"*",
"73",
")",
"+",
"'\\n'",
")",
"self",
".",
"cout",
".",
"truncate",
"(",
"0",
")",
"return",
"(",
"ret",
",",
"input_lines",
",",
"output",
",",
"is_doctest",
",",
"decorator",
",",
"image_file",
",",
"image_directive",
")"
] | Process data block for INPUT token. | [
"Process",
"data",
"block",
"for",
"INPUT",
"token",
"."
] | python | train |
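Two mechanics in process_input are easy to miss in the token stream: block decorators such as @suppress or @savefig are mapped to boolean flags, and continuation lines are prefixed with a dotted prompt sized to the line number. An illustrative sketch of both, with simplified names (the real directive handles many more cases):

def parse_decorator(decorator):
    # map an optional @decorator string to the flags used above
    return {
        "verbatim": decorator == "@verbatim",
        "suppress": decorator == "@suppress",
        "doctest": decorator is not None and decorator.startswith("@doctest"),
        "savefig": decorator is not None and decorator.startswith("@savefig"),
    }

def format_block(lineno, lines):
    prompt = "In [%d]:" % lineno
    # same sizing rule as the source: one dot per digit, plus two
    continuation = " %s:" % ("." * (len(str(lineno)) + 2))
    out = ["%s %s" % (prompt, lines[0])]
    out += ["%s %s" % (continuation, line) for line in lines[1:]]
    return out

print(parse_decorator("@savefig plot.png")["savefig"])  # True
print("\n".join(format_block(7, ["for i in range(2):", "    print(i)"])))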
jtwhite79/pyemu | pyemu/en.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/en.py#L478-L494 | def from_binary(cls,pst,filename):
"""instantiate an observation obsemble from a jco-type file
Parameters
----------
pst : pyemu.Pst
a Pst instance
filename : str
the binary file name
Returns
-------
oe : ObservationEnsemble
"""
m = Matrix.from_binary(filename)
return ObservationEnsemble(data=m.x,pst=pst, index=m.row_names) | [
"def",
"from_binary",
"(",
"cls",
",",
"pst",
",",
"filename",
")",
":",
"m",
"=",
"Matrix",
".",
"from_binary",
"(",
"filename",
")",
"return",
"ObservationEnsemble",
"(",
"data",
"=",
"m",
".",
"x",
",",
"pst",
"=",
"pst",
",",
"index",
"=",
"m",
".",
"row_names",
")"
] | instantiate an observation ensemble from a jco-type file
Parameters
----------
pst : pyemu.Pst
a Pst instance
filename : str
the binary file name
Returns
-------
oe : ObservationEnsemble | [
"instantiate",
"an",
"observation",
"obsemble",
"from",
"a",
"jco",
"-",
"type",
"file"
] | python | train |
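A hedged usage sketch for the classmethod above. The file names are placeholders, and it assumes pyemu exposes Pst and ObservationEnsemble at package level:

import pyemu

# control file and jco-style binary are placeholders, not real files
pst = pyemu.Pst("model.pst")
oe = pyemu.ObservationEnsemble.from_binary(pst, "obs_ensemble.jcb")
print(oe.shape)  # (n_realizations, n_observations)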