repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses 1 value) | partition (stringclasses 3 values)
---|---|---|---|---|---|---|---|---|
hardbyte/python-can | can/interfaces/systec/ucan.py | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/systec/ucan.py#L548-L559 | def get_msg_pending(self, channel, flags):
"""
Returns the number of pending CAN messages.
:param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:param int flags: Flags specifies which buffers should be checked (see enum :class:`PendingFlags`).
:return: The number of pending messages.
:rtype: int
"""
count = DWORD(0)
UcanGetMsgPending(self._handle, channel, flags, byref(count))
return count.value | [
"def",
"get_msg_pending",
"(",
"self",
",",
"channel",
",",
"flags",
")",
":",
"count",
"=",
"DWORD",
"(",
"0",
")",
"UcanGetMsgPending",
"(",
"self",
".",
"_handle",
",",
"channel",
",",
"flags",
",",
"byref",
"(",
"count",
")",
")",
"return",
"count",
".",
"value"
] | Returns the number of pending CAN messages.
:param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:param int flags: Flags specifies which buffers should be checked (see enum :class:`PendingFlags`).
:return: The number of pending messages.
:rtype: int | [
"Returns",
"the",
"number",
"of",
"pending",
"CAN",
"messages",
"."
] | python | train |
minhhoit/yacms | yacms/accounts/views.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/accounts/views.py#L103-L111 | def profile(request, username, template="accounts/account_profile.html",
            extra_context=None):
    """
    Display a profile.
    """
    lookup = {"username__iexact": username, "is_active": True}
    context = {"profile_user": get_object_or_404(User, **lookup)}
    context.update(extra_context or {})
    return TemplateResponse(request, template, context) | [
"def",
"profile",
"(",
"request",
",",
"username",
",",
"template",
"=",
"\"accounts/account_profile.html\"",
",",
"extra_context",
"=",
"None",
")",
":",
"lookup",
"=",
"{",
"\"username__iexact\"",
":",
"username",
",",
"\"is_active\"",
":",
"True",
"}",
"context",
"=",
"{",
"\"profile_user\"",
":",
"get_object_or_404",
"(",
"User",
",",
"*",
"*",
"lookup",
")",
"}",
"context",
".",
"update",
"(",
"extra_context",
"or",
"{",
"}",
")",
"return",
"TemplateResponse",
"(",
"request",
",",
"template",
",",
"context",
")"
] | Display a profile. | [
"Display",
"a",
"profile",
"."
] | python | train |
tk0miya/tk.phpautodoc | src/phply/phpparse.py | https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L477-L483 | def p_class_declaration_statement(p):
    '''class_declaration_statement : class_entry_type STRING extends_from implements_list LBRACE class_statement_list RBRACE
                                    | INTERFACE STRING interface_extends_list LBRACE class_statement_list RBRACE'''
    if len(p) == 8:
        p[0] = ast.Class(p[2], p[1], p[3], p[4], p[6], lineno=p.lineno(2))
    else:
        p[0] = ast.Interface(p[2], p[3], p[5], lineno=p.lineno(1)) | [
"def",
"p_class_declaration_statement",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"8",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Class",
"(",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"p",
"[",
"4",
"]",
",",
"p",
"[",
"6",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"2",
")",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Interface",
"(",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"3",
"]",
",",
"p",
"[",
"5",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | class_declaration_statement : class_entry_type STRING extends_from implements_list LBRACE class_statement_list RBRACE
| INTERFACE STRING interface_extends_list LBRACE class_statement_list RBRACE | [
"class_declaration_statement",
":",
"class_entry_type",
"STRING",
"extends_from",
"implements_list",
"LBRACE",
"class_statement_list",
"RBRACE",
"|",
"INTERFACE",
"STRING",
"interface_extends_list",
"LBRACE",
"class_statement_list",
"RBRACE"
] | python | train |
takuti/flurs | flurs/datasets/movielens.py | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/datasets/movielens.py#L151-L167 | def delta(d1, d2, opt='d'):
"""Compute difference between given 2 dates in month/day.
"""
delta = 0
if opt == 'm':
while True:
mdays = monthrange(d1.year, d1.month)[1]
d1 += timedelta(days=mdays)
if d1 <= d2:
delta += 1
else:
break
else:
delta = (d2 - d1).days
return delta | [
"def",
"delta",
"(",
"d1",
",",
"d2",
",",
"opt",
"=",
"'d'",
")",
":",
"delta",
"=",
"0",
"if",
"opt",
"==",
"'m'",
":",
"while",
"True",
":",
"mdays",
"=",
"monthrange",
"(",
"d1",
".",
"year",
",",
"d1",
".",
"month",
")",
"[",
"1",
"]",
"d1",
"+=",
"timedelta",
"(",
"days",
"=",
"mdays",
")",
"if",
"d1",
"<=",
"d2",
":",
"delta",
"+=",
"1",
"else",
":",
"break",
"else",
":",
"delta",
"=",
"(",
"d2",
"-",
"d1",
")",
".",
"days",
"return",
"delta"
] | Compute difference between given 2 dates in month/day. | [
"Compute",
"difference",
"between",
"given",
"2",
"dates",
"in",
"month",
"/",
"day",
"."
] | python | train |
cackharot/suds-py3 | suds/bindings/rpc.py | https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/bindings/rpc.py#L89-L98 | def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxEncoded(self.schema())
else:
return RPC.unmarshaller(self, typed) | [
"def",
"unmarshaller",
"(",
"self",
",",
"typed",
"=",
"True",
")",
":",
"if",
"typed",
":",
"return",
"UmxEncoded",
"(",
"self",
".",
"schema",
"(",
")",
")",
"else",
":",
"return",
"RPC",
".",
"unmarshaller",
"(",
"self",
",",
"typed",
")"
] | Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped} | [
"Get",
"the",
"appropriate",
"XML",
"decoder",
"."
] | python | train |
saltstack/salt | salt/modules/kubernetesmod.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1551-L1560 | def __dict_to_pod_spec(spec):
    '''
    Converts a dictionary into kubernetes V1PodSpec instance.
    '''
    spec_obj = kubernetes.client.V1PodSpec()
    for key, value in iteritems(spec):
        if hasattr(spec_obj, key):
            setattr(spec_obj, key, value)
    return spec_obj | [
"def",
"__dict_to_pod_spec",
"(",
"spec",
")",
":",
"spec_obj",
"=",
"kubernetes",
".",
"client",
".",
"V1PodSpec",
"(",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"spec",
")",
":",
"if",
"hasattr",
"(",
"spec_obj",
",",
"key",
")",
":",
"setattr",
"(",
"spec_obj",
",",
"key",
",",
"value",
")",
"return",
"spec_obj"
] | Converts a dictionary into kubernetes V1PodSpec instance. | [
"Converts",
"a",
"dictionary",
"into",
"kubernetes",
"V1PodSpec",
"instance",
"."
] | python | train |
woolfson-group/isambard | isambard/ampal/base_ampal.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/base_ampal.py#L22-L39 | def find_atoms_within_distance(atoms, cutoff_distance, point):
"""Returns atoms within the distance from the point.
Parameters
----------
atoms : [ampal.atom]
A list of `ampal.atoms`.
cutoff_distance : float
Maximum distance from point.
point : (float, float, float)
Reference point, 3D coordinate.
Returns
-------
filtered_atoms : [ampal.atoms]
`atoms` list filtered by distance.
"""
return [x for x in atoms if distance(x, point) <= cutoff_distance] | [
"def",
"find_atoms_within_distance",
"(",
"atoms",
",",
"cutoff_distance",
",",
"point",
")",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"atoms",
"if",
"distance",
"(",
"x",
",",
"point",
")",
"<=",
"cutoff_distance",
"]"
] | Returns atoms within the distance from the point.
Parameters
----------
atoms : [ampal.atom]
A list of `ampal.atoms`.
cutoff_distance : float
Maximum distance from point.
point : (float, float, float)
Reference point, 3D coordinate.
Returns
-------
filtered_atoms : [ampal.atoms]
`atoms` list filtered by distance. | [
"Returns",
"atoms",
"within",
"the",
"distance",
"from",
"the",
"point",
"."
] | python | train |
CityOfZion/neo-python | neo/Core/TX/Transaction.py | https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/Transaction.py#L124-L133 | def Serialize(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
writer.WriteUInt256(self.AssetId)
writer.WriteFixed8(self.Value)
writer.WriteUInt160(self.ScriptHash) | [
"def",
"Serialize",
"(",
"self",
",",
"writer",
")",
":",
"writer",
".",
"WriteUInt256",
"(",
"self",
".",
"AssetId",
")",
"writer",
".",
"WriteFixed8",
"(",
"self",
".",
"Value",
")",
"writer",
".",
"WriteUInt160",
"(",
"self",
".",
"ScriptHash",
")"
] | Serialize object.
Args:
writer (neo.IO.BinaryWriter): | [
"Serialize",
"object",
"."
] | python | train |
floydhub/floyd-cli | floyd/cli/auth.py | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/auth.py#L77-L98 | def login(token, apikey, username, password):
"""
Login to FloydHub.
"""
if manual_login_success(token, username, password):
return
if not apikey:
if has_browser():
apikey = wait_for_apikey()
else:
floyd_logger.error(
"No browser found, please login manually by creating login key at %s/settings/apikey.",
floyd.floyd_web_host)
sys.exit(1)
if apikey:
user = AuthClient().get_user(apikey, is_apikey=True)
AuthConfigManager.set_apikey(username=user.username, apikey=apikey)
floyd_logger.info("Login Successful as %s", user.username)
else:
floyd_logger.error("Login failed, please see --help for other login options.") | [
"def",
"login",
"(",
"token",
",",
"apikey",
",",
"username",
",",
"password",
")",
":",
"if",
"manual_login_success",
"(",
"token",
",",
"username",
",",
"password",
")",
":",
"return",
"if",
"not",
"apikey",
":",
"if",
"has_browser",
"(",
")",
":",
"apikey",
"=",
"wait_for_apikey",
"(",
")",
"else",
":",
"floyd_logger",
".",
"error",
"(",
"\"No browser found, please login manually by creating login key at %s/settings/apikey.\"",
",",
"floyd",
".",
"floyd_web_host",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"apikey",
":",
"user",
"=",
"AuthClient",
"(",
")",
".",
"get_user",
"(",
"apikey",
",",
"is_apikey",
"=",
"True",
")",
"AuthConfigManager",
".",
"set_apikey",
"(",
"username",
"=",
"user",
".",
"username",
",",
"apikey",
"=",
"apikey",
")",
"floyd_logger",
".",
"info",
"(",
"\"Login Successful as %s\"",
",",
"user",
".",
"username",
")",
"else",
":",
"floyd_logger",
".",
"error",
"(",
"\"Login failed, please see --help for other login options.\"",
")"
] | Login to FloydHub. | [
"Login",
"to",
"FloydHub",
"."
] | python | train |
jxtech/wechatpy | wechatpy/client/api/tag.py | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/tag.py#L169-L185 | def get_black_list(self, begin_openid=None):
"""
获取公众号的黑名单列表
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1471422259_pJMWA
:param begin_openid: 起始的 OpenID,传空则默认从头开始拉取
:return: 返回的 JSON 数据包
:rtype: dict
"""
data = {}
if begin_openid:
data['begin_openid'] = begin_openid
return self._post(
'tags/members/getblacklist',
data=data,
) | [
"def",
"get_black_list",
"(",
"self",
",",
"begin_openid",
"=",
"None",
")",
":",
"data",
"=",
"{",
"}",
"if",
"begin_openid",
":",
"data",
"[",
"'begin_openid'",
"]",
"=",
"begin_openid",
"return",
"self",
".",
"_post",
"(",
"'tags/members/getblacklist'",
",",
"data",
"=",
"data",
",",
")"
] | 获取公众号的黑名单列表
详情请参考
https://mp.weixin.qq.com/wiki?id=mp1471422259_pJMWA
:param begin_openid: 起始的 OpenID,传空则默认从头开始拉取
:return: 返回的 JSON 数据包
:rtype: dict | [
"获取公众号的黑名单列表",
"详情请参考",
"https",
":",
"//",
"mp",
".",
"weixin",
".",
"qq",
".",
"com",
"/",
"wiki?id",
"=",
"mp1471422259_pJMWA"
] | python | train |
aboSamoor/polyglot | polyglot/downloader.py | https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/downloader.py#L1280-L1344 | def build_index(root, base_url):
"""
Create a new data.xml index file, by combining the xml description
files for various packages and collections. ``root`` should be the
path to a directory containing the package xml and zip files; and
the collection xml files. The ``root`` directory is expected to
have the following subdirectories::
root/
packages/ .................. subdirectory for packages
corpora/ ................. zip & xml files for corpora
grammars/ ................ zip & xml files for grammars
taggers/ ................. zip & xml files for taggers
tokenizers/ .............. zip & xml files for tokenizers
etc.
collections/ ............... xml files for collections
For each package, there should be two files: ``package.zip``
(where *package* is the package name)
which contains the package itself as a compressed zip file; and
``package.xml``, which is an xml description of the package. The
zipfile ``package.zip`` should expand to a single subdirectory
named ``package/``. The base filename ``package`` must match
the identifier given in the package's xml file.
For each collection, there should be a single file ``collection.zip``
describing the collection, where *collection* is the name of the collection.
All identifiers (for both packages and collections) must be unique.
"""
# Find all packages.
packages = []
for pkg_xml, zf, subdir in _find_packages(os.path.join(root, 'packages')):
zipstat = os.stat(zf.filename)
url = '%s/%s/%s' % (base_url, subdir, os.path.split(zf.filename)[1])
unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())
# Fill in several fields of the package xml with calculated values.
pkg_xml.set('unzipped_size', '%s' % unzipped_size)
pkg_xml.set('size', '%s' % zipstat.st_size)
pkg_xml.set('subdir', subdir)
pkg_xml.set('url', url)
# Record the package.
packages.append(pkg_xml)
# Find all collections
collections = list(_find_collections(os.path.join(root, 'collections')))
# Check that all UIDs are unique
uids = set()
for item in packages+collections:
if item.get('id') in uids:
raise ValueError('Duplicate UID: %s' % item.get('id'))
uids.add(item.get('id'))
# Put it all together
top_elt = ElementTree.Element('polyglot_data')
top_elt.append(ElementTree.Element('packages'))
for package in packages: top_elt[0].append(package)
top_elt.append(ElementTree.Element('collections'))
for collection in collections: top_elt[1].append(collection)
_indent_xml(top_elt)
return top_elt | [
"def",
"build_index",
"(",
"root",
",",
"base_url",
")",
":",
"# Find all packages.",
"packages",
"=",
"[",
"]",
"for",
"pkg_xml",
",",
"zf",
",",
"subdir",
"in",
"_find_packages",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'packages'",
")",
")",
":",
"zipstat",
"=",
"os",
".",
"stat",
"(",
"zf",
".",
"filename",
")",
"url",
"=",
"'%s/%s/%s'",
"%",
"(",
"base_url",
",",
"subdir",
",",
"os",
".",
"path",
".",
"split",
"(",
"zf",
".",
"filename",
")",
"[",
"1",
"]",
")",
"unzipped_size",
"=",
"sum",
"(",
"zf_info",
".",
"file_size",
"for",
"zf_info",
"in",
"zf",
".",
"infolist",
"(",
")",
")",
"# Fill in several fields of the package xml with calculated values.",
"pkg_xml",
".",
"set",
"(",
"'unzipped_size'",
",",
"'%s'",
"%",
"unzipped_size",
")",
"pkg_xml",
".",
"set",
"(",
"'size'",
",",
"'%s'",
"%",
"zipstat",
".",
"st_size",
")",
"pkg_xml",
".",
"set",
"(",
"'subdir'",
",",
"subdir",
")",
"pkg_xml",
".",
"set",
"(",
"'url'",
",",
"url",
")",
"# Record the package.",
"packages",
".",
"append",
"(",
"pkg_xml",
")",
"# Find all collections",
"collections",
"=",
"list",
"(",
"_find_collections",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'collections'",
")",
")",
")",
"# Check that all UIDs are unique",
"uids",
"=",
"set",
"(",
")",
"for",
"item",
"in",
"packages",
"+",
"collections",
":",
"if",
"item",
".",
"get",
"(",
"'id'",
")",
"in",
"uids",
":",
"raise",
"ValueError",
"(",
"'Duplicate UID: %s'",
"%",
"item",
".",
"get",
"(",
"'id'",
")",
")",
"uids",
".",
"add",
"(",
"item",
".",
"get",
"(",
"'id'",
")",
")",
"# Put it all together",
"top_elt",
"=",
"ElementTree",
".",
"Element",
"(",
"'polyglot_data'",
")",
"top_elt",
".",
"append",
"(",
"ElementTree",
".",
"Element",
"(",
"'packages'",
")",
")",
"for",
"package",
"in",
"packages",
":",
"top_elt",
"[",
"0",
"]",
".",
"append",
"(",
"package",
")",
"top_elt",
".",
"append",
"(",
"ElementTree",
".",
"Element",
"(",
"'collections'",
")",
")",
"for",
"collection",
"in",
"collections",
":",
"top_elt",
"[",
"1",
"]",
".",
"append",
"(",
"collection",
")",
"_indent_xml",
"(",
"top_elt",
")",
"return",
"top_elt"
] | Create a new data.xml index file, by combining the xml description
files for various packages and collections. ``root`` should be the
path to a directory containing the package xml and zip files; and
the collection xml files. The ``root`` directory is expected to
have the following subdirectories::
root/
packages/ .................. subdirectory for packages
corpora/ ................. zip & xml files for corpora
grammars/ ................ zip & xml files for grammars
taggers/ ................. zip & xml files for taggers
tokenizers/ .............. zip & xml files for tokenizers
etc.
collections/ ............... xml files for collections
For each package, there should be two files: ``package.zip``
(where *package* is the package name)
which contains the package itself as a compressed zip file; and
``package.xml``, which is an xml description of the package. The
zipfile ``package.zip`` should expand to a single subdirectory
named ``package/``. The base filename ``package`` must match
the identifier given in the package's xml file.
For each collection, there should be a single file ``collection.zip``
describing the collection, where *collection* is the name of the collection.
All identifiers (for both packages and collections) must be unique. | [
"Create",
"a",
"new",
"data",
".",
"xml",
"index",
"file",
"by",
"combining",
"the",
"xml",
"description",
"files",
"for",
"various",
"packages",
"and",
"collections",
".",
"root",
"should",
"be",
"the",
"path",
"to",
"a",
"directory",
"containing",
"the",
"package",
"xml",
"and",
"zip",
"files",
";",
"and",
"the",
"collection",
"xml",
"files",
".",
"The",
"root",
"directory",
"is",
"expected",
"to",
"have",
"the",
"following",
"subdirectories",
"::"
] | python | train |
HazyResearch/fonduer | src/fonduer/learning/disc_models/sparse_logistic_regression.py | https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/sparse_logistic_regression.py#L134-L150 | def _update_settings(self, X):
"""
Update the model argument.
:param X: The input data of the model.
:type X: list of (candidate, features) pair
"""
self.logger.info("Loading default parameters for Sparse Logistic Regression")
config = get_config()["learning"]["SparseLogisticRegression"]
for key in config.keys():
if key not in self.settings:
self.settings[key] = config[key]
# Add one feature for padding vector (all 0s)
self.settings["input_dim"] = X[1].shape[1] + 1 | [
"def",
"_update_settings",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Loading default parameters for Sparse Logistic Regression\"",
")",
"config",
"=",
"get_config",
"(",
")",
"[",
"\"learning\"",
"]",
"[",
"\"SparseLogisticRegression\"",
"]",
"for",
"key",
"in",
"config",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"settings",
":",
"self",
".",
"settings",
"[",
"key",
"]",
"=",
"config",
"[",
"key",
"]",
"# Add one feature for padding vector (all 0s)",
"self",
".",
"settings",
"[",
"\"input_dim\"",
"]",
"=",
"X",
"[",
"1",
"]",
".",
"shape",
"[",
"1",
"]",
"+",
"1"
] | Update the model argument.
:param X: The input data of the model.
:type X: list of (candidate, features) pair | [
"Update",
"the",
"model",
"argument",
"."
] | python | train |
rapidpro/expressions | python/temba_expressions/evaluator.py | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L475-L481 | def visitConcatenation(self, ctx):
"""
expression: expression AMPERSAND expression
"""
arg1 = conversions.to_string(self.visit(ctx.expression(0)), self._eval_context)
arg2 = conversions.to_string(self.visit(ctx.expression(1)), self._eval_context)
return arg1 + arg2 | [
"def",
"visitConcatenation",
"(",
"self",
",",
"ctx",
")",
":",
"arg1",
"=",
"conversions",
".",
"to_string",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"0",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"arg2",
"=",
"conversions",
".",
"to_string",
"(",
"self",
".",
"visit",
"(",
"ctx",
".",
"expression",
"(",
"1",
")",
")",
",",
"self",
".",
"_eval_context",
")",
"return",
"arg1",
"+",
"arg2"
] | expression: expression AMPERSAND expression | [
"expression",
":",
"expression",
"AMPERSAND",
"expression"
] | python | train |
ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_map/srtm.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/srtm.py#L121-L131 | def createFileList(self):
"""SRTM data is split into different directories, get a list of all of
them and create a dictionary for easy lookup."""
global childFileListDownload
global filelistDownloadActive
mypid = os.getpid()
if mypid not in childFileListDownload or not childFileListDownload[mypid].is_alive():
childFileListDownload[mypid] = multiproc.Process(target=self.createFileListHTTP)
filelistDownloadActive = 1
childFileListDownload[mypid].start()
filelistDownloadActive = 0 | [
"def",
"createFileList",
"(",
"self",
")",
":",
"global",
"childFileListDownload",
"global",
"filelistDownloadActive",
"mypid",
"=",
"os",
".",
"getpid",
"(",
")",
"if",
"mypid",
"not",
"in",
"childFileListDownload",
"or",
"not",
"childFileListDownload",
"[",
"mypid",
"]",
".",
"is_alive",
"(",
")",
":",
"childFileListDownload",
"[",
"mypid",
"]",
"=",
"multiproc",
".",
"Process",
"(",
"target",
"=",
"self",
".",
"createFileListHTTP",
")",
"filelistDownloadActive",
"=",
"1",
"childFileListDownload",
"[",
"mypid",
"]",
".",
"start",
"(",
")",
"filelistDownloadActive",
"=",
"0"
] | SRTM data is split into different directories, get a list of all of
them and create a dictionary for easy lookup. | [
"SRTM",
"data",
"is",
"split",
"into",
"different",
"directories",
"get",
"a",
"list",
"of",
"all",
"of",
"them",
"and",
"create",
"a",
"dictionary",
"for",
"easy",
"lookup",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/pipeline/variation.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/variation.py#L65-L75 | def _normalize_vc_input(data):
"""Normalize different types of variant calling inputs.
Handles standard and ensemble inputs.
"""
if data.get("ensemble"):
for k in ["batch_samples", "validate", "vrn_file"]:
data[k] = data["ensemble"][k]
data["config"]["algorithm"]["variantcaller"] = "ensemble"
data["metadata"] = {"batch": data["ensemble"]["batch_id"]}
return data | [
"def",
"_normalize_vc_input",
"(",
"data",
")",
":",
"if",
"data",
".",
"get",
"(",
"\"ensemble\"",
")",
":",
"for",
"k",
"in",
"[",
"\"batch_samples\"",
",",
"\"validate\"",
",",
"\"vrn_file\"",
"]",
":",
"data",
"[",
"k",
"]",
"=",
"data",
"[",
"\"ensemble\"",
"]",
"[",
"k",
"]",
"data",
"[",
"\"config\"",
"]",
"[",
"\"algorithm\"",
"]",
"[",
"\"variantcaller\"",
"]",
"=",
"\"ensemble\"",
"data",
"[",
"\"metadata\"",
"]",
"=",
"{",
"\"batch\"",
":",
"data",
"[",
"\"ensemble\"",
"]",
"[",
"\"batch_id\"",
"]",
"}",
"return",
"data"
] | Normalize different types of variant calling inputs.
Handles standard and ensemble inputs. | [
"Normalize",
"different",
"types",
"of",
"variant",
"calling",
"inputs",
"."
] | python | train |
lsst-sqre/documenteer | documenteer/sphinxext/lssttasks/taskutils.py | https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/taskutils.py#L82-L103 | def get_subtask_fields(config_class):
"""Get all configurable subtask fields from a Config class.
Parameters
----------
config_class : ``lsst.pipe.base.Config``-type
The configuration class (not an instance) corresponding to a Task.
Returns
-------
subtask_fields : `dict`
Mapping where keys are the config attribute names and values are
subclasses of ``lsst.pex.config.ConfigurableField`` or
``RegistryField``). The mapping is alphabetically ordered by
attribute name.
"""
from lsst.pex.config import ConfigurableField, RegistryField
def is_subtask_field(obj):
return isinstance(obj, (ConfigurableField, RegistryField))
return _get_alphabetical_members(config_class, is_subtask_field) | [
"def",
"get_subtask_fields",
"(",
"config_class",
")",
":",
"from",
"lsst",
".",
"pex",
".",
"config",
"import",
"ConfigurableField",
",",
"RegistryField",
"def",
"is_subtask_field",
"(",
"obj",
")",
":",
"return",
"isinstance",
"(",
"obj",
",",
"(",
"ConfigurableField",
",",
"RegistryField",
")",
")",
"return",
"_get_alphabetical_members",
"(",
"config_class",
",",
"is_subtask_field",
")"
] | Get all configurable subtask fields from a Config class.
Parameters
----------
config_class : ``lsst.pipe.base.Config``-type
The configuration class (not an instance) corresponding to a Task.
Returns
-------
subtask_fields : `dict`
Mapping where keys are the config attribute names and values are
subclasses of ``lsst.pex.config.ConfigurableField`` or
``RegistryField``). The mapping is alphabetically ordered by
attribute name. | [
"Get",
"all",
"configurable",
"subtask",
"fields",
"from",
"a",
"Config",
"class",
"."
] | python | train |
IdentityPython/fedoidcmsg | src/fedoidcmsg/utils.py | https://github.com/IdentityPython/fedoidcmsg/blob/d30107be02521fa6cdfe285da3b6b0cdd153c8cc/src/fedoidcmsg/utils.py#L15-L33 | def self_sign_jwks(keyjar, iss, kid='', lifetime=3600):
"""
Create a signed JWT containing a JWKS. The JWT is signed by one of the
keys in the JWKS.
:param keyjar: A KeyJar instance with at least one private signing key
:param iss: issuer of the JWT, should be the owner of the keys
:param kid: A key ID if a special key should be used otherwise one
is picked at random.
:param lifetime: The lifetime of the signed JWT
:return: A signed JWT
"""
# _json = json.dumps(jwks)
_jwt = JWT(keyjar, iss=iss, lifetime=lifetime)
jwks = keyjar.export_jwks(issuer=iss)
return _jwt.pack(payload={'jwks': jwks}, owner=iss, kid=kid) | [
"def",
"self_sign_jwks",
"(",
"keyjar",
",",
"iss",
",",
"kid",
"=",
"''",
",",
"lifetime",
"=",
"3600",
")",
":",
"# _json = json.dumps(jwks)",
"_jwt",
"=",
"JWT",
"(",
"keyjar",
",",
"iss",
"=",
"iss",
",",
"lifetime",
"=",
"lifetime",
")",
"jwks",
"=",
"keyjar",
".",
"export_jwks",
"(",
"issuer",
"=",
"iss",
")",
"return",
"_jwt",
".",
"pack",
"(",
"payload",
"=",
"{",
"'jwks'",
":",
"jwks",
"}",
",",
"owner",
"=",
"iss",
",",
"kid",
"=",
"kid",
")"
] | Create a signed JWT containing a JWKS. The JWT is signed by one of the
keys in the JWKS.
:param keyjar: A KeyJar instance with at least one private signing key
:param iss: issuer of the JWT, should be the owner of the keys
:param kid: A key ID if a special key should be used otherwise one
is picked at random.
:param lifetime: The lifetime of the signed JWT
:return: A signed JWT | [
"Create",
"a",
"signed",
"JWT",
"containing",
"a",
"JWKS",
".",
"The",
"JWT",
"is",
"signed",
"by",
"one",
"of",
"the",
"keys",
"in",
"the",
"JWKS",
"."
] | python | test |
Crunch-io/crunch-cube | src/cr/cube/crunch_cube.py | https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1401-L1416 | def population_fraction(self):
"""The filtered/unfiltered ratio for cube response.
This value is required for properly calculating population on a cube
where a filter has been applied. Returns 1.0 for an unfiltered cube.
Returns `np.nan` if the unfiltered count is zero, which would
otherwise result in a divide-by-zero error.
"""
numerator = self._cube_dict["result"].get("filtered", {}).get("weighted_n")
denominator = self._cube_dict["result"].get("unfiltered", {}).get("weighted_n")
try:
return numerator / denominator
except ZeroDivisionError:
return np.nan
except Exception:
return 1.0 | [
"def",
"population_fraction",
"(",
"self",
")",
":",
"numerator",
"=",
"self",
".",
"_cube_dict",
"[",
"\"result\"",
"]",
".",
"get",
"(",
"\"filtered\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"weighted_n\"",
")",
"denominator",
"=",
"self",
".",
"_cube_dict",
"[",
"\"result\"",
"]",
".",
"get",
"(",
"\"unfiltered\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"weighted_n\"",
")",
"try",
":",
"return",
"numerator",
"/",
"denominator",
"except",
"ZeroDivisionError",
":",
"return",
"np",
".",
"nan",
"except",
"Exception",
":",
"return",
"1.0"
] | The filtered/unfiltered ratio for cube response.
This value is required for properly calculating population on a cube
where a filter has been applied. Returns 1.0 for an unfiltered cube.
Returns `np.nan` if the unfiltered count is zero, which would
otherwise result in a divide-by-zero error. | [
"The",
"filtered",
"/",
"unfiltered",
"ratio",
"for",
"cube",
"response",
"."
] | python | train |
draperjames/qtpandas | qtpandas/models/DataFrameModelManager.py | https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/models/DataFrameModelManager.py#L164-L173 | def remove_file(self, filepath):
"""
Removes the DataFrameModel from being registered.
:param filepath: (str)
The filepath to delete from the DataFrameModelManager.
:return: None
"""
self._models.pop(filepath)
self._updates.pop(filepath, default=None)
self.signalModelDestroyed.emit(filepath) | [
"def",
"remove_file",
"(",
"self",
",",
"filepath",
")",
":",
"self",
".",
"_models",
".",
"pop",
"(",
"filepath",
")",
"self",
".",
"_updates",
".",
"pop",
"(",
"filepath",
",",
"default",
"=",
"None",
")",
"self",
".",
"signalModelDestroyed",
".",
"emit",
"(",
"filepath",
")"
] | Removes the DataFrameModel from being registered.
:param filepath: (str)
The filepath to delete from the DataFrameModelManager.
:return: None | [
"Removes",
"the",
"DataFrameModel",
"from",
"being",
"registered",
".",
":",
"param",
"filepath",
":",
"(",
"str",
")",
"The",
"filepath",
"to",
"delete",
"from",
"the",
"DataFrameModelManager",
".",
":",
"return",
":",
"None"
] | python | train |
pandas-dev/pandas | pandas/core/series.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L540-L580 | def nonzero(self):
"""
Return the *integer* indices of the elements that are non-zero.
.. deprecated:: 0.24.0
Please use .to_numpy().nonzero() as a replacement.
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatibility with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
See Also
--------
numpy.nonzero
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
>>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
# same return although index of s is different
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
b 3
d 4
dtype: int64
"""
msg = ("Series.nonzero() is deprecated "
"and will be removed in a future version."
"Use Series.to_numpy().nonzero() instead")
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._values.nonzero() | [
"def",
"nonzero",
"(",
"self",
")",
":",
"msg",
"=",
"(",
"\"Series.nonzero() is deprecated \"",
"\"and will be removed in a future version.\"",
"\"Use Series.to_numpy().nonzero() instead\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_values",
".",
"nonzero",
"(",
")"
] | Return the *integer* indices of the elements that are non-zero.
.. deprecated:: 0.24.0
Please use .to_numpy().nonzero() as a replacement.
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatibility with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
See Also
--------
numpy.nonzero
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
>>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
# same return although index of s is different
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
b 3
d 4
dtype: int64 | [
"Return",
"the",
"*",
"integer",
"*",
"indices",
"of",
"the",
"elements",
"that",
"are",
"non",
"-",
"zero",
"."
] | python | train |
rbarrois/restricted_pkg | restricted_pkg/base.py | https://github.com/rbarrois/restricted_pkg/blob/abbd3cb33ed85af02fbb531fd85dda9c1b070c85/restricted_pkg/base.py#L137-L146 | def prompt_auth(self):
"""Prompt the user for login/pass, if needed."""
if self.username and self.password:
return
sys.stdout.write("Please insert your credentials for %s\n" % self.url.base_url)
while not self.username:
self.username = raw_input("Username [%s]: " % getpass.getuser())
while not self.password:
self.password = getpass.getpass("Password: ") | [
"def",
"prompt_auth",
"(",
"self",
")",
":",
"if",
"self",
".",
"username",
"and",
"self",
".",
"password",
":",
"return",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Please insert your credentials for %s\\n\"",
"%",
"self",
".",
"url",
".",
"base_url",
")",
"while",
"not",
"self",
".",
"username",
":",
"self",
".",
"username",
"=",
"raw_input",
"(",
"\"Username [%s]: \"",
"%",
"getpass",
".",
"getuser",
"(",
")",
")",
"while",
"not",
"self",
".",
"password",
":",
"self",
".",
"password",
"=",
"getpass",
".",
"getpass",
"(",
"\"Password: \"",
")"
] | Prompt the user for login/pass, if needed. | [
"Prompt",
"the",
"user",
"for",
"login",
"/",
"pass",
"if",
"needed",
"."
] | python | train |
pydanny-archive/django-uni-form | uni_form/layout.py | https://github.com/pydanny-archive/django-uni-form/blob/159f539e2fb98752b7964d75e955fc62881c28fb/uni_form/layout.py#L95-L99 | def render(self, form, form_style, context):
"""
Renders an `<input />` if container is used as a Layout object
"""
return render_to_string(self.template, Context({'input': self})) | [
"def",
"render",
"(",
"self",
",",
"form",
",",
"form_style",
",",
"context",
")",
":",
"return",
"render_to_string",
"(",
"self",
".",
"template",
",",
"Context",
"(",
"{",
"'input'",
":",
"self",
"}",
")",
")"
] | Renders an `<input />` if container is used as a Layout object | [
"Renders",
"an",
"<input",
"/",
">",
"if",
"container",
"is",
"used",
"as",
"a",
"Layout",
"object"
] | python | train |
spotify/luigi | luigi/contrib/hdfs/snakebite_client.py | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hdfs/snakebite_client.py#L110-L126 | def rename_dont_move(self, path, dest):
"""
Use snakebite.rename_dont_move, if available.
:param path: source path (single input)
:type path: string
:param dest: destination path
:type dest: string
:return: True if succeeded
:raises: snakebite.errors.FileAlreadyExistsException
"""
from snakebite.errors import FileAlreadyExistsException
try:
self.get_bite().rename2(path, dest, overwriteDest=False)
except FileAlreadyExistsException:
# Unfortunately python2 don't allow exception chaining.
raise luigi.target.FileAlreadyExists() | [
"def",
"rename_dont_move",
"(",
"self",
",",
"path",
",",
"dest",
")",
":",
"from",
"snakebite",
".",
"errors",
"import",
"FileAlreadyExistsException",
"try",
":",
"self",
".",
"get_bite",
"(",
")",
".",
"rename2",
"(",
"path",
",",
"dest",
",",
"overwriteDest",
"=",
"False",
")",
"except",
"FileAlreadyExistsException",
":",
"# Unfortunately python2 don't allow exception chaining.",
"raise",
"luigi",
".",
"target",
".",
"FileAlreadyExists",
"(",
")"
] | Use snakebite.rename_dont_move, if available.
:param path: source path (single input)
:type path: string
:param dest: destination path
:type dest: string
:return: True if succeeded
:raises: snakebite.errors.FileAlreadyExistsException | [
"Use",
"snakebite",
".",
"rename_dont_move",
"if",
"available",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/nose/config.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/config.py#L423-L568 | def getParser(self, doc=None):
"""Get the command line option parser.
"""
if self.parser:
return self.parser
env = self.env
parser = self.parserClass(doc)
parser.add_option(
"-V","--version", action="store_true",
dest="version", default=False,
help="Output nose version and exit")
parser.add_option(
"-p", "--plugins", action="store_true",
dest="showPlugins", default=False,
help="Output list of available plugins and exit. Combine with "
"higher verbosity for greater detail")
parser.add_option(
"-v", "--verbose",
action="count", dest="verbosity",
default=self.verbosity,
help="Be more verbose. [NOSE_VERBOSE]")
parser.add_option(
"--verbosity", action="store", dest="verbosity",
metavar='VERBOSITY',
type="int", help="Set verbosity; --verbosity=2 is "
"the same as -v")
parser.add_option(
"-q", "--quiet", action="store_const", const=0, dest="verbosity",
help="Be less verbose")
parser.add_option(
"-c", "--config", action="append", dest="files",
metavar="FILES",
help="Load configuration from config file(s). May be specified "
"multiple times; in that case, all config files will be "
"loaded and combined")
parser.add_option(
"-w", "--where", action="append", dest="where",
metavar="WHERE",
help="Look for tests in this directory. "
"May be specified multiple times. The first directory passed "
"will be used as the working directory, in place of the current "
"working directory, which is the default. Others will be added "
"to the list of tests to execute. [NOSE_WHERE]"
)
parser.add_option(
"--py3where", action="append", dest="py3where",
metavar="PY3WHERE",
help="Look for tests in this directory under Python 3.x. "
"Functions the same as 'where', but only applies if running under "
"Python 3.x or above. Note that, if present under 3.x, this "
"option completely replaces any directories specified with "
"'where', so the 'where' option becomes ineffective. "
"[NOSE_PY3WHERE]"
)
parser.add_option(
"-m", "--match", "--testmatch", action="store",
dest="testMatch", metavar="REGEX",
help="Files, directories, function names, and class names "
"that match this regular expression are considered tests. "
"Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
default=self.testMatchPat)
parser.add_option(
"--tests", action="store", dest="testNames", default=None,
metavar='NAMES',
help="Run these tests (comma-separated list). This argument is "
"useful mainly from configuration files; on the command line, "
"just pass the tests to run as additional arguments with no "
"switch.")
parser.add_option(
"-l", "--debug", action="store",
dest="debug", default=self.debug,
help="Activate debug logging for one or more systems. "
"Available debug loggers: nose, nose.importer, "
"nose.inspector, nose.plugins, nose.result and "
"nose.selector. Separate multiple names with a comma.")
parser.add_option(
"--debug-log", dest="debugLog", action="store",
default=self.debugLog, metavar="FILE",
help="Log debug messages to this file "
"(default: sys.stderr)")
parser.add_option(
"--logging-config", "--log-config",
dest="loggingConfig", action="store",
default=self.loggingConfig, metavar="FILE",
help="Load logging config from this file -- bypasses all other"
" logging config settings.")
parser.add_option(
"-I", "--ignore-files", action="append", dest="ignoreFiles",
metavar="REGEX",
help="Completely ignore any file that matches this regular "
"expression. Takes precedence over any other settings or "
"plugins. "
"Specifying this option will replace the default setting. "
"Specify this option multiple times "
"to add more regular expressions [NOSE_IGNORE_FILES]")
parser.add_option(
"-e", "--exclude", action="append", dest="exclude",
metavar="REGEX",
help="Don't run tests that match regular "
"expression [NOSE_EXCLUDE]")
parser.add_option(
"-i", "--include", action="append", dest="include",
metavar="REGEX",
help="This regular expression will be applied to files, "
"directories, function names, and class names for a chance "
"to include additional tests that do not match TESTMATCH. "
"Specify this option multiple times "
"to add more regular expressions [NOSE_INCLUDE]")
parser.add_option(
"-x", "--stop", action="store_true", dest="stopOnError",
default=self.stopOnError,
help="Stop running tests after the first error or failure")
parser.add_option(
"-P", "--no-path-adjustment", action="store_false",
dest="addPaths",
default=self.addPaths,
help="Don't make any changes to sys.path when "
"loading tests [NOSE_NOPATH]")
parser.add_option(
"--exe", action="store_true", dest="includeExe",
default=self.includeExe,
help="Look for tests in python modules that are "
"executable. Normal behavior is to exclude executable "
"modules, since they may not be import-safe "
"[NOSE_INCLUDE_EXE]")
parser.add_option(
"--noexe", action="store_false", dest="includeExe",
help="DO NOT look for tests in python modules that are "
"executable. (The default on the windows platform is to "
"do so.)")
parser.add_option(
"--traverse-namespace", action="store_true",
default=self.traverseNamespace, dest="traverseNamespace",
help="Traverse through all path entries of a namespace package")
parser.add_option(
"--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
action="store_true", default=False, dest="firstPackageWins",
help="nose's importer will normally evict a package from sys."
"modules if it sees a package with the same name in a different "
"location. Set this option to disable that behavior.")
self.plugins.loadPlugins()
self.pluginOpts(parser)
self.parser = parser
return parser | [
"def",
"getParser",
"(",
"self",
",",
"doc",
"=",
"None",
")",
":",
"if",
"self",
".",
"parser",
":",
"return",
"self",
".",
"parser",
"env",
"=",
"self",
".",
"env",
"parser",
"=",
"self",
".",
"parserClass",
"(",
"doc",
")",
"parser",
".",
"add_option",
"(",
"\"-V\"",
",",
"\"--version\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"version\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Output nose version and exit\"",
")",
"parser",
".",
"add_option",
"(",
"\"-p\"",
",",
"\"--plugins\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"showPlugins\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Output list of available plugins and exit. Combine with \"",
"\"higher verbosity for greater detail\"",
")",
"parser",
".",
"add_option",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"action",
"=",
"\"count\"",
",",
"dest",
"=",
"\"verbosity\"",
",",
"default",
"=",
"self",
".",
"verbosity",
",",
"help",
"=",
"\"Be more verbose. [NOSE_VERBOSE]\"",
")",
"parser",
".",
"add_option",
"(",
"\"--verbosity\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"verbosity\"",
",",
"metavar",
"=",
"'VERBOSITY'",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"Set verbosity; --verbosity=2 is \"",
"\"the same as -v\"",
")",
"parser",
".",
"add_option",
"(",
"\"-q\"",
",",
"\"--quiet\"",
",",
"action",
"=",
"\"store_const\"",
",",
"const",
"=",
"0",
",",
"dest",
"=",
"\"verbosity\"",
",",
"help",
"=",
"\"Be less verbose\"",
")",
"parser",
".",
"add_option",
"(",
"\"-c\"",
",",
"\"--config\"",
",",
"action",
"=",
"\"append\"",
",",
"dest",
"=",
"\"files\"",
",",
"metavar",
"=",
"\"FILES\"",
",",
"help",
"=",
"\"Load configuration from config file(s). May be specified \"",
"\"multiple times; in that case, all config files will be \"",
"\"loaded and combined\"",
")",
"parser",
".",
"add_option",
"(",
"\"-w\"",
",",
"\"--where\"",
",",
"action",
"=",
"\"append\"",
",",
"dest",
"=",
"\"where\"",
",",
"metavar",
"=",
"\"WHERE\"",
",",
"help",
"=",
"\"Look for tests in this directory. \"",
"\"May be specified multiple times. The first directory passed \"",
"\"will be used as the working directory, in place of the current \"",
"\"working directory, which is the default. Others will be added \"",
"\"to the list of tests to execute. [NOSE_WHERE]\"",
")",
"parser",
".",
"add_option",
"(",
"\"--py3where\"",
",",
"action",
"=",
"\"append\"",
",",
"dest",
"=",
"\"py3where\"",
",",
"metavar",
"=",
"\"PY3WHERE\"",
",",
"help",
"=",
"\"Look for tests in this directory under Python 3.x. \"",
"\"Functions the same as 'where', but only applies if running under \"",
"\"Python 3.x or above. Note that, if present under 3.x, this \"",
"\"option completely replaces any directories specified with \"",
"\"'where', so the 'where' option becomes ineffective. \"",
"\"[NOSE_PY3WHERE]\"",
")",
"parser",
".",
"add_option",
"(",
"\"-m\"",
",",
"\"--match\"",
",",
"\"--testmatch\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"testMatch\"",
",",
"metavar",
"=",
"\"REGEX\"",
",",
"help",
"=",
"\"Files, directories, function names, and class names \"",
"\"that match this regular expression are considered tests. \"",
"\"Default: %s [NOSE_TESTMATCH]\"",
"%",
"self",
".",
"testMatchPat",
",",
"default",
"=",
"self",
".",
"testMatchPat",
")",
"parser",
".",
"add_option",
"(",
"\"--tests\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"testNames\"",
",",
"default",
"=",
"None",
",",
"metavar",
"=",
"'NAMES'",
",",
"help",
"=",
"\"Run these tests (comma-separated list). This argument is \"",
"\"useful mainly from configuration files; on the command line, \"",
"\"just pass the tests to run as additional arguments with no \"",
"\"switch.\"",
")",
"parser",
".",
"add_option",
"(",
"\"-l\"",
",",
"\"--debug\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"debug\"",
",",
"default",
"=",
"self",
".",
"debug",
",",
"help",
"=",
"\"Activate debug logging for one or more systems. \"",
"\"Available debug loggers: nose, nose.importer, \"",
"\"nose.inspector, nose.plugins, nose.result and \"",
"\"nose.selector. Separate multiple names with a comma.\"",
")",
"parser",
".",
"add_option",
"(",
"\"--debug-log\"",
",",
"dest",
"=",
"\"debugLog\"",
",",
"action",
"=",
"\"store\"",
",",
"default",
"=",
"self",
".",
"debugLog",
",",
"metavar",
"=",
"\"FILE\"",
",",
"help",
"=",
"\"Log debug messages to this file \"",
"\"(default: sys.stderr)\"",
")",
"parser",
".",
"add_option",
"(",
"\"--logging-config\"",
",",
"\"--log-config\"",
",",
"dest",
"=",
"\"loggingConfig\"",
",",
"action",
"=",
"\"store\"",
",",
"default",
"=",
"self",
".",
"loggingConfig",
",",
"metavar",
"=",
"\"FILE\"",
",",
"help",
"=",
"\"Load logging config from this file -- bypasses all other\"",
"\" logging config settings.\"",
")",
"parser",
".",
"add_option",
"(",
"\"-I\"",
",",
"\"--ignore-files\"",
",",
"action",
"=",
"\"append\"",
",",
"dest",
"=",
"\"ignoreFiles\"",
",",
"metavar",
"=",
"\"REGEX\"",
",",
"help",
"=",
"\"Completely ignore any file that matches this regular \"",
"\"expression. Takes precedence over any other settings or \"",
"\"plugins. \"",
"\"Specifying this option will replace the default setting. \"",
"\"Specify this option multiple times \"",
"\"to add more regular expressions [NOSE_IGNORE_FILES]\"",
")",
"parser",
".",
"add_option",
"(",
"\"-e\"",
",",
"\"--exclude\"",
",",
"action",
"=",
"\"append\"",
",",
"dest",
"=",
"\"exclude\"",
",",
"metavar",
"=",
"\"REGEX\"",
",",
"help",
"=",
"\"Don't run tests that match regular \"",
"\"expression [NOSE_EXCLUDE]\"",
")",
"parser",
".",
"add_option",
"(",
"\"-i\"",
",",
"\"--include\"",
",",
"action",
"=",
"\"append\"",
",",
"dest",
"=",
"\"include\"",
",",
"metavar",
"=",
"\"REGEX\"",
",",
"help",
"=",
"\"This regular expression will be applied to files, \"",
"\"directories, function names, and class names for a chance \"",
"\"to include additional tests that do not match TESTMATCH. \"",
"\"Specify this option multiple times \"",
"\"to add more regular expressions [NOSE_INCLUDE]\"",
")",
"parser",
".",
"add_option",
"(",
"\"-x\"",
",",
"\"--stop\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"stopOnError\"",
",",
"default",
"=",
"self",
".",
"stopOnError",
",",
"help",
"=",
"\"Stop running tests after the first error or failure\"",
")",
"parser",
".",
"add_option",
"(",
"\"-P\"",
",",
"\"--no-path-adjustment\"",
",",
"action",
"=",
"\"store_false\"",
",",
"dest",
"=",
"\"addPaths\"",
",",
"default",
"=",
"self",
".",
"addPaths",
",",
"help",
"=",
"\"Don't make any changes to sys.path when \"",
"\"loading tests [NOSE_NOPATH]\"",
")",
"parser",
".",
"add_option",
"(",
"\"--exe\"",
",",
"action",
"=",
"\"store_true\"",
",",
"dest",
"=",
"\"includeExe\"",
",",
"default",
"=",
"self",
".",
"includeExe",
",",
"help",
"=",
"\"Look for tests in python modules that are \"",
"\"executable. Normal behavior is to exclude executable \"",
"\"modules, since they may not be import-safe \"",
"\"[NOSE_INCLUDE_EXE]\"",
")",
"parser",
".",
"add_option",
"(",
"\"--noexe\"",
",",
"action",
"=",
"\"store_false\"",
",",
"dest",
"=",
"\"includeExe\"",
",",
"help",
"=",
"\"DO NOT look for tests in python modules that are \"",
"\"executable. (The default on the windows platform is to \"",
"\"do so.)\"",
")",
"parser",
".",
"add_option",
"(",
"\"--traverse-namespace\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"self",
".",
"traverseNamespace",
",",
"dest",
"=",
"\"traverseNamespace\"",
",",
"help",
"=",
"\"Traverse through all path entries of a namespace package\"",
")",
"parser",
".",
"add_option",
"(",
"\"--first-package-wins\"",
",",
"\"--first-pkg-wins\"",
",",
"\"--1st-pkg-wins\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"\"firstPackageWins\"",
",",
"help",
"=",
"\"nose's importer will normally evict a package from sys.\"",
"\"modules if it sees a package with the same name in a different \"",
"\"location. Set this option to disable that behavior.\"",
")",
"self",
".",
"plugins",
".",
"loadPlugins",
"(",
")",
"self",
".",
"pluginOpts",
"(",
"parser",
")",
"self",
".",
"parser",
"=",
"parser",
"return",
"parser"
] | Get the command line option parser. | [
"Get",
"the",
"command",
"line",
"option",
"parser",
"."
] | python | test |
Contraz/demosys-py | demosys/timers/clock.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/timers/clock.py#L57-L69 | def get_time(self) -> float:
"""
Get the current time in seconds
Returns:
The current time in seconds
"""
if self.pause_time is not None:
curr_time = self.pause_time - self.offset - self.start_time
return curr_time
curr_time = time.time()
return curr_time - self.start_time - self.offset | [
"def",
"get_time",
"(",
"self",
")",
"->",
"float",
":",
"if",
"self",
".",
"pause_time",
"is",
"not",
"None",
":",
"curr_time",
"=",
"self",
".",
"pause_time",
"-",
"self",
".",
"offset",
"-",
"self",
".",
"start_time",
"return",
"curr_time",
"curr_time",
"=",
"time",
".",
"time",
"(",
")",
"return",
"curr_time",
"-",
"self",
".",
"start_time",
"-",
"self",
".",
"offset"
] | Get the current time in seconds
Returns:
The current time in seconds | [
"Get",
"the",
"current",
"time",
"in",
"seconds"
] | python | valid |
bitesofcode/projexui | projexui/widgets/xloggerwidget/xloggerwidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xloggerwidget/xloggerwidget.py#L479-L486 | def setConfigurable(self, state):
"""
Sets whether or not this logger widget is configurable.
:param state | <bool>
"""
self._configurable = state
self._configButton.setVisible(state) | [
"def",
"setConfigurable",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"_configurable",
"=",
"state",
"self",
".",
"_configButton",
".",
"setVisible",
"(",
"state",
")"
] | Sets whether or not this logger widget is configurable.
:param state | <bool> | [
"Sets",
"whether",
"or",
"not",
"this",
"logger",
"widget",
"is",
"configurable",
".",
":",
"param",
"state",
"|",
"<bool",
">"
] | python | train |
fishtown-analytics/dbt | plugins/bigquery/dbt/adapters/bigquery/impl.py | https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/plugins/bigquery/dbt/adapters/bigquery/impl.py#L196-L207 | def _get_dbt_columns_from_bq_table(self, table):
"Translates BQ SchemaField dicts into dbt BigQueryColumn objects"
columns = []
for col in table.schema:
# BigQuery returns type labels that are not valid type specifiers
dtype = self.Column.translate_type(col.field_type)
column = self.Column(
col.name, dtype, col.fields, col.mode)
columns.append(column)
return columns | [
"def",
"_get_dbt_columns_from_bq_table",
"(",
"self",
",",
"table",
")",
":",
"columns",
"=",
"[",
"]",
"for",
"col",
"in",
"table",
".",
"schema",
":",
"# BigQuery returns type labels that are not valid type specifiers",
"dtype",
"=",
"self",
".",
"Column",
".",
"translate_type",
"(",
"col",
".",
"field_type",
")",
"column",
"=",
"self",
".",
"Column",
"(",
"col",
".",
"name",
",",
"dtype",
",",
"col",
".",
"fields",
",",
"col",
".",
"mode",
")",
"columns",
".",
"append",
"(",
"column",
")",
"return",
"columns"
] | Translates BQ SchemaField dicts into dbt BigQueryColumn objects | [
"Translates",
"BQ",
"SchemaField",
"dicts",
"into",
"dbt",
"BigQueryColumn",
"objects"
] | python | train |
openstack/proliantutils | proliantutils/redfish/redfish.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/redfish.py#L269-L282 | def press_pwr_btn(self):
"""Simulates a physical press of the server power button.
:raises: IloError, on an error from iLO.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
try:
sushy_system.push_power_button(sys_cons.PUSH_POWER_BUTTON_PRESS)
except sushy.exceptions.SushyError as e:
msg = (self._('The Redfish controller failed to press power button'
' of server. Error %(error)s') %
{'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg) | [
"def",
"press_pwr_btn",
"(",
"self",
")",
":",
"sushy_system",
"=",
"self",
".",
"_get_sushy_system",
"(",
"PROLIANT_SYSTEM_ID",
")",
"try",
":",
"sushy_system",
".",
"push_power_button",
"(",
"sys_cons",
".",
"PUSH_POWER_BUTTON_PRESS",
")",
"except",
"sushy",
".",
"exceptions",
".",
"SushyError",
"as",
"e",
":",
"msg",
"=",
"(",
"self",
".",
"_",
"(",
"'The Redfish controller failed to press power button'",
"' of server. Error %(error)s'",
")",
"%",
"{",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")",
"LOG",
".",
"debug",
"(",
"msg",
")",
"raise",
"exception",
".",
"IloError",
"(",
"msg",
")"
] | Simulates a physical press of the server power button.
:raises: IloError, on an error from iLO. | [
"Simulates",
"a",
"physical",
"press",
"of",
"the",
"server",
"power",
"button",
"."
] | python | train |
cokelaer/spectrum | src/spectrum/yulewalker.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/yulewalker.py#L23-L116 | def aryule(X, order, norm='biased', allow_singularity=True):
r"""Compute AR coefficients using Yule-Walker method
:param X: Array of complex data values, X(1) to X(N)
:param int order: Order of autoregressive process to be fitted (integer)
:param str norm: Use a biased or unbiased correlation.
:param bool allow_singularity:
:return:
* AR coefficients (complex)
* variance of white noise (Real)
* reflection coefficients for use in lattice filter
.. rubric:: Description:
The Yule-Walker method returns the polynomial A corresponding to the
AR parametric signal model estimate of vector X using the Yule-Walker
(autocorrelation) method. The autocorrelation may be computed using a
**biased** or **unbiased** estimation. In practice, the biased estimate of
the autocorrelation is used for the unknown true autocorrelation. Indeed,
an unbiased estimate may result in nonpositive-definite autocorrelation
matrix.
So, a biased estimate leads to a stable AR filter.
The following matrix form represents the Yule-Walker equations. The are
solved by means of the Levinson-Durbin recursion:
.. math::
\left( \begin{array}{cccc}
r(1) & r(2)^* & \dots & r(n)^*\\
r(2) & r(1)^* & \dots & r(n-1)^*\\
\dots & \dots & \dots & \dots\\
r(n) & \dots & r(2) & r(1) \end{array} \right)
\left( \begin{array}{cccc}
a(2)\\
a(3) \\
\dots \\
a(n+1) \end{array} \right)
=
\left( \begin{array}{cccc}
-r(2)\\
-r(3) \\
\dots \\
-r(n+1) \end{array} \right)
The outputs consists of the AR coefficients, the estimated variance of the
white noise process, and the reflection coefficients. These outputs can be
used to estimate the optimal order by using :mod:`~spectrum.criteria`.
.. rubric:: Examples:
From a known AR process or order 4, we estimate those AR parameters using
the aryule function.
.. doctest::
>>> from scipy.signal import lfilter
>>> from spectrum import *
>>> from numpy.random import randn
>>> A =[1, -2.7607, 3.8106, -2.6535, 0.9238]
>>> noise = randn(1, 1024)
>>> y = lfilter([1], A, noise);
>>> #filter a white noise input to create AR(4) process
>>> [ar, var, reflec] = aryule(y[0], 4)
>>> # ar should contain values similar to A
The PSD estimate of the data samples is computed and plotted as follows:
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import *
ar, P, k = aryule(marple_data, 15, norm='biased')
psd = arma2psd(ar)
plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd/max(psd)))
axis([-0.5, 0.5, -60, 0])
.. note:: The outputs have been double checked against (1) octave outputs
(octave has norm='biased' by default) and (2) Marple test code.
.. seealso:: This function uses :func:`~spectrum.levinson.LEVINSON` and
:func:`~spectrum.correlation.CORRELATION`. See the :mod:`~spectrum.criteria`
module for criteria to automatically select the AR order.
:References: [Marple]_
"""
assert norm in ['biased', 'unbiased']
r = CORRELATION(X, maxlags=order, norm=norm)
A, P, k = LEVINSON(r, allow_singularity=allow_singularity)
return A, P, k | [
"def",
"aryule",
"(",
"X",
",",
"order",
",",
"norm",
"=",
"'biased'",
",",
"allow_singularity",
"=",
"True",
")",
":",
"assert",
"norm",
"in",
"[",
"'biased'",
",",
"'unbiased'",
"]",
"r",
"=",
"CORRELATION",
"(",
"X",
",",
"maxlags",
"=",
"order",
",",
"norm",
"=",
"norm",
")",
"A",
",",
"P",
",",
"k",
"=",
"LEVINSON",
"(",
"r",
",",
"allow_singularity",
"=",
"allow_singularity",
")",
"return",
"A",
",",
"P",
",",
"k"
] | r"""Compute AR coefficients using Yule-Walker method
:param X: Array of complex data values, X(1) to X(N)
:param int order: Order of autoregressive process to be fitted (integer)
:param str norm: Use a biased or unbiased correlation.
:param bool allow_singularity:
:return:
* AR coefficients (complex)
* variance of white noise (Real)
* reflection coefficients for use in lattice filter
.. rubric:: Description:
The Yule-Walker method returns the polynomial A corresponding to the
AR parametric signal model estimate of vector X using the Yule-Walker
(autocorrelation) method. The autocorrelation may be computed using a
**biased** or **unbiased** estimation. In practice, the biased estimate of
the autocorrelation is used for the unknown true autocorrelation. Indeed,
an unbiased estimate may result in nonpositive-definite autocorrelation
matrix.
So, a biased estimate leads to a stable AR filter.
The following matrix form represents the Yule-Walker equations. They are
solved by means of the Levinson-Durbin recursion:
.. math::
\left( \begin{array}{cccc}
r(1) & r(2)^* & \dots & r(n)^*\\
r(2) & r(1)^* & \dots & r(n-1)^*\\
\dots & \dots & \dots & \dots\\
r(n) & \dots & r(2) & r(1) \end{array} \right)
\left( \begin{array}{cccc}
a(2)\\
a(3) \\
\dots \\
a(n+1) \end{array} \right)
=
\left( \begin{array}{cccc}
-r(2)\\
-r(3) \\
\dots \\
-r(n+1) \end{array} \right)
The outputs consists of the AR coefficients, the estimated variance of the
white noise process, and the reflection coefficients. These outputs can be
used to estimate the optimal order by using :mod:`~spectrum.criteria`.
.. rubric:: Examples:
From a known AR process of order 4, we estimate those AR parameters using
the aryule function.
.. doctest::
>>> from scipy.signal import lfilter
>>> from spectrum import *
>>> from numpy.random import randn
>>> A =[1, -2.7607, 3.8106, -2.6535, 0.9238]
>>> noise = randn(1, 1024)
>>> y = lfilter([1], A, noise);
>>> #filter a white noise input to create AR(4) process
>>> [ar, var, reflec] = aryule(y[0], 4)
>>> # ar should contain values similar to A
The PSD estimate of the data samples is computed and plotted as follows:
.. plot::
:width: 80%
:include-source:
from spectrum import *
from pylab import *
ar, P, k = aryule(marple_data, 15, norm='biased')
psd = arma2psd(ar)
plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd/max(psd)))
axis([-0.5, 0.5, -60, 0])
.. note:: The outputs have been double checked against (1) octave outputs
(octave has norm='biased' by default) and (2) Marple test code.
.. seealso:: This function uses :func:`~spectrum.levinson.LEVINSON` and
:func:`~spectrum.correlation.CORRELATION`. See the :mod:`~spectrum.criteria`
module for criteria to automatically select the AR order.
:References: [Marple]_ | [
"r",
"Compute",
"AR",
"coefficients",
"using",
"Yule",
"-",
"Walker",
"method"
] | python | valid |
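As a rough cross-check of the Yule-Walker normal equations described in the aryule record above, the same AR coefficients can be obtained by solving the Toeplitz system directly with SciPy instead of the Levinson-Durbin recursion used by the package. The helper name yule_walker_direct and the choice of scipy.linalg.solve_toeplitz are illustrative assumptions, not part of the spectrum library.

import numpy as np
from scipy.linalg import solve_toeplitz

def yule_walker_direct(x, order):
    # Hypothetical helper: biased autocorrelation estimates r[0..order]
    x = np.asarray(x, dtype=float)
    n = len(x)
    r = np.array([np.dot(x[:n - k], x[k:]) / n for k in range(order + 1)])
    # Solve the symmetric Toeplitz system R a = -r[1:] for the AR coefficients
    a = solve_toeplitz(r[:-1], -r[1:])
    # Driving-noise variance: sigma^2 = r[0] + sum_k a_k * r[k]
    var = r[0] + np.dot(a, r[1:])
    return a, var

For the same data, the coefficients returned this way should be close to the A array produced by aryule(x, order, norm='biased').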
gwastro/pycbc | pycbc/filter/matchedfilter.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/matchedfilter.py#L1318-L1364 | def match(vec1, vec2, psd=None, low_frequency_cutoff=None,
high_frequency_cutoff=None, v1_norm=None, v2_norm=None):
""" Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase.
Parameters
----------
vec1 : TimeSeries or FrequencySeries
The input vector containing a waveform.
vec2 : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
v1_norm : {None, float}, optional
The normalization of the first waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
v2_norm : {None, float}, optional
The normalization of the second waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
"""
htilde = make_frequency_series(vec1)
stilde = make_frequency_series(vec2)
N = (len(htilde)-1) * 2
global _snr
if _snr is None or _snr.dtype != htilde.dtype or len(_snr) != N:
_snr = zeros(N,dtype=complex_same_precision_as(vec1))
snr, _, snr_norm = matched_filter_core(htilde,stilde,psd,low_frequency_cutoff,
high_frequency_cutoff, v1_norm, out=_snr)
maxsnr, max_id = snr.abs_max_loc()
if v2_norm is None:
v2_norm = sigmasq(stilde, psd, low_frequency_cutoff, high_frequency_cutoff)
return maxsnr * snr_norm / sqrt(v2_norm), max_id | [
"def",
"match",
"(",
"vec1",
",",
"vec2",
",",
"psd",
"=",
"None",
",",
"low_frequency_cutoff",
"=",
"None",
",",
"high_frequency_cutoff",
"=",
"None",
",",
"v1_norm",
"=",
"None",
",",
"v2_norm",
"=",
"None",
")",
":",
"htilde",
"=",
"make_frequency_series",
"(",
"vec1",
")",
"stilde",
"=",
"make_frequency_series",
"(",
"vec2",
")",
"N",
"=",
"(",
"len",
"(",
"htilde",
")",
"-",
"1",
")",
"*",
"2",
"global",
"_snr",
"if",
"_snr",
"is",
"None",
"or",
"_snr",
".",
"dtype",
"!=",
"htilde",
".",
"dtype",
"or",
"len",
"(",
"_snr",
")",
"!=",
"N",
":",
"_snr",
"=",
"zeros",
"(",
"N",
",",
"dtype",
"=",
"complex_same_precision_as",
"(",
"vec1",
")",
")",
"snr",
",",
"_",
",",
"snr_norm",
"=",
"matched_filter_core",
"(",
"htilde",
",",
"stilde",
",",
"psd",
",",
"low_frequency_cutoff",
",",
"high_frequency_cutoff",
",",
"v1_norm",
",",
"out",
"=",
"_snr",
")",
"maxsnr",
",",
"max_id",
"=",
"snr",
".",
"abs_max_loc",
"(",
")",
"if",
"v2_norm",
"is",
"None",
":",
"v2_norm",
"=",
"sigmasq",
"(",
"stilde",
",",
"psd",
",",
"low_frequency_cutoff",
",",
"high_frequency_cutoff",
")",
"return",
"maxsnr",
"*",
"snr_norm",
"/",
"sqrt",
"(",
"v2_norm",
")",
",",
"max_id"
] | Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase.
Parameters
----------
vec1 : TimeSeries or FrequencySeries
The input vector containing a waveform.
vec2 : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
v1_norm : {None, float}, optional
The normalization of the first waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
v2_norm : {None, float}, optional
The normalization of the second waveform. This is equivalent to its
sigmasq value. If None, it is internally calculated.
Returns
-------
match: float
index: int
The number of samples to shift to get the match. | [
"Return",
"the",
"match",
"between",
"the",
"two",
"TimeSeries",
"or",
"FrequencySeries",
"."
] | python | train |
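A rough usage sketch for the match function above, assuming the pycbc.types.TimeSeries constructor and the pycbc.filter import path behave as in current PyCBC releases; the windowed sinusoidal test signals are made up purely for illustration.

import numpy as np
from pycbc.types import TimeSeries
from pycbc.filter import match

delta_t = 1.0 / 4096
t = np.arange(0, 1, delta_t)
window = np.hanning(len(t))
a = TimeSeries(window * np.sin(2 * np.pi * 100 * t), delta_t=delta_t)
b = TimeSeries(window * np.sin(2 * np.pi * 100 * t + 0.3), delta_t=delta_t)

# Overlap maximized over time and phase, plus the sample shift that achieves it
m, shift = match(a, b)
print(m, shift)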
skymill/automated-ebs-snapshots | automated_ebs_snapshots/volume_manager.py | https://github.com/skymill/automated-ebs-snapshots/blob/9595bc49d458f6ffb93430722757d2284e878fab/automated_ebs_snapshots/volume_manager.py#L210-L222 | def unwatch_from_file(connection, file_name):
""" Start watching a new volume
:type connection: boto.ec2.connection.EC2Connection
:param connection: EC2 connection object
:type file_name: str
:param file_name: path to config file
:returns: None
"""
with open(file_name, 'r') as filehandle:
for line in filehandle.xreadlines():
volume, interval, retention = line.rstrip().split(',')
unwatch(connection, get_volume_id(connection, volume)) | [
"def",
"unwatch_from_file",
"(",
"connection",
",",
"file_name",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"'r'",
")",
"as",
"filehandle",
":",
"for",
"line",
"in",
"filehandle",
".",
"xreadlines",
"(",
")",
":",
"volume",
",",
"interval",
",",
"retention",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"','",
")",
"unwatch",
"(",
"connection",
",",
"get_volume_id",
"(",
"connection",
",",
"volume",
")",
")"
] | Stop watching volumes listed in a config file
:type connection: boto.ec2.connection.EC2Connection
:param connection: EC2 connection object
:type file_name: str
:param file_name: path to config file
:returns: None | [
"Start",
"watching",
"a",
"new",
"volume"
] | python | train |
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py#L1744-L1765 | def get_account_certificate(self, account_id, cert_id, **kwargs): # noqa: E501
"""Get trusted certificate by ID. # noqa: E501
An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be retrieved. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_certificate_with_http_info(account_id, cert_id, **kwargs) # noqa: E501
return data | [
"def",
"get_account_certificate",
"(",
"self",
",",
"account_id",
",",
"cert_id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"get_account_certificate_with_http_info",
"(",
"account_id",
",",
"cert_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_account_certificate_with_http_info",
"(",
"account_id",
",",
"cert_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Get trusted certificate by ID. # noqa: E501
An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_account_certificate(account_id, cert_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str cert_id: The ID of the trusted certificate to be retrieved. (required)
:return: TrustedCertificateInternalResp
If the method is called asynchronously,
returns the request thread. | [
"Get",
"trusted",
"certificate",
"by",
"ID",
".",
"#",
"noqa",
":",
"E501"
] | python | train |
saltstack/salt | salt/modules/vboxmanage.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vboxmanage.py#L404-L494 | def clonemedium(medium,
uuid_in=None,
file_in=None,
uuid_out=None,
file_out=None,
mformat=None,
variant=None,
existing=False,
**kwargs):
'''
Clone a VirtualBox medium from an existing medium
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.clonemedium <name> <new_name>
'''
params = ''
valid_mediums = ('disk', 'dvd', 'floppy')
if medium in valid_mediums:
params += medium
else:
raise CommandExecutionError(
'Medium must be one of: {0}.'.format(', '.join(valid_mediums))
)
if (uuid_in and file_in) or (not uuid_in and not file_in):
raise CommandExecutionError(
'Either uuid_in or file_in must be used, but not both.'
)
if uuid_in:
if medium == 'disk':
item = 'hdds'
elif medium == 'dvd':
item = 'dvds'
elif medium == 'floppy':
item = 'floppies'
items = list_items(item)
if uuid_in not in items:
raise CommandExecutionError('UUID {0} was not found'.format(uuid_in))
params += ' ' + uuid_in
elif file_in:
if not os.path.exists(file_in):
raise CommandExecutionError('File {0} was not found'.format(file_in))
params += ' ' + file_in
if (uuid_out and file_out) or (not uuid_out and not file_out):
raise CommandExecutionError(
'Either uuid_out or file_out must be used, but not both.'
)
if uuid_out:
params += ' ' + uuid_out
elif file_out:
try:
salt.utils.files.fopen(file_out, 'w').close() # pylint: disable=resource-leakage
os.unlink(file_out)
params += ' ' + file_out
except OSError:
raise CommandExecutionError('{0} is not a valid filename'.format(file_out))
if mformat:
valid_mformat = ('VDI', 'VMDK', 'VHD', 'RAW')
if mformat not in valid_mformat:
raise CommandExecutionError(
'If specified, mformat must be one of: {0}'.format(', '.join(valid_mformat))
)
else:
params += ' --format ' + mformat
valid_variant = ('Standard', 'Fixed', 'Split2G', 'Stream', 'ESX')
if variant and variant not in valid_variant:
if not os.path.exists(file_in):
raise CommandExecutionError(
'If specified, variant must be one of: {0}'.format(', '.join(valid_variant))
)
else:
params += ' --variant ' + variant
if existing:
params += ' --existing'
cmd = '{0} clonemedium {1}'.format(vboxcmd(), params)
ret = salt.modules.cmdmod.run_all(cmd)
if ret['retcode'] == 0:
return True
return ret['stderr'] | [
"def",
"clonemedium",
"(",
"medium",
",",
"uuid_in",
"=",
"None",
",",
"file_in",
"=",
"None",
",",
"uuid_out",
"=",
"None",
",",
"file_out",
"=",
"None",
",",
"mformat",
"=",
"None",
",",
"variant",
"=",
"None",
",",
"existing",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"''",
"valid_mediums",
"=",
"(",
"'disk'",
",",
"'dvd'",
",",
"'floppy'",
")",
"if",
"medium",
"in",
"valid_mediums",
":",
"params",
"+=",
"medium",
"else",
":",
"raise",
"CommandExecutionError",
"(",
"'Medium must be one of: {0}.'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"valid_mediums",
")",
")",
")",
"if",
"(",
"uuid_in",
"and",
"file_in",
")",
"or",
"(",
"not",
"uuid_in",
"and",
"not",
"file_in",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Either uuid_in or file_in must be used, but not both.'",
")",
"if",
"uuid_in",
":",
"if",
"medium",
"==",
"'disk'",
":",
"item",
"=",
"'hdds'",
"elif",
"medium",
"==",
"'dvd'",
":",
"item",
"=",
"'dvds'",
"elif",
"medium",
"==",
"'floppy'",
":",
"item",
"=",
"'floppies'",
"items",
"=",
"list_items",
"(",
"item",
")",
"if",
"uuid_in",
"not",
"in",
"items",
":",
"raise",
"CommandExecutionError",
"(",
"'UUID {0} was not found'",
".",
"format",
"(",
"uuid_in",
")",
")",
"params",
"+=",
"' '",
"+",
"uuid_in",
"elif",
"file_in",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_in",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'File {0} was not found'",
".",
"format",
"(",
"file_in",
")",
")",
"params",
"+=",
"' '",
"+",
"file_in",
"if",
"(",
"uuid_out",
"and",
"file_out",
")",
"or",
"(",
"not",
"uuid_out",
"and",
"not",
"file_out",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Either uuid_out or file_out must be used, but not both.'",
")",
"if",
"uuid_out",
":",
"params",
"+=",
"' '",
"+",
"uuid_out",
"elif",
"file_out",
":",
"try",
":",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"file_out",
",",
"'w'",
")",
".",
"close",
"(",
")",
"# pylint: disable=resource-leakage",
"os",
".",
"unlink",
"(",
"file_out",
")",
"params",
"+=",
"' '",
"+",
"file_out",
"except",
"OSError",
":",
"raise",
"CommandExecutionError",
"(",
"'{0} is not a valid filename'",
".",
"format",
"(",
"file_out",
")",
")",
"if",
"mformat",
":",
"valid_mformat",
"=",
"(",
"'VDI'",
",",
"'VMDK'",
",",
"'VHD'",
",",
"'RAW'",
")",
"if",
"mformat",
"not",
"in",
"valid_mformat",
":",
"raise",
"CommandExecutionError",
"(",
"'If specified, mformat must be one of: {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"valid_mformat",
")",
")",
")",
"else",
":",
"params",
"+=",
"' --format '",
"+",
"mformat",
"valid_variant",
"=",
"(",
"'Standard'",
",",
"'Fixed'",
",",
"'Split2G'",
",",
"'Stream'",
",",
"'ESX'",
")",
"if",
"variant",
"and",
"variant",
"not",
"in",
"valid_variant",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_in",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'If specified, variant must be one of: {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"valid_variant",
")",
")",
")",
"else",
":",
"params",
"+=",
"' --variant '",
"+",
"variant",
"if",
"existing",
":",
"params",
"+=",
"' --existing'",
"cmd",
"=",
"'{0} clonemedium {1}'",
".",
"format",
"(",
"vboxcmd",
"(",
")",
",",
"params",
")",
"ret",
"=",
"salt",
".",
"modules",
".",
"cmdmod",
".",
"run_all",
"(",
"cmd",
")",
"if",
"ret",
"[",
"'retcode'",
"]",
"==",
"0",
":",
"return",
"True",
"return",
"ret",
"[",
"'stderr'",
"]"
] | Clone a VirtualBox medium from an existing medium
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.clonemedium <name> <new_name> | [
"Clone",
"a",
"new",
"VM",
"from",
"an",
"existing",
"VM"
] | python | train |
RedFantom/ttkwidgets | ttkwidgets/itemscanvas.py | https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/itemscanvas.py#L220-L243 | def cget(self, key):
"""
Query widget option.
:param key: option name
:type key: str
:return: value of the option
To get the list of options for this widget, call the method :meth:`~ItemsCanvas.keys`.
"""
if key is "canvaswidth":
return self._canvaswidth
elif key is "canvasheight":
return self._canvasheight
elif key is "function_new":
return self._function_new
elif key is "callback_add":
return self._callback_add
elif key is "callback_del":
return self._callback_del
elif key is "callback_move":
return self._callback_move
else:
ttk.Frame.cget(self, key) | [
"def",
"cget",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"is",
"\"canvaswidth\"",
":",
"return",
"self",
".",
"_canvaswidth",
"elif",
"key",
"is",
"\"canvasheight\"",
":",
"return",
"self",
".",
"_canvasheight",
"elif",
"key",
"is",
"\"function_new\"",
":",
"return",
"self",
".",
"_function_new",
"elif",
"key",
"is",
"\"callback_add\"",
":",
"return",
"self",
".",
"_callback_add",
"elif",
"key",
"is",
"\"callback_del\"",
":",
"return",
"self",
".",
"_callback_del",
"elif",
"key",
"is",
"\"callback_move\"",
":",
"return",
"self",
".",
"_callback_move",
"else",
":",
"ttk",
".",
"Frame",
".",
"cget",
"(",
"self",
",",
"key",
")"
] | Query widget option.
:param key: option name
:type key: str
:return: value of the option
To get the list of options for this widget, call the method :meth:`~ItemsCanvas.keys`. | [
"Query",
"widget",
"option",
"."
] | python | train |
ga4gh/ga4gh-client | ga4gh/client/client.py | https://github.com/ga4gh/ga4gh-client/blob/d23b00b89112ef0930d45ee75aa3c6de3db615c5/ga4gh/client/client.py#L814-L826 | def search_rna_quantifications(self, rna_quantification_set_id=""):
"""
Returns an iterator over the RnaQuantification objects from the server
:param str rna_quantification_set_id: The ID of the
:class:`ga4gh.protocol.RnaQuantificationSet` of interest.
"""
request = protocol.SearchRnaQuantificationsRequest()
request.rna_quantification_set_id = rna_quantification_set_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "rnaquantifications",
protocol.SearchRnaQuantificationsResponse) | [
"def",
"search_rna_quantifications",
"(",
"self",
",",
"rna_quantification_set_id",
"=",
"\"\"",
")",
":",
"request",
"=",
"protocol",
".",
"SearchRnaQuantificationsRequest",
"(",
")",
"request",
".",
"rna_quantification_set_id",
"=",
"rna_quantification_set_id",
"request",
".",
"page_size",
"=",
"pb",
".",
"int",
"(",
"self",
".",
"_page_size",
")",
"return",
"self",
".",
"_run_search_request",
"(",
"request",
",",
"\"rnaquantifications\"",
",",
"protocol",
".",
"SearchRnaQuantificationsResponse",
")"
] | Returns an iterator over the RnaQuantification objects from the server
:param str rna_quantification_set_id: The ID of the
:class:`ga4gh.protocol.RnaQuantificationSet` of interest. | [
"Returns",
"an",
"iterator",
"over",
"the",
"RnaQuantification",
"objects",
"from",
"the",
"server"
] | python | train |
Diviyan-Kalainathan/CausalDiscoveryToolbox | cdt/utils/graph.py | https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/utils/graph.py#L176-L213 | def aracne(m, **kwargs):
"""Implementation of the ARACNE algorithm.
Args:
mat (numpy.ndarray): matrix, if it is a square matrix, the program assumes
it is a relevance matrix where mat(i,j) represents the similarity content
between nodes i and j. Elements of matrix should be
non-negative.
Returns:
mat_nd (numpy.ndarray): Output deconvolved matrix (direct dependency matrix). Its components
represent direct edge weights of observed interactions.
.. note::
Ref: ARACNE: An Algorithm for the Reconstruction of Gene Regulatory Networks in a Mammalian Cellular Context
Adam A Margolin, Ilya Nemenman, Katia Basso, Chris Wiggins, Gustavo Stolovitzky, Riccardo Dalla Favera and Andrea Califano
DOI: https://doi.org/10.1186/1471-2105-7-S1-S7
"""
I0 = kwargs.get('I0', 0.0) # No default thresholding
W0 = kwargs.get('W0', 0.05)
# thresholding
m = np.where(m > I0, m, 0)
# Finding triplets and filtering them
for i in range(m.shape[0]-2):
for j in range(i+1, m.shape[0]-1):
for k in range(j+1, m.shape[0]):
triplet = [m[i, j], m[j, k], m[i, k]]
min_index, min_value = min(enumerate(triplet), key=operator.itemgetter(1))
if 0 < min_value < W0:
if min_index == 0:
m[i, j] = m[j, i] = 0.
elif min_index == 1:
m[j, k] = m[k, j] = 0.
else:
m[i, k] = m[k, i] = 0.
return m | [
"def",
"aracne",
"(",
"m",
",",
"*",
"*",
"kwargs",
")",
":",
"I0",
"=",
"kwargs",
".",
"get",
"(",
"'I0'",
",",
"0.0",
")",
"# No default thresholding",
"W0",
"=",
"kwargs",
".",
"get",
"(",
"'W0'",
",",
"0.05",
")",
"# thresholding",
"m",
"=",
"np",
".",
"where",
"(",
"m",
">",
"I0",
",",
"m",
",",
"0",
")",
"# Finding triplets and filtering them",
"for",
"i",
"in",
"range",
"(",
"m",
".",
"shape",
"[",
"0",
"]",
"-",
"2",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"m",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
":",
"for",
"k",
"in",
"range",
"(",
"j",
"+",
"1",
",",
"m",
".",
"shape",
"[",
"0",
"]",
")",
":",
"triplet",
"=",
"[",
"m",
"[",
"i",
",",
"j",
"]",
",",
"m",
"[",
"j",
",",
"k",
"]",
",",
"m",
"[",
"i",
",",
"k",
"]",
"]",
"min_index",
",",
"min_value",
"=",
"min",
"(",
"enumerate",
"(",
"triplet",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"1",
")",
")",
"if",
"0",
"<",
"min_value",
"<",
"W0",
":",
"if",
"min_index",
"==",
"0",
":",
"m",
"[",
"i",
",",
"j",
"]",
"=",
"m",
"[",
"j",
",",
"i",
"]",
"=",
"0.",
"elif",
"min_index",
"==",
"1",
":",
"m",
"[",
"j",
",",
"k",
"]",
"=",
"m",
"[",
"k",
",",
"j",
"]",
"=",
"0.",
"else",
":",
"m",
"[",
"i",
",",
"k",
"]",
"=",
"m",
"[",
"k",
",",
"i",
"]",
"=",
"0.",
"return",
"m"
] | Implementation of the ARACNE algorithm.
Args:
mat (numpy.ndarray): matrix, if it is a square matrix, the program assumes
it is a relevance matrix where mat(i,j) represents the similarity content
between nodes i and j. Elements of matrix should be
non-negative.
Returns:
mat_nd (numpy.ndarray): Output deconvolved matrix (direct dependency matrix). Its components
represent direct edge weights of observed interactions.
.. note::
Ref: ARACNE: An Algorithm for the Reconstruction of Gene Regulatory Networks in a Mammalian Cellular Context
Adam A Margolin, Ilya Nemenman, Katia Basso, Chris Wiggins, Gustavo Stolovitzky, Riccardo Dalla Favera and Andrea Califano
DOI: https://doi.org/10.1186/1471-2105-7-S1-S7 | [
"Implementation",
"of",
"the",
"ARACNE",
"algorithm",
"."
] | python | valid |
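A small illustrative run of the aracne routine above on a 3-node mutual-information matrix; the import path cdt.utils.graph is taken from the record's path, while the matrix values and the I0/W0 choices are assumptions made so the triplet rule visibly fires.

import numpy as np
from cdt.utils.graph import aracne  # assumed importable from the module path shown above

mi = np.array([[0.0, 0.9, 0.3],
               [0.9, 0.0, 0.8],
               [0.3, 0.8, 0.0]])

# With W0 set high enough, the weakest edge of the single (0, 1, 2) triplet
# -- the indirect 0-2 link at 0.3 -- is zeroed out as an indirect interaction.
pruned = aracne(mi, I0=0.1, W0=1.0)
print(pruned)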
boriel/zxbasic | zxb.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxb.py#L74-L351 | def main(args=None):
""" Entry point when executed from command line.
You can use zxb.py as a module with import, and this
function won't be executed.
"""
api.config.init()
zxbpp.init()
zxbparser.init()
arch.zx48k.backend.init()
arch.zx48k.Translator.reset()
asmparse.init()
# ------------------------------------------------------------
# Command line parsing
# ------------------------------------------------------------
parser = argparse.ArgumentParser(prog='zxb')
parser.add_argument('PROGRAM', type=str,
help='BASIC program file')
parser.add_argument('-d', '--debug', dest='debug', default=OPTIONS.Debug.value, action='count',
help='Enable verbosity/debugging output. Additional -d increase verbosity/debug level')
parser.add_argument('-O', '--optimize', type=int, default=OPTIONS.optimization.value,
help='Sets optimization level. '
'0 = None (default level is {0})'.format(OPTIONS.optimization.value))
parser.add_argument('-o', '--output', type=str, dest='output_file', default=None,
help='Sets output file. Default is input filename with .bin extension')
parser.add_argument('-T', '--tzx', action='store_true',
help="Sets output format to tzx (default is .bin)")
parser.add_argument('-t', '--tap', action='store_true',
help="Sets output format to tap (default is .bin)")
parser.add_argument('-B', '--BASIC', action='store_true', dest='basic',
help="Creates a BASIC loader which loads the rest of the CODE. Requires -T ot -t")
parser.add_argument('-a', '--autorun', action='store_true',
help="Sets the program to be run once loaded")
parser.add_argument('-A', '--asm', action='store_true',
help="Sets output format to asm")
parser.add_argument('-S', '--org', type=str, default=str(OPTIONS.org.value),
help="Start of machine code. By default %i" % OPTIONS.org.value)
parser.add_argument('-e', '--errmsg', type=str, dest='stderr', default=OPTIONS.StdErrFileName.value,
help='Error messages file (standard error console by default)')
parser.add_argument('--array-base', type=int, default=OPTIONS.array_base.value,
help='Default lower index for arrays ({0} by default)'.format(OPTIONS.array_base.value))
parser.add_argument('--string-base', type=int, default=OPTIONS.string_base.value,
help='Default lower index for strings ({0} by default)'.format(OPTIONS.array_base.value))
parser.add_argument('-Z', '--sinclair', action='store_true',
help='Enable by default some more original ZX Spectrum Sinclair BASIC features: ATTR, SCREEN$, '
'POINT')
parser.add_argument('-H', '--heap-size', type=int, default=OPTIONS.heap_size.value,
help='Sets heap size in bytes (default {0} bytes)'.format(OPTIONS.heap_size.value))
parser.add_argument('--debug-memory', action='store_true',
help='Enables out-of-memory debug')
parser.add_argument('--debug-array', action='store_true',
help='Enables array boundary checking')
parser.add_argument('--strict-bool', action='store_true',
help='Enforce boolean values to be 0 or 1')
parser.add_argument('--enable-break', action='store_true',
help='Enables program execution BREAK detection')
parser.add_argument('-E', '--emit-backend', action='store_true',
help='Emits backend code instead of ASM or binary')
parser.add_argument('--explicit', action='store_true',
help='Requires all variables and functions to be declared before used')
parser.add_argument('-D', '--define', type=str, dest='defines', action='append',
help='Defines the given macro. Eg. -D MYDEBUG or -D NAME=Value')
parser.add_argument('-M', '--mmap', type=str, dest='memory_map', default=None,
help='Generate label memory map')
parser.add_argument('-i', '--ignore-case', action='store_true',
help='Ignore case. Makes variable names case insensitive')
parser.add_argument('-I', '--include-path', type=str, default='',
help='Add colon separated list of directories to add to include path. e.g. -I dir1:dir2')
parser.add_argument('--strict', action='store_true',
help='Enables strict mode. Force explicit type declaration')
parser.add_argument('--headerless', action='store_true',
help='Header-less mode: omit asm prologue and epilogue')
parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(VERSION))
parser.add_argument('--parse-only', action='store_true',
help='Only parses to check for syntax and semantic errors')
parser.add_argument('--append-binary', default=[], action='append',
help='Appends binary to tape file (only works with -t or -T)')
parser.add_argument('--append-headless-binary', default=[], action='append',
help='Appends binary to tape file (only works with -t or -T)')
options = parser.parse_args(args=args)
# ------------------------------------------------------------
# Setting of internal parameters according to command line
# ------------------------------------------------------------
OPTIONS.Debug.value = options.debug
OPTIONS.optimization.value = options.optimize
OPTIONS.outputFileName.value = options.output_file
OPTIONS.StdErrFileName.value = options.stderr
OPTIONS.array_base.value = options.array_base
OPTIONS.string_base.value = options.string_base
OPTIONS.Sinclair.value = options.sinclair
OPTIONS.heap_size.value = options.heap_size
OPTIONS.memoryCheck.value = options.debug_memory
OPTIONS.strictBool.value = options.strict_bool or OPTIONS.Sinclair.value
OPTIONS.arrayCheck.value = options.debug_array
OPTIONS.emitBackend.value = options.emit_backend
OPTIONS.enableBreak.value = options.enable_break
OPTIONS.explicit.value = options.explicit
OPTIONS.memory_map.value = options.memory_map
OPTIONS.strict.value = options.strict
OPTIONS.headerless.value = options.headerless
OPTIONS.org.value = api.utils.parse_int(options.org)
if OPTIONS.org.value is None:
parser.error("Invalid --org option '{}'".format(options.org))
if options.defines:
for i in options.defines:
name, val = tuple(i.split('=', 1))
OPTIONS.__DEFINES.value[name] = val
zxbpp.ID_TABLE.define(name, lineno=0)
if OPTIONS.Sinclair.value:
OPTIONS.array_base.value = 1
OPTIONS.string_base.value = 1
OPTIONS.strictBool.value = True
OPTIONS.case_insensitive.value = True
if options.ignore_case:
OPTIONS.case_insensitive.value = True
debug.ENABLED = OPTIONS.Debug.value
if int(options.tzx) + int(options.tap) + int(options.asm) + int(options.emit_backend) + \
int(options.parse_only) > 1:
parser.error("Options --tap, --tzx, --emit-backend, --parse-only and --asm are mutually exclusive")
return 3
if options.basic and not options.tzx and not options.tap:
parser.error('Option --BASIC and --autorun requires --tzx or tap format')
return 4
if options.append_binary and not options.tzx and not options.tap:
parser.error('Option --append-binary needs either --tap or --tzx')
return 5
OPTIONS.use_loader.value = options.basic
OPTIONS.autorun.value = options.autorun
if options.tzx:
OPTIONS.output_file_type.value = 'tzx'
elif options.tap:
OPTIONS.output_file_type.value = 'tap'
elif options.asm:
OPTIONS.output_file_type.value = 'asm'
elif options.emit_backend:
OPTIONS.output_file_type.value = 'ic'
args = [options.PROGRAM]
if not os.path.exists(options.PROGRAM):
parser.error("No such file or directory: '%s'" % args[0])
return 2
if OPTIONS.memoryCheck.value:
OPTIONS.__DEFINES.value['__MEMORY_CHECK__'] = ''
zxbpp.ID_TABLE.define('__MEMORY_CHECK__', lineno=0)
if OPTIONS.arrayCheck.value:
OPTIONS.__DEFINES.value['__CHECK_ARRAY_BOUNDARY__'] = ''
zxbpp.ID_TABLE.define('__CHECK_ARRAY_BOUNDARY__', lineno=0)
OPTIONS.include_path.value = options.include_path
OPTIONS.inputFileName.value = zxbparser.FILENAME = \
os.path.basename(args[0])
if not OPTIONS.outputFileName.value:
OPTIONS.outputFileName.value = \
os.path.splitext(os.path.basename(OPTIONS.inputFileName.value))[0] + os.path.extsep + \
OPTIONS.output_file_type.value
if OPTIONS.StdErrFileName.value:
OPTIONS.stderr.value = open_file(OPTIONS.StdErrFileName.value, 'wt', 'utf-8')
zxbpp.setMode('basic')
zxbpp.main(args)
if gl.has_errors:
debug.__DEBUG__("exiting due to errors.")
return 1 # Exit with errors
input_ = zxbpp.OUTPUT
zxbparser.parser.parse(input_, lexer=zxblex.lexer, tracking=True,
debug=(OPTIONS.Debug.value > 2))
if gl.has_errors:
debug.__DEBUG__("exiting due to errors.")
return 1 # Exit with errors
# Optimizations
optimizer = api.optimize.OptimizerVisitor()
optimizer.visit(zxbparser.ast)
# Emits intermediate code
translator = arch.zx48k.Translator()
translator.visit(zxbparser.ast)
if gl.DATA_IS_USED:
gl.FUNCTIONS.extend(gl.DATA_FUNCTIONS)
# This will fill MEMORY with pending functions
func_visitor = arch.zx48k.FunctionTranslator(gl.FUNCTIONS)
func_visitor.start()
# Emits data lines
translator.emit_data_blocks()
# Emits default constant strings
translator.emit_strings()
# Emits jump tables
translator.emit_jump_tables()
if OPTIONS.emitBackend.value:
with open_file(OPTIONS.outputFileName.value, 'wt', 'utf-8') as output_file:
for quad in translator.dumpMemory(backend.MEMORY):
output_file.write(str(quad) + '\n')
backend.MEMORY[:] = [] # Empties memory
# This will fill MEMORY with global declared variables
translator = arch.zx48k.VarTranslator()
translator.visit(zxbparser.data_ast)
for quad in translator.dumpMemory(backend.MEMORY):
output_file.write(str(quad) + '\n')
return 0 # Exit success
# Join all lines into a single string and ensures an INTRO at end of file
asm_output = backend.emit(backend.MEMORY)
asm_output = optimize(asm_output) + '\n'
asm_output = asm_output.split('\n')
for i in range(len(asm_output)):
tmp = backend.ASMS.get(asm_output[i], None)
if tmp is not None:
asm_output[i] = '\n'.join(tmp)
asm_output = '\n'.join(asm_output)
# Now filter them against the preprocessor again
zxbpp.setMode('asm')
zxbpp.OUTPUT = ''
zxbpp.filter_(asm_output, args[0])
# Now output the result
asm_output = zxbpp.OUTPUT.split('\n')
get_inits(asm_output) # Find out remaining inits
backend.MEMORY[:] = []
# This will fill MEMORY with global declared variables
translator = arch.zx48k.VarTranslator()
translator.visit(zxbparser.data_ast)
if gl.has_errors:
debug.__DEBUG__("exiting due to errors.")
return 1 # Exit with errors
tmp = [x for x in backend.emit(backend.MEMORY) if x.strip()[0] != '#']
asm_output += tmp
asm_output = backend.emit_start() + asm_output
asm_output += backend.emit_end(asm_output)
if options.asm: # Only output assembler file
with open_file(OPTIONS.outputFileName.value, 'wt', 'utf-8') as output_file:
output(asm_output, output_file)
elif not options.parse_only:
fout = StringIO()
output(asm_output, fout)
asmparse.assemble(fout.getvalue())
fout.close()
asmparse.generate_binary(OPTIONS.outputFileName.value, OPTIONS.output_file_type.value,
binary_files=options.append_binary,
headless_binary_files=options.append_headless_binary)
if gl.has_errors:
return 5 # Error in assembly
if OPTIONS.memory_map.value:
with open_file(OPTIONS.memory_map.value, 'wt', 'utf-8') as f:
f.write(asmparse.MEMORY.memory_map)
return gl.has_errors | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"api",
".",
"config",
".",
"init",
"(",
")",
"zxbpp",
".",
"init",
"(",
")",
"zxbparser",
".",
"init",
"(",
")",
"arch",
".",
"zx48k",
".",
"backend",
".",
"init",
"(",
")",
"arch",
".",
"zx48k",
".",
"Translator",
".",
"reset",
"(",
")",
"asmparse",
".",
"init",
"(",
")",
"# ------------------------------------------------------------",
"# Command line parsing",
"# ------------------------------------------------------------",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'zxb'",
")",
"parser",
".",
"add_argument",
"(",
"'PROGRAM'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'BASIC program file'",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"dest",
"=",
"'debug'",
",",
"default",
"=",
"OPTIONS",
".",
"Debug",
".",
"value",
",",
"action",
"=",
"'count'",
",",
"help",
"=",
"'Enable verbosity/debugging output. Additional -d increase verbosity/debug level'",
")",
"parser",
".",
"add_argument",
"(",
"'-O'",
",",
"'--optimize'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"OPTIONS",
".",
"optimization",
".",
"value",
",",
"help",
"=",
"'Sets optimization level. '",
"'0 = None (default level is {0})'",
".",
"format",
"(",
"OPTIONS",
".",
"optimization",
".",
"value",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output'",
",",
"type",
"=",
"str",
",",
"dest",
"=",
"'output_file'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Sets output file. Default is input filename with .bin extension'",
")",
"parser",
".",
"add_argument",
"(",
"'-T'",
",",
"'--tzx'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Sets output format to tzx (default is .bin)\"",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--tap'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Sets output format to tap (default is .bin)\"",
")",
"parser",
".",
"add_argument",
"(",
"'-B'",
",",
"'--BASIC'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'basic'",
",",
"help",
"=",
"\"Creates a BASIC loader which loads the rest of the CODE. Requires -T ot -t\"",
")",
"parser",
".",
"add_argument",
"(",
"'-a'",
",",
"'--autorun'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Sets the program to be run once loaded\"",
")",
"parser",
".",
"add_argument",
"(",
"'-A'",
",",
"'--asm'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Sets output format to asm\"",
")",
"parser",
".",
"add_argument",
"(",
"'-S'",
",",
"'--org'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"str",
"(",
"OPTIONS",
".",
"org",
".",
"value",
")",
",",
"help",
"=",
"\"Start of machine code. By default %i\"",
"%",
"OPTIONS",
".",
"org",
".",
"value",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--errmsg'",
",",
"type",
"=",
"str",
",",
"dest",
"=",
"'stderr'",
",",
"default",
"=",
"OPTIONS",
".",
"StdErrFileName",
".",
"value",
",",
"help",
"=",
"'Error messages file (standard error console by default)'",
")",
"parser",
".",
"add_argument",
"(",
"'--array-base'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"OPTIONS",
".",
"array_base",
".",
"value",
",",
"help",
"=",
"'Default lower index for arrays ({0} by default)'",
".",
"format",
"(",
"OPTIONS",
".",
"array_base",
".",
"value",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--string-base'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"OPTIONS",
".",
"string_base",
".",
"value",
",",
"help",
"=",
"'Default lower index for strings ({0} by default)'",
".",
"format",
"(",
"OPTIONS",
".",
"array_base",
".",
"value",
")",
")",
"parser",
".",
"add_argument",
"(",
"'-Z'",
",",
"'--sinclair'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Enable by default some more original ZX Spectrum Sinclair BASIC features: ATTR, SCREEN$, '",
"'POINT'",
")",
"parser",
".",
"add_argument",
"(",
"'-H'",
",",
"'--heap-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"OPTIONS",
".",
"heap_size",
".",
"value",
",",
"help",
"=",
"'Sets heap size in bytes (default {0} bytes)'",
".",
"format",
"(",
"OPTIONS",
".",
"heap_size",
".",
"value",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--debug-memory'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Enables out-of-memory debug'",
")",
"parser",
".",
"add_argument",
"(",
"'--debug-array'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Enables array boundary checking'",
")",
"parser",
".",
"add_argument",
"(",
"'--strict-bool'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Enforce boolean values to be 0 or 1'",
")",
"parser",
".",
"add_argument",
"(",
"'--enable-break'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Enables program execution BREAK detection'",
")",
"parser",
".",
"add_argument",
"(",
"'-E'",
",",
"'--emit-backend'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Emits backend code instead of ASM or binary'",
")",
"parser",
".",
"add_argument",
"(",
"'--explicit'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Requires all variables and functions to be declared before used'",
")",
"parser",
".",
"add_argument",
"(",
"'-D'",
",",
"'--define'",
",",
"type",
"=",
"str",
",",
"dest",
"=",
"'defines'",
",",
"action",
"=",
"'append'",
",",
"help",
"=",
"'Defines de given macro. Eg. -D MYDEBUG or -D NAME=Value'",
")",
"parser",
".",
"add_argument",
"(",
"'-M'",
",",
"'--mmap'",
",",
"type",
"=",
"str",
",",
"dest",
"=",
"'memory_map'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Generate label memory map'",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--ignore-case'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Ignore case. Makes variable names are case insensitive'",
")",
"parser",
".",
"add_argument",
"(",
"'-I'",
",",
"'--include-path'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"help",
"=",
"'Add colon separated list of directories to add to include path. e.g. -I dir1:dir2'",
")",
"parser",
".",
"add_argument",
"(",
"'--strict'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Enables strict mode. Force explicit type declaration'",
")",
"parser",
".",
"add_argument",
"(",
"'--headerless'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Header-less mode: omit asm prologue and epilogue'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'%(prog)s {0}'",
".",
"format",
"(",
"VERSION",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--parse-only'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Only parses to check for syntax and semantic errors'",
")",
"parser",
".",
"add_argument",
"(",
"'--append-binary'",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"'append'",
",",
"help",
"=",
"'Appends binary to tape file (only works with -t or -T)'",
")",
"parser",
".",
"add_argument",
"(",
"'--append-headless-binary'",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"'append'",
",",
"help",
"=",
"'Appends binary to tape file (only works with -t or -T)'",
")",
"options",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"args",
")",
"# ------------------------------------------------------------",
"# Setting of internal parameters according to command line",
"# ------------------------------------------------------------",
"OPTIONS",
".",
"Debug",
".",
"value",
"=",
"options",
".",
"debug",
"OPTIONS",
".",
"optimization",
".",
"value",
"=",
"options",
".",
"optimize",
"OPTIONS",
".",
"outputFileName",
".",
"value",
"=",
"options",
".",
"output_file",
"OPTIONS",
".",
"StdErrFileName",
".",
"value",
"=",
"options",
".",
"stderr",
"OPTIONS",
".",
"array_base",
".",
"value",
"=",
"options",
".",
"array_base",
"OPTIONS",
".",
"string_base",
".",
"value",
"=",
"options",
".",
"string_base",
"OPTIONS",
".",
"Sinclair",
".",
"value",
"=",
"options",
".",
"sinclair",
"OPTIONS",
".",
"heap_size",
".",
"value",
"=",
"options",
".",
"heap_size",
"OPTIONS",
".",
"memoryCheck",
".",
"value",
"=",
"options",
".",
"debug_memory",
"OPTIONS",
".",
"strictBool",
".",
"value",
"=",
"options",
".",
"strict_bool",
"or",
"OPTIONS",
".",
"Sinclair",
".",
"value",
"OPTIONS",
".",
"arrayCheck",
".",
"value",
"=",
"options",
".",
"debug_array",
"OPTIONS",
".",
"emitBackend",
".",
"value",
"=",
"options",
".",
"emit_backend",
"OPTIONS",
".",
"enableBreak",
".",
"value",
"=",
"options",
".",
"enable_break",
"OPTIONS",
".",
"explicit",
".",
"value",
"=",
"options",
".",
"explicit",
"OPTIONS",
".",
"memory_map",
".",
"value",
"=",
"options",
".",
"memory_map",
"OPTIONS",
".",
"strict",
".",
"value",
"=",
"options",
".",
"strict",
"OPTIONS",
".",
"headerless",
".",
"value",
"=",
"options",
".",
"headerless",
"OPTIONS",
".",
"org",
".",
"value",
"=",
"api",
".",
"utils",
".",
"parse_int",
"(",
"options",
".",
"org",
")",
"if",
"OPTIONS",
".",
"org",
".",
"value",
"is",
"None",
":",
"parser",
".",
"error",
"(",
"\"Invalid --org option '{}'\"",
".",
"format",
"(",
"options",
".",
"org",
")",
")",
"if",
"options",
".",
"defines",
":",
"for",
"i",
"in",
"options",
".",
"defines",
":",
"name",
",",
"val",
"=",
"tuple",
"(",
"i",
".",
"split",
"(",
"'='",
",",
"1",
")",
")",
"OPTIONS",
".",
"__DEFINES",
".",
"value",
"[",
"name",
"]",
"=",
"val",
"zxbpp",
".",
"ID_TABLE",
".",
"define",
"(",
"name",
",",
"lineno",
"=",
"0",
")",
"if",
"OPTIONS",
".",
"Sinclair",
".",
"value",
":",
"OPTIONS",
".",
"array_base",
".",
"value",
"=",
"1",
"OPTIONS",
".",
"string_base",
".",
"value",
"=",
"1",
"OPTIONS",
".",
"strictBool",
".",
"value",
"=",
"True",
"OPTIONS",
".",
"case_insensitive",
".",
"value",
"=",
"True",
"if",
"options",
".",
"ignore_case",
":",
"OPTIONS",
".",
"case_insensitive",
".",
"value",
"=",
"True",
"debug",
".",
"ENABLED",
"=",
"OPTIONS",
".",
"Debug",
".",
"value",
"if",
"int",
"(",
"options",
".",
"tzx",
")",
"+",
"int",
"(",
"options",
".",
"tap",
")",
"+",
"int",
"(",
"options",
".",
"asm",
")",
"+",
"int",
"(",
"options",
".",
"emit_backend",
")",
"+",
"int",
"(",
"options",
".",
"parse_only",
")",
">",
"1",
":",
"parser",
".",
"error",
"(",
"\"Options --tap, --tzx, --emit-backend, --parse-only and --asm are mutually exclusive\"",
")",
"return",
"3",
"if",
"options",
".",
"basic",
"and",
"not",
"options",
".",
"tzx",
"and",
"not",
"options",
".",
"tap",
":",
"parser",
".",
"error",
"(",
"'Option --BASIC and --autorun requires --tzx or tap format'",
")",
"return",
"4",
"if",
"options",
".",
"append_binary",
"and",
"not",
"options",
".",
"tzx",
"and",
"not",
"options",
".",
"tap",
":",
"parser",
".",
"error",
"(",
"'Option --append-binary needs either --tap or --tzx'",
")",
"return",
"5",
"OPTIONS",
".",
"use_loader",
".",
"value",
"=",
"options",
".",
"basic",
"OPTIONS",
".",
"autorun",
".",
"value",
"=",
"options",
".",
"autorun",
"if",
"options",
".",
"tzx",
":",
"OPTIONS",
".",
"output_file_type",
".",
"value",
"=",
"'tzx'",
"elif",
"options",
".",
"tap",
":",
"OPTIONS",
".",
"output_file_type",
".",
"value",
"=",
"'tap'",
"elif",
"options",
".",
"asm",
":",
"OPTIONS",
".",
"output_file_type",
".",
"value",
"=",
"'asm'",
"elif",
"options",
".",
"emit_backend",
":",
"OPTIONS",
".",
"output_file_type",
".",
"value",
"=",
"'ic'",
"args",
"=",
"[",
"options",
".",
"PROGRAM",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"PROGRAM",
")",
":",
"parser",
".",
"error",
"(",
"\"No such file or directory: '%s'\"",
"%",
"args",
"[",
"0",
"]",
")",
"return",
"2",
"if",
"OPTIONS",
".",
"memoryCheck",
".",
"value",
":",
"OPTIONS",
".",
"__DEFINES",
".",
"value",
"[",
"'__MEMORY_CHECK__'",
"]",
"=",
"''",
"zxbpp",
".",
"ID_TABLE",
".",
"define",
"(",
"'__MEMORY_CHECK__'",
",",
"lineno",
"=",
"0",
")",
"if",
"OPTIONS",
".",
"arrayCheck",
".",
"value",
":",
"OPTIONS",
".",
"__DEFINES",
".",
"value",
"[",
"'__CHECK_ARRAY_BOUNDARY__'",
"]",
"=",
"''",
"zxbpp",
".",
"ID_TABLE",
".",
"define",
"(",
"'__CHECK_ARRAY_BOUNDARY__'",
",",
"lineno",
"=",
"0",
")",
"OPTIONS",
".",
"include_path",
".",
"value",
"=",
"options",
".",
"include_path",
"OPTIONS",
".",
"inputFileName",
".",
"value",
"=",
"zxbparser",
".",
"FILENAME",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"args",
"[",
"0",
"]",
")",
"if",
"not",
"OPTIONS",
".",
"outputFileName",
".",
"value",
":",
"OPTIONS",
".",
"outputFileName",
".",
"value",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"OPTIONS",
".",
"inputFileName",
".",
"value",
")",
")",
"[",
"0",
"]",
"+",
"os",
".",
"path",
".",
"extsep",
"+",
"OPTIONS",
".",
"output_file_type",
".",
"value",
"if",
"OPTIONS",
".",
"StdErrFileName",
".",
"value",
":",
"OPTIONS",
".",
"stderr",
".",
"value",
"=",
"open_file",
"(",
"OPTIONS",
".",
"StdErrFileName",
".",
"value",
",",
"'wt'",
",",
"'utf-8'",
")",
"zxbpp",
".",
"setMode",
"(",
"'basic'",
")",
"zxbpp",
".",
"main",
"(",
"args",
")",
"if",
"gl",
".",
"has_errors",
":",
"debug",
".",
"__DEBUG__",
"(",
"\"exiting due to errors.\"",
")",
"return",
"1",
"# Exit with errors",
"input_",
"=",
"zxbpp",
".",
"OUTPUT",
"zxbparser",
".",
"parser",
".",
"parse",
"(",
"input_",
",",
"lexer",
"=",
"zxblex",
".",
"lexer",
",",
"tracking",
"=",
"True",
",",
"debug",
"=",
"(",
"OPTIONS",
".",
"Debug",
".",
"value",
">",
"2",
")",
")",
"if",
"gl",
".",
"has_errors",
":",
"debug",
".",
"__DEBUG__",
"(",
"\"exiting due to errors.\"",
")",
"return",
"1",
"# Exit with errors",
"# Optimizations",
"optimizer",
"=",
"api",
".",
"optimize",
".",
"OptimizerVisitor",
"(",
")",
"optimizer",
".",
"visit",
"(",
"zxbparser",
".",
"ast",
")",
"# Emits intermediate code",
"translator",
"=",
"arch",
".",
"zx48k",
".",
"Translator",
"(",
")",
"translator",
".",
"visit",
"(",
"zxbparser",
".",
"ast",
")",
"if",
"gl",
".",
"DATA_IS_USED",
":",
"gl",
".",
"FUNCTIONS",
".",
"extend",
"(",
"gl",
".",
"DATA_FUNCTIONS",
")",
"# This will fill MEMORY with pending functions",
"func_visitor",
"=",
"arch",
".",
"zx48k",
".",
"FunctionTranslator",
"(",
"gl",
".",
"FUNCTIONS",
")",
"func_visitor",
".",
"start",
"(",
")",
"# Emits data lines",
"translator",
".",
"emit_data_blocks",
"(",
")",
"# Emits default constant strings",
"translator",
".",
"emit_strings",
"(",
")",
"# Emits jump tables",
"translator",
".",
"emit_jump_tables",
"(",
")",
"if",
"OPTIONS",
".",
"emitBackend",
".",
"value",
":",
"with",
"open_file",
"(",
"OPTIONS",
".",
"outputFileName",
".",
"value",
",",
"'wt'",
",",
"'utf-8'",
")",
"as",
"output_file",
":",
"for",
"quad",
"in",
"translator",
".",
"dumpMemory",
"(",
"backend",
".",
"MEMORY",
")",
":",
"output_file",
".",
"write",
"(",
"str",
"(",
"quad",
")",
"+",
"'\\n'",
")",
"backend",
".",
"MEMORY",
"[",
":",
"]",
"=",
"[",
"]",
"# Empties memory",
"# This will fill MEMORY with global declared variables",
"translator",
"=",
"arch",
".",
"zx48k",
".",
"VarTranslator",
"(",
")",
"translator",
".",
"visit",
"(",
"zxbparser",
".",
"data_ast",
")",
"for",
"quad",
"in",
"translator",
".",
"dumpMemory",
"(",
"backend",
".",
"MEMORY",
")",
":",
"output_file",
".",
"write",
"(",
"str",
"(",
"quad",
")",
"+",
"'\\n'",
")",
"return",
"0",
"# Exit success",
"# Join all lines into a single string and ensures an INTRO at end of file",
"asm_output",
"=",
"backend",
".",
"emit",
"(",
"backend",
".",
"MEMORY",
")",
"asm_output",
"=",
"optimize",
"(",
"asm_output",
")",
"+",
"'\\n'",
"asm_output",
"=",
"asm_output",
".",
"split",
"(",
"'\\n'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"asm_output",
")",
")",
":",
"tmp",
"=",
"backend",
".",
"ASMS",
".",
"get",
"(",
"asm_output",
"[",
"i",
"]",
",",
"None",
")",
"if",
"tmp",
"is",
"not",
"None",
":",
"asm_output",
"[",
"i",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"tmp",
")",
"asm_output",
"=",
"'\\n'",
".",
"join",
"(",
"asm_output",
")",
"# Now filter them against the preprocessor again",
"zxbpp",
".",
"setMode",
"(",
"'asm'",
")",
"zxbpp",
".",
"OUTPUT",
"=",
"''",
"zxbpp",
".",
"filter_",
"(",
"asm_output",
",",
"args",
"[",
"0",
"]",
")",
"# Now output the result",
"asm_output",
"=",
"zxbpp",
".",
"OUTPUT",
".",
"split",
"(",
"'\\n'",
")",
"get_inits",
"(",
"asm_output",
")",
"# Find out remaining inits",
"backend",
".",
"MEMORY",
"[",
":",
"]",
"=",
"[",
"]",
"# This will fill MEMORY with global declared variables",
"translator",
"=",
"arch",
".",
"zx48k",
".",
"VarTranslator",
"(",
")",
"translator",
".",
"visit",
"(",
"zxbparser",
".",
"data_ast",
")",
"if",
"gl",
".",
"has_errors",
":",
"debug",
".",
"__DEBUG__",
"(",
"\"exiting due to errors.\"",
")",
"return",
"1",
"# Exit with errors",
"tmp",
"=",
"[",
"x",
"for",
"x",
"in",
"backend",
".",
"emit",
"(",
"backend",
".",
"MEMORY",
")",
"if",
"x",
".",
"strip",
"(",
")",
"[",
"0",
"]",
"!=",
"'#'",
"]",
"asm_output",
"+=",
"tmp",
"asm_output",
"=",
"backend",
".",
"emit_start",
"(",
")",
"+",
"asm_output",
"asm_output",
"+=",
"backend",
".",
"emit_end",
"(",
"asm_output",
")",
"if",
"options",
".",
"asm",
":",
"# Only output assembler file",
"with",
"open_file",
"(",
"OPTIONS",
".",
"outputFileName",
".",
"value",
",",
"'wt'",
",",
"'utf-8'",
")",
"as",
"output_file",
":",
"output",
"(",
"asm_output",
",",
"output_file",
")",
"elif",
"not",
"options",
".",
"parse_only",
":",
"fout",
"=",
"StringIO",
"(",
")",
"output",
"(",
"asm_output",
",",
"fout",
")",
"asmparse",
".",
"assemble",
"(",
"fout",
".",
"getvalue",
"(",
")",
")",
"fout",
".",
"close",
"(",
")",
"asmparse",
".",
"generate_binary",
"(",
"OPTIONS",
".",
"outputFileName",
".",
"value",
",",
"OPTIONS",
".",
"output_file_type",
".",
"value",
",",
"binary_files",
"=",
"options",
".",
"append_binary",
",",
"headless_binary_files",
"=",
"options",
".",
"append_headless_binary",
")",
"if",
"gl",
".",
"has_errors",
":",
"return",
"5",
"# Error in assembly",
"if",
"OPTIONS",
".",
"memory_map",
".",
"value",
":",
"with",
"open_file",
"(",
"OPTIONS",
".",
"memory_map",
".",
"value",
",",
"'wt'",
",",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"asmparse",
".",
"MEMORY",
".",
"memory_map",
")",
"return",
"gl",
".",
"has_errors"
] | Entry point when executed from command line.
You can use zxb.py as a module with import, and this
function won't be executed. | [
"Entry",
"point",
"when",
"executed",
"from",
"command",
"line",
".",
"You",
"can",
"use",
"zxb",
".",
"py",
"as",
"a",
"module",
"with",
"import",
"and",
"this",
"function",
"won",
"t",
"be",
"executed",
"."
] | python | train |
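A hypothetical driver sketch for the compiler entry point above, calling main() with an argv-style list; every flag used here (--tzx, --BASIC, --autorun, -O, -o) appears in the argparse setup shown, but the input file name and the assumption that zxb.py is importable from the working directory are made up for illustration.

from zxb import main  # assumes zxb.py is on the import path

# Compile prog.bas into an autorun TZX image with a BASIC loader at optimization level 2
rc = main(['prog.bas', '--tzx', '--BASIC', '--autorun', '-O', '2', '-o', 'prog.tzx'])
print('exit status:', rc)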
deepmipt/DeepPavlov | deeppavlov/metrics/squad_metrics.py | https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/squad_metrics.py#L68-L100 | def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:
""" Calculates F-1 score between y_true and y_predicted
F-1 score uses the best matching y_true answer
The same as in SQuAD-v2.0
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
F-1 score : float
"""
f1_total = 0.0
for ground_truth, prediction in zip(y_true, y_predicted):
prediction_tokens = normalize_answer(prediction).split()
f1s = []
for gt in ground_truth:
gt_tokens = normalize_answer(gt).split()
if len(gt_tokens) == 0 or len(prediction_tokens) == 0:
f1s.append(float(gt_tokens == prediction_tokens))
continue
common = Counter(prediction_tokens) & Counter(gt_tokens)
num_same = sum(common.values())
if num_same == 0:
f1s.append(0.0)
continue
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(gt_tokens)
f1 = (2 * precision * recall) / (precision + recall)
f1s.append(f1)
f1_total += max(f1s)
return 100 * f1_total / len(y_true) if len(y_true) > 0 else 0 | [
"def",
"squad_v2_f1",
"(",
"y_true",
":",
"List",
"[",
"List",
"[",
"str",
"]",
"]",
",",
"y_predicted",
":",
"List",
"[",
"str",
"]",
")",
"->",
"float",
":",
"f1_total",
"=",
"0.0",
"for",
"ground_truth",
",",
"prediction",
"in",
"zip",
"(",
"y_true",
",",
"y_predicted",
")",
":",
"prediction_tokens",
"=",
"normalize_answer",
"(",
"prediction",
")",
".",
"split",
"(",
")",
"f1s",
"=",
"[",
"]",
"for",
"gt",
"in",
"ground_truth",
":",
"gt_tokens",
"=",
"normalize_answer",
"(",
"gt",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"gt_tokens",
")",
"==",
"0",
"or",
"len",
"(",
"prediction_tokens",
")",
"==",
"0",
":",
"f1s",
".",
"append",
"(",
"float",
"(",
"gt_tokens",
"==",
"prediction_tokens",
")",
")",
"continue",
"common",
"=",
"Counter",
"(",
"prediction_tokens",
")",
"&",
"Counter",
"(",
"gt_tokens",
")",
"num_same",
"=",
"sum",
"(",
"common",
".",
"values",
"(",
")",
")",
"if",
"num_same",
"==",
"0",
":",
"f1s",
".",
"append",
"(",
"0.0",
")",
"continue",
"precision",
"=",
"1.0",
"*",
"num_same",
"/",
"len",
"(",
"prediction_tokens",
")",
"recall",
"=",
"1.0",
"*",
"num_same",
"/",
"len",
"(",
"gt_tokens",
")",
"f1",
"=",
"(",
"2",
"*",
"precision",
"*",
"recall",
")",
"/",
"(",
"precision",
"+",
"recall",
")",
"f1s",
".",
"append",
"(",
"f1",
")",
"f1_total",
"+=",
"max",
"(",
"f1s",
")",
"return",
"100",
"*",
"f1_total",
"/",
"len",
"(",
"y_true",
")",
"if",
"len",
"(",
"y_true",
")",
">",
"0",
"else",
"0"
] | Calculates F-1 score between y_true and y_predicted
F-1 score uses the best matching y_true answer
The same as in SQuAD-v2.0
Args:
y_true: list of correct answers (correct answers are represented by list of strings)
y_predicted: list of predicted answers
Returns:
F-1 score : float | [
"Calculates",
"F",
"-",
"1",
"score",
"between",
"y_true",
"and",
"y_predicted",
"F",
"-",
"1",
"score",
"uses",
"the",
"best",
"matching",
"y_true",
"answer"
] | python | test |
Yipit/eventlib | eventlib/listener.py | https://github.com/Yipit/eventlib/blob/0cf29e5251a59fcbfc727af5f5157a3bb03832e2/eventlib/listener.py#L21-L37 | def listen_for_events():
"""Pubsub event listener
Listen for events in the pubsub bus and calls the process function
when somebody comes to play.
"""
import_event_modules()
conn = redis_connection.get_connection()
pubsub = conn.pubsub()
pubsub.subscribe("eventlib")
for message in pubsub.listen():
if message['type'] != 'message':
continue
data = loads(message["data"])
if 'name' in data:
event_name = data.pop('name')
process_external(event_name, data) | [
"def",
"listen_for_events",
"(",
")",
":",
"import_event_modules",
"(",
")",
"conn",
"=",
"redis_connection",
".",
"get_connection",
"(",
")",
"pubsub",
"=",
"conn",
".",
"pubsub",
"(",
")",
"pubsub",
".",
"subscribe",
"(",
"\"eventlib\"",
")",
"for",
"message",
"in",
"pubsub",
".",
"listen",
"(",
")",
":",
"if",
"message",
"[",
"'type'",
"]",
"!=",
"'message'",
":",
"continue",
"data",
"=",
"loads",
"(",
"message",
"[",
"\"data\"",
"]",
")",
"if",
"'name'",
"in",
"data",
":",
"event_name",
"=",
"data",
".",
"pop",
"(",
"'name'",
")",
"process_external",
"(",
"event_name",
",",
"data",
")"
] | Pubsub event listener
Listen for events in the pubsub bus and calls the process function
when somebody comes to play. | [
"Pubsub",
"event",
"listener"
] | python | train |
Spinmob/spinmob | _pylab_tweaks.py | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_pylab_tweaks.py#L472-L506 | def image_neighbor_smooth(xlevel=0.2, ylevel=0.2, image="auto"):
"""
This will bleed nearest neighbor pixels into each other with
the specified weight factors.
"""
if image == "auto": image = _pylab.gca().images[0]
Z = _n.array(image.get_array())
# store this image in the undo list
global image_undo_list
image_undo_list.append([image, Z])
if len(image_undo_list) > 10: image_undo_list.pop(0)
# get the diagonal smoothing level (eliptical, and scaled down by distance)
dlevel = ((xlevel**2+ylevel**2)/2.0)**(0.5)
# don't touch the first column
new_Z = [Z[0]*1.0]
for m in range(1,len(Z)-1):
new_Z.append(Z[m]*1.0)
for n in range(1,len(Z[0])-1):
new_Z[-1][n] = (Z[m,n] + xlevel*(Z[m+1,n]+Z[m-1,n]) + ylevel*(Z[m,n+1]+Z[m,n-1]) \
+ dlevel*(Z[m+1,n+1]+Z[m-1,n+1]+Z[m+1,n-1]+Z[m-1,n-1]) ) \
/ (1.0+xlevel*2+ylevel*2 + dlevel*4)
# don't touch the last column
new_Z.append(Z[-1]*1.0)
# images have transposed data
image.set_array(_n.array(new_Z))
# update the plot
_pylab.draw() | [
"def",
"image_neighbor_smooth",
"(",
"xlevel",
"=",
"0.2",
",",
"ylevel",
"=",
"0.2",
",",
"image",
"=",
"\"auto\"",
")",
":",
"if",
"image",
"==",
"\"auto\"",
":",
"image",
"=",
"_pylab",
".",
"gca",
"(",
")",
".",
"images",
"[",
"0",
"]",
"Z",
"=",
"_n",
".",
"array",
"(",
"image",
".",
"get_array",
"(",
")",
")",
"# store this image in the undo list",
"global",
"image_undo_list",
"image_undo_list",
".",
"append",
"(",
"[",
"image",
",",
"Z",
"]",
")",
"if",
"len",
"(",
"image_undo_list",
")",
">",
"10",
":",
"image_undo_list",
".",
"pop",
"(",
"0",
")",
"# get the diagonal smoothing level (eliptical, and scaled down by distance)",
"dlevel",
"=",
"(",
"(",
"xlevel",
"**",
"2",
"+",
"ylevel",
"**",
"2",
")",
"/",
"2.0",
")",
"**",
"(",
"0.5",
")",
"# don't touch the first column",
"new_Z",
"=",
"[",
"Z",
"[",
"0",
"]",
"*",
"1.0",
"]",
"for",
"m",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"Z",
")",
"-",
"1",
")",
":",
"new_Z",
".",
"append",
"(",
"Z",
"[",
"m",
"]",
"*",
"1.0",
")",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"Z",
"[",
"0",
"]",
")",
"-",
"1",
")",
":",
"new_Z",
"[",
"-",
"1",
"]",
"[",
"n",
"]",
"=",
"(",
"Z",
"[",
"m",
",",
"n",
"]",
"+",
"xlevel",
"*",
"(",
"Z",
"[",
"m",
"+",
"1",
",",
"n",
"]",
"+",
"Z",
"[",
"m",
"-",
"1",
",",
"n",
"]",
")",
"+",
"ylevel",
"*",
"(",
"Z",
"[",
"m",
",",
"n",
"+",
"1",
"]",
"+",
"Z",
"[",
"m",
",",
"n",
"-",
"1",
"]",
")",
"+",
"dlevel",
"*",
"(",
"Z",
"[",
"m",
"+",
"1",
",",
"n",
"+",
"1",
"]",
"+",
"Z",
"[",
"m",
"-",
"1",
",",
"n",
"+",
"1",
"]",
"+",
"Z",
"[",
"m",
"+",
"1",
",",
"n",
"-",
"1",
"]",
"+",
"Z",
"[",
"m",
"-",
"1",
",",
"n",
"-",
"1",
"]",
")",
")",
"/",
"(",
"1.0",
"+",
"xlevel",
"*",
"2",
"+",
"ylevel",
"*",
"2",
"+",
"dlevel",
"*",
"4",
")",
"# don't touch the last column",
"new_Z",
".",
"append",
"(",
"Z",
"[",
"-",
"1",
"]",
"*",
"1.0",
")",
"# images have transposed data",
"image",
".",
"set_array",
"(",
"_n",
".",
"array",
"(",
"new_Z",
")",
")",
"# update the plot",
"_pylab",
".",
"draw",
"(",
")"
] | This will bleed nearest neighbor pixels into each other with
the specified weight factors. | [
"This",
"will",
"bleed",
"nearest",
"neighbor",
"pixels",
"into",
"each",
"other",
"with",
"the",
"specified",
"weight",
"factors",
"."
] | python | train |
kmmbvnr/django-any | django_any/models.py | https://github.com/kmmbvnr/django-any/blob/6f64ebd05476e2149e2e71deeefbb10f8edfc412/django_any/models.py#L213-L235 | def any_file_field(field, **kwargs):
"""
Lookup for nearest existing file
"""
def get_some_file(path):
subdirs, files = field.storage.listdir(path)
if files:
result_file = random.choice(files)
instance = field.storage.open("%s/%s" % (path, result_file)).file
return FieldFile(instance, field, result_file)
for subdir in subdirs:
result = get_some_file("%s/%s" % (path, subdir))
if result:
return result
result = get_some_file(field.upload_to)
if result is None and not field.null:
raise TypeError("Can't found file in %s for non nullable FileField" % field.upload_to)
return result | [
"def",
"any_file_field",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"get_some_file",
"(",
"path",
")",
":",
"subdirs",
",",
"files",
"=",
"field",
".",
"storage",
".",
"listdir",
"(",
"path",
")",
"if",
"files",
":",
"result_file",
"=",
"random",
".",
"choice",
"(",
"files",
")",
"instance",
"=",
"field",
".",
"storage",
".",
"open",
"(",
"\"%s/%s\"",
"%",
"(",
"path",
",",
"result_file",
")",
")",
".",
"file",
"return",
"FieldFile",
"(",
"instance",
",",
"field",
",",
"result_file",
")",
"for",
"subdir",
"in",
"subdirs",
":",
"result",
"=",
"get_some_file",
"(",
"\"%s/%s\"",
"%",
"(",
"path",
",",
"subdir",
")",
")",
"if",
"result",
":",
"return",
"result",
"result",
"=",
"get_some_file",
"(",
"field",
".",
"upload_to",
")",
"if",
"result",
"is",
"None",
"and",
"not",
"field",
".",
"null",
":",
"raise",
"TypeError",
"(",
"\"Can't found file in %s for non nullable FileField\"",
"%",
"field",
".",
"upload_to",
")",
"return",
"result"
] | Lookup for nearest existing file | [
"Lookup",
"for",
"nearest",
"existing",
"file"
] | python | test |
deontologician/restnavigator | restnavigator/utils.py | https://github.com/deontologician/restnavigator/blob/453b9de4e70e602009d3e3ffafcf77d23c8b07c5/restnavigator/utils.py#L205-L216 | def get_by(self, prop, val, raise_exc=False):
'''Retrieve an item from the dictionary with the given metadata
properties. If there is no such item, None will be returned, if there
are multiple such items, the first will be returned.'''
try:
val = self.serialize(val)
return self._meta[prop][val][0]
except (KeyError, IndexError):
if raise_exc:
raise
else:
return None | [
"def",
"get_by",
"(",
"self",
",",
"prop",
",",
"val",
",",
"raise_exc",
"=",
"False",
")",
":",
"try",
":",
"val",
"=",
"self",
".",
"serialize",
"(",
"val",
")",
"return",
"self",
".",
"_meta",
"[",
"prop",
"]",
"[",
"val",
"]",
"[",
"0",
"]",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"if",
"raise_exc",
":",
"raise",
"else",
":",
"return",
"None"
] | Retrieve an item from the dictionary with the given metadata
properties. If there is no such item, None will be returned, if there
are multiple such items, the first will be returned. | [
"Retrieve",
"an",
"item",
"from",
"the",
"dictionary",
"with",
"the",
"given",
"metadata",
"properties",
".",
"If",
"there",
"is",
"no",
"such",
"item",
"None",
"will",
"be",
"returned",
"if",
"there",
"are",
"multiple",
"such",
"items",
"the",
"first",
"will",
"be",
"returned",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/assessment/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L3042-L3058 | def get_assessment_taken(self):
"""Gets the ``AssessmentTakeb``.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
if not bool(self._my_map['assessmentTakenId']):
raise errors.IllegalState('assessment_taken empty')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_taken_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentTaken lookup')
lookup_session = mgr.get_assessment_taken_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_bank_view()
return lookup_session.get_assessment_taken(self.get_assessment_taken_id()) | [
"def",
"get_assessment_taken",
"(",
"self",
")",
":",
"# Implemented from template for osid.learning.Activity.get_objective",
"if",
"not",
"bool",
"(",
"self",
".",
"_my_map",
"[",
"'assessmentTakenId'",
"]",
")",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'assessment_taken empty'",
")",
"mgr",
"=",
"self",
".",
"_get_provider_manager",
"(",
"'ASSESSMENT'",
")",
"if",
"not",
"mgr",
".",
"supports_assessment_taken_lookup",
"(",
")",
":",
"raise",
"errors",
".",
"OperationFailed",
"(",
"'Assessment does not support AssessmentTaken lookup'",
")",
"lookup_session",
"=",
"mgr",
".",
"get_assessment_taken_lookup_session",
"(",
"proxy",
"=",
"getattr",
"(",
"self",
",",
"\"_proxy\"",
",",
"None",
")",
")",
"lookup_session",
".",
"use_federated_bank_view",
"(",
")",
"return",
"lookup_session",
".",
"get_assessment_taken",
"(",
"self",
".",
"get_assessment_taken_id",
"(",
")",
")"
] | Gets the ``AssessmentTakeb``.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"AssessmentTakeb",
"."
] | python | train |
pybel/pybel | src/pybel/manager/cache_manager.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L1175-L1189 | def _make_property_from_dict(self, property_def: Dict) -> Property:
"""Build an edge property from a dictionary."""
property_hash = hash_dump(property_def)
edge_property_model = self.object_cache_property.get(property_hash)
if edge_property_model is None:
edge_property_model = self.get_property_by_hash(property_hash)
if not edge_property_model:
property_def['sha512'] = property_hash
edge_property_model = Property(**property_def)
self.object_cache_property[property_hash] = edge_property_model
return edge_property_model | [
"def",
"_make_property_from_dict",
"(",
"self",
",",
"property_def",
":",
"Dict",
")",
"->",
"Property",
":",
"property_hash",
"=",
"hash_dump",
"(",
"property_def",
")",
"edge_property_model",
"=",
"self",
".",
"object_cache_property",
".",
"get",
"(",
"property_hash",
")",
"if",
"edge_property_model",
"is",
"None",
":",
"edge_property_model",
"=",
"self",
".",
"get_property_by_hash",
"(",
"property_hash",
")",
"if",
"not",
"edge_property_model",
":",
"property_def",
"[",
"'sha512'",
"]",
"=",
"property_hash",
"edge_property_model",
"=",
"Property",
"(",
"*",
"*",
"property_def",
")",
"self",
".",
"object_cache_property",
"[",
"property_hash",
"]",
"=",
"edge_property_model",
"return",
"edge_property_model"
] | Build an edge property from a dictionary. | [
"Build",
"an",
"edge",
"property",
"from",
"a",
"dictionary",
"."
] | python | train |
minrk/findspark | findspark.py | https://github.com/minrk/findspark/blob/20c945d5136269ca56b1341786c49087faa7c75e/findspark.py#L68-L98 | def edit_ipython_profile(spark_home, spark_python, py4j):
"""Adds a startup file to the current IPython profile to import pyspark.
The startup file sets the required environment variables and imports pyspark.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library.
"""
from IPython import get_ipython
ip = get_ipython()
if ip:
profile_dir = ip.profile_dir.location
else:
from IPython.utils.path import locate_profile
profile_dir = locate_profile()
startup_file_loc = os.path.join(profile_dir, "startup", "findspark.py")
with open(startup_file_loc, 'w') as startup_file:
#Lines of code to be run when IPython starts
startup_file.write("import sys, os\n")
startup_file.write("os.environ['SPARK_HOME'] = '" + spark_home + "'\n")
startup_file.write("sys.path[:0] = " + str([spark_python, py4j]) + "\n")
startup_file.write("import pyspark\n") | [
"def",
"edit_ipython_profile",
"(",
"spark_home",
",",
"spark_python",
",",
"py4j",
")",
":",
"from",
"IPython",
"import",
"get_ipython",
"ip",
"=",
"get_ipython",
"(",
")",
"if",
"ip",
":",
"profile_dir",
"=",
"ip",
".",
"profile_dir",
".",
"location",
"else",
":",
"from",
"IPython",
".",
"utils",
".",
"path",
"import",
"locate_profile",
"profile_dir",
"=",
"locate_profile",
"(",
")",
"startup_file_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"profile_dir",
",",
"\"startup\"",
",",
"\"findspark.py\"",
")",
"with",
"open",
"(",
"startup_file_loc",
",",
"'w'",
")",
"as",
"startup_file",
":",
"#Lines of code to be run when IPython starts",
"startup_file",
".",
"write",
"(",
"\"import sys, os\\n\"",
")",
"startup_file",
".",
"write",
"(",
"\"os.environ['SPARK_HOME'] = '\"",
"+",
"spark_home",
"+",
"\"'\\n\"",
")",
"startup_file",
".",
"write",
"(",
"\"sys.path[:0] = \"",
"+",
"str",
"(",
"[",
"spark_python",
",",
"py4j",
"]",
")",
"+",
"\"\\n\"",
")",
"startup_file",
".",
"write",
"(",
"\"import pyspark\\n\"",
")"
] | Adds a startup file to the current IPython profile to import pyspark.
The startup file sets the required environment variables and imports pyspark.
Parameters
----------
spark_home : str
Path to Spark installation.
spark_python : str
Path to python subdirectory of Spark installation.
py4j : str
Path to py4j library. | [
"Adds",
"a",
"startup",
"file",
"to",
"the",
"current",
"IPython",
"profile",
"to",
"import",
"pyspark",
"."
] | python | train |
cslarsen/crianza | crianza/tokenizer.py | https://github.com/cslarsen/crianza/blob/fa044f9d491f37cc06892bad14b2c80b8ac5a7cd/crianza/tokenizer.py#L141-L152 | def tokentype(self, s):
"""Parses string and returns a (Tokenizer.TYPE, value) tuple."""
a = s[0] if len(s)>0 else ""
b = s[1] if len(s)>1 else ""
if a.isdigit() or (a in ["+","-"] and b.isdigit()):
return self.parse_number(s)
elif a == '"': return self.parse_string(s)
elif a == ':': return self.parse_colon(s)
elif a == ';': return self.parse_semicolon(s)
else:
return self.parse_word(s) | [
"def",
"tokentype",
"(",
"self",
",",
"s",
")",
":",
"a",
"=",
"s",
"[",
"0",
"]",
"if",
"len",
"(",
"s",
")",
">",
"0",
"else",
"\"\"",
"b",
"=",
"s",
"[",
"1",
"]",
"if",
"len",
"(",
"s",
")",
">",
"1",
"else",
"\"\"",
"if",
"a",
".",
"isdigit",
"(",
")",
"or",
"(",
"a",
"in",
"[",
"\"+\"",
",",
"\"-\"",
"]",
"and",
"b",
".",
"isdigit",
"(",
")",
")",
":",
"return",
"self",
".",
"parse_number",
"(",
"s",
")",
"elif",
"a",
"==",
"'\"'",
":",
"return",
"self",
".",
"parse_string",
"(",
"s",
")",
"elif",
"a",
"==",
"':'",
":",
"return",
"self",
".",
"parse_colon",
"(",
"s",
")",
"elif",
"a",
"==",
"';'",
":",
"return",
"self",
".",
"parse_semicolon",
"(",
"s",
")",
"else",
":",
"return",
"self",
".",
"parse_word",
"(",
"s",
")"
] | Parses string and returns a (Tokenizer.TYPE, value) tuple. | [
"Parses",
"string",
"and",
"returns",
"a",
"(",
"Tokenizer",
".",
"TYPE",
"value",
")",
"tuple",
"."
] | python | train |
fracpete/python-weka-wrapper3 | python/weka/core/converters.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/converters.py#L299-L340 | def ndarray_to_instances(array, relation, att_template="Att-#", att_list=None):
"""
Converts the numpy matrix into an Instances object and returns it.
:param array: the numpy ndarray to convert
:type array: numpy.darray
:param relation: the name of the dataset
:type relation: str
:param att_template: the prefix to use for the attribute names, "#" is the 1-based index,
"!" is the 0-based index, "@" the relation name
:type att_template: str
:param att_list: the list of attribute names to use
:type att_list: list
:return: the generated instances object
:rtype: Instances
"""
if len(numpy.shape(array)) != 2:
raise Exception("Number of array dimensions must be 2!")
rows, cols = numpy.shape(array)
# header
atts = []
if att_list is not None:
if len(att_list) != cols:
raise Exception(
"Number columns and provided attribute names differ: " + str(cols) + " != " + len(att_list))
for name in att_list:
att = Attribute.create_numeric(name)
atts.append(att)
else:
for i in range(cols):
name = att_template.replace("#", str(i+1)).replace("!", str(i)).replace("@", relation)
att = Attribute.create_numeric(name)
atts.append(att)
result = Instances.create_instances(relation, atts, rows)
# data
for i in range(rows):
inst = Instance.create_instance(array[i])
result.add_instance(inst)
return result | [
"def",
"ndarray_to_instances",
"(",
"array",
",",
"relation",
",",
"att_template",
"=",
"\"Att-#\"",
",",
"att_list",
"=",
"None",
")",
":",
"if",
"len",
"(",
"numpy",
".",
"shape",
"(",
"array",
")",
")",
"!=",
"2",
":",
"raise",
"Exception",
"(",
"\"Number of array dimensions must be 2!\"",
")",
"rows",
",",
"cols",
"=",
"numpy",
".",
"shape",
"(",
"array",
")",
"# header",
"atts",
"=",
"[",
"]",
"if",
"att_list",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"att_list",
")",
"!=",
"cols",
":",
"raise",
"Exception",
"(",
"\"Number columns and provided attribute names differ: \"",
"+",
"str",
"(",
"cols",
")",
"+",
"\" != \"",
"+",
"len",
"(",
"att_list",
")",
")",
"for",
"name",
"in",
"att_list",
":",
"att",
"=",
"Attribute",
".",
"create_numeric",
"(",
"name",
")",
"atts",
".",
"append",
"(",
"att",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"cols",
")",
":",
"name",
"=",
"att_template",
".",
"replace",
"(",
"\"#\"",
",",
"str",
"(",
"i",
"+",
"1",
")",
")",
".",
"replace",
"(",
"\"!\"",
",",
"str",
"(",
"i",
")",
")",
".",
"replace",
"(",
"\"@\"",
",",
"relation",
")",
"att",
"=",
"Attribute",
".",
"create_numeric",
"(",
"name",
")",
"atts",
".",
"append",
"(",
"att",
")",
"result",
"=",
"Instances",
".",
"create_instances",
"(",
"relation",
",",
"atts",
",",
"rows",
")",
"# data",
"for",
"i",
"in",
"range",
"(",
"rows",
")",
":",
"inst",
"=",
"Instance",
".",
"create_instance",
"(",
"array",
"[",
"i",
"]",
")",
"result",
".",
"add_instance",
"(",
"inst",
")",
"return",
"result"
] | Converts the numpy matrix into an Instances object and returns it.
:param array: the numpy ndarray to convert
:type array: numpy.darray
:param relation: the name of the dataset
:type relation: str
:param att_template: the prefix to use for the attribute names, "#" is the 1-based index,
"!" is the 0-based index, "@" the relation name
:type att_template: str
:param att_list: the list of attribute names to use
:type att_list: list
:return: the generated instances object
:rtype: Instances | [
"Converts",
"the",
"numpy",
"matrix",
"into",
"an",
"Instances",
"object",
"and",
"returns",
"it",
"."
] | python | train |
rfosterslo/wagtailplus | wagtailplus/wagtailrelations/templatetags/wagtailrelations_tags.py | https://github.com/rfosterslo/wagtailplus/blob/22cac857175d8a6f77e470751831c14a92ccd768/wagtailplus/wagtailrelations/templatetags/wagtailrelations_tags.py#L33-L50 | def get_related_entry_admin_url(entry):
"""
Returns admin URL for specified entry instance.
:param entry: the entry instance.
:return: str.
"""
namespaces = {
Document: 'wagtaildocs:edit',
Link: 'wagtaillinks:edit',
Page: 'wagtailadmin_pages:edit',
}
for cls, url in namespaces.iteritems():
if issubclass(entry.content_type.model_class(), cls):
return urlresolvers.reverse(url, args=(entry.object_id,))
return '' | [
"def",
"get_related_entry_admin_url",
"(",
"entry",
")",
":",
"namespaces",
"=",
"{",
"Document",
":",
"'wagtaildocs:edit'",
",",
"Link",
":",
"'wagtaillinks:edit'",
",",
"Page",
":",
"'wagtailadmin_pages:edit'",
",",
"}",
"for",
"cls",
",",
"url",
"in",
"namespaces",
".",
"iteritems",
"(",
")",
":",
"if",
"issubclass",
"(",
"entry",
".",
"content_type",
".",
"model_class",
"(",
")",
",",
"cls",
")",
":",
"return",
"urlresolvers",
".",
"reverse",
"(",
"url",
",",
"args",
"=",
"(",
"entry",
".",
"object_id",
",",
")",
")",
"return",
"''"
] | Returns admin URL for specified entry instance.
:param entry: the entry instance.
:return: str. | [
"Returns",
"admin",
"URL",
"for",
"specified",
"entry",
"instance",
"."
] | python | train |
DataONEorg/d1_python | lib_common/src/d1_common/type_conversions.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/type_conversions.py#L455-L468 | def pyxb_is_v1(pyxb_obj):
"""
Args:
pyxb_obj : PyXB object
PyXB object holding an unknown type.
Returns:
bool: **True** if ``pyxb_obj`` holds an API v1 type.
"""
# TODO: Will not detect v1.2 as v1.
return (
pyxb_obj._element().name().namespace()
== d1_common.types.dataoneTypes_v1.Namespace
) | [
"def",
"pyxb_is_v1",
"(",
"pyxb_obj",
")",
":",
"# TODO: Will not detect v1.2 as v1.",
"return",
"(",
"pyxb_obj",
".",
"_element",
"(",
")",
".",
"name",
"(",
")",
".",
"namespace",
"(",
")",
"==",
"d1_common",
".",
"types",
".",
"dataoneTypes_v1",
".",
"Namespace",
")"
] | Args:
pyxb_obj : PyXB object
PyXB object holding an unknown type.
Returns:
bool: **True** if ``pyxb_obj`` holds an API v1 type. | [
"Args",
":",
"pyxb_obj",
":",
"PyXB",
"object",
"PyXB",
"object",
"holding",
"an",
"unknown",
"type",
"."
] | python | train |
bkg/django-spillway | spillway/query.py | https://github.com/bkg/django-spillway/blob/c488a62642430b005f1e0d4a19e160d8d5964b67/spillway/query.py#L161-L175 | def arrays(self, field_name=None):
"""Returns a list of ndarrays.
Keyword args:
field_name -- raster field name as str
"""
fieldname = field_name or self.raster_field.name
arrays = []
for obj in self:
arr = getattr(obj, fieldname)
if isinstance(arr, np.ndarray):
arrays.append(arr)
else:
arrays.append(obj.array())
return arrays | [
"def",
"arrays",
"(",
"self",
",",
"field_name",
"=",
"None",
")",
":",
"fieldname",
"=",
"field_name",
"or",
"self",
".",
"raster_field",
".",
"name",
"arrays",
"=",
"[",
"]",
"for",
"obj",
"in",
"self",
":",
"arr",
"=",
"getattr",
"(",
"obj",
",",
"fieldname",
")",
"if",
"isinstance",
"(",
"arr",
",",
"np",
".",
"ndarray",
")",
":",
"arrays",
".",
"append",
"(",
"arr",
")",
"else",
":",
"arrays",
".",
"append",
"(",
"obj",
".",
"array",
"(",
")",
")",
"return",
"arrays"
] | Returns a list of ndarrays.
Keyword args:
field_name -- raster field name as str | [
"Returns",
"a",
"list",
"of",
"ndarrays",
"."
] | python | train |
kennethreitz/bucketstore | bucketstore.py | https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L180-L186 | def temp_url(self, duration=120):
"""Returns a temporary URL for the given key."""
return self.bucket._boto_s3.meta.client.generate_presigned_url(
'get_object',
Params={'Bucket': self.bucket.name, 'Key': self.name},
ExpiresIn=duration
) | [
"def",
"temp_url",
"(",
"self",
",",
"duration",
"=",
"120",
")",
":",
"return",
"self",
".",
"bucket",
".",
"_boto_s3",
".",
"meta",
".",
"client",
".",
"generate_presigned_url",
"(",
"'get_object'",
",",
"Params",
"=",
"{",
"'Bucket'",
":",
"self",
".",
"bucket",
".",
"name",
",",
"'Key'",
":",
"self",
".",
"name",
"}",
",",
"ExpiresIn",
"=",
"duration",
")"
] | Returns a temporary URL for the given key. | [
"Returns",
"a",
"temporary",
"URL",
"for",
"the",
"given",
"key",
"."
] | python | train |
OSSOS/MOP | src/jjk/preproc/verifyDetection.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/verifyDetection.py#L34-L43 | def get_file_ids(object):
"""Get the exposure for a particular line in the meausre table"""
import MOPdbaccess
mysql = MOPdbaccess.connect('cfeps','cfhls',dbSystem='MYSQL')
cfeps=mysql.cursor()
sql="SELECT file_id FROM measure WHERE provisional LIKE %s"
cfeps.execute(sql,(object, ))
file_ids=cfeps.fetchall()
return (file_ids) | [
"def",
"get_file_ids",
"(",
"object",
")",
":",
"import",
"MOPdbaccess",
"mysql",
"=",
"MOPdbaccess",
".",
"connect",
"(",
"'cfeps'",
",",
"'cfhls'",
",",
"dbSystem",
"=",
"'MYSQL'",
")",
"cfeps",
"=",
"mysql",
".",
"cursor",
"(",
")",
"sql",
"=",
"\"SELECT file_id FROM measure WHERE provisional LIKE %s\"",
"cfeps",
".",
"execute",
"(",
"sql",
",",
"(",
"object",
",",
")",
")",
"file_ids",
"=",
"cfeps",
".",
"fetchall",
"(",
")",
"return",
"(",
"file_ids",
")"
] | Get the exposure for a particular line in the meausre table | [
"Get",
"the",
"exposure",
"for",
"a",
"particular",
"line",
"in",
"the",
"meausre",
"table"
] | python | train |
QuantEcon/QuantEcon.py | quantecon/optimize/root_finding.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/optimize/root_finding.py#L297-L374 | def bisect(f, a, b, args=(), xtol=_xtol,
rtol=_rtol, maxiter=_iter, disp=True):
"""
Find root of a function within an interval adapted from Scipy's bisect.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple
"""
if xtol <= 0:
raise ValueError("xtol is too small (<= 0)")
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
# Convert to float
xa = a * 1.0
xb = b * 1.0
fa = f(xa, *args)
fb = f(xb, *args)
funcalls = 2
root, status = _bisect_interval(xa, xb, fa, fb)
# Check for sign error and early termination
if status == _ECONVERGED:
itr = 0
else:
# Perform bisection
dm = xb - xa
for itr in range(maxiter):
dm *= 0.5
xm = xa + dm
fm = f(xm, *args)
funcalls += 1
if fm * fa >= 0:
xa = xm
if fm == 0 or abs(dm) < xtol + rtol * abs(xm):
root = xm
status = _ECONVERGED
itr += 1
break
if disp and status == _ECONVERR:
raise RuntimeError("Failed to converge")
return _results((root, funcalls, itr, status)) | [
"def",
"bisect",
"(",
"f",
",",
"a",
",",
"b",
",",
"args",
"=",
"(",
")",
",",
"xtol",
"=",
"_xtol",
",",
"rtol",
"=",
"_rtol",
",",
"maxiter",
"=",
"_iter",
",",
"disp",
"=",
"True",
")",
":",
"if",
"xtol",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"xtol is too small (<= 0)\"",
")",
"if",
"maxiter",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"maxiter must be greater than 0\"",
")",
"# Convert to float",
"xa",
"=",
"a",
"*",
"1.0",
"xb",
"=",
"b",
"*",
"1.0",
"fa",
"=",
"f",
"(",
"xa",
",",
"*",
"args",
")",
"fb",
"=",
"f",
"(",
"xb",
",",
"*",
"args",
")",
"funcalls",
"=",
"2",
"root",
",",
"status",
"=",
"_bisect_interval",
"(",
"xa",
",",
"xb",
",",
"fa",
",",
"fb",
")",
"# Check for sign error and early termination",
"if",
"status",
"==",
"_ECONVERGED",
":",
"itr",
"=",
"0",
"else",
":",
"# Perform bisection",
"dm",
"=",
"xb",
"-",
"xa",
"for",
"itr",
"in",
"range",
"(",
"maxiter",
")",
":",
"dm",
"*=",
"0.5",
"xm",
"=",
"xa",
"+",
"dm",
"fm",
"=",
"f",
"(",
"xm",
",",
"*",
"args",
")",
"funcalls",
"+=",
"1",
"if",
"fm",
"*",
"fa",
">=",
"0",
":",
"xa",
"=",
"xm",
"if",
"fm",
"==",
"0",
"or",
"abs",
"(",
"dm",
")",
"<",
"xtol",
"+",
"rtol",
"*",
"abs",
"(",
"xm",
")",
":",
"root",
"=",
"xm",
"status",
"=",
"_ECONVERGED",
"itr",
"+=",
"1",
"break",
"if",
"disp",
"and",
"status",
"==",
"_ECONVERR",
":",
"raise",
"RuntimeError",
"(",
"\"Failed to converge\"",
")",
"return",
"_results",
"(",
"(",
"root",
",",
"funcalls",
",",
"itr",
",",
"status",
")",
")"
] | Find root of a function within an interval adapted from Scipy's bisect.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
`f` must be jitted via numba.
Parameters
----------
f : jitted and callable
Python function returning a number. `f` must be continuous.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
args : tuple, optional(default=())
Extra arguments to be used in the function call.
xtol : number, optional(default=2e-12)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional(default=4*np.finfo(float).eps)
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root.
maxiter : number, optional(default=100)
Maximum number of iterations.
disp : bool, optional(default=True)
If True, raise a RuntimeError if the algorithm didn't converge.
Returns
-------
results : namedtuple | [
"Find",
"root",
"of",
"a",
"function",
"within",
"an",
"interval",
"adapted",
"from",
"Scipy",
"s",
"bisect",
"."
] | python | train |
OTL/jps | jps/security.py | https://github.com/OTL/jps/blob/2c5a438d59611fffca6853072c822ef22665ed87/jps/security.py#L37-L41 | def set_client_key(self, zmq_socket, client_secret_key_path, server_public_key_path):
'''must call before bind'''
load_and_set_key(zmq_socket, client_secret_key_path)
server_public, _ = zmq.auth.load_certificate(server_public_key_path)
zmq_socket.curve_serverkey = server_public | [
"def",
"set_client_key",
"(",
"self",
",",
"zmq_socket",
",",
"client_secret_key_path",
",",
"server_public_key_path",
")",
":",
"load_and_set_key",
"(",
"zmq_socket",
",",
"client_secret_key_path",
")",
"server_public",
",",
"_",
"=",
"zmq",
".",
"auth",
".",
"load_certificate",
"(",
"server_public_key_path",
")",
"zmq_socket",
".",
"curve_serverkey",
"=",
"server_public"
] | must call before bind | [
"must",
"call",
"before",
"bind"
] | python | train |
jeffh/sniffer | sniffer/scanner/__init__.py | https://github.com/jeffh/sniffer/blob/8e4c3e77743aef08109ea0225b4a6536d4e60270/sniffer/scanner/__init__.py#L18-L31 | def _import(module, cls):
"""
A messy way to import library-specific classes.
TODO: I should really make a factory class or something, but I'm lazy.
Plus, factories remind me a lot of java...
"""
global Scanner
try:
cls = str(cls)
mod = __import__(str(module), globals(), locals(), [cls], 1)
Scanner = getattr(mod, cls)
except ImportError:
pass | [
"def",
"_import",
"(",
"module",
",",
"cls",
")",
":",
"global",
"Scanner",
"try",
":",
"cls",
"=",
"str",
"(",
"cls",
")",
"mod",
"=",
"__import__",
"(",
"str",
"(",
"module",
")",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"[",
"cls",
"]",
",",
"1",
")",
"Scanner",
"=",
"getattr",
"(",
"mod",
",",
"cls",
")",
"except",
"ImportError",
":",
"pass"
] | A messy way to import library-specific classes.
TODO: I should really make a factory class or something, but I'm lazy.
Plus, factories remind me a lot of java... | [
"A",
"messy",
"way",
"to",
"import",
"library",
"-",
"specific",
"classes",
".",
"TODO",
":",
"I",
"should",
"really",
"make",
"a",
"factory",
"class",
"or",
"something",
"but",
"I",
"m",
"lazy",
".",
"Plus",
"factories",
"remind",
"me",
"a",
"lot",
"of",
"java",
"..."
] | python | train |
wummel/linkchecker | linkcheck/director/aggregator.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/aggregator.py#L134-L144 | def wait_for_host(self, host):
"""Throttle requests to one host."""
t = time.time()
if host in self.times:
due_time = self.times[host]
if due_time > t:
wait = due_time - t
time.sleep(wait)
t = time.time()
wait_time = random.uniform(self.wait_time_min, self.wait_time_max)
self.times[host] = t + wait_time | [
"def",
"wait_for_host",
"(",
"self",
",",
"host",
")",
":",
"t",
"=",
"time",
".",
"time",
"(",
")",
"if",
"host",
"in",
"self",
".",
"times",
":",
"due_time",
"=",
"self",
".",
"times",
"[",
"host",
"]",
"if",
"due_time",
">",
"t",
":",
"wait",
"=",
"due_time",
"-",
"t",
"time",
".",
"sleep",
"(",
"wait",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"wait_time",
"=",
"random",
".",
"uniform",
"(",
"self",
".",
"wait_time_min",
",",
"self",
".",
"wait_time_max",
")",
"self",
".",
"times",
"[",
"host",
"]",
"=",
"t",
"+",
"wait_time"
] | Throttle requests to one host. | [
"Throttle",
"requests",
"to",
"one",
"host",
"."
] | python | train |
inasafe/inasafe | safe/gui/tools/shake_grid/shake_grid.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/shake_grid/shake_grid.py#L493-L519 | def _run_command(self, command):
"""Run a command and raise any error as needed.
This is a simple runner for executing gdal commands.
:param command: A command string to be run.
:type command: str
:raises: Any exceptions will be propagated.
"""
try:
my_result = call(command, shell=True)
del my_result
except CalledProcessError as e:
LOGGER.exception('Running command failed %s' % command)
message = (
'Error while executing the following shell '
'command: %s\nError message: %s' % (command, str(e)))
# shameless hack - see https://github.com/AIFDR/inasafe/issues/141
if sys.platform == 'darwin': # Mac OS X
if 'Errno 4' in str(e):
# continue as the error seems to be non critical
pass
else:
raise Exception(message)
else:
raise Exception(message) | [
"def",
"_run_command",
"(",
"self",
",",
"command",
")",
":",
"try",
":",
"my_result",
"=",
"call",
"(",
"command",
",",
"shell",
"=",
"True",
")",
"del",
"my_result",
"except",
"CalledProcessError",
"as",
"e",
":",
"LOGGER",
".",
"exception",
"(",
"'Running command failed %s'",
"%",
"command",
")",
"message",
"=",
"(",
"'Error while executing the following shell '",
"'command: %s\\nError message: %s'",
"%",
"(",
"command",
",",
"str",
"(",
"e",
")",
")",
")",
"# shameless hack - see https://github.com/AIFDR/inasafe/issues/141",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# Mac OS X",
"if",
"'Errno 4'",
"in",
"str",
"(",
"e",
")",
":",
"# continue as the error seems to be non critical",
"pass",
"else",
":",
"raise",
"Exception",
"(",
"message",
")",
"else",
":",
"raise",
"Exception",
"(",
"message",
")"
] | Run a command and raise any error as needed.
This is a simple runner for executing gdal commands.
:param command: A command string to be run.
:type command: str
:raises: Any exceptions will be propagated. | [
"Run",
"a",
"command",
"and",
"raise",
"any",
"error",
"as",
"needed",
"."
] | python | train |
pgmpy/pgmpy | pgmpy/factors/distributions/GaussianDistribution.py | https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/distributions/GaussianDistribution.py#L323-L362 | def copy(self):
"""
Return a copy of the distribution.
Returns
-------
GaussianDistribution: copy of the distribution
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> gauss_dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]])
>>> copy_dis = gauss_dis.copy()
>>> copy_dis.variables
['x1', 'x2', 'x3']
>>> copy_dis.mean
array([[ 1],
[-3],
[ 4]])
>>> copy_dis.covariance
array([[ 4, 2, -2],
[ 2, 5, -5],
[-2, -5, 8]])
>>> copy_dis.precision_matrix
array([[ 0.3125 , -0.125 , 0. ],
[-0.125 , 0.58333333, 0.33333333],
[ 0. , 0.33333333, 0.33333333]])
"""
copy_distribution = GaussianDistribution(variables=self.variables,
mean=self.mean.copy(),
cov=self.covariance.copy())
if self._precision_matrix is not None:
copy_distribution._precision_matrix = self._precision_matrix.copy()
return copy_distribution | [
"def",
"copy",
"(",
"self",
")",
":",
"copy_distribution",
"=",
"GaussianDistribution",
"(",
"variables",
"=",
"self",
".",
"variables",
",",
"mean",
"=",
"self",
".",
"mean",
".",
"copy",
"(",
")",
",",
"cov",
"=",
"self",
".",
"covariance",
".",
"copy",
"(",
")",
")",
"if",
"self",
".",
"_precision_matrix",
"is",
"not",
"None",
":",
"copy_distribution",
".",
"_precision_matrix",
"=",
"self",
".",
"_precision_matrix",
".",
"copy",
"(",
")",
"return",
"copy_distribution"
] | Return a copy of the distribution.
Returns
-------
GaussianDistribution: copy of the distribution
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.distributions import GaussianDistribution as GD
>>> gauss_dis = GD(variables=['x1', 'x2', 'x3'],
... mean=[1, -3, 4],
... cov=[[4, 2, -2],
... [2, 5, -5],
... [-2, -5, 8]])
>>> copy_dis = gauss_dis.copy()
>>> copy_dis.variables
['x1', 'x2', 'x3']
>>> copy_dis.mean
array([[ 1],
[-3],
[ 4]])
>>> copy_dis.covariance
array([[ 4, 2, -2],
[ 2, 5, -5],
[-2, -5, 8]])
>>> copy_dis.precision_matrix
array([[ 0.3125 , -0.125 , 0. ],
[-0.125 , 0.58333333, 0.33333333],
[ 0. , 0.33333333, 0.33333333]]) | [
"Return",
"a",
"copy",
"of",
"the",
"distribution",
"."
] | python | train |
ksbg/sparklanes | sparklanes/_submit/submit.py | https://github.com/ksbg/sparklanes/blob/62e70892e6ae025be2f4c419f4afc34714d6884c/sparklanes/_submit/submit.py#L19-L47 | def _package_and_submit(args):
"""
Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments
"""
args = _parse_and_validate_args(args)
logging.debug(args)
dist = __make_tmp_dir()
try:
__package_dependencies(dist_dir=dist, additional_reqs=args['requirements'],
silent=args['silent'])
__package_app(tasks_pkg=args['package'],
dist_dir=dist,
custom_main=args['main'],
extra_data=args['extra_data'])
__run_spark_submit(lane_yaml=args['yaml'],
dist_dir=dist,
spark_home=args['spark_home'],
spark_args=args['spark_args'],
silent=args['silent'])
except Exception as exc:
__clean_up(dist)
raise exc
__clean_up(dist) | [
"def",
"_package_and_submit",
"(",
"args",
")",
":",
"args",
"=",
"_parse_and_validate_args",
"(",
"args",
")",
"logging",
".",
"debug",
"(",
"args",
")",
"dist",
"=",
"__make_tmp_dir",
"(",
")",
"try",
":",
"__package_dependencies",
"(",
"dist_dir",
"=",
"dist",
",",
"additional_reqs",
"=",
"args",
"[",
"'requirements'",
"]",
",",
"silent",
"=",
"args",
"[",
"'silent'",
"]",
")",
"__package_app",
"(",
"tasks_pkg",
"=",
"args",
"[",
"'package'",
"]",
",",
"dist_dir",
"=",
"dist",
",",
"custom_main",
"=",
"args",
"[",
"'main'",
"]",
",",
"extra_data",
"=",
"args",
"[",
"'extra_data'",
"]",
")",
"__run_spark_submit",
"(",
"lane_yaml",
"=",
"args",
"[",
"'yaml'",
"]",
",",
"dist_dir",
"=",
"dist",
",",
"spark_home",
"=",
"args",
"[",
"'spark_home'",
"]",
",",
"spark_args",
"=",
"args",
"[",
"'spark_args'",
"]",
",",
"silent",
"=",
"args",
"[",
"'silent'",
"]",
")",
"except",
"Exception",
"as",
"exc",
":",
"__clean_up",
"(",
"dist",
")",
"raise",
"exc",
"__clean_up",
"(",
"dist",
")"
] | Packages and submits a job, which is defined in a YAML file, to Spark.
Parameters
----------
args (List): Command-line arguments | [
"Packages",
"and",
"submits",
"a",
"job",
"which",
"is",
"defined",
"in",
"a",
"YAML",
"file",
"to",
"Spark",
"."
] | python | train |
Rapptz/discord.py | discord/abc.py | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/abc.py#L488-L510 | async def delete(self, *, reason=None):
"""|coro|
Deletes the channel.
You must have :attr:`~.Permissions.manage_channels` permission to use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this channel.
Shows up on the audit log.
Raises
-------
Forbidden
You do not have proper permissions to delete the channel.
NotFound
The channel was not found or was already deleted.
HTTPException
Deleting the channel failed.
"""
await self._state.http.delete_channel(self.id, reason=reason) | [
"async",
"def",
"delete",
"(",
"self",
",",
"*",
",",
"reason",
"=",
"None",
")",
":",
"await",
"self",
".",
"_state",
".",
"http",
".",
"delete_channel",
"(",
"self",
".",
"id",
",",
"reason",
"=",
"reason",
")"
] | |coro|
Deletes the channel.
You must have :attr:`~.Permissions.manage_channels` permission to use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this channel.
Shows up on the audit log.
Raises
-------
Forbidden
You do not have proper permissions to delete the channel.
NotFound
The channel was not found or was already deleted.
HTTPException
Deleting the channel failed. | [
"|coro|"
] | python | train |
tonioo/sievelib | sievelib/parser.py | https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/parser.py#L251-L285 | def __argument(self, ttype, tvalue):
"""Argument parsing method
This method acts as an entry point for 'argument' parsing.
Syntax:
string-list / number / tag
:param ttype: current token type
:param tvalue: current token value
:return: False if an error is encountered, True otherwise
"""
if ttype in ["multiline", "string"]:
return self.__curcommand.check_next_arg("string", tvalue.decode("utf-8"))
if ttype in ["number", "tag"]:
return self.__curcommand.check_next_arg(ttype, tvalue.decode("ascii"))
if ttype == "left_bracket":
self.__cstate = self.__stringlist
self.__curstringlist = []
self.__set_expected("string")
return True
condition = (
ttype in ["left_cbracket", "comma"] and
self.__curcommand.non_deterministic_args
)
if condition:
self.__curcommand.reassign_arguments()
# rewind lexer
self.lexer.pos -= 1
return True
return False | [
"def",
"__argument",
"(",
"self",
",",
"ttype",
",",
"tvalue",
")",
":",
"if",
"ttype",
"in",
"[",
"\"multiline\"",
",",
"\"string\"",
"]",
":",
"return",
"self",
".",
"__curcommand",
".",
"check_next_arg",
"(",
"\"string\"",
",",
"tvalue",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"if",
"ttype",
"in",
"[",
"\"number\"",
",",
"\"tag\"",
"]",
":",
"return",
"self",
".",
"__curcommand",
".",
"check_next_arg",
"(",
"ttype",
",",
"tvalue",
".",
"decode",
"(",
"\"ascii\"",
")",
")",
"if",
"ttype",
"==",
"\"left_bracket\"",
":",
"self",
".",
"__cstate",
"=",
"self",
".",
"__stringlist",
"self",
".",
"__curstringlist",
"=",
"[",
"]",
"self",
".",
"__set_expected",
"(",
"\"string\"",
")",
"return",
"True",
"condition",
"=",
"(",
"ttype",
"in",
"[",
"\"left_cbracket\"",
",",
"\"comma\"",
"]",
"and",
"self",
".",
"__curcommand",
".",
"non_deterministic_args",
")",
"if",
"condition",
":",
"self",
".",
"__curcommand",
".",
"reassign_arguments",
"(",
")",
"# rewind lexer",
"self",
".",
"lexer",
".",
"pos",
"-=",
"1",
"return",
"True",
"return",
"False"
] | Argument parsing method
This method acts as an entry point for 'argument' parsing.
Syntax:
string-list / number / tag
:param ttype: current token type
:param tvalue: current token value
:return: False if an error is encountered, True otherwise | [
"Argument",
"parsing",
"method"
] | python | train |
tanghaibao/goatools | goatools/grouper/grprobj_init.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprobj_init.py#L143-L155 | def get_go2nt(self, usr_go2nt):
"""Combine user namedtuple fields, GO object fields, and format_txt."""
gos_all = self.get_gos_all()
# Minimum set of namedtuple fields available for use with Sorter on grouped GO IDs
prt_flds_all = get_hdridx_flds() + self.gosubdag.prt_attr['flds']
if not usr_go2nt:
return self.__init_go2nt_dflt(gos_all, prt_flds_all)
usr_nt_flds = next(iter(usr_go2nt.values()))._fields
# If user namedtuple already contains all fields available, then return usr_go2nt
if len(set(prt_flds_all).difference(usr_nt_flds)) == 0:
return self._init_go2nt_aug(usr_go2nt)
# Otherwise, combine user fields and default Sorter fields
return self.__init_go2nt_w_usr(gos_all, usr_go2nt, prt_flds_all) | [
"def",
"get_go2nt",
"(",
"self",
",",
"usr_go2nt",
")",
":",
"gos_all",
"=",
"self",
".",
"get_gos_all",
"(",
")",
"# Minimum set of namedtuple fields available for use with Sorter on grouped GO IDs",
"prt_flds_all",
"=",
"get_hdridx_flds",
"(",
")",
"+",
"self",
".",
"gosubdag",
".",
"prt_attr",
"[",
"'flds'",
"]",
"if",
"not",
"usr_go2nt",
":",
"return",
"self",
".",
"__init_go2nt_dflt",
"(",
"gos_all",
",",
"prt_flds_all",
")",
"usr_nt_flds",
"=",
"next",
"(",
"iter",
"(",
"usr_go2nt",
".",
"values",
"(",
")",
")",
")",
".",
"_fields",
"# If user namedtuple already contains all fields available, then return usr_go2nt",
"if",
"len",
"(",
"set",
"(",
"prt_flds_all",
")",
".",
"difference",
"(",
"usr_nt_flds",
")",
")",
"==",
"0",
":",
"return",
"self",
".",
"_init_go2nt_aug",
"(",
"usr_go2nt",
")",
"# Otherwise, combine user fields and default Sorter fields",
"return",
"self",
".",
"__init_go2nt_w_usr",
"(",
"gos_all",
",",
"usr_go2nt",
",",
"prt_flds_all",
")"
] | Combine user namedtuple fields, GO object fields, and format_txt. | [
"Combine",
"user",
"namedtuple",
"fields",
"GO",
"object",
"fields",
"and",
"format_txt",
"."
] | python | train |
jingw/pyhdfs | pyhdfs.py | https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L433-L445 | def append(self, path, data, **kwargs):
"""Append to the given file.
:param data: ``bytes`` or a ``file``-like object
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int
"""
metadata_response = self._post(
path, 'APPEND', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs)
data_response = self._requests_session.post(
metadata_response.headers['location'], data=data, **self._requests_kwargs)
_check_response(data_response)
assert not data_response.content | [
"def",
"append",
"(",
"self",
",",
"path",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"metadata_response",
"=",
"self",
".",
"_post",
"(",
"path",
",",
"'APPEND'",
",",
"expected_status",
"=",
"httplib",
".",
"TEMPORARY_REDIRECT",
",",
"*",
"*",
"kwargs",
")",
"data_response",
"=",
"self",
".",
"_requests_session",
".",
"post",
"(",
"metadata_response",
".",
"headers",
"[",
"'location'",
"]",
",",
"data",
"=",
"data",
",",
"*",
"*",
"self",
".",
"_requests_kwargs",
")",
"_check_response",
"(",
"data_response",
")",
"assert",
"not",
"data_response",
".",
"content"
] | Append to the given file.
:param data: ``bytes`` or a ``file``-like object
:param buffersize: The size of the buffer used in transferring data.
:type buffersize: int | [
"Append",
"to",
"the",
"given",
"file",
"."
] | python | train |
savvastj/nbashots | nbashots/charts.py | https://github.com/savvastj/nbashots/blob/76ece28d717f10b25eb0fc681b317df6ef6b5157/nbashots/charts.py#L15-L103 | def draw_court(ax=None, color='gray', lw=1, outer_lines=False):
"""Returns an axes with a basketball court drawn onto to it.
This function draws a court based on the x and y-axis values that the NBA
stats API provides for the shot chart data. For example the center of the
hoop is located at the (0,0) coordinate. Twenty-two feet from the left of
the center of the hoop in is represented by the (-220,0) coordinates.
So one foot equals +/-10 units on the x and y-axis.
Parameters
----------
ax : Axes, optional
The Axes object to plot the court onto.
color : matplotlib color, optional
The color of the court lines.
lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If `True` it draws the out of bound lines in same style as the rest of
the court.
Returns
-------
ax : Axes
The Axes object with the court on it.
"""
if ax is None:
ax = plt.gca()
# Create the various parts of an NBA basketball court
# Create the basketball hoop
hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)
# Create backboard
backboard = Rectangle((-30, -12.5), 60, 0, linewidth=lw, color=color)
# The paint
# Create the outer box 0f the paint, width=16ft, height=19ft
outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
fill=False)
# Create the inner box of the paint, widt=12ft, height=19ft
inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
fill=False)
# Create free throw top arc
top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
linewidth=lw, color=color, fill=False)
# Create free throw bottom arc
bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color, linestyle='dashed')
# Restricted Zone, it is an arc with 4ft radius from center of the hoop
restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
color=color)
# Three point line
# Create the right side 3pt lines, it's 14ft long before it arcs
corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw,
color=color)
# Create the right side 3pt lines, it's 14ft long before it arcs
corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color)
# 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop
three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
color=color)
# Center Court
center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color)
center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
linewidth=lw, color=color)
# List of the court elements to be plotted onto the axes
court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
bottom_free_throw, restricted, corner_three_a,
corner_three_b, three_arc, center_outer_arc,
center_inner_arc]
if outer_lines:
# Draw the half court line, baseline and side out bound lines
outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
color=color, fill=False)
court_elements.append(outer_lines)
# Add the court elements onto the axes
for element in court_elements:
ax.add_patch(element)
return ax | [
"def",
"draw_court",
"(",
"ax",
"=",
"None",
",",
"color",
"=",
"'gray'",
",",
"lw",
"=",
"1",
",",
"outer_lines",
"=",
"False",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# Create the various parts of an NBA basketball court",
"# Create the basketball hoop",
"hoop",
"=",
"Circle",
"(",
"(",
"0",
",",
"0",
")",
",",
"radius",
"=",
"7.5",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
",",
"fill",
"=",
"False",
")",
"# Create backboard",
"backboard",
"=",
"Rectangle",
"(",
"(",
"-",
"30",
",",
"-",
"12.5",
")",
",",
"60",
",",
"0",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"# The paint",
"# Create the outer box 0f the paint, width=16ft, height=19ft",
"outer_box",
"=",
"Rectangle",
"(",
"(",
"-",
"80",
",",
"-",
"47.5",
")",
",",
"160",
",",
"190",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
",",
"fill",
"=",
"False",
")",
"# Create the inner box of the paint, widt=12ft, height=19ft",
"inner_box",
"=",
"Rectangle",
"(",
"(",
"-",
"60",
",",
"-",
"47.5",
")",
",",
"120",
",",
"190",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
",",
"fill",
"=",
"False",
")",
"# Create free throw top arc",
"top_free_throw",
"=",
"Arc",
"(",
"(",
"0",
",",
"142.5",
")",
",",
"120",
",",
"120",
",",
"theta1",
"=",
"0",
",",
"theta2",
"=",
"180",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
",",
"fill",
"=",
"False",
")",
"# Create free throw bottom arc",
"bottom_free_throw",
"=",
"Arc",
"(",
"(",
"0",
",",
"142.5",
")",
",",
"120",
",",
"120",
",",
"theta1",
"=",
"180",
",",
"theta2",
"=",
"0",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
",",
"linestyle",
"=",
"'dashed'",
")",
"# Restricted Zone, it is an arc with 4ft radius from center of the hoop",
"restricted",
"=",
"Arc",
"(",
"(",
"0",
",",
"0",
")",
",",
"80",
",",
"80",
",",
"theta1",
"=",
"0",
",",
"theta2",
"=",
"180",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"# Three point line",
"# Create the right side 3pt lines, it's 14ft long before it arcs",
"corner_three_a",
"=",
"Rectangle",
"(",
"(",
"-",
"220",
",",
"-",
"47.5",
")",
",",
"0",
",",
"140",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"# Create the right side 3pt lines, it's 14ft long before it arcs",
"corner_three_b",
"=",
"Rectangle",
"(",
"(",
"220",
",",
"-",
"47.5",
")",
",",
"0",
",",
"140",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"# 3pt arc - center of arc will be the hoop, arc is 23'9\" away from hoop",
"three_arc",
"=",
"Arc",
"(",
"(",
"0",
",",
"0",
")",
",",
"475",
",",
"475",
",",
"theta1",
"=",
"22",
",",
"theta2",
"=",
"158",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"# Center Court",
"center_outer_arc",
"=",
"Arc",
"(",
"(",
"0",
",",
"422.5",
")",
",",
"120",
",",
"120",
",",
"theta1",
"=",
"180",
",",
"theta2",
"=",
"0",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"center_inner_arc",
"=",
"Arc",
"(",
"(",
"0",
",",
"422.5",
")",
",",
"40",
",",
"40",
",",
"theta1",
"=",
"180",
",",
"theta2",
"=",
"0",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
")",
"# List of the court elements to be plotted onto the axes",
"court_elements",
"=",
"[",
"hoop",
",",
"backboard",
",",
"outer_box",
",",
"inner_box",
",",
"top_free_throw",
",",
"bottom_free_throw",
",",
"restricted",
",",
"corner_three_a",
",",
"corner_three_b",
",",
"three_arc",
",",
"center_outer_arc",
",",
"center_inner_arc",
"]",
"if",
"outer_lines",
":",
"# Draw the half court line, baseline and side out bound lines",
"outer_lines",
"=",
"Rectangle",
"(",
"(",
"-",
"250",
",",
"-",
"47.5",
")",
",",
"500",
",",
"470",
",",
"linewidth",
"=",
"lw",
",",
"color",
"=",
"color",
",",
"fill",
"=",
"False",
")",
"court_elements",
".",
"append",
"(",
"outer_lines",
")",
"# Add the court elements onto the axes",
"for",
"element",
"in",
"court_elements",
":",
"ax",
".",
"add_patch",
"(",
"element",
")",
"return",
"ax"
] | Returns an axes with a basketball court drawn onto to it.
This function draws a court based on the x and y-axis values that the NBA
stats API provides for the shot chart data. For example the center of the
hoop is located at the (0,0) coordinate. Twenty-two feet from the left of
the center of the hoop in is represented by the (-220,0) coordinates.
So one foot equals +/-10 units on the x and y-axis.
Parameters
----------
ax : Axes, optional
The Axes object to plot the court onto.
color : matplotlib color, optional
The color of the court lines.
lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If `True` it draws the out of bound lines in same style as the rest of
the court.
Returns
-------
ax : Axes
The Axes object with the court on it. | [
"Returns",
"an",
"axes",
"with",
"a",
"basketball",
"court",
"drawn",
"onto",
"to",
"it",
"."
] | python | train |
allenai/allennlp | allennlp/semparse/domain_languages/wikitables_language.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L737-L745 | def average(self, rows: List[Row], column: NumberColumn) -> Number:
"""
Takes a list of rows and a column and returns the mean of the values under that column in
those rows.
"""
cell_values = [row.values[column.name] for row in rows]
if not cell_values:
return 0.0 # type: ignore
return sum(cell_values) / len(cell_values) | [
"def",
"average",
"(",
"self",
",",
"rows",
":",
"List",
"[",
"Row",
"]",
",",
"column",
":",
"NumberColumn",
")",
"->",
"Number",
":",
"cell_values",
"=",
"[",
"row",
".",
"values",
"[",
"column",
".",
"name",
"]",
"for",
"row",
"in",
"rows",
"]",
"if",
"not",
"cell_values",
":",
"return",
"0.0",
"# type: ignore",
"return",
"sum",
"(",
"cell_values",
")",
"/",
"len",
"(",
"cell_values",
")"
] | Takes a list of rows and a column and returns the mean of the values under that column in
those rows. | [
"Takes",
"a",
"list",
"of",
"rows",
"and",
"a",
"column",
"and",
"returns",
"the",
"mean",
"of",
"the",
"values",
"under",
"that",
"column",
"in",
"those",
"rows",
"."
] | python | train |
zhmcclient/python-zhmcclient | zhmcclient/_cpc.py | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_cpc.py#L769-L815 | def get_wwpns(self, partitions):
"""
Return the WWPNs of the host ports (of the :term:`HBAs <HBA>`) of the
specified :term:`Partitions <Partition>` of this CPC.
This method performs the HMC operation "Export WWPN List".
Authorization requirements:
* Object-access permission to this CPC.
* Object-access permission to the Partitions designated by the
"partitions" parameter.
* Task permission for the "Export WWPNs" task.
Parameters:
partitions (:term:`iterable` of :class:`~zhmcclient.Partition`):
:term:`Partitions <Partition>` to be used.
Returns:
A list of items for each WWPN, where each item is a dict with the
following keys:
* 'partition-name' (string): Name of the :term:`Partition`.
* 'adapter-id' (string): ID of the :term:`FCP Adapter`.
* 'device-number' (string): Virtual device number of the :term:`HBA`.
* 'wwpn' (string): WWPN of the HBA.
Raises:
:exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of
operation "Export WWPN List" in the :term:`HMC API` book.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'partitions': [p.uri for p in partitions]}
result = self.manager.session.post(self._uri + '/operations/'
'export-port-names-list', body=body)
# Parse the returned comma-separated string for each WWPN into a dict:
wwpn_list = []
dict_keys = ('partition-name', 'adapter-id', 'device-number', 'wwpn')
for wwpn_item in result['wwpn-list']:
dict_values = wwpn_item.split(',')
wwpn_list.append(dict(zip(dict_keys, dict_values)))
return wwpn_list | [
"def",
"get_wwpns",
"(",
"self",
",",
"partitions",
")",
":",
"body",
"=",
"{",
"'partitions'",
":",
"[",
"p",
".",
"uri",
"for",
"p",
"in",
"partitions",
"]",
"}",
"result",
"=",
"self",
".",
"manager",
".",
"session",
".",
"post",
"(",
"self",
".",
"_uri",
"+",
"'/operations/'",
"'export-port-names-list'",
",",
"body",
"=",
"body",
")",
"# Parse the returned comma-separated string for each WWPN into a dict:",
"wwpn_list",
"=",
"[",
"]",
"dict_keys",
"=",
"(",
"'partition-name'",
",",
"'adapter-id'",
",",
"'device-number'",
",",
"'wwpn'",
")",
"for",
"wwpn_item",
"in",
"result",
"[",
"'wwpn-list'",
"]",
":",
"dict_values",
"=",
"wwpn_item",
".",
"split",
"(",
"','",
")",
"wwpn_list",
".",
"append",
"(",
"dict",
"(",
"zip",
"(",
"dict_keys",
",",
"dict_values",
")",
")",
")",
"return",
"wwpn_list"
] | Return the WWPNs of the host ports (of the :term:`HBAs <HBA>`) of the
specified :term:`Partitions <Partition>` of this CPC.
This method performs the HMC operation "Export WWPN List".
Authorization requirements:
* Object-access permission to this CPC.
* Object-access permission to the Partitions designated by the
"partitions" parameter.
* Task permission for the "Export WWPNs" task.
Parameters:
partitions (:term:`iterable` of :class:`~zhmcclient.Partition`):
:term:`Partitions <Partition>` to be used.
Returns:
A list of items for each WWPN, where each item is a dict with the
following keys:
* 'partition-name' (string): Name of the :term:`Partition`.
* 'adapter-id' (string): ID of the :term:`FCP Adapter`.
* 'device-number' (string): Virtual device number of the :term:`HBA`.
* 'wwpn' (string): WWPN of the HBA.
Raises:
:exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of
operation "Export WWPN List" in the :term:`HMC API` book.
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | [
"Return",
"the",
"WWPNs",
"of",
"the",
"host",
"ports",
"(",
"of",
"the",
":",
"term",
":",
"HBAs",
"<HBA",
">",
")",
"of",
"the",
"specified",
":",
"term",
":",
"Partitions",
"<Partition",
">",
"of",
"this",
"CPC",
"."
] | python | train |
juju/python-libjuju | juju/model.py | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/model.py#L164-L169 | def entity_data(self, entity_type, entity_id, history_index):
"""Return the data dict for an entity at a specific index of its
history.
"""
return self.entity_history(entity_type, entity_id)[history_index] | [
"def",
"entity_data",
"(",
"self",
",",
"entity_type",
",",
"entity_id",
",",
"history_index",
")",
":",
"return",
"self",
".",
"entity_history",
"(",
"entity_type",
",",
"entity_id",
")",
"[",
"history_index",
"]"
] | Return the data dict for an entity at a specific index of its
history. | [
"Return",
"the",
"data",
"dict",
"for",
"an",
"entity",
"at",
"a",
"specific",
"index",
"of",
"its",
"history",
"."
] | python | train |
dr-leo/pandaSDMX | pandasdmx/reader/__init__.py | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/reader/__init__.py#L33-L52 | def read_identifiables(self, cls, sdmxobj, offset=None):
'''
If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike.
'''
path = self._paths[cls]
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = {e.get('id'): cls(self, e) for e in path(base)}
if isinstance(sdmxobj, dict):
sdmxobj.update(result)
else:
return DictLike(result) | [
"def",
"read_identifiables",
"(",
"self",
",",
"cls",
",",
"sdmxobj",
",",
"offset",
"=",
"None",
")",
":",
"path",
"=",
"self",
".",
"_paths",
"[",
"cls",
"]",
"if",
"offset",
":",
"try",
":",
"base",
"=",
"self",
".",
"_paths",
"[",
"offset",
"]",
"(",
"sdmxobj",
".",
"_elem",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"return",
"None",
"else",
":",
"base",
"=",
"sdmxobj",
".",
"_elem",
"result",
"=",
"{",
"e",
".",
"get",
"(",
"'id'",
")",
":",
"cls",
"(",
"self",
",",
"e",
")",
"for",
"e",
"in",
"path",
"(",
"base",
")",
"}",
"if",
"isinstance",
"(",
"sdmxobj",
",",
"dict",
")",
":",
"sdmxobj",
".",
"update",
"(",
"result",
")",
"else",
":",
"return",
"DictLike",
"(",
"result",
")"
] | If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike. | [
"If",
"sdmxobj",
"inherits",
"from",
"dict",
":",
"update",
"it",
"with",
"modelized",
"elements",
".",
"These",
"must",
"be",
"instances",
"of",
"model",
".",
"IdentifiableArtefact",
"i",
".",
"e",
".",
"have",
"an",
"id",
"attribute",
".",
"This",
"will",
"be",
"used",
"as",
"dict",
"keys",
".",
"If",
"sdmxobj",
"does",
"not",
"inherit",
"from",
"dict",
":",
"return",
"a",
"new",
"DictLike",
"."
] | python | train |
saltstack/salt | salt/modules/kerberos.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L119-L144 | def list_policies():
'''
List policies
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.list_policies
'''
ret = {}
cmd = __execute_kadmin('list_policies')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
ret = {'policies': []}
for i in cmd['stdout'].splitlines()[1:]:
ret['policies'].append(i)
return ret | [
"def",
"list_policies",
"(",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"__execute_kadmin",
"(",
"'list_policies'",
")",
"if",
"cmd",
"[",
"'retcode'",
"]",
"!=",
"0",
"or",
"cmd",
"[",
"'stderr'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"cmd",
"[",
"'stderr'",
"]",
".",
"splitlines",
"(",
")",
"[",
"-",
"1",
"]",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"ret",
"=",
"{",
"'policies'",
":",
"[",
"]",
"}",
"for",
"i",
"in",
"cmd",
"[",
"'stdout'",
"]",
".",
"splitlines",
"(",
")",
"[",
"1",
":",
"]",
":",
"ret",
"[",
"'policies'",
"]",
".",
"append",
"(",
"i",
")",
"return",
"ret"
] | List policies
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.list_policies | [
"List",
"policies"
] | python | train |
ucfopen/canvasapi | canvasapi/canvas.py | https://github.com/ucfopen/canvasapi/blob/319064b5fc97ba54250af683eb98723ef3f76cf8/canvasapi/canvas.py#L901-L926 | def get_user_participants(self, appointment_group, **kwargs):
"""
List user participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/users \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User`
"""
from canvasapi.appointment_group import AppointmentGroup
from canvasapi.user import User
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
return PaginatedList(
User,
self.__requester,
'GET',
'appointment_groups/{}/users'.format(appointment_group_id),
_kwargs=combine_kwargs(**kwargs)
) | [
"def",
"get_user_participants",
"(",
"self",
",",
"appointment_group",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"canvasapi",
".",
"appointment_group",
"import",
"AppointmentGroup",
"from",
"canvasapi",
".",
"user",
"import",
"User",
"appointment_group_id",
"=",
"obj_or_id",
"(",
"appointment_group",
",",
"\"appointment_group\"",
",",
"(",
"AppointmentGroup",
",",
")",
")",
"return",
"PaginatedList",
"(",
"User",
",",
"self",
".",
"__requester",
",",
"'GET'",
",",
"'appointment_groups/{}/users'",
".",
"format",
"(",
"appointment_group_id",
")",
",",
"_kwargs",
"=",
"combine_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")"
] | List user participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/users \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User` | [
"List",
"user",
"participants",
"in",
"this",
"appointment",
"group",
"."
] | python | train |
jasonrbriggs/proton | python/proton/xmlutils.py | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/xmlutils.py#L29-L39 | def replaceelement(oldelem, newelem):
'''
Given a parent element, replace oldelem with newelem.
'''
parent = oldelem.getparent()
if parent is not None:
size = len(parent.getchildren())
for x in range(0, size):
if parent.getchildren()[x] == oldelem:
parent.remove(oldelem)
parent.insert(x, newelem) | [
"def",
"replaceelement",
"(",
"oldelem",
",",
"newelem",
")",
":",
"parent",
"=",
"oldelem",
".",
"getparent",
"(",
")",
"if",
"parent",
"is",
"not",
"None",
":",
"size",
"=",
"len",
"(",
"parent",
".",
"getchildren",
"(",
")",
")",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"size",
")",
":",
"if",
"parent",
".",
"getchildren",
"(",
")",
"[",
"x",
"]",
"==",
"oldelem",
":",
"parent",
".",
"remove",
"(",
"oldelem",
")",
"parent",
".",
"insert",
"(",
"x",
",",
"newelem",
")"
] | Given a parent element, replace oldelem with newelem. | [
"Given",
"a",
"parent",
"element",
"replace",
"oldelem",
"with",
"newelem",
"."
] | python | train |
yyuu/botornado | boto/connection.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/connection.py#L267-L285 | def clean(self):
"""
Clean up the stale connections in all of the pools, and then
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
aren't being used any more, so nothing is being gotten from
them.
"""
with self.mutex:
now = time.time()
if self.last_clean_time + self.CLEAN_INTERVAL < now:
to_remove = []
for (host, pool) in self.host_to_pool.items():
pool.clean()
if pool.size() == 0:
to_remove.append(host)
for host in to_remove:
del self.host_to_pool[host]
self.last_clean_time = now | [
"def",
"clean",
"(",
"self",
")",
":",
"with",
"self",
".",
"mutex",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"last_clean_time",
"+",
"self",
".",
"CLEAN_INTERVAL",
"<",
"now",
":",
"to_remove",
"=",
"[",
"]",
"for",
"(",
"host",
",",
"pool",
")",
"in",
"self",
".",
"host_to_pool",
".",
"items",
"(",
")",
":",
"pool",
".",
"clean",
"(",
")",
"if",
"pool",
".",
"size",
"(",
")",
"==",
"0",
":",
"to_remove",
".",
"append",
"(",
"host",
")",
"for",
"host",
"in",
"to_remove",
":",
"del",
"self",
".",
"host_to_pool",
"[",
"host",
"]",
"self",
".",
"last_clean_time",
"=",
"now"
] | Clean up the stale connections in all of the pools, and then
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
aren't being used any more, so nothing is being gotten from
them. | [
"Clean",
"up",
"the",
"stale",
"connections",
"in",
"all",
"of",
"the",
"pools",
"and",
"then",
"get",
"rid",
"of",
"empty",
"pools",
".",
"Pools",
"clean",
"themselves",
"every",
"time",
"a",
"connection",
"is",
"fetched",
";",
"this",
"cleaning",
"takes",
"care",
"of",
"pools",
"that",
"aren",
"t",
"being",
"used",
"any",
"more",
"so",
"nothing",
"is",
"being",
"gotten",
"from",
"them",
"."
] | python | train |
sergei-maertens/django-systemjs | systemjs/templatetags/system_tags.py | https://github.com/sergei-maertens/django-systemjs/blob/efd4a3862a39d9771609a25a5556f36023cf6e5c/systemjs/templatetags/system_tags.py#L27-L56 | def render(self, context):
"""
Build the filepath by appending the extension.
"""
module_path = self.path.resolve(context)
if not settings.SYSTEMJS_ENABLED:
if settings.SYSTEMJS_DEFAULT_JS_EXTENSIONS:
name, ext = posixpath.splitext(module_path)
if not ext:
module_path = '{}.js'.format(module_path)
if settings.SYSTEMJS_SERVER_URL:
tpl = """<script src="{url}{app}" type="text/javascript"></script>"""
else:
tpl = """<script type="text/javascript">System.import('{app}');</script>"""
return tpl.format(app=module_path, url=settings.SYSTEMJS_SERVER_URL)
# else: create a bundle
rel_path = System.get_bundle_path(module_path)
url = staticfiles_storage.url(rel_path)
tag_attrs = {'type': 'text/javascript'}
for key, value in self.tag_attrs.items():
if not isinstance(value, bool):
value = value.resolve(context)
tag_attrs[key] = value
return """<script{attrs} src="{url}"></script>""".format(
url=url, attrs=flatatt(tag_attrs)
) | [
"def",
"render",
"(",
"self",
",",
"context",
")",
":",
"module_path",
"=",
"self",
".",
"path",
".",
"resolve",
"(",
"context",
")",
"if",
"not",
"settings",
".",
"SYSTEMJS_ENABLED",
":",
"if",
"settings",
".",
"SYSTEMJS_DEFAULT_JS_EXTENSIONS",
":",
"name",
",",
"ext",
"=",
"posixpath",
".",
"splitext",
"(",
"module_path",
")",
"if",
"not",
"ext",
":",
"module_path",
"=",
"'{}.js'",
".",
"format",
"(",
"module_path",
")",
"if",
"settings",
".",
"SYSTEMJS_SERVER_URL",
":",
"tpl",
"=",
"\"\"\"<script src=\"{url}{app}\" type=\"text/javascript\"></script>\"\"\"",
"else",
":",
"tpl",
"=",
"\"\"\"<script type=\"text/javascript\">System.import('{app}');</script>\"\"\"",
"return",
"tpl",
".",
"format",
"(",
"app",
"=",
"module_path",
",",
"url",
"=",
"settings",
".",
"SYSTEMJS_SERVER_URL",
")",
"# else: create a bundle",
"rel_path",
"=",
"System",
".",
"get_bundle_path",
"(",
"module_path",
")",
"url",
"=",
"staticfiles_storage",
".",
"url",
"(",
"rel_path",
")",
"tag_attrs",
"=",
"{",
"'type'",
":",
"'text/javascript'",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"tag_attrs",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"value",
"=",
"value",
".",
"resolve",
"(",
"context",
")",
"tag_attrs",
"[",
"key",
"]",
"=",
"value",
"return",
"\"\"\"<script{attrs} src=\"{url}\"></script>\"\"\"",
".",
"format",
"(",
"url",
"=",
"url",
",",
"attrs",
"=",
"flatatt",
"(",
"tag_attrs",
")",
")"
] | Build the filepath by appending the extension. | [
"Build",
"the",
"filepath",
"by",
"appending",
"the",
"extension",
"."
] | python | test |
getfleety/coralillo | coralillo/datamodel.py | https://github.com/getfleety/coralillo/blob/9cac101738a0fa7c1106f129604c00ef703370e1/coralillo/datamodel.py#L39-L60 | def distance(self, loc):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
assert type(loc) == type(self)
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [
self.lon,
self.lat,
loc.lon,
loc.lat,
])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371000 # Radius of earth in meters.
return c * r | [
"def",
"distance",
"(",
"self",
",",
"loc",
")",
":",
"assert",
"type",
"(",
"loc",
")",
"==",
"type",
"(",
"self",
")",
"# convert decimal degrees to radians",
"lon1",
",",
"lat1",
",",
"lon2",
",",
"lat2",
"=",
"map",
"(",
"radians",
",",
"[",
"self",
".",
"lon",
",",
"self",
".",
"lat",
",",
"loc",
".",
"lon",
",",
"loc",
".",
"lat",
",",
"]",
")",
"# haversine formula",
"dlon",
"=",
"lon2",
"-",
"lon1",
"dlat",
"=",
"lat2",
"-",
"lat1",
"a",
"=",
"sin",
"(",
"dlat",
"/",
"2",
")",
"**",
"2",
"+",
"cos",
"(",
"lat1",
")",
"*",
"cos",
"(",
"lat2",
")",
"*",
"sin",
"(",
"dlon",
"/",
"2",
")",
"**",
"2",
"c",
"=",
"2",
"*",
"asin",
"(",
"sqrt",
"(",
"a",
")",
")",
"r",
"=",
"6371000",
"# Radius of earth in meters.",
"return",
"c",
"*",
"r"
] | Calculate the great circle distance between two points
on the earth (specified in decimal degrees) | [
"Calculate",
"the",
"great",
"circle",
"distance",
"between",
"two",
"points",
"on",
"the",
"earth",
"(",
"specified",
"in",
"decimal",
"degrees",
")"
] | python | train |
yahoo/TensorFlowOnSpark | examples/imagenet/inception/inception_export.py | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/inception_export.py#L30-L115 | def export(_):
FLAGS = tf.app.flags.FLAGS
"""Evaluate model on Dataset for a number of steps."""
#with tf.Graph().as_default():
tf.reset_default_graph()
def preprocess_image(image_buffer):
"""Preprocess JPEG encoded bytes to 3D float Tensor."""
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fraction=0.875)
# Resize the image to the original height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(
image, [FLAGS.image_size, FLAGS.image_size], align_corners=False)
image = tf.squeeze(image, [0])
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
# Get images and labels from the dataset.
jpegs = tf.placeholder(tf.string, [None], name='jpegs')
images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
labels = tf.placeholder(tf.int32, [None], name='labels')
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
dataset = ImagenetData(subset=FLAGS.subset)
num_classes = dataset.num_classes() + 1
# Build a Graph that computes the logits predictions from the
# inference model.
logits, _ = inception.inference(images, num_classes)
# Calculate predictions.
top_1_op = tf.nn.in_top_k(logits, labels, 1)
top_5_op = tf.nn.in_top_k(logits, labels, 5)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if not ckpt or not ckpt.model_checkpoint_path:
raise Exception("No checkpoint file found at: {}".format(FLAGS.train_dir))
print("ckpt.model_checkpoint_path: {0}".format(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/imagenet_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print('Successfully loaded model from %s at step=%s.' %
(ckpt.model_checkpoint_path, global_step))
print("Exporting saved_model to: {}".format(FLAGS.export_dir))
# exported signatures defined in code
signatures = {
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: {
'inputs': { 'jpegs': jpegs, 'labels': labels },
'outputs': { 'top_5_acc': top_5_op },
'method_name': tf.saved_model.signature_constants.PREDICT_METHOD_NAME
}
}
TFNode.export_saved_model(sess,
FLAGS.export_dir,
tf.saved_model.tag_constants.SERVING,
signatures)
print("Exported saved_model") | [
"def",
"export",
"(",
"_",
")",
":",
"FLAGS",
"=",
"tf",
".",
"app",
".",
"flags",
".",
"FLAGS",
"#with tf.Graph().as_default():",
"tf",
".",
"reset_default_graph",
"(",
")",
"def",
"preprocess_image",
"(",
"image_buffer",
")",
":",
"\"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\"",
"# Decode the string as an RGB JPEG.",
"# Note that the resulting image contains an unknown height and width",
"# that is set dynamically by decode_jpeg. In other words, the height",
"# and width of image is unknown at compile-time.",
"image",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"image_buffer",
",",
"channels",
"=",
"3",
")",
"# After this point, all image pixels reside in [0,1)",
"# until the very end, when they're rescaled to (-1, 1). The various",
"# adjust_* ops all require this range for dtype float.",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Crop the central region of the image with an area containing 87.5% of",
"# the original image.",
"image",
"=",
"tf",
".",
"image",
".",
"central_crop",
"(",
"image",
",",
"central_fraction",
"=",
"0.875",
")",
"# Resize the image to the original height and width.",
"image",
"=",
"tf",
".",
"expand_dims",
"(",
"image",
",",
"0",
")",
"image",
"=",
"tf",
".",
"image",
".",
"resize_bilinear",
"(",
"image",
",",
"[",
"FLAGS",
".",
"image_size",
",",
"FLAGS",
".",
"image_size",
"]",
",",
"align_corners",
"=",
"False",
")",
"image",
"=",
"tf",
".",
"squeeze",
"(",
"image",
",",
"[",
"0",
"]",
")",
"# Finally, rescale to [-1,1] instead of [0, 1)",
"image",
"=",
"tf",
".",
"subtract",
"(",
"image",
",",
"0.5",
")",
"image",
"=",
"tf",
".",
"multiply",
"(",
"image",
",",
"2.0",
")",
"return",
"image",
"# Get images and labels from the dataset.",
"jpegs",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"[",
"None",
"]",
",",
"name",
"=",
"'jpegs'",
")",
"images",
"=",
"tf",
".",
"map_fn",
"(",
"preprocess_image",
",",
"jpegs",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"labels",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"[",
"None",
"]",
",",
"name",
"=",
"'labels'",
")",
"# Number of classes in the Dataset label set plus 1.",
"# Label 0 is reserved for an (unused) background class.",
"dataset",
"=",
"ImagenetData",
"(",
"subset",
"=",
"FLAGS",
".",
"subset",
")",
"num_classes",
"=",
"dataset",
".",
"num_classes",
"(",
")",
"+",
"1",
"# Build a Graph that computes the logits predictions from the",
"# inference model.",
"logits",
",",
"_",
"=",
"inception",
".",
"inference",
"(",
"images",
",",
"num_classes",
")",
"# Calculate predictions.",
"top_1_op",
"=",
"tf",
".",
"nn",
".",
"in_top_k",
"(",
"logits",
",",
"labels",
",",
"1",
")",
"top_5_op",
"=",
"tf",
".",
"nn",
".",
"in_top_k",
"(",
"logits",
",",
"labels",
",",
"5",
")",
"# Restore the moving average version of the learned variables for eval.",
"variable_averages",
"=",
"tf",
".",
"train",
".",
"ExponentialMovingAverage",
"(",
"inception",
".",
"MOVING_AVERAGE_DECAY",
")",
"variables_to_restore",
"=",
"variable_averages",
".",
"variables_to_restore",
"(",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"variables_to_restore",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"ckpt",
"=",
"tf",
".",
"train",
".",
"get_checkpoint_state",
"(",
"FLAGS",
".",
"train_dir",
")",
"if",
"not",
"ckpt",
"or",
"not",
"ckpt",
".",
"model_checkpoint_path",
":",
"raise",
"Exception",
"(",
"\"No checkpoint file found at: {}\"",
".",
"format",
"(",
"FLAGS",
".",
"train_dir",
")",
")",
"print",
"(",
"\"ckpt.model_checkpoint_path: {0}\"",
".",
"format",
"(",
"ckpt",
".",
"model_checkpoint_path",
")",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"ckpt",
".",
"model_checkpoint_path",
")",
"# Assuming model_checkpoint_path looks something like:",
"# /my-favorite-path/imagenet_train/model.ckpt-0,",
"# extract global_step from it.",
"global_step",
"=",
"ckpt",
".",
"model_checkpoint_path",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"-",
"1",
"]",
"print",
"(",
"'Successfully loaded model from %s at step=%s.'",
"%",
"(",
"ckpt",
".",
"model_checkpoint_path",
",",
"global_step",
")",
")",
"print",
"(",
"\"Exporting saved_model to: {}\"",
".",
"format",
"(",
"FLAGS",
".",
"export_dir",
")",
")",
"# exported signatures defined in code",
"signatures",
"=",
"{",
"tf",
".",
"saved_model",
".",
"signature_constants",
".",
"DEFAULT_SERVING_SIGNATURE_DEF_KEY",
":",
"{",
"'inputs'",
":",
"{",
"'jpegs'",
":",
"jpegs",
",",
"'labels'",
":",
"labels",
"}",
",",
"'outputs'",
":",
"{",
"'top_5_acc'",
":",
"top_5_op",
"}",
",",
"'method_name'",
":",
"tf",
".",
"saved_model",
".",
"signature_constants",
".",
"PREDICT_METHOD_NAME",
"}",
"}",
"TFNode",
".",
"export_saved_model",
"(",
"sess",
",",
"FLAGS",
".",
"export_dir",
",",
"tf",
".",
"saved_model",
".",
"tag_constants",
".",
"SERVING",
",",
"signatures",
")",
"print",
"(",
"\"Exported saved_model\"",
")"
] | Evaluate model on Dataset for a number of steps. | [
"Evaluate",
"model",
"on",
"Dataset",
"for",
"a",
"number",
"of",
"steps",
"."
] | python | train |
riggsd/davies | examples/wx_compass.py | https://github.com/riggsd/davies/blob/8566c626202a875947ad01c087300108c68d80b5/examples/wx_compass.py#L262-L268 | def OnInit(self):
"""Initialize by creating the split window with the tree"""
project = compass.CompassProjectParser(sys.argv[1]).parse()
frame = MyFrame(None, -1, 'wxCompass', project)
frame.Show(True)
self.SetTopWindow(frame)
return True | [
"def",
"OnInit",
"(",
"self",
")",
":",
"project",
"=",
"compass",
".",
"CompassProjectParser",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
")",
".",
"parse",
"(",
")",
"frame",
"=",
"MyFrame",
"(",
"None",
",",
"-",
"1",
",",
"'wxCompass'",
",",
"project",
")",
"frame",
".",
"Show",
"(",
"True",
")",
"self",
".",
"SetTopWindow",
"(",
"frame",
")",
"return",
"True"
] | Initialize by creating the split window with the tree | [
"Initialize",
"by",
"creating",
"the",
"split",
"window",
"with",
"the",
"tree"
] | python | train |
rosenbrockc/ci | pyci/msg.py | https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/msg.py#L74-L78 | def vms(message, level=1):
"""Writes the specified message *only* if verbose output is enabled."""
if verbose is not None and verbose != False:
if isinstance(verbose, bool) or (isinstance(verbose, int) and level <= verbose):
std(message) | [
"def",
"vms",
"(",
"message",
",",
"level",
"=",
"1",
")",
":",
"if",
"verbose",
"is",
"not",
"None",
"and",
"verbose",
"!=",
"False",
":",
"if",
"isinstance",
"(",
"verbose",
",",
"bool",
")",
"or",
"(",
"isinstance",
"(",
"verbose",
",",
"int",
")",
"and",
"level",
"<=",
"verbose",
")",
":",
"std",
"(",
"message",
")"
] | Writes the specified message *only* if verbose output is enabled. | [
"Writes",
"the",
"specified",
"message",
"*",
"only",
"*",
"if",
"verbose",
"output",
"is",
"enabled",
"."
] | python | train |
loli/medpy | medpy/graphcut/wrapper.py | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/wrapper.py#L42-L72 | def split_marker(marker, fg_id = 1, bg_id = 2):
"""
Splits an integer marker image into two binary image containing the foreground and
background markers respectively.
All encountered 1's are hereby treated as foreground, all 2's as background, all 0's
as neutral marker and all others are ignored.
This behaviour can be changed by supplying the fg_id and/or bg_id parameters.
Parameters
----------
marker : ndarray
The marker image.
fg_id : integer
The value that should be treated as foreground.
bg_id : integer
The value that should be treated as background.
Returns
-------
fgmarkers, bgmarkers : ndarray
The fore- and background markers as boolean images.
"""
img_marker = scipy.asarray(marker)
img_fgmarker = scipy.zeros(img_marker.shape, scipy.bool_)
img_fgmarker[img_marker == fg_id] = True
img_bgmarker = scipy.zeros(img_marker.shape, scipy.bool_)
img_bgmarker[img_marker == bg_id] = True
return img_fgmarker, img_bgmarker | [
"def",
"split_marker",
"(",
"marker",
",",
"fg_id",
"=",
"1",
",",
"bg_id",
"=",
"2",
")",
":",
"img_marker",
"=",
"scipy",
".",
"asarray",
"(",
"marker",
")",
"img_fgmarker",
"=",
"scipy",
".",
"zeros",
"(",
"img_marker",
".",
"shape",
",",
"scipy",
".",
"bool_",
")",
"img_fgmarker",
"[",
"img_marker",
"==",
"fg_id",
"]",
"=",
"True",
"img_bgmarker",
"=",
"scipy",
".",
"zeros",
"(",
"img_marker",
".",
"shape",
",",
"scipy",
".",
"bool_",
")",
"img_bgmarker",
"[",
"img_marker",
"==",
"bg_id",
"]",
"=",
"True",
"return",
"img_fgmarker",
",",
"img_bgmarker"
] | Splits an integer marker image into two binary image containing the foreground and
background markers respectively.
All encountered 1's are hereby treated as foreground, all 2's as background, all 0's
as neutral marker and all others are ignored.
This behaviour can be changed by supplying the fg_id and/or bg_id parameters.
Parameters
----------
marker : ndarray
The marker image.
fg_id : integer
The value that should be treated as foreground.
bg_id : integer
The value that should be treated as background.
Returns
-------
fgmarkers, bgmarkers : ndarray
The fore- and background markers as boolean images. | [
"Splits",
"an",
"integer",
"marker",
"image",
"into",
"two",
"binary",
"image",
"containing",
"the",
"foreground",
"and",
"background",
"markers",
"respectively",
".",
"All",
"encountered",
"1",
"s",
"are",
"hereby",
"treated",
"as",
"foreground",
"all",
"2",
"s",
"as",
"background",
"all",
"0",
"s",
"as",
"neutral",
"marker",
"and",
"all",
"others",
"are",
"ignored",
".",
"This",
"behaviour",
"can",
"be",
"changed",
"by",
"supplying",
"the",
"fg_id",
"and",
"/",
"or",
"bg_id",
"parameters",
".",
"Parameters",
"----------",
"marker",
":",
"ndarray",
"The",
"marker",
"image",
".",
"fg_id",
":",
"integer",
"The",
"value",
"that",
"should",
"be",
"treated",
"as",
"foreground",
".",
"bg_id",
":",
"integer",
"The",
"value",
"that",
"should",
"be",
"treated",
"as",
"background",
".",
"Returns",
"-------",
"fgmarkers",
"bgmarkers",
":",
"nadarray",
"The",
"fore",
"-",
"and",
"background",
"markers",
"as",
"boolean",
"images",
"."
] | python | train |
tdryer/hangups | hangups/conversation.py | https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/conversation.py#L476-L506 | async def leave(self):
"""Leave this conversation.
Raises:
.NetworkError: If conversation cannot be left.
"""
is_group_conversation = (self._conversation.type ==
hangouts_pb2.CONVERSATION_TYPE_GROUP)
try:
if is_group_conversation:
await self._client.remove_user(
hangouts_pb2.RemoveUserRequest(
request_header=self._client.get_request_header(),
event_request_header=self._get_event_request_header(),
)
)
else:
await self._client.delete_conversation(
hangouts_pb2.DeleteConversationRequest(
request_header=self._client.get_request_header(),
conversation_id=hangouts_pb2.ConversationId(
id=self.id_
),
delete_upper_bound_timestamp=parsers.to_timestamp(
datetime.datetime.now(tz=datetime.timezone.utc)
)
)
)
except exceptions.NetworkError as e:
logger.warning('Failed to leave conversation: {}'.format(e))
raise | [
"async",
"def",
"leave",
"(",
"self",
")",
":",
"is_group_conversation",
"=",
"(",
"self",
".",
"_conversation",
".",
"type",
"==",
"hangouts_pb2",
".",
"CONVERSATION_TYPE_GROUP",
")",
"try",
":",
"if",
"is_group_conversation",
":",
"await",
"self",
".",
"_client",
".",
"remove_user",
"(",
"hangouts_pb2",
".",
"RemoveUserRequest",
"(",
"request_header",
"=",
"self",
".",
"_client",
".",
"get_request_header",
"(",
")",
",",
"event_request_header",
"=",
"self",
".",
"_get_event_request_header",
"(",
")",
",",
")",
")",
"else",
":",
"await",
"self",
".",
"_client",
".",
"delete_conversation",
"(",
"hangouts_pb2",
".",
"DeleteConversationRequest",
"(",
"request_header",
"=",
"self",
".",
"_client",
".",
"get_request_header",
"(",
")",
",",
"conversation_id",
"=",
"hangouts_pb2",
".",
"ConversationId",
"(",
"id",
"=",
"self",
".",
"id_",
")",
",",
"delete_upper_bound_timestamp",
"=",
"parsers",
".",
"to_timestamp",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
"tz",
"=",
"datetime",
".",
"timezone",
".",
"utc",
")",
")",
")",
")",
"except",
"exceptions",
".",
"NetworkError",
"as",
"e",
":",
"logger",
".",
"warning",
"(",
"'Failed to leave conversation: {}'",
".",
"format",
"(",
"e",
")",
")",
"raise"
] | Leave this conversation.
Raises:
.NetworkError: If conversation cannot be left. | [
"Leave",
"this",
"conversation",
"."
] | python | valid |
Locu/chronology | kronos/kronos/storage/router.py | https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/storage/router.py#L101-L109 | def backends_to_mutate(self, namespace, stream):
"""
Return all the backends enabled for writing for `stream`.
"""
if namespace not in self.namespaces:
raise NamespaceMissing('`{}` namespace is not configured'
.format(namespace))
return self.prefix_confs[namespace][self.get_matching_prefix(namespace,
stream)] | [
"def",
"backends_to_mutate",
"(",
"self",
",",
"namespace",
",",
"stream",
")",
":",
"if",
"namespace",
"not",
"in",
"self",
".",
"namespaces",
":",
"raise",
"NamespaceMissing",
"(",
"'`{}` namespace is not configured'",
".",
"format",
"(",
"namespace",
")",
")",
"return",
"self",
".",
"prefix_confs",
"[",
"namespace",
"]",
"[",
"self",
".",
"get_matching_prefix",
"(",
"namespace",
",",
"stream",
")",
"]"
] | Return all the backends enabled for writing for `stream`. | [
"Return",
"all",
"the",
"backends",
"enabled",
"for",
"writing",
"for",
"stream",
"."
] | python | train |
twilio/twilio-python | twilio/rest/video/v1/composition_hook.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/composition_hook.py#L104-L139 | def page(self, enabled=values.unset, date_created_after=values.unset,
date_created_before=values.unset, friendly_name=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of CompositionHookInstance records from the API.
Request is executed immediately
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
"""
params = values.of({
'Enabled': enabled,
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'DateCreatedBefore': serialize.iso8601_datetime(date_created_before),
'FriendlyName': friendly_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return CompositionHookPage(self._version, response, self._solution) | [
"def",
"page",
"(",
"self",
",",
"enabled",
"=",
"values",
".",
"unset",
",",
"date_created_after",
"=",
"values",
".",
"unset",
",",
"date_created_before",
"=",
"values",
".",
"unset",
",",
"friendly_name",
"=",
"values",
".",
"unset",
",",
"page_token",
"=",
"values",
".",
"unset",
",",
"page_number",
"=",
"values",
".",
"unset",
",",
"page_size",
"=",
"values",
".",
"unset",
")",
":",
"params",
"=",
"values",
".",
"of",
"(",
"{",
"'Enabled'",
":",
"enabled",
",",
"'DateCreatedAfter'",
":",
"serialize",
".",
"iso8601_datetime",
"(",
"date_created_after",
")",
",",
"'DateCreatedBefore'",
":",
"serialize",
".",
"iso8601_datetime",
"(",
"date_created_before",
")",
",",
"'FriendlyName'",
":",
"friendly_name",
",",
"'PageToken'",
":",
"page_token",
",",
"'Page'",
":",
"page_number",
",",
"'PageSize'",
":",
"page_size",
",",
"}",
")",
"response",
"=",
"self",
".",
"_version",
".",
"page",
"(",
"'GET'",
",",
"self",
".",
"_uri",
",",
"params",
"=",
"params",
",",
")",
"return",
"CompositionHookPage",
"(",
"self",
".",
"_version",
",",
"response",
",",
"self",
".",
"_solution",
")"
] | Retrieve a single page of CompositionHookInstance records from the API.
Request is executed immediately
:param bool enabled: Only show Composition Hooks enabled or disabled.
:param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
:param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
:param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage | [
"Retrieve",
"a",
"single",
"page",
"of",
"CompositionHookInstance",
"records",
"from",
"the",
"API",
".",
"Request",
"is",
"executed",
"immediately"
] | python | train |
wavefrontHQ/python-client | wavefront_api_client/api/derived_metric_api.py | https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/derived_metric_api.py#L143-L163 | def create_derived_metric(self, **kwargs): # noqa: E501
"""Create a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_derived_metric(async_req=True)
>>> result = thread.get()
:param async_req bool
:param DerivedMetricDefinition body: Example Body: <pre>{ \"name\": \"Query Name\", \"query\": \"aliasMetric(ts(~sample.cpu.loadavg.1m), \\\"my.new.metric\\\")\", \"minutes\": 5, \"additionalInformation\": \"Additional Info\", \"tags\": { \"customerTags\": [ \"derivedMetricTag1\" ] } }</pre>
:return: ResponseContainerDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_derived_metric_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_derived_metric_with_http_info(**kwargs) # noqa: E501
return data | [
"def",
"create_derived_metric",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"create_derived_metric_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"create_derived_metric_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Create a specific derived metric definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_derived_metric(async_req=True)
>>> result = thread.get()
:param async_req bool
:param DerivedMetricDefinition body: Example Body: <pre>{ \"name\": \"Query Name\", \"query\": \"aliasMetric(ts(~sample.cpu.loadavg.1m), \\\"my.new.metric\\\")\", \"minutes\": 5, \"additionalInformation\": \"Additional Info\", \"tags\": { \"customerTags\": [ \"derivedMetricTag1\" ] } }</pre>
:return: ResponseContainerDerivedMetricDefinition
If the method is called asynchronously,
returns the request thread. | [
"Create",
"a",
"specific",
"derived",
"metric",
"definition",
"#",
"noqa",
":",
"E501"
] | python | train |
PmagPy/PmagPy | pmagpy/contribution_builder.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L177-L206 | def propagate_measurement_info(self):
"""
Take a contribution with a measurement table.
Create specimen, sample, site, and location tables
using the unique names in the measurement table to fill in
the index.
"""
meas_df = self.tables['measurements'].df
names_list = ['specimen', 'sample', 'site', 'location']
# add in any tables that you can
for num, name in enumerate(names_list):
# don't replace tables that already exist
if (name + "s") in self.tables:
continue
elif name in meas_df.columns:
items = meas_df[name].unique()
df = pd.DataFrame(columns=[name], index=items)
df[name] = df.index
# add in parent name if possible
# (i.e., sample name to specimens table)
if num < (len(names_list) - 1):
parent = names_list[num+1]
if parent in meas_df.columns:
meas_df = meas_df.where(meas_df.notnull(), "")
df[parent] = meas_df.drop_duplicates(subset=[name])[parent].values.astype(str)
df = df.where(df != "", np.nan)
df = df.dropna(how='all', axis='rows')
if len(df):
self.tables[name + "s"] = MagicDataFrame(dtype=name + "s", df=df)
self.write_table_to_file(name + "s") | [
"def",
"propagate_measurement_info",
"(",
"self",
")",
":",
"meas_df",
"=",
"self",
".",
"tables",
"[",
"'measurements'",
"]",
".",
"df",
"names_list",
"=",
"[",
"'specimen'",
",",
"'sample'",
",",
"'site'",
",",
"'location'",
"]",
"# add in any tables that you can",
"for",
"num",
",",
"name",
"in",
"enumerate",
"(",
"names_list",
")",
":",
"# don't replace tables that already exist",
"if",
"(",
"name",
"+",
"\"s\"",
")",
"in",
"self",
".",
"tables",
":",
"continue",
"elif",
"name",
"in",
"meas_df",
".",
"columns",
":",
"items",
"=",
"meas_df",
"[",
"name",
"]",
".",
"unique",
"(",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"name",
"]",
",",
"index",
"=",
"items",
")",
"df",
"[",
"name",
"]",
"=",
"df",
".",
"index",
"# add in parent name if possible",
"# (i.e., sample name to specimens table)",
"if",
"num",
"<",
"(",
"len",
"(",
"names_list",
")",
"-",
"1",
")",
":",
"parent",
"=",
"names_list",
"[",
"num",
"+",
"1",
"]",
"if",
"parent",
"in",
"meas_df",
".",
"columns",
":",
"meas_df",
"=",
"meas_df",
".",
"where",
"(",
"meas_df",
".",
"notnull",
"(",
")",
",",
"\"\"",
")",
"df",
"[",
"parent",
"]",
"=",
"meas_df",
".",
"drop_duplicates",
"(",
"subset",
"=",
"[",
"name",
"]",
")",
"[",
"parent",
"]",
".",
"values",
".",
"astype",
"(",
"str",
")",
"df",
"=",
"df",
".",
"where",
"(",
"df",
"!=",
"\"\"",
",",
"np",
".",
"nan",
")",
"df",
"=",
"df",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"axis",
"=",
"'rows'",
")",
"if",
"len",
"(",
"df",
")",
":",
"self",
".",
"tables",
"[",
"name",
"+",
"\"s\"",
"]",
"=",
"MagicDataFrame",
"(",
"dtype",
"=",
"name",
"+",
"\"s\"",
",",
"df",
"=",
"df",
")",
"self",
".",
"write_table_to_file",
"(",
"name",
"+",
"\"s\"",
")"
] | Take a contribution with a measurement table.
Create specimen, sample, site, and location tables
using the unique names in the measurement table to fill in
the index. | [
"Take",
"a",
"contribution",
"with",
"a",
"measurement",
"table",
".",
"Create",
"specimen",
"sample",
"site",
"and",
"location",
"tables",
"using",
"the",
"unique",
"names",
"in",
"the",
"measurement",
"table",
"to",
"fill",
"in",
"the",
"index",
"."
] | python | train |
guaix-ucm/numina | numina/array/nirproc.py | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/nirproc.py#L135-L202 | def ramp_array(rampdata, ti, gain=1.0, ron=1.0,
badpixels=None, dtype='float64',
saturation=65631, blank=0, nsig=None, normalize=False):
"""Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of an nIR observation in follow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param rampdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, number of pixels used
and badpixel mask.
:raises: ValueError
"""
import numina.array._nirproc as _nirproc
if ti <= 0:
raise ValueError("invalid parameter, ti <= 0.0")
if gain <= 0:
raise ValueError("invalid parameter, gain <= 0.0")
if ron <= 0:
raise ValueError("invalid parameter, ron < 0.0")
if saturation <= 0:
raise ValueError("invalid parameter, saturation <= 0")
rampdata = numpy.asarray(rampdata)
if rampdata.ndim != 3:
raise ValueError('rampdata must be 3D')
# change byteorder
ndtype = rampdata.dtype.newbyteorder('=')
rampdata = numpy.asarray(rampdata, dtype=ndtype)
# type of the output
fdtype = numpy.result_type(rampdata.dtype, dtype)
# Type of the mask
mdtype = numpy.dtype('uint8')
fshape = (rampdata.shape[1], rampdata.shape[2])
if badpixels is None:
badpixels = numpy.zeros(fshape, dtype=mdtype)
else:
if badpixels.shape != fshape:
msg = 'shape of badpixels is not compatible with shape of rampdata'
raise ValueError(msg)
if badpixels.dtype != mdtype:
raise ValueError('dtype of badpixels must be uint8')
result = numpy.empty(fshape, dtype=fdtype)
var = numpy.empty_like(result)
npix = numpy.empty(fshape, dtype=mdtype)
mask = badpixels.copy()
_nirproc._process_ramp_intl(
rampdata, ti, gain, ron, badpixels,
saturation, blank, result, var, npix, mask
)
return result, var, npix, mask | [
"def",
"ramp_array",
"(",
"rampdata",
",",
"ti",
",",
"gain",
"=",
"1.0",
",",
"ron",
"=",
"1.0",
",",
"badpixels",
"=",
"None",
",",
"dtype",
"=",
"'float64'",
",",
"saturation",
"=",
"65631",
",",
"blank",
"=",
"0",
",",
"nsig",
"=",
"None",
",",
"normalize",
"=",
"False",
")",
":",
"import",
"numina",
".",
"array",
".",
"_nirproc",
"as",
"_nirproc",
"if",
"ti",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ti <= 0.0\"",
")",
"if",
"gain",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, gain <= 0.0\"",
")",
"if",
"ron",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, ron < 0.0\"",
")",
"if",
"saturation",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"invalid parameter, saturation <= 0\"",
")",
"rampdata",
"=",
"numpy",
".",
"asarray",
"(",
"rampdata",
")",
"if",
"rampdata",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'rampdata must be 3D'",
")",
"# change byteorder",
"ndtype",
"=",
"rampdata",
".",
"dtype",
".",
"newbyteorder",
"(",
"'='",
")",
"rampdata",
"=",
"numpy",
".",
"asarray",
"(",
"rampdata",
",",
"dtype",
"=",
"ndtype",
")",
"# type of the output",
"fdtype",
"=",
"numpy",
".",
"result_type",
"(",
"rampdata",
".",
"dtype",
",",
"dtype",
")",
"# Type of the mask",
"mdtype",
"=",
"numpy",
".",
"dtype",
"(",
"'uint8'",
")",
"fshape",
"=",
"(",
"rampdata",
".",
"shape",
"[",
"1",
"]",
",",
"rampdata",
".",
"shape",
"[",
"2",
"]",
")",
"if",
"badpixels",
"is",
"None",
":",
"badpixels",
"=",
"numpy",
".",
"zeros",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"else",
":",
"if",
"badpixels",
".",
"shape",
"!=",
"fshape",
":",
"msg",
"=",
"'shape of badpixels is not compatible with shape of rampdata'",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"badpixels",
".",
"dtype",
"!=",
"mdtype",
":",
"raise",
"ValueError",
"(",
"'dtype of badpixels must be uint8'",
")",
"result",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"fdtype",
")",
"var",
"=",
"numpy",
".",
"empty_like",
"(",
"result",
")",
"npix",
"=",
"numpy",
".",
"empty",
"(",
"fshape",
",",
"dtype",
"=",
"mdtype",
")",
"mask",
"=",
"badpixels",
".",
"copy",
"(",
")",
"_nirproc",
".",
"_process_ramp_intl",
"(",
"rampdata",
",",
"ti",
",",
"gain",
",",
"ron",
",",
"badpixels",
",",
"saturation",
",",
"blank",
",",
"result",
",",
"var",
",",
"npix",
",",
"mask",
")",
"return",
"result",
",",
"var",
",",
"npix",
",",
"mask"
] | Loop over the first axis applying ramp processing.
*rampdata* is assumed to be a 3D numpy.ndarray containing the
result of an nIR observation in follow-up-the-ramp mode.
The shape of the array must be of the form N_s x M x N, with N_s being
the number of samples.
:param rampdata: Convertible to a 3D numpy.ndarray
:param ti: Integration time.
:param gain: Detector gain.
:param ron: Detector readout noise in counts.
:param badpixels: An optional MxN mask of dtype 'uint8'.
:param dtype: The dtype of the float outputs.
:param saturation: The saturation level of the detector.
:param blank: Invalid values in output are substituted by *blank*.
:returns: A tuple of signal, variance of the signal, number of pixels used
and badpixel mask.
:raises: ValueError | [
"Loop",
"over",
"the",
"first",
"axis",
"applying",
"ramp",
"processing",
"."
] | python | train |
wummel/patool | patoolib/__init__.py | https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/__init__.py#L317-L322 | def check_archive_format (format, compression):
"""Make sure format and compression is known."""
if format not in ArchiveFormats:
raise util.PatoolError("unknown archive format `%s'" % format)
if compression is not None and compression not in ArchiveCompressions:
raise util.PatoolError("unkonwn archive compression `%s'" % compression) | [
"def",
"check_archive_format",
"(",
"format",
",",
"compression",
")",
":",
"if",
"format",
"not",
"in",
"ArchiveFormats",
":",
"raise",
"util",
".",
"PatoolError",
"(",
"\"unknown archive format `%s'\"",
"%",
"format",
")",
"if",
"compression",
"is",
"not",
"None",
"and",
"compression",
"not",
"in",
"ArchiveCompressions",
":",
"raise",
"util",
".",
"PatoolError",
"(",
"\"unkonwn archive compression `%s'\"",
"%",
"compression",
")"
] | Make sure format and compression is known. | [
"Make",
"sure",
"format",
"and",
"compression",
"is",
"known",
"."
] | python | train |
bokeh/bokeh | bokeh/protocol/message.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/protocol/message.py#L248-L281 | def send(self, conn):
''' Send the message on the given connection.
Args:
conn (WebSocketHandler) : a WebSocketHandler to send messages
Returns:
int : number of bytes sent
'''
if conn is None:
raise ValueError("Cannot send to connection None")
with (yield conn.write_lock.acquire()):
sent = 0
yield conn.write_message(self.header_json, locked=False)
sent += len(self.header_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#yield gen.sleep(0.1)
yield conn.write_message(self.metadata_json, locked=False)
sent += len(self.metadata_json)
# uncomment this to make it a lot easier to reproduce lock-related bugs
#yield gen.sleep(0.1)
yield conn.write_message(self.content_json, locked=False)
sent += len(self.content_json)
sent += yield self.write_buffers(conn, locked=False)
raise gen.Return(sent) | [
"def",
"send",
"(",
"self",
",",
"conn",
")",
":",
"if",
"conn",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot send to connection None\"",
")",
"with",
"(",
"yield",
"conn",
".",
"write_lock",
".",
"acquire",
"(",
")",
")",
":",
"sent",
"=",
"0",
"yield",
"conn",
".",
"write_message",
"(",
"self",
".",
"header_json",
",",
"locked",
"=",
"False",
")",
"sent",
"+=",
"len",
"(",
"self",
".",
"header_json",
")",
"# uncomment this to make it a lot easier to reproduce lock-related bugs",
"#yield gen.sleep(0.1)",
"yield",
"conn",
".",
"write_message",
"(",
"self",
".",
"metadata_json",
",",
"locked",
"=",
"False",
")",
"sent",
"+=",
"len",
"(",
"self",
".",
"metadata_json",
")",
"# uncomment this to make it a lot easier to reproduce lock-related bugs",
"#yield gen.sleep(0.1)",
"yield",
"conn",
".",
"write_message",
"(",
"self",
".",
"content_json",
",",
"locked",
"=",
"False",
")",
"sent",
"+=",
"len",
"(",
"self",
".",
"content_json",
")",
"sent",
"+=",
"yield",
"self",
".",
"write_buffers",
"(",
"conn",
",",
"locked",
"=",
"False",
")",
"raise",
"gen",
".",
"Return",
"(",
"sent",
")"
] | Send the message on the given connection.
Args:
conn (WebSocketHandler) : a WebSocketHandler to send messages
Returns:
int : number of bytes sent | [
"Send",
"the",
"message",
"on",
"the",
"given",
"connection",
"."
] | python | train |
PolyJIT/benchbuild | benchbuild/reports/status.py | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/reports/status.py#L82-L100 | def generate(self):
"""
Fetch all rows associated with this experiment.
This will generate a huge .csv.
"""
exp_name = self.exp_name()
fname = os.path.basename(self.out_path)
fname = "{exp}_{prefix}_{name}{ending}".format(
exp=exp_name,
prefix=os.path.splitext(fname)[0],
ending=os.path.splitext(fname)[-1],
name="full")
first = True
for chunk in self.report():
print("Writing chunk to :'{0}'".format(fname))
chunk.to_csv(fname, header=first, mode='a')
first = False | [
"def",
"generate",
"(",
"self",
")",
":",
"exp_name",
"=",
"self",
".",
"exp_name",
"(",
")",
"fname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"out_path",
")",
"fname",
"=",
"\"{exp}_{prefix}_{name}{ending}\"",
".",
"format",
"(",
"exp",
"=",
"exp_name",
",",
"prefix",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"0",
"]",
",",
"ending",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"-",
"1",
"]",
",",
"name",
"=",
"\"full\"",
")",
"first",
"=",
"True",
"for",
"chunk",
"in",
"self",
".",
"report",
"(",
")",
":",
"print",
"(",
"\"Writing chunk to :'{0}'\"",
".",
"format",
"(",
"fname",
")",
")",
"chunk",
".",
"to_csv",
"(",
"fname",
",",
"header",
"=",
"first",
",",
"mode",
"=",
"'a'",
")",
"first",
"=",
"False"
] | Fetch all rows associated with this experiment.
This will generate a huge .csv. | [
"Fetch",
"all",
"rows",
"associated",
"with",
"this",
"experiment",
"."
] | python | train |
StackStorm/pybind | pybind/nos/v7_2_0/rbridge_id/resource_monitor/memory/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/resource_monitor/memory/__init__.py#L163-L184 | def _set_action_memory(self, v, load=False):
"""
Setter method for action_memory, mapped from YANG variable /rbridge_id/resource_monitor/memory/action_memory (resource-monitor-actiontype)
If this variable is read-only (config: false) in the
source YANG file, then _set_action_memory is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action_memory() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'raslog': {'value': 1}},), is_leaf=True, yang_name="action-memory", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action to take when memory usage exceeds threshold', u'alt-name': u'action', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='resource-monitor-actiontype', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """action_memory must be of a type compatible with resource-monitor-actiontype""",
'defined-type': "brocade-resource-monitor:resource-monitor-actiontype",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'raslog': {'value': 1}},), is_leaf=True, yang_name="action-memory", rest_name="action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action to take when memory usage exceeds threshold', u'alt-name': u'action', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='resource-monitor-actiontype', is_config=True)""",
})
self.__action_memory = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_action_memory",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'raslog'",
":",
"{",
"'value'",
":",
"1",
"}",
"}",
",",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"action-memory\"",
",",
"rest_name",
"=",
"\"action\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Action to take when memory usage exceeds threshold'",
",",
"u'alt-name'",
":",
"u'action'",
",",
"u'cli-suppress-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-resource-monitor'",
",",
"defining_module",
"=",
"'brocade-resource-monitor'",
",",
"yang_type",
"=",
"'resource-monitor-actiontype'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"action_memory must be of a type compatible with resource-monitor-actiontype\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-resource-monitor:resource-monitor-actiontype\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'raslog': {'value': 1}},), is_leaf=True, yang_name=\"action-memory\", rest_name=\"action\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Action to take when memory usage exceeds threshold', u'alt-name': u'action', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='resource-monitor-actiontype', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__action_memory",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for action_memory, mapped from YANG variable /rbridge_id/resource_monitor/memory/action_memory (resource-monitor-actiontype)
If this variable is read-only (config: false) in the
source YANG file, then _set_action_memory is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_action_memory() directly. | [
"Setter",
"method",
"for",
"action_memory",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"resource_monitor",
"/",
"memory",
"/",
"action_memory",
"(",
"resource",
"-",
"monitor",
"-",
"actiontype",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_action_memory",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_action_memory",
"()",
"directly",
"."
] | python | train |
gwastro/pycbc | pycbc/waveform/generator.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/generator.py#L545-L594 | def generate(self, **kwargs):
"""Generates a waveform, applies a time shift and the detector response
function from the given kwargs.
"""
self.current_params.update(kwargs)
rfparams = {param: self.current_params[param]
for param in kwargs if param not in self.location_args}
hp, hc = self.rframe_generator.generate(**rfparams)
if isinstance(hp, TimeSeries):
df = self.current_params['delta_f']
hp = hp.to_frequencyseries(delta_f=df)
hc = hc.to_frequencyseries(delta_f=df)
# time-domain waveforms will not be shifted so that the peak amp
# happens at the end of the time series (as they are for f-domain),
# so we add an additional shift to account for it
tshift = 1./df - abs(hp._epoch)
else:
tshift = 0.
hp._epoch = hc._epoch = self._epoch
h = {}
if self.detector_names != ['RF']:
for detname, det in self.detectors.items():
# apply detector response function
fp, fc = det.antenna_pattern(self.current_params['ra'],
self.current_params['dec'],
self.current_params['polarization'],
self.current_params['tc'])
thish = fp*hp + fc*hc
# apply the time shift
tc = self.current_params['tc'] + \
det.time_delay_from_earth_center(self.current_params['ra'],
self.current_params['dec'], self.current_params['tc'])
h[detname] = apply_fd_time_shift(thish, tc+tshift, copy=False)
if self.recalib:
# recalibrate with given calibration model
h[detname] = \
self.recalib[detname].map_to_adjust(h[detname],
**self.current_params)
else:
# no detector response, just use the + polarization
if 'tc' in self.current_params:
hp = apply_fd_time_shift(hp, self.current_params['tc']+tshift,
copy=False)
h['RF'] = hp
if self.gates is not None:
# resize all to nearest power of 2
for d in h.values():
d.resize(ceilpow2(len(d)-1) + 1)
h = strain.apply_gates_to_fd(h, self.gates)
return h | [
"def",
"generate",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"current_params",
".",
"update",
"(",
"kwargs",
")",
"rfparams",
"=",
"{",
"param",
":",
"self",
".",
"current_params",
"[",
"param",
"]",
"for",
"param",
"in",
"kwargs",
"if",
"param",
"not",
"in",
"self",
".",
"location_args",
"}",
"hp",
",",
"hc",
"=",
"self",
".",
"rframe_generator",
".",
"generate",
"(",
"*",
"*",
"rfparams",
")",
"if",
"isinstance",
"(",
"hp",
",",
"TimeSeries",
")",
":",
"df",
"=",
"self",
".",
"current_params",
"[",
"'delta_f'",
"]",
"hp",
"=",
"hp",
".",
"to_frequencyseries",
"(",
"delta_f",
"=",
"df",
")",
"hc",
"=",
"hc",
".",
"to_frequencyseries",
"(",
"delta_f",
"=",
"df",
")",
"# time-domain waveforms will not be shifted so that the peak amp",
"# happens at the end of the time series (as they are for f-domain),",
"# so we add an additional shift to account for it",
"tshift",
"=",
"1.",
"/",
"df",
"-",
"abs",
"(",
"hp",
".",
"_epoch",
")",
"else",
":",
"tshift",
"=",
"0.",
"hp",
".",
"_epoch",
"=",
"hc",
".",
"_epoch",
"=",
"self",
".",
"_epoch",
"h",
"=",
"{",
"}",
"if",
"self",
".",
"detector_names",
"!=",
"[",
"'RF'",
"]",
":",
"for",
"detname",
",",
"det",
"in",
"self",
".",
"detectors",
".",
"items",
"(",
")",
":",
"# apply detector response function",
"fp",
",",
"fc",
"=",
"det",
".",
"antenna_pattern",
"(",
"self",
".",
"current_params",
"[",
"'ra'",
"]",
",",
"self",
".",
"current_params",
"[",
"'dec'",
"]",
",",
"self",
".",
"current_params",
"[",
"'polarization'",
"]",
",",
"self",
".",
"current_params",
"[",
"'tc'",
"]",
")",
"thish",
"=",
"fp",
"*",
"hp",
"+",
"fc",
"*",
"hc",
"# apply the time shift",
"tc",
"=",
"self",
".",
"current_params",
"[",
"'tc'",
"]",
"+",
"det",
".",
"time_delay_from_earth_center",
"(",
"self",
".",
"current_params",
"[",
"'ra'",
"]",
",",
"self",
".",
"current_params",
"[",
"'dec'",
"]",
",",
"self",
".",
"current_params",
"[",
"'tc'",
"]",
")",
"h",
"[",
"detname",
"]",
"=",
"apply_fd_time_shift",
"(",
"thish",
",",
"tc",
"+",
"tshift",
",",
"copy",
"=",
"False",
")",
"if",
"self",
".",
"recalib",
":",
"# recalibrate with given calibration model",
"h",
"[",
"detname",
"]",
"=",
"self",
".",
"recalib",
"[",
"detname",
"]",
".",
"map_to_adjust",
"(",
"h",
"[",
"detname",
"]",
",",
"*",
"*",
"self",
".",
"current_params",
")",
"else",
":",
"# no detector response, just use the + polarization",
"if",
"'tc'",
"in",
"self",
".",
"current_params",
":",
"hp",
"=",
"apply_fd_time_shift",
"(",
"hp",
",",
"self",
".",
"current_params",
"[",
"'tc'",
"]",
"+",
"tshift",
",",
"copy",
"=",
"False",
")",
"h",
"[",
"'RF'",
"]",
"=",
"hp",
"if",
"self",
".",
"gates",
"is",
"not",
"None",
":",
"# resize all to nearest power of 2",
"for",
"d",
"in",
"h",
".",
"values",
"(",
")",
":",
"d",
".",
"resize",
"(",
"ceilpow2",
"(",
"len",
"(",
"d",
")",
"-",
"1",
")",
"+",
"1",
")",
"h",
"=",
"strain",
".",
"apply_gates_to_fd",
"(",
"h",
",",
"self",
".",
"gates",
")",
"return",
"h"
] | Generates a waveform, applies a time shift and the detector response
function from the given kwargs. | [
"Generates",
"a",
"waveform",
"applies",
"a",
"time",
"shift",
"and",
"the",
"detector",
"response",
"function",
"from",
"the",
"given",
"kwargs",
"."
] | python | train |
streamlink/streamlink | src/streamlink/plugins/abweb.py | https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/abweb.py#L93-L135 | def _login(self, username, password):
'''login and update cached cookies'''
self.logger.debug('login ...')
res = self.session.http.get(self.login_url)
input_list = self._input_re.findall(res.text)
if not input_list:
raise PluginError('Missing input data on login website.')
data = {}
for _input_data in input_list:
try:
_input_name = self._name_re.search(_input_data).group(1)
except AttributeError:
continue
try:
_input_value = self._value_re.search(_input_data).group(1)
except AttributeError:
_input_value = ''
data[_input_name] = _input_value
login_data = {
'ctl00$Login1$UserName': username,
'ctl00$Login1$Password': password,
'ctl00$Login1$LoginButton.x': '0',
'ctl00$Login1$LoginButton.y': '0'
}
data.update(login_data)
res = self.session.http.post(self.login_url, data=data)
for cookie in self.session.http.cookies:
self._session_attributes.set(cookie.name, cookie.value, expires=3600 * 24)
if self._session_attributes.get('ASP.NET_SessionId') and self._session_attributes.get('.abportail1'):
self.logger.debug('New session data')
self.set_expires_time_cache()
return True
else:
self.logger.error('Failed to login, check your username/password')
return False | [
"def",
"_login",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'login ...'",
")",
"res",
"=",
"self",
".",
"session",
".",
"http",
".",
"get",
"(",
"self",
".",
"login_url",
")",
"input_list",
"=",
"self",
".",
"_input_re",
".",
"findall",
"(",
"res",
".",
"text",
")",
"if",
"not",
"input_list",
":",
"raise",
"PluginError",
"(",
"'Missing input data on login website.'",
")",
"data",
"=",
"{",
"}",
"for",
"_input_data",
"in",
"input_list",
":",
"try",
":",
"_input_name",
"=",
"self",
".",
"_name_re",
".",
"search",
"(",
"_input_data",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"continue",
"try",
":",
"_input_value",
"=",
"self",
".",
"_value_re",
".",
"search",
"(",
"_input_data",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"_input_value",
"=",
"''",
"data",
"[",
"_input_name",
"]",
"=",
"_input_value",
"login_data",
"=",
"{",
"'ctl00$Login1$UserName'",
":",
"username",
",",
"'ctl00$Login1$Password'",
":",
"password",
",",
"'ctl00$Login1$LoginButton.x'",
":",
"'0'",
",",
"'ctl00$Login1$LoginButton.y'",
":",
"'0'",
"}",
"data",
".",
"update",
"(",
"login_data",
")",
"res",
"=",
"self",
".",
"session",
".",
"http",
".",
"post",
"(",
"self",
".",
"login_url",
",",
"data",
"=",
"data",
")",
"for",
"cookie",
"in",
"self",
".",
"session",
".",
"http",
".",
"cookies",
":",
"self",
".",
"_session_attributes",
".",
"set",
"(",
"cookie",
".",
"name",
",",
"cookie",
".",
"value",
",",
"expires",
"=",
"3600",
"*",
"24",
")",
"if",
"self",
".",
"_session_attributes",
".",
"get",
"(",
"'ASP.NET_SessionId'",
")",
"and",
"self",
".",
"_session_attributes",
".",
"get",
"(",
"'.abportail1'",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'New session data'",
")",
"self",
".",
"set_expires_time_cache",
"(",
")",
"return",
"True",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Failed to login, check your username/password'",
")",
"return",
"False"
] | login and update cached cookies | [
"login",
"and",
"update",
"cached",
"cookies"
] | python | test |
juju/charm-helpers | charmhelpers/contrib/openstack/templating.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/templating.py#L121-L128 | def complete_contexts(self):
'''
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
self.context()
return self._complete_contexts | [
"def",
"complete_contexts",
"(",
"self",
")",
":",
"if",
"self",
".",
"_complete_contexts",
":",
"return",
"self",
".",
"_complete_contexts",
"self",
".",
"context",
"(",
")",
"return",
"self",
".",
"_complete_contexts"
] | Return a list of interfaces that have satisfied contexts. | [
"Return",
"a",
"list",
"of",
"interfaces",
"that",
"have",
"satisfied",
"contexts",
"."
] | python | train |
drdoctr/doctr | doctr/local.py | https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/local.py#L86-L109 | def encrypt_to_file(contents, filename):
"""
Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`.
"""
if not filename.endswith('.enc'):
raise ValueError("%s does not end with .enc" % filename)
key = Fernet.generate_key()
fer = Fernet(key)
encrypted_file = fer.encrypt(contents)
with open(filename, 'wb') as f:
f.write(encrypted_file)
return key | [
"def",
"encrypt_to_file",
"(",
"contents",
",",
"filename",
")",
":",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.enc'",
")",
":",
"raise",
"ValueError",
"(",
"\"%s does not end with .enc\"",
"%",
"filename",
")",
"key",
"=",
"Fernet",
".",
"generate_key",
"(",
")",
"fer",
"=",
"Fernet",
"(",
"key",
")",
"encrypted_file",
"=",
"fer",
".",
"encrypt",
"(",
"contents",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"encrypted_file",
")",
"return",
"key"
] | Encrypts ``contents`` and writes it to ``filename``.
``contents`` should be a bytes string. ``filename`` should end with
``.enc``.
Returns the secret key used for the encryption.
Decrypt the file with :func:`doctr.travis.decrypt_file`. | [
"Encrypts",
"contents",
"and",
"writes",
"it",
"to",
"filename",
"."
] | python | train |
ConsenSys/mythril-classic | mythril/mythril/mythril_config.py | https://github.com/ConsenSys/mythril-classic/blob/27af71c34b2ce94f4fae5613ec457f93df1a8f56/mythril/mythril/mythril_config.py#L215-L226 | def _set_rpc(self, rpc_type: str) -> None:
"""
Sets rpc based on the type
:param rpc_type: The type of connection: like infura, ganache, localhost
:return:
"""
if rpc_type == "infura":
self.set_api_rpc_infura()
elif rpc_type == "localhost":
self.set_api_rpc_localhost()
else:
self.set_api_rpc(rpc_type) | [
"def",
"_set_rpc",
"(",
"self",
",",
"rpc_type",
":",
"str",
")",
"->",
"None",
":",
"if",
"rpc_type",
"==",
"\"infura\"",
":",
"self",
".",
"set_api_rpc_infura",
"(",
")",
"elif",
"rpc_type",
"==",
"\"localhost\"",
":",
"self",
".",
"set_api_rpc_localhost",
"(",
")",
"else",
":",
"self",
".",
"set_api_rpc",
"(",
"rpc_type",
")"
] | Sets rpc based on the type
:param rpc_type: The type of connection: like infura, ganache, localhost
:return: | [
"Sets",
"rpc",
"based",
"on",
"the",
"type",
":",
"param",
"rpc_type",
":",
"The",
"type",
"of",
"connection",
":",
"like",
"infura",
"ganache",
"localhost",
":",
"return",
":"
] | python | train |
bcbio/bcbio-nextgen | bcbio/bam/callable.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/callable.py#L32-L57 | def sample_callable_bed(bam_file, ref_file, data):
"""Retrieve callable regions for a sample subset by defined analysis regions.
"""
from bcbio.heterogeneity import chromhacks
CovInfo = collections.namedtuple("CovInfo", "callable, raw_callable, depth_files")
noalt_calling = "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data)
def callable_chrom_filter(r):
"""Filter to callable region, potentially limiting by chromosomes.
"""
return r.name == "CALLABLE" and (not noalt_calling or chromhacks.is_nonalt(r.chrom))
out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0]
with shared.bedtools_tmpdir(data):
sv_bed = regions.get_sv_bed(data)
callable_bed, depth_files = coverage.calculate(bam_file, data, sv_bed)
input_regions_bed = dd.get_variant_regions(data)
if not utils.file_uptodate(out_file, callable_bed):
with file_transaction(data, out_file) as tx_out_file:
callable_regions = pybedtools.BedTool(callable_bed)
filter_regions = callable_regions.filter(callable_chrom_filter)
if input_regions_bed:
if not utils.file_uptodate(out_file, input_regions_bed):
input_regions = pybedtools.BedTool(input_regions_bed)
filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file)
else:
filter_regions.saveas(tx_out_file)
return CovInfo(out_file, callable_bed, depth_files) | [
"def",
"sample_callable_bed",
"(",
"bam_file",
",",
"ref_file",
",",
"data",
")",
":",
"from",
"bcbio",
".",
"heterogeneity",
"import",
"chromhacks",
"CovInfo",
"=",
"collections",
".",
"namedtuple",
"(",
"\"CovInfo\"",
",",
"\"callable, raw_callable, depth_files\"",
")",
"noalt_calling",
"=",
"\"noalt_calling\"",
"in",
"dd",
".",
"get_tools_on",
"(",
"data",
")",
"or",
"\"altcontigs\"",
"in",
"dd",
".",
"get_exclude_regions",
"(",
"data",
")",
"def",
"callable_chrom_filter",
"(",
"r",
")",
":",
"\"\"\"Filter to callable region, potentially limiting by chromosomes.\n \"\"\"",
"return",
"r",
".",
"name",
"==",
"\"CALLABLE\"",
"and",
"(",
"not",
"noalt_calling",
"or",
"chromhacks",
".",
"is_nonalt",
"(",
"r",
".",
"chrom",
")",
")",
"out_file",
"=",
"\"%s-callable_sample.bed\"",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"bam_file",
")",
"[",
"0",
"]",
"with",
"shared",
".",
"bedtools_tmpdir",
"(",
"data",
")",
":",
"sv_bed",
"=",
"regions",
".",
"get_sv_bed",
"(",
"data",
")",
"callable_bed",
",",
"depth_files",
"=",
"coverage",
".",
"calculate",
"(",
"bam_file",
",",
"data",
",",
"sv_bed",
")",
"input_regions_bed",
"=",
"dd",
".",
"get_variant_regions",
"(",
"data",
")",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"callable_bed",
")",
":",
"with",
"file_transaction",
"(",
"data",
",",
"out_file",
")",
"as",
"tx_out_file",
":",
"callable_regions",
"=",
"pybedtools",
".",
"BedTool",
"(",
"callable_bed",
")",
"filter_regions",
"=",
"callable_regions",
".",
"filter",
"(",
"callable_chrom_filter",
")",
"if",
"input_regions_bed",
":",
"if",
"not",
"utils",
".",
"file_uptodate",
"(",
"out_file",
",",
"input_regions_bed",
")",
":",
"input_regions",
"=",
"pybedtools",
".",
"BedTool",
"(",
"input_regions_bed",
")",
"filter_regions",
".",
"intersect",
"(",
"input_regions",
",",
"nonamecheck",
"=",
"True",
")",
".",
"saveas",
"(",
"tx_out_file",
")",
"else",
":",
"filter_regions",
".",
"saveas",
"(",
"tx_out_file",
")",
"return",
"CovInfo",
"(",
"out_file",
",",
"callable_bed",
",",
"depth_files",
")"
] | Retrieve callable regions for a sample subset by defined analysis regions. | [
"Retrieve",
"callable",
"regions",
"for",
"a",
"sample",
"subset",
"by",
"defined",
"analysis",
"regions",
"."
] | python | train |
markchil/gptools | gptools/utils.py | https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1070-L1079 | def random_draw(self, size=None):
"""Draw random samples of the hyperparameters.
Parameters
----------
size : None, int or array-like, optional
The number/shape of samples to draw. If None, only one sample is
returned. Default is None.
"""
return scipy.asarray([scipy.stats.gamma.rvs(a, loc=0, scale=1.0 / b, size=size) for a, b in zip(self.a, self.b)]) | [
"def",
"random_draw",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"return",
"scipy",
".",
"asarray",
"(",
"[",
"scipy",
".",
"stats",
".",
"gamma",
".",
"rvs",
"(",
"a",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"1.0",
"/",
"b",
",",
"size",
"=",
"size",
")",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"self",
".",
"a",
",",
"self",
".",
"b",
")",
"]",
")"
] | Draw random samples of the hyperparameters.
Parameters
----------
size : None, int or array-like, optional
The number/shape of samples to draw. If None, only one sample is
returned. Default is None. | [
"Draw",
"random",
"samples",
"of",
"the",
"hyperparameters",
".",
"Parameters",
"----------",
"size",
":",
"None",
"int",
"or",
"array",
"-",
"like",
"optional",
"The",
"number",
"/",
"shape",
"of",
"samples",
"to",
"draw",
".",
"If",
"None",
"only",
"one",
"sample",
"is",
"returned",
".",
"Default",
"is",
"None",
"."
] | python | train |
Spinmob/spinmob | _pylab_colormap.py | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_pylab_colormap.py#L406-L420 | def _signal_load(self):
"""
Load the selected cmap.
"""
# set our name
self.set_name(str(self._combobox_cmaps.currentText()))
# load the colormap
self.load_colormap()
# rebuild the interface
self._build_gui()
self._button_save.setEnabled(False) | [
"def",
"_signal_load",
"(",
"self",
")",
":",
"# set our name",
"self",
".",
"set_name",
"(",
"str",
"(",
"self",
".",
"_combobox_cmaps",
".",
"currentText",
"(",
")",
")",
")",
"# load the colormap",
"self",
".",
"load_colormap",
"(",
")",
"# rebuild the interface",
"self",
".",
"_build_gui",
"(",
")",
"self",
".",
"_button_save",
".",
"setEnabled",
"(",
"False",
")"
] | Load the selected cmap. | [
"Load",
"the",
"selected",
"cmap",
"."
] | python | train |
peterbe/gg | gg/builtins/bugzilla.py | https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/bugzilla.py#L69-L76 | def logout(config):
"""Remove and forget your Bugzilla credentials"""
state = read(config.configfile)
if state.get("BUGZILLA"):
remove(config.configfile, "BUGZILLA")
success_out("Forgotten")
else:
error_out("No stored Bugzilla credentials") | [
"def",
"logout",
"(",
"config",
")",
":",
"state",
"=",
"read",
"(",
"config",
".",
"configfile",
")",
"if",
"state",
".",
"get",
"(",
"\"BUGZILLA\"",
")",
":",
"remove",
"(",
"config",
".",
"configfile",
",",
"\"BUGZILLA\"",
")",
"success_out",
"(",
"\"Forgotten\"",
")",
"else",
":",
"error_out",
"(",
"\"No stored Bugzilla credentials\"",
")"
] | Remove and forget your Bugzilla credentials | [
"Remove",
"and",
"forget",
"your",
"Bugzilla",
"credentials"
] | python | train |
googlefonts/ufo2ft | Lib/ufo2ft/outlineCompiler.py | https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/outlineCompiler.py#L739-L760 | def setupTable_vmtx(self):
"""
Make the vmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "vmtx" not in self.tables:
return
self.otf["vmtx"] = vmtx = newTable("vmtx")
vmtx.metrics = {}
for glyphName, glyph in self.allGlyphs.items():
height = otRound(glyph.height)
if height < 0:
raise ValueError(
"The height should not be negative: '%s'" % (glyphName))
verticalOrigin = _getVerticalOrigin(self.otf, glyph)
bounds = self.glyphBoundingBoxes[glyphName]
top = bounds.yMax if bounds else 0
vmtx[glyphName] = (height, verticalOrigin - top) | [
"def",
"setupTable_vmtx",
"(",
"self",
")",
":",
"if",
"\"vmtx\"",
"not",
"in",
"self",
".",
"tables",
":",
"return",
"self",
".",
"otf",
"[",
"\"vmtx\"",
"]",
"=",
"vmtx",
"=",
"newTable",
"(",
"\"vmtx\"",
")",
"vmtx",
".",
"metrics",
"=",
"{",
"}",
"for",
"glyphName",
",",
"glyph",
"in",
"self",
".",
"allGlyphs",
".",
"items",
"(",
")",
":",
"height",
"=",
"otRound",
"(",
"glyph",
".",
"height",
")",
"if",
"height",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"The height should not be negative: '%s'\"",
"%",
"(",
"glyphName",
")",
")",
"verticalOrigin",
"=",
"_getVerticalOrigin",
"(",
"self",
".",
"otf",
",",
"glyph",
")",
"bounds",
"=",
"self",
".",
"glyphBoundingBoxes",
"[",
"glyphName",
"]",
"top",
"=",
"bounds",
".",
"yMax",
"if",
"bounds",
"else",
"0",
"vmtx",
"[",
"glyphName",
"]",
"=",
"(",
"height",
",",
"verticalOrigin",
"-",
"top",
")"
] | Make the vmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired. | [
"Make",
"the",
"vmtx",
"table",
"."
] | python | train |
CEA-COSMIC/ModOpt | modopt/base/transform.py | https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/base/transform.py#L63-L113 | def map2cube(data_map, layout):
r"""Map to cube
This method transforms the input data from a 2D map with given layout to
a 3D cube
Parameters
----------
data_map : np.ndarray
Input data map, 2D array
layout : tuple
2D layout of 2D images
Returns
-------
np.ndarray 3D cube
Raises
------
ValueError
For invalid layout
Examples
--------
>>> from modopt.base.transform import map2cube
>>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],
[10, 11, 14, 15]])
>>> map2cube(a, (2, 2))
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]],
[[12, 13],
[14, 15]]])
"""
if np.all(np.array(data_map.shape) % np.array(layout)):
raise ValueError('The desired layout must be a multiple of the number '
'pixels in the data map.')
d_shape = np.array(data_map.shape) // np.array(layout)
return np.array([data_map[(slice(i * d_shape[0], (i + 1) * d_shape[0]),
slice(j * d_shape[1], (j + 1) * d_shape[1]))] for i in
range(layout[0]) for j in range(layout[1])]) | [
"def",
"map2cube",
"(",
"data_map",
",",
"layout",
")",
":",
"if",
"np",
".",
"all",
"(",
"np",
".",
"array",
"(",
"data_map",
".",
"shape",
")",
"%",
"np",
".",
"array",
"(",
"layout",
")",
")",
":",
"raise",
"ValueError",
"(",
"'The desired layout must be a multiple of the number '",
"'pixels in the data map.'",
")",
"d_shape",
"=",
"np",
".",
"array",
"(",
"data_map",
".",
"shape",
")",
"//",
"np",
".",
"array",
"(",
"layout",
")",
"return",
"np",
".",
"array",
"(",
"[",
"data_map",
"[",
"(",
"slice",
"(",
"i",
"*",
"d_shape",
"[",
"0",
"]",
",",
"(",
"i",
"+",
"1",
")",
"*",
"d_shape",
"[",
"0",
"]",
")",
",",
"slice",
"(",
"j",
"*",
"d_shape",
"[",
"1",
"]",
",",
"(",
"j",
"+",
"1",
")",
"*",
"d_shape",
"[",
"1",
"]",
")",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"layout",
"[",
"0",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"layout",
"[",
"1",
"]",
")",
"]",
")"
] | r"""Map to cube
This method transforms the input data from a 2D map with given layout to
a 3D cube
Parameters
----------
data_map : np.ndarray
Input data map, 2D array
layout : tuple
2D layout of 2D images
Returns
-------
np.ndarray 3D cube
Raises
------
ValueError
For invalid layout
Examples
--------
>>> from modopt.base.transform import map2cube
>>> a = np.array([[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],
[10, 11, 14, 15]])
>>> map2cube(a, (2, 2))
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]],
[[12, 13],
[14, 15]]]) | [
"r",
"Map",
"to",
"cube"
] | python | train |
asphalt-framework/asphalt | asphalt/core/context.py | https://github.com/asphalt-framework/asphalt/blob/4114b3ac9743cbd9facb374a3f53e19d3afef22d/asphalt/core/context.py#L504-L521 | def call_in_executor(self, func: Callable, *args, executor: Union[Executor, str] = None,
**kwargs) -> Awaitable:
"""
Call the given callable in an executor.
:param func: the callable to call
:param args: positional arguments to call the callable with
:param executor: either an :class:`~concurrent.futures.Executor` instance, the resource
name of one or ``None`` to use the event loop's default executor
:param kwargs: keyword arguments to call the callable with
:return: an awaitable that resolves to the return value of the call
"""
assert check_argument_types()
if isinstance(executor, str):
executor = self.require_resource(Executor, executor)
return asyncio_extras.call_in_executor(func, *args, executor=executor, **kwargs) | [
"def",
"call_in_executor",
"(",
"self",
",",
"func",
":",
"Callable",
",",
"*",
"args",
",",
"executor",
":",
"Union",
"[",
"Executor",
",",
"str",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"Awaitable",
":",
"assert",
"check_argument_types",
"(",
")",
"if",
"isinstance",
"(",
"executor",
",",
"str",
")",
":",
"executor",
"=",
"self",
".",
"require_resource",
"(",
"Executor",
",",
"executor",
")",
"return",
"asyncio_extras",
".",
"call_in_executor",
"(",
"func",
",",
"*",
"args",
",",
"executor",
"=",
"executor",
",",
"*",
"*",
"kwargs",
")"
] | Call the given callable in an executor.
:param func: the callable to call
:param args: positional arguments to call the callable with
:param executor: either an :class:`~concurrent.futures.Executor` instance, the resource
name of one or ``None`` to use the event loop's default executor
:param kwargs: keyword arguments to call the callable with
:return: an awaitable that resolves to the return value of the call | [
"Call",
"the",
"given",
"callable",
"in",
"an",
"executor",
"."
] | python | train |