repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---
ronhanson/python-tbx | tbx/file.py | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/file.py#L45-L65 | def full_file_list_with_sequence(scan_path):
"""
Returns a list of all files in a folder and its subfolders (only files).
"""
file_list = []
path = os.path.abspath(scan_path)
for root, dirs, files in os.walk(path):
if len(files) != 0 and not '.svn' in root and not '.git' in root:
try:
sc = sequential.SequentialFolder(str(root))
if sc.sequence:
file_list.append(sc)
continue
except Exception as e:
pass
for f in files:
file_list.append(os.path.join(root, f))
return file_list | [
"def",
"full_file_list_with_sequence",
"(",
"scan_path",
")",
":",
"file_list",
"=",
"[",
"]",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"scan_path",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"if",
"len",
"(",
"files",
")",
"!=",
"0",
"and",
"not",
"'.svn'",
"in",
"root",
"and",
"not",
"'.git'",
"in",
"root",
":",
"try",
":",
"sc",
"=",
"sequential",
".",
"SequentialFolder",
"(",
"str",
"(",
"root",
")",
")",
"if",
"sc",
".",
"sequence",
":",
"file_list",
".",
"append",
"(",
"sc",
")",
"continue",
"except",
"Exception",
"as",
"e",
":",
"pass",
"for",
"f",
"in",
"files",
":",
"file_list",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
")",
"return",
"file_list"
] | Returns a list of all files in a folder and its subfolders (only files). | [
"Returns",
"a",
"list",
"of",
"all",
"files",
"in",
"a",
"folder",
"and",
"its",
"subfolders",
"(",
"only",
"files",
")",
"."
] | python | train |
msfrank/cifparser | cifparser/valuetree.py | https://github.com/msfrank/cifparser/blob/ecd899ba2e7b990e2cec62b115742d830e7e4384/cifparser/valuetree.py#L228-L246 | def get_field(self, path, name):
"""
Retrieves the value of the field at the specified path.
:param path: str or Path instance
:param name:
:type name: str
:return:
:raises ValueError: A component of path is a field name.
:raises KeyError: A component of path doesn't exist.
:raises TypeError: The field name is a component of a path.
"""
try:
value = self.get(path, name)
if not isinstance(value, str):
raise TypeError()
return value
except KeyError:
raise KeyError() | [
"def",
"get_field",
"(",
"self",
",",
"path",
",",
"name",
")",
":",
"try",
":",
"value",
"=",
"self",
".",
"get",
"(",
"path",
",",
"name",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
")",
"return",
"value",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
")"
] | Retrieves the value of the field at the specified path.
:param path: str or Path instance
:param name:
:type name: str
:return:
:raises ValueError: A component of path is a field name.
:raises KeyError: A component of path doesn't exist.
:raises TypeError: The field name is a component of a path. | [
"Retrieves",
"the",
"value",
"of",
"the",
"field",
"at",
"the",
"specified",
"path",
"."
] | python | train |
nickhand/classylss | classylss/__init__.py | https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/__init__.py#L60-L99 | def load_ini(filename):
"""
Read a CLASS ``.ini`` file, returning a dictionary of parameters
Parameters
----------
filename : str
the name of an existing parameter file to load, or one included as
part of the CLASS source
Returns
-------
dict :
the input parameters loaded from file
"""
# also look in data dir
path = _find_file(filename)
pars = {}
with open(path, 'r') as ff:
# loop over lines
for lineno, line in enumerate(ff):
if not line: continue
# skip any commented lines with #
if '#' in line: line = line[line.index('#')+1:]
# must have an equals sign to be valid
if "=" not in line: continue
# extract key and value pairs
fields = line.split("=")
if len(fields) != 2:
import warnings
warnings.warn("skipping line number %d: '%s'" %(lineno,line))
continue
pars[fields[0].strip()] = fields[1].strip()
return pars | [
"def",
"load_ini",
"(",
"filename",
")",
":",
"# also look in data dir",
"path",
"=",
"_find_file",
"(",
"filename",
")",
"pars",
"=",
"{",
"}",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"ff",
":",
"# loop over lines",
"for",
"lineno",
",",
"line",
"in",
"enumerate",
"(",
"ff",
")",
":",
"if",
"not",
"line",
":",
"continue",
"# skip any commented lines with #",
"if",
"'#'",
"in",
"line",
":",
"line",
"=",
"line",
"[",
"line",
".",
"index",
"(",
"'#'",
")",
"+",
"1",
":",
"]",
"# must have an equals sign to be valid",
"if",
"\"=\"",
"not",
"in",
"line",
":",
"continue",
"# extract key and value pairs",
"fields",
"=",
"line",
".",
"split",
"(",
"\"=\"",
")",
"if",
"len",
"(",
"fields",
")",
"!=",
"2",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"skipping line number %d: '%s'\"",
"%",
"(",
"lineno",
",",
"line",
")",
")",
"continue",
"pars",
"[",
"fields",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"]",
"=",
"fields",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"pars"
] | Read a CLASS ``.ini`` file, returning a dictionary of parameters
Parameters
----------
filename : str
the name of an existing parameter file to load, or one included as
part of the CLASS source
Returns
-------
dict :
the input parameters loaded from file | [
"Read",
"a",
"CLASS",
".",
"ini",
"file",
"returning",
"a",
"dictionary",
"of",
"parameters"
] | python | train |
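A minimal usage sketch for the `load_ini` helper in the row above; the file name and parameter key are assumptions for illustration, and every value comes back as a plain string:

```python
# Illustrative only: load a CLASS parameter file bundled with the package
# (the name "explanatory.ini" is an assumption) and read one entry.
from classylss import load_ini

pars = load_ini("explanatory.ini")
print(pars.get("output"))  # values are returned as strings
```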
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py#L271-L285 | def executed_without_callbacks(self):
"""
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_without_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED) | [
"def",
"executed_without_callbacks",
"(",
"self",
")",
":",
"T",
"=",
"self",
".",
"tm",
".",
"trace",
"if",
"T",
":",
"T",
".",
"write",
"(",
"self",
".",
"trace_message",
"(",
"'Task.executed_without_callbacks()'",
",",
"self",
".",
"node",
")",
")",
"for",
"t",
"in",
"self",
".",
"targets",
":",
"if",
"t",
".",
"get_state",
"(",
")",
"==",
"NODE_EXECUTING",
":",
"for",
"side_effect",
"in",
"t",
".",
"side_effects",
":",
"side_effect",
".",
"set_state",
"(",
"NODE_NO_STATE",
")",
"t",
".",
"set_state",
"(",
"NODE_EXECUTED",
")"
] | Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods. | [
"Called",
"when",
"the",
"task",
"has",
"been",
"successfully",
"executed",
"and",
"the",
"Taskmaster",
"instance",
"doesn",
"t",
"want",
"to",
"call",
"the",
"Node",
"s",
"callback",
"methods",
"."
] | python | train |
Azure/azure-cosmos-python | azure/cosmos/auth.py | https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/auth.py#L69-L114 | def __GetAuthorizationTokenUsingMasterKey(verb,
                                          resource_id_or_fullname,
                                          resource_type,
                                          headers,
                                          master_key):
    """Gets the authorization token using `master_key.
    :param str verb:
    :param str resource_id_or_fullname:
    :param str resource_type:
    :param dict headers:
    :param str master_key:
    :return:
        The authorization token.
    :rtype: dict
    """
    # decodes the master key which is encoded in base64
    key = base64.b64decode(master_key)
    # Skipping lower casing of resource_id_or_fullname since it may now contain "ID" of the resource as part of the fullname
    text = '{verb}\n{resource_type}\n{resource_id_or_fullname}\n{x_date}\n{http_date}\n'.format(
        verb=(verb.lower() or ''),
        resource_type=(resource_type.lower() or ''),
        resource_id_or_fullname=(resource_id_or_fullname or ''),
        x_date=headers.get(http_constants.HttpHeaders.XDate, '').lower(),
        http_date=headers.get(http_constants.HttpHeaders.HttpDate, '').lower())
    if six.PY2:
        body = text.decode('utf-8')
        digest = hmac.new(key, body, sha256).digest()
        signature = digest.encode('base64')
    else:
        # python 3 support
        body = text.encode('utf-8')
        digest = hmac.new(key, body, sha256).digest()
        signature = base64.encodebytes(digest).decode('utf-8')
    master_token = 'master'
    token_version = '1.0'
    return 'type={type}&ver={ver}&sig={sig}'.format(type=master_token,
                                                    ver=token_version,
                                                    sig=signature[:-1]) | [
"def",
"__GetAuthorizationTokenUsingMasterKey",
"(",
"verb",
",",
"resource_id_or_fullname",
",",
"resource_type",
",",
"headers",
",",
"master_key",
")",
":",
"# decodes the master key which is encoded in base64 ",
"key",
"=",
"base64",
".",
"b64decode",
"(",
"master_key",
")",
"# Skipping lower casing of resource_id_or_fullname since it may now contain \"ID\" of the resource as part of the fullname",
"text",
"=",
"'{verb}\\n{resource_type}\\n{resource_id_or_fullname}\\n{x_date}\\n{http_date}\\n'",
".",
"format",
"(",
"verb",
"=",
"(",
"verb",
".",
"lower",
"(",
")",
"or",
"''",
")",
",",
"resource_type",
"=",
"(",
"resource_type",
".",
"lower",
"(",
")",
"or",
"''",
")",
",",
"resource_id_or_fullname",
"=",
"(",
"resource_id_or_fullname",
"or",
"''",
")",
",",
"x_date",
"=",
"headers",
".",
"get",
"(",
"http_constants",
".",
"HttpHeaders",
".",
"XDate",
",",
"''",
")",
".",
"lower",
"(",
")",
",",
"http_date",
"=",
"headers",
".",
"get",
"(",
"http_constants",
".",
"HttpHeaders",
".",
"HttpDate",
",",
"''",
")",
".",
"lower",
"(",
")",
")",
"if",
"six",
".",
"PY2",
":",
"body",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"digest",
"=",
"hmac",
".",
"new",
"(",
"key",
",",
"body",
",",
"sha256",
")",
".",
"digest",
"(",
")",
"signature",
"=",
"digest",
".",
"encode",
"(",
"'base64'",
")",
"else",
":",
"# python 3 support",
"body",
"=",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
"digest",
"=",
"hmac",
".",
"new",
"(",
"key",
",",
"body",
",",
"sha256",
")",
".",
"digest",
"(",
")",
"signature",
"=",
"base64",
".",
"encodebytes",
"(",
"digest",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"master_token",
"=",
"'master'",
"token_version",
"=",
"'1.0'",
"return",
"'type={type}&ver={ver}&sig={sig}'",
".",
"format",
"(",
"type",
"=",
"master_token",
",",
"ver",
"=",
"token_version",
",",
"sig",
"=",
"signature",
"[",
":",
"-",
"1",
"]",
")"
] | Gets the authorization token using `master_key.
:param str verb:
:param str resource_id_or_fullname:
:param str resource_type:
:param dict headers:
:param str master_key:
:return:
The authorization token.
:rtype: dict | [
"Gets",
"the",
"authorization",
"token",
"using",
"master_key",
"."
] | python | train |
rsmuc/health_monitoring_plugins | health_monitoring_plugins/check_snmp_ilo4/check_snmp_ilo4.py | https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/check_snmp_ilo4/check_snmp_ilo4.py#L350-L369 | def check_power_redundancy():
"""
Check if the power supplies are redundant
The check is skipped if --noPowerRedundancy is set
"""
# skip the check if --noPowerRedundancy is set
if power_redundancy_flag:
# walk the data
ps_redundant_data = walk_data(sess, oid_ps_redundant, helper)[0]
for x, state in enumerate(ps_redundant_data, 1):
# human readable status
hr_status = ps_redundant_state[int(state)]
if hr_status != "redundant":
# if the power supply is not redundant, we will set a critical status and add it to the summary
helper.add_summary('Power supply %s: %s' % (x, hr_status))
helper.status(critical)
# we always want to see the redundancy status in the long output
helper.add_long_output('Power supply %s: %s' % (x, hr_status))
helper.add_long_output('') | [
"def",
"check_power_redundancy",
"(",
")",
":",
"# skip the check if --noPowerRedundancy is set",
"if",
"power_redundancy_flag",
":",
"# walk the data ",
"ps_redundant_data",
"=",
"walk_data",
"(",
"sess",
",",
"oid_ps_redundant",
",",
"helper",
")",
"[",
"0",
"]",
"for",
"x",
",",
"state",
"in",
"enumerate",
"(",
"ps_redundant_data",
",",
"1",
")",
":",
"# human readable status",
"hr_status",
"=",
"ps_redundant_state",
"[",
"int",
"(",
"state",
")",
"]",
"if",
"hr_status",
"!=",
"\"redundant\"",
":",
"# if the power supply is not redundant, we will set a critical status and add it to the summary",
"helper",
".",
"add_summary",
"(",
"'Power supply %s: %s'",
"%",
"(",
"x",
",",
"hr_status",
")",
")",
"helper",
".",
"status",
"(",
"critical",
")",
"# we always want to see the redundancy status in the long output",
"helper",
".",
"add_long_output",
"(",
"'Power supply %s: %s'",
"%",
"(",
"x",
",",
"hr_status",
")",
")",
"helper",
".",
"add_long_output",
"(",
"''",
")"
] | Check if the power supplies are redundant
The check is skipped if --noPowerRedundancy is set | [
"Check",
"if",
"the",
"power",
"supplies",
"are",
"redundant",
"The",
"check",
"is",
"skipped",
"if",
"--",
"noPowerRedundancy",
"is",
"set"
] | python | train |
pywbem/pywbem | attic/cimxml_parse.py | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cimxml_parse.py#L319-L334 | def parse_namespace(parser, event, node):
    #pylint: disable=unused-argument
    """Parse the CIM/XML NAMESPACE element and return the value
    of the CIMName attribute
    <!ELEMENT NAMESPACE EMPTY>
    <!ATTLIST NAMESPACE
        %CIMName;
    """
    name = _get_required_attribute(node, 'NAME')
    (next_event, next_node) = six.next(parser)
    if not _is_end(next_event, next_node, 'NAMESPACE'):
        raise ParseError('Expecting end NAMESPACE')
    return name | [
"def",
"parse_namespace",
"(",
"parser",
",",
"event",
",",
"node",
")",
":",
"#pylint: disable=unused-argument",
"name",
"=",
"_get_required_attribute",
"(",
"node",
",",
"'NAME'",
")",
"(",
"next_event",
",",
"next_node",
")",
"=",
"six",
".",
"next",
"(",
"parser",
")",
"if",
"not",
"_is_end",
"(",
"next_event",
",",
"next_node",
",",
"'NAMESPACE'",
")",
":",
"raise",
"ParseError",
"(",
"'Expecting end NAMESPACE'",
")",
"return",
"name"
] | Parse the CIM/XML NAMESPACE element and return the value
of the CIMName attribute
<!ELEMENT NAMESPACE EMPTY>
<!ATTLIST NAMESPACE
%CIMName; | [
"Parse",
"the",
"CIM",
"/",
"XML",
"NAMESPACE",
"element",
"and",
"return",
"the",
"value",
"of",
"the",
"CIMName",
"attribute",
"<!ELEMENT",
"NAMESPACE",
"EMPTY",
">",
"<!ATTLIST",
"NAMESPACE",
"%CIMName",
";"
] | python | train |
assamite/creamas | creamas/util.py | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/util.py#L173-L190 | def split_addrs(addrs):
    '''Split addresses into dictionaries by hosts and ports.
    :param list addrs: A list of addresses.
    :returns:
        A dictionary of dictionaries, where ``dict[HOST][PORT]`` holds a list
        of all agent addresses in that environment.
    '''
    splitted = {}
    for addr in addrs:
        host, port, _ = _addr_key(addr)
        if host not in splitted:
            splitted[host] = {}
        if port not in splitted[host]:
            splitted[host][port] = []
        splitted[host][port].append(addr)
    return splitted | [
"def",
"split_addrs",
"(",
"addrs",
")",
":",
"splitted",
"=",
"{",
"}",
"for",
"addr",
"in",
"addrs",
":",
"host",
",",
"port",
",",
"_",
"=",
"_addr_key",
"(",
"addr",
")",
"if",
"host",
"not",
"in",
"splitted",
":",
"splitted",
"[",
"host",
"]",
"=",
"{",
"}",
"if",
"port",
"not",
"in",
"splitted",
"[",
"host",
"]",
":",
"splitted",
"[",
"host",
"]",
"[",
"port",
"]",
"=",
"[",
"]",
"splitted",
"[",
"host",
"]",
"[",
"port",
"]",
".",
"append",
"(",
"addr",
")",
"return",
"splitted"
] | Split addresses into dictionaries by hosts and ports.
:param list addrs: A list of addresses.
:returns:
A dictionary of dictionaries, where ``dict[HOST][PORT]`` holds a list
of all agent addresses in that environment. | [
"Split",
"addresses",
"into",
"dictionaries",
"by",
"hosts",
"and",
"ports",
"."
] | python | train |
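A small sketch of how `split_addrs` above nests addresses by host and then port; the `tcp://host:port/agent` address style and the key types produced by the private `_addr_key` helper are assumptions:

```python
from creamas.util import split_addrs

addrs = ['tcp://localhost:5555/0', 'tcp://localhost:5555/1',
         'tcp://localhost:5556/0']
by_env = split_addrs(addrs)
for host, ports in by_env.items():
    for port, agent_addrs in ports.items():
        print(host, port, len(agent_addrs))  # e.g. localhost 5555 2
```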
fabioz/PyDev.Debugger | _pydevd_bundle/pydevd_api.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_api.py#L513-L518 | def request_get_variable_json(self, py_db, request, thread_id):
    '''
    :param VariablesRequest request:
    '''
    py_db.post_method_as_internal_command(
        thread_id, internal_get_variable_json, request) | [
"def",
"request_get_variable_json",
"(",
"self",
",",
"py_db",
",",
"request",
",",
"thread_id",
")",
":",
"py_db",
".",
"post_method_as_internal_command",
"(",
"thread_id",
",",
"internal_get_variable_json",
",",
"request",
")"
] | :param VariablesRequest request: | [
":",
"param",
"VariablesRequest",
"request",
":"
] | python | train |
frispete/keyrings.cryptfile | keyrings/cryptfile/convert.py | https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/convert.py#L132-L142 | def main(argv=None):
"""Main command line interface."""
if argv is None:
argv = sys.argv[1:]
cli = CommandLineTool()
try:
return cli.run(argv)
except KeyboardInterrupt:
print('Canceled')
return 3 | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"if",
"argv",
"is",
"None",
":",
"argv",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"cli",
"=",
"CommandLineTool",
"(",
")",
"try",
":",
"return",
"cli",
".",
"run",
"(",
"argv",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'Canceled'",
")",
"return",
"3"
] | Main command line interface. | [
"Main",
"command",
"line",
"interface",
"."
] | python | test |
yyuu/botornado | boto/sqs/queue.py | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sqs/queue.py#L287-L302 | def count_slow(self, page_size=10, vtimeout=10):
"""
Deprecated. This is the old 'count' method that actually counts
the messages by reading them all. This gives an accurate count but
is very slow for queues with non-trivial number of messasges.
Instead, use get_attribute('ApproximateNumberOfMessages') to take
advantage of the new SQS capability. This is retained only for
the unit tests.
"""
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
n += 1
l = self.get_messages(page_size, vtimeout)
return n | [
"def",
"count_slow",
"(",
"self",
",",
"page_size",
"=",
"10",
",",
"vtimeout",
"=",
"10",
")",
":",
"n",
"=",
"0",
"l",
"=",
"self",
".",
"get_messages",
"(",
"page_size",
",",
"vtimeout",
")",
"while",
"l",
":",
"for",
"m",
"in",
"l",
":",
"n",
"+=",
"1",
"l",
"=",
"self",
".",
"get_messages",
"(",
"page_size",
",",
"vtimeout",
")",
"return",
"n"
] | Deprecated. This is the old 'count' method that actually counts
the messages by reading them all. This gives an accurate count but
is very slow for queues with non-trivial number of messasges.
Instead, use get_attribute('ApproximateNumberOfMessages') to take
advantage of the new SQS capability. This is retained only for
the unit tests. | [
"Deprecated",
".",
"This",
"is",
"the",
"old",
"count",
"method",
"that",
"actually",
"counts",
"the",
"messages",
"by",
"reading",
"them",
"all",
".",
"This",
"gives",
"an",
"accurate",
"count",
"but",
"is",
"very",
"slow",
"for",
"queues",
"with",
"non",
"-",
"trivial",
"number",
"of",
"messasges",
".",
"Instead",
"use",
"get_attribute",
"(",
"ApproximateNumberOfMessages",
")",
"to",
"take",
"advantage",
"of",
"the",
"new",
"SQS",
"capability",
".",
"This",
"is",
"retained",
"only",
"for",
"the",
"unit",
"tests",
"."
] | python | train |
mlouielu/twstock | twstock/legacy.py | https://github.com/mlouielu/twstock/blob/cddddcc084d2d00497d591ab3059e3205b755825/twstock/legacy.py#L119-L122 | def best_buy_3(self):
"""三日均價由下往上
"""
return self.data.continuous(self.data.moving_average(self.data.price, 3)) == 1 | [
"def",
"best_buy_3",
"(",
"self",
")",
":",
"return",
"self",
".",
"data",
".",
"continuous",
"(",
"self",
".",
"data",
".",
"moving_average",
"(",
"self",
".",
"data",
".",
"price",
",",
"3",
")",
")",
"==",
"1"
] | 三日均價由下往上 | [
"三日均價由下往上"
] | python | train |
AtteqCom/zsl | src/zsl/resource/json_server_resource.py | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/json_server_resource.py#L47-L71 | def _get_link_pages(page, per_page, count, page_url):
    # type: (int, int, int, str) -> Dict[str, str]
    """Create link header for page metadata.
    :param page: current page
    :param per_page: page limit
    :param count: count of all resources
    :param page_url: url for resources
    :return: dictionary with name of the link as key and its url as value
    """
    current_page = _page_arg(page)
    links = {}
    end = page * per_page
    if page > 1:
        links['prev'] = page_url.replace(current_page, _page_arg(page - 1))
    if end < count:
        links['next'] = page_url.replace(current_page, _page_arg(page + 1))
    if per_page < count:
        links['first'] = page_url.replace(current_page, _page_arg(1))
        links['last'] = page_url.replace(current_page, _page_arg((count + per_page - 1) // per_page))
    return links | [
"def",
"_get_link_pages",
"(",
"page",
",",
"per_page",
",",
"count",
",",
"page_url",
")",
":",
"# type: (int, int, int, str) -> Dict[str, str]",
"current_page",
"=",
"_page_arg",
"(",
"page",
")",
"links",
"=",
"{",
"}",
"end",
"=",
"page",
"*",
"per_page",
"if",
"page",
">",
"1",
":",
"links",
"[",
"'prev'",
"]",
"=",
"page_url",
".",
"replace",
"(",
"current_page",
",",
"_page_arg",
"(",
"page",
"-",
"1",
")",
")",
"if",
"end",
"<",
"count",
":",
"links",
"[",
"'next'",
"]",
"=",
"page_url",
".",
"replace",
"(",
"current_page",
",",
"_page_arg",
"(",
"page",
"+",
"1",
")",
")",
"if",
"per_page",
"<",
"count",
":",
"links",
"[",
"'first'",
"]",
"=",
"page_url",
".",
"replace",
"(",
"current_page",
",",
"_page_arg",
"(",
"1",
")",
")",
"links",
"[",
"'last'",
"]",
"=",
"page_url",
".",
"replace",
"(",
"current_page",
",",
"_page_arg",
"(",
"(",
"count",
"+",
"per_page",
"-",
"1",
")",
"//",
"per_page",
")",
")",
"return",
"links"
] | Create link header for page metadata.
:param page: current page
:param per_page: page limit
:param count: count of all resources
:param page_url: url for resources
:return: dictionary with name of the link as key and its url as value | [
"Create",
"link",
"header",
"for",
"page",
"metadata",
"."
] | python | train |
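A standalone sketch of the paging links computed by `_get_link_pages` above; the rendering of the private `_page_arg(n)` helper as `_page=<n>` is an assumption:

```python
# 95 resources, 10 per page, currently on page 2 -> prev/next/first/last links.
def _page_arg(n):  # hypothetical stand-in for zsl's private helper
    return "_page={}".format(n)

page, per_page, count = 2, 10, 95
page_url = "/users?_page=2&_limit=10"
last_page = (count + per_page - 1) // per_page   # -> 10
links = {name: page_url.replace(_page_arg(page), _page_arg(target))
         for name, target in [("prev", page - 1), ("next", page + 1),
                              ("first", 1), ("last", last_page)]}
print(links["next"])   # /users?_page=3&_limit=10
```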
google/grr | grr/core/grr_response_core/lib/rdfvalues/paths.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/paths.py#L298-L314 | def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(set(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector) | [
"def",
"InterpolateGrouping",
"(",
"self",
",",
"pattern",
")",
":",
"components",
"=",
"[",
"]",
"offset",
"=",
"0",
"for",
"match",
"in",
"GROUPING_PATTERN",
".",
"finditer",
"(",
"pattern",
")",
":",
"components",
".",
"append",
"(",
"[",
"pattern",
"[",
"offset",
":",
"match",
".",
"start",
"(",
")",
"]",
"]",
")",
"# Expand the attribute into the set of possibilities:",
"alternatives",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"\",\"",
")",
"components",
".",
"append",
"(",
"set",
"(",
"alternatives",
")",
")",
"offset",
"=",
"match",
".",
"end",
"(",
")",
"components",
".",
"append",
"(",
"[",
"pattern",
"[",
"offset",
":",
"]",
"]",
")",
"# Now calculate the cartesian products of all these sets to form all",
"# strings.",
"for",
"vector",
"in",
"itertools",
".",
"product",
"(",
"*",
"components",
")",
":",
"yield",
"u\"\"",
".",
"join",
"(",
"vector",
")"
] | Interpolate inline globbing groups. | [
"Interpolate",
"inline",
"globbing",
"groups",
"."
] | python | train |
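A self-contained sketch of the inline-grouping expansion shown above; the regular expression is an assumption standing in for GRR's `GROUPING_PATTERN`:

```python
import itertools
import re

GROUPING_PATTERN = re.compile(r"\{([^}]+,[^}]*)\}")  # assumed shape of "{a,b,...}"

def interpolate_grouping(pattern):
    """Expand "{a,b}" groups into the cartesian product of alternatives."""
    components, offset = [], 0
    for match in GROUPING_PATTERN.finditer(pattern):
        components.append([pattern[offset:match.start()]])
        components.append(set(match.group(1).split(",")))
        offset = match.end()
    components.append([pattern[offset:]])
    for vector in itertools.product(*components):
        yield "".join(vector)

print(sorted(interpolate_grouping("/home/{alice,bob}/.bashrc")))
# ['/home/alice/.bashrc', '/home/bob/.bashrc']
```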
PmagPy/PmagPy | dev_setup.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dev_setup.py#L56-L96 | def unix_install():
"""
Edits or creates .bashrc, .bash_profile, and .profile files in the users
HOME directory in order to add your current directory (hopefully your
PmagPy directory) and assorted lower directories in the PmagPy/programs
directory to your PATH environment variable. It also adds the PmagPy and
the PmagPy/programs directories to PYTHONPATH.
"""
PmagPyDir = os.path.abspath(".")
COMMAND = """\n
for d in %s/programs/*/ "%s/programs/"; do
case ":$PATH:" in
*":$d:"*) :;; # already there
*) PMAGPATHS="$PMAGPATHS:$d";; # or PATH="$PATH:$new_entry"
esac
done
export PYTHONPATH="$PYTHONPATH:%s:%s/programs/"
export PATH="$PATH:$PMAGPATHS" """ % (PmagPyDir, PmagPyDir, PmagPyDir, PmagPyDir)
frc_path = os.path.join(
os.environ["HOME"], ".bashrc") # not recommended, but hey it freaking works
fbprof_path = os.path.join(os.environ["HOME"], ".bash_profile")
fprof_path = os.path.join(os.environ["HOME"], ".profile")
all_paths = [frc_path, fbprof_path, fprof_path]
for f_path in all_paths:
open_type = 'a'
if not os.path.isfile(f_path):
open_type = 'w+'
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
else:
fin = open(f_path, 'r')
current_f = fin.read()
fin.close()
if COMMAND not in current_f:
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
print("Install complete. Please restart the shell to complete install.\nIf you are seeing strange or non-existent paths in your PATH or PYTHONPATH variable please manually check your .bashrc, .bash_profile, and .profile or attempt to reinstall.") | [
"def",
"unix_install",
"(",
")",
":",
"PmagPyDir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"\".\"",
")",
"COMMAND",
"=",
"\"\"\"\\n\nfor d in %s/programs/*/ \"%s/programs/\"; do\n case \":$PATH:\" in\n *\":$d:\"*) :;; # already there\n *) PMAGPATHS=\"$PMAGPATHS:$d\";; # or PATH=\"$PATH:$new_entry\"\n esac\ndone\nexport PYTHONPATH=\"$PYTHONPATH:%s:%s/programs/\"\nexport PATH=\"$PATH:$PMAGPATHS\" \"\"\"",
"%",
"(",
"PmagPyDir",
",",
"PmagPyDir",
",",
"PmagPyDir",
",",
"PmagPyDir",
")",
"frc_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"\"HOME\"",
"]",
",",
"\".bashrc\"",
")",
"# not recommended, but hey it freaking works",
"fbprof_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"\"HOME\"",
"]",
",",
"\".bash_profile\"",
")",
"fprof_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"environ",
"[",
"\"HOME\"",
"]",
",",
"\".profile\"",
")",
"all_paths",
"=",
"[",
"frc_path",
",",
"fbprof_path",
",",
"fprof_path",
"]",
"for",
"f_path",
"in",
"all_paths",
":",
"open_type",
"=",
"'a'",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"f_path",
")",
":",
"open_type",
"=",
"'w+'",
"fout",
"=",
"open",
"(",
"f_path",
",",
"open_type",
")",
"fout",
".",
"write",
"(",
"COMMAND",
")",
"fout",
".",
"close",
"(",
")",
"else",
":",
"fin",
"=",
"open",
"(",
"f_path",
",",
"'r'",
")",
"current_f",
"=",
"fin",
".",
"read",
"(",
")",
"fin",
".",
"close",
"(",
")",
"if",
"COMMAND",
"not",
"in",
"current_f",
":",
"fout",
"=",
"open",
"(",
"f_path",
",",
"open_type",
")",
"fout",
".",
"write",
"(",
"COMMAND",
")",
"fout",
".",
"close",
"(",
")",
"print",
"(",
"\"Install complete. Please restart the shell to complete install.\\nIf you are seeing strange or non-existent paths in your PATH or PYTHONPATH variable please manually check your .bashrc, .bash_profile, and .profile or attempt to reinstall.\"",
")"
] | Edits or creates .bashrc, .bash_profile, and .profile files in the users
HOME directory in order to add your current directory (hopefully your
PmagPy directory) and assorted lower directories in the PmagPy/programs
directory to your PATH environment variable. It also adds the PmagPy and
the PmagPy/programs directories to PYTHONPATH. | [
"Edits",
"or",
"creates",
".",
"bashrc",
".",
"bash_profile",
"and",
".",
"profile",
"files",
"in",
"the",
"users",
"HOME",
"directory",
"in",
"order",
"to",
"add",
"your",
"current",
"directory",
"(",
"hopefully",
"your",
"PmagPy",
"directory",
")",
"and",
"assorted",
"lower",
"directories",
"in",
"the",
"PmagPy",
"/",
"programs",
"directory",
"to",
"your",
"PATH",
"environment",
"variable",
".",
"It",
"also",
"adds",
"the",
"PmagPy",
"and",
"the",
"PmagPy",
"/",
"programs",
"directories",
"to",
"PYTHONPATH",
"."
] | python | train |
lucasmaystre/choix | choix/mm.py | https://github.com/lucasmaystre/choix/blob/05a57a10bb707338113a9d91601ca528ead7a881/choix/mm.py#L198-L252 | def choicerank(
        digraph, traffic_in, traffic_out, weight=None,
        initial_params=None, alpha=1.0, max_iter=10000, tol=1e-8):
    """Compute the MAP estimate of a network choice model's parameters.
    This function computes the maximum-a-posteriori (MAP) estimate of model
    parameters given a network structure and node-level traffic data (see
    :ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.
    The nodes are assumed to be labeled using consecutive integers starting
    from 0.
    Parameters
    ----------
    digraph : networkx.DiGraph
        Directed graph representing the network.
    traffic_in : array_like
        Number of arrivals at each node.
    traffic_out : array_like
        Number of departures at each node.
    weight : str, optional
        The edge attribute that holds the numerical value used for the edge
        weight. If None (default) then all edge weights are 1.
    initial_params : array_like, optional
        Parameters used to initialize the iterative procedure.
    alpha : float, optional
        Regularization parameter.
    max_iter : int, optional
        Maximum number of iterations allowed.
    tol : float, optional
        Maximum L1-norm of the difference between successive iterates to
        declare convergence.
    Returns
    -------
    params : numpy.ndarray
        The MAP estimate of model parameters.
    Raises
    ------
    ImportError
        If the NetworkX library cannot be imported.
    """
    import networkx as nx
    # Compute the (sparse) adjacency matrix.
    n_items = len(digraph)
    nodes = np.arange(n_items)
    adj = nx.to_scipy_sparse_matrix(digraph, nodelist=nodes, weight=weight)
    adj_t = adj.T.tocsr()
    # Process the data into a standard form.
    traffic_in = np.asarray(traffic_in)
    traffic_out = np.asarray(traffic_out)
    data = (adj, adj_t, traffic_in, traffic_out)
    return _mm(
        n_items, data, initial_params, alpha, max_iter, tol, _choicerank) | [
n_items, data, initial_params, alpha, max_iter, tol, _choicerank) | [
"def",
"choicerank",
"(",
"digraph",
",",
"traffic_in",
",",
"traffic_out",
",",
"weight",
"=",
"None",
",",
"initial_params",
"=",
"None",
",",
"alpha",
"=",
"1.0",
",",
"max_iter",
"=",
"10000",
",",
"tol",
"=",
"1e-8",
")",
":",
"import",
"networkx",
"as",
"nx",
"# Compute the (sparse) adjacency matrix.",
"n_items",
"=",
"len",
"(",
"digraph",
")",
"nodes",
"=",
"np",
".",
"arange",
"(",
"n_items",
")",
"adj",
"=",
"nx",
".",
"to_scipy_sparse_matrix",
"(",
"digraph",
",",
"nodelist",
"=",
"nodes",
",",
"weight",
"=",
"weight",
")",
"adj_t",
"=",
"adj",
".",
"T",
".",
"tocsr",
"(",
")",
"# Process the data into a standard form.",
"traffic_in",
"=",
"np",
".",
"asarray",
"(",
"traffic_in",
")",
"traffic_out",
"=",
"np",
".",
"asarray",
"(",
"traffic_out",
")",
"data",
"=",
"(",
"adj",
",",
"adj_t",
",",
"traffic_in",
",",
"traffic_out",
")",
"return",
"_mm",
"(",
"n_items",
",",
"data",
",",
"initial_params",
",",
"alpha",
",",
"max_iter",
",",
"tol",
",",
"_choicerank",
")"
] | Compute the MAP estimate of a network choice model's parameters.
This function computes the maximum-a-posteriori (MAP) estimate of model
parameters given a network structure and node-level traffic data (see
:ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.
The nodes are assumed to be labeled using consecutive integers starting
from 0.
Parameters
----------
digraph : networkx.DiGraph
Directed graph representing the network.
traffic_in : array_like
Number of arrivals at each node.
traffic_out : array_like
Number of departures at each node.
weight : str, optional
The edge attribute that holds the numerical value used for the edge
weight. If None (default) then all edge weights are 1.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The MAP estimate of model parameters.
Raises
------
ImportError
If the NetworkX library cannot be imported. | [
"Compute",
"the",
"MAP",
"estimate",
"of",
"a",
"network",
"choice",
"model",
"s",
"parameters",
"."
] | python | train |
openstack/horizon | openstack_dashboard/utils/config.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/utils/config.py#L26-L38 | def load_config(files=None, root_path=None, local_path=None):
"""Load the configuration from specified files."""
config = cfg.ConfigOpts()
config.register_opts([
cfg.Opt('root_path', default=root_path),
cfg.Opt('local_path', default=local_path),
])
# XXX register actual config groups here
# theme_group = config_theme.register_config(config)
if files is not None:
config(args=[], default_config_files=files)
return config | [
"def",
"load_config",
"(",
"files",
"=",
"None",
",",
"root_path",
"=",
"None",
",",
"local_path",
"=",
"None",
")",
":",
"config",
"=",
"cfg",
".",
"ConfigOpts",
"(",
")",
"config",
".",
"register_opts",
"(",
"[",
"cfg",
".",
"Opt",
"(",
"'root_path'",
",",
"default",
"=",
"root_path",
")",
",",
"cfg",
".",
"Opt",
"(",
"'local_path'",
",",
"default",
"=",
"local_path",
")",
",",
"]",
")",
"# XXX register actual config groups here",
"# theme_group = config_theme.register_config(config)",
"if",
"files",
"is",
"not",
"None",
":",
"config",
"(",
"args",
"=",
"[",
"]",
",",
"default_config_files",
"=",
"files",
")",
"return",
"config"
] | Load the configuration from specified files. | [
"Load",
"the",
"configuration",
"from",
"specified",
"files",
"."
] | python | train |
peerplays-network/python-peerplays | peerplays/peerplays.py | https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/peerplays.py#L1533-L1556 | def bet_cancel(self, bet_to_cancel, account=None, **kwargs):
""" Cancel a bet
:param str bet_to_cancel: The identifier that identifies the bet to
cancel
:param str account: (optional) the account that owns the bet
(defaults to ``default_account``)
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
bet = Bet(bet_to_cancel)
op = operations.Bet_cancel(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"bettor_id": account["id"],
"bet_to_cancel": bet["id"],
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | [
"def",
"bet_cancel",
"(",
"self",
",",
"bet_to_cancel",
",",
"account",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"account",
":",
"if",
"\"default_account\"",
"in",
"self",
".",
"config",
":",
"account",
"=",
"self",
".",
"config",
"[",
"\"default_account\"",
"]",
"if",
"not",
"account",
":",
"raise",
"ValueError",
"(",
"\"You need to provide an account\"",
")",
"account",
"=",
"Account",
"(",
"account",
")",
"bet",
"=",
"Bet",
"(",
"bet_to_cancel",
")",
"op",
"=",
"operations",
".",
"Bet_cancel",
"(",
"*",
"*",
"{",
"\"fee\"",
":",
"{",
"\"amount\"",
":",
"0",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
",",
"\"bettor_id\"",
":",
"account",
"[",
"\"id\"",
"]",
",",
"\"bet_to_cancel\"",
":",
"bet",
"[",
"\"id\"",
"]",
",",
"\"prefix\"",
":",
"self",
".",
"prefix",
",",
"}",
")",
"return",
"self",
".",
"finalizeOp",
"(",
"op",
",",
"account",
"[",
"\"name\"",
"]",
",",
"\"active\"",
",",
"*",
"*",
"kwargs",
")"
] | Cancel a bet
:param str bet_to_cancel: The identifier that identifies the bet to
cancel
:param str account: (optional) the account that owns the bet
(defaults to ``default_account``) | [
"Cancel",
"a",
"bet"
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py#L100-L109 | def button(self, name, filename, command):
    '''add a button'''
    try:
        img = LoadImage(filename)
        b = Tkinter.Button(self.frame, image=img, command=command)
        b.image = img
    except Exception:
        b = Tkinter.Button(self.frame, text=filename, command=command)
    b.pack(side=Tkinter.LEFT)
    self.buttons[name] = b | [
self.buttons[name] = b | [
"def",
"button",
"(",
"self",
",",
"name",
",",
"filename",
",",
"command",
")",
":",
"try",
":",
"img",
"=",
"LoadImage",
"(",
"filename",
")",
"b",
"=",
"Tkinter",
".",
"Button",
"(",
"self",
".",
"frame",
",",
"image",
"=",
"img",
",",
"command",
"=",
"command",
")",
"b",
".",
"image",
"=",
"img",
"except",
"Exception",
":",
"b",
"=",
"Tkinter",
".",
"Button",
"(",
"self",
".",
"frame",
",",
"text",
"=",
"filename",
",",
"command",
"=",
"command",
")",
"b",
".",
"pack",
"(",
"side",
"=",
"Tkinter",
".",
"LEFT",
")",
"self",
".",
"buttons",
"[",
"name",
"]",
"=",
"b"
] | add a button | [
"add",
"a",
"button"
] | python | train |
twitterdev/search-tweets-python | searchtweets/utils.py | https://github.com/twitterdev/search-tweets-python/blob/7875afb4f3ee125a9fdcf2e50b5ae761da5f46b5/searchtweets/utils.py#L60-L84 | def merge_dicts(*dicts):
"""
Helpful function to merge / combine dictionaries and return a new
dictionary.
Args:
dicts (list or Iterable): iterable set of dictionaries for merging.
Returns:
dict: dict with all keys from the passed list. Later dictionaries in
the sequence will override duplicate keys from previous dictionaries.
Example:
>>> from searchtweets.utils import merge_dicts
>>> d1 = {"rule": "something has:geo"}
>>> d2 = {"maxResults": 1000}
>>> merge_dicts(*[d1, d2])
{"maxResults": 1000, "rule": "something has:geo"}
"""
def _merge_dicts(dict1, dict2):
merged = dict1.copy()
merged.update(dict2)
return merged
return reduce(_merge_dicts, dicts) | [
"def",
"merge_dicts",
"(",
"*",
"dicts",
")",
":",
"def",
"_merge_dicts",
"(",
"dict1",
",",
"dict2",
")",
":",
"merged",
"=",
"dict1",
".",
"copy",
"(",
")",
"merged",
".",
"update",
"(",
"dict2",
")",
"return",
"merged",
"return",
"reduce",
"(",
"_merge_dicts",
",",
"dicts",
")"
] | Helpful function to merge / combine dictionaries and return a new
dictionary.
Args:
dicts (list or Iterable): iterable set of dictionaries for merging.
Returns:
dict: dict with all keys from the passed list. Later dictionaries in
the sequence will override duplicate keys from previous dictionaries.
Example:
>>> from searchtweets.utils import merge_dicts
>>> d1 = {"rule": "something has:geo"}
>>> d2 = {"maxResults": 1000}
>>> merge_dicts(*[d1, d2])
{"maxResults": 1000, "rule": "something has:geo"} | [
"Helpful",
"function",
"to",
"merge",
"/",
"combine",
"dictionaries",
"and",
"return",
"a",
"new",
"dictionary",
"."
] | python | train |
SBRG/ssbio | ssbio/biopython/Bio/Struct/Protein.py | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/Protein.py#L92-L131 | def check_missing_atoms(self, template=None, ha_only=True):
"""
Checks for missing atoms based on a template.
Default: Searches for missing heavy atoms (not Hydrogen) based on Bio.Struct.protein_residues
Arguments:
- template, dictionary, keys are residue names, values list of atom names.
- ha_only, boolean, default True, restrict check to heavy atoms.
Returns a dictionary of tuples with the missing atoms per residue.
"""
missing_atoms = {}
if not template:
import protein_residues
template = protein_residues.normal # Don't care for terminal residues here..
for residue in self.get_residues():
if not template.has_key(residue.resname):
# Maybe add this as a warning instead of exception?
raise ValueError('Residue name (%s) not in the template' %residue.resname )
if ha_only:
heavy_atoms = [ atom for atom in template[residue.resname]['atoms'].keys()
if atom[0] != 'H' and not (atom[0].isdigit() and atom[1] == 'H')]
reference_set = set(heavy_atoms)
else:
reference_set = set(template[residue.resname]['atoms'].keys())
structure_set = set(residue.child_dict.keys())
diff = reference_set.difference(structure_set)
if diff:
residue_uniq_id = (residue.parent.id, residue.resname, residue.get_id()[1]) # Chain, Name, Number
missing_atoms[residue_uniq_id] = list(diff)
return missing_atoms | [
"def",
"check_missing_atoms",
"(",
"self",
",",
"template",
"=",
"None",
",",
"ha_only",
"=",
"True",
")",
":",
"missing_atoms",
"=",
"{",
"}",
"if",
"not",
"template",
":",
"import",
"protein_residues",
"template",
"=",
"protein_residues",
".",
"normal",
"# Don't care for terminal residues here..",
"for",
"residue",
"in",
"self",
".",
"get_residues",
"(",
")",
":",
"if",
"not",
"template",
".",
"has_key",
"(",
"residue",
".",
"resname",
")",
":",
"# Maybe add this as a warning instead of exception?",
"raise",
"ValueError",
"(",
"'Residue name (%s) not in the template'",
"%",
"residue",
".",
"resname",
")",
"if",
"ha_only",
":",
"heavy_atoms",
"=",
"[",
"atom",
"for",
"atom",
"in",
"template",
"[",
"residue",
".",
"resname",
"]",
"[",
"'atoms'",
"]",
".",
"keys",
"(",
")",
"if",
"atom",
"[",
"0",
"]",
"!=",
"'H'",
"and",
"not",
"(",
"atom",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
"and",
"atom",
"[",
"1",
"]",
"==",
"'H'",
")",
"]",
"reference_set",
"=",
"set",
"(",
"heavy_atoms",
")",
"else",
":",
"reference_set",
"=",
"set",
"(",
"template",
"[",
"residue",
".",
"resname",
"]",
"[",
"'atoms'",
"]",
".",
"keys",
"(",
")",
")",
"structure_set",
"=",
"set",
"(",
"residue",
".",
"child_dict",
".",
"keys",
"(",
")",
")",
"diff",
"=",
"reference_set",
".",
"difference",
"(",
"structure_set",
")",
"if",
"diff",
":",
"residue_uniq_id",
"=",
"(",
"residue",
".",
"parent",
".",
"id",
",",
"residue",
".",
"resname",
",",
"residue",
".",
"get_id",
"(",
")",
"[",
"1",
"]",
")",
"# Chain, Name, Number",
"missing_atoms",
"[",
"residue_uniq_id",
"]",
"=",
"list",
"(",
"diff",
")",
"return",
"missing_atoms"
] | Checks for missing atoms based on a template.
Default: Searches for missing heavy atoms (not Hydrogen) based on Bio.Struct.protein_residues
Arguments:
- template, dictionary, keys are residue names, values list of atom names.
- ha_only, boolean, default True, restrict check to heavy atoms.
Returns a dictionary of tuples with the missing atoms per residue. | [
"Checks",
"for",
"missing",
"atoms",
"based",
"on",
"a",
"template",
".",
"Default",
":",
"Searches",
"for",
"missing",
"heavy",
"atoms",
"(",
"not",
"Hydrogen",
")",
"based",
"on",
"Bio",
".",
"Struct",
".",
"protein_residues",
"Arguments",
":",
"-",
"template",
"dictionary",
"keys",
"are",
"residue",
"names",
"values",
"list",
"of",
"atom",
"names",
".",
"-",
"ha_only",
"boolean",
"default",
"True",
"restrict",
"check",
"to",
"heavy",
"atoms",
".",
"Returns",
"a",
"dictionary",
"of",
"tuples",
"with",
"the",
"missing",
"atoms",
"per",
"residue",
"."
] | python | train |
SeabornGames/RequestClient | seaborn/request_client/api_call.py | https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/api_call.py#L568-L586 | def time_report_item(self, label, message=None):
"""
This will return a dictionary for the given message based on timestamps
:param label:
:param message: str of the message to find the timestamp
:return: dict of times
"""
next_ = TIMESTAMPS_ORDER[TIMESTAMPS_ORDER.index(label) + 1]
while next_ not in self._timestamps:
next_ = TIMESTAMPS_ORDER[TIMESTAMPS_ORDER.index(next_) + 1]
assert label in TIMESTAMPS_ORDER
start = self._timestamps[label] - self._timestamps[TIMESTAMPS_ORDER[0]]
end = self._timestamps[next_] - self._timestamps[TIMESTAMPS_ORDER[0]]
return {'Message': message,
'Start': start,
'End': end,
'Sum': end - start,
'Count': 1} | [
"def",
"time_report_item",
"(",
"self",
",",
"label",
",",
"message",
"=",
"None",
")",
":",
"next_",
"=",
"TIMESTAMPS_ORDER",
"[",
"TIMESTAMPS_ORDER",
".",
"index",
"(",
"label",
")",
"+",
"1",
"]",
"while",
"next_",
"not",
"in",
"self",
".",
"_timestamps",
":",
"next_",
"=",
"TIMESTAMPS_ORDER",
"[",
"TIMESTAMPS_ORDER",
".",
"index",
"(",
"next_",
")",
"+",
"1",
"]",
"assert",
"label",
"in",
"TIMESTAMPS_ORDER",
"start",
"=",
"self",
".",
"_timestamps",
"[",
"label",
"]",
"-",
"self",
".",
"_timestamps",
"[",
"TIMESTAMPS_ORDER",
"[",
"0",
"]",
"]",
"end",
"=",
"self",
".",
"_timestamps",
"[",
"next_",
"]",
"-",
"self",
".",
"_timestamps",
"[",
"TIMESTAMPS_ORDER",
"[",
"0",
"]",
"]",
"return",
"{",
"'Message'",
":",
"message",
",",
"'Start'",
":",
"start",
",",
"'End'",
":",
"end",
",",
"'Sum'",
":",
"end",
"-",
"start",
",",
"'Count'",
":",
"1",
"}"
] | This will return a dictionary for the given message based on timestamps
:param label:
:param message: str of the message to find the timestamp
:return: dict of times | [
"This",
"will",
"return",
"a",
"dictionary",
"for",
"the",
"given",
"message",
"based",
"on",
"timestamps",
":",
"param",
"label",
":",
":",
"param",
"message",
":",
"str",
"of",
"the",
"message",
"to",
"find",
"the",
"timestamp",
":",
"return",
":",
"dict",
"of",
"times"
] | python | train |
limodou/uliweb | uliweb/core/SimpleFrame.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/SimpleFrame.py#L405-L424 | def get_app_dir(app):
"""
Get an app's directory
"""
path = __app_dirs__.get(app)
if path is not None:
return path
else:
p = app.split('.')
try:
path = pkg.resource_filename(p[0], '')
except ImportError as e:
log.error("Can't import app %s" % app)
log.exception(e)
path = ''
if len(p) > 1:
path = os.path.join(path, *p[1:])
__app_dirs__[app] = path
return path | [
"def",
"get_app_dir",
"(",
"app",
")",
":",
"path",
"=",
"__app_dirs__",
".",
"get",
"(",
"app",
")",
"if",
"path",
"is",
"not",
"None",
":",
"return",
"path",
"else",
":",
"p",
"=",
"app",
".",
"split",
"(",
"'.'",
")",
"try",
":",
"path",
"=",
"pkg",
".",
"resource_filename",
"(",
"p",
"[",
"0",
"]",
",",
"''",
")",
"except",
"ImportError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Can't import app %s\"",
"%",
"app",
")",
"log",
".",
"exception",
"(",
"e",
")",
"path",
"=",
"''",
"if",
"len",
"(",
"p",
")",
">",
"1",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"*",
"p",
"[",
"1",
":",
"]",
")",
"__app_dirs__",
"[",
"app",
"]",
"=",
"path",
"return",
"path"
] | Get an app's directory | [
"Get",
"an",
"app",
"s",
"directory"
] | python | train |
noxdafox/vminspect | vminspect/timeline.py | https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/timeline.py#L216-L233 | def generate_timeline(usnjrnl, filesystem_content):
"""Aggregates the data collected from the USN journal
and the filesystem content.
"""
journal_content = defaultdict(list)
for event in usnjrnl:
journal_content[event.inode].append(event)
for event in usnjrnl:
try:
dirent = lookup_dirent(event, filesystem_content, journal_content)
yield UsnJrnlEvent(
dirent.inode, dirent.path, dirent.size, dirent.allocated,
event.timestamp, event.changes, event.attributes)
except LookupError as error:
LOGGER.debug(error) | [
"def",
"generate_timeline",
"(",
"usnjrnl",
",",
"filesystem_content",
")",
":",
"journal_content",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"event",
"in",
"usnjrnl",
":",
"journal_content",
"[",
"event",
".",
"inode",
"]",
".",
"append",
"(",
"event",
")",
"for",
"event",
"in",
"usnjrnl",
":",
"try",
":",
"dirent",
"=",
"lookup_dirent",
"(",
"event",
",",
"filesystem_content",
",",
"journal_content",
")",
"yield",
"UsnJrnlEvent",
"(",
"dirent",
".",
"inode",
",",
"dirent",
".",
"path",
",",
"dirent",
".",
"size",
",",
"dirent",
".",
"allocated",
",",
"event",
".",
"timestamp",
",",
"event",
".",
"changes",
",",
"event",
".",
"attributes",
")",
"except",
"LookupError",
"as",
"error",
":",
"LOGGER",
".",
"debug",
"(",
"error",
")"
] | Aggregates the data collected from the USN journal
and the filesystem content. | [
"Aggregates",
"the",
"data",
"collected",
"from",
"the",
"USN",
"journal",
"and",
"the",
"filesystem",
"content",
"."
] | python | train |
barrust/mediawiki | mediawiki/mediawiki.py | https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L543-L572 | def prefixsearch(self, prefix, results=10):
""" Perform a prefix search using the provided prefix string
Args:
prefix (str): Prefix string to use for search
results (int): Number of pages with the prefix to return
Returns:
list: List of page titles
Note:
**Per the documentation:** "The purpose of this module is \
similar to action=opensearch: to take user input and provide \
the best-matching titles. Depending on the search engine \
backend, this might include typo correction, redirect \
avoidance, or other heuristics." """
self._check_query(prefix, "Prefix must be specified")
query_params = {
"list": "prefixsearch",
"pssearch": prefix,
"pslimit": ("max" if results > 500 else results),
"psnamespace": 0,
"psoffset": 0, # parameterize to skip to later in the list?
}
raw_results = self.wiki_request(query_params)
self._check_error_response(raw_results, prefix)
return [rec["title"] for rec in raw_results["query"]["prefixsearch"]] | [
"def",
"prefixsearch",
"(",
"self",
",",
"prefix",
",",
"results",
"=",
"10",
")",
":",
"self",
".",
"_check_query",
"(",
"prefix",
",",
"\"Prefix must be specified\"",
")",
"query_params",
"=",
"{",
"\"list\"",
":",
"\"prefixsearch\"",
",",
"\"pssearch\"",
":",
"prefix",
",",
"\"pslimit\"",
":",
"(",
"\"max\"",
"if",
"results",
">",
"500",
"else",
"results",
")",
",",
"\"psnamespace\"",
":",
"0",
",",
"\"psoffset\"",
":",
"0",
",",
"# parameterize to skip to later in the list?",
"}",
"raw_results",
"=",
"self",
".",
"wiki_request",
"(",
"query_params",
")",
"self",
".",
"_check_error_response",
"(",
"raw_results",
",",
"prefix",
")",
"return",
"[",
"rec",
"[",
"\"title\"",
"]",
"for",
"rec",
"in",
"raw_results",
"[",
"\"query\"",
"]",
"[",
"\"prefixsearch\"",
"]",
"]"
] | Perform a prefix search using the provided prefix string
Args:
prefix (str): Prefix string to use for search
results (int): Number of pages with the prefix to return
Returns:
list: List of page titles
Note:
**Per the documentation:** "The purpose of this module is \
similar to action=opensearch: to take user input and provide \
the best-matching titles. Depending on the search engine \
backend, this might include typo correction, redirect \
avoidance, or other heuristics." | [
"Perform",
"a",
"prefix",
"search",
"using",
"the",
"provided",
"prefix",
"string"
] | python | train |
ArchiveTeam/wpull | wpull/document/sitemap.py | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/document/sitemap.py#L37-L41 | def is_response(cls, response):
    '''Return whether the document is likely to be a Sitemap.'''
    if response.body:
        if cls.is_file(response.body):
            return True | [
"def",
"is_response",
"(",
"cls",
",",
"response",
")",
":",
"if",
"response",
".",
"body",
":",
"if",
"cls",
".",
"is_file",
"(",
"response",
".",
"body",
")",
":",
"return",
"True"
] | Return whether the document is likely to be a Sitemap. | [
"Return",
"whether",
"the",
"document",
"is",
"likely",
"to",
"be",
"a",
"Sitemap",
"."
] | python | train |
jasonrbriggs/stomp.py | stomp/protocol.py | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/protocol.py#L68-L85 | def begin(self, transaction=None, headers=None, **keyword_headers):
"""
Begin a transaction.
:param str transaction: the identifier for the transaction (optional - if not specified
a unique transaction id will be generated)
:param dict headers: a map of any additional headers the broker requires
:param keyword_headers: any additional headers the broker requires
:return: the transaction id
:rtype: str
"""
headers = utils.merge_headers([headers, keyword_headers])
if not transaction:
transaction = utils.get_uuid()
headers[HDR_TRANSACTION] = transaction
self.send_frame(CMD_BEGIN, headers)
return transaction | [
"def",
"begin",
"(",
"self",
",",
"transaction",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"keyword_headers",
")",
":",
"headers",
"=",
"utils",
".",
"merge_headers",
"(",
"[",
"headers",
",",
"keyword_headers",
"]",
")",
"if",
"not",
"transaction",
":",
"transaction",
"=",
"utils",
".",
"get_uuid",
"(",
")",
"headers",
"[",
"HDR_TRANSACTION",
"]",
"=",
"transaction",
"self",
".",
"send_frame",
"(",
"CMD_BEGIN",
",",
"headers",
")",
"return",
"transaction"
] | Begin a transaction.
:param str transaction: the identifier for the transaction (optional - if not specified
a unique transaction id will be generated)
:param dict headers: a map of any additional headers the broker requires
:param keyword_headers: any additional headers the broker requires
:return: the transaction id
:rtype: str | [
"Begin",
"a",
"transaction",
"."
] | python | train |
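A hedged usage sketch for the transaction `begin` shown above, using the stomp.py connection API; the broker address and queue name are assumptions:

```python
import stomp

conn = stomp.Connection([('localhost', 61613)])  # hypothetical broker
conn.connect(wait=True)

tx = conn.begin()                                   # returns the transaction id
conn.send(destination='/queue/test', body='hello', transaction=tx)
conn.commit(transaction=tx)                         # or conn.abort(transaction=tx)
conn.disconnect()
```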
evhub/coconut | coconut/compiler/util.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/util.py#L65-L100 | def evaluate_tokens(tokens):
"""Evaluate the given tokens in the computation graph."""
if isinstance(tokens, str):
return tokens
elif isinstance(tokens, ParseResults):
# evaluate the list portion of the ParseResults
toklist, name, asList, modal = tokens.__getnewargs__()
new_toklist = [evaluate_tokens(toks) for toks in toklist]
new_tokens = ParseResults(new_toklist, name, asList, modal)
# evaluate the dictionary portion of the ParseResults
new_tokdict = {}
for name, occurrences in tokens._ParseResults__tokdict.items():
new_occurences = []
for value, position in occurrences:
if isinstance(value, ParseResults) and value._ParseResults__toklist == toklist:
new_value = new_tokens
else:
try:
new_value = new_toklist[toklist.index(value)]
except ValueError:
complain(lambda: CoconutInternalException("inefficient reevaluation of tokens: {} not in {}".format(
value,
toklist,
)))
new_value = evaluate_tokens(value)
new_occurences.append(_ParseResultsWithOffset(new_value, position))
new_tokdict[name] = occurrences
new_tokens._ParseResults__accumNames.update(tokens._ParseResults__accumNames)
new_tokens._ParseResults__tokdict.update(new_tokdict)
return new_tokens
elif isinstance(tokens, ComputationNode):
return tokens.evaluate()
elif isinstance(tokens, (list, tuple)):
return [evaluate_tokens(inner_toks) for inner_toks in tokens]
else:
raise CoconutInternalException("invalid computation graph tokens", tokens) | [
"def",
"evaluate_tokens",
"(",
"tokens",
")",
":",
"if",
"isinstance",
"(",
"tokens",
",",
"str",
")",
":",
"return",
"tokens",
"elif",
"isinstance",
"(",
"tokens",
",",
"ParseResults",
")",
":",
"# evaluate the list portion of the ParseResults",
"toklist",
",",
"name",
",",
"asList",
",",
"modal",
"=",
"tokens",
".",
"__getnewargs__",
"(",
")",
"new_toklist",
"=",
"[",
"evaluate_tokens",
"(",
"toks",
")",
"for",
"toks",
"in",
"toklist",
"]",
"new_tokens",
"=",
"ParseResults",
"(",
"new_toklist",
",",
"name",
",",
"asList",
",",
"modal",
")",
"# evaluate the dictionary portion of the ParseResults",
"new_tokdict",
"=",
"{",
"}",
"for",
"name",
",",
"occurrences",
"in",
"tokens",
".",
"_ParseResults__tokdict",
".",
"items",
"(",
")",
":",
"new_occurences",
"=",
"[",
"]",
"for",
"value",
",",
"position",
"in",
"occurrences",
":",
"if",
"isinstance",
"(",
"value",
",",
"ParseResults",
")",
"and",
"value",
".",
"_ParseResults__toklist",
"==",
"toklist",
":",
"new_value",
"=",
"new_tokens",
"else",
":",
"try",
":",
"new_value",
"=",
"new_toklist",
"[",
"toklist",
".",
"index",
"(",
"value",
")",
"]",
"except",
"ValueError",
":",
"complain",
"(",
"lambda",
":",
"CoconutInternalException",
"(",
"\"inefficient reevaluation of tokens: {} not in {}\"",
".",
"format",
"(",
"value",
",",
"toklist",
",",
")",
")",
")",
"new_value",
"=",
"evaluate_tokens",
"(",
"value",
")",
"new_occurences",
".",
"append",
"(",
"_ParseResultsWithOffset",
"(",
"new_value",
",",
"position",
")",
")",
"new_tokdict",
"[",
"name",
"]",
"=",
"occurrences",
"new_tokens",
".",
"_ParseResults__accumNames",
".",
"update",
"(",
"tokens",
".",
"_ParseResults__accumNames",
")",
"new_tokens",
".",
"_ParseResults__tokdict",
".",
"update",
"(",
"new_tokdict",
")",
"return",
"new_tokens",
"elif",
"isinstance",
"(",
"tokens",
",",
"ComputationNode",
")",
":",
"return",
"tokens",
".",
"evaluate",
"(",
")",
"elif",
"isinstance",
"(",
"tokens",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"evaluate_tokens",
"(",
"inner_toks",
")",
"for",
"inner_toks",
"in",
"tokens",
"]",
"else",
":",
"raise",
"CoconutInternalException",
"(",
"\"invalid computation graph tokens\"",
",",
"tokens",
")"
] | Evaluate the given tokens in the computation graph. | [
"Evaluate",
"the",
"given",
"tokens",
"in",
"the",
"computation",
"graph",
"."
] | python | train |
wbond/asn1crypto | asn1crypto/core.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L3221-L3234 | def _is_mutated(self):
"""
:return:
A boolean - if the sequence or any children (recursively) have been
mutated
"""
mutated = self._mutated
if self.children is not None:
for child in self.children:
if isinstance(child, Sequence) or isinstance(child, SequenceOf):
mutated = mutated or child._is_mutated()
return mutated | :return:
A boolean - if the sequence or any children (recursively) have been
mutated | python | train
globocom/GloboNetworkAPI-client-python | networkapiclient/ApiV4VirtualInterface.py | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiV4VirtualInterface.py#L89-L99 | def create(self, virtual_interfaces):
"""
Method to create Virtual Interfaces
:param Virtual Interfaces: List containing Virtual Interfaces desired to be created on database
:return: None
"""
data = {'virtual_interfaces': virtual_interfaces}
return super(ApiV4VirtualInterface, self).post\
('api/v4/virtual-interface/', data) | Method to create Virtual Interfaces
:param Virtual Interfaces: List containing Virtual Interfaces desired to be created on database
:return: None | python | train
nion-software/nionswift | nion/swift/Facade.py | https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L1156-L1172 | def set_metadata_value(self, key: str, value: typing.Any) -> None:
"""Set the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
.. versionadded:: 1.0
Scriptable: Yes
"""
self._data_item.set_metadata_value(key, value) | Set the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
.. versionadded:: 1.0
Scriptable: Yes | python | train
spacetelescope/drizzlepac | drizzlepac/util.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/util.py#L680-L743 | def getDefaultConfigObj(taskname,configObj,input_dict={},loadOnly=True):
""" Return default configObj instance for task updated
with user-specified values from input_dict.
Parameters
----------
taskname : string
Name of task to load into TEAL
configObj : string
The valid values for 'configObj' would be::
None - loads last saved user .cfg file
'defaults' - loads task default .cfg file
name of .cfg file (string)- loads user-specified .cfg file
input_dict : dict
Set of parameters and values specified by user to be different from
what gets loaded in from the .cfg file for the task
loadOnly : bool
Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
user to edit the values further and then run the task if desired.
"""
if configObj is None:
# Start by grabbing the default values without using the GUI
# This insures that all subsequent use of the configObj includes
# all parameters and their last saved values
configObj = teal.load(taskname)
elif isinstance(configObj,str):
if configObj.lower().strip() == 'defaults':
# Load task default .cfg file with all default values
configObj = teal.load(taskname,defaults=True)
# define default filename for configObj
configObj.filename = taskname.lower()+'.cfg'
else:
# Load user-specified .cfg file with its special default values
# we need to call 'fileutil.osfn()' to insure all environment
# variables specified by the user in the configObj filename are
# expanded to the full path
configObj = teal.load(fileutil.osfn(configObj))
# merge in the user values for this run
# this, though, does not save the results for use later
if input_dict not in [None,{}]:# and configObj not in [None, {}]:
# check to see whether any input parameters are unexpected.
# Any unexpected parameters provided on input should be reported and
# the code should stop
validateUserPars(configObj,input_dict)
# If everything looks good, merge user inputs with configObj and continue
cfgpars.mergeConfigObj(configObj, input_dict)
# Update the input .cfg file with the updated parameter values
#configObj.filename = os.path.join(cfgpars.getAppDir(),os.path.basename(configObj.filename))
#configObj.write()
if not loadOnly:
# We want to run the GUI AFTER merging in any parameters
# specified by the user on the command-line and provided in
# input_dict
configObj = teal.teal(configObj,loadOnly=False)
return configObj | Return default configObj instance for task updated
with user-specified values from input_dict.
Parameters
----------
taskname : string
Name of task to load into TEAL
configObj : string
The valid values for 'configObj' would be::
None - loads last saved user .cfg file
'defaults' - loads task default .cfg file
name of .cfg file (string)- loads user-specified .cfg file
input_dict : dict
Set of parameters and values specified by user to be different from
what gets loaded in from the .cfg file for the task
loadOnly : bool
Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
user to edit the values further and then run the task if desired. | python | train
albertz/py_better_exchook | better_exchook.py | https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L219-L260 | def simple_debug_shell(globals, locals):
"""
:param dict[str] globals:
:param dict[str] locals:
:return: nothing
"""
try:
import readline
except ImportError:
pass # ignore
compile_string_fn = "<simple_debug_shell input>"
while True:
try:
s = raw_input("> ")
except (KeyboardInterrupt, EOFError):
print("stopped debug shell: " + sys.exc_info()[0].__name__)
break
if s.strip() == "":
continue
try:
c = compile(s, compile_string_fn, "single")
except Exception as e:
print("%s : %s in %r" % (e.__class__.__name__, str(e), s))
else:
set_linecache(compile_string_fn, s)
# noinspection PyBroadException
try:
ret = eval(c, globals, locals)
except (KeyboardInterrupt, SystemExit):
print("debug shell exit: " + sys.exc_info()[0].__name__)
break
except Exception:
print("Error executing %r" % s)
better_exchook(*sys.exc_info(), autodebugshell=False)
else:
# noinspection PyBroadException
try:
if ret is not None:
print(ret)
except Exception:
print("Error printing return value of %r" % s)
better_exchook(*sys.exc_info(), autodebugshell=False) | :param dict[str] globals:
:param dict[str] locals:
:return: nothing | python | train
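The record above is essentially a small read-compile-eval loop. A minimal, hedged sketch of the same pattern follows, written standalone for Python 3; the function and prompt names below are illustrative and not part of the original module.
import sys

def tiny_debug_shell(namespace):
    # Minimal stand-in for simple_debug_shell(): read a line, compile it in
    # "single" mode so bare expression results are echoed, then execute it.
    while True:
        try:
            line = input("> ")
        except (KeyboardInterrupt, EOFError):
            print("leaving debug shell: " + sys.exc_info()[0].__name__)
            break
        if not line.strip():
            continue
        try:
            code = compile(line, "<tiny_debug_shell input>", "single")
        except SyntaxError as exc:
            print("SyntaxError: %s" % exc)
            continue
        try:
            exec(code, namespace)
        except SystemExit:
            break
        except Exception:
            sys.excepthook(*sys.exc_info())

# tiny_debug_shell({"x": 41})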
shaypal5/pdutil | pdutil/display/display.py | https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/display/display.py#L9-L42 | def df_string(df, percentage_columns=(), format_map=None, **kwargs):
"""Return a nicely formatted string for the given dataframe.
Arguments
---------
df : pandas.DataFrame
A dataframe object.
percentage_columns : iterable
A list of column names to be displayed with a percentage sign.
Returns
-------
str
A nicely formatted string for the given dataframe.
Example
-------
>>> import pandas as pd
>>> df = pd.DataFrame([[8,'a'],[5,'b']],[1,2],['num', 'char'])
>>> print(df_string(df))
num char
1 8 a
2 5 b
"""
formatters_map = {}
for col, dtype in df.dtypes.iteritems():
if col in percentage_columns:
formatters_map[col] = '{:,.2f} %'.format
elif dtype == 'float64':
formatters_map[col] = '{:,.2f}'.format
if format_map:
for key in format_map:
formatters_map[key] = format_map[key]
return df.to_string(formatters=formatters_map, **kwargs) | Return a nicely formatted string for the given dataframe.
Arguments
---------
df : pandas.DataFrame
A dataframe object.
percentage_columns : iterable
A list of column names to be displayed with a percentage sign.
Returns
-------
str
A nicely formatted string for the given dataframe.
Example
-------
>>> import pandas as pd
>>> df = pd.DataFrame([[8,'a'],[5,'b']],[1,2],['num', 'char'])
>>> print(df_string(df))
num char
1 8 a
2 5 b | python | train
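A short, hedged usage sketch for the record above; the import path is inferred from the record's file path and the column values are invented, so treat it as illustrative rather than the package's documented surface.
import pandas as pd
from pdutil.display.display import df_string  # import path inferred from the record

df = pd.DataFrame({"share": [12.5, 87.5], "price": [3.0, 21.5]}, index=["a", "b"])
# 'share' is float64, so listing it in percentage_columns switches its format
# from '{:,.2f}' to '{:,.2f} %'; 'price' keeps the plain two-decimal format.
print(df_string(df, percentage_columns=("share",)))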
sethmlarson/virtualbox-python | virtualbox/library_ext/virtual_system_description.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library_ext/virtual_system_description.py#L18-L48 | def set_final_value(self, description_type, value):
"""Set the value for the given description type.
in description_type type :class:`VirtualSystemDescriptionType`
in value type str
"""
types, _, _, vbox_values, extra_config = self.get_description()
# find offset to description type
for offset, t in enumerate(types):
if t == description_type:
break
else:
raise Exception("Failed to find type for %s" % description_type)
enabled = [True] * len(types)
vbox_values = list(vbox_values)
extra_config = list(extra_config)
if isinstance(value, basestring):
final_value = value
elif isinstance(value, Enum):
final_value = str(value._value)
elif isinstance(value, int):
final_value = str(value)
else:
raise ValueError("Incorrect value type.")
vbox_values[offset] = final_value
self.set_final_values(enabled, vbox_values, extra_config) | Set the value for the given description type.
in description_type type :class:`VirtualSystemDescriptionType`
in value type str | python | train
fboender/ansible-cmdb | src/ansiblecmdb/ansible.py | https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/ansible.py#L363-L390 | def get_hosts(self):
"""
Return a list of parsed hosts info, with the limit applied if required.
"""
limited_hosts = {}
if self.limit is not None:
# Find hosts and groups of hosts to include
for include in self.limit['include']:
# Include whole group
for hostname in self.hosts_in_group(include):
limited_hosts[hostname] = self.hosts[hostname]
# Include individual host
if include in self.hosts:
limited_hosts[include] = self.hosts[include]
# Find hosts and groups of hosts to exclude
for exclude in self.limit["exclude"]:
# Exclude whole group
for hostname in self.hosts_in_group(exclude):
if hostname in limited_hosts:
limited_hosts.pop(hostname)
# Exclude individual host
if exclude in limited_hosts:
limited_hosts.pop(exclude)
return limited_hosts
else:
# Return all hosts
return self.hosts | Return a list of parsed hosts info, with the limit applied if required. | python | train
anomaly/vishnu | vishnu/cipher.py | https://github.com/anomaly/vishnu/blob/5b3a6a69beedc8554cc506ddfab273760d61dc65/vishnu/cipher.py#L62-L72 | def decrypt(self, encrypted):
"""
Base64 decodes the data and then decrypts using AES.
:param encrypted:
:return:
"""
decoded = b64decode(encrypted)
init_vec = decoded[:AES.block_size]
cipher = AES.new(self._key, AES.MODE_CBC, init_vec)
return AESCipher.unpad(cipher.decrypt(decoded[AES.block_size:])) | Base64 decodes the data and then decrypts using AES.
:param encrypted:
:return: | python | train
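A standalone, hedged round trip showing the ciphertext layout the method above expects: base64(IV || AES-CBC ciphertext), with the IV in the first block. PyCryptodome and PKCS#7-style padding are assumptions here; the class's own pad/unpad helpers and key handling are not shown in the record.
from base64 import b64decode, b64encode

from Crypto.Cipher import AES          # PyCryptodome, assumed
from Crypto.Random import get_random_bytes

key = get_random_bytes(32)
plaintext = b"session-token=abc123"

# Assumed counterpart of the class's encrypt(): PKCS#7 pad, prepend the IV, base64-encode.
pad_len = AES.block_size - len(plaintext) % AES.block_size
padded = plaintext + bytes([pad_len]) * pad_len
iv = get_random_bytes(AES.block_size)
encrypted = b64encode(iv + AES.new(key, AES.MODE_CBC, iv).encrypt(padded))

# Decryption mirrors the method in the record.
decoded = b64decode(encrypted)
cipher = AES.new(key, AES.MODE_CBC, decoded[:AES.block_size])
recovered = cipher.decrypt(decoded[AES.block_size:])
print(recovered[:-recovered[-1]])      # strip the padding -> b'session-token=abc123'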
bpsmith/tia | tia/bbg/bbg_com.py | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/bbg_com.py#L406-L417 | def on_security_data_node(self, node):
"""process a securityData node - FIXME: currently not handling relateDate node """
sid = XmlHelper.get_child_value(node, 'security')
farr = node.GetElement('fieldData')
dmap = defaultdict(list)
for i in range(farr.NumValues):
pt = farr.GetValue(i)
[dmap[f].append(XmlHelper.get_child_value(pt, f, allow_missing=1)) for f in ['date'] + self.fields]
idx = dmap.pop('date')
frame = DataFrame(dmap, columns=self.fields, index=idx)
frame.index.name = 'date'
self.response[sid] = frame | process a securityData node - FIXME: currently not handling relateDate node | python | train
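A hedged, Bloomberg-free sketch of the accumulation idiom in the record above: per-field lists collected in a defaultdict and turned into a date-indexed DataFrame. The field names and sample rows are made up.
from collections import defaultdict

from pandas import DataFrame

fields = ["PX_LAST", "VOLUME"]          # illustrative field names
points = [
    {"date": "2024-01-02", "PX_LAST": 101.2, "VOLUME": 900},
    {"date": "2024-01-03", "PX_LAST": 102.7, "VOLUME": 1250},
]

dmap = defaultdict(list)
for pt in points:
    for f in ["date"] + fields:
        dmap[f].append(pt.get(f))       # missing fields simply become None

idx = dmap.pop("date")
frame = DataFrame(dmap, columns=fields, index=idx)
frame.index.name = "date"
print(frame)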
manns/pyspread | pyspread/src/gui/_dialogs.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L881-L907 | def OnApply(self, event):
"""Event handler for Apply button"""
# See if we have valid python
try:
ast.parse(self.macros)
except:
# Grab the traceback and print it for the user
s = StringIO()
e = exc_info()
# usr_tb will more than likely be none because ast throws
# SytnaxErrorsas occurring outside of the current
# execution frame
usr_tb = get_user_codeframe(e[2]) or None
print_exception(e[0], e[1], usr_tb, None, s)
post_command_event(self.parent, self.MacroErrorMsg,
err=s.getvalue())
success = False
else:
self.result_ctrl.SetValue('')
post_command_event(self.parent, self.MacroReplaceMsg,
macros=self.macros)
post_command_event(self.parent, self.MacroExecuteMsg)
success = True
event.Skip()
return success | Event handler for Apply button | python | train
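A hedged, GUI-free sketch of the validation idiom used in the handler above: try ast.parse() on the user's macro source and, on failure, capture the formatted traceback into a string instead of letting it go to stderr. The function and variable names are illustrative, not pyspread's API.
import ast
from io import StringIO
from sys import exc_info
from traceback import print_exception

def check_macro_source(source):
    # Returns None when the source parses, otherwise the formatted traceback text.
    try:
        ast.parse(source)
        return None
    except SyntaxError:
        buf = StringIO()
        etype, value, _ = exc_info()
        print_exception(etype, value, None, None, buf)
        return buf.getvalue()

print(check_macro_source("def broken(:\n    pass"))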
wndhydrnt/python-oauth2 | oauth2/grant.py | https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/grant.py#L56-L66 | def json_error_response(error, response, status_code=400):
"""
Formats an error as a response containing a JSON body.
"""
msg = {"error": error.error, "error_description": error.explanation}
response.status_code = status_code
response.add_header("Content-Type", "application/json")
response.body = json.dumps(msg)
return response | Formats an error as a response containing a JSON body. | python | train
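A hedged usage sketch for the helper above. The two stub classes are illustrative stand-ins for python-oauth2's error and response objects (only the attributes the helper touches are provided), and the import path is inferred from the record's file path.
from oauth2.grant import json_error_response  # import path inferred from the record

class StubError:
    error = "invalid_request"
    explanation = "Missing redirect_uri parameter."

class StubResponse:
    status_code = 200
    body = ""
    def add_header(self, name, value):
        print("header:", name, "=", value)

resp = json_error_response(StubError(), StubResponse())
print(resp.status_code, resp.body)   # 400 and the JSON-encoded error message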
saltstack/salt | salt/utils/win_pdh.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L183-L191 | def add_to_query(self, query):
'''
Add the current path to the query
Args:
query (obj):
The handle to the query to add the counter
'''
self.handle = win32pdh.AddCounter(query, self.path) | Add the current path to the query
Args:
query (obj):
The handle to the query to add the counter | python | train
fermiPy/fermipy | fermipy/irfs.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/irfs.py#L915-L932 | def calc_wtd_exp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=16):
"""Calculate the effective exposure.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
nbin : int
Number of points per decade with which to sample true energy.
"""
cnts = calc_counts_edisp(skydir, ltc, event_class, event_types,
egy_bins, cth_bins, fn, nbin=nbin)
flux = fn.flux(egy_bins[:-1], egy_bins[1:])
return cnts / flux[:, None] | Calculate the effective exposure.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ltc : `~fermipy.irfs.LTCube`
nbin : int
Number of points per decade with which to sample true energy. | python | train
bovee/Aston | aston/trace/math_chromatograms.py | https://github.com/bovee/Aston/blob/007630fdf074690373d03398fe818260d3d3cf5a/aston/trace/math_chromatograms.py#L6-L11 | def molmz(df, noise=10000):
"""
The mz of the molecular ion.
"""
d = ((df.values > noise) * df.columns).max(axis=1)
return Trace(d, df.index, name='molmz') | The mz of the molecular ion. | python | train
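A hedged sketch of the boolean-mask trick used above, in plain pandas/numpy without aston's Trace wrapper: for every scan (row) keep the largest m/z column whose intensity clears the noise floor. The numbers are invented.
import numpy as np
import pandas as pd

mzs = [50.0, 77.0, 105.0, 182.0]         # m/z values as column labels
df = pd.DataFrame(
    [[20000, 15000, 9000, 0],
     [500, 30000, 25000, 12000]],
    index=[0.10, 0.20],                  # retention times
    columns=mzs,
)
noise = 10000
# Mirrors the record's ((df.values > noise) * df.columns).max(axis=1):
# masking zeroes out columns at or below the noise floor, so the row-wise
# max is the largest m/z still above it.
masked = (df.values > noise) * np.asarray(df.columns)
print(masked.max(axis=1))                # [ 77. 182.]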
nikhilkumarsingh/gnewsclient | gnewsclient/gnewsclient.py | https://github.com/nikhilkumarsingh/gnewsclient/blob/65422f1dee9408f1b51ae6a2ee08ae478432e1d5/gnewsclient/gnewsclient.py#L36-L51 | def params_dict(self):
"""
function to get params dict for HTTP request
"""
location_code = 'US'
language_code = 'en'
if len(self.location):
location_code = locationMap[process.extractOne(self.location, self.locations)[0]]
if len(self.language):
language_code = langMap[process.extractOne(self.language, self.languages)[0]]
params = {
'hl': language_code,
'gl': location_code,
'ceid': '{}:{}'.format(location_code, language_code)
}
return params | function to get params dict for HTTP request | python | train
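A hedged, standalone look at the fuzzy lookup the property above relies on (fuzzywuzzy's process.extractOne returns a (match, score) pair); the tiny maps below are stand-ins for the client's location tables, not the package's own data.
from fuzzywuzzy import process

locations = ["United States", "India", "United Kingdom"]   # illustrative subset
locationMap = {"United States": "US", "India": "IN", "United Kingdom": "GB"}

user_input = "india"
best_match, score = process.extractOne(user_input, locations)
params = {"gl": locationMap[best_match], "hl": "en",
          "ceid": "{}:{}".format(locationMap[best_match], "en")}
print(params)   # {'gl': 'IN', 'hl': 'en', 'ceid': 'IN:en'}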
ejeschke/ginga | ginga/ImageView.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L1866-L1881 | def offset_to_window(self, off_x, off_y):
"""Convert data offset to window coordinates.
Parameters
----------
off_x, off_y : float or ndarray
Data offsets.
Returns
-------
coord : tuple
Offset in window coordinates in the form of ``(x, y)``.
"""
arr_pts = np.asarray((off_x, off_y)).T
return self.tform['cartesian_to_native'].to_(arr_pts).T[:2] | Convert data offset to window coordinates.
Parameters
----------
off_x, off_y : float or ndarray
Data offsets.
Returns
-------
coord : tuple
Offset in window coordinates in the form of ``(x, y)``. | python | train
titusjan/argos | argos/config/configitemdelegate.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/configitemdelegate.py#L92-L109 | def setModelData(self, editor, model, index):
""" Gets data from the editor widget and stores it in the specified model at the item index.
Does this by calling getEditorValue of the config tree item at the index.
:type editor: QWidget
:type model: ConfigTreeModel
:type index: QModelIndex
Reimplemented from QStyledItemDelegate.
"""
try:
data = editor.getData()
except InvalidInputError as ex:
logger.warn(ex)
else:
# The value is set via the model so that signals are emitted
logger.debug("ConfigItemDelegate.setModelData: {}".format(data))
model.setData(index, data, Qt.EditRole) | Gets data from the editor widget and stores it in the specified model at the item index.
Does this by calling getEditorValue of the config tree item at the index.
:type editor: QWidget
:type model: ConfigTreeModel
:type index: QModelIndex
Reimplemented from QStyledItemDelegate. | python | train
robotframework/Rammbock | src/Rammbock/rammbock.py | https://github.com/robotframework/Rammbock/blob/c906058d055a6f7c68fe1a6096d78c2e3f642b1c/src/Rammbock/rammbock.py#L164-L172 | def embed_seqdiag_sequence(self):
"""Create a message sequence diagram png file to output folder and embed the image to log file.
You need to have seqdiag installed to create the sequence diagram. See http://blockdiag.com/en/seqdiag/
"""
test_name = BuiltIn().replace_variables('${TEST NAME}')
outputdir = BuiltIn().replace_variables('${OUTPUTDIR}')
path = os.path.join(outputdir, test_name + '.seqdiag')
SeqdiagGenerator().compile(path, self._message_sequence) | Create a message sequence diagram png file to output folder and embed the image to log file.
You need to have seqdiag installed to create the sequence diagram. See http://blockdiag.com/en/seqdiag/ | python | train
log2timeline/plaso | plaso/cli/tools.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/tools.py#L180-L190 | def _PromptUserForInput(self, input_text):
"""Prompts user for an input.
Args:
input_text (str): text used for prompting the user for input.
Returns:
str: input read from the user.
"""
self._output_writer.Write('{0:s}: '.format(input_text))
return self._input_reader.Read() | Prompts user for an input.
Args:
input_text (str): text used for prompting the user for input.
Returns:
str: input read from the user. | python | train
tensorflow/cleverhans | examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/classification_results.py#L286-L292 | def init_from_datastore(self):
"""Initializes data by reading it from the datastore."""
self._data = {}
client = self._datastore_client
for entity in client.query_fetch(kind=KIND_CLASSIFICATION_BATCH):
class_batch_id = entity.key.flat_path[-1]
self.data[class_batch_id] = dict(entity) | Initializes data by reading it from the datastore. | python | train
mitsei/dlkit | dlkit/json_/learning/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/objects.py#L877-L887 | def get_courses_metadata(self):
"""Gets the metadata for the courses.
return: (osid.Metadata) - metadata for the courses
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._mdata['courses'])
metadata.update({'existing_courses_values': self._my_map['courseIds']})
return Metadata(**metadata) | Gets the metadata for the courses.
return: (osid.Metadata) - metadata for the courses
*compliance: mandatory -- This method must be implemented.* | python | train
Erotemic/utool | utool/util_cache.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L906-L910 | def get_global_shelf_fpath(appname='default', ensure=False):
""" Returns the filepath to the global shelf """
global_cache_dir = get_global_cache_dir(appname, ensure=ensure)
shelf_fpath = join(global_cache_dir, meta_util_constants.global_cache_fname)
return shelf_fpath | Returns the filepath to the global shelf | python | train
hydpy-dev/hydpy | hydpy/cythons/modelutils.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/cythons/modelutils.py#L1142-L1168 | def cleanlines(self):
"""Cleaned code lines.
Implemented cleanups:
* eventually remove method version
* remove docstrings
* remove comments
* remove empty lines
* remove line breaks within brackets
* replace `modelutils` with nothing
* remove complete lines containing `fastaccess`
* replace shortcuts with complete references
"""
code = inspect.getsource(self.func)
code = '\n'.join(code.split('"""')[::2])
code = code.replace('modelutils.', '')
for (name, shortcut) in zip(self.collectornames,
self.collectorshortcuts):
code = code.replace('%s.' % shortcut, 'self.%s.' % name)
code = self.remove_linebreaks_within_equations(code)
lines = code.splitlines()
self.remove_imath_operators(lines)
lines[0] = 'def %s(self):' % self.funcname
lines = [l.split('#')[0] for l in lines]
lines = [l for l in lines if 'fastaccess' not in l]
lines = [l.rstrip() for l in lines if l.rstrip()]
return Lines(*lines) | Cleaned code lines.
Implemented cleanups:
* eventually remove method version
* remove docstrings
* remove comments
* remove empty lines
* remove line breaks within brackets
* replace `modelutils` with nothing
* remove complete lines containing `fastaccess`
* replace shortcuts with complete references | python | train
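One step of the method above in isolation: splitting source text on triple quotes and keeping every other chunk drops the docstrings. A hedged, standalone sketch with an invented snippet:
code = 'def f(x):\n    """Return x squared."""\n    return x**2\n'
stripped = "\n".join(code.split('"""')[::2])
print(stripped)   # the docstring is gone, the surrounding code survives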
Danielhiversen/pyTibber | tibber/__init__.py | https://github.com/Danielhiversen/pyTibber/blob/114ebc3dd49f6affd93665b0862d4cbdea03e9ef/tibber/__init__.py#L493-L500 | def price_unit(self):
"""Return the price unit."""
currency = self.currency
consumption_unit = self.consumption_unit
if not currency or not consumption_unit:
_LOGGER.error("Could not find price_unit.")
return " "
return currency + "/" + consumption_unit | Return the price unit. | python | train
saltstack/salt | salt/modules/boto_efs.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L340-L365 | def delete_tags(filesystemid,
tags,
keyid=None,
key=None,
profile=None,
region=None,
**kwargs):
'''
Deletes the specified tags from a file system.
filesystemid
(string) - ID of the file system whose tags will be removed.
tags
(list[string]) - The tag keys to delete from the file system
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.delete_tags
'''
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
client.delete_tags(FileSystemId=filesystemid, Tags=tags) | Deletes the specified tags from a file system.
filesystemid
(string) - ID of the file system whose tags will be removed.
tags
(list[string]) - The tag keys to delete from the file system
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.delete_tags | python | train
IBMStreams/pypi.streamsx | streamsx/rest_primitives.py | https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L1859-L1875 | def create_application_configuration(self, name, properties, description=None):
"""Create an application configuration.
Args:
name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a
.. versionadded 1.12
"""
if not hasattr(self, 'applicationConfigurations'):
raise NotImplementedError()
cv = ApplicationConfiguration._props(name, properties, description)
res = self.rest_client.session.post(self.applicationConfigurations,
headers = {'Accept' : 'application/json'},
json=cv)
_handle_http_errors(res)
return ApplicationConfiguration(res.json(), self.rest_client) | Create an application configuration.
Args:
name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a
.. versionadded 1.12 | python | train
bitesofcode/projexui | projexui/widgets/xtreewidget/xtreewidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L321-L330 | def __setUserMinimumSize( self, section, oldSize, newSize ):
"""
Records the user minimum size for a column.
:param section | <int>
oldSize | <int>
newSize | <int>
"""
if self.isVisible():
self._columnMinimums[section] = newSize | Records the user minimum size for a column.
:param section | <int>
oldSize | <int>
newSize | <int> | python | train
jtwhite79/pyemu | pyemu/utils/gw_utils.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/gw_utils.py#L2107-L2180 | def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_namefile is true, only text up to first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values
"""
if ins_file is None:
ins_file = gage_output_file + '.ins'
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, 'r').readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split('.')[0]
else:
gage_num = re.sub("[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0])
# get the column names
cols = [i.lower() for i in header if 'data' in i.lower()][0].lower().replace('"', '').replace('data:', '').split()
# make sure "Flow" is included in the columns
if 'flow' not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == 'flow')[0][0]
# write out the instruction file lines
inslines = ['l1 ' + (flowidx + 1) * 'w ' + '!g{0}_{1:d}!'.format(gage_num, j)
for j in range(len(indat) - len(header))]
inslines[0] = inslines[0].replace('l1', 'l{0:d}'.format(len(header) + 1))
# write the instruction file
with open(ins_file, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('{0}\n'.format(line)) for line in inslines]
df = _try_run_inschek(ins_file, gage_output_file)
if df is not None:
return df, ins_file, gage_output_file
else:
print("Inschek didn't run so nothing returned")
return None | writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_namefile is true, only text up to first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values | python | train
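A hedged sketch of just the instruction-line construction from the function above, with a made-up gage number and column layout, to show what the generated .ins lines look like:
gage_num = "12"      # made-up gage number
flowidx = 2          # "flow" found in the 3rd data column
n_header, n_rows = 2, 4

inslines = ["l1 " + (flowidx + 1) * "w " + "!g{0}_{1:d}!".format(gage_num, j)
            for j in range(n_rows)]
inslines[0] = inslines[0].replace("l1", "l{0:d}".format(n_header + 1))
print("\n".join(inslines))
# l3 w w w !g12_0!
# l1 w w w !g12_1!
# l1 w w w !g12_2!
# l1 w w w !g12_3!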
gwastro/pycbc-glue | pycbc_glue/pipeline.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L1381-L1397 | def write_job(self,fh):
"""
Write the DAG entry for this node's job to the DAG file descriptor.
@param fh: descriptor of open DAG file.
"""
if isinstance(self.job(),CondorDAGManJob):
# create an external subdag from this dag
fh.write( ' '.join(
['SUBDAG EXTERNAL', self.__name, self.__job.get_sub_file()]) )
if self.job().get_dag_directory():
fh.write( ' DIR ' + self.job().get_dag_directory() )
else:
# write a regular condor job
fh.write( 'JOB ' + self.__name + ' ' + self.__job.get_sub_file() )
fh.write( '\n')
fh.write( 'RETRY ' + self.__name + ' ' + str(self.__retry) + '\n' ) | Write the DAG entry for this node's job to the DAG file descriptor.
@param fh: descriptor of open DAG file. | python | train
shaypal5/strct | strct/sortedlists/sortedlist.py | https://github.com/shaypal5/strct/blob/f3a301692d052ddb79331230b3c00625db1d83fc/strct/sortedlists/sortedlist.py#L110-L151 | def find_range_in_section_list(start, end, section_list):
"""Returns all sections belonging to the given range.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31]. As such, this function
will return [5, 8] for the range (7, 9) and [5, 8, 30] for the range (7, 30).
Parameters
---------
start : float
The start of the desired range.
end : float
The end of the desired range.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
iterable
The starting points of all sections belonging to the given range.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_range_in_section_list(3, 4, seclist)
[]
>>> find_range_in_section_list(6, 7, seclist)
[5]
>>> find_range_in_section_list(7, 9, seclist)
[5, 8]
>>> find_range_in_section_list(7, 30, seclist)
[5, 8, 30]
>>> find_range_in_section_list(7, 321, seclist)
[5, 8, 30]
>>> find_range_in_section_list(4, 321, seclist)
[5, 8, 30]
"""
ind = find_range_ix_in_section_list(start, end, section_list)
return section_list[ind[0]: ind[1]] | Returns all sections belonging to the given range.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31]. As such, this function
will return [5,8] for the range (7,9) and [5,8,30] for (7, 30).
Parameters
---------
start : float
The start of the desired range.
end : float
The end of the desired range.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
iterable
The starting points of all sections belonging to the given range.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_range_in_section_list(3, 4, seclist)
[]
>>> find_range_in_section_list(6, 7, seclist)
[5]
>>> find_range_in_section_list(7, 9, seclist)
[5, 8]
>>> find_range_in_section_list(7, 30, seclist)
[5, 8, 30]
>>> find_range_in_section_list(7, 321, seclist)
[5, 8, 30]
>>> find_range_in_section_list(4, 321, seclist)
[5, 8, 30] | [
"Returns",
"all",
"sections",
"belonging",
"to",
"the",
"given",
"range",
"."
] | python | train |
swimlane/swimlane-python | swimlane/core/fields/usergroup.py | https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/fields/usergroup.py#L105-L110 | def set_swimlane(self, value):
"""Workaround for reports returning an empty usergroup field as a single element list with no id/name"""
if value == [{"$type": "Core.Models.Utilities.UserGroupSelection, Core"}]:
value = []
return super(UserGroupField, self).set_swimlane(value) | [
"def",
"set_swimlane",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"==",
"[",
"{",
"\"$type\"",
":",
"\"Core.Models.Utilities.UserGroupSelection, Core\"",
"}",
"]",
":",
"value",
"=",
"[",
"]",
"return",
"super",
"(",
"UserGroupField",
",",
"self",
")",
".",
"set_swimlane",
"(",
"value",
")"
] | Workaround for reports returning an empty usergroup field as a single element list with no id/name | [
"Workaround",
"for",
"reports",
"returning",
"an",
"empty",
"usergroup",
"field",
"as",
"a",
"single",
"element",
"list",
"with",
"no",
"id",
"/",
"name"
] | python | train |
spacetelescope/pysynphot | pysynphot/observation.py | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/observation.py#L26-L78 | def check_overlap(a, b):
"""Check for wavelength overlap between two spectra.
.. note::
Generalized from
:meth:`pysynphot.spectrum.SpectralElement.check_overlap`.
Parameters
----------
a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Typically a source spectrum, spectral element, observation,
or bandpass from observation mode.
Returns
-------
result : {'full', 'partial', 'none'}
Full, partial, or no overlap.
Raises
------
AttributeError
Given spectrum does not have flux or throughput.
"""
if a.isAnalytic or b.isAnalytic:
#then it's defined everywhere
result = 'full'
else:
#get the wavelength arrays
waves = list()
for x in (a, b):
if hasattr(x,'throughput'):
wv = x.wave[np.where(x.throughput != 0)]
elif hasattr(x,'flux'):
wv = x.wave
else:
raise AttributeError("neither flux nor throughput in %s"%x)
waves.append(wv)
#get the endpoints
a1,a2 = waves[0].min(), waves[0].max()
b1,b2 = waves[1].min(), waves[1].max()
#do the comparison
if (a1>=b1 and a2<=b2):
result = 'full'
elif (a2<b1) or (b2<a1):
result = 'none'
else:
result = 'partial'
return result | [
"def",
"check_overlap",
"(",
"a",
",",
"b",
")",
":",
"if",
"a",
".",
"isAnalytic",
"or",
"b",
".",
"isAnalytic",
":",
"#then it's defined everywhere",
"result",
"=",
"'full'",
"else",
":",
"#get the wavelength arrays",
"waves",
"=",
"list",
"(",
")",
"for",
"x",
"in",
"(",
"a",
",",
"b",
")",
":",
"if",
"hasattr",
"(",
"x",
",",
"'throughput'",
")",
":",
"wv",
"=",
"x",
".",
"wave",
"[",
"np",
".",
"where",
"(",
"x",
".",
"throughput",
"!=",
"0",
")",
"]",
"elif",
"hasattr",
"(",
"x",
",",
"'flux'",
")",
":",
"wv",
"=",
"x",
".",
"wave",
"else",
":",
"raise",
"AttributeError",
"(",
"\"neither flux nor throughput in %s\"",
"%",
"x",
")",
"waves",
".",
"append",
"(",
"wv",
")",
"#get the endpoints",
"a1",
",",
"a2",
"=",
"waves",
"[",
"0",
"]",
".",
"min",
"(",
")",
",",
"waves",
"[",
"0",
"]",
".",
"max",
"(",
")",
"b1",
",",
"b2",
"=",
"waves",
"[",
"1",
"]",
".",
"min",
"(",
")",
",",
"waves",
"[",
"1",
"]",
".",
"max",
"(",
")",
"#do the comparison",
"if",
"(",
"a1",
">=",
"b1",
"and",
"a2",
"<=",
"b2",
")",
":",
"result",
"=",
"'full'",
"elif",
"(",
"a2",
"<",
"b1",
")",
"or",
"(",
"b2",
"<",
"a1",
")",
":",
"result",
"=",
"'none'",
"else",
":",
"result",
"=",
"'partial'",
"return",
"result"
] | Check for wavelength overlap between two spectra.
.. note::
Generalized from
:meth:`pysynphot.spectrum.SpectralElement.check_overlap`.
Parameters
----------
a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Typically a source spectrum, spectral element, observation,
or bandpass from observation mode.
Returns
-------
result : {'full', 'partial', 'none'}
Full, partial, or no overlap.
Raises
------
AttributeError
Given spectrum does not have flux or throughput. | [
"Check",
"for",
"wavelength",
"overlap",
"between",
"two",
"spectra",
"."
] | python | train |
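The classification itself comes down to comparing the endpoints of the two wavelength arrays. A minimal sketch of just that step on plain NumPy arrays (the real function first extracts the arrays from the spectrum or bandpass objects and short-circuits analytic spectra):
import numpy as np

def classify_overlap(wave_a, wave_b):
    # Same endpoint logic as above: 'full' if a lies inside b,
    # 'none' if the two ranges are disjoint, 'partial' otherwise.
    a1, a2 = wave_a.min(), wave_a.max()
    b1, b2 = wave_b.min(), wave_b.max()
    if a1 >= b1 and a2 <= b2:
        return 'full'
    if a2 < b1 or b2 < a1:
        return 'none'
    return 'partial'

print(classify_overlap(np.array([5000., 6000.]), np.array([3000., 11000.])))  # full
print(classify_overlap(np.array([5000., 6000.]), np.array([5500., 11000.])))  # partial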
google/transitfeed | misc/import_ch_zurich.py | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/misc/import_ch_zurich.py#L387-L400 | def Write(self, outpath):
"Writes a .zip file in Google Transit format."
out = zipfile.ZipFile(outpath, mode="w", compression=zipfile.ZIP_DEFLATED)
for filename, func in [('agency.txt', self.WriteAgency),
('calendar.txt', self.WriteCalendar),
('calendar_dates.txt', self.WriteCalendarDates),
('routes.txt', self.WriteRoutes),
('trips.txt', self.WriteTrips),
('stops.txt', self.WriteStations),
('stop_times.txt', self.WriteStopTimes)]:
s = cStringIO.StringIO()
func(s)
out.writestr(filename, s.getvalue())
out.close() | [
"def",
"Write",
"(",
"self",
",",
"outpath",
")",
":",
"out",
"=",
"zipfile",
".",
"ZipFile",
"(",
"outpath",
",",
"mode",
"=",
"\"w\"",
",",
"compression",
"=",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"for",
"filename",
",",
"func",
"in",
"[",
"(",
"'agency.txt'",
",",
"self",
".",
"WriteAgency",
")",
",",
"(",
"'calendar.txt'",
",",
"self",
".",
"WriteCalendar",
")",
",",
"(",
"'calendar_dates.txt'",
",",
"self",
".",
"WriteCalendarDates",
")",
",",
"(",
"'routes.txt'",
",",
"self",
".",
"WriteRoutes",
")",
",",
"(",
"'trips.txt'",
",",
"self",
".",
"WriteTrips",
")",
",",
"(",
"'stops.txt'",
",",
"self",
".",
"WriteStations",
")",
",",
"(",
"'stop_times.txt'",
",",
"self",
".",
"WriteStopTimes",
")",
"]",
":",
"s",
"=",
"cStringIO",
".",
"StringIO",
"(",
")",
"func",
"(",
"s",
")",
"out",
".",
"writestr",
"(",
"filename",
",",
"s",
".",
"getvalue",
"(",
")",
")",
"out",
".",
"close",
"(",
")"
] | Writes a .zip file in Google Transit format. | [
"Writes",
"a",
".",
"zip",
"file",
"in",
"Google",
"Transit",
"format",
"."
] | python | train |
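The same write-each-table-into-the-zip pattern, sketched for Python 3 with io instead of cStringIO; the writer mapping below is a made-up stand-in for the WriteAgency/WriteCalendar/... methods:
import io
import zipfile

def write_feed(outpath, writers):
    # writers maps a filename inside the zip to a callable that writes its text
    with zipfile.ZipFile(outpath, mode='w', compression=zipfile.ZIP_DEFLATED) as out:
        for filename, func in writers.items():
            buf = io.StringIO()
            func(buf)
            out.writestr(filename, buf.getvalue())

write_feed('feed.zip', {'agency.txt': lambda f: f.write('agency_id,agency_name\n1,Demo Transit\n')})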
pyviz/param | param/parameterized.py | https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/param/parameterized.py#L1453-L1475 | def get_param_values(self_,onlychanged=False):
"""
Return a list of name,value pairs for all Parameters of this
object.
When called on an instance with onlychanged set to True, will
only return values that are not equal to the default value
(onlychanged has no effect when called on a class).
"""
self_or_cls = self_.self_or_cls
# CEB: we'd actually like to know whether a value has been
# explicitly set on the instance, but I'm not sure that's easy
# (would need to distinguish instantiation of default from
# user setting of value).
vals = []
for name,val in self_or_cls.param.objects('existing').items():
value = self_or_cls.param.get_value_generator(name)
# (this is pointless for cls)
if not onlychanged or not all_equal(value,val.default):
vals.append((name,value))
vals.sort(key=itemgetter(0))
return vals | [
"def",
"get_param_values",
"(",
"self_",
",",
"onlychanged",
"=",
"False",
")",
":",
"self_or_cls",
"=",
"self_",
".",
"self_or_cls",
"# CEB: we'd actually like to know whether a value has been",
"# explicitly set on the instance, but I'm not sure that's easy",
"# (would need to distinguish instantiation of default from",
"# user setting of value).",
"vals",
"=",
"[",
"]",
"for",
"name",
",",
"val",
"in",
"self_or_cls",
".",
"param",
".",
"objects",
"(",
"'existing'",
")",
".",
"items",
"(",
")",
":",
"value",
"=",
"self_or_cls",
".",
"param",
".",
"get_value_generator",
"(",
"name",
")",
"# (this is pointless for cls)",
"if",
"not",
"onlychanged",
"or",
"not",
"all_equal",
"(",
"value",
",",
"val",
".",
"default",
")",
":",
"vals",
".",
"append",
"(",
"(",
"name",
",",
"value",
")",
")",
"vals",
".",
"sort",
"(",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
"return",
"vals"
] | Return a list of name,value pairs for all Parameters of this
object.
When called on an instance with onlychanged set to True, will
only return values that are not equal to the default value
(onlychanged has no effect when called on a class). | [
"Return",
"a",
"list",
"of",
"name",
"value",
"pairs",
"for",
"all",
"Parameters",
"of",
"this",
"object",
"."
] | python | train |
dh1tw/pyhamtools | pyhamtools/lookuplib.py | https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/lookuplib.py#L1084-L1119 | def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
"""
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
cty_date.replace(tzinfo=UTC)
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partically retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return | [
"def",
"_extract_clublog_header",
"(",
"self",
",",
"cty_xml_filename",
")",
":",
"cty_header",
"=",
"{",
"}",
"try",
":",
"with",
"open",
"(",
"cty_xml_filename",
",",
"\"r\"",
")",
"as",
"cty",
":",
"raw_header",
"=",
"cty",
".",
"readline",
"(",
")",
"cty_date",
"=",
"re",
".",
"search",
"(",
"\"date='.+'\"",
",",
"raw_header",
")",
"if",
"cty_date",
":",
"cty_date",
"=",
"cty_date",
".",
"group",
"(",
"0",
")",
".",
"replace",
"(",
"\"date=\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"cty_date",
"=",
"datetime",
".",
"strptime",
"(",
"cty_date",
"[",
":",
"19",
"]",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"cty_date",
".",
"replace",
"(",
"tzinfo",
"=",
"UTC",
")",
"cty_header",
"[",
"\"Date\"",
"]",
"=",
"cty_date",
"cty_ns",
"=",
"re",
".",
"search",
"(",
"\"xmlns='.+[']\"",
",",
"raw_header",
")",
"if",
"cty_ns",
":",
"cty_ns",
"=",
"cty_ns",
".",
"group",
"(",
"0",
")",
".",
"replace",
"(",
"\"xmlns=\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"cty_header",
"[",
"'NameSpace'",
"]",
"=",
"cty_ns",
"if",
"len",
"(",
"cty_header",
")",
"==",
"2",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Header successfully retrieved from CTY File\"",
")",
"elif",
"len",
"(",
"cty_header",
")",
"<",
"2",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Header could only be partically retrieved from CTY File\"",
")",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Content of Header: \"",
")",
"for",
"key",
"in",
"cty_header",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"str",
"(",
"key",
")",
"+",
"\": \"",
"+",
"str",
"(",
"cty_header",
"[",
"key",
"]",
")",
")",
"return",
"cty_header",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"\"Clublog CTY File could not be opened / modified\"",
")",
"self",
".",
"_logger",
".",
"error",
"(",
"\"Error Message: \"",
"+",
"str",
"(",
"e",
")",
")",
"return"
] | Extract the header of the Clublog XML File | [
"Extract",
"the",
"header",
"of",
"the",
"Clublog",
"XML",
"File"
] | python | train |
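A quick standalone check of the two regular expressions used above; the header line is made up, shaped the way the code expects the first line of a Clublog cty.xml file to look:
import re
from datetime import datetime

raw_header = "<clublog date='2018-01-02T03:04:05+00:00' xmlns='https://clublog.org/cty/v1.2'>"

cty_date = re.search("date='.+'", raw_header).group(0).replace("date=", "").replace("'", "")
cty_ns = re.search("xmlns='.+[']", raw_header).group(0).replace("xmlns=", "").replace("'", "")

print(datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S'))  # 2018-01-02 03:04:05
print(cty_ns)                                                 # https://clublog.org/cty/v1.2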
jamescooke/flake8-aaa | src/flake8_aaa/helpers.py | https://github.com/jamescooke/flake8-aaa/blob/29938b96845fe32ced4358ba66af3b3be2a37794/src/flake8_aaa/helpers.py#L170-L178 | def filter_arrange_nodes(nodes: List[ast.stmt], max_line_number: int) -> List[ast.stmt]:
"""
Finds all nodes that are before the ``max_line_number`` and are not
docstrings or ``pass``.
"""
return [
node for node in nodes if node.lineno < max_line_number and not isinstance(node, ast.Pass)
and not (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))
] | [
"def",
"filter_arrange_nodes",
"(",
"nodes",
":",
"List",
"[",
"ast",
".",
"stmt",
"]",
",",
"max_line_number",
":",
"int",
")",
"->",
"List",
"[",
"ast",
".",
"stmt",
"]",
":",
"return",
"[",
"node",
"for",
"node",
"in",
"nodes",
"if",
"node",
".",
"lineno",
"<",
"max_line_number",
"and",
"not",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Pass",
")",
"and",
"not",
"(",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Expr",
")",
"and",
"isinstance",
"(",
"node",
".",
"value",
",",
"ast",
".",
"Str",
")",
")",
"]"
] | Finds all nodes that are before the ``max_line_number`` and are not
docstrings or ``pass``. | [
"Finds",
"all",
"nodes",
"that",
"are",
"before",
"the",
"max_line_number",
"and",
"are",
"not",
"docstrings",
"or",
"pass",
"."
] | python | train |
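A small usage sketch: parse a toy test function and apply the same filter, taking the assert's line as max_line_number (ast.Str matches docstrings on the Python versions this plugin targeted; newer Pythons produce ast.Constant instead):
import ast

src = '''
def test_add():
    """Docstring is ignored."""
    x = 1
    pass
    result = x + 1
    assert result == 2
'''

body = ast.parse(src).body[0].body
max_line_number = 7  # the assert's line number within the snippet string
arrange = [node for node in body
           if node.lineno < max_line_number and not isinstance(node, ast.Pass)
           and not (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))]
print([type(n).__name__ for n in arrange])  # ['Assign', 'Assign']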
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L1950-L1970 | def share_vm_image(self, vm_image_name, permission):
'''
Share an already replicated OS image. This operation is only for
publishers. You have to be registered as image publisher with Windows
Azure to be able to call this.
vm_image_name:
The name of the virtual machine image to share
permission:
The sharing permission: public, msdn, or private
'''
_validate_not_none('vm_image_name', vm_image_name)
_validate_not_none('permission', permission)
path = self._get_sharing_path_using_vm_image_name(vm_image_name)
query = '&permission=' + permission
path = path + '?' + query.lstrip('&')
return self._perform_put(
path, None, as_async=True, x_ms_version='2015-04-01'
) | [
"def",
"share_vm_image",
"(",
"self",
",",
"vm_image_name",
",",
"permission",
")",
":",
"_validate_not_none",
"(",
"'vm_image_name'",
",",
"vm_image_name",
")",
"_validate_not_none",
"(",
"'permission'",
",",
"permission",
")",
"path",
"=",
"self",
".",
"_get_sharing_path_using_vm_image_name",
"(",
"vm_image_name",
")",
"query",
"=",
"'&permission='",
"+",
"permission",
"path",
"=",
"path",
"+",
"'?'",
"+",
"query",
".",
"lstrip",
"(",
"'&'",
")",
"return",
"self",
".",
"_perform_put",
"(",
"path",
",",
"None",
",",
"as_async",
"=",
"True",
",",
"x_ms_version",
"=",
"'2015-04-01'",
")"
] | Share an already replicated OS image. This operation is only for
publishers. You have to be registered as image publisher with Windows
Azure to be able to call this.
vm_image_name:
The name of the virtual machine image to share
permission:
The sharing permission: public, msdn, or private | [
"Share",
"an",
"already",
"replicated",
"OS",
"image",
".",
"This",
"operation",
"is",
"only",
"for",
"publishers",
".",
"You",
"have",
"to",
"be",
"registered",
"as",
"image",
"publisher",
"with",
"Windows",
"Azure",
"to",
"be",
"able",
"to",
"call",
"this",
"."
] | python | test |
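A hypothetical call sketch; the subscription id, certificate path and image name are placeholders, and a real call needs valid legacy Azure Service Management credentials:
from azure.servicemanagement import ServiceManagementService

sms = ServiceManagementService('<subscription-id>', '<management-certificate.pem>')
sms.share_vm_image('my-replicated-image', 'public')  # permission: 'public', 'msdn' or 'private'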
mongodb/mongo-python-driver | pymongo/client_session.py | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/client_session.py#L382-L502 | def with_transaction(self, callback, read_concern=None, write_concern=None,
read_preference=None):
"""Execute a callback in a transaction.
This method starts a transaction on this session, executes ``callback``
once, and then commits the transaction. For example::
def callback(session):
orders = session.client.db.orders
inventory = session.client.db.inventory
orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}},
{"$inc": {"qty": -100}}, session=session)
with client.start_session() as session:
session.with_transaction(callback)
To pass arbitrary arguments to the ``callback``, wrap your callable
with a ``lambda`` like this::
def callback(session, custom_arg, custom_kwarg=None):
# Transaction operations...
with client.start_session() as session:
session.with_transaction(
lambda s: callback(s, "custom_arg", custom_kwarg=1))
In the event of an exception, ``with_transaction`` may retry the commit
or the entire transaction, therefore ``callback`` may be invoked
multiple times by a single call to ``with_transaction``. Developers
should be mindful of this possibility when writing a ``callback`` that
modifies application state or has any other side-effects.
Note that even when the ``callback`` is invoked multiple times,
``with_transaction`` ensures that the transaction will be committed
at-most-once on the server.
The ``callback`` should not attempt to start new transactions, but
should simply run operations meant to be contained within a
transaction. The ``callback`` should also not commit the transaction;
this is handled automatically by ``with_transaction``. If the
``callback`` does commit or abort the transaction without error,
however, ``with_transaction`` will return without taking further
action.
When ``callback`` raises an exception, ``with_transaction``
automatically aborts the current transaction. When ``callback`` or
:meth:`~ClientSession.commit_transaction` raises an exception that
includes the ``"TransientTransactionError"`` error label,
``with_transaction`` starts a new transaction and re-executes
the ``callback``.
When :meth:`~ClientSession.commit_transaction` raises an exception with
the ``"UnknownTransactionCommitResult"`` error label,
``with_transaction`` retries the commit until the result of the
transaction is known.
This method will cease retrying after 120 seconds has elapsed. This
timeout is not configurable and any exception raised by the
``callback`` or by :meth:`ClientSession.commit_transaction` after the
timeout is reached will be re-raised. Applications that desire a
different timeout duration should not use this method.
:Parameters:
- `callback`: The callable ``callback`` to run inside a transaction.
The callable must accept a single argument, this session. Note,
under certain error conditions the callback may be run multiple
times.
- `read_concern` (optional): The
:class:`~pymongo.read_concern.ReadConcern` to use for this
transaction.
- `write_concern` (optional): The
:class:`~pymongo.write_concern.WriteConcern` to use for this
transaction.
- `read_preference` (optional): The read preference to use for this
transaction. If ``None`` (the default) the :attr:`read_preference`
of this :class:`Database` is used. See
:mod:`~pymongo.read_preferences` for options.
:Returns:
The return value of the ``callback``.
.. versionadded:: 3.9
"""
start_time = monotonic.time()
while True:
self.start_transaction(
read_concern, write_concern, read_preference)
try:
ret = callback(self)
except Exception as exc:
if self._in_transaction:
self.abort_transaction()
if (isinstance(exc, PyMongoError) and
exc.has_error_label("TransientTransactionError") and
_within_time_limit(start_time)):
# Retry the entire transaction.
continue
raise
if self._transaction.state in (
_TxnState.NONE, _TxnState.COMMITTED, _TxnState.ABORTED):
# Assume callback intentionally ended the transaction.
return ret
while True:
try:
self.commit_transaction()
except PyMongoError as exc:
if (exc.has_error_label("UnknownTransactionCommitResult")
and _within_time_limit(start_time)):
# Retry the commit.
continue
if (exc.has_error_label("TransientTransactionError") and
_within_time_limit(start_time)):
# Retry the entire transaction.
break
raise
# Commit succeeded.
return ret | [
"def",
"with_transaction",
"(",
"self",
",",
"callback",
",",
"read_concern",
"=",
"None",
",",
"write_concern",
"=",
"None",
",",
"read_preference",
"=",
"None",
")",
":",
"start_time",
"=",
"monotonic",
".",
"time",
"(",
")",
"while",
"True",
":",
"self",
".",
"start_transaction",
"(",
"read_concern",
",",
"write_concern",
",",
"read_preference",
")",
"try",
":",
"ret",
"=",
"callback",
"(",
"self",
")",
"except",
"Exception",
"as",
"exc",
":",
"if",
"self",
".",
"_in_transaction",
":",
"self",
".",
"abort_transaction",
"(",
")",
"if",
"(",
"isinstance",
"(",
"exc",
",",
"PyMongoError",
")",
"and",
"exc",
".",
"has_error_label",
"(",
"\"TransientTransactionError\"",
")",
"and",
"_within_time_limit",
"(",
"start_time",
")",
")",
":",
"# Retry the entire transaction.",
"continue",
"raise",
"if",
"self",
".",
"_transaction",
".",
"state",
"in",
"(",
"_TxnState",
".",
"NONE",
",",
"_TxnState",
".",
"COMMITTED",
",",
"_TxnState",
".",
"ABORTED",
")",
":",
"# Assume callback intentionally ended the transaction.",
"return",
"ret",
"while",
"True",
":",
"try",
":",
"self",
".",
"commit_transaction",
"(",
")",
"except",
"PyMongoError",
"as",
"exc",
":",
"if",
"(",
"exc",
".",
"has_error_label",
"(",
"\"UnknownTransactionCommitResult\"",
")",
"and",
"_within_time_limit",
"(",
"start_time",
")",
")",
":",
"# Retry the commit.",
"continue",
"if",
"(",
"exc",
".",
"has_error_label",
"(",
"\"TransientTransactionError\"",
")",
"and",
"_within_time_limit",
"(",
"start_time",
")",
")",
":",
"# Retry the entire transaction.",
"break",
"raise",
"# Commit succeeded.",
"return",
"ret"
] | Execute a callback in a transaction.
This method starts a transaction on this session, executes ``callback``
once, and then commits the transaction. For example::
def callback(session):
orders = session.client.db.orders
inventory = session.client.db.inventory
orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}},
{"$inc": {"qty": -100}}, session=session)
with client.start_session() as session:
session.with_transaction(callback)
To pass arbitrary arguments to the ``callback``, wrap your callable
with a ``lambda`` like this::
def callback(session, custom_arg, custom_kwarg=None):
# Transaction operations...
with client.start_session() as session:
session.with_transaction(
lambda s: callback(s, "custom_arg", custom_kwarg=1))
In the event of an exception, ``with_transaction`` may retry the commit
or the entire transaction, therefore ``callback`` may be invoked
multiple times by a single call to ``with_transaction``. Developers
should be mindful of this possibility when writing a ``callback`` that
modifies application state or has any other side-effects.
Note that even when the ``callback`` is invoked multiple times,
``with_transaction`` ensures that the transaction will be committed
at-most-once on the server.
The ``callback`` should not attempt to start new transactions, but
should simply run operations meant to be contained within a
transaction. The ``callback`` should also not commit the transaction;
this is handled automatically by ``with_transaction``. If the
``callback`` does commit or abort the transaction without error,
however, ``with_transaction`` will return without taking further
action.
When ``callback`` raises an exception, ``with_transaction``
automatically aborts the current transaction. When ``callback`` or
:meth:`~ClientSession.commit_transaction` raises an exception that
includes the ``"TransientTransactionError"`` error label,
``with_transaction`` starts a new transaction and re-executes
the ``callback``.
When :meth:`~ClientSession.commit_transaction` raises an exception with
the ``"UnknownTransactionCommitResult"`` error label,
``with_transaction`` retries the commit until the result of the
transaction is known.
This method will cease retrying after 120 seconds has elapsed. This
timeout is not configurable and any exception raised by the
``callback`` or by :meth:`ClientSession.commit_transaction` after the
timeout is reached will be re-raised. Applications that desire a
different timeout duration should not use this method.
:Parameters:
- `callback`: The callable ``callback`` to run inside a transaction.
The callable must accept a single argument, this session. Note,
under certain error conditions the callback may be run multiple
times.
- `read_concern` (optional): The
:class:`~pymongo.read_concern.ReadConcern` to use for this
transaction.
- `write_concern` (optional): The
:class:`~pymongo.write_concern.WriteConcern` to use for this
transaction.
- `read_preference` (optional): The read preference to use for this
transaction. If ``None`` (the default) the :attr:`read_preference`
of this :class:`Database` is used. See
:mod:`~pymongo.read_preferences` for options.
:Returns:
The return value of the ``callback``.
.. versionadded:: 3.9 | [
"Execute",
"a",
"callback",
"in",
"a",
"transaction",
"."
] | python | train |
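The _within_time_limit helper referenced above is not part of this record; a plausible sketch, consistent with the 120-second retry window described in the docstring (using the standard-library time.monotonic rather than the monotonic backport the driver imports):
import time

_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120  # seconds

def _within_time_limit(start_time):
    # True while we are still inside the with_transaction retry window.
    return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT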
DLR-RM/RAFCON | source/rafcon/utils/geometry.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/utils/geometry.py#L70-L89 | def point_in_triangle(p, v1, v2, v3):
"""Checks whether a point is within the given triangle
The function checks whether the given point p is within the triangle defined by the three corner points v1,
v2 and v3.
This is done by checking whether the point is on all three half-planes defined by the three edges of the triangle.
:param p: The point to be checked (tuple with x and y coordinate)
:param v1: First vertex of the triangle (tuple with x and y coordinate)
:param v2: Second vertex of the triangle (tuple with x and y coordinate)
:param v3: Third vertex of the triangle (tuple with x and y coordinate)
:return: True if the point is within the triangle, False if not
"""
def _test(p1, p2, p3):
return (p1[0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p3[1])
b1 = _test(p, v1, v2) < 0.0
b2 = _test(p, v2, v3) < 0.0
b3 = _test(p, v3, v1) < 0.0
return (b1 == b2) and (b2 == b3) | [
"def",
"point_in_triangle",
"(",
"p",
",",
"v1",
",",
"v2",
",",
"v3",
")",
":",
"def",
"_test",
"(",
"p1",
",",
"p2",
",",
"p3",
")",
":",
"return",
"(",
"p1",
"[",
"0",
"]",
"-",
"p3",
"[",
"0",
"]",
")",
"*",
"(",
"p2",
"[",
"1",
"]",
"-",
"p3",
"[",
"1",
"]",
")",
"-",
"(",
"p2",
"[",
"0",
"]",
"-",
"p3",
"[",
"0",
"]",
")",
"*",
"(",
"p1",
"[",
"1",
"]",
"-",
"p3",
"[",
"1",
"]",
")",
"b1",
"=",
"_test",
"(",
"p",
",",
"v1",
",",
"v2",
")",
"<",
"0.0",
"b2",
"=",
"_test",
"(",
"p",
",",
"v2",
",",
"v3",
")",
"<",
"0.0",
"b3",
"=",
"_test",
"(",
"p",
",",
"v3",
",",
"v1",
")",
"<",
"0.0",
"return",
"(",
"b1",
"==",
"b2",
")",
"and",
"(",
"b2",
"==",
"b3",
")"
] | Checks whether a point is within the given triangle
The function checks whether the given point p is within the triangle defined by the three corner points v1,
v2 and v3.
This is done by checking whether the point is on all three half-planes defined by the three edges of the triangle.
:param p: The point to be checked (tuple with x and y coordinate)
:param v1: First vertex of the triangle (tuple with x and y coordinate)
:param v2: Second vertex of the triangle (tuple with x and y coordinate)
:param v3: Third vertex of the triangle (tuple with x and y coordinate)
:return: True if the point is within the triangle, False if not | [
"Checks",
"whether",
"a",
"point",
"is",
"within",
"the",
"given",
"triangle"
] | python | train |
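A quick check with a concrete triangle; the coordinates are picked only for illustration and assume point_in_triangle is importable as defined above:
tri = ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))
print(point_in_triangle((0.25, 0.25), *tri))  # True  - inside
print(point_in_triangle((1.0, 1.0), *tri))    # False - outside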
google/grr | grr/server/grr_response_server/signed_binary_utils.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/signed_binary_utils.py#L113-L149 | def WriteSignedBinaryBlobs(binary_urn,
blobs,
token = None):
"""Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
"""
if _ShouldUseLegacyDatastore():
aff4.FACTORY.Delete(binary_urn, token=token)
with data_store.DB.GetMutationPool() as mutation_pool:
with aff4.FACTORY.Create(
binary_urn,
collects.GRRSignedBlob,
mode="w",
mutation_pool=mutation_pool,
token=token) as fd:
for blob in blobs:
fd.Add(blob, mutation_pool=mutation_pool)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
current_offset = 0
for blob in blobs:
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob.SerializeToString())
blob_references.items.Append(
rdf_objects.BlobReference(
offset=current_offset, size=len(blob.data), blob_id=blob_id))
current_offset += len(blob.data)
data_store.REL_DB.WriteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn), blob_references) | [
"def",
"WriteSignedBinaryBlobs",
"(",
"binary_urn",
",",
"blobs",
",",
"token",
"=",
"None",
")",
":",
"if",
"_ShouldUseLegacyDatastore",
"(",
")",
":",
"aff4",
".",
"FACTORY",
".",
"Delete",
"(",
"binary_urn",
",",
"token",
"=",
"token",
")",
"with",
"data_store",
".",
"DB",
".",
"GetMutationPool",
"(",
")",
"as",
"mutation_pool",
":",
"with",
"aff4",
".",
"FACTORY",
".",
"Create",
"(",
"binary_urn",
",",
"collects",
".",
"GRRSignedBlob",
",",
"mode",
"=",
"\"w\"",
",",
"mutation_pool",
"=",
"mutation_pool",
",",
"token",
"=",
"token",
")",
"as",
"fd",
":",
"for",
"blob",
"in",
"blobs",
":",
"fd",
".",
"Add",
"(",
"blob",
",",
"mutation_pool",
"=",
"mutation_pool",
")",
"if",
"data_store",
".",
"RelationalDBEnabled",
"(",
")",
":",
"blob_references",
"=",
"rdf_objects",
".",
"BlobReferences",
"(",
")",
"current_offset",
"=",
"0",
"for",
"blob",
"in",
"blobs",
":",
"blob_id",
"=",
"data_store",
".",
"BLOBS",
".",
"WriteBlobWithUnknownHash",
"(",
"blob",
".",
"SerializeToString",
"(",
")",
")",
"blob_references",
".",
"items",
".",
"Append",
"(",
"rdf_objects",
".",
"BlobReference",
"(",
"offset",
"=",
"current_offset",
",",
"size",
"=",
"len",
"(",
"blob",
".",
"data",
")",
",",
"blob_id",
"=",
"blob_id",
")",
")",
"current_offset",
"+=",
"len",
"(",
"blob",
".",
"data",
")",
"data_store",
".",
"REL_DB",
".",
"WriteSignedBinaryReferences",
"(",
"_SignedBinaryIDFromURN",
"(",
"binary_urn",
")",
",",
"blob_references",
")"
] | Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with the legacy (non-relational) datastore. | [
"Saves",
"signed",
"blobs",
"to",
"the",
"datastore",
"."
] | python | train |
slimkrazy/python-google-places | googleplaces/__init__.py | https://github.com/slimkrazy/python-google-places/blob/d4b7363e1655cdc091a6253379f6d2a95b827881/googleplaces/__init__.py#L922-L930 | def vicinity(self):
"""Returns a feature name of a nearby location.
Often this feature refers to a street or neighborhood within the given
results.
"""
if self._vicinity == '' and self.details != None and 'vicinity' in self.details:
self._vicinity = self.details['vicinity']
return self._vicinity | [
"def",
"vicinity",
"(",
"self",
")",
":",
"if",
"self",
".",
"_vicinity",
"==",
"''",
"and",
"self",
".",
"details",
"!=",
"None",
"and",
"'vicinity'",
"in",
"self",
".",
"details",
":",
"self",
".",
"_vicinity",
"=",
"self",
".",
"details",
"[",
"'vicinity'",
"]",
"return",
"self",
".",
"_vicinity"
] | Returns a feature name of a nearby location.
Often this feature refers to a street or neighborhood within the given
results. | [
"Returns",
"a",
"feature",
"name",
"of",
"a",
"nearby",
"location",
"."
] | python | train |
deep-compute/basescript | basescript/log.py | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L177-L186 | def critical(self, event=None, *args, **kw):
"""
Process event and call :meth:`logging.Logger.critical` with the result.
"""
if not self._logger.isEnabledFor(logging.CRITICAL):
return
kw = self._add_base_info(kw)
kw['level'] = "critical"
return self._proxy_to_logger('critical', event, *args, **kw) | [
"def",
"critical",
"(",
"self",
",",
"event",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"self",
".",
"_logger",
".",
"isEnabledFor",
"(",
"logging",
".",
"CRITICAL",
")",
":",
"return",
"kw",
"=",
"self",
".",
"_add_base_info",
"(",
"kw",
")",
"kw",
"[",
"'level'",
"]",
"=",
"\"critical\"",
"return",
"self",
".",
"_proxy_to_logger",
"(",
"'critical'",
",",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | Process event and call :meth:`logging.Logger.critical` with the result. | [
"Process",
"event",
"and",
"call",
":",
"meth",
":",
"logging",
".",
"Logger",
".",
"critical",
"with",
"the",
"result",
"."
] | python | train |
awacha/credolib | credolib/procedures.py | https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/procedures.py#L136-L367 | def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
samples=None, raw=False, late_radavg=True, graph_ncols=3,
std_multiplier=3, graph_extension='png',
graph_dpi=80, correlmatrix_colormap='coolwarm',
image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
"""Summarize scattering patterns and curves for all samples defined
by the global `allsamplenames`.
Inputs:
reintegrate (bool, default=True): if the curves are to be obained
by reintegrating the patterns. Otherwise 1D curves are loaded.
dist_tolerance (float, default=3): sample-to-detector distances
nearer than this are considered the same
qranges (dict): a dictionary mapping approximate sample-to-detector
distances (within dist_tolerance) to one-dimensional np.ndarrays
of the desired q-range of the reintegration.
samples (list or None): the names of the samples to summarize. If
None, all samples defined by ``allsamplenames`` are used.
raw (bool, default=False): if raw images are to be treated instead of
the evaluated ones (default).
late_radavg (bool, default=True): if the scattering curves are to
be calculated from the summarized scattering pattern. If False,
scattering curves are calculated from each pattern and will be
averaged.
graph_ncols: the number of columns in graphs (2D patterns,
correlation matrices)
std_multiplier: if the absolute value of the relative discrepancy
is larger than this limit, the exposure is deemed an outlier.
graph_extension: the extension of the produced hardcopy files.
graph_dpi: resolution of the graphs
correlmatrix_colormap: name of the colormap to be used for the
correlation matrices (resolved by matplotlib.cm.get_cmap())
image_colormap: name of the colormap to be used for the scattering
patterns (resolved by matplotlib.cm.get_cmap())
correlmatrix_logarithmic: if the correlation matrix has to be
calculated from the logarithm of the intensity.
"""
if qranges is None:
qranges = {}
ip = get_ipython()
data2d = {}
data1d = {}
headers_tosave = {}
rowavg = {}
if raw:
writemarkdown('# Summarizing RAW images.')
headers = ip.user_ns['_headers']['raw']
rawpart = '_raw' # this will be added in the filenames saved
else:
writemarkdown('# Summarizing CORRECTED images.')
headers = ip.user_ns['_headers']['processed']
rawpart = '' # nothing will be added in the filenames saved
if samples is None:
samples = sorted(ip.user_ns['allsamplenames'])
for samplename in samples:
writemarkdown('## ' + samplename)
headers_sample = [h for h in headers if h.title == samplename]
data2d[samplename] = {}
rowavg[samplename] = {}
data1d[samplename] = {}
headers_tosave[samplename] = {}
dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
if not dists:
writemarkdown('No measurements from sample, skipping.')
continue
fig_2d = plt.figure()
fig_curves = plt.figure()
fig_correlmatrices = plt.figure()
distaxes = {}
correlmatrixaxes = {}
ncols = min(len(dists), graph_ncols)
nrows = int(np.ceil(len(dists) / ncols))
onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
for distidx, dist in enumerate(dists):
writemarkdown("### Distance " + str(dist) + " mm")
headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
distaxes[dist] = fig_2d.add_subplot(
nrows, ncols, distidx + 1)
correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
nrows, ncols, distidx + 1)
# determine the q-range to be used from the qranges argument.
try:
distkey_min = min([np.abs(k - dist)
for k in qranges if np.abs(k - dist) < dist_tolerance])
except ValueError:
# no matching key in qranges dict
qrange = None # request auto-determination of q-range
else:
distkey = [
k for k in qranges if np.abs(k - dist) == distkey_min][0]
qrange = qranges[distkey]
(data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
_collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)
badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
headers_tosave[samplename][dist],
data1d[samplename][dist], dist,
fig_correlmatrices,
correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
os.path.join(ip.user_ns['saveto_dir'], 'correlmatrix_%s_%s' % (
samplename,
('%.2f' % dist).replace('.', '_')) +
rawpart + '.npz'),
logarithmic_correlmatrix=correlmatrix_logarithmic,
cormaptest=cormaptest)
if 'badfsns' not in ip.user_ns:
ip.user_ns['badfsns'] = {}
elif 'badfsns_datcmp' not in ip.user_ns:
ip.user_ns['badfsns_datcmp'] = {}
ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
display(tab)
# Plot the image
try:
data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
norm=matplotlib.colors.LogNorm(),
cmap=matplotlib.cm.get_cmap(image_colormap))
except ValueError:
print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
distaxes[dist].set_xlabel('q (' + qunit() + ')')
distaxes[dist].set_ylabel('q (' + qunit() + ')')
distaxes[dist].set_title(
'%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
['', 's'][len(headers_tosave[samplename][dist]) > 1]))
# Plot the curves
Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
color = 'green'
if h.fsn in badfsns_datcmp:
color = 'magenta'
if h.fsn in badfsns:
color = 'red'
c.loglog(axes=onedimaxes, color=color)
if Istd.shape[1] > 1:
onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
if not late_radavg:
data1d[samplename][dist] = Curve.average(
*data1d[samplename][dist])
else:
data1d[samplename][dist] = (
data2d[samplename][dist].radial_average(
qrange,
errorpropagation=3,
abscissa_errorpropagation=3, raw_result=False))
data1d[samplename][dist].loglog(
label='Average', lw=2, color='k', axes=onedimaxes)
##Saving image, headers, mask and curve
# data2d[samplename][dist].write(
# os.path.join(ip.user_ns['saveto_dir'],
# samplename + '_'+(
# '%.2f' % dist).replace('.', '_') +
# rawpart + '.npz'), plugin='CREDO Reduced')
# data2d[samplename][dist].header.write(
# os.path.join(ip.user_ns['saveto_dir'],
### samplename + '_'+(
# '%.2f' % dist).replace('.', '_') +
# rawpart +'.log'), plugin='CREDO Reduced')
# data2d[samplename][dist].mask.write_to_mat(
# os.path.join(ip.user_ns['saveto_dir'],
# data2d[samplename][dist].mask.maskid+'.mat'))
data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
samplename + '_' + ('%.2f' % dist).replace('.',
'_') + rawpart + '.txt'))
# Report on qrange and flux
q_ = data1d[samplename][dist].q
qmin = q_[q_ > 0].min()
writemarkdown('#### Q-range & flux')
writemarkdown(
'- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
headers_tosave[samplename][dist][0].wavelength, dist))
writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
len(headers_tosave[samplename][dist]),
meastime, meastime / 3600.))
try:
flux = [h.flux for h in headers_tosave[samplename][dist]]
flux = ErrorValue(np.mean(flux), np.std(flux))
writemarkdown("- beam flux (photon/sec): %s" % flux)
except KeyError:
writemarkdown("- *No information on beam flux: dealing with raw data.*")
onedimaxes.set_xlabel('')
onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
# plt.legend(loc='best')
onedimaxes.grid(True, which='both')
onedimaxes.axis('tight')
onedimaxes.set_title(samplename)
onedimstdaxes.set_xlabel('q (' + qunit() + ')')
onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
onedimstdaxes.grid(True, which='both')
onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
onedimstdaxes.set_xscale(onedimaxes.get_xscale())
putlogo(fig_curves)
putlogo(fig_2d)
fig_2d.tight_layout()
fig_correlmatrices.suptitle(samplename)
fig_correlmatrices.tight_layout()
fig_2d.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'averaging2D_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
fig_curves.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'averaging1D_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
putlogo(fig_correlmatrices)
fig_correlmatrices.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'correlation_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
writemarkdown("### Collected images from all distances")
plt.show()
writemarkdown("Updated badfsns list:")
writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
writemarkdown("Updated badfsns list using datcmp:")
writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
ip.user_ns['_data1d'] = data1d
ip.user_ns['_data2d'] = data2d
ip.user_ns['_headers_sample'] = headers_tosave
ip.user_ns['_rowavg'] = rowavg | [
"def",
"summarize",
"(",
"reintegrate",
"=",
"True",
",",
"dist_tolerance",
"=",
"3",
",",
"qranges",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"late_radavg",
"=",
"True",
",",
"graph_ncols",
"=",
"3",
",",
"std_multiplier",
"=",
"3",
",",
"graph_extension",
"=",
"'png'",
",",
"graph_dpi",
"=",
"80",
",",
"correlmatrix_colormap",
"=",
"'coolwarm'",
",",
"image_colormap",
"=",
"'viridis'",
",",
"correlmatrix_logarithmic",
"=",
"True",
",",
"cormaptest",
"=",
"True",
")",
":",
"if",
"qranges",
"is",
"None",
":",
"qranges",
"=",
"{",
"}",
"ip",
"=",
"get_ipython",
"(",
")",
"data2d",
"=",
"{",
"}",
"data1d",
"=",
"{",
"}",
"headers_tosave",
"=",
"{",
"}",
"rowavg",
"=",
"{",
"}",
"if",
"raw",
":",
"writemarkdown",
"(",
"'# Summarizing RAW images.'",
")",
"headers",
"=",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"[",
"'raw'",
"]",
"rawpart",
"=",
"'_raw'",
"# this will be added in the filenames saved",
"else",
":",
"writemarkdown",
"(",
"'# Summarizing CORRECTED images.'",
")",
"headers",
"=",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"[",
"'processed'",
"]",
"rawpart",
"=",
"''",
"# nothing will be added in the filenames saved",
"if",
"samples",
"is",
"None",
":",
"samples",
"=",
"sorted",
"(",
"ip",
".",
"user_ns",
"[",
"'allsamplenames'",
"]",
")",
"for",
"samplename",
"in",
"samples",
":",
"writemarkdown",
"(",
"'## '",
"+",
"samplename",
")",
"headers_sample",
"=",
"[",
"h",
"for",
"h",
"in",
"headers",
"if",
"h",
".",
"title",
"==",
"samplename",
"]",
"data2d",
"[",
"samplename",
"]",
"=",
"{",
"}",
"rowavg",
"[",
"samplename",
"]",
"=",
"{",
"}",
"data1d",
"[",
"samplename",
"]",
"=",
"{",
"}",
"headers_tosave",
"[",
"samplename",
"]",
"=",
"{",
"}",
"dists",
"=",
"get_different_distances",
"(",
"[",
"h",
"for",
"h",
"in",
"headers",
"if",
"h",
".",
"title",
"==",
"samplename",
"]",
",",
"dist_tolerance",
")",
"if",
"not",
"dists",
":",
"writemarkdown",
"(",
"'No measurements from sample, skipping.'",
")",
"continue",
"fig_2d",
"=",
"plt",
".",
"figure",
"(",
")",
"fig_curves",
"=",
"plt",
".",
"figure",
"(",
")",
"fig_correlmatrices",
"=",
"plt",
".",
"figure",
"(",
")",
"distaxes",
"=",
"{",
"}",
"correlmatrixaxes",
"=",
"{",
"}",
"ncols",
"=",
"min",
"(",
"len",
"(",
"dists",
")",
",",
"graph_ncols",
")",
"nrows",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"len",
"(",
"dists",
")",
"/",
"ncols",
")",
")",
"onedimaxes",
"=",
"fig_curves",
".",
"add_axes",
"(",
"(",
"0.1",
",",
"0.3",
",",
"0.8",
",",
"0.5",
")",
")",
"onedimstdaxes",
"=",
"fig_curves",
".",
"add_axes",
"(",
"(",
"0.1",
",",
"0.1",
",",
"0.8",
",",
"0.2",
")",
")",
"for",
"distidx",
",",
"dist",
"in",
"enumerate",
"(",
"dists",
")",
":",
"writemarkdown",
"(",
"\"### Distance \"",
"+",
"str",
"(",
"dist",
")",
"+",
"\" mm\"",
")",
"headers_narrowed",
"=",
"[",
"h",
"for",
"h",
"in",
"headers_sample",
"if",
"abs",
"(",
"float",
"(",
"h",
".",
"distance",
")",
"-",
"dist",
")",
"<",
"dist_tolerance",
"]",
"distaxes",
"[",
"dist",
"]",
"=",
"fig_2d",
".",
"add_subplot",
"(",
"nrows",
",",
"ncols",
",",
"distidx",
"+",
"1",
")",
"correlmatrixaxes",
"[",
"dist",
"]",
"=",
"fig_correlmatrices",
".",
"add_subplot",
"(",
"nrows",
",",
"ncols",
",",
"distidx",
"+",
"1",
")",
"# determine the q-range to be used from the qranges argument.",
"try",
":",
"distkey_min",
"=",
"min",
"(",
"[",
"np",
".",
"abs",
"(",
"k",
"-",
"dist",
")",
"for",
"k",
"in",
"qranges",
"if",
"np",
".",
"abs",
"(",
"k",
"-",
"dist",
")",
"<",
"dist_tolerance",
"]",
")",
"except",
"ValueError",
":",
"# no matching key in qranges dict",
"qrange",
"=",
"None",
"# request auto-determination of q-range",
"else",
":",
"distkey",
"=",
"[",
"k",
"for",
"k",
"in",
"qranges",
"if",
"np",
".",
"abs",
"(",
"k",
"-",
"dist",
")",
"==",
"distkey_min",
"]",
"[",
"0",
"]",
"qrange",
"=",
"qranges",
"[",
"distkey",
"]",
"(",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
",",
"data2d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
",",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
"=",
"_collect_data_for_summarization",
"(",
"headers_narrowed",
",",
"raw",
",",
"reintegrate",
",",
"qrange",
")",
"badfsns",
",",
"badfsns_datcmp",
",",
"tab",
",",
"rowavg",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"=",
"_stabilityassessment",
"(",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
",",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
",",
"dist",
",",
"fig_correlmatrices",
",",
"correlmatrixaxes",
"[",
"dist",
"]",
",",
"std_multiplier",
",",
"correlmatrix_colormap",
",",
"os",
".",
"path",
".",
"join",
"(",
"ip",
".",
"user_ns",
"[",
"'saveto_dir'",
"]",
",",
"'correlmatrix_%s_%s'",
"%",
"(",
"samplename",
",",
"(",
"'%.2f'",
"%",
"dist",
")",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
")",
"+",
"rawpart",
"+",
"'.npz'",
")",
",",
"logarithmic_correlmatrix",
"=",
"correlmatrix_logarithmic",
",",
"cormaptest",
"=",
"cormaptest",
")",
"if",
"'badfsns'",
"not",
"in",
"ip",
".",
"user_ns",
":",
"ip",
".",
"user_ns",
"[",
"'badfsns'",
"]",
"=",
"{",
"}",
"elif",
"'badfsns_datcmp'",
"not",
"in",
"ip",
".",
"user_ns",
":",
"ip",
".",
"user_ns",
"[",
"'badfsns_datcmp'",
"]",
"=",
"{",
"}",
"ip",
".",
"user_ns",
"[",
"'badfsns'",
"]",
"=",
"set",
"(",
"ip",
".",
"user_ns",
"[",
"'badfsns'",
"]",
")",
".",
"union",
"(",
"badfsns",
")",
"ip",
".",
"user_ns",
"[",
"'badfsns_datcmp'",
"]",
"=",
"set",
"(",
"ip",
".",
"user_ns",
"[",
"'badfsns_datcmp'",
"]",
")",
".",
"union",
"(",
"badfsns_datcmp",
")",
"display",
"(",
"tab",
")",
"# Plot the image",
"try",
":",
"data2d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
".",
"imshow",
"(",
"axes",
"=",
"distaxes",
"[",
"dist",
"]",
",",
"show_crosshair",
"=",
"False",
",",
"norm",
"=",
"matplotlib",
".",
"colors",
".",
"LogNorm",
"(",
")",
",",
"cmap",
"=",
"matplotlib",
".",
"cm",
".",
"get_cmap",
"(",
"image_colormap",
")",
")",
"except",
"ValueError",
":",
"print",
"(",
"'Error plotting 2D image for sample %s, distance %.2f'",
"%",
"(",
"samplename",
",",
"dist",
")",
")",
"distaxes",
"[",
"dist",
"]",
".",
"set_xlabel",
"(",
"'q ('",
"+",
"qunit",
"(",
")",
"+",
"')'",
")",
"distaxes",
"[",
"dist",
"]",
".",
"set_ylabel",
"(",
"'q ('",
"+",
"qunit",
"(",
")",
"+",
"')'",
")",
"distaxes",
"[",
"dist",
"]",
".",
"set_title",
"(",
"'%.2f mm (%d curve%s)'",
"%",
"(",
"dist",
",",
"len",
"(",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
",",
"[",
"''",
",",
"'s'",
"]",
"[",
"len",
"(",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
">",
"1",
"]",
")",
")",
"# Plot the curves",
"Istd",
"=",
"np",
".",
"stack",
"(",
"[",
"c",
".",
"Intensity",
"for",
"c",
"in",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"for",
"c",
",",
"h",
"in",
"zip",
"(",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
",",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
":",
"color",
"=",
"'green'",
"if",
"h",
".",
"fsn",
"in",
"badfsns_datcmp",
":",
"color",
"=",
"'magenta'",
"if",
"h",
".",
"fsn",
"in",
"badfsns",
":",
"color",
"=",
"'red'",
"c",
".",
"loglog",
"(",
"axes",
"=",
"onedimaxes",
",",
"color",
"=",
"color",
")",
"if",
"Istd",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"onedimstdaxes",
".",
"loglog",
"(",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"[",
"0",
"]",
".",
"q",
",",
"Istd",
".",
"std",
"(",
"axis",
"=",
"1",
")",
"/",
"Istd",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"*",
"100",
",",
"'b-'",
")",
"if",
"not",
"late_radavg",
":",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"=",
"Curve",
".",
"average",
"(",
"*",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
"else",
":",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"=",
"(",
"data2d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
".",
"radial_average",
"(",
"qrange",
",",
"errorpropagation",
"=",
"3",
",",
"abscissa_errorpropagation",
"=",
"3",
",",
"raw_result",
"=",
"False",
")",
")",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
".",
"loglog",
"(",
"label",
"=",
"'Average'",
",",
"lw",
"=",
"2",
",",
"color",
"=",
"'k'",
",",
"axes",
"=",
"onedimaxes",
")",
"##Saving image, headers, mask and curve",
"# data2d[samplename][dist].write(",
"# os.path.join(ip.user_ns['saveto_dir'],",
"# samplename + '_'+(",
"# '%.2f' % dist).replace('.', '_') +",
"# rawpart + '.npz'), plugin='CREDO Reduced')",
"# data2d[samplename][dist].header.write(",
"# os.path.join(ip.user_ns['saveto_dir'],",
"### samplename + '_'+(",
"# '%.2f' % dist).replace('.', '_') +",
"# rawpart +'.log'), plugin='CREDO Reduced')",
"# data2d[samplename][dist].mask.write_to_mat(",
"# os.path.join(ip.user_ns['saveto_dir'],",
"# data2d[samplename][dist].mask.maskid+'.mat'))",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
".",
"save",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ip",
".",
"user_ns",
"[",
"'saveto_dir'",
"]",
",",
"samplename",
"+",
"'_'",
"+",
"(",
"'%.2f'",
"%",
"dist",
")",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"+",
"rawpart",
"+",
"'.txt'",
")",
")",
"# Report on qrange and flux",
"q_",
"=",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
".",
"q",
"qmin",
"=",
"q_",
"[",
"q_",
">",
"0",
"]",
".",
"min",
"(",
")",
"writemarkdown",
"(",
"'#### Q-range & flux'",
")",
"writemarkdown",
"(",
"'- $q_{min}$: '",
"+",
"print_abscissavalue",
"(",
"qmin",
",",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"[",
"0",
"]",
".",
"wavelength",
",",
"dist",
")",
")",
"writemarkdown",
"(",
"'- $q_{max}$: '",
"+",
"print_abscissavalue",
"(",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
".",
"q",
".",
"max",
"(",
")",
",",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"[",
"0",
"]",
".",
"wavelength",
",",
"dist",
")",
")",
"writemarkdown",
"(",
"'- Number of $q$ points: '",
"+",
"str",
"(",
"len",
"(",
"data1d",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
")",
")",
"meastime",
"=",
"sum",
"(",
"[",
"h",
".",
"exposuretime",
"for",
"h",
"in",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"]",
")",
"writemarkdown",
"(",
"\"- from %d exposures, total exposure time %.0f sec <=> %.2f hr\"",
"%",
"(",
"len",
"(",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
")",
",",
"meastime",
",",
"meastime",
"/",
"3600.",
")",
")",
"try",
":",
"flux",
"=",
"[",
"h",
".",
"flux",
"for",
"h",
"in",
"headers_tosave",
"[",
"samplename",
"]",
"[",
"dist",
"]",
"]",
"flux",
"=",
"ErrorValue",
"(",
"np",
".",
"mean",
"(",
"flux",
")",
",",
"np",
".",
"std",
"(",
"flux",
")",
")",
"writemarkdown",
"(",
"\"- beam flux (photon/sec): %s\"",
"%",
"flux",
")",
"except",
"KeyError",
":",
"writemarkdown",
"(",
"\"- *No information on beam flux: dealing with raw data.*\"",
")",
"onedimaxes",
".",
"set_xlabel",
"(",
"''",
")",
"onedimaxes",
".",
"set_ylabel",
"(",
"'$d\\\\Sigma/d\\\\Omega$ (cm$^{-1}$ sr$^{-1}$)'",
")",
"# plt.legend(loc='best')",
"onedimaxes",
".",
"grid",
"(",
"True",
",",
"which",
"=",
"'both'",
")",
"onedimaxes",
".",
"axis",
"(",
"'tight'",
")",
"onedimaxes",
".",
"set_title",
"(",
"samplename",
")",
"onedimstdaxes",
".",
"set_xlabel",
"(",
"'q ('",
"+",
"qunit",
"(",
")",
"+",
"')'",
")",
"onedimstdaxes",
".",
"set_ylabel",
"(",
"'Rel.std.dev. of intensity (%)'",
")",
"onedimstdaxes",
".",
"grid",
"(",
"True",
",",
"which",
"=",
"'both'",
")",
"onedimstdaxes",
".",
"set_xlim",
"(",
"*",
"onedimaxes",
".",
"get_xlim",
"(",
")",
")",
"onedimstdaxes",
".",
"set_xscale",
"(",
"onedimaxes",
".",
"get_xscale",
"(",
")",
")",
"putlogo",
"(",
"fig_curves",
")",
"putlogo",
"(",
"fig_2d",
")",
"fig_2d",
".",
"tight_layout",
"(",
")",
"fig_correlmatrices",
".",
"suptitle",
"(",
"samplename",
")",
"fig_correlmatrices",
".",
"tight_layout",
"(",
")",
"fig_2d",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ip",
".",
"user_ns",
"[",
"'auximages_dir'",
"]",
",",
"'averaging2D_'",
"+",
"samplename",
"+",
"rawpart",
"+",
"'.'",
"+",
"graph_extension",
")",
",",
"dpi",
"=",
"graph_dpi",
")",
"fig_curves",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ip",
".",
"user_ns",
"[",
"'auximages_dir'",
"]",
",",
"'averaging1D_'",
"+",
"samplename",
"+",
"rawpart",
"+",
"'.'",
"+",
"graph_extension",
")",
",",
"dpi",
"=",
"graph_dpi",
")",
"putlogo",
"(",
"fig_correlmatrices",
")",
"fig_correlmatrices",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ip",
".",
"user_ns",
"[",
"'auximages_dir'",
"]",
",",
"'correlation_'",
"+",
"samplename",
"+",
"rawpart",
"+",
"'.'",
"+",
"graph_extension",
")",
",",
"dpi",
"=",
"graph_dpi",
")",
"writemarkdown",
"(",
"\"### Collected images from all distances\"",
")",
"plt",
".",
"show",
"(",
")",
"writemarkdown",
"(",
"\"Updated badfsns list:\"",
")",
"writemarkdown",
"(",
"'['",
"+",
"', '",
".",
"join",
"(",
"str",
"(",
"f",
")",
"for",
"f",
"in",
"ip",
".",
"user_ns",
"[",
"'badfsns'",
"]",
")",
"+",
"']'",
")",
"writemarkdown",
"(",
"\"Updated badfsns list using datcmp:\"",
")",
"writemarkdown",
"(",
"'['",
"+",
"', '",
".",
"join",
"(",
"str",
"(",
"f",
")",
"for",
"f",
"in",
"ip",
".",
"user_ns",
"[",
"'badfsns_datcmp'",
"]",
")",
"+",
"']'",
")",
"ip",
".",
"user_ns",
"[",
"'_data1d'",
"]",
"=",
"data1d",
"ip",
".",
"user_ns",
"[",
"'_data2d'",
"]",
"=",
"data2d",
"ip",
".",
"user_ns",
"[",
"'_headers_sample'",
"]",
"=",
"headers_tosave",
"ip",
".",
"user_ns",
"[",
"'_rowavg'",
"]",
"=",
"rowavg"
] | Summarize scattering patterns and curves for all samples defined
by the global `allsamplenames`.
Inputs:
reintegrate (bool, default=True): if the curves are to be obtained
by reintegrating the patterns. Otherwise 1D curves are loaded.
dist_tolerance (float, default=3): sample-to-detector distances
nearer than this are considered the same
qranges (dict): a dictionary mapping approximate sample-to-detector
distances (within dist_tolerance) to one-dimensional np.ndarrays
of the desired q-range of the reintegration.
samples (list or None): the names of the samples to summarize. If
None, all samples defined by ``allsamplenames`` are used.
raw (bool, default=False): if raw images are to be treated instead of
the evaluated ones (default).
late_radavg (bool, default=True): if the scattering curves are to
be calculated from the summarized scattering pattern. If False,
scattering curves are calculated from each pattern and will be
averaged.
graph_ncols: the number of columns in graphs (2D patterns,
correlation matrices)
std_multiplier: if the absolute value of the relative discrepancy
is larger than this limit, the exposure is deemed an outlier.
graph_extension: the extension of the produced hardcopy files.
graph_dpi: resolution of the graphs
correlmatrix_colormap: name of the colormap to be used for the
correlation matrices (resolved by matplotlib.cm.get_cmap())
image_colormap: name of the colormap to be used for the scattering
patterns (resolved by matplotlib.cm.get_cmap())
correlmatrix_logarithmic: if the correlation matrix has to be
calculated from the logarithm of the intensity. | [
"Summarize",
"scattering",
"patterns",
"and",
"curves",
"for",
"all",
"samples",
"defined",
"by",
"the",
"global",
"allsamplenames",
".",
"Inputs",
":",
"reintegrate",
"(",
"bool",
"default",
"=",
"True",
")",
":",
"if",
"the",
"curves",
"are",
"to",
"be",
"obained",
"by",
"reintegrating",
"the",
"patterns",
".",
"Otherwise",
"1D",
"curves",
"are",
"loaded",
".",
"dist_tolerance",
"(",
"float",
"default",
"=",
"3",
")",
":",
"sample",
"-",
"to",
"-",
"detector",
"distances",
"nearer",
"than",
"this",
"are",
"considered",
"the",
"same",
"qranges",
"(",
"dict",
")",
":",
"a",
"dictionary",
"mapping",
"approximate",
"sample",
"-",
"to",
"-",
"detector",
"distances",
"(",
"within",
"dist_tolerance",
")",
"to",
"one",
"-",
"dimensional",
"np",
".",
"ndarrays",
"of",
"the",
"desired",
"q",
"-",
"range",
"of",
"the",
"reintegration",
".",
"samples",
"(",
"list",
"or",
"None",
")",
":",
"the",
"names",
"of",
"the",
"samples",
"to",
"summarize",
".",
"If",
"None",
"all",
"samples",
"defined",
"by",
"allsamplenames",
"are",
"used",
".",
"raw",
"(",
"bool",
"default",
"=",
"False",
")",
":",
"if",
"raw",
"images",
"are",
"to",
"be",
"treated",
"instead",
"the",
"evaluated",
"ones",
"(",
"default",
")",
".",
"late_radavg",
"(",
"bool",
"default",
"=",
"True",
")",
":",
"if",
"the",
"scattering",
"curves",
"are",
"to",
"be",
"calculated",
"from",
"the",
"summarized",
"scattering",
"pattern",
".",
"If",
"False",
"scattering",
"curves",
"are",
"calculated",
"from",
"each",
"pattern",
"and",
"will",
"be",
"averaged",
".",
"graph_ncols",
":",
"the",
"number",
"of",
"columns",
"in",
"graphs",
"(",
"2D",
"patterns",
"correlation",
"matrices",
")",
"std_multiplier",
":",
"if",
"the",
"absolute",
"value",
"of",
"the",
"relative",
"discrepancy",
"is",
"larger",
"than",
"this",
"limit",
"the",
"exposure",
"is",
"deemed",
"an",
"outlier",
".",
"graph_extension",
":",
"the",
"extension",
"of",
"the",
"produced",
"hardcopy",
"files",
".",
"graph_dpi",
":",
"resolution",
"of",
"the",
"graphs",
"correlmatrix_colormap",
":",
"name",
"of",
"the",
"colormap",
"to",
"be",
"used",
"for",
"the",
"correlation",
"matrices",
"(",
"resolved",
"by",
"matplotlib",
".",
"cm",
".",
"get_cmap",
"()",
")",
"image_colormap",
":",
"name",
"of",
"the",
"colormap",
"to",
"be",
"used",
"for",
"the",
"scattering",
"patterns",
"(",
"resolved",
"by",
"matplotlib",
".",
"cm",
".",
"get_cmap",
"()",
")",
"correlmatrix_logarithmic",
":",
"if",
"the",
"correlation",
"matrix",
"has",
"to",
"be",
"calculated",
"from",
"the",
"logarithm",
"of",
"the",
"intensity",
"."
] | python | train |
saltstack/salt | salt/modules/nova.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nova.py#L292-L324 | def volume_create(name, size=100, snapshot=None, voltype=None, profile=None, **kwargs):
'''
Create a block storage volume
name
Name of the new volume (must be first)
size
Volume size
snapshot
Block storage snapshot id
voltype
Type of storage
profile
Profile to build on
CLI Example:
.. code-block:: bash
salt '*' nova.volume_create myblock size=300 profile=openstack
'''
conn = _auth(profile, **kwargs)
return conn.volume_create(
name,
size,
snapshot,
voltype
) | [
"def",
"volume_create",
"(",
"name",
",",
"size",
"=",
"100",
",",
"snapshot",
"=",
"None",
",",
"voltype",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"_auth",
"(",
"profile",
",",
"*",
"*",
"kwargs",
")",
"return",
"conn",
".",
"volume_create",
"(",
"name",
",",
"size",
",",
"snapshot",
",",
"voltype",
")"
] | Create a block storage volume
name
Name of the new volume (must be first)
size
Volume size
snapshot
Block storage snapshot id
voltype
Type of storage
profile
Profile to build on
CLI Example:
.. code-block:: bash
salt '*' nova.volume_create myblock size=300 profile=openstack | [
"Create",
"a",
"block",
"storage",
"volume"
] | python | train |
justquick/django-native-tags | native_tags/registry.py | https://github.com/justquick/django-native-tags/blob/d40b976ee1cb13faeb04f0dedf02933d4274abf2/native_tags/registry.py#L100-L107 | def unregister(self, bucket, name):
"""
Remove the function from the registry by name
"""
assert bucket in self, 'Bucket %s is unknown' % bucket
if not name in self[bucket]:
raise NotRegistered('The function %s is not registered' % name)
del self[bucket][name] | [
"def",
"unregister",
"(",
"self",
",",
"bucket",
",",
"name",
")",
":",
"assert",
"bucket",
"in",
"self",
",",
"'Bucket %s is unknown'",
"%",
"bucket",
"if",
"not",
"name",
"in",
"self",
"[",
"bucket",
"]",
":",
"raise",
"NotRegistered",
"(",
"'The function %s is not registered'",
"%",
"name",
")",
"del",
"self",
"[",
"bucket",
"]",
"[",
"name",
"]"
] | Remove the function from the registry by name | [
"Remove",
"the",
"function",
"from",
"the",
"registry",
"by",
"name"
] | python | train |
Microsoft/nni | examples/trials/network_morphism/FashionMNIST/utils.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/network_morphism/FashionMNIST/utils.py#L116-L137 | def data_transforms_cifar10(args):
""" data_transforms for cifar10 dataset
"""
cifar_mean = [0.49139968, 0.48215827, 0.44653124]
cifar_std = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cifar_mean, cifar_std),
]
)
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
valid_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(cifar_mean, cifar_std)]
)
return train_transform, valid_transform | [
"def",
"data_transforms_cifar10",
"(",
"args",
")",
":",
"cifar_mean",
"=",
"[",
"0.49139968",
",",
"0.48215827",
",",
"0.44653124",
"]",
"cifar_std",
"=",
"[",
"0.24703233",
",",
"0.24348505",
",",
"0.26158768",
"]",
"train_transform",
"=",
"transforms",
".",
"Compose",
"(",
"[",
"transforms",
".",
"RandomCrop",
"(",
"32",
",",
"padding",
"=",
"4",
")",
",",
"transforms",
".",
"RandomHorizontalFlip",
"(",
")",
",",
"transforms",
".",
"ToTensor",
"(",
")",
",",
"transforms",
".",
"Normalize",
"(",
"cifar_mean",
",",
"cifar_std",
")",
",",
"]",
")",
"if",
"args",
".",
"cutout",
":",
"train_transform",
".",
"transforms",
".",
"append",
"(",
"Cutout",
"(",
"args",
".",
"cutout_length",
")",
")",
"valid_transform",
"=",
"transforms",
".",
"Compose",
"(",
"[",
"transforms",
".",
"ToTensor",
"(",
")",
",",
"transforms",
".",
"Normalize",
"(",
"cifar_mean",
",",
"cifar_std",
")",
"]",
")",
"return",
"train_transform",
",",
"valid_transform"
] | data_transforms for cifar10 dataset | [
"data_transforms",
"for",
"cifar10",
"dataset"
] | python | train |
ejeschke/ginga | ginga/Bindings.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1696-L1702 | def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True):
"""An interactive way to restore the colormap contrast settings after
a warp operation.
"""
if self.cancmap and (event.state == 'down'):
self.restore_contrast(viewer, msg=msg)
return True | [
"def",
"ms_contrast_restore",
"(",
"self",
",",
"viewer",
",",
"event",
",",
"data_x",
",",
"data_y",
",",
"msg",
"=",
"True",
")",
":",
"if",
"self",
".",
"cancmap",
"and",
"(",
"event",
".",
"state",
"==",
"'down'",
")",
":",
"self",
".",
"restore_contrast",
"(",
"viewer",
",",
"msg",
"=",
"msg",
")",
"return",
"True"
] | An interactive way to restore the colormap contrast settings after
a warp operation. | [
"An",
"interactive",
"way",
"to",
"restore",
"the",
"colormap",
"contrast",
"settings",
"after",
"a",
"warp",
"operation",
"."
] | python | train |
HazyResearch/fonduer | src/fonduer/candidates/mentions.py | https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/mentions.py#L246-L260 | def apply(self, doc):
"""
Generate MentionCaptions from a Document by parsing all of its Captions.
:param doc: The ``Document`` to parse.
:type doc: ``Document``
:raises TypeError: If the input doc is not of type ``Document``.
"""
if not isinstance(doc, Document):
raise TypeError(
"Input Contexts to MentionCaptions.apply() must be of type Document"
)
for caption in doc.captions:
yield TemporaryCaptionMention(caption) | [
"def",
"apply",
"(",
"self",
",",
"doc",
")",
":",
"if",
"not",
"isinstance",
"(",
"doc",
",",
"Document",
")",
":",
"raise",
"TypeError",
"(",
"\"Input Contexts to MentionCaptions.apply() must be of type Document\"",
")",
"for",
"caption",
"in",
"doc",
".",
"captions",
":",
"yield",
"TemporaryCaptionMention",
"(",
"caption",
")"
] | Generate MentionCaptions from a Document by parsing all of its Captions.
:param doc: The ``Document`` to parse.
:type doc: ``Document``
:raises TypeError: If the input doc is not of type ``Document``. | [
"Generate",
"MentionCaptions",
"from",
"a",
"Document",
"by",
"parsing",
"all",
"of",
"its",
"Captions",
"."
] | python | train |
oseledets/ttpy | tt/amen/amen_mv.py | https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/amen/amen_mv.py#L480-L608 | def _compute_next_Phi(Phi_prev, x, y, direction, A=None,
extnrm=None, return_norm=True):
'''
Performs the recurrent Phi (or Psi) matrix computation
Phi = Phi_prev * (x'Ay).
If direction is 'lr', computes Psi
if direction is 'rl', computes Phi
A can be empty, then only x'y is computed.
Phi1: rx1, ry1, ra1, or {rx1, ry1}_ra, or rx1, ry1
Phi2: ry2, ra2, rx2, or {ry2, rx2}_ra, or ry2, rx2
'''
[rx1, n, rx2] = x.shape
[ry1, m, ry2] = y.shape
if A is not None:
if isinstance(A, list): # ?????????????????????????????????
# A is a canonical block
ra = len(A)
else:
# Just full format
[ra1, ra2] = A.shape
ra1 = ra1 // n
ra2 = ra2 // m
# ?????????????????????????????????????
else:
[ra1, ra2] = [1, 1]
if isinstance(Phi_prev, list):
Phi = [None] * ra
if return_norm:
nrm = 0
if (direction == 'lr'):
# lr: Phi1
x = _reshape(x, (rx1, n * rx2))
y = _reshape(y, (ry1 * m, ry2))
for i in xrange(ra):
Phi[i] = _np.dot(_tconj(x), Phi_prev[i])
Phi[i] = _reshape(Phi[i], (n, rx2 * ry1))
Phi[i] = Phi[i].T
Phi[i] = _np.dot(Phi[i], A[i])
Phi[i] = _reshape(Phi[i], (rx2, ry1 * m))
Phi[i] = _np.dot(Phi[i], y)
if return_norm:
nrm = max(nrm, _np.linalg.norm(Phi[i])) # , 'fro'))
else:
# rl: Phi2
y = _reshape(y, (ry1, m * ry2))
x = _reshape(x, (rx1 * n, rx2))
for i in xrange(ra):
Phi[i] = _np.dot(Phi_prev[i], x.T)
Phi[i] = _reshape(Phi[i], (ry2 * rx1, n))
Phi[i] = _np.dot(Phi[i], A[i])
Phi[i] = Phi[i].T
Phi[i] = _reshape(Phi[i], (m * ry2, rx1))
Phi[i] = _np.dot(y, Phi[i])
if return_norm:
nrm = max(nrm, _np.linalg.norm(Phi[i])) # , 'fro'))
if return_norm:
# Extract the scale to prevent overload
if (nrm > 0):
for i in xrange(ra):
Phi[i] = Phi[i] / nrm
else:
nrm = 1
elif extnrm is not None:
# Override the normalization
for i in xrange(ra):
Phi[i] = Phi[i] / extnrm
else:
if (direction == 'lr'):
# lr: Phi1
x = _reshape(x, (rx1, n * rx2))
Phi = _reshape(Phi_prev, (rx1, ry1 * ra1))
Phi = _np.dot(_tconj(x), Phi)
if A is not None:
Phi = _reshape(Phi, (n * rx2 * ry1, ra1))
Phi = Phi.T
Phi = _reshape(Phi, (ra1 * n, rx2 * ry1))
Phi = _np.dot(A.T, Phi)
Phi = _reshape(Phi, (m, ra2 * rx2 * ry1))
else:
Phi = _reshape(Phi, (n, rx2 * ry1))
Phi = Phi.T
Phi = _reshape(Phi, (ra2 * rx2, ry1 * m))
y = _reshape(y, (ry1 * m, ry2))
Phi = _np.dot(Phi, y)
if A is not None:
Phi = _reshape(Phi, (ra2, rx2 * ry2))
Phi = Phi.T
Phi = _reshape(Phi, (rx2, ry2, ra2))
else:
Phi = _reshape(Phi, (rx2, ry2))
else:
# rl: Phi2
y = _reshape(y, (ry1 * m, ry2))
Phi = _reshape(Phi_prev, (ry2, ra2 * rx2))
Phi = _np.dot(y, Phi)
if A is not None:
Phi = _reshape(Phi, (ry1, m * ra2 * rx2))
Phi = Phi.T
Phi = _reshape(Phi, (m * ra2, rx2 * ry1))
Phi = _np.dot(A, Phi)
Phi = _reshape(Phi, (ra1 * n * rx2, ry1))
Phi = Phi.T
Phi = _reshape(Phi, (ry1 * ra1, n * rx2))
x = _reshape(x, (rx1, n * rx2))
Phi = _np.dot(Phi, _tconj(x))
if A is not None:
Phi = _reshape(Phi, (ry1, ra1, rx1))
else:
Phi = _reshape(Phi, (ry1, rx1))
if return_norm:
# Extract the scale to prevent overload
nrm = _np.linalg.norm(Phi) # , 'fro')
if (nrm > 0):
Phi = Phi / nrm
else:
nrm = 1
elif extnrm is not None:
# Override the normalization by the external one
Phi = Phi / extnrm
if return_norm:
return Phi, nrm
else:
return Phi | [
"def",
"_compute_next_Phi",
"(",
"Phi_prev",
",",
"x",
",",
"y",
",",
"direction",
",",
"A",
"=",
"None",
",",
"extnrm",
"=",
"None",
",",
"return_norm",
"=",
"True",
")",
":",
"[",
"rx1",
",",
"n",
",",
"rx2",
"]",
"=",
"x",
".",
"shape",
"[",
"ry1",
",",
"m",
",",
"ry2",
"]",
"=",
"y",
".",
"shape",
"if",
"A",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"A",
",",
"list",
")",
":",
"# ?????????????????????????????????",
"# A is a canonical block",
"ra",
"=",
"len",
"(",
"A",
")",
"else",
":",
"# Just full format",
"[",
"ra1",
",",
"ra2",
"]",
"=",
"A",
".",
"shape",
"ra1",
"=",
"ra1",
"//",
"n",
"ra2",
"=",
"ra2",
"//",
"m",
"# ?????????????????????????????????????",
"else",
":",
"[",
"ra1",
",",
"ra2",
"]",
"=",
"[",
"1",
",",
"1",
"]",
"if",
"isinstance",
"(",
"Phi_prev",
",",
"list",
")",
":",
"Phi",
"=",
"[",
"None",
"]",
"*",
"ra",
"if",
"return_norm",
":",
"nrm",
"=",
"0",
"if",
"(",
"direction",
"==",
"'lr'",
")",
":",
"# lr: Phi1",
"x",
"=",
"_reshape",
"(",
"x",
",",
"(",
"rx1",
",",
"n",
"*",
"rx2",
")",
")",
"y",
"=",
"_reshape",
"(",
"y",
",",
"(",
"ry1",
"*",
"m",
",",
"ry2",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"ra",
")",
":",
"Phi",
"[",
"i",
"]",
"=",
"_np",
".",
"dot",
"(",
"_tconj",
"(",
"x",
")",
",",
"Phi_prev",
"[",
"i",
"]",
")",
"Phi",
"[",
"i",
"]",
"=",
"_reshape",
"(",
"Phi",
"[",
"i",
"]",
",",
"(",
"n",
",",
"rx2",
"*",
"ry1",
")",
")",
"Phi",
"[",
"i",
"]",
"=",
"Phi",
"[",
"i",
"]",
".",
"T",
"Phi",
"[",
"i",
"]",
"=",
"_np",
".",
"dot",
"(",
"Phi",
"[",
"i",
"]",
",",
"A",
"[",
"i",
"]",
")",
"Phi",
"[",
"i",
"]",
"=",
"_reshape",
"(",
"Phi",
"[",
"i",
"]",
",",
"(",
"rx2",
",",
"ry1",
"*",
"m",
")",
")",
"Phi",
"[",
"i",
"]",
"=",
"_np",
".",
"dot",
"(",
"Phi",
"[",
"i",
"]",
",",
"y",
")",
"if",
"return_norm",
":",
"nrm",
"=",
"max",
"(",
"nrm",
",",
"_np",
".",
"linalg",
".",
"norm",
"(",
"Phi",
"[",
"i",
"]",
")",
")",
"# , 'fro'))",
"else",
":",
"# rl: Phi2",
"y",
"=",
"_reshape",
"(",
"y",
",",
"(",
"ry1",
",",
"m",
"*",
"ry2",
")",
")",
"x",
"=",
"_reshape",
"(",
"x",
",",
"(",
"rx1",
"*",
"n",
",",
"rx2",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"ra",
")",
":",
"Phi",
"[",
"i",
"]",
"=",
"_np",
".",
"dot",
"(",
"Phi_prev",
"[",
"i",
"]",
",",
"x",
".",
"T",
")",
"Phi",
"[",
"i",
"]",
"=",
"_reshape",
"(",
"Phi",
"[",
"i",
"]",
",",
"(",
"ry2",
"*",
"rx1",
",",
"n",
")",
")",
"Phi",
"[",
"i",
"]",
"=",
"_np",
".",
"dot",
"(",
"Phi",
"[",
"i",
"]",
",",
"A",
"[",
"i",
"]",
")",
"Phi",
"[",
"i",
"]",
"=",
"Phi",
"[",
"i",
"]",
".",
"T",
"Phi",
"[",
"i",
"]",
"=",
"_reshape",
"(",
"Phi",
"[",
"i",
"]",
",",
"(",
"m",
"*",
"ry2",
",",
"rx1",
")",
")",
"Phi",
"[",
"i",
"]",
"=",
"_np",
".",
"dot",
"(",
"y",
",",
"Phi",
"[",
"i",
"]",
")",
"if",
"return_norm",
":",
"nrm",
"=",
"max",
"(",
"nrm",
",",
"_np",
".",
"linalg",
".",
"norm",
"(",
"Phi",
"[",
"i",
"]",
")",
")",
"# , 'fro'))",
"if",
"return_norm",
":",
"# Extract the scale to prevent overload",
"if",
"(",
"nrm",
">",
"0",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"ra",
")",
":",
"Phi",
"[",
"i",
"]",
"=",
"Phi",
"[",
"i",
"]",
"/",
"nrm",
"else",
":",
"nrm",
"=",
"1",
"elif",
"extnrm",
"is",
"not",
"None",
":",
"# Override the normalization",
"for",
"i",
"in",
"xrange",
"(",
"ra",
")",
":",
"Phi",
"[",
"i",
"]",
"=",
"Phi",
"[",
"i",
"]",
"/",
"extnrm",
"else",
":",
"if",
"(",
"direction",
"==",
"'lr'",
")",
":",
"# lr: Phi1",
"x",
"=",
"_reshape",
"(",
"x",
",",
"(",
"rx1",
",",
"n",
"*",
"rx2",
")",
")",
"Phi",
"=",
"_reshape",
"(",
"Phi_prev",
",",
"(",
"rx1",
",",
"ry1",
"*",
"ra1",
")",
")",
"Phi",
"=",
"_np",
".",
"dot",
"(",
"_tconj",
"(",
"x",
")",
",",
"Phi",
")",
"if",
"A",
"is",
"not",
"None",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"n",
"*",
"rx2",
"*",
"ry1",
",",
"ra1",
")",
")",
"Phi",
"=",
"Phi",
".",
"T",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ra1",
"*",
"n",
",",
"rx2",
"*",
"ry1",
")",
")",
"Phi",
"=",
"_np",
".",
"dot",
"(",
"A",
".",
"T",
",",
"Phi",
")",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"m",
",",
"ra2",
"*",
"rx2",
"*",
"ry1",
")",
")",
"else",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"n",
",",
"rx2",
"*",
"ry1",
")",
")",
"Phi",
"=",
"Phi",
".",
"T",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ra2",
"*",
"rx2",
",",
"ry1",
"*",
"m",
")",
")",
"y",
"=",
"_reshape",
"(",
"y",
",",
"(",
"ry1",
"*",
"m",
",",
"ry2",
")",
")",
"Phi",
"=",
"_np",
".",
"dot",
"(",
"Phi",
",",
"y",
")",
"if",
"A",
"is",
"not",
"None",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ra2",
",",
"rx2",
"*",
"ry2",
")",
")",
"Phi",
"=",
"Phi",
".",
"T",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"rx2",
",",
"ry2",
",",
"ra2",
")",
")",
"else",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"rx2",
",",
"ry2",
")",
")",
"else",
":",
"# rl: Phi2",
"y",
"=",
"_reshape",
"(",
"y",
",",
"(",
"ry1",
"*",
"m",
",",
"ry2",
")",
")",
"Phi",
"=",
"_reshape",
"(",
"Phi_prev",
",",
"(",
"ry2",
",",
"ra2",
"*",
"rx2",
")",
")",
"Phi",
"=",
"_np",
".",
"dot",
"(",
"y",
",",
"Phi",
")",
"if",
"A",
"is",
"not",
"None",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ry1",
",",
"m",
"*",
"ra2",
"*",
"rx2",
")",
")",
"Phi",
"=",
"Phi",
".",
"T",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"m",
"*",
"ra2",
",",
"rx2",
"*",
"ry1",
")",
")",
"Phi",
"=",
"_np",
".",
"dot",
"(",
"A",
",",
"Phi",
")",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ra1",
"*",
"n",
"*",
"rx2",
",",
"ry1",
")",
")",
"Phi",
"=",
"Phi",
".",
"T",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ry1",
"*",
"ra1",
",",
"n",
"*",
"rx2",
")",
")",
"x",
"=",
"_reshape",
"(",
"x",
",",
"(",
"rx1",
",",
"n",
"*",
"rx2",
")",
")",
"Phi",
"=",
"_np",
".",
"dot",
"(",
"Phi",
",",
"_tconj",
"(",
"x",
")",
")",
"if",
"A",
"is",
"not",
"None",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ry1",
",",
"ra1",
",",
"rx1",
")",
")",
"else",
":",
"Phi",
"=",
"_reshape",
"(",
"Phi",
",",
"(",
"ry1",
",",
"rx1",
")",
")",
"if",
"return_norm",
":",
"# Extract the scale to prevent overload",
"nrm",
"=",
"_np",
".",
"linalg",
".",
"norm",
"(",
"Phi",
")",
"# , 'fro')",
"if",
"(",
"nrm",
">",
"0",
")",
":",
"Phi",
"=",
"Phi",
"/",
"nrm",
"else",
":",
"nrm",
"=",
"1",
"elif",
"extnrm",
"is",
"not",
"None",
":",
"# Override the normalization by the external one",
"Phi",
"=",
"Phi",
"/",
"extnrm",
"if",
"return_norm",
":",
"return",
"Phi",
",",
"nrm",
"else",
":",
"return",
"Phi"
] | Performs the recurrent Phi (or Psi) matrix computation
Phi = Phi_prev * (x'Ay).
If direction is 'lr', computes Psi
if direction is 'rl', computes Phi
A can be empty, then only x'y is computed.
Phi1: rx1, ry1, ra1, or {rx1, ry1}_ra, or rx1, ry1
Phi2: ry2, ra2, rx2, or {ry2, rx2}_ra, or ry2, rx2 | [
"Performs",
"the",
"recurrent",
"Phi",
"(",
"or",
"Psi",
")",
"matrix",
"computation",
"Phi",
"=",
"Phi_prev",
"*",
"(",
"x",
"Ay",
")",
".",
"If",
"direction",
"is",
"lr",
"computes",
"Psi",
"if",
"direction",
"is",
"rl",
"computes",
"Phi",
"A",
"can",
"be",
"empty",
"then",
"only",
"x",
"y",
"is",
"computed",
"."
] | python | train |
juju/charm-helpers | charmhelpers/contrib/charmsupport/nrpe.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/charmsupport/nrpe.py#L355-L410 | def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
continue
upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_systemd.py %s' % svc
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
description='service check {%s}' % unit_name,
check_cmd='check_status_file.py -f %s' % checkpath,
)
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
# (LP: #1670223).
if immediate_check and os.path.isdir(nrpe.homedir):
f = open(checkpath, 'w')
subprocess.call(
croncmd.split(),
stdout=f,
stderr=subprocess.STDOUT
)
f.close()
os.chmod(checkpath, 0o644) | [
"def",
"add_init_service_checks",
"(",
"nrpe",
",",
"services",
",",
"unit_name",
",",
"immediate_check",
"=",
"True",
")",
":",
"for",
"svc",
"in",
"services",
":",
"# Don't add a check for these services from neutron-gateway",
"if",
"svc",
"in",
"[",
"'ext-port'",
",",
"'os-charm-phy-nic-mtu'",
"]",
":",
"next",
"upstart_init",
"=",
"'/etc/init/%s.conf'",
"%",
"svc",
"sysv_init",
"=",
"'/etc/init.d/%s'",
"%",
"svc",
"if",
"host",
".",
"init_is_systemd",
"(",
")",
":",
"nrpe",
".",
"add_check",
"(",
"shortname",
"=",
"svc",
",",
"description",
"=",
"'process check {%s}'",
"%",
"unit_name",
",",
"check_cmd",
"=",
"'check_systemd.py %s'",
"%",
"svc",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"upstart_init",
")",
":",
"nrpe",
".",
"add_check",
"(",
"shortname",
"=",
"svc",
",",
"description",
"=",
"'process check {%s}'",
"%",
"unit_name",
",",
"check_cmd",
"=",
"'check_upstart_job %s'",
"%",
"svc",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"sysv_init",
")",
":",
"cronpath",
"=",
"'/etc/cron.d/nagios-service-check-%s'",
"%",
"svc",
"checkpath",
"=",
"'%s/service-check-%s.txt'",
"%",
"(",
"nrpe",
".",
"homedir",
",",
"svc",
")",
"croncmd",
"=",
"(",
"'/usr/local/lib/nagios/plugins/check_exit_status.pl '",
"'-e -s /etc/init.d/%s status'",
"%",
"svc",
")",
"cron_file",
"=",
"'*/5 * * * * root %s > %s\\n'",
"%",
"(",
"croncmd",
",",
"checkpath",
")",
"f",
"=",
"open",
"(",
"cronpath",
",",
"'w'",
")",
"f",
".",
"write",
"(",
"cron_file",
")",
"f",
".",
"close",
"(",
")",
"nrpe",
".",
"add_check",
"(",
"shortname",
"=",
"svc",
",",
"description",
"=",
"'service check {%s}'",
"%",
"unit_name",
",",
"check_cmd",
"=",
"'check_status_file.py -f %s'",
"%",
"checkpath",
",",
")",
"# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail",
"# (LP: #1670223).",
"if",
"immediate_check",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"nrpe",
".",
"homedir",
")",
":",
"f",
"=",
"open",
"(",
"checkpath",
",",
"'w'",
")",
"subprocess",
".",
"call",
"(",
"croncmd",
".",
"split",
"(",
")",
",",
"stdout",
"=",
"f",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"f",
".",
"close",
"(",
")",
"os",
".",
"chmod",
"(",
"checkpath",
",",
"0o644",
")"
] | Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately | [
"Add",
"checks",
"for",
"each",
"service",
"in",
"list"
] | python | train |
tcalmant/ipopo | pelix/internals/registry.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/internals/registry.py#L163-L187 | def _get_from_prototype(self, factory, svc_registration):
# type: (Any, ServiceRegistration) -> Any
"""
Returns a service instance from a Prototype Service Factory
:param factory: The service factory
:param svc_registration: The ServiceRegistration object
:return: The requested service instance returned by the factory
"""
svc_ref = svc_registration.get_reference()
service = factory.get_service(self.__bundle, svc_registration)
try:
# Check if the service already exists
services, counter = self.__factored[svc_ref]
services.append(service)
counter.inc()
except KeyError:
counter = _UsageCounter()
counter.inc()
# Store the counter
self.__factored[svc_ref] = ([service], counter)
return service | [
"def",
"_get_from_prototype",
"(",
"self",
",",
"factory",
",",
"svc_registration",
")",
":",
"# type: (Any, ServiceRegistration) -> Any",
"svc_ref",
"=",
"svc_registration",
".",
"get_reference",
"(",
")",
"service",
"=",
"factory",
".",
"get_service",
"(",
"self",
".",
"__bundle",
",",
"svc_registration",
")",
"try",
":",
"# Check if the service already exists",
"services",
",",
"counter",
"=",
"self",
".",
"__factored",
"[",
"svc_ref",
"]",
"services",
".",
"append",
"(",
"service",
")",
"counter",
".",
"inc",
"(",
")",
"except",
"KeyError",
":",
"counter",
"=",
"_UsageCounter",
"(",
")",
"counter",
".",
"inc",
"(",
")",
"# Store the counter",
"self",
".",
"__factored",
"[",
"svc_ref",
"]",
"=",
"(",
"[",
"service",
"]",
",",
"counter",
")",
"return",
"service"
] | Returns a service instance from a Prototype Service Factory
:param factory: The service factory
:param svc_registration: The ServiceRegistration object
:return: The requested service instance returned by the factory | [
"Returns",
"a",
"service",
"instance",
"from",
"a",
"Prototype",
"Service",
"Factory"
] | python | train |
vaexio/vaex | packages/vaex-core/vaex/ext/readcol.py | https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/ext/readcol.py#L223-L236 | def get_autotype(arr):
"""
Attempts to return a numpy array converted to the most sensible dtype
Value errors will be caught and simply return the original array
Tries to make dtype int, then float, then no change
"""
try:
narr = arr.astype('float')
if (narr < sys.maxsize).all() and (narr % 1).sum() == 0:
return narr.astype('int')
else:
return narr
except ValueError:
return arr | [
"def",
"get_autotype",
"(",
"arr",
")",
":",
"try",
":",
"narr",
"=",
"arr",
".",
"astype",
"(",
"'float'",
")",
"if",
"(",
"narr",
"<",
"sys",
".",
"maxsize",
")",
".",
"all",
"(",
")",
"and",
"(",
"narr",
"%",
"1",
")",
".",
"sum",
"(",
")",
"==",
"0",
":",
"return",
"narr",
".",
"astype",
"(",
"'int'",
")",
"else",
":",
"return",
"narr",
"except",
"ValueError",
":",
"return",
"arr"
] | Attempts to return a numpy array converted to the most sensible dtype
Value errors will be caught and simply return the original array
Tries to make dtype int, then float, then no change | [
"Attempts",
"to",
"return",
"a",
"numpy",
"array",
"converted",
"to",
"the",
"most",
"sensible",
"dtype",
"Value",
"errors",
"will",
"be",
"caught",
"and",
"simply",
"return",
"the",
"original",
"array",
"Tries",
"to",
"make",
"dtype",
"int",
"then",
"float",
"then",
"no",
"change"
] | python | test |
PredixDev/predixpy | predix/service.py | https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/service.py#L113-L126 | def _delete(self, uri):
"""
Simple DELETE operation for a given path.
"""
headers = self._get_headers()
response = self.session.delete(uri, headers=headers)
# Will return a 204 on successful delete
if response.status_code == 204:
return response
else:
logging.error(response.content)
response.raise_for_status() | [
"def",
"_delete",
"(",
"self",
",",
"uri",
")",
":",
"headers",
"=",
"self",
".",
"_get_headers",
"(",
")",
"response",
"=",
"self",
".",
"session",
".",
"delete",
"(",
"uri",
",",
"headers",
"=",
"headers",
")",
"# Will return a 204 on successful delete",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"return",
"response",
"else",
":",
"logging",
".",
"error",
"(",
"response",
".",
"content",
")",
"response",
".",
"raise_for_status",
"(",
")"
] | Simple DELETE operation for a given path. | [
"Simple",
"DELETE",
"operation",
"for",
"a",
"given",
"path",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/explorer/widgets.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L1052-L1057 | def set_scrollbar_position(self, position):
"""Set scrollbar positions"""
# Scrollbars will be restored after the expanded state
self._scrollbar_positions = position
if self._to_be_loaded is not None and len(self._to_be_loaded) == 0:
self.restore_scrollbar_positions() | [
"def",
"set_scrollbar_position",
"(",
"self",
",",
"position",
")",
":",
"# Scrollbars will be restored after the expanded state\r",
"self",
".",
"_scrollbar_positions",
"=",
"position",
"if",
"self",
".",
"_to_be_loaded",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"_to_be_loaded",
")",
"==",
"0",
":",
"self",
".",
"restore_scrollbar_positions",
"(",
")"
] | Set scrollbar positions | [
"Set",
"scrollbar",
"positions"
] | python | train |
python/performance | performance/benchmarks/bm_mdp.py | https://github.com/python/performance/blob/2a9524c0a5714e85106671bc61d750e800fe17db/performance/benchmarks/bm_mdp.py#L9-L33 | def topoSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node, 0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0:
# before recursing
if current not in visited:
visited.add(current)
stack.append((current, 1))
stack.extend((parent, 0) for parent in getParents(current))
else:
# after recursing
assert(current in visited)
results.append(current)
return results | [
"def",
"topoSort",
"(",
"roots",
",",
"getParents",
")",
":",
"results",
"=",
"[",
"]",
"visited",
"=",
"set",
"(",
")",
"# Use iterative version to avoid stack limits for large datasets",
"stack",
"=",
"[",
"(",
"node",
",",
"0",
")",
"for",
"node",
"in",
"roots",
"]",
"while",
"stack",
":",
"current",
",",
"state",
"=",
"stack",
".",
"pop",
"(",
")",
"if",
"state",
"==",
"0",
":",
"# before recursing",
"if",
"current",
"not",
"in",
"visited",
":",
"visited",
".",
"add",
"(",
"current",
")",
"stack",
".",
"append",
"(",
"(",
"current",
",",
"1",
")",
")",
"stack",
".",
"extend",
"(",
"(",
"parent",
",",
"0",
")",
"for",
"parent",
"in",
"getParents",
"(",
"current",
")",
")",
"else",
":",
"# after recursing",
"assert",
"(",
"current",
"in",
"visited",
")",
"results",
".",
"append",
"(",
"current",
")",
"return",
"results"
] | Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node | [
"Return",
"a",
"topological",
"sorting",
"of",
"nodes",
"in",
"a",
"graph",
"."
] | python | test |
bxlab/bx-python | lib/bx/intervals/random_intervals.py | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/intervals/random_intervals.py#L98-L212 | def throw_random_private( lengths, regions, save_interval_func, allow_overlap=False, three_args=True ):
"""
(Internal function; we expect calls only through the interface functions
above)
`lengths`: A list containing the length of each interval to be generated.
`regions`: A list of regions in which intervals can be placed, sorted by
decreasing length. Elements are triples of the form (length,
start, extra). This list CAN BE MODIFIED by this function.
`save_interval_func`: A function accepting three arguments which will be
passed the (start,stop,extra) for each generated
interval.
"""
# Implementation:
# We keep a list of the regions, sorted from largest to smallest. We then
# place each length by following steps:
# (1) construct a candidate counts array (cc array)
# (2) choose a candidate at random
# (3) find region containing that candidate
# (4) map candidate to position in that region
# (5) split region if not allowing overlaps
# (6) report placed segment
#
# The cc array is only constructed if there's a change (different length
# to place, or the region list has changed). It contains, for each
# region, the total number of candidate positions in regions
# *preceding* it in the region list:
# cc[i] = sum over k in 0..(i-1) of length[k] - L + 1
# where N is the number of regions and L is the length being thrown.
# At the same time, we determine the total number of candidates (the total
# number of places the current length can be placed) and the index range
# of regions into which the length will fit.
#
# example:
# for L = 20
# i = 0 1 2 3 4 5 6 7 8 9
# length[i] = 96 66 56 50 48 40 29 17 11 8
# cc[i] = 0 77 124 161 192 221 242 X X X
# candidates = 252
# lo_rgn = 0
# hi_rgn = 6
#
# The candidate is chosen in (0..candidates-1). The candidate counts
# array allows us to do a binary search to locate the region that holds that
# candidate. Continuing the example above, we choose a random candidate
# s in (0..251). If s happens to be in (124..160), it will be mapped to
# region 2 at start position s-124.
#
# During the binary search, if we are looking at region 3, if s < cc[3]
# then the desired region is region 2 or lower. Otherwise it is region 3 or
# higher.
min_length = min( lengths )
prev_length = None # (force initial cc array construction)
cc = [0] * (len( regions ) + len(lengths) - 1)
num_thrown = 0
for length in lengths:
# construct cc array (only needed if length has changed or region list has
# changed)
if length != prev_length:
prev_length = length
assert len( cc ) >= len( regions )
candidates = 0
hi_rgn = 0
for region in regions:
rgn_len = region[0]
if rgn_len < length:
break
cc[hi_rgn] = candidates
candidates += rgn_len - length + 1
hi_rgn += 1
if candidates == 0:
raise MaxtriesException( "No region can fit an interval of length %d (we threw %d of %d)" \
% ( length, num_thrown,len( lengths ) ) )
hi_rgn -= 1
# Select a candidate
s = random.randrange( candidates )
#..
#..for ix in range( len( regions ) ):
#.. region = regions[ix]
#.. if ix <= hi_rgn: print "%2s: %5s %5s %5s" % ( ix, region[1], region[0], cc[ix] )
#.. else: print "%2s: %5s %5s %5s" % ( ix, region[1], region[0], "X" )
#..print "s = %s (of %s candidates)" % ( s, candidates )
# Locate region containing that candidate, by binary search
lo = 0
hi = hi_rgn
while hi > lo:
mid = (lo + hi + 1) // 2 # (we round up to prevent infinite loop)
if s < cc[mid]: hi = mid-1 # (s < num candidates from 0..mid-1)
else: lo = mid # (s >= num candidates from 0..mid-1)
s -= cc[lo]
# If we are not allowing overlaps we will remove the placed interval
# from the region list
if allow_overlap:
rgn_length, rgn_start, rgn_extra = regions[lo]
else:
# Remove the chosen region and split
rgn_length, rgn_start, rgn_extra = regions.pop( lo )
rgn_end = rgn_start + rgn_length
assert s >= 0
assert rgn_start + s + length <= rgn_end, "Expected: %d + %d + %d == %d <= %d" % ( rgn_start, s, length, rgn_start + s + length, rgn_end )
regions.reverse()
if s >= min_length:
bisect.insort( regions, ( s, rgn_start, rgn_extra ) )
if s + length <= rgn_length - min_length:
bisect.insort( regions, ( rgn_length - ( s + length ), rgn_start + s + length, rgn_extra ) )
regions.reverse()
prev_length = None # (force cc array construction)
# Save the new interval
if (three_args):
save_interval_func( rgn_start + s, rgn_start + s + length, rgn_extra )
else:
save_interval_func( rgn_start + s, rgn_start + s + length )
num_thrown += 1 | [
"def",
"throw_random_private",
"(",
"lengths",
",",
"regions",
",",
"save_interval_func",
",",
"allow_overlap",
"=",
"False",
",",
"three_args",
"=",
"True",
")",
":",
"# Implementation:",
"# We keep a list of the regions, sorted from largest to smallest. We then",
"# place each length by following steps:",
"# (1) construct a candidate counts array (cc array)",
"# (2) choose a candidate at random",
"# (3) find region containing that candidate",
"# (4) map candidate to position in that region",
"# (5) split region if not allowing overlaps",
"# (6) report placed segment",
"#",
"# The cc array is only constructed if there's a change (different length",
"# to place, or the region list has changed). It contains, for each",
"# region, the total number of number of candidate positions in regions",
"# *preceding* it in the region list:",
"# cc[i] = sum over k in 0..(i-1) of length[i] - L + 1",
"# where N is the number of regions and L is the length being thrown.",
"# At the same time, we determine the total number of candidates (the total",
"# number of places the current length can be placed) and the index range",
"# of regions into which the length will fit.",
"#",
"# example:",
"# for L = 20",
"# i = 0 1 2 3 4 5 6 7 8 9",
"# length[i] = 96 66 56 50 48 40 29 17 11 8",
"# cc[i] = 0 77 124 161 192 221 242 X X X",
"# candidates = 252",
"# lo_rgn = 0",
"# hi_rgn = 6",
"#",
"# The candidate is chosen in (0..candidates-1). The candidate counts",
"# array allows us to do a binary search to locate the region that holds that",
"# candidate. Continuing the example above, we choose a random candidate",
"# s in (0..251). If s happens to be in (124..160), it will be mapped to",
"# region 2 at start position s-124.",
"#",
"# During the binary search, if we are looking at region 3, if s < cc[3]",
"# then the desired region is region 2 or lower. Otherwise it is region 3 or",
"# higher.",
"min_length",
"=",
"min",
"(",
"lengths",
")",
"prev_length",
"=",
"None",
"# (force initial cc array construction)",
"cc",
"=",
"[",
"0",
"]",
"*",
"(",
"len",
"(",
"regions",
")",
"+",
"len",
"(",
"lengths",
")",
"-",
"1",
")",
"num_thrown",
"=",
"0",
"for",
"length",
"in",
"lengths",
":",
"# construct cc array (only needed if length has changed or region list has",
"# changed)",
"if",
"length",
"!=",
"prev_length",
":",
"prev_length",
"=",
"length",
"assert",
"len",
"(",
"cc",
")",
">=",
"len",
"(",
"regions",
")",
"candidates",
"=",
"0",
"hi_rgn",
"=",
"0",
"for",
"region",
"in",
"regions",
":",
"rgn_len",
"=",
"region",
"[",
"0",
"]",
"if",
"rgn_len",
"<",
"length",
":",
"break",
"cc",
"[",
"hi_rgn",
"]",
"=",
"candidates",
"candidates",
"+=",
"rgn_len",
"-",
"length",
"+",
"1",
"hi_rgn",
"+=",
"1",
"if",
"candidates",
"==",
"0",
":",
"raise",
"MaxtriesException",
"(",
"\"No region can fit an interval of length %d (we threw %d of %d)\"",
"%",
"(",
"length",
",",
"num_thrown",
",",
"len",
"(",
"lengths",
")",
")",
")",
"hi_rgn",
"-=",
"1",
"# Select a candidate",
"s",
"=",
"random",
".",
"randrange",
"(",
"candidates",
")",
"#..",
"#..for ix in range( len( regions ) ):",
"#.. region = regions[ix]",
"#.. if ix <= hi_rgn: print \"%2s: %5s %5s %5s\" % ( ix, region[1], region[0], cc[ix] )",
"#.. else: print \"%2s: %5s %5s %5s\" % ( ix, region[1], region[0], \"X\" )",
"#..print \"s = %s (of %s candidates)\" % ( s, candidates )",
"# Locate region containing that candidate, by binary search",
"lo",
"=",
"0",
"hi",
"=",
"hi_rgn",
"while",
"hi",
">",
"lo",
":",
"mid",
"=",
"(",
"lo",
"+",
"hi",
"+",
"1",
")",
"/",
"2",
"# (we round up to prevent infinite loop)",
"if",
"s",
"<",
"cc",
"[",
"mid",
"]",
":",
"hi",
"=",
"mid",
"-",
"1",
"# (s < num candidates from 0..mid-1)",
"else",
":",
"lo",
"=",
"mid",
"# (s >= num candidates from 0..mid-1)",
"s",
"-=",
"cc",
"[",
"lo",
"]",
"# If we are not allowing overlaps we will remove the placed interval",
"# from the region list",
"if",
"allow_overlap",
":",
"rgn_length",
",",
"rgn_start",
",",
"rgn_extra",
"=",
"regions",
"[",
"lo",
"]",
"else",
":",
"# Remove the chosen region and split",
"rgn_length",
",",
"rgn_start",
",",
"rgn_extra",
"=",
"regions",
".",
"pop",
"(",
"lo",
")",
"rgn_end",
"=",
"rgn_start",
"+",
"rgn_length",
"assert",
"s",
">=",
"0",
"assert",
"rgn_start",
"+",
"s",
"+",
"length",
"<=",
"rgn_end",
",",
"\"Expected: %d + %d + %d == %d <= %d\"",
"%",
"(",
"rgn_start",
",",
"s",
",",
"length",
",",
"rgn_start",
"+",
"s",
"+",
"length",
",",
"rgn_end",
")",
"regions",
".",
"reverse",
"(",
")",
"if",
"s",
">=",
"min_length",
":",
"bisect",
".",
"insort",
"(",
"regions",
",",
"(",
"s",
",",
"rgn_start",
",",
"rgn_extra",
")",
")",
"if",
"s",
"+",
"length",
"<=",
"rgn_length",
"-",
"min_length",
":",
"bisect",
".",
"insort",
"(",
"regions",
",",
"(",
"rgn_length",
"-",
"(",
"s",
"+",
"length",
")",
",",
"rgn_start",
"+",
"s",
"+",
"length",
",",
"rgn_extra",
")",
")",
"regions",
".",
"reverse",
"(",
")",
"prev_length",
"=",
"None",
"# (force cc array construction)",
"# Save the new interval",
"if",
"(",
"three_args",
")",
":",
"save_interval_func",
"(",
"rgn_start",
"+",
"s",
",",
"rgn_start",
"+",
"s",
"+",
"length",
",",
"rgn_extra",
")",
"else",
":",
"save_interval_func",
"(",
"rgn_start",
"+",
"s",
",",
"rgn_start",
"+",
"s",
"+",
"length",
")",
"num_thrown",
"+=",
"1"
] | (Internal function; we expect calls only through the interface functions
above)
`lengths`: A list containing the length of each interval to be generated.
`regions`: A list of regions in which intervals can be placed, sorted by
decreasing length. Elements are triples of the form (length,
start, extra). This list CAN BE MODIFIED by this function.
`save_interval_func`: A function accepting three arguments which will be
passed the (start,stop,extra) for each generated
interval. | [
"(",
"Internal",
"function",
";",
"we",
"expect",
"calls",
"only",
"through",
"the",
"interface",
"functions",
"above",
")",
"lengths",
":",
"A",
"list",
"containing",
"the",
"length",
"of",
"each",
"interval",
"to",
"be",
"generated",
".",
"regions",
":",
"A",
"list",
"of",
"regions",
"in",
"which",
"intervals",
"can",
"be",
"placed",
"sorted",
"by",
"decreasing",
"length",
".",
"Elements",
"are",
"triples",
"of",
"the",
"form",
"(",
"length",
"start",
"extra",
")",
"This",
"list",
"CAN",
"BE",
"MODIFIED",
"by",
"this",
"function",
".",
"save_interval_func",
":",
"A",
"function",
"accepting",
"three",
"arguments",
"which",
"will",
"be",
"passed",
"the",
"(",
"start",
"stop",
"extra",
")",
"for",
"each",
"generated",
"interval",
"."
] | python | train |
LucidtechAI/las-sdk-python | las/api_client.py | https://github.com/LucidtechAI/las-sdk-python/blob/5f39dee7983baff28a1deb93c12d36414d835d12/las/api_client.py#L30-L57 | def predict(self, document_path: str, model_name: str, consent_id: str = None) -> Prediction:
"""Run inference and create prediction on document.
This method takes care of creating and uploading a document specified by document_path,
as well as running inference using model specified by model_name to create prediction on the document.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> api_client.predict(document_path='document.jpeg', model_name='invoice')
:param document_path: Path to document to run inference on
:type document_path: str
:param model_name: The name of the model to use for inference
:type model_name: str
:param consent_id: An identifier to mark the owner of the document handle
:type consent_id: str
:return: Prediction on document
:rtype: Prediction
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
content_type = self._get_content_type(document_path)
consent_id = consent_id or str(uuid4())
document_id = self._upload_document(document_path, content_type, consent_id)
prediction_response = self.post_predictions(document_id, model_name)
return Prediction(document_id, consent_id, model_name, prediction_response) | [
"def",
"predict",
"(",
"self",
",",
"document_path",
":",
"str",
",",
"model_name",
":",
"str",
",",
"consent_id",
":",
"str",
"=",
"None",
")",
"->",
"Prediction",
":",
"content_type",
"=",
"self",
".",
"_get_content_type",
"(",
"document_path",
")",
"consent_id",
"=",
"consent_id",
"or",
"str",
"(",
"uuid4",
"(",
")",
")",
"document_id",
"=",
"self",
".",
"_upload_document",
"(",
"document_path",
",",
"content_type",
",",
"consent_id",
")",
"prediction_response",
"=",
"self",
".",
"post_predictions",
"(",
"document_id",
",",
"model_name",
")",
"return",
"Prediction",
"(",
"document_id",
",",
"consent_id",
",",
"model_name",
",",
"prediction_response",
")"
] | Run inference and create prediction on document.
This method takes care of creating and uploading a document specified by document_path,
as well as running inference using model specified by model_name to create prediction on the document.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> api_client.predict(document_path='document.jpeg', model_name='invoice')
:param document_path: Path to document to run inference on
:type document_path: str
:param model_name: The name of the model to use for inference
:type model_name: str
:param consent_id: An identifier to mark the owner of the document handle
:type consent_id: str
:return: Prediction on document
:rtype: Prediction
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests | [
"Run",
"inference",
"and",
"create",
"prediction",
"on",
"document",
".",
"This",
"method",
"takes",
"care",
"of",
"creating",
"and",
"uploading",
"a",
"document",
"specified",
"by",
"document_path",
".",
"as",
"well",
"as",
"running",
"inference",
"using",
"model",
"specified",
"by",
"model_name",
"to",
"create",
"prediction",
"on",
"the",
"document",
"."
] | python | train |
sentinel-hub/sentinelhub-py | sentinelhub/geopedia.py | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geopedia.py#L220-L228 | def get_request(self, request):
"""Get a list of DownloadRequests for all data that are under the given field in the table of a Geopedia layer.
:return: list of items which have to be downloaded
:rtype: list(DownloadRequest)
"""
request.layer = self._parse_layer(request.layer, return_wms_name=True)
return super().get_request(request) | [
"def",
"get_request",
"(",
"self",
",",
"request",
")",
":",
"request",
".",
"layer",
"=",
"self",
".",
"_parse_layer",
"(",
"request",
".",
"layer",
",",
"return_wms_name",
"=",
"True",
")",
"return",
"super",
"(",
")",
".",
"get_request",
"(",
"request",
")"
] | Get a list of DownloadRequests for all data that are under the given field in the table of a Geopedia layer.
:return: list of items which have to be downloaded
:rtype: list(DownloadRequest) | [
"Get",
"a",
"list",
"of",
"DownloadRequests",
"for",
"all",
"data",
"that",
"are",
"under",
"the",
"given",
"field",
"in",
"the",
"table",
"of",
"a",
"Geopedia",
"layer",
"."
] | python | train |
saltant-org/saltant-py | saltant/models/resource.py | https://github.com/saltant-org/saltant-py/blob/bf3bdbc4ec9c772c7f621f8bd6a76c5932af68be/saltant/models/resource.py#L194-L220 | def validate_request_success(
response_text, request_url, status_code, expected_status_code
):
"""Validates that a request was successful.
Args:
response_text (str): The response body of the request.
request_url (str): The URL the request was made at.
status_code (int): The status code of the response.
expected_status_code (int): The expected status code of the
response.
Raises:
:class:`saltant.exceptions.BadHttpRequestError`: The HTTP
request failed.
"""
try:
assert status_code == expected_status_code
except AssertionError:
msg = (
"Request to {url} failed with status {status_code}:\n"
"The reponse from the request was as follows:\n\n"
"{content}"
).format(
url=request_url, status_code=status_code, content=response_text
)
raise BadHttpRequestError(msg) | [
"def",
"validate_request_success",
"(",
"response_text",
",",
"request_url",
",",
"status_code",
",",
"expected_status_code",
")",
":",
"try",
":",
"assert",
"status_code",
"==",
"expected_status_code",
"except",
"AssertionError",
":",
"msg",
"=",
"(",
"\"Request to {url} failed with status {status_code}:\\n\"",
"\"The reponse from the request was as follows:\\n\\n\"",
"\"{content}\"",
")",
".",
"format",
"(",
"url",
"=",
"request_url",
",",
"status_code",
"=",
"status_code",
",",
"content",
"=",
"response_text",
")",
"raise",
"BadHttpRequestError",
"(",
"msg",
")"
] | Validates that a request was successful.
Args:
response_text (str): The response body of the request.
request_url (str): The URL the request was made at.
status_code (int): The status code of the response.
expected_status_code (int): The expected status code of the
response.
Raises:
:class:`saltant.exceptions.BadHttpRequestError`: The HTTP
request failed. | [
"Validates",
"that",
"a",
"request",
"was",
"successful",
"."
] | python | train |
bitesofcode/projexui | projexui/widgets/xganttwidget/xganttwidget.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidget.py#L255-L266 | def centerOnDateTime(self, dtime):
"""
Centers the view on a given datetime for the gantt widget.
:param dtime | <QDateTime>
"""
view = self.uiGanttVIEW
scene = view.scene()
point = view.mapToScene(0, 0)
x = scene.datetimeXPos(dtime)
y = point.y()
view.centerOn(x, y) | [
"def",
"centerOnDateTime",
"(",
"self",
",",
"dtime",
")",
":",
"view",
"=",
"self",
".",
"uiGanttVIEW",
"scene",
"=",
"view",
".",
"scene",
"(",
")",
"point",
"=",
"view",
".",
"mapToScene",
"(",
"0",
",",
"0",
")",
"x",
"=",
"scene",
".",
"datetimeXPos",
"(",
"dtime",
")",
"y",
"=",
"point",
".",
"y",
"(",
")",
"view",
".",
"centerOn",
"(",
"x",
",",
"y",
")"
] | Centers the view on a given datetime for the gantt widget.
:param dtime | <QDateTime> | [
"Centers",
"the",
"view",
"on",
"a",
"given",
"datetime",
"for",
"the",
"gantt",
"widget",
".",
":",
"param",
"dtime",
"|",
"<QDateTime",
">"
] | python | train |
sonyxperiadev/pygerrit | pygerrit/events.py | https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/events.py#L40-L56 | def register(cls, name):
""" Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered.
"""
def decorate(klazz):
""" Decorator. """
if name in cls._events:
raise GerritError("Duplicate event: %s" % name)
cls._events[name] = [klazz.__module__, klazz.__name__]
klazz.name = name
return klazz
return decorate | [
"def",
"register",
"(",
"cls",
",",
"name",
")",
":",
"def",
"decorate",
"(",
"klazz",
")",
":",
"\"\"\" Decorator. \"\"\"",
"if",
"name",
"in",
"cls",
".",
"_events",
":",
"raise",
"GerritError",
"(",
"\"Duplicate event: %s\"",
"%",
"name",
")",
"cls",
".",
"_events",
"[",
"name",
"]",
"=",
"[",
"klazz",
".",
"__module__",
",",
"klazz",
".",
"__name__",
"]",
"klazz",
".",
"name",
"=",
"name",
"return",
"klazz",
"return",
"decorate"
] | Decorator to register the event identified by `name`.
Return the decorated class.
Raise GerritError if the event is already registered. | [
"Decorator",
"to",
"register",
"the",
"event",
"identified",
"by",
"name",
"."
] | python | train |
polyaxon/hestia | hestia/date_formatter.py | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/date_formatter.py#L84-L96 | def extract_datetime_hour(cls, datetime_str):
"""
Tries to extract a `datetime` object from the given string, including only hours.
Raises `DateTimeFormatterException` if the extraction fails.
"""
if not datetime_str:
raise DateTimeFormatterException('datetime_str must be a valid string')
try:
return cls._extract_timestamp(datetime_str, cls.DATETIME_HOUR_FORMAT)
except (TypeError, ValueError):
raise DateTimeFormatterException('Invalid datetime string {}.'.format(datetime_str)) | [
"def",
"extract_datetime_hour",
"(",
"cls",
",",
"datetime_str",
")",
":",
"if",
"not",
"datetime_str",
":",
"raise",
"DateTimeFormatterException",
"(",
"'datetime_str must a valid string'",
")",
"try",
":",
"return",
"cls",
".",
"_extract_timestamp",
"(",
"datetime_str",
",",
"cls",
".",
"DATETIME_HOUR_FORMAT",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"DateTimeFormatterException",
"(",
"'Invalid datetime string {}.'",
".",
"format",
"(",
"datetime_str",
")",
")"
] | Tries to extract a `datetime` object from the given string, including only hours.
Raises `DateTimeFormatterException` if the extraction fails. | [
"Tries",
"to",
"extract",
"a",
"datetime",
"object",
"from",
"the",
"given",
"string",
"including",
"only",
"hours",
"."
] | python | train |
jaraco/rst.linker | rst/linker.py | https://github.com/jaraco/rst.linker/blob/5d0ff09756c325c46c471c217bdefcfd11ce1de4/rst/linker.py#L142-L153 | def from_definition(cls, defn, names={}):
"""
A definition may contain the following members:
- using: a dictionary of variables available for substitution
- replace: a list of replacement definitions.
"""
repls = map(Repl.from_defn, defn.get('replace', []))
self = cls(repls)
vars(self).update(names)
vars(self).update(defn.get('using', {}))
return self | [
"def",
"from_definition",
"(",
"cls",
",",
"defn",
",",
"names",
"=",
"{",
"}",
")",
":",
"repls",
"=",
"map",
"(",
"Repl",
".",
"from_defn",
",",
"defn",
".",
"get",
"(",
"'replace'",
",",
"[",
"]",
")",
")",
"self",
"=",
"cls",
"(",
"repls",
")",
"vars",
"(",
"self",
")",
".",
"update",
"(",
"names",
")",
"vars",
"(",
"self",
")",
".",
"update",
"(",
"defn",
".",
"get",
"(",
"'using'",
",",
"{",
"}",
")",
")",
"return",
"self"
] | A definition may contain the following members:
- using: a dictionary of variables available for substitution
- replace: a list of replacement definitions. | [
"A",
"definition",
"may",
"contain",
"the",
"following",
"members",
":"
] | python | train |
maartenbreddels/ipyvolume | ipyvolume/examples.py | https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/examples.py#L161-L229 | def brain(
draw=True, show=True, fiducial=True, flat=True, inflated=True, subject='S1', interval=1000, uv=True, color=None
):
"""Show a human brain model.
Requirement:
$ pip install https://github.com/gallantlab/pycortex
"""
import ipyvolume as ipv
try:
import cortex
except Exception:
warnings.warn("it seems pycortex is not installed, which is needed for this example")
raise
xlist, ylist, zlist = [], [], []
polys_list = []
def add(pts, polys):
xlist.append(pts[:, 0])
ylist.append(pts[:, 1])
zlist.append(pts[:, 2])
polys_list.append(polys)
def n(x):
return (x - x.min()) / x.ptp()
if fiducial or color is True:
pts, polys = cortex.db.get_surf('S1', 'fiducial', merge=True)
x, y, z = pts.T
r = n(x)
g = n(y)
b = n(z)
if color is True:
color = np.array([r, g, b]).T.copy()
else:
color = None
if fiducial:
add(pts, polys)
else:
if color is False:
color = None
if inflated:
add(*cortex.db.get_surf('S1', 'inflated', merge=True, nudge=True))
u = v = None
if flat or uv:
pts, polys = cortex.db.get_surf('S1', 'flat', merge=True, nudge=True)
x, y, z = pts.T
u = n(x)
v = n(y)
if flat:
add(pts, polys)
polys_list.sort(key=lambda x: len(x))
polys = polys_list[0]
if draw:
if color is None:
mesh = ipv.plot_trisurf(xlist, ylist, zlist, polys, u=u, v=v)
else:
mesh = ipv.plot_trisurf(xlist, ylist, zlist, polys, color=color, u=u, v=v)
if show:
if len(x) > 1:
ipv.animation_control(mesh, interval=interval)
ipv.squarelim()
ipv.show()
return mesh
else:
return xlist, ylist, zlist, polys | [
"def",
"brain",
"(",
"draw",
"=",
"True",
",",
"show",
"=",
"True",
",",
"fiducial",
"=",
"True",
",",
"flat",
"=",
"True",
",",
"inflated",
"=",
"True",
",",
"subject",
"=",
"'S1'",
",",
"interval",
"=",
"1000",
",",
"uv",
"=",
"True",
",",
"color",
"=",
"None",
")",
":",
"import",
"ipyvolume",
"as",
"ipv",
"try",
":",
"import",
"cortex",
"except",
":",
"warnings",
".",
"warn",
"(",
"\"it seems pycortex is not installed, which is needed for this example\"",
")",
"raise",
"xlist",
",",
"ylist",
",",
"zlist",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"polys_list",
"=",
"[",
"]",
"def",
"add",
"(",
"pts",
",",
"polys",
")",
":",
"xlist",
".",
"append",
"(",
"pts",
"[",
":",
",",
"0",
"]",
")",
"ylist",
".",
"append",
"(",
"pts",
"[",
":",
",",
"1",
"]",
")",
"zlist",
".",
"append",
"(",
"pts",
"[",
":",
",",
"2",
"]",
")",
"polys_list",
".",
"append",
"(",
"polys",
")",
"def",
"n",
"(",
"x",
")",
":",
"return",
"(",
"x",
"-",
"x",
".",
"min",
"(",
")",
")",
"/",
"x",
".",
"ptp",
"(",
")",
"if",
"fiducial",
"or",
"color",
"is",
"True",
":",
"pts",
",",
"polys",
"=",
"cortex",
".",
"db",
".",
"get_surf",
"(",
"'S1'",
",",
"'fiducial'",
",",
"merge",
"=",
"True",
")",
"x",
",",
"y",
",",
"z",
"=",
"pts",
".",
"T",
"r",
"=",
"n",
"(",
"x",
")",
"g",
"=",
"n",
"(",
"y",
")",
"b",
"=",
"n",
"(",
"z",
")",
"if",
"color",
"is",
"True",
":",
"color",
"=",
"np",
".",
"array",
"(",
"[",
"r",
",",
"g",
",",
"b",
"]",
")",
".",
"T",
".",
"copy",
"(",
")",
"else",
":",
"color",
"=",
"None",
"if",
"fiducial",
":",
"add",
"(",
"pts",
",",
"polys",
")",
"else",
":",
"if",
"color",
"is",
"False",
":",
"color",
"=",
"None",
"if",
"inflated",
":",
"add",
"(",
"*",
"cortex",
".",
"db",
".",
"get_surf",
"(",
"'S1'",
",",
"'inflated'",
",",
"merge",
"=",
"True",
",",
"nudge",
"=",
"True",
")",
")",
"u",
"=",
"v",
"=",
"None",
"if",
"flat",
"or",
"uv",
":",
"pts",
",",
"polys",
"=",
"cortex",
".",
"db",
".",
"get_surf",
"(",
"'S1'",
",",
"'flat'",
",",
"merge",
"=",
"True",
",",
"nudge",
"=",
"True",
")",
"x",
",",
"y",
",",
"z",
"=",
"pts",
".",
"T",
"u",
"=",
"n",
"(",
"x",
")",
"v",
"=",
"n",
"(",
"y",
")",
"if",
"flat",
":",
"add",
"(",
"pts",
",",
"polys",
")",
"polys_list",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
")",
"polys",
"=",
"polys_list",
"[",
"0",
"]",
"if",
"draw",
":",
"if",
"color",
"is",
"None",
":",
"mesh",
"=",
"ipv",
".",
"plot_trisurf",
"(",
"xlist",
",",
"ylist",
",",
"zlist",
",",
"polys",
",",
"u",
"=",
"u",
",",
"v",
"=",
"v",
")",
"else",
":",
"mesh",
"=",
"ipv",
".",
"plot_trisurf",
"(",
"xlist",
",",
"ylist",
",",
"zlist",
",",
"polys",
",",
"color",
"=",
"color",
",",
"u",
"=",
"u",
",",
"v",
"=",
"v",
")",
"if",
"show",
":",
"if",
"len",
"(",
"x",
")",
">",
"1",
":",
"ipv",
".",
"animation_control",
"(",
"mesh",
",",
"interval",
"=",
"interval",
")",
"ipv",
".",
"squarelim",
"(",
")",
"ipv",
".",
"show",
"(",
")",
"return",
"mesh",
"else",
":",
"return",
"xlist",
",",
"ylist",
",",
"zlist",
",",
"polys"
] | Show a human brain model.
Requirement:
$ pip install https://github.com/gallantlab/pycortex | [
"Show",
"a",
"human",
"brain",
"model",
"."
] | python | train |
Parsl/parsl | parsl/providers/aws/aws.py | https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/providers/aws/aws.py#L207-L220 | def write_state_file(self):
"""Save information that must persist to a file.
We do not want to create a new VPC and new identical security groups, so we save
information about them in a file between runs.
"""
fh = open('awsproviderstate.json', 'w')
state = {}
state['vpcID'] = self.vpc_id
state['sgID'] = self.sg_id
state['snIDs'] = self.sn_ids
state['instances'] = self.instances
state["instanceState"] = self.instance_states
fh.write(json.dumps(state, indent=4)) | [
"def",
"write_state_file",
"(",
"self",
")",
":",
"fh",
"=",
"open",
"(",
"'awsproviderstate.json'",
",",
"'w'",
")",
"state",
"=",
"{",
"}",
"state",
"[",
"'vpcID'",
"]",
"=",
"self",
".",
"vpc_id",
"state",
"[",
"'sgID'",
"]",
"=",
"self",
".",
"sg_id",
"state",
"[",
"'snIDs'",
"]",
"=",
"self",
".",
"sn_ids",
"state",
"[",
"'instances'",
"]",
"=",
"self",
".",
"instances",
"state",
"[",
"\"instanceState\"",
"]",
"=",
"self",
".",
"instance_states",
"fh",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"state",
",",
"indent",
"=",
"4",
")",
")"
] | Save information that must persist to a file.
We do not want to create a new VPC and new identical security groups, so we save
information about them in a file between runs. | [
"Save",
"information",
"that",
"must",
"persist",
"to",
"a",
"file",
"."
] | python | valid |
edoburu/sphinxcontrib-django | sphinxcontrib_django/docstrings.py | https://github.com/edoburu/sphinxcontrib-django/blob/5116ac7f1510a76b1ff58cf7f8d2fab7d8bbe2a9/sphinxcontrib_django/docstrings.py#L88-L110 | def improve_model_docstring(app, what, name, obj, options, lines):
"""Hook that improves the autodoc docstrings for Django models.
:type app: sphinx.application.Sphinx
:param what: The parent type, ``class`` or ``module``
:type what: str
:param name: The dotted path to the child method/attribute.
:type name: str
:param obj: The Python object that i s being documented.
:param options: The current autodoc settings.
:type options: dict
:param lines: The current documentation lines
:type lines: list
"""
if what == 'class':
_improve_class_docs(app, obj, lines)
elif what == 'attribute':
_improve_attribute_docs(obj, name, lines)
elif what == 'method':
_improve_method_docs(obj, name, lines)
# Return the extended docstring
return lines | [
"def",
"improve_model_docstring",
"(",
"app",
",",
"what",
",",
"name",
",",
"obj",
",",
"options",
",",
"lines",
")",
":",
"if",
"what",
"==",
"'class'",
":",
"_improve_class_docs",
"(",
"app",
",",
"obj",
",",
"lines",
")",
"elif",
"what",
"==",
"'attribute'",
":",
"_improve_attribute_docs",
"(",
"obj",
",",
"name",
",",
"lines",
")",
"elif",
"what",
"==",
"'method'",
":",
"_improve_method_docs",
"(",
"obj",
",",
"name",
",",
"lines",
")",
"# Return the extended docstring",
"return",
"lines"
] | Hook that improves the autodoc docstrings for Django models.
:type app: sphinx.application.Sphinx
:param what: The parent type, ``class`` or ``module``
:type what: str
:param name: The dotted path to the child method/attribute.
:type name: str
:param obj: The Python object that is being documented.
:param options: The current autodoc settings.
:type options: dict
:param lines: The current documentation lines
:type lines: list | [
"Hook",
"that",
"improves",
"the",
"autodoc",
"docstrings",
"for",
"Django",
"models",
"."
] | python | train |
singularityhub/singularity-cli | spython/main/parse/recipe.py | https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/recipe.py#L287-L305 | def _write_script(path, lines, chmod=True):
    '''write a script with some lines content to path in the image. This
    is done by way of adding echo statements to the install section.
    Parameters
    ==========
    path: the path to the file to write
    lines: the lines to echo to the file
    chmod: If true, change permission to make u+x
    '''
    if len(lines) > 0:
        lastline = lines.pop()
        for line in lines:
            self.install.append('echo "%s" >> %s' %path)
        self.install.append(lastline)
        if chmod is True:
            self.install.append('chmod u+x %s' %path) | [
"def",
"_write_script",
"(",
"path",
",",
"lines",
",",
"chmod",
"=",
"True",
")",
":",
"if",
"len",
"(",
"lines",
")",
">",
"0",
":",
"lastline",
"=",
"lines",
".",
"pop",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"self",
".",
"install",
".",
"append",
"(",
"'echo \"%s\" >> %s'",
"%",
"path",
")",
"self",
".",
"install",
".",
"append",
"(",
"lastline",
")",
"if",
"chmod",
"is",
"True",
":",
"self",
".",
"install",
".",
"append",
"(",
"'chmod u+x %s'",
"%",
"path",
")"
] | write a script with some lines content to path in the image. This
is done by way of adding echo statements to the install section.
Parameters
==========
path: the path to the file to write
lines: the lines to echo to the file
chmod: If true, change permission to make u+x | [
"write",
"a",
"script",
"with",
"some",
"lines",
"content",
"to",
"path",
"in",
"the",
"image",
".",
"This",
"is",
"done",
"by",
"way",
"of",
"adding",
"echo",
"statements",
"to",
"the",
"install",
"section",
"."
] | python | train |
nschloe/matplotlib2tikz | matplotlib2tikz/save.py | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L337-L415 | def _recurse(data, obj):
"""Iterates over all children of the current object, gathers the contents
contributing to the resulting PGFPlots file, and returns those.
"""
content = _ContentManager()
for child in obj.get_children():
# Some patches are Spines, too; skip those entirely.
# See <https://github.com/nschloe/matplotlib2tikz/issues/277>.
if isinstance(child, mpl.spines.Spine):
continue
if isinstance(child, mpl.axes.Axes):
ax = axes.Axes(data, child)
if ax.is_colorbar:
continue
# add extra axis options
if data["extra axis options [base]"]:
ax.axis_options.extend(data["extra axis options [base]"])
data["current mpl axes obj"] = child
data["current axes"] = ax
# Run through the child objects, gather the content.
data, children_content = _recurse(data, child)
# populate content and add axis environment if desired
if data["add axis environment"]:
content.extend(
ax.get_begin_code() + children_content + [ax.get_end_code(data)], 0
)
else:
content.extend(children_content, 0)
# print axis environment options, if told to show infos
if data["show_info"]:
print("=========================================================")
print("These would have been the properties of the environment:")
print("".join(ax.get_begin_code()[1:]))
print("=========================================================")
elif isinstance(child, mpl.lines.Line2D):
data, cont = line2d.draw_line2d(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.image.AxesImage):
data, cont = img.draw_image(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.patches.Patch):
data, cont = patch.draw_patch(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(
child, (mpl.collections.PatchCollection, mpl.collections.PolyCollection)
):
data, cont = patch.draw_patchcollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.PathCollection):
data, cont = path.draw_pathcollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.LineCollection):
data, cont = line2d.draw_linecollection(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.collections.QuadMesh):
data, cont = qmsh.draw_quadmesh(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, mpl.legend.Legend):
data = legend.draw_legend(data, child)
if data["legend colors"]:
content.extend(data["legend colors"], 0)
elif isinstance(child, (mpl.text.Text, mpl.text.Annotation)):
data, cont = text.draw_text(data, child)
content.extend(cont, child.get_zorder())
elif isinstance(child, (mpl.axis.XAxis, mpl.axis.YAxis)):
pass
else:
warnings.warn(
"matplotlib2tikz: Don't know how to handle object {}.".format(
type(child)
)
)
return data, content.flatten() | [
"def",
"_recurse",
"(",
"data",
",",
"obj",
")",
":",
"content",
"=",
"_ContentManager",
"(",
")",
"for",
"child",
"in",
"obj",
".",
"get_children",
"(",
")",
":",
"# Some patches are Spines, too; skip those entirely.",
"# See <https://github.com/nschloe/matplotlib2tikz/issues/277>.",
"if",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"spines",
".",
"Spine",
")",
":",
"continue",
"if",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"axes",
".",
"Axes",
")",
":",
"ax",
"=",
"axes",
".",
"Axes",
"(",
"data",
",",
"child",
")",
"if",
"ax",
".",
"is_colorbar",
":",
"continue",
"# add extra axis options",
"if",
"data",
"[",
"\"extra axis options [base]\"",
"]",
":",
"ax",
".",
"axis_options",
".",
"extend",
"(",
"data",
"[",
"\"extra axis options [base]\"",
"]",
")",
"data",
"[",
"\"current mpl axes obj\"",
"]",
"=",
"child",
"data",
"[",
"\"current axes\"",
"]",
"=",
"ax",
"# Run through the child objects, gather the content.",
"data",
",",
"children_content",
"=",
"_recurse",
"(",
"data",
",",
"child",
")",
"# populate content and add axis environment if desired",
"if",
"data",
"[",
"\"add axis environment\"",
"]",
":",
"content",
".",
"extend",
"(",
"ax",
".",
"get_begin_code",
"(",
")",
"+",
"children_content",
"+",
"[",
"ax",
".",
"get_end_code",
"(",
"data",
")",
"]",
",",
"0",
")",
"else",
":",
"content",
".",
"extend",
"(",
"children_content",
",",
"0",
")",
"# print axis environment options, if told to show infos",
"if",
"data",
"[",
"\"show_info\"",
"]",
":",
"print",
"(",
"\"=========================================================\"",
")",
"print",
"(",
"\"These would have been the properties of the environment:\"",
")",
"print",
"(",
"\"\"",
".",
"join",
"(",
"ax",
".",
"get_begin_code",
"(",
")",
"[",
"1",
":",
"]",
")",
")",
"print",
"(",
"\"=========================================================\"",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"lines",
".",
"Line2D",
")",
":",
"data",
",",
"cont",
"=",
"line2d",
".",
"draw_line2d",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"image",
".",
"AxesImage",
")",
":",
"data",
",",
"cont",
"=",
"img",
".",
"draw_image",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"patches",
".",
"Patch",
")",
":",
"data",
",",
"cont",
"=",
"patch",
".",
"draw_patch",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"(",
"mpl",
".",
"collections",
".",
"PatchCollection",
",",
"mpl",
".",
"collections",
".",
"PolyCollection",
")",
")",
":",
"data",
",",
"cont",
"=",
"patch",
".",
"draw_patchcollection",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"collections",
".",
"PathCollection",
")",
":",
"data",
",",
"cont",
"=",
"path",
".",
"draw_pathcollection",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"collections",
".",
"LineCollection",
")",
":",
"data",
",",
"cont",
"=",
"line2d",
".",
"draw_linecollection",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"collections",
".",
"QuadMesh",
")",
":",
"data",
",",
"cont",
"=",
"qmsh",
".",
"draw_quadmesh",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"mpl",
".",
"legend",
".",
"Legend",
")",
":",
"data",
"=",
"legend",
".",
"draw_legend",
"(",
"data",
",",
"child",
")",
"if",
"data",
"[",
"\"legend colors\"",
"]",
":",
"content",
".",
"extend",
"(",
"data",
"[",
"\"legend colors\"",
"]",
",",
"0",
")",
"elif",
"isinstance",
"(",
"child",
",",
"(",
"mpl",
".",
"text",
".",
"Text",
",",
"mpl",
".",
"text",
".",
"Annotation",
")",
")",
":",
"data",
",",
"cont",
"=",
"text",
".",
"draw_text",
"(",
"data",
",",
"child",
")",
"content",
".",
"extend",
"(",
"cont",
",",
"child",
".",
"get_zorder",
"(",
")",
")",
"elif",
"isinstance",
"(",
"child",
",",
"(",
"mpl",
".",
"axis",
".",
"XAxis",
",",
"mpl",
".",
"axis",
".",
"YAxis",
")",
")",
":",
"pass",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"matplotlib2tikz: Don't know how to handle object {}.\"",
".",
"format",
"(",
"type",
"(",
"child",
")",
")",
")",
"return",
"data",
",",
"content",
".",
"flatten",
"(",
")"
] | Iterates over all children of the current object, gathers the contents
contributing to the resulting PGFPlots file, and returns those. | [
"Iterates",
"over",
"all",
"children",
"of",
"the",
"current",
"object",
"gathers",
"the",
"contents",
"contributing",
"to",
"the",
"resulting",
"PGFPlots",
"file",
"and",
"returns",
"those",
"."
] | python | train |
chrippa/python-librtmp | librtmp/stream.py | https://github.com/chrippa/python-librtmp/blob/6efefd5edd76cad7a3b53f7c87c1c7350448224d/librtmp/stream.py#L83-L92 | def seek(self, time):
"""Attempts to seek in the stream.
:param time: int, Time to seek to in seconds
"""
res = librtmp.RTMP_SendSeek(self.client.rtmp, time)
if res < 1:
raise RTMPError("Failed to seek") | [
"def",
"seek",
"(",
"self",
",",
"time",
")",
":",
"res",
"=",
"librtmp",
".",
"RTMP_SendSeek",
"(",
"self",
".",
"client",
".",
"rtmp",
",",
"time",
")",
"if",
"res",
"<",
"1",
":",
"raise",
"RTMPError",
"(",
"\"Failed to seek\"",
")"
] | Attempts to seek in the stream.
:param time: int, Time to seek to in seconds | [
"Attempts",
"to",
"seek",
"in",
"the",
"stream",
"."
] | python | train |
data61/clkhash | clkhash/cli.py | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/cli.py#L99-L110 | def status(server, output, verbose):
"""Connect to an entity matching server and check the service status.
Use "-" to output status to stdout.
"""
if verbose:
log("Connecting to Entity Matching Server: {}".format(server))
service_status = server_get_status(server)
if verbose:
log("Status: {}".format(service_status['status']))
print(json.dumps(service_status), file=output) | [
"def",
"status",
"(",
"server",
",",
"output",
",",
"verbose",
")",
":",
"if",
"verbose",
":",
"log",
"(",
"\"Connecting to Entity Matching Server: {}\"",
".",
"format",
"(",
"server",
")",
")",
"service_status",
"=",
"server_get_status",
"(",
"server",
")",
"if",
"verbose",
":",
"log",
"(",
"\"Status: {}\"",
".",
"format",
"(",
"service_status",
"[",
"'status'",
"]",
")",
")",
"print",
"(",
"json",
".",
"dumps",
"(",
"service_status",
")",
",",
"file",
"=",
"output",
")"
] | Connect to an entity matching server and check the service status.
Use "-" to output status to stdout. | [
"Connect",
"to",
"an",
"entity",
"matching",
"server",
"and",
"check",
"the",
"service",
"status",
"."
] | python | train |
marcomusy/vtkplotter | vtkplotter/actors.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L62-L114 | def isosurface(image, smoothing=0, threshold=None, connectivity=False):
"""Return a ``vtkActor`` isosurface extracted from a ``vtkImageData`` object.
:param float smoothing: gaussian filter to smooth vtkImageData, in units of sigmas
:param threshold: value or list of values to draw the isosurface(s)
:type threshold: float, list
:param bool connectivity: if True only keeps the largest portion of the polydata
.. hint:: |isosurfaces| |isosurfaces.py|_
"""
if smoothing:
smImg = vtk.vtkImageGaussianSmooth()
smImg.SetDimensionality(3)
smImg.SetInputData(image)
smImg.SetStandardDeviations(smoothing, smoothing, smoothing)
smImg.Update()
image = smImg.GetOutput()
scrange = image.GetScalarRange()
if scrange[1] > 1e10:
print("Warning, high scalar range detected:", scrange)
cf = vtk.vtkContourFilter()
cf.SetInputData(image)
cf.UseScalarTreeOn()
cf.ComputeScalarsOn()
if utils.isSequence(threshold):
cf.SetNumberOfContours(len(threshold))
for i, t in enumerate(threshold):
cf.SetValue(i, t)
cf.Update()
else:
if not threshold:
threshold = (2 * scrange[0] + scrange[1]) / 3.0
cf.SetValue(0, threshold)
cf.Update()
clp = vtk.vtkCleanPolyData()
clp.SetInputConnection(cf.GetOutputPort())
clp.Update()
poly = clp.GetOutput()
if connectivity:
conn = vtk.vtkPolyDataConnectivityFilter()
conn.SetExtractionModeToLargestRegion()
conn.SetInputData(poly)
conn.Update()
poly = conn.GetOutput()
a = Actor(poly, c=None).phong()
a.mapper.SetScalarRange(scrange[0], scrange[1])
return a | [
"def",
"isosurface",
"(",
"image",
",",
"smoothing",
"=",
"0",
",",
"threshold",
"=",
"None",
",",
"connectivity",
"=",
"False",
")",
":",
"if",
"smoothing",
":",
"smImg",
"=",
"vtk",
".",
"vtkImageGaussianSmooth",
"(",
")",
"smImg",
".",
"SetDimensionality",
"(",
"3",
")",
"smImg",
".",
"SetInputData",
"(",
"image",
")",
"smImg",
".",
"SetStandardDeviations",
"(",
"smoothing",
",",
"smoothing",
",",
"smoothing",
")",
"smImg",
".",
"Update",
"(",
")",
"image",
"=",
"smImg",
".",
"GetOutput",
"(",
")",
"scrange",
"=",
"image",
".",
"GetScalarRange",
"(",
")",
"if",
"scrange",
"[",
"1",
"]",
">",
"1e10",
":",
"print",
"(",
"\"Warning, high scalar range detected:\"",
",",
"scrange",
")",
"cf",
"=",
"vtk",
".",
"vtkContourFilter",
"(",
")",
"cf",
".",
"SetInputData",
"(",
"image",
")",
"cf",
".",
"UseScalarTreeOn",
"(",
")",
"cf",
".",
"ComputeScalarsOn",
"(",
")",
"if",
"utils",
".",
"isSequence",
"(",
"threshold",
")",
":",
"cf",
".",
"SetNumberOfContours",
"(",
"len",
"(",
"threshold",
")",
")",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"threshold",
")",
":",
"cf",
".",
"SetValue",
"(",
"i",
",",
"t",
")",
"cf",
".",
"Update",
"(",
")",
"else",
":",
"if",
"not",
"threshold",
":",
"threshold",
"=",
"(",
"2",
"*",
"scrange",
"[",
"0",
"]",
"+",
"scrange",
"[",
"1",
"]",
")",
"/",
"3.0",
"cf",
".",
"SetValue",
"(",
"0",
",",
"threshold",
")",
"cf",
".",
"Update",
"(",
")",
"clp",
"=",
"vtk",
".",
"vtkCleanPolyData",
"(",
")",
"clp",
".",
"SetInputConnection",
"(",
"cf",
".",
"GetOutputPort",
"(",
")",
")",
"clp",
".",
"Update",
"(",
")",
"poly",
"=",
"clp",
".",
"GetOutput",
"(",
")",
"if",
"connectivity",
":",
"conn",
"=",
"vtk",
".",
"vtkPolyDataConnectivityFilter",
"(",
")",
"conn",
".",
"SetExtractionModeToLargestRegion",
"(",
")",
"conn",
".",
"SetInputData",
"(",
"poly",
")",
"conn",
".",
"Update",
"(",
")",
"poly",
"=",
"conn",
".",
"GetOutput",
"(",
")",
"a",
"=",
"Actor",
"(",
"poly",
",",
"c",
"=",
"None",
")",
".",
"phong",
"(",
")",
"a",
".",
"mapper",
".",
"SetScalarRange",
"(",
"scrange",
"[",
"0",
"]",
",",
"scrange",
"[",
"1",
"]",
")",
"return",
"a"
] | Return a ``vtkActor`` isosurface extracted from a ``vtkImageData`` object.
:param float smoothing: gaussian filter to smooth vtkImageData, in units of sigmas
:param threshold: value or list of values to draw the isosurface(s)
:type threshold: float, list
:param bool connectivity: if True only keeps the largest portion of the polydata
.. hint:: |isosurfaces| |isosurfaces.py|_ | [
"Return",
"a",
"vtkActor",
"isosurface",
"extracted",
"from",
"a",
"vtkImageData",
"object",
"."
] | python | train |