repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition |
---|---|---|---|---|---|---|---|---|
arcturial/clickatell-python | clickatell/__init__.py | https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/__init__.py#L107-L135 | def request(self, action, data={}, headers={}, method='GET'):
"""
Run the HTTP request against the Clickatell API
:param str action: The API action
:param dict data: The request parameters
:param dict headers: The request headers (if any)
:param str method: The HTTP method
:return: The request response
"""
url = ('https' if self.secure else 'http') + '://' + self.endpoint
url = url + '/' + action
# Set the User-Agent
userAgent = "".join(["ClickatellPython/0.1.2", " ", "Python/", platform.python_version()])
headers = self.merge({ "User-Agent": userAgent }, headers)
try:
func = getattr(requests, method.lower())
except AttributeError:
raise Exception('HTTP method ' + method + ' unsupported.')
resp = func(url, params=data, data=json.dumps(data), headers=headers)
# Set the encoding before unwrapping the text
resp.encoding = 'utf-8'
content = resp.text
return content | [
"def",
"request",
"(",
"self",
",",
"action",
",",
"data",
"=",
"{",
"}",
",",
"headers",
"=",
"{",
"}",
",",
"method",
"=",
"'GET'",
")",
":",
"url",
"=",
"(",
"'https'",
"if",
"self",
".",
"secure",
"else",
"'http'",
")",
"+",
"'://'",
"+",
"self",
".",
"endpoint",
"url",
"=",
"url",
"+",
"'/'",
"+",
"action",
"# Set the User-Agent",
"userAgent",
"=",
"\"\"",
".",
"join",
"(",
"[",
"\"ClickatellPython/0.1.2\"",
",",
"\" \"",
",",
"\"Python/\"",
",",
"platform",
".",
"python_version",
"(",
")",
"]",
")",
"headers",
"=",
"self",
".",
"merge",
"(",
"{",
"\"User-Agent\"",
":",
"userAgent",
"}",
",",
"headers",
")",
"try",
":",
"func",
"=",
"getattr",
"(",
"requests",
",",
"method",
".",
"lower",
"(",
")",
")",
"except",
"AttributeError",
":",
"raise",
"Exception",
"(",
"'HTTP method '",
"+",
"method",
"+",
"' unsupported.'",
")",
"resp",
"=",
"func",
"(",
"url",
",",
"params",
"=",
"data",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"headers",
"=",
"headers",
")",
"# Set the coding before unwrapping the text",
"resp",
".",
"encoding",
"=",
"'utf-8'",
"content",
"=",
"resp",
".",
"text",
"return",
"content"
] | Run the HTTP request against the Clickatell API
:param str action: The API action
:param dict data: The request parameters
:param dict headers: The request headers (if any)
:param str method: The HTTP method
:return: The request response | [
"Run",
"the",
"HTTP",
"request",
"against",
"the",
"Clickatell",
"API"
] | python | train |
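The `request` helper above builds its URL and `User-Agent` header, then picks the HTTP verb dynamically with `getattr(requests, method.lower())`. A minimal standalone sketch of that dispatch pattern, using a stub transport instead of a live `requests` session (the `FakeTransport` class is illustrative, not part of the Clickatell library):

```python
import json
import platform


class FakeTransport(object):
    """Stub standing in for the `requests` module in this sketch."""

    def get(self, url, params=None, data=None, headers=None):
        return 'GET %s params=%s headers=%s' % (url, params, headers)

    def post(self, url, params=None, data=None, headers=None):
        return 'POST %s body=%s' % (url, data)


def request(transport, endpoint, action, data=None, headers=None,
            method='GET', secure=True):
    data = data or {}
    headers = dict(headers or {})
    url = ('https' if secure else 'http') + '://' + endpoint + '/' + action
    user_agent = 'ClickatellPython/0.1.2 Python/' + platform.python_version()
    headers.setdefault('User-Agent', user_agent)
    try:
        func = getattr(transport, method.lower())  # verb name -> bound method
    except AttributeError:
        raise Exception('HTTP method ' + method + ' unsupported.')
    return func(url, params=data, data=json.dumps(data), headers=headers)


print(request(FakeTransport(), 'api.clickatell.com', 'rest/message',
              data={'to': '0000'}, method='POST'))
```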
greenbone/ospd | ospd/ospd.py | https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L1524-L1560 | def run(self, address, port, unix_path):
""" Starts the Daemon, handling commands until interrupted.
@return False if error. Runs indefinitely otherwise.
"""
assert address or unix_path
if unix_path:
sock = bind_unix_socket(unix_path)
else:
sock = bind_socket(address, port)
if sock is None:
return False
sock.setblocking(False)
inputs = [sock]
outputs = []
try:
while True:
readable, _, _ = select.select(
inputs, outputs, inputs, SCHEDULER_CHECK_PERIOD)
for r_socket in readable:
if unix_path and r_socket is sock:
client_stream, _ = sock.accept()
logger.debug("New connection from %s", unix_path)
self.handle_client_stream(client_stream, True)
else:
client_stream = self.new_client_stream(sock)
if client_stream is None:
continue
self.handle_client_stream(client_stream, False)
close_client_stream(client_stream, unix_path)
self.scheduler()
except KeyboardInterrupt:
logger.info("Received Ctrl-C shutting-down ...")
finally:
sock.shutdown(socket.SHUT_RDWR)
sock.close() | [
"def",
"run",
"(",
"self",
",",
"address",
",",
"port",
",",
"unix_path",
")",
":",
"assert",
"address",
"or",
"unix_path",
"if",
"unix_path",
":",
"sock",
"=",
"bind_unix_socket",
"(",
"unix_path",
")",
"else",
":",
"sock",
"=",
"bind_socket",
"(",
"address",
",",
"port",
")",
"if",
"sock",
"is",
"None",
":",
"return",
"False",
"sock",
".",
"setblocking",
"(",
"False",
")",
"inputs",
"=",
"[",
"sock",
"]",
"outputs",
"=",
"[",
"]",
"try",
":",
"while",
"True",
":",
"readable",
",",
"_",
",",
"_",
"=",
"select",
".",
"select",
"(",
"inputs",
",",
"outputs",
",",
"inputs",
",",
"SCHEDULER_CHECK_PERIOD",
")",
"for",
"r_socket",
"in",
"readable",
":",
"if",
"unix_path",
"and",
"r_socket",
"is",
"sock",
":",
"client_stream",
",",
"_",
"=",
"sock",
".",
"accept",
"(",
")",
"logger",
".",
"debug",
"(",
"\"New connection from %s\"",
",",
"unix_path",
")",
"self",
".",
"handle_client_stream",
"(",
"client_stream",
",",
"True",
")",
"else",
":",
"client_stream",
"=",
"self",
".",
"new_client_stream",
"(",
"sock",
")",
"if",
"client_stream",
"is",
"None",
":",
"continue",
"self",
".",
"handle_client_stream",
"(",
"client_stream",
",",
"False",
")",
"close_client_stream",
"(",
"client_stream",
",",
"unix_path",
")",
"self",
".",
"scheduler",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"logger",
".",
"info",
"(",
"\"Received Ctrl-C shutting-down ...\"",
")",
"finally",
":",
"sock",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"sock",
".",
"close",
"(",
")"
] | Starts the Daemon, handling commands until interrupted.
@return False if error. Runs indefinitely otherwise. | [
"Starts",
"the",
"Daemon",
"handling",
"commands",
"until",
"interrupted",
"."
] | python | train |
yt-project/unyt | unyt/array.py | https://github.com/yt-project/unyt/blob/7a4eafc229f83784f4c63d639aee554f9a6b1ca0/unyt/array.py#L1006-L1037 | def to_equivalent(self, unit, equivalence, **kwargs):
"""
Return a copy of the unyt_array in the specified units, assuming
the given equivalency. The dimensions of the specified units and the
dimensions of the original array need not match so long as there is an
appropriate conversion in the specified equivalency.
Parameters
----------
unit : string
The unit that you wish to convert to.
equivalence : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> from unyt import K
>>> a = 1.0e7*K
>>> print(a.to_equivalent("keV", "thermal"))
0.8617332401096504 keV
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equivalence]()
if self.has_equivalent(equivalence):
new_arr = this_equiv.convert(self, conv_unit.dimensions, **kwargs)
return new_arr.in_units(conv_unit)
else:
raise InvalidUnitEquivalence(equivalence, self.units, unit) | [
"def",
"to_equivalent",
"(",
"self",
",",
"unit",
",",
"equivalence",
",",
"*",
"*",
"kwargs",
")",
":",
"conv_unit",
"=",
"Unit",
"(",
"unit",
",",
"registry",
"=",
"self",
".",
"units",
".",
"registry",
")",
"if",
"self",
".",
"units",
".",
"same_dimensions_as",
"(",
"conv_unit",
")",
":",
"return",
"self",
".",
"in_units",
"(",
"conv_unit",
")",
"this_equiv",
"=",
"equivalence_registry",
"[",
"equivalence",
"]",
"(",
")",
"if",
"self",
".",
"has_equivalent",
"(",
"equivalence",
")",
":",
"new_arr",
"=",
"this_equiv",
".",
"convert",
"(",
"self",
",",
"conv_unit",
".",
"dimensions",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_arr",
".",
"in_units",
"(",
"conv_unit",
")",
"else",
":",
"raise",
"InvalidUnitEquivalence",
"(",
"equivalence",
",",
"self",
".",
"units",
",",
"unit",
")"
] | Return a copy of the unyt_array in the specified units, assuming
the given equivalency. The dimensions of the specified units and the
dimensions of the original array need not match so long as there is an
appropriate conversion in the specified equivalency.
Parameters
----------
unit : string
The unit that you wish to convert to.
equivalence : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> from unyt import K
>>> a = 1.0e7*K
>>> print(a.to_equivalent("keV", "thermal"))
0.8617332401096504 keV | [
"Return",
"a",
"copy",
"of",
"the",
"unyt_array",
"in",
"the",
"units",
"specified",
"units",
"assuming",
"the",
"given",
"equivalency",
".",
"The",
"dimensions",
"of",
"the",
"specified",
"units",
"and",
"the",
"dimensions",
"of",
"the",
"original",
"array",
"need",
"not",
"match",
"so",
"long",
"as",
"there",
"is",
"an",
"appropriate",
"conversion",
"in",
"the",
"specified",
"equivalency",
"."
] | python | train |
daviddrysdale/python-phonenumbers | python/phonenumbers/shortnumberinfo.py | https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/shortnumberinfo.py#L259-L274 | def _region_code_for_short_number_from_region_list(numobj, region_codes):
"""Helper method to get the region code for a given phone number, from a list of possible region
codes. If the list contains more than one region, the first region for which the number is
valid is returned.
"""
if len(region_codes) == 0:
return None
elif len(region_codes) == 1:
return region_codes[0]
national_number = national_significant_number(numobj)
for region_code in region_codes:
metadata = PhoneMetadata.short_metadata_for_region(region_code)
if metadata is not None and _matches_possible_number_and_national_number(national_number, metadata.short_code):
# The number is valid for this region.
return region_code
return None | [
"def",
"_region_code_for_short_number_from_region_list",
"(",
"numobj",
",",
"region_codes",
")",
":",
"if",
"len",
"(",
"region_codes",
")",
"==",
"0",
":",
"return",
"None",
"elif",
"len",
"(",
"region_codes",
")",
"==",
"1",
":",
"return",
"region_codes",
"[",
"0",
"]",
"national_number",
"=",
"national_significant_number",
"(",
"numobj",
")",
"for",
"region_code",
"in",
"region_codes",
":",
"metadata",
"=",
"PhoneMetadata",
".",
"short_metadata_for_region",
"(",
"region_code",
")",
"if",
"metadata",
"is",
"not",
"None",
"and",
"_matches_possible_number_and_national_number",
"(",
"national_number",
",",
"metadata",
".",
"short_code",
")",
":",
"# The number is valid for this region.",
"return",
"region_code",
"return",
"None"
] | Helper method to get the region code for a given phone number, from a list of possible region
codes. If the list contains more than one region, the first region for which the number is
valid is returned. | [
"Helper",
"method",
"to",
"get",
"the",
"region",
"code",
"for",
"a",
"given",
"phone",
"number",
"from",
"a",
"list",
"of",
"possible",
"region",
"codes",
".",
"If",
"the",
"list",
"contains",
"more",
"than",
"one",
"region",
"the",
"first",
"region",
"for",
"which",
"the",
"number",
"is",
"valid",
"is",
"returned",
"."
] | python | train |
boriel/zxbasic | outfmt/tzx.py | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/outfmt/tzx.py#L127-L132 | def save_program(self, title, bytes, line=32768):
""" Saves the given bytes as a BASIC program.
"""
self.standard_program_header(title, len(bytes), line)
bytes = [self.BLOCK_TYPE_DATA] + [(int(x) & 0xFF) for x in bytes] # & 0xFF truncates to bytes
self.standard_block(bytes) | [
"def",
"save_program",
"(",
"self",
",",
"title",
",",
"bytes",
",",
"line",
"=",
"32768",
")",
":",
"self",
".",
"standard_program_header",
"(",
"title",
",",
"len",
"(",
"bytes",
")",
",",
"line",
")",
"bytes",
"=",
"[",
"self",
".",
"BLOCK_TYPE_DATA",
"]",
"+",
"[",
"(",
"int",
"(",
"x",
")",
"&",
"0xFF",
")",
"for",
"x",
"in",
"bytes",
"]",
"# & 0xFF truncates to bytes",
"self",
".",
"standard_block",
"(",
"bytes",
")"
] | Saves the given bytes as a BASIC program. | [
"Saves",
"the",
"given",
"bytes",
"as",
"a",
"BASIC",
"program",
"."
] | python | train |
apache/incubator-heron | heron/instance/src/python/utils/tuple.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/tuple.py#L83-L87 | def make_root_tuple_info(stream_id, tuple_id):
"""Creates a RootTupleInfo"""
key = random.getrandbits(TupleHelper.MAX_SFIXED64_RAND_BITS)
return RootTupleInfo(stream_id=stream_id, tuple_id=tuple_id,
insertion_time=time.time(), key=key) | [
"def",
"make_root_tuple_info",
"(",
"stream_id",
",",
"tuple_id",
")",
":",
"key",
"=",
"random",
".",
"getrandbits",
"(",
"TupleHelper",
".",
"MAX_SFIXED64_RAND_BITS",
")",
"return",
"RootTupleInfo",
"(",
"stream_id",
"=",
"stream_id",
",",
"tuple_id",
"=",
"tuple_id",
",",
"insertion_time",
"=",
"time",
".",
"time",
"(",
")",
",",
"key",
"=",
"key",
")"
] | Creates a RootTupleInfo | [
"Creates",
"a",
"RootTupleInfo"
] | python | valid |
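`make_root_tuple_info` pairs a stream/tuple identifier with a wall-clock insertion time and a random anchor key. A self-contained sketch of the same construction, assuming `RootTupleInfo` is a plain namedtuple and using an assumed bit width (Heron defines both `RootTupleInfo` and `MAX_SFIXED64_RAND_BITS` elsewhere in its tuple helpers):

```python
import random
import time
from collections import namedtuple

RootTupleInfo = namedtuple('RootTupleInfo',
                           ['stream_id', 'tuple_id', 'insertion_time', 'key'])

# Assumed width for illustration only; Heron supplies the real constant.
MAX_SFIXED64_RAND_BITS = 62


def make_root_tuple_info(stream_id, tuple_id):
    """Create a RootTupleInfo with a random key and the current timestamp."""
    key = random.getrandbits(MAX_SFIXED64_RAND_BITS)
    return RootTupleInfo(stream_id=stream_id, tuple_id=tuple_id,
                         insertion_time=time.time(), key=key)


print(make_root_tuple_info('default', 'tuple-1'))
```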
apache/incubator-heron | third_party/python/cpplint/cpplint.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L1689-L1764 | def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are currently
# expecting a '>', the matching '<' must have been an operator, since
# template argument list should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack) | [
"def",
"FindEndOfExpressionInLine",
"(",
"line",
",",
"startpos",
",",
"stack",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"startpos",
",",
"len",
"(",
"line",
")",
")",
":",
"char",
"=",
"line",
"[",
"i",
"]",
"if",
"char",
"in",
"'([{'",
":",
"# Found start of parenthesized expression, push to expression stack",
"stack",
".",
"append",
"(",
"char",
")",
"elif",
"char",
"==",
"'<'",
":",
"# Found potential start of template argument list",
"if",
"i",
">",
"0",
"and",
"line",
"[",
"i",
"-",
"1",
"]",
"==",
"'<'",
":",
"# Left shift operator",
"if",
"stack",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'<'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"-",
"1",
",",
"None",
")",
"elif",
"i",
">",
"0",
"and",
"Search",
"(",
"r'\\boperator\\s*$'",
",",
"line",
"[",
"0",
":",
"i",
"]",
")",
":",
"# operator<, don't add to stack",
"continue",
"else",
":",
"# Tentative start of template argument list",
"stack",
".",
"append",
"(",
"'<'",
")",
"elif",
"char",
"in",
"')]}'",
":",
"# Found end of parenthesized expression.",
"#",
"# If we are currently expecting a matching '>', the pending '<'",
"# must have been an operator. Remove them from expression stack.",
"while",
"stack",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'<'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"-",
"1",
",",
"None",
")",
"if",
"(",
"(",
"stack",
"[",
"-",
"1",
"]",
"==",
"'('",
"and",
"char",
"==",
"')'",
")",
"or",
"(",
"stack",
"[",
"-",
"1",
"]",
"==",
"'['",
"and",
"char",
"==",
"']'",
")",
"or",
"(",
"stack",
"[",
"-",
"1",
"]",
"==",
"'{'",
"and",
"char",
"==",
"'}'",
")",
")",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"i",
"+",
"1",
",",
"None",
")",
"else",
":",
"# Mismatched parentheses",
"return",
"(",
"-",
"1",
",",
"None",
")",
"elif",
"char",
"==",
"'>'",
":",
"# Found potential end of template argument list.",
"# Ignore \"->\" and operator functions",
"if",
"(",
"i",
">",
"0",
"and",
"(",
"line",
"[",
"i",
"-",
"1",
"]",
"==",
"'-'",
"or",
"Search",
"(",
"r'\\boperator\\s*$'",
",",
"line",
"[",
"0",
":",
"i",
"-",
"1",
"]",
")",
")",
")",
":",
"continue",
"# Pop the stack if there is a matching '<'. Otherwise, ignore",
"# this '>' since it must be an operator.",
"if",
"stack",
":",
"if",
"stack",
"[",
"-",
"1",
"]",
"==",
"'<'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"i",
"+",
"1",
",",
"None",
")",
"elif",
"char",
"==",
"';'",
":",
"# Found something that look like end of statements. If we are currently",
"# expecting a '>', the matching '<' must have been an operator, since",
"# template argument list should not contain statements.",
"while",
"stack",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'<'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"-",
"1",
",",
"None",
")",
"# Did not find end of expression or unbalanced parentheses on this line",
"return",
"(",
"-",
"1",
",",
"stack",
")"
] | Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line) | [
"Find",
"the",
"position",
"just",
"after",
"the",
"end",
"of",
"current",
"parenthesized",
"expression",
"."
] | python | valid |
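The cpplint routine above threads its `stack` through successive calls so that an expression opened on one line can close on a later one. A simplified, runnable illustration of that contract, handling only `()[]{}` (the original's template angle-bracket and `operator<` special cases are omitted):

```python
def find_end_simple(line, startpos, stack):
    """Return (index just after matching close, None) on success,
    (-1, None) on a mismatch, or (-1, stack) if the expression
    continues past this line."""
    pairs = {')': '(', ']': '[', '}': '{'}
    for i in range(startpos, len(line)):
        char = line[i]
        if char in '([{':
            stack.append(char)
        elif char in ')]}':
            if not stack or stack[-1] != pairs[char]:
                return (-1, None)            # mismatched parentheses
            stack.pop()
            if not stack:
                return (i + 1, None)         # found the matching end
    return (-1, stack)                       # expression continues next line


pos, stack = find_end_simple('foo(bar[0],', 3, [])
print(pos, stack)                             # -1 ['('] -> still open
print(find_end_simple('baz) + 1', 0, stack))  # (4, None)
```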
projecthamster/hamster | src/hamster/preferences.py | https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/preferences.py#L369-L376 | def activity_changed(self, selection, model):
""" enables and disables action buttons depending on selected item """
(model, iter) = selection.get_selected()
# treat any selected case
unsorted_selected = self._get_selected_category() == -1
self.get_widget('activity_edit').set_sensitive(iter != None)
self.get_widget('activity_remove').set_sensitive(iter != None) | [
"def",
"activity_changed",
"(",
"self",
",",
"selection",
",",
"model",
")",
":",
"(",
"model",
",",
"iter",
")",
"=",
"selection",
".",
"get_selected",
"(",
")",
"# treat any selected case",
"unsorted_selected",
"=",
"self",
".",
"_get_selected_category",
"(",
")",
"==",
"-",
"1",
"self",
".",
"get_widget",
"(",
"'activity_edit'",
")",
".",
"set_sensitive",
"(",
"iter",
"!=",
"None",
")",
"self",
".",
"get_widget",
"(",
"'activity_remove'",
")",
".",
"set_sensitive",
"(",
"iter",
"!=",
"None",
")"
] | enables and disables action buttons depending on selected item | [
"enables",
"and",
"disables",
"action",
"buttons",
"depending",
"on",
"selected",
"item"
] | python | train |
KelSolaar/Umbra | umbra/components/factory/script_editor/search_in_files.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/search_in_files.py#L1405-L1443 | def save_files(self, nodes):
"""
Saves user defined files using given nodes.
:param nodes: Nodes.
:type nodes: list
:return: Method success.
:rtype: bool
"""
metrics = {"Opened": 0, "Cached": 0}
for node in nodes:
file = node.file
if self.__container.get_editor(file):
if self.__container.save_file(file):
metrics["Opened"] += 1
self.__uncache(file)
else:
cache_data = self.__files_cache.get_content(file)
if cache_data is None:
LOGGER.warning(
"!> {0} | '{1}' file doesn't exists in files cache!".format(self.__class__.__name__, file))
continue
if cache_data.document:
file_handle = File(file)
file_handle.content = [cache_data.document.toPlainText().toUtf8()]
if file_handle.write():
metrics["Cached"] += 1
self.__uncache(file)
else:
LOGGER.warning(
"!> {0} | '{1}' file document doesn't exists in files cache!".format(self.__class__.__name__,
file))
self.__container.engine.notifications_manager.notify(
"{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!".format(self.__class__.__name__,
metrics["Opened"],
metrics["Cached"])) | [
"def",
"save_files",
"(",
"self",
",",
"nodes",
")",
":",
"metrics",
"=",
"{",
"\"Opened\"",
":",
"0",
",",
"\"Cached\"",
":",
"0",
"}",
"for",
"node",
"in",
"nodes",
":",
"file",
"=",
"node",
".",
"file",
"if",
"self",
".",
"__container",
".",
"get_editor",
"(",
"file",
")",
":",
"if",
"self",
".",
"__container",
".",
"save_file",
"(",
"file",
")",
":",
"metrics",
"[",
"\"Opened\"",
"]",
"+=",
"1",
"self",
".",
"__uncache",
"(",
"file",
")",
"else",
":",
"cache_data",
"=",
"self",
".",
"__files_cache",
".",
"get_content",
"(",
"file",
")",
"if",
"cache_data",
"is",
"None",
":",
"LOGGER",
".",
"warning",
"(",
"\"!> {0} | '{1}' file doesn't exists in files cache!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"file",
")",
")",
"continue",
"if",
"cache_data",
".",
"document",
":",
"file_handle",
"=",
"File",
"(",
"file",
")",
"file_handle",
".",
"content",
"=",
"[",
"cache_data",
".",
"document",
".",
"toPlainText",
"(",
")",
".",
"toUtf8",
"(",
")",
"]",
"if",
"file_handle",
".",
"write",
"(",
")",
":",
"metrics",
"[",
"\"Cached\"",
"]",
"+=",
"1",
"self",
".",
"__uncache",
"(",
"file",
")",
"else",
":",
"LOGGER",
".",
"warning",
"(",
"\"!> {0} | '{1}' file document doesn't exists in files cache!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"file",
")",
")",
"self",
".",
"__container",
".",
"engine",
".",
"notifications_manager",
".",
"notify",
"(",
"\"{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"metrics",
"[",
"\"Opened\"",
"]",
",",
"metrics",
"[",
"\"Cached\"",
"]",
")",
")"
] | Saves user defined files using given nodes.
:param nodes: Nodes.
:type nodes: list
:return: Method success.
:rtype: bool | [
"Saves",
"user",
"defined",
"files",
"using",
"give",
"nodes",
"."
] | python | train |
saltstack/salt | salt/cloud/clouds/msazure.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L2436-L2466 | def delete_affinity_group(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Delete a specific affinity group associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_affinity_group my-azure name=my_affinity_group
'''
if call != 'function':
raise SaltCloudSystemExit(
'The delete_affinity_group function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('A name must be specified as "name"')
if not conn:
conn = get_conn()
try:
conn.delete_affinity_group(kwargs['name'])
return {'Success': 'The affinity group was successfully deleted'}
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message)) | [
"def",
"delete_affinity_group",
"(",
"kwargs",
"=",
"None",
",",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The delete_affinity_group function must be called with -f or --function.'",
")",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'name'",
"not",
"in",
"kwargs",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'A name must be specified as \"name\"'",
")",
"if",
"not",
"conn",
":",
"conn",
"=",
"get_conn",
"(",
")",
"try",
":",
"conn",
".",
"delete_affinity_group",
"(",
"kwargs",
"[",
"'name'",
"]",
")",
"return",
"{",
"'Success'",
":",
"'The affinity group was successfully deleted'",
"}",
"except",
"AzureMissingResourceHttpError",
"as",
"exc",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'{0}: {1}'",
".",
"format",
"(",
"kwargs",
"[",
"'name'",
"]",
",",
"exc",
".",
"message",
")",
")"
] | .. versionadded:: 2015.8.0
Delete a specific affinity group associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_affinity_group my-azure name=my_affinity_group | [
"..",
"versionadded",
"::",
"2015",
".",
"8",
".",
"0"
] | python | train |
ARMmbed/icetea | icetea_lib/LogManager.py | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/LogManager.py#L480-L572 | def init_base_logging(directory="./log", verbose=0, silent=False, color=False, no_file=False,
truncate=True, config_location=None):
"""
Initialize the Icetea logging by creating a directory to store logs
for this run and initialize the console logger for Icetea itself.
:param directory: Directory where to store the resulting logs
:param verbose: Log level as integer
:param silent: Log level warning
:param no_file: Log to file
:param color: Log coloring
:param truncate: Log truncating
:param config_location: Location of config file.
:raises IOError if unable to read configuration file.
:raises OSError if log path already exists.
:raises ImportError if colored logging was requested but coloredlogs module is not installed.
"""
global LOGPATHDIR
global STANDALONE_LOGGING
global TRUNCATE_LOG
global COLOR_ON
global SILENT_ON
global VERBOSE_LEVEL
if config_location:
try:
_read_config(config_location)
except IOError as error:
raise IOError("Unable to read from configuration file {}: {}".format(config_location,
error))
except jsonschema.SchemaError as error:
raise jsonschema.SchemaError("Logging configuration schema "
"file malformed: {}".format(error))
LOGPATHDIR = os.path.join(directory, datetime.datetime.now().strftime(
"%Y-%m-%d_%H%M%S.%f").rstrip("0"))
# Initialize the simple console logger for IceteaManager
icetealogger = logging.getLogger("icetea")
icetealogger.propagate = False
icetealogger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
formatter = BenchFormatter(LOGGING_CONFIG.get("IceteaManager").get("format"),
LOGGING_CONFIG.get("IceteaManager").get("dateformat"))
if not color:
stream_handler.setFormatter(formatter)
elif color and not COLORS:
raise ImportError("Missing coloredlogs module. Please install with "
"pip to use colors in logging.")
else:
class ColoredBenchFormatter(coloredlogs.ColoredFormatter):
"""
This is defined as an internal class here because coloredlogs is an optional
dependency.
"""
converter = datetime.datetime.fromtimestamp
def formatTime(self, record, datefmt=None):
date_and_time = self.converter(record.created, tz=pytz.utc)
if "%F" in datefmt:
msec = "%03d" % record.msecs
datefmt = datefmt.replace("%F", msec)
str_time = date_and_time.strftime(datefmt)
return str_time
COLOR_ON = color
stream_handler.setFormatter(ColoredBenchFormatter(
LOGGING_CONFIG.get("IceteaManager").get("format"),
LOGGING_CONFIG.get("IceteaManager").get("dateformat"),
LEVEL_FORMATS, FIELD_STYLES))
SILENT_ON = silent
VERBOSE_LEVEL = verbose
if not no_file:
try:
os.makedirs(LOGPATHDIR)
except OSError:
raise OSError("Log path %s already exists." % LOGPATHDIR)
filename = LOGGING_CONFIG.get("IceteaManager").get("file").get("name", "icetea.log")
icetealogger = _add_filehandler(icetealogger, get_base_logfilename(filename),
formatter, "IceteaManager")
if verbose and not silent:
stream_handler.setLevel(logging.DEBUG)
elif silent:
stream_handler.setLevel(logging.WARN)
else:
stream_handler.setLevel(getattr(logging, LOGGING_CONFIG.get("IceteaManager").get("level")))
icetealogger.addHandler(stream_handler)
TRUNCATE_LOG = truncate
if TRUNCATE_LOG:
icetealogger.addFilter(ContextFilter())
STANDALONE_LOGGING = False | [
"def",
"init_base_logging",
"(",
"directory",
"=",
"\"./log\"",
",",
"verbose",
"=",
"0",
",",
"silent",
"=",
"False",
",",
"color",
"=",
"False",
",",
"no_file",
"=",
"False",
",",
"truncate",
"=",
"True",
",",
"config_location",
"=",
"None",
")",
":",
"global",
"LOGPATHDIR",
"global",
"STANDALONE_LOGGING",
"global",
"TRUNCATE_LOG",
"global",
"COLOR_ON",
"global",
"SILENT_ON",
"global",
"VERBOSE_LEVEL",
"if",
"config_location",
":",
"try",
":",
"_read_config",
"(",
"config_location",
")",
"except",
"IOError",
"as",
"error",
":",
"raise",
"IOError",
"(",
"\"Unable to read from configuration file {}: {}\"",
".",
"format",
"(",
"config_location",
",",
"error",
")",
")",
"except",
"jsonschema",
".",
"SchemaError",
"as",
"error",
":",
"raise",
"jsonschema",
".",
"SchemaError",
"(",
"\"Logging configuration schema \"",
"\"file malformed: {}\"",
".",
"format",
"(",
"error",
")",
")",
"LOGPATHDIR",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d_%H%M%S.%f\"",
")",
".",
"rstrip",
"(",
"\"0\"",
")",
")",
"# Initialize the simple console logger for IceteaManager",
"icetealogger",
"=",
"logging",
".",
"getLogger",
"(",
"\"icetea\"",
")",
"icetealogger",
".",
"propagate",
"=",
"False",
"icetealogger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"stream_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"formatter",
"=",
"BenchFormatter",
"(",
"LOGGING_CONFIG",
".",
"get",
"(",
"\"IceteaManager\"",
")",
".",
"get",
"(",
"\"format\"",
")",
",",
"LOGGING_CONFIG",
".",
"get",
"(",
"\"IceteaManager\"",
")",
".",
"get",
"(",
"\"dateformat\"",
")",
")",
"if",
"not",
"color",
":",
"stream_handler",
".",
"setFormatter",
"(",
"formatter",
")",
"elif",
"color",
"and",
"not",
"COLORS",
":",
"raise",
"ImportError",
"(",
"\"Missing coloredlogs module. Please install with \"",
"\"pip to use colors in logging.\"",
")",
"else",
":",
"class",
"ColoredBenchFormatter",
"(",
"coloredlogs",
".",
"ColoredFormatter",
")",
":",
"\"\"\"\n This is defined as an internal class here because coloredlogs is and optional\n dependency.\n \"\"\"",
"converter",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"def",
"formatTime",
"(",
"self",
",",
"record",
",",
"datefmt",
"=",
"None",
")",
":",
"date_and_time",
"=",
"self",
".",
"converter",
"(",
"record",
".",
"created",
",",
"tz",
"=",
"pytz",
".",
"utc",
")",
"if",
"\"%F\"",
"in",
"datefmt",
":",
"msec",
"=",
"\"%03d\"",
"%",
"record",
".",
"msecs",
"datefmt",
"=",
"datefmt",
".",
"replace",
"(",
"\"%F\"",
",",
"msec",
")",
"str_time",
"=",
"date_and_time",
".",
"strftime",
"(",
"datefmt",
")",
"return",
"str_time",
"COLOR_ON",
"=",
"color",
"stream_handler",
".",
"setFormatter",
"(",
"ColoredBenchFormatter",
"(",
"LOGGING_CONFIG",
".",
"get",
"(",
"\"IceteaManager\"",
")",
".",
"get",
"(",
"\"format\"",
")",
",",
"LOGGING_CONFIG",
".",
"get",
"(",
"\"IceteaManager\"",
")",
".",
"get",
"(",
"\"dateformat\"",
")",
",",
"LEVEL_FORMATS",
",",
"FIELD_STYLES",
")",
")",
"SILENT_ON",
"=",
"silent",
"VERBOSE_LEVEL",
"=",
"verbose",
"if",
"not",
"no_file",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"LOGPATHDIR",
")",
"except",
"OSError",
":",
"raise",
"OSError",
"(",
"\"Log path %s already exists.\"",
"%",
"LOGPATHDIR",
")",
"filename",
"=",
"LOGGING_CONFIG",
".",
"get",
"(",
"\"IceteaManager\"",
")",
".",
"get",
"(",
"\"file\"",
")",
".",
"get",
"(",
"\"name\"",
",",
"\"icetea.log\"",
")",
"icetealogger",
"=",
"_add_filehandler",
"(",
"icetealogger",
",",
"get_base_logfilename",
"(",
"filename",
")",
",",
"formatter",
",",
"\"IceteaManager\"",
")",
"if",
"verbose",
"and",
"not",
"silent",
":",
"stream_handler",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"elif",
"silent",
":",
"stream_handler",
".",
"setLevel",
"(",
"logging",
".",
"WARN",
")",
"else",
":",
"stream_handler",
".",
"setLevel",
"(",
"getattr",
"(",
"logging",
",",
"LOGGING_CONFIG",
".",
"get",
"(",
"\"IceteaManager\"",
")",
".",
"get",
"(",
"\"level\"",
")",
")",
")",
"icetealogger",
".",
"addHandler",
"(",
"stream_handler",
")",
"TRUNCATE_LOG",
"=",
"truncate",
"if",
"TRUNCATE_LOG",
":",
"icetealogger",
".",
"addFilter",
"(",
"ContextFilter",
"(",
")",
")",
"STANDALONE_LOGGING",
"=",
"False"
] | Initialize the Icetea logging by creating a directory to store logs
for this run and initialize the console logger for Icetea itself.
:param directory: Directory where to store the resulting logs
:param verbose: Log level as integer
:param silent: Log level warning
:param no_file: Log to file
:param color: Log coloring
:param truncate: Log truncating
:param config_location: Location of config file.
:raises IOError if unable to read configuration file.
:raises OSError if log path already exists.
:raises ImportError if colored logging was requested but coloredlogs module is not installed. | [
"Initialize",
"the",
"Icetea",
"logging",
"by",
"creating",
"a",
"directory",
"to",
"store",
"logs",
"for",
"this",
"run",
"and",
"initialize",
"the",
"console",
"logger",
"for",
"Icetea",
"itself",
"."
] | python | train |
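Stripped of the Icetea-specific configuration plumbing, the handler wiring above follows a common stdlib pattern: one logger, a console `StreamHandler` whose level depends on the verbose/silent flags, and an optional file handler that keeps everything. A minimal sketch of just that pattern (all names here are illustrative, not Icetea APIs):

```python
import logging


def init_logging(name='demo', verbose=False, silent=False, logfile=None):
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(logging.DEBUG)   # logger passes all; handlers filter
    formatter = logging.Formatter('%(asctime)s | %(levelname)-7s | %(message)s')

    console = logging.StreamHandler()
    console.setFormatter(formatter)
    if verbose and not silent:
        console.setLevel(logging.DEBUG)
    elif silent:
        console.setLevel(logging.WARN)
    else:
        console.setLevel(logging.INFO)
    logger.addHandler(console)

    if logfile:                      # file handler records everything
        filehandler = logging.FileHandler(logfile)
        filehandler.setFormatter(formatter)
        filehandler.setLevel(logging.DEBUG)
        logger.addHandler(filehandler)
    return logger


log = init_logging(verbose=True)
log.debug('visible because verbose=True')
```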
python-gitlab/python-gitlab | gitlab/v4/objects.py | https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L2295-L2310 | def pipelines(self, **kwargs):
"""List the merge request pipelines.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of changes
"""
path = '%s/%s/pipelines' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs) | [
"def",
"pipelines",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"'%s/%s/pipelines'",
"%",
"(",
"self",
".",
"manager",
".",
"path",
",",
"self",
".",
"get_id",
"(",
")",
")",
"return",
"self",
".",
"manager",
".",
"gitlab",
".",
"http_get",
"(",
"path",
",",
"*",
"*",
"kwargs",
")"
] | List the merge request pipelines.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: List of pipelines | [
"List",
"the",
"merge",
"request",
"pipelines",
"."
] | python | train |
roclark/sportsreference | sportsreference/nba/boxscore.py | https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/boxscore.py#L110-L119 | def minutes_played(self):
"""
Returns a ``float`` of the number of game minutes the player was on the
court for.
"""
if self._minutes_played[self._index]:
minutes, seconds = self._minutes_played[self._index].split(':')
minutes = float(minutes) + float(seconds) / 60
return float(minutes)
return None | [
"def",
"minutes_played",
"(",
"self",
")",
":",
"if",
"self",
".",
"_minutes_played",
"[",
"self",
".",
"_index",
"]",
":",
"minutes",
",",
"seconds",
"=",
"self",
".",
"_minutes_played",
"[",
"self",
".",
"_index",
"]",
".",
"split",
"(",
"':'",
")",
"minutes",
"=",
"float",
"(",
"minutes",
")",
"+",
"float",
"(",
"seconds",
")",
"/",
"60",
"return",
"float",
"(",
"minutes",
")",
"return",
"None"
] | Returns a ``float`` of the number of game minutes the player was on the
court for. | [
"Returns",
"a",
"float",
"of",
"the",
"number",
"of",
"game",
"minutes",
"the",
"player",
"was",
"on",
"the",
"court",
"for",
"."
] | python | train |
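The property above turns box-score clock strings such as `'34:27'` into fractional minutes. The conversion is worth seeing in isolation:

```python
def minutes_from_clock(value):
    """Convert an 'MM:SS' box-score string to fractional minutes."""
    if not value:
        return None
    minutes, seconds = value.split(':')
    return float(minutes) + float(seconds) / 60


print(minutes_from_clock('34:27'))   # 34.45
```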
ambitioninc/python-logentries-api | logentries_api/resources.py | https://github.com/ambitioninc/python-logentries-api/blob/77ff1a7a2995d7ea2725b74e34c0f880f4ee23bc/logentries_api/resources.py#L378-L400 | def get(self, name_or_tag_id):
"""
Get hooks by name or tag_id.
:param name_or_tag_id: The hook's name or associated tag['id']
:type name_or_tag_id: str
:return: A list of matching tags. An empty list is returned if there are
no matches
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
"""
hooks = self.list()
return [
hook
for hook
in hooks
if name_or_tag_id in hook.get('actions')
or name_or_tag_id == hook.get('name')
] | [
"def",
"get",
"(",
"self",
",",
"name_or_tag_id",
")",
":",
"hooks",
"=",
"self",
".",
"list",
"(",
")",
"return",
"[",
"hook",
"for",
"hook",
"in",
"hooks",
"if",
"name_or_tag_id",
"in",
"hook",
".",
"get",
"(",
"'actions'",
")",
"or",
"name_or_tag_id",
"==",
"hook",
".",
"get",
"(",
"'name'",
")",
"]"
] | Get hooks by name or tag_id.
:param name_or_tag_id: The hook's name or associated tag['id']
:type name_or_tag_id: str
:return: A list of matching tags. An empty list is returned if there are
no matches
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries | [
"Get",
"hooks",
"by",
"name",
"or",
"tag_id",
".",
":",
"param",
"name_or_tag_id",
":",
"The",
"hook",
"s",
"name",
"or",
"associated",
"tag",
"[",
"id",
"]",
":",
"type",
"name_or_tag_id",
":",
"str"
] | python | test |
lvieirajr/mongorest | mongorest/resource.py | https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/resource.py#L96-L114 | def create(self, request):
"""
Creates a new document based on the given data
"""
document = self.collection(request.json)
document.created_at = datetime.utcnow()
document.updated_at = document.created_at
created = document.insert()
return Response(
response=serialize(created),
status=(
201 if not all(
key in created for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
) | [
"def",
"create",
"(",
"self",
",",
"request",
")",
":",
"document",
"=",
"self",
".",
"collection",
"(",
"request",
".",
"json",
")",
"document",
".",
"created_at",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"document",
".",
"updated_at",
"=",
"document",
".",
"created_at",
"created",
"=",
"document",
".",
"insert",
"(",
")",
"return",
"Response",
"(",
"response",
"=",
"serialize",
"(",
"created",
")",
",",
"status",
"=",
"(",
"201",
"if",
"not",
"all",
"(",
"key",
"in",
"created",
"for",
"key",
"in",
"[",
"'error_code'",
",",
"'error_type'",
",",
"'error_message'",
"]",
")",
"else",
"400",
")",
")"
] | Creates a new document based on the given data | [
"Creates",
"a",
"new",
"document",
"based",
"on",
"the",
"given",
"data"
] | python | train |
roboogle/gtkmvc3 | gtkmvco/gtkmvc3/controller.py | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/controller.py#L229-L281 | def setup_column(self, widget, column=0, attribute=None, renderer=None,
property=None, from_python=None, to_python=None, model=None):
# Maybe this is too overloaded.
"""
Set up a :class:`TreeView` to display attributes of Python objects
stored in its :class:`TreeModel`.
This assumes that :class:`TreeViewColumn` instances have already
been added and :class:`CellRenderer` instances packed into them.
Both can be done in Glade.
*model* is the instance displayed by the widget. You only need to pass
this if you set *renderer* to be editable.
If you use sorting or filtering this may not be the actual data store,
but all tree paths and column indexes are relative to this.
Defaults to our model.
*widget* is a column, or a string naming one in our view.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
*renderer* defaults to the first one found in *widget*.
*property* is a string naming the property of *renderer* to set. If not
given this is guessed based on the type of *renderer*.
*from_python* is a callable. It gets passed a value from the object and
must return it in a format suitable for *renderer*. If not given this
is guessed based on *property*.
*to_python* is a callable. It gets passed a value from *renderer* and
must return it in a format suitable for the attribute. If not given a
cast to the type of the previous attribute value is attempted.
If you need more flexibility, like setting multiple properties, setting
your own cell data function will override the internal one.
Returns an integer you can use to disconnect the internal editing
callback from *renderer*, or None.
.. versionadded:: 1.99.2
"""
if isinstance(widget, str):
widget = self.view[widget]
if not model and isinstance(self.model, Gtk.TreeModel):
model = self.model
return setup_column(widget, column=column, attribute=attribute,
renderer=renderer, property=property, from_python=from_python,
to_python=to_python, model=model) | [
"def",
"setup_column",
"(",
"self",
",",
"widget",
",",
"column",
"=",
"0",
",",
"attribute",
"=",
"None",
",",
"renderer",
"=",
"None",
",",
"property",
"=",
"None",
",",
"from_python",
"=",
"None",
",",
"to_python",
"=",
"None",
",",
"model",
"=",
"None",
")",
":",
"# Maybe this is too overloaded.",
"if",
"isinstance",
"(",
"widget",
",",
"str",
")",
":",
"widget",
"=",
"self",
".",
"view",
"[",
"widget",
"]",
"if",
"not",
"model",
"and",
"isinstance",
"(",
"self",
".",
"model",
",",
"Gtk",
".",
"TreeModel",
")",
":",
"model",
"=",
"self",
".",
"model",
"return",
"setup_column",
"(",
"widget",
",",
"column",
"=",
"column",
",",
"attribute",
"=",
"attribute",
",",
"renderer",
"=",
"renderer",
",",
"property",
"=",
"property",
",",
"from_python",
"=",
"from_python",
",",
"to_python",
"=",
"to_python",
",",
"model",
"=",
"model",
")"
] | Set up a :class:`TreeView` to display attributes of Python objects
stored in its :class:`TreeModel`.
This assumes that :class:`TreeViewColumn` instances have already
been added and :class:`CellRenderer` instances packed into them.
Both can be done in Glade.
*model* is the instance displayed by the widget. You only need to pass
this if you set *renderer* to be editable.
If you use sorting or filtering this may not be the actual data store,
but all tree paths and column indexes are relative to this.
Defaults to our model.
*widget* is a column, or a string naming one in our view.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
*renderer* defaults to the first one found in *widget*.
*property* is a string naming the property of *renderer* to set. If not
given this is guessed based on the type of *renderer*.
*from_python* is a callable. It gets passed a value from the object and
must return it in a format suitable for *renderer*. If not given this
is guessed based on *property*.
*to_python* is a callable. It gets passed a value from *renderer* and
must return it in a format suitable for the attribute. If not given a
cast to the type of the previous attribute value is attempted.
If you need more flexibility, like setting multiple properties, setting
your own cell data function will override the internal one.
Returns an integer you can use to disconnect the internal editing
callback from *renderer*, or None.
.. versionadded:: 1.99.2 | [
"Set",
"up",
"a",
":",
"class",
":",
"TreeView",
"to",
"display",
"attributes",
"of",
"Python",
"objects",
"stored",
"in",
"its",
":",
"class",
":",
"TreeModel",
"."
] | python | train |
concordusapps/alchemist | alchemist/db/model.py | https://github.com/concordusapps/alchemist/blob/822571366271b5dca0ac8bf41df988c6a3b61432/alchemist/db/model.py#L17-L37 | def _component_of(name):
"""Get the root package or module of the passed module.
"""
# Get the registered package this model belongs to.
segments = name.split('.')
while segments:
# Is this name a registered package?
test = '.'.join(segments)
if test in settings.get('COMPONENTS', []):
# This is the component we are in.
return test
# Remove the right-most segment.
segments.pop()
if not segments and '.models' in name:
# No package was found to be registered; attempt to guess the
# right package name; strip all occurrances of '.models' from the
# pacakge name.
return _component_of(name.replace('.models', '')) | [
"def",
"_component_of",
"(",
"name",
")",
":",
"# Get the registered package this model belongs to.",
"segments",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"while",
"segments",
":",
"# Is this name a registered package?",
"test",
"=",
"'.'",
".",
"join",
"(",
"segments",
")",
"if",
"test",
"in",
"settings",
".",
"get",
"(",
"'COMPONENTS'",
",",
"[",
"]",
")",
":",
"# This is the component we are in.",
"return",
"test",
"# Remove the right-most segment.",
"segments",
".",
"pop",
"(",
")",
"if",
"not",
"segments",
"and",
"'.models'",
"in",
"name",
":",
"# No package was found to be registered; attempt to guess the",
"# right package name; strip all occurrances of '.models' from the",
"# pacakge name.",
"return",
"_component_of",
"(",
"name",
".",
"replace",
"(",
"'.models'",
",",
"''",
")",
")"
] | Get the root package or module of the passed module. | [
"Get",
"the",
"root",
"package",
"or",
"module",
"of",
"the",
"passed",
"module",
"."
] | python | train |
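`_component_of` pops dotted-name segments from the right until it hits a registered component, then retries with `.models` stripped. A standalone sketch of that walk with a hard-coded registry (the `registered` set below is illustrative):

```python
def component_of(name, registered):
    """Return the registered package that `name` belongs to, if any."""
    segments = name.split('.')
    while segments:
        test = '.'.join(segments)
        if test in registered:
            return test
        segments.pop()                        # drop the right-most segment
    if '.models' in name:                     # retry without '.models'
        return component_of(name.replace('.models', ''), registered)


print(component_of('shop.cart.models.order', {'shop.cart'}))  # shop.cart
print(component_of('shop.models', {'shop'}))                  # shop
```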
pallets/werkzeug | examples/simplewiki/utils.py | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/examples/simplewiki/utils.py#L65-L75 | def href(*args, **kw):
"""
Simple function for URL generation. Position arguments are used for the
URL path and keyword arguments are used for the url parameters.
"""
result = [(request.script_root if request else "") + "/"]
for idx, arg in enumerate(args):
result.append(("/" if idx else "") + url_quote(arg))
if kw:
result.append("?" + url_encode(kw))
return "".join(result) | [
"def",
"href",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"result",
"=",
"[",
"(",
"request",
".",
"script_root",
"if",
"request",
"else",
"\"\"",
")",
"+",
"\"/\"",
"]",
"for",
"idx",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"result",
".",
"append",
"(",
"(",
"\"/\"",
"if",
"idx",
"else",
"\"\"",
")",
"+",
"url_quote",
"(",
"arg",
")",
")",
"if",
"kw",
":",
"result",
".",
"append",
"(",
"\"?\"",
"+",
"url_encode",
"(",
"kw",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"result",
")"
] | Simple function for URL generation. Position arguments are used for the
URL path and keyword arguments are used for the url parameters. | [
"Simple",
"function",
"for",
"URL",
"generation",
".",
"Position",
"arguments",
"are",
"used",
"for",
"the",
"URL",
"path",
"and",
"keyword",
"arguments",
"are",
"used",
"for",
"the",
"url",
"parameters",
"."
] | python | train |
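Outside a Werkzeug request context, the same URL-building idea can be sketched with the standard library (`urllib.parse.quote`/`urlencode` standing in for Werkzeug's `url_quote`/`url_encode`):

```python
from urllib.parse import quote, urlencode


def href(*args, script_root='', **kw):
    """Join positional segments into a path and keyword args into a query."""
    result = [script_root + '/']
    for idx, arg in enumerate(args):
        result.append(('/' if idx else '') + quote(str(arg)))
    if kw:
        result.append('?' + urlencode(kw))
    return ''.join(result)


print(href('Main Page', 'edit', version=2))  # /Main%20Page/edit?version=2
```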
RudolfCardinal/pythonlib | cardinal_pythonlib/deform_utils.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/deform_utils.py#L205-L213 | def gen_fields(field: Field) -> Generator[Field, None, None]:
"""
Starting with a Deform :class:`Field`, yield the field itself and any
children.
"""
yield field
for c in field.children:
for f in gen_fields(c):
yield f | [
"def",
"gen_fields",
"(",
"field",
":",
"Field",
")",
"->",
"Generator",
"[",
"Field",
",",
"None",
",",
"None",
"]",
":",
"yield",
"field",
"for",
"c",
"in",
"field",
".",
"children",
":",
"for",
"f",
"in",
"gen_fields",
"(",
"c",
")",
":",
"yield",
"f"
] | Starting with a Deform :class:`Field`, yield the field itself and any
children. | [
"Starting",
"with",
"a",
"Deform",
":",
"class",
":",
"Field",
"yield",
"the",
"field",
"itself",
"and",
"any",
"children",
"."
] | python | train |
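The pre-order traversal in `gen_fields` works for any tree of objects exposing a `children` list. A tiny runnable version with a stand-in node class (`yield from` is the Python 3 spelling of the nested `for`/`yield` loop above):

```python
from typing import Generator, List


class Node:
    def __init__(self, name: str, children: List['Node'] = ()) -> None:
        self.name = name
        self.children = list(children)


def gen_fields(field: Node) -> Generator[Node, None, None]:
    """Yield `field` itself and then, depth-first, every descendant."""
    yield field
    for child in field.children:
        yield from gen_fields(child)


tree = Node('form', [Node('name'), Node('address', [Node('city')])])
print([n.name for n in gen_fields(tree)])  # ['form', 'name', 'address', 'city']
```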
Kozea/pygal | pygal/interpolate.py | https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/interpolate.py#L32-L59 | def quadratic_interpolate(x, y, precision=250, **kwargs):
"""
Interpolate x, y using a quadratic algorithm
https://en.wikipedia.org/wiki/Spline_(mathematics)
"""
n = len(x) - 1
delta_x = [x2 - x1 for x1, x2 in zip(x, x[1:])]
delta_y = [y2 - y1 for y1, y2 in zip(y, y[1:])]
slope = [delta_y[i] / delta_x[i] if delta_x[i] else 1 for i in range(n)]
# Quadratic spline: a + bx + cx²
a = y
b = [0] * (n + 1)
c = [0] * (n + 1)
for i in range(1, n):
b[i] = 2 * slope[i - 1] - b[i - 1]
c = [(slope[i] - b[i]) / delta_x[i] if delta_x[i] else 0 for i in range(n)]
for i in range(n + 1):
yield x[i], a[i]
if i == n or delta_x[i] == 0:
continue
for s in range(1, precision):
X = s * delta_x[i] / precision
X2 = X * X
yield x[i] + X, a[i] + b[i] * X + c[i] * X2 | [
"def",
"quadratic_interpolate",
"(",
"x",
",",
"y",
",",
"precision",
"=",
"250",
",",
"*",
"*",
"kwargs",
")",
":",
"n",
"=",
"len",
"(",
"x",
")",
"-",
"1",
"delta_x",
"=",
"[",
"x2",
"-",
"x1",
"for",
"x1",
",",
"x2",
"in",
"zip",
"(",
"x",
",",
"x",
"[",
"1",
":",
"]",
")",
"]",
"delta_y",
"=",
"[",
"y2",
"-",
"y1",
"for",
"y1",
",",
"y2",
"in",
"zip",
"(",
"y",
",",
"y",
"[",
"1",
":",
"]",
")",
"]",
"slope",
"=",
"[",
"delta_y",
"[",
"i",
"]",
"/",
"delta_x",
"[",
"i",
"]",
"if",
"delta_x",
"[",
"i",
"]",
"else",
"1",
"for",
"i",
"in",
"range",
"(",
"n",
")",
"]",
"# Quadratic spline: a + bx + cx²",
"a",
"=",
"y",
"b",
"=",
"[",
"0",
"]",
"*",
"(",
"n",
"+",
"1",
")",
"c",
"=",
"[",
"0",
"]",
"*",
"(",
"n",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"b",
"[",
"i",
"]",
"=",
"2",
"*",
"slope",
"[",
"i",
"-",
"1",
"]",
"-",
"b",
"[",
"i",
"-",
"1",
"]",
"c",
"=",
"[",
"(",
"slope",
"[",
"i",
"]",
"-",
"b",
"[",
"i",
"]",
")",
"/",
"delta_x",
"[",
"i",
"]",
"if",
"delta_x",
"[",
"i",
"]",
"else",
"0",
"for",
"i",
"in",
"range",
"(",
"n",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
"+",
"1",
")",
":",
"yield",
"x",
"[",
"i",
"]",
",",
"a",
"[",
"i",
"]",
"if",
"i",
"==",
"n",
"or",
"delta_x",
"[",
"i",
"]",
"==",
"0",
":",
"continue",
"for",
"s",
"in",
"range",
"(",
"1",
",",
"precision",
")",
":",
"X",
"=",
"s",
"*",
"delta_x",
"[",
"i",
"]",
"/",
"precision",
"X2",
"=",
"X",
"*",
"X",
"yield",
"x",
"[",
"i",
"]",
"+",
"X",
",",
"a",
"[",
"i",
"]",
"+",
"b",
"[",
"i",
"]",
"*",
"X",
"+",
"c",
"[",
"i",
"]",
"*",
"X2"
] | Interpolate x, y using a quadratic algorithm
https://en.wikipedia.org/wiki/Spline_(mathematics) | [
"Interpolate",
"x",
"y",
"using",
"a",
"quadratic",
"algorithm",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki",
"/",
"Spline_",
"(",
"mathematics",
")"
] | python | train |
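The generator above yields the original knots plus `precision - 1` interpolated points per segment, as `(x, y)` pairs. A short usage sketch, assuming pygal is installed:

```python
from pygal.interpolate import quadratic_interpolate

xs = [0, 1, 2, 3]
ys = [0, 1, 4, 9]

points = list(quadratic_interpolate(xs, ys, precision=4))
print(len(points))               # 4 knots + 3 segments * 3 points = 13
for x, y in points[:5]:
    print(round(x, 2), round(y, 2))
```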
clalancette/pycdlib | pycdlib/rockridge.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L85-L99 | def new(self, bytes_to_skip):
# type: (int) -> None
'''
Create a new Rock Ridge Sharing Protocol record.
Parameters:
bytes_to_skip - The number of bytes to skip.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('SP record already initialized!')
self.bytes_to_skip = bytes_to_skip
self._initialized = True | [
"def",
"new",
"(",
"self",
",",
"bytes_to_skip",
")",
":",
"# type: (int) -> None",
"if",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'SP record already initialized!'",
")",
"self",
".",
"bytes_to_skip",
"=",
"bytes_to_skip",
"self",
".",
"_initialized",
"=",
"True"
] | Create a new Rock Ridge Sharing Protocol record.
Parameters:
bytes_to_skip - The number of bytes to skip.
Returns:
Nothing. | [
"Create",
"a",
"new",
"Rock",
"Ridge",
"Sharing",
"Protocol",
"record",
"."
] | python | train |
onicagroup/runway | runway/module/staticsite.py | https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/module/staticsite.py#L147-L154 | def plan(self):
"""Create website CFN module and run stacker diff."""
if self.options.get('environments', {}).get(self.context.env_name):
self.setup_website_module(command='plan')
else:
LOGGER.info("Skipping staticsite plan of %s; no environment "
"config found for this environment/region",
self.options['path']) | [
"def",
"plan",
"(",
"self",
")",
":",
"if",
"self",
".",
"options",
".",
"get",
"(",
"'environments'",
",",
"{",
"}",
")",
".",
"get",
"(",
"self",
".",
"context",
".",
"env_name",
")",
":",
"self",
".",
"setup_website_module",
"(",
"command",
"=",
"'plan'",
")",
"else",
":",
"LOGGER",
".",
"info",
"(",
"\"Skipping staticsite plan of %s; no environment \"",
"\"config found for this environment/region\"",
",",
"self",
".",
"options",
"[",
"'path'",
"]",
")"
] | Create website CFN module and run stacker diff. | [
"Create",
"website",
"CFN",
"module",
"and",
"run",
"stacker",
"diff",
"."
] | python | train |
googleapis/google-cloud-python | spanner/google/cloud/spanner_v1/database.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/database.py#L283-L291 | def drop(self):
"""Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
api.drop_database(self.name, metadata=metadata) | [
"def",
"drop",
"(",
"self",
")",
":",
"api",
"=",
"self",
".",
"_instance",
".",
"_client",
".",
"database_admin_api",
"metadata",
"=",
"_metadata_with_prefix",
"(",
"self",
".",
"name",
")",
"api",
".",
"drop_database",
"(",
"self",
".",
"name",
",",
"metadata",
"=",
"metadata",
")"
] | Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase | [
"Drop",
"this",
"database",
"."
] | python | train |
TrafficSenseMSD/SumoTools | traci/_route.py | https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_route.py#L38-L46 | def add(self, routeID, edges):
"""add(string, list(string)) -> None
Adds a new route with the given id consisting of the given list of edge IDs.
"""
self._connection._beginMessage(tc.CMD_SET_ROUTE_VARIABLE, tc.ADD, routeID,
1 + 4 + sum(map(len, edges)) + 4 * len(edges))
self._connection._packStringList(edges)
self._connection._sendExact() | [
"def",
"add",
"(",
"self",
",",
"routeID",
",",
"edges",
")",
":",
"self",
".",
"_connection",
".",
"_beginMessage",
"(",
"tc",
".",
"CMD_SET_ROUTE_VARIABLE",
",",
"tc",
".",
"ADD",
",",
"routeID",
",",
"1",
"+",
"4",
"+",
"sum",
"(",
"map",
"(",
"len",
",",
"edges",
")",
")",
"+",
"4",
"*",
"len",
"(",
"edges",
")",
")",
"self",
".",
"_connection",
".",
"_packStringList",
"(",
"edges",
")",
"self",
".",
"_connection",
".",
"_sendExact",
"(",
")"
] | add(string, list(string)) -> None
Adds a new route with the given id consisting of the given list of edge IDs. | [
"add",
"(",
"string",
"list",
"(",
"string",
"))",
"-",
">",
"None"
] | python | train |
20c/vodka | vodka/config/__init__.py | https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/config/__init__.py#L408-L431 | def read(self, config_dir=None, clear=False, config_file=None):
"""
The munge Config's read function only allows reading from
a config directory, but we also want to be able to read
straight from a config file as well
"""
if config_file:
data_file = os.path.basename(config_file)
data_path = os.path.dirname(config_file)
if clear:
self.clear()
config = munge.load_datafile(data_file, data_path, default=None)
if not config:
raise IOError("Config file not found: %s" % config_file)
munge.util.recursive_update(self.data, config)
self._meta_config_dir = data_path
return
else:
return super(Config, self).read(config_dir=config_dir, clear=clear) | [
"def",
"read",
"(",
"self",
",",
"config_dir",
"=",
"None",
",",
"clear",
"=",
"False",
",",
"config_file",
"=",
"None",
")",
":",
"if",
"config_file",
":",
"data_file",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"config_file",
")",
"data_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_file",
")",
"if",
"clear",
":",
"self",
".",
"clear",
"(",
")",
"config",
"=",
"munge",
".",
"load_datafile",
"(",
"data_file",
",",
"data_path",
",",
"default",
"=",
"None",
")",
"if",
"not",
"config",
":",
"raise",
"IOError",
"(",
"\"Config file not found: %s\"",
"%",
"config_file",
")",
"munge",
".",
"util",
".",
"recursive_update",
"(",
"self",
".",
"data",
",",
"config",
")",
"self",
".",
"_meta_config_dir",
"=",
"data_path",
"return",
"else",
":",
"return",
"super",
"(",
"Config",
",",
"self",
")",
".",
"read",
"(",
"config_dir",
"=",
"config_dir",
",",
"clear",
"=",
"clear",
")"
] | The munge Config's read function only allows reading from
a config directory, but we also want to be able to read
straight from a config file as well | [
"The",
"munge",
"Config",
"s",
"read",
"function",
"only",
"allows",
"to",
"read",
"from",
"a",
"config",
"directory",
"but",
"we",
"also",
"want",
"to",
"be",
"able",
"to",
"read",
"straight",
"from",
"a",
"config",
"file",
"as",
"well"
] | python | train |
nerdvegas/rez | src/rez/vendor/pygraph/algorithms/critical.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/critical.py#L38-L55 | def _intersection(A,B):
"""
A simple function to find an intersection between two arrays.
@type A: List
@param A: First List
@type B: List
@param B: Second List
@rtype: List
@return: List of Intersections
"""
intersection = []
for i in A:
if i in B:
intersection.append(i)
return intersection | [
"def",
"_intersection",
"(",
"A",
",",
"B",
")",
":",
"intersection",
"=",
"[",
"]",
"for",
"i",
"in",
"A",
":",
"if",
"i",
"in",
"B",
":",
"intersection",
".",
"append",
"(",
"i",
")",
"return",
"intersection"
] | A simple function to find an intersection between two arrays.
@type A: List
@param A: First List
@type B: List
@param B: Second List
@rtype: List
@return: List of Intersections | [
"A",
"simple",
"function",
"to",
"find",
"an",
"intersection",
"between",
"two",
"arrays",
"."
] | python | train |
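The helper above is an order-preserving intersection; a quick behavioral check, plus a complexity note:

_intersection([1, 2, 3, 4], [2, 4, 6])  # -> [2, 4]
# The nested membership test is O(len(A) * len(B)); converting B to a set
# first would make each lookup O(1) for large inputs.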
calston/rhumba | rhumba/client.py | https://github.com/calston/rhumba/blob/05e3cbf4e531cc51b4777912eb98a4f006893f5e/rhumba/client.py#L129-L141 | def getResult(self, queue, uid, suid=None):
"""
Retrieve the result of a job from its ID
"""
if suid:
r = self._get_client().get('rhumba.dq.%s.%s.%s' % (suid, queue, uid))
else:
r = self._get_client().get('rhumba.q.%s.%s' % (queue, uid))
if r:
return json.loads(r)
else:
return None | [
"def",
"getResult",
"(",
"self",
",",
"queue",
",",
"uid",
",",
"suid",
"=",
"None",
")",
":",
"if",
"suid",
":",
"r",
"=",
"self",
".",
"_get_client",
"(",
")",
".",
"get",
"(",
"'rhumba.dq.%s.%s.%s'",
"%",
"(",
"suid",
",",
"queue",
",",
"uid",
")",
")",
"else",
":",
"r",
"=",
"self",
".",
"_get_client",
"(",
")",
".",
"get",
"(",
"'rhumba.q.%s.%s'",
"%",
"(",
"queue",
",",
"uid",
")",
")",
"if",
"r",
":",
"return",
"json",
".",
"loads",
"(",
"r",
")",
"else",
":",
"return",
"None"
] | Retrieve the result of a job from its ID | [
"Retrieve",
"the",
"result",
"of",
"a",
"job",
"from",
"its",
"ID"
] | python | train |
Shizmob/pydle | pydle/features/isupport.py | https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/isupport.py#L132-L135 | async def on_isupport_extban(self, value):
""" Extended ban prefixes. """
self._extban_prefix, types = value.split(',')
self._extban_types = set(types) | [
"async",
"def",
"on_isupport_extban",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_extban_prefix",
",",
"types",
"=",
"value",
".",
"split",
"(",
"','",
")",
"self",
".",
"_extban_types",
"=",
"set",
"(",
"types",
")"
] | Extended ban prefixes. | [
"Extended",
"ban",
"prefixes",
"."
] | python | train |
tomduck/pandoc-xnos | pandocxnos/pandocattributes.py | https://github.com/tomduck/pandoc-xnos/blob/df8e162d257a548cea7eebf597efb2c21a1a4ba3/pandocxnos/pandocattributes.py#L122-L146 | def parse_html(self, attr_string):
"""Read a html string to attributes."""
splitter = re.compile(self.split_regex(separator=self.spnl))
attrs = splitter.split(attr_string)[1::2]
idre = re.compile(r'''id=["']?([\w ]*)['"]?''')
clsre = re.compile(r'''class=["']?([\w ]*)['"]?''')
id_matches = [idre.search(a) for a in attrs]
cls_matches = [clsre.search(a) for a in attrs]
try:
id = [m.groups()[0] for m in id_matches if m][0]
except IndexError:
id = ''
classes = [m.groups()[0] for m in cls_matches if m][0].split()
special = ['unnumbered' for a in attrs if '-' in a]
classes.extend(special)
kvs = [a.split('=', 1) for a in attrs if '=' in a]
kvs = OrderedDict((k, v) for k, v in kvs if k not in ('id', 'class'))
return id, classes, kvs | [
"def",
"parse_html",
"(",
"self",
",",
"attr_string",
")",
":",
"splitter",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"split_regex",
"(",
"separator",
"=",
"self",
".",
"spnl",
")",
")",
"attrs",
"=",
"splitter",
".",
"split",
"(",
"attr_string",
")",
"[",
"1",
":",
":",
"2",
"]",
"idre",
"=",
"re",
".",
"compile",
"(",
"r'''id=[\"']?([\\w ]*)['\"]?'''",
")",
"clsre",
"=",
"re",
".",
"compile",
"(",
"r'''class=[\"']?([\\w ]*)['\"]?'''",
")",
"id_matches",
"=",
"[",
"idre",
".",
"search",
"(",
"a",
")",
"for",
"a",
"in",
"attrs",
"]",
"cls_matches",
"=",
"[",
"clsre",
".",
"search",
"(",
"a",
")",
"for",
"a",
"in",
"attrs",
"]",
"try",
":",
"id",
"=",
"[",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"for",
"m",
"in",
"id_matches",
"if",
"m",
"]",
"[",
"0",
"]",
"except",
"IndexError",
":",
"id",
"=",
"''",
"classes",
"=",
"[",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"for",
"m",
"in",
"cls_matches",
"if",
"m",
"]",
"[",
"0",
"]",
".",
"split",
"(",
")",
"special",
"=",
"[",
"'unnumbered'",
"for",
"a",
"in",
"attrs",
"if",
"'-'",
"in",
"a",
"]",
"classes",
".",
"extend",
"(",
"special",
")",
"kvs",
"=",
"[",
"a",
".",
"split",
"(",
"'='",
",",
"1",
")",
"for",
"a",
"in",
"attrs",
"if",
"'='",
"in",
"a",
"]",
"kvs",
"=",
"OrderedDict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kvs",
"if",
"k",
"not",
"in",
"(",
"'id'",
",",
"'class'",
")",
")",
"return",
"id",
",",
"classes",
",",
"kvs"
] | Read a html string to attributes. | [
"Read",
"a",
"html",
"string",
"to",
"attributes",
"."
] | python | train |
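An illustrative call, assuming the instance's splitter yields whitespace-separated key=value tokens (the split_regex/spnl details are not shown in this record), so the expected output is a sketch rather than a verified result:

attributes = PandocAttributes()  # assumed default construction
id_, classes, kvs = attributes.parse_html('id=fig1 class=wide width=50')
# expected shape: ('fig1', ['wide'], OrderedDict([('width', '50')]))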
fermiPy/fermipy | fermipy/timing.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/timing.py#L14-L21 | def elapsed_time(self):
"""Get the elapsed time."""
# Timer is running
if self._t0 is not None:
return self._time + self._get_time()
else:
return self._time | [
"def",
"elapsed_time",
"(",
"self",
")",
":",
"# Timer is running",
"if",
"self",
".",
"_t0",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_time",
"+",
"self",
".",
"_get_time",
"(",
")",
"else",
":",
"return",
"self",
".",
"_time"
] | Get the elapsed time. | [
"Get",
"the",
"elapsed",
"time",
"."
] | python | train |
dsoprea/PySchedules | pyschedules/xml_callbacks.py | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L143-L163 | def _startSchedulesNode(self, name, attrs):
"""Process the start of a node under xtvd/schedules"""
if name == 'schedule':
self._programId = attrs.get('program')
self._stationId = attrs.get('station')
self._time = self._parseDateTime(attrs.get('time'))
self._duration = self._parseDuration(attrs.get('duration'))
self._new = attrs.has_key('new')
self._stereo = attrs.has_key('stereo')
self._subtitled = attrs.has_key('subtitled')
self._hdtv = attrs.has_key('hdtv')
self._closeCaptioned = attrs.has_key('closeCaptioned')
self._ei = attrs.has_key('ei')
self._tvRating = attrs.get('tvRating')
self._dolby = attrs.get('dolby')
self._partNumber = None
self._partTotal = None
elif name == 'part':
self._partNumber = attrs.get('number')
self._partTotal = attrs.get('total') | [
"def",
"_startSchedulesNode",
"(",
"self",
",",
"name",
",",
"attrs",
")",
":",
"if",
"name",
"==",
"'schedule'",
":",
"self",
".",
"_programId",
"=",
"attrs",
".",
"get",
"(",
"'program'",
")",
"self",
".",
"_stationId",
"=",
"attrs",
".",
"get",
"(",
"'station'",
")",
"self",
".",
"_time",
"=",
"self",
".",
"_parseDateTime",
"(",
"attrs",
".",
"get",
"(",
"'time'",
")",
")",
"self",
".",
"_duration",
"=",
"self",
".",
"_parseDuration",
"(",
"attrs",
".",
"get",
"(",
"'duration'",
")",
")",
"self",
".",
"_new",
"=",
"attrs",
".",
"has_key",
"(",
"'new'",
")",
"self",
".",
"_stereo",
"=",
"attrs",
".",
"has_key",
"(",
"'stereo'",
")",
"self",
".",
"_subtitled",
"=",
"attrs",
".",
"has_key",
"(",
"'subtitled'",
")",
"self",
".",
"_hdtv",
"=",
"attrs",
".",
"has_key",
"(",
"'hdtv'",
")",
"self",
".",
"_closeCaptioned",
"=",
"attrs",
".",
"has_key",
"(",
"'closeCaptioned'",
")",
"self",
".",
"_ei",
"=",
"attrs",
".",
"has_key",
"(",
"'ei'",
")",
"self",
".",
"_tvRating",
"=",
"attrs",
".",
"get",
"(",
"'tvRating'",
")",
"self",
".",
"_dolby",
"=",
"attrs",
".",
"get",
"(",
"'dolby'",
")",
"self",
".",
"_partNumber",
"=",
"None",
"self",
".",
"_partTotal",
"=",
"None",
"elif",
"name",
"==",
"'part'",
":",
"self",
".",
"_partNumber",
"=",
"attrs",
".",
"get",
"(",
"'number'",
")",
"self",
".",
"_partTotal",
"=",
"attrs",
".",
"get",
"(",
"'total'",
")"
] | Process the start of a node under xtvd/schedules | [
"Process",
"the",
"start",
"of",
"a",
"node",
"under",
"xtvd",
"/",
"schedules"
] | python | train |
JoelBender/bacpypes | py25/bacpypes/bsllservice.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/bsllservice.py#L1041-L1063 | def connect(self, addr):
"""Initiate a connection request to the peer router."""
if _debug: RouterToRouterService._debug("connect %r", addr)
# make a connection
conn = ConnectionState(addr)
self.multiplexer.connections[addr] = conn
# associate with this service, but it is not connected until the ack comes back
conn.service = self
# keep a list of pending NPDU objects until the ack comes back
conn.pendingNPDU = []
# build a service request
request = ServiceRequest(ROUTER_TO_ROUTER_SERVICE_ID)
request.pduDestination = addr
# send it
self.service_request(request)
# return the connection object
return conn | [
"def",
"connect",
"(",
"self",
",",
"addr",
")",
":",
"if",
"_debug",
":",
"RouterToRouterService",
".",
"_debug",
"(",
"\"connect %r\"",
",",
"addr",
")",
"# make a connection",
"conn",
"=",
"ConnectionState",
"(",
"addr",
")",
"self",
".",
"multiplexer",
".",
"connections",
"[",
"addr",
"]",
"=",
"conn",
"# associate with this service, but it is not connected until the ack comes back",
"conn",
".",
"service",
"=",
"self",
"# keep a list of pending NPDU objects until the ack comes back",
"conn",
".",
"pendingNPDU",
"=",
"[",
"]",
"# build a service request",
"request",
"=",
"ServiceRequest",
"(",
"ROUTER_TO_ROUTER_SERVICE_ID",
")",
"request",
".",
"pduDestination",
"=",
"addr",
"# send it",
"self",
".",
"service_request",
"(",
"request",
")",
"# return the connection object",
"return",
"conn"
] | Initiate a connection request to the peer router. | [
"Initiate",
"a",
"connection",
"request",
"to",
"the",
"peer",
"router",
"."
] | python | train |
pjuren/pyokit | src/pyokit/scripts/join.py | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L418-L440 | def __output_unpaired_vals(d_vals, used_ff_keys, f_f_header, sf_d, s_f_header,
missing_val, out_handler, outfh, delim="\t"):
"""
Use an output handler to output keys that could not be paired.
Go over the keys in d_vals and for any that were not used (i.e. not in
used_ff_keys), build an output line using the values from d_vals,
populating the missing columns with missing_val, and output these using the
provided output handler.
"""
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
for k in d_vals:
if k not in used_ff_keys:
f_f_flds = d_vals[k]
if s_f_header is not None:
s_f_flds = [dict(zip(s_f_header, [missing_val] * len(s_f_header)))]
else:
s_f_num_cols = len(sf_d[d_vals.keys()[0]][0])
s_f_flds = [[missing_val] * s_f_num_cols]
out_handler.write_output(outfh, delim, s_f_flds, f_f_flds,
s_f_header, f_f_header) | [
"def",
"__output_unpaired_vals",
"(",
"d_vals",
",",
"used_ff_keys",
",",
"f_f_header",
",",
"sf_d",
",",
"s_f_header",
",",
"missing_val",
",",
"out_handler",
",",
"outfh",
",",
"delim",
"=",
"\"\\t\"",
")",
":",
"if",
"missing_val",
"is",
"None",
":",
"raise",
"MissingValueError",
"(",
"\"Need missing value to output \"",
"+",
"\" unpaired lines\"",
")",
"for",
"k",
"in",
"d_vals",
":",
"if",
"k",
"not",
"in",
"used_ff_keys",
":",
"f_f_flds",
"=",
"d_vals",
"[",
"k",
"]",
"if",
"s_f_header",
"is",
"not",
"None",
":",
"s_f_flds",
"=",
"[",
"dict",
"(",
"zip",
"(",
"s_f_header",
",",
"[",
"missing_val",
"]",
"*",
"len",
"(",
"s_f_header",
")",
")",
")",
"]",
"else",
":",
"s_f_num_cols",
"=",
"len",
"(",
"sf_d",
"[",
"d_vals",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
"[",
"0",
"]",
")",
"s_f_flds",
"=",
"[",
"[",
"missing_val",
"]",
"*",
"s_f_num_cols",
"]",
"out_handler",
".",
"write_output",
"(",
"outfh",
",",
"delim",
",",
"s_f_flds",
",",
"f_f_flds",
",",
"s_f_header",
",",
"f_f_header",
")"
] | Use an output handler to output keys that could not be paired.
Go over the keys in d_vals and for any that were not used (i.e. not in
used_ff_keys), build an output line using the values from d_vals,
populating the missing columns with missing_val, and output these using the
provided output handler. | [
"Use",
"an",
"output",
"handler",
"to",
"output",
"keys",
"that",
"could",
"not",
"be",
"paired",
"."
] | python | train |
MacHu-GWU/angora-project | angora/bot/macro.py | https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/macro.py#L148-L154 | def Ctrl_Fn(self, n, dl = 0):
"""Ctrl + Fn1~12 组合键
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key(self.keyboard.function_keys[n])
self.keyboard.release_key(self.keyboard.control_key) | [
"def",
"Ctrl_Fn",
"(",
"self",
",",
"n",
",",
"dl",
"=",
"0",
")",
":",
"self",
".",
"Delay",
"(",
"dl",
")",
"self",
".",
"keyboard",
".",
"press_key",
"(",
"self",
".",
"keyboard",
".",
"control_key",
")",
"self",
".",
"keyboard",
".",
"tap_key",
"(",
"self",
".",
"keyboard",
".",
"function_keys",
"[",
"n",
"]",
")",
"self",
".",
"keyboard",
".",
"release_key",
"(",
"self",
".",
"keyboard",
".",
"control_key",
")"
] | Ctrl + Fn1~12 key combination | [
"Ctrl",
"+",
"Fn1~12",
"组合键"
] | python | train |
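An illustrative call, assuming bot is an instance of the macro class this method belongs to:

bot.Ctrl_Fn(5, dl=0.5)  # wait 0.5 s, then send Ctrl+F5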
mwickert/scikit-dsp-comm | sk_dsp_comm/fir_design_helper.py | https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L48-L56 | def firwin_bpf(N_taps, f1, f2, fs = 1.0, pass_zero=False):
"""
Design a windowed FIR bandpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016
"""
return signal.firwin(N_taps,2*np.array([f1,f2])/fs,pass_zero=pass_zero) | [
"def",
"firwin_bpf",
"(",
"N_taps",
",",
"f1",
",",
"f2",
",",
"fs",
"=",
"1.0",
",",
"pass_zero",
"=",
"False",
")",
":",
"return",
"signal",
".",
"firwin",
"(",
"N_taps",
",",
"2",
"*",
"(",
"f1",
",",
"f2",
")",
"/",
"fs",
",",
"pass_zero",
"=",
"pass_zero",
")"
] | Design a windowed FIR bandpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016 | [
"Design",
"a",
"windowed",
"FIR",
"bandpass",
"filter",
"in",
"terms",
"of",
"passband",
"critical",
"frequencies",
"f1",
"<",
"f2",
"in",
"Hz",
"relative",
"to",
"sampling",
"rate",
"fs",
"in",
"Hz",
".",
"The",
"number",
"of",
"taps",
"must",
"be",
"provided",
".",
"Mark",
"Wickert",
"October",
"2016"
] | python | valid |
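A usage sketch for the helper as fixed above; it assumes numpy is imported as np at module level (not shown in this snippet):

b = firwin_bpf(64, 4.0e3, 8.0e3, fs=48e3)  # 64-tap bandpass, 4-8 kHz passband
# b can then be applied with scipy.signal.lfilter(b, 1, x)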
dpkp/kafka-python | kafka/consumer/subscription_state.py | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/subscription_state.py#L76-L124 | def subscribe(self, topics=(), pattern=None, listener=None):
"""Subscribe to a list of topics, or a topic regex pattern.
Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).
This method is incompatible with assign_from_user()
Arguments:
topics (list): List of topics for subscription.
pattern (str): Pattern to match available topics. You must provide
either topics or pattern, but not both.
listener (ConsumerRebalanceListener): Optionally include listener
callback, which will be called before and after each rebalance
operation.
As part of group management, the consumer will keep track of the
list of consumers that belong to a particular group and will
trigger a rebalance operation if one of the following events
occurs:
* Number of partitions change for any of the subscribed topics
* Topic is created or deleted
* An existing member of the consumer group dies
* A new member is added to the consumer group
When any of these events are triggered, the provided listener
will be invoked first to indicate that the consumer's assignment
has been revoked, and then again when the new assignment has
been received. Note that this listener will immediately override
any listener set in a previous call to subscribe. It is
guaranteed, however, that the partitions revoked/assigned
through this interface are from topics subscribed in this call.
"""
if self._user_assignment or (topics and pattern):
raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
assert topics or pattern, 'Must provide topics or pattern'
if pattern:
log.info('Subscribing to pattern: /%s/', pattern)
self.subscription = set()
self.subscribed_pattern = re.compile(pattern)
else:
self.change_subscription(topics)
if listener and not isinstance(listener, ConsumerRebalanceListener):
raise TypeError('listener must be a ConsumerRebalanceListener')
self.listener = listener | [
"def",
"subscribe",
"(",
"self",
",",
"topics",
"=",
"(",
")",
",",
"pattern",
"=",
"None",
",",
"listener",
"=",
"None",
")",
":",
"if",
"self",
".",
"_user_assignment",
"or",
"(",
"topics",
"and",
"pattern",
")",
":",
"raise",
"IllegalStateError",
"(",
"self",
".",
"_SUBSCRIPTION_EXCEPTION_MESSAGE",
")",
"assert",
"topics",
"or",
"pattern",
",",
"'Must provide topics or pattern'",
"if",
"pattern",
":",
"log",
".",
"info",
"(",
"'Subscribing to pattern: /%s/'",
",",
"pattern",
")",
"self",
".",
"subscription",
"=",
"set",
"(",
")",
"self",
".",
"subscribed_pattern",
"=",
"re",
".",
"compile",
"(",
"pattern",
")",
"else",
":",
"self",
".",
"change_subscription",
"(",
"topics",
")",
"if",
"listener",
"and",
"not",
"isinstance",
"(",
"listener",
",",
"ConsumerRebalanceListener",
")",
":",
"raise",
"TypeError",
"(",
"'listener must be a ConsumerRebalanceListener'",
")",
"self",
".",
"listener",
"=",
"listener"
] | Subscribe to a list of topics, or a topic regex pattern.
Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).
This method is incompatible with assign_from_user()
Arguments:
topics (list): List of topics for subscription.
pattern (str): Pattern to match available topics. You must provide
either topics or pattern, but not both.
listener (ConsumerRebalanceListener): Optionally include listener
callback, which will be called before and after each rebalance
operation.
As part of group management, the consumer will keep track of the
list of consumers that belong to a particular group and will
trigger a rebalance operation if one of the following events
occurs:
* Number of partitions change for any of the subscribed topics
* Topic is created or deleted
* An existing member of the consumer group dies
* A new member is added to the consumer group
When any of these events are triggered, the provided listener
will be invoked first to indicate that the consumer's assignment
has been revoked, and then again when the new assignment has
been received. Note that this listener will immediately override
any listener set in a previous call to subscribe. It is
guaranteed, however, that the partitions revoked/assigned
through this interface are from topics subscribed in this call. | [
"Subscribe",
"to",
"a",
"list",
"of",
"topics",
"or",
"a",
"topic",
"regex",
"pattern",
"."
] | python | train |
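A hedged sketch of the two mutually exclusive subscription modes; default construction of SubscriptionState is assumed:

state = SubscriptionState()
state.subscribe(topics=['orders', 'payments'])  # explicit topic list
# or, pattern-based (cannot be combined with an explicit list):
# state.subscribe(pattern='metrics-.*', listener=my_rebalance_listener)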
PeerAssets/pypeerassets | pypeerassets/provider/explorer.py | https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L78-L81 | def getblock(self, hash: str) -> dict:
'''Returns information about the block with the given hash.'''
return cast(dict, self.api_fetch('getblock?hash=' + hash)) | [
"def",
"getblock",
"(",
"self",
",",
"hash",
":",
"str",
")",
"->",
"dict",
":",
"return",
"cast",
"(",
"dict",
",",
"self",
".",
"api_fetch",
"(",
"'getblock?hash='",
"+",
"hash",
")",
")"
] | Returns information about the block with the given hash. | [
"Returns",
"information",
"about",
"the",
"block",
"with",
"the",
"given",
"hash",
"."
] | python | train |
googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L636-L638 | def _render_table(data, fields=None):
""" Helper to render a list of dictionaries as an HTML display object. """
return IPython.core.display.HTML(datalab.utils.commands.HtmlBuilder.render_table(data, fields)) | [
"def",
"_render_table",
"(",
"data",
",",
"fields",
"=",
"None",
")",
":",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"datalab",
".",
"utils",
".",
"commands",
".",
"HtmlBuilder",
".",
"render_table",
"(",
"data",
",",
"fields",
")",
")"
] | Helper to render a list of dictionaries as an HTML display object. | [
"Helper",
"to",
"render",
"a",
"list",
"of",
"dictionaries",
"as",
"an",
"HTML",
"display",
"object",
"."
] | python | train |
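A small behavioral sketch: each dict is a row, and fields fixes the column order:

rows = [{'name': 'a', 'count': 1}, {'name': 'b', 'count': 2}]
html = _render_table(rows, fields=['name', 'count'])  # IPython HTML display object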
nickfrostatx/flask-hookserver | flask_hookserver.py | https://github.com/nickfrostatx/flask-hookserver/blob/fb5c226473f54e3469234403ec56a354374d2c41/flask_hookserver.py#L132-L156 | def _load_github_hooks(github_url='https://api.github.com'):
"""Request GitHub's IP block from their API.
Return the IP network.
If we detect a rate-limit error, raise an error message stating when
the rate limit will reset.
If something else goes wrong, raise a generic 503.
"""
try:
resp = requests.get(github_url + '/meta')
if resp.status_code == 200:
return resp.json()['hooks']
else:
if resp.headers.get('X-RateLimit-Remaining') == '0':
reset_ts = int(resp.headers['X-RateLimit-Reset'])
reset_string = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(reset_ts))
raise ServiceUnavailable('Rate limited from GitHub until ' +
reset_string)
else:
raise ServiceUnavailable('Error reaching GitHub')
except (KeyError, ValueError, requests.exceptions.ConnectionError):
raise ServiceUnavailable('Error reaching GitHub') | [
"def",
"_load_github_hooks",
"(",
"github_url",
"=",
"'https://api.github.com'",
")",
":",
"try",
":",
"resp",
"=",
"requests",
".",
"get",
"(",
"github_url",
"+",
"'/meta'",
")",
"if",
"resp",
".",
"status_code",
"==",
"200",
":",
"return",
"resp",
".",
"json",
"(",
")",
"[",
"'hooks'",
"]",
"else",
":",
"if",
"resp",
".",
"headers",
".",
"get",
"(",
"'X-RateLimit-Remaining'",
")",
"==",
"'0'",
":",
"reset_ts",
"=",
"int",
"(",
"resp",
".",
"headers",
"[",
"'X-RateLimit-Reset'",
"]",
")",
"reset_string",
"=",
"time",
".",
"strftime",
"(",
"'%a, %d %b %Y %H:%M:%S GMT'",
",",
"time",
".",
"gmtime",
"(",
"reset_ts",
")",
")",
"raise",
"ServiceUnavailable",
"(",
"'Rate limited from GitHub until '",
"+",
"reset_string",
")",
"else",
":",
"raise",
"ServiceUnavailable",
"(",
"'Error reaching GitHub'",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"requests",
".",
"exceptions",
".",
"ConnectionError",
")",
":",
"raise",
"ServiceUnavailable",
"(",
"'Error reaching GitHub'",
")"
] | Request GitHub's IP block from their API.
Return the IP network.
If we detect a rate-limit error, raise an error message stating when
the rate limit will reset.
If something else goes wrong, raise a generic 503. | [
"Request",
"GitHub",
"s",
"IP",
"block",
"from",
"their",
"API",
"."
] | python | train |
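Callers typically guard against the documented failure modes; a minimal sketch (within the module, where ServiceUnavailable is in scope):

try:
    networks = _load_github_hooks()  # list of CIDR strings from /meta
except ServiceUnavailable:
    networks = []  # e.g. deny all hooks until GitHub is reachable again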
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/py_pi_api/py_pi_api_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/py_pi_api/py_pi_api_client.py#L52-L69 | def delete_package_version_from_recycle_bin(self, feed_id, package_name, package_version):
"""DeletePackageVersionFromRecycleBin.
[Preview API] Delete a package version from the feed, moving it to the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package.
"""
route_values = {}
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
self._send(http_method='DELETE',
location_id='07143752-3d94-45fd-86c2-0c77ed87847b',
version='5.0-preview.1',
route_values=route_values) | [
"def",
"delete_package_version_from_recycle_bin",
"(",
"self",
",",
"feed_id",
",",
"package_name",
",",
"package_version",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"feed_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'feedId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'feed_id'",
",",
"feed_id",
",",
"'str'",
")",
"if",
"package_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'packageName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'package_name'",
",",
"package_name",
",",
"'str'",
")",
"if",
"package_version",
"is",
"not",
"None",
":",
"route_values",
"[",
"'packageVersion'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'package_version'",
",",
"package_version",
",",
"'str'",
")",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'DELETE'",
",",
"location_id",
"=",
"'07143752-3d94-45fd-86c2-0c77ed87847b'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"route_values",
"=",
"route_values",
")"
] | DeletePackageVersionFromRecycleBin.
[Preview API] Delete a package version from the feed, moving it to the recycle bin.
:param str feed_id: Name or ID of the feed.
:param str package_name: Name of the package.
:param str package_version: Version of the package. | [
"DeletePackageVersionFromRecycleBin",
".",
"[",
"Preview",
"API",
"]",
"Delete",
"a",
"package",
"version",
"from",
"the",
"feed",
"moving",
"it",
"to",
"the",
"recycle",
"bin",
".",
":",
"param",
"str",
"feed_id",
":",
"Name",
"or",
"ID",
"of",
"the",
"feed",
".",
":",
"param",
"str",
"package_name",
":",
"Name",
"of",
"the",
"package",
".",
":",
"param",
"str",
"package_version",
":",
"Version",
"of",
"the",
"package",
"."
] | python | train |
erocarrera/pefile | pefile.py | https://github.com/erocarrera/pefile/blob/8a78a2e251a3f2336c232bf411133927b479edf2/pefile.py#L704-L714 | def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such an RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr-2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False | [
"def",
"ask_unicode_16",
"(",
"self",
",",
"next_rva_ptr",
")",
":",
"if",
"self",
".",
"__get_word_value_at_rva",
"(",
"next_rva_ptr",
"-",
"2",
")",
"==",
"0",
":",
"self",
".",
"length",
"=",
"next_rva_ptr",
"-",
"self",
".",
"rva_ptr",
"return",
"True",
"return",
"False"
] | The next RVA is taken to be the one immediately following this one.
Such an RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there. | [
"The",
"next",
"RVA",
"is",
"taken",
"to",
"be",
"the",
"one",
"immediately",
"following",
"this",
"one",
"."
] | python | train |
jameslyons/pycipher | pycipher/polybius.py | https://github.com/jameslyons/pycipher/blob/8f1d7cf3cba4e12171e27d9ce723ad890194de19/pycipher/polybius.py#L36-L50 | def encipher(self,string):
"""Encipher string using Polybius square cipher according to initialised key.
Example::
ciphertext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string. The ciphertext will be twice the length of the plaintext.
"""
string = self.remove_punctuation(string)#,filter='[^'+self.key+']')
ret = ''
for c in range(0,len(string)):
ret += self.encipher_char(string[c])
return ret | [
"def",
"encipher",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"self",
".",
"remove_punctuation",
"(",
"string",
")",
"#,filter='[^'+self.key+']')",
"ret",
"=",
"''",
"for",
"c",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"string",
")",
")",
":",
"ret",
"+=",
"self",
".",
"encipher_char",
"(",
"string",
"[",
"c",
"]",
")",
"return",
"ret"
] | Encipher string using Polybius square cipher according to initialised key.
Example::
ciphertext = Polybius('APCZWRLFBDKOTYUQGENHXMIVS',5,'MKSBU').encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string. The ciphertext will be twice the length of the plaintext. | [
"Encipher",
"string",
"using",
"Polybius",
"square",
"cipher",
"according",
"to",
"initialised",
"key",
"."
] | python | train |
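The docstring's example, restated as runnable code; the plaintext is illustrative:

from pycipher import Polybius

cipher = Polybius('APCZWRLFBDKOTYUQGENHXMIVS', 5, 'MKSBU')
ct = cipher.encipher('attack at dawn')  # ciphertext is twice the plaintext length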
materialsproject/pymatgen | pymatgen/analysis/elasticity/elastic.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L417-L441 | def get_structure_property_dict(self, structure, include_base_props=True,
ignore_errors=False):
"""
returns a dictionary of properties derived from the elastic tensor
and an associated structure
Args:
structure (Structure): structure object for which to calculate
associated properties
include_base_props (bool): whether to include base properties,
like k_vrh, etc.
ignore_errors (bool): if set to true, will set problem properties
that depend on a physical tensor to None, defaults to False
"""
s_props = ["trans_v", "long_v", "snyder_ac", "snyder_opt",
"snyder_total", "clarke_thermalcond", "cahill_thermalcond",
"debye_temperature"]
if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):
sp_dict = {prop: None for prop in s_props}
else:
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict["structure"] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict | [
"def",
"get_structure_property_dict",
"(",
"self",
",",
"structure",
",",
"include_base_props",
"=",
"True",
",",
"ignore_errors",
"=",
"False",
")",
":",
"s_props",
"=",
"[",
"\"trans_v\"",
",",
"\"long_v\"",
",",
"\"snyder_ac\"",
",",
"\"snyder_opt\"",
",",
"\"snyder_total\"",
",",
"\"clarke_thermalcond\"",
",",
"\"cahill_thermalcond\"",
",",
"\"debye_temperature\"",
"]",
"if",
"ignore_errors",
"and",
"(",
"self",
".",
"k_vrh",
"<",
"0",
"or",
"self",
".",
"g_vrh",
"<",
"0",
")",
":",
"sp_dict",
"=",
"{",
"prop",
":",
"None",
"for",
"prop",
"in",
"s_props",
"}",
"else",
":",
"sp_dict",
"=",
"{",
"prop",
":",
"getattr",
"(",
"self",
",",
"prop",
")",
"(",
"structure",
")",
"for",
"prop",
"in",
"s_props",
"}",
"sp_dict",
"[",
"\"structure\"",
"]",
"=",
"structure",
"if",
"include_base_props",
":",
"sp_dict",
".",
"update",
"(",
"self",
".",
"property_dict",
")",
"return",
"sp_dict"
] | returns a dictionary of properties derived from the elastic tensor
and an associated structure
Args:
structure (Structure): structure object for which to calculate
associated properties
include_base_props (bool): whether to include base properties,
like k_vrh, etc.
ignore_errors (bool): if set to true, will set problem properties
that depend on a physical tensor to None, defaults to False | [
"returns",
"a",
"dictionary",
"of",
"properties",
"derived",
"from",
"the",
"elastic",
"tensor",
"and",
"an",
"associated",
"structure"
] | python | train |
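A hedged usage sketch, assuming an ElasticTensor instance et and a pymatgen Structure struct are already in scope:

props = et.get_structure_property_dict(struct, include_base_props=False,
                                       ignore_errors=True)
print(props['debye_temperature'])  # None if k_vrh or g_vrh was negative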
simon-anders/htseq | python3/HTSeq/__init__.py | https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python3/HTSeq/__init__.py#L463-L479 | def bundle_multiple_alignments(sequence_of_alignments):
"""Some alignment programs, e.g., Bowtie, can output multiple alignments,
i.e., the same read is reported consecutively with different alignments.
This function takes an iterator over alignments and bundles consecutive
alignments of the same read into a list of Alignment objects and
returns an iterator over these.
"""
alignment_iter = iter(sequence_of_alignments)
algnt = next(alignment_iter)
ma = [algnt]
for algnt in alignment_iter:
if algnt.read.name != ma[0].read.name:
yield ma
ma = [algnt]
else:
ma.append(algnt)
yield ma | [
"def",
"bundle_multiple_alignments",
"(",
"sequence_of_alignments",
")",
":",
"alignment_iter",
"=",
"iter",
"(",
"sequence_of_alignments",
")",
"algnt",
"=",
"next",
"(",
"alignment_iter",
")",
"ma",
"=",
"[",
"algnt",
"]",
"for",
"algnt",
"in",
"alignment_iter",
":",
"if",
"algnt",
".",
"read",
".",
"name",
"!=",
"ma",
"[",
"0",
"]",
".",
"read",
".",
"name",
":",
"yield",
"ma",
"ma",
"=",
"[",
"algnt",
"]",
"else",
":",
"ma",
".",
"append",
"(",
"algnt",
")",
"yield",
"ma"
] | Some alignment programs, e.g., Bowtie, can output multiple alignments,
i.e., the same read is reported consecutively with different alignments.
This function takes an iterator over alignments and bundles consecutive
alignments of the same read into a list of Alignment objects and
returns an iterator over these. | [
"Some",
"alignment",
"programs",
"e",
".",
"g",
".",
"Bowtie",
"can",
"output",
"multiple",
"alignments",
"i",
".",
"e",
".",
"the",
"same",
"read",
"is",
"reported",
"consecutively",
"with",
"different",
"alignments",
".",
"This",
"function",
"takes",
"an",
"iterator",
"over",
"alignments",
"and",
"bundles",
"consecutive",
"alignments",
"regarding",
"the",
"same",
"read",
"to",
"a",
"list",
"of",
"Alignment",
"objects",
"and",
"returns",
"an",
"iterator",
"over",
"these",
"."
] | python | train |
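A usage sketch with HTSeq's SAM reader; the file name is a placeholder:

import HTSeq

for bundle in HTSeq.bundle_multiple_alignments(HTSeq.SAM_Reader('hits.sam')):
    print(bundle[0].read.name, len(bundle))  # one bundle per read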
mattja/nsim | nsim/analysesN/epochs.py | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/epochs.py#L60-L88 | def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_distributed(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs)(
ts, variability, threshold, minlength, plot) | [
"def",
"epochs",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"plot",
"=",
"True",
")",
":",
"if",
"ts",
".",
"ndim",
"<=",
"2",
":",
"return",
"analyses1",
".",
"epochs_distributed",
"(",
"ts",
",",
"variability",
",",
"threshold",
",",
"minlength",
",",
"plot",
")",
"else",
":",
"return",
"distob",
".",
"vectorize",
"(",
"analyses1",
".",
"epochs",
")",
"(",
"ts",
",",
"variability",
",",
"threshold",
",",
"minlength",
",",
"plot",
")"
] | Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | [
"Identify",
"stationary",
"epochs",
"within",
"a",
"time",
"series",
"based",
"on",
"a",
"continuous",
"measure",
"of",
"variability",
".",
"Epochs",
"are",
"defined",
"to",
"contain",
"the",
"points",
"of",
"minimal",
"variability",
"and",
"to",
"extend",
"as",
"wide",
"as",
"possible",
"with",
"variability",
"not",
"exceeding",
"the",
"threshold",
"."
] | python | train |
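A hedged call sketch, assuming ts is an nsim Timeseries of shape (n, m):

variability, allchannels_epochs = epochs(ts, threshold=0.5,
                                         minlength=2.0, plot=False)
# allchannels_epochs[0] -> [(start, end), ...] for the first variable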
wreckage/django-happenings | happenings/utils/common.py | https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/common.py#L48-L57 | def get_net(req):
"""Get the net of any 'next' and 'prev' querystrings."""
try:
nxt, prev = map(
int, (req.GET.get('cal_next', 0), req.GET.get('cal_prev', 0))
)
net = nxt - prev
except Exception:
net = 0
return net | [
"def",
"get_net",
"(",
"req",
")",
":",
"try",
":",
"nxt",
",",
"prev",
"=",
"map",
"(",
"int",
",",
"(",
"req",
".",
"GET",
".",
"get",
"(",
"'cal_next'",
",",
"0",
")",
",",
"req",
".",
"GET",
".",
"get",
"(",
"'cal_prev'",
",",
"0",
")",
")",
")",
"net",
"=",
"nxt",
"-",
"prev",
"except",
"Exception",
":",
"net",
"=",
"0",
"return",
"net"
] | Get the net of any 'next' and 'prev' querystrings. | [
"Get",
"the",
"net",
"of",
"any",
"next",
"and",
"prev",
"querystrings",
"."
] | python | test |
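A behavioral sketch with a stubbed request object (only the GET mapping is needed):

class _Req:
    GET = {'cal_next': '3', 'cal_prev': '1'}

get_net(_Req())  # -> 2; any non-integer value collapses the net to 0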
knipknap/exscript | Exscript/emulators/vdevice.py | https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/emulators/vdevice.py#L144-L160 | def add_commands_from_file(self, filename, autoprompt=True):
"""
Wrapper around add_command_handler that reads the handlers from the
file with the given name. The file is a Python script containing
a list named 'commands' of tuples that map command names to
handlers.
:type filename: str
:param filename: The name of the file containing the tuples.
:type autoprompt: bool
:param autoprompt: Whether to append a prompt to each response.
"""
if autoprompt:
deco = self._create_autoprompt_handler
else:
deco = None
self.commands.add_from_file(filename, deco) | [
"def",
"add_commands_from_file",
"(",
"self",
",",
"filename",
",",
"autoprompt",
"=",
"True",
")",
":",
"if",
"autoprompt",
":",
"deco",
"=",
"self",
".",
"_create_autoprompt_handler",
"else",
":",
"deco",
"=",
"None",
"self",
".",
"commands",
".",
"add_from_file",
"(",
"filename",
",",
"deco",
")"
] | Wrapper around add_command_handler that reads the handlers from the
file with the given name. The file is a Python script containing
a list named 'commands' of tuples that map command names to
handlers.
:type filename: str
:param filename: The name of the file containing the tuples.
:type autoprompt: bool
:param autoprompt: Whether to append a prompt to each response. | [
"Wrapper",
"around",
"add_command_handler",
"that",
"reads",
"the",
"handlers",
"from",
"the",
"file",
"with",
"the",
"given",
"name",
".",
"The",
"file",
"is",
"a",
"Python",
"script",
"containing",
"a",
"list",
"named",
"commands",
"of",
"tuples",
"that",
"map",
"command",
"names",
"to",
"handlers",
"."
] | python | train |
MeaningCloud/meaningcloud-python | meaningcloud/Response.py | https://github.com/MeaningCloud/meaningcloud-python/blob/1dd76ecabeedd80c9bb14a1716d39657d645775f/meaningcloud/Response.py#L37-L52 | def getStatusCode(self):
"""
Returns the code of the status or None if it does not exist
:return:
Status code of the response
"""
if 'status' in self._response.keys():
if (self._response['status'] is not None) and ('code' in self._response['status'].keys()) and (self._response['status']['code'] is not None):
return self._response['status']['code']
else:
return None
else:
return None | [
"def",
"getStatusCode",
"(",
"self",
")",
":",
"if",
"'status'",
"in",
"self",
".",
"_response",
".",
"keys",
"(",
")",
":",
"if",
"(",
"self",
".",
"_response",
"[",
"'status'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"'code'",
"in",
"self",
".",
"_response",
"[",
"'status'",
"]",
".",
"keys",
"(",
")",
")",
"and",
"(",
"self",
".",
"_response",
"[",
"'status'",
"]",
"[",
"'code'",
"]",
"is",
"not",
"None",
")",
":",
"return",
"self",
".",
"_response",
"[",
"'status'",
"]",
"[",
"'code'",
"]",
"else",
":",
"return",
"None",
"else",
":",
"return",
"None"
] | Returns the code of the status or None if it does not exist
:return:
Status code of the response | [
"Returns",
"the",
"code",
"of",
"the",
"status",
"or",
"None",
"if",
"it",
"does",
"not",
"exist"
] | python | train |
sorgerlab/indra | indra/assemblers/english/assembler.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L246-L251 | def _assemble_association(stmt):
"""Assemble Association statements into text."""
member_strs = [_assemble_agent_str(m.concept) for m in stmt.members]
stmt_str = member_strs[0] + ' is associated with ' + \
_join_list(member_strs[1:])
return _make_sentence(stmt_str) | [
"def",
"_assemble_association",
"(",
"stmt",
")",
":",
"member_strs",
"=",
"[",
"_assemble_agent_str",
"(",
"m",
".",
"concept",
")",
"for",
"m",
"in",
"stmt",
".",
"members",
"]",
"stmt_str",
"=",
"member_strs",
"[",
"0",
"]",
"+",
"' is associated with '",
"+",
"_join_list",
"(",
"member_strs",
"[",
"1",
":",
"]",
")",
"return",
"_make_sentence",
"(",
"stmt_str",
")"
] | Assemble Association statements into text. | [
"Assemble",
"Association",
"statements",
"into",
"text",
"."
] | python | train |
tek/amino | amino/string/hues.py | https://github.com/tek/amino/blob/51b314933e047a45587a24ecff02c836706d27ff/amino/string/hues.py#L50-L60 | def gen_keywords(*args: Union[ANSIColors, ANSIStyles], **kwargs: Union[ANSIColors, ANSIStyles]) -> tuple:
'''generate single escape sequence mapping.'''
fields: tuple = tuple()
values: tuple = tuple()
for tpl in args:
fields += tpl._fields
values += tpl
for prefix, tpl in kwargs.items():
fields += tuple(map(lambda x: '_'.join([prefix, x]), tpl._fields))
values += tpl
return namedtuple('ANSISequences', fields)(*values) | [
"def",
"gen_keywords",
"(",
"*",
"args",
":",
"Union",
"[",
"ANSIColors",
",",
"ANSIStyles",
"]",
",",
"*",
"*",
"kwargs",
":",
"Union",
"[",
"ANSIColors",
",",
"ANSIStyles",
"]",
")",
"->",
"tuple",
":",
"fields",
":",
"tuple",
"=",
"tuple",
"(",
")",
"values",
":",
"tuple",
"=",
"tuple",
"(",
")",
"for",
"tpl",
"in",
"args",
":",
"fields",
"+=",
"tpl",
".",
"_fields",
"values",
"+=",
"tpl",
"for",
"prefix",
",",
"tpl",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"fields",
"+=",
"tuple",
"(",
"map",
"(",
"lambda",
"x",
":",
"'_'",
".",
"join",
"(",
"[",
"prefix",
",",
"x",
"]",
")",
",",
"tpl",
".",
"_fields",
")",
")",
"values",
"+=",
"tpl",
"return",
"namedtuple",
"(",
"'ANSISequences'",
",",
"fields",
")",
"(",
"*",
"values",
")"
] | generate single escape sequence mapping. | [
"generate",
"single",
"escape",
"sequence",
"mapping",
"."
] | python | test |
mcocdawc/chemcoord | src/chemcoord/cartesian_coordinates/_cartesian_class_core.py | https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/_cartesian_class_core.py#L781-L840 | def get_dihedral_degrees(self, indices, start_row=0):
"""Return the dihedrals between given atoms.
Calculates the dihedral angle in degrees between the atoms with
indices ``i, b, a, d``.
The indices can be given in three ways:
* As simple list ``[i, b, a, d]``
* As list of lists: ``[[i1, b1, a1, d1], [i2, b2, a2, d2]...]``
* As :class:`pandas.DataFrame` where ``i`` is taken from the index and
``b``, ``a`` and ``d``from the respective columns
``'b'``, ``'a'`` and ``'d'``.
Args:
indices (list):
Returns:
:class:`numpy.ndarray`: Vector of angles in degrees.
"""
coords = ['x', 'y', 'z']
if isinstance(indices, pd.DataFrame):
i_pos = self.loc[indices.index, coords].values
b_pos = self.loc[indices.loc[:, 'b'], coords].values
a_pos = self.loc[indices.loc[:, 'a'], coords].values
d_pos = self.loc[indices.loc[:, 'd'], coords].values
else:
indices = np.array(indices)
if len(indices.shape) == 1:
indices = indices[None, :]
i_pos = self.loc[indices[:, 0], coords].values
b_pos = self.loc[indices[:, 1], coords].values
a_pos = self.loc[indices[:, 2], coords].values
d_pos = self.loc[indices[:, 3], coords].values
IB = b_pos - i_pos
BA = a_pos - b_pos
AD = d_pos - a_pos
N1 = np.cross(IB, BA, axis=1)
N2 = np.cross(BA, AD, axis=1)
n1, n2 = [v / np.linalg.norm(v, axis=1)[:, None] for v in (N1, N2)]
dot_product = np.sum(n1 * n2, axis=1)
dot_product[dot_product > 1] = 1
dot_product[dot_product < -1] = -1
dihedrals = np.degrees(np.arccos(dot_product))
# the next lines are to test the direction of rotation.
# is a dihedral really 90 or 270 degrees?
# Equivalent to direction of rotation of dihedral
where_to_modify = np.sum(BA * np.cross(n1, n2, axis=1), axis=1) > 0
where_to_modify = np.nonzero(where_to_modify)[0]
length = indices.shape[0] - start_row
sign = np.full(length, 1, dtype='float64')
to_add = np.full(length, 0, dtype='float64')
sign[where_to_modify] = -1
to_add[where_to_modify] = 360
dihedrals = to_add + sign * dihedrals
return dihedrals | [
"def",
"get_dihedral_degrees",
"(",
"self",
",",
"indices",
",",
"start_row",
"=",
"0",
")",
":",
"coords",
"=",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"if",
"isinstance",
"(",
"indices",
",",
"pd",
".",
"DataFrame",
")",
":",
"i_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
".",
"index",
",",
"coords",
"]",
".",
"values",
"b_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
".",
"loc",
"[",
":",
",",
"'b'",
"]",
",",
"coords",
"]",
".",
"values",
"a_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
".",
"loc",
"[",
":",
",",
"'a'",
"]",
",",
"coords",
"]",
".",
"values",
"d_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
".",
"loc",
"[",
":",
",",
"'d'",
"]",
",",
"coords",
"]",
".",
"values",
"else",
":",
"indices",
"=",
"np",
".",
"array",
"(",
"indices",
")",
"if",
"len",
"(",
"indices",
".",
"shape",
")",
"==",
"1",
":",
"indices",
"=",
"indices",
"[",
"None",
",",
":",
"]",
"i_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
"[",
":",
",",
"0",
"]",
",",
"coords",
"]",
".",
"values",
"b_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
"[",
":",
",",
"1",
"]",
",",
"coords",
"]",
".",
"values",
"a_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
"[",
":",
",",
"2",
"]",
",",
"coords",
"]",
".",
"values",
"d_pos",
"=",
"self",
".",
"loc",
"[",
"indices",
"[",
":",
",",
"3",
"]",
",",
"coords",
"]",
".",
"values",
"IB",
"=",
"b_pos",
"-",
"i_pos",
"BA",
"=",
"a_pos",
"-",
"b_pos",
"AD",
"=",
"d_pos",
"-",
"a_pos",
"N1",
"=",
"np",
".",
"cross",
"(",
"IB",
",",
"BA",
",",
"axis",
"=",
"1",
")",
"N2",
"=",
"np",
".",
"cross",
"(",
"BA",
",",
"AD",
",",
"axis",
"=",
"1",
")",
"n1",
",",
"n2",
"=",
"[",
"v",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"v",
",",
"axis",
"=",
"1",
")",
"[",
":",
",",
"None",
"]",
"for",
"v",
"in",
"(",
"N1",
",",
"N2",
")",
"]",
"dot_product",
"=",
"np",
".",
"sum",
"(",
"n1",
"*",
"n2",
",",
"axis",
"=",
"1",
")",
"dot_product",
"[",
"dot_product",
">",
"1",
"]",
"=",
"1",
"dot_product",
"[",
"dot_product",
"<",
"-",
"1",
"]",
"=",
"-",
"1",
"dihedrals",
"=",
"np",
".",
"degrees",
"(",
"np",
".",
"arccos",
"(",
"dot_product",
")",
")",
"# the next lines are to test the direction of rotation.",
"# is a dihedral really 90 or 270 degrees?",
"# Equivalent to direction of rotation of dihedral",
"where_to_modify",
"=",
"np",
".",
"sum",
"(",
"BA",
"*",
"np",
".",
"cross",
"(",
"n1",
",",
"n2",
",",
"axis",
"=",
"1",
")",
",",
"axis",
"=",
"1",
")",
">",
"0",
"where_to_modify",
"=",
"np",
".",
"nonzero",
"(",
"where_to_modify",
")",
"[",
"0",
"]",
"length",
"=",
"indices",
".",
"shape",
"[",
"0",
"]",
"-",
"start_row",
"sign",
"=",
"np",
".",
"full",
"(",
"length",
",",
"1",
",",
"dtype",
"=",
"'float64'",
")",
"to_add",
"=",
"np",
".",
"full",
"(",
"length",
",",
"0",
",",
"dtype",
"=",
"'float64'",
")",
"sign",
"[",
"where_to_modify",
"]",
"=",
"-",
"1",
"to_add",
"[",
"where_to_modify",
"]",
"=",
"360",
"dihedrals",
"=",
"to_add",
"+",
"sign",
"*",
"dihedrals",
"return",
"dihedrals"
] | Return the dihedrals between given atoms.
Calculates the dihedral angle in degrees between the atoms with
indices ``i, b, a, d``.
The indices can be given in three ways:
* As simple list ``[i, b, a, d]``
* As list of lists: ``[[i1, b1, a1, d1], [i2, b2, a2, d2]...]``
* As :class:`pandas.DataFrame` where ``i`` is taken from the index and
``b``, ``a`` and ``d``from the respective columns
``'b'``, ``'a'`` and ``'d'``.
Args:
indices (list):
Returns:
:class:`numpy.ndarray`: Vector of angles in degrees. | [
"Return",
"the",
"dihedrals",
"between",
"given",
"atoms",
"."
] | python | train |
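A hedged call sketch, assuming mol is a chemcoord Cartesian and the atom indices exist; indices follow the [i, b, a, d] convention described above:

angles = mol.get_dihedral_degrees([[0, 1, 2, 3], [1, 2, 3, 4]])
# -> numpy array of two dihedral angles in degrees, in [0, 360)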
7sDream/zhihu-py3 | zhihu/author.py | https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/author.py#L102-L114 | def name(self):
"""获取用户名字.
:return: 用户名字
:rtype: str
"""
if self.url is None:
return '匿名用户'
if self.soup is not None:
return self.soup.find('div', class_='title-section').span.text
else:
assert self.card is not None
return self.card.find('span', class_='name').text | [
"def",
"name",
"(",
"self",
")",
":",
"if",
"self",
".",
"url",
"is",
"None",
":",
"return",
"'匿名用户'",
"if",
"self",
".",
"soup",
"is",
"not",
"None",
":",
"return",
"self",
".",
"soup",
".",
"find",
"(",
"'div'",
",",
"class_",
"=",
"'title-section'",
")",
".",
"span",
".",
"text",
"else",
":",
"assert",
"self",
".",
"card",
"is",
"not",
"None",
"return",
"self",
".",
"card",
".",
"find",
"(",
"'span'",
",",
"class_",
"=",
"'name'",
")",
".",
"text"
] | Get the user's name.
:return: The user's name
:rtype: str | [
"获取用户名字",
"."
] | python | train |
senaite/senaite.core | bika/lims/numbergenerator.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/numbergenerator.py#L63-L69 | def storage(self):
""" get the counter storage
"""
annotation = get_portal_annotation()
if annotation.get(NUMBER_STORAGE) is None:
annotation[NUMBER_STORAGE] = OIBTree()
return annotation[NUMBER_STORAGE] | [
"def",
"storage",
"(",
"self",
")",
":",
"annotation",
"=",
"get_portal_annotation",
"(",
")",
"if",
"annotation",
".",
"get",
"(",
"NUMBER_STORAGE",
")",
"is",
"None",
":",
"annotation",
"[",
"NUMBER_STORAGE",
"]",
"=",
"OIBTree",
"(",
")",
"return",
"annotation",
"[",
"NUMBER_STORAGE",
"]"
] | get the counter storage | [
"get",
"the",
"counter",
"storage"
] | python | train |
HewlettPackard/python-hpOneView | hpOneView/resources/servers/server_hardware.py | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/server_hardware.py#L166-L196 | def get_all_firmwares(self, filter='', start=0, count=-1, query='', sort=''):
"""
Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory.
"""
uri = self.URI + "/*/firmware"
return self._helper.get_all(start, count, filter, query, sort, '', '', uri) | [
"def",
"get_all_firmwares",
"(",
"self",
",",
"filter",
"=",
"''",
",",
"start",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"query",
"=",
"''",
",",
"sort",
"=",
"''",
")",
":",
"uri",
"=",
"self",
".",
"URI",
"+",
"\"/*/firmware\"",
"return",
"self",
".",
"_helper",
".",
"get_all",
"(",
"start",
",",
"count",
",",
"filter",
",",
"query",
",",
"sort",
",",
"''",
",",
"''",
",",
"uri",
")"
] | Gets a list of firmware inventory across all servers. To filter the returned data, specify a filter
expression to select a particular server model, component name, and/or component firmware version.
Note:
This method is available for API version 300 or later.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query; all resources
are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: List of firmware inventory. | [
"Gets",
"a",
"list",
"of",
"firmware",
"inventory",
"across",
"all",
"servers",
".",
"To",
"filter",
"the",
"returned",
"data",
"specify",
"a",
"filter",
"expression",
"to",
"select",
"a",
"particular",
"server",
"model",
"component",
"name",
"and",
"/",
"or",
"component",
"firmware",
"version",
"."
] | python | train |
pvlib/pvlib-python | pvlib/forecast.py | https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/forecast.py#L377-L391 | def set_time(self, time):
'''
Converts time data into a pandas date object.
Parameters
----------
time: netcdf
Contains time information.
Returns
-------
pandas.DatetimeIndex
'''
times = num2date(time[:].squeeze(), time.units)
self.time = pd.DatetimeIndex(pd.Series(times), tz=self.location.tz) | [
"def",
"set_time",
"(",
"self",
",",
"time",
")",
":",
"times",
"=",
"num2date",
"(",
"time",
"[",
":",
"]",
".",
"squeeze",
"(",
")",
",",
"time",
".",
"units",
")",
"self",
".",
"time",
"=",
"pd",
".",
"DatetimeIndex",
"(",
"pd",
".",
"Series",
"(",
"times",
")",
",",
"tz",
"=",
"self",
".",
"location",
".",
"tz",
")"
] | Converts time data into a pandas date object.
Parameters
----------
time: netcdf
Contains time information.
Returns
-------
pandas.DatetimeIndex | [
"Converts",
"time",
"data",
"into",
"a",
"pandas",
"date",
"object",
"."
] | python | train |
mozilla/python-zeppelin | zeppelin/executors/notebook_executor.py | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/executors/notebook_executor.py#L69-L79 | def save_notebook(self, body):
"""Save notebook depending on user provided output path."""
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
with open(full_path, 'w') as fh:
fh.write(json.dumps(body, indent=2))
except ValueError:
print('ERROR: Could not save executed notebook to path: ' +
self.output_path +
' -- Please provide a valid absolute path.') | [
"def",
"save_notebook",
"(",
"self",
",",
"body",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"output_path",
")",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"self",
".",
"notebook_name",
")",
"try",
":",
"with",
"open",
"(",
"full_path",
",",
"'w'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"body",
",",
"indent",
"=",
"2",
")",
")",
"except",
"ValueError",
":",
"print",
"(",
"'ERROR: Could not save executed notebook to path: '",
"+",
"self",
".",
"output_path",
"+",
"' -- Please provide a valid absolute path.'",
")"
] | Save notebook depending on user provided output path. | [
"Save",
"notebook",
"depending",
"on",
"user",
"provided",
"output",
"path",
"."
] | python | train |
quantmind/dynts | dynts/utils/version.py | https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/utils/version.py#L31-L46 | def get_git_changeset(filename=None):
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
dirname = os.path.dirname(filename or __file__)
git_show = sh('git show --pretty=format:%ct --quiet HEAD',
cwd=dirname)
timestamp = git_show.partition('\n')[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S') | [
"def",
"get_git_changeset",
"(",
"filename",
"=",
"None",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
"or",
"__file__",
")",
"git_show",
"=",
"sh",
"(",
"'git show --pretty=format:%ct --quiet HEAD'",
",",
"cwd",
"=",
"dirname",
")",
"timestamp",
"=",
"git_show",
".",
"partition",
"(",
"'\\n'",
")",
"[",
"0",
"]",
"try",
":",
"timestamp",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"int",
"(",
"timestamp",
")",
")",
"except",
"ValueError",
":",
"return",
"None",
"return",
"timestamp",
".",
"strftime",
"(",
"'%Y%m%d%H%M%S'",
")"
] | Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers. | [
"Returns",
"a",
"numeric",
"identifier",
"of",
"the",
"latest",
"git",
"changeset",
".",
"The",
"result",
"is",
"the",
"UTC",
"timestamp",
"of",
"the",
"changeset",
"in",
"YYYYMMDDHHMMSS",
"format",
".",
"This",
"value",
"isn",
"t",
"guaranteed",
"to",
"be",
"unique",
"but",
"collisions",
"are",
"very",
"unlikely",
"so",
"it",
"s",
"sufficient",
"for",
"generating",
"the",
"development",
"version",
"numbers",
"."
] | python | train |
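Short usage sketch; assumes the calling file lives inside a git checkout so the 'git show' subprocess succeeds:

from dynts.utils.version import get_git_changeset

changeset = get_git_changeset(__file__)
if changeset:
    dev_version = '1.0.dev' + changeset   # e.g. '1.0.dev20240101120000'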
255BITS/hyperchamber | hyperchamber/selector.py | https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/hyperchamber/selector.py#L44-L49 | def get_config_value(self, k, i):
"""Gets the ith config value for k. e.g. get_config_value('x', 1)"""
if(not isinstance(self.store[k], list)):
return self.store[k]
else:
return self.store[k][i] | [
"def",
"get_config_value",
"(",
"self",
",",
"k",
",",
"i",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"self",
".",
"store",
"[",
"k",
"]",
",",
"list",
")",
")",
":",
"return",
"self",
".",
"store",
"[",
"k",
"]",
"else",
":",
"return",
"self",
".",
"store",
"[",
"k",
"]",
"[",
"i",
"]"
] | Gets the ith config value for k. e.g. get_config_value('x', 1) | [
"Gets",
"the",
"ith",
"config",
"value",
"for",
"k",
".",
"e",
".",
"g",
".",
"get_config_value",
"(",
"x",
"1",
")"
] | python | train |
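The lookup semantics in isolation — scalar entries ignore the index, list entries are indexed — as a self-contained sketch (the real method reads self.store):

store = {'x': [1, 2, 3], 'lr': 0.01}

def get_config_value(k, i):
    return store[k][i] if isinstance(store[k], list) else store[k]

assert get_config_value('x', 1) == 2      # list entry: indexed
assert get_config_value('lr', 7) == 0.01  # scalar entry: index ignored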
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_topology.py#L492-L518 | def insert_1d_permute_layers(self):
"""
Insert permutation layers before a 1D start point or after 1D end point
"""
idx, nb_layers = 0, len(self.layer_list)
in_edges, out_edges = self._get_1d_interface_edges()
# Hacky Warning: (1) use a 4-D permute, which is not likely to happen in Keras,
# to represent actual permutation needed for (seq, c, h, w) in CoreML
# (2) Assume 2-D input shape has meaning (seq, c), and during CoreML runtime,
# it is represented as 4D blob, (seq, c, h, w)
for in_edge in in_edges:
src, snk = in_edge
if src is None:
permute_layer = '_permute_' + snk
else:
permute_layer = src + '_permute_' + snk
keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w
self._insert_layer_between(src, snk, permute_layer, keras_permute)
for out_edge in out_edges:
src, snk = out_edge
if snk is None:
permute_layer = src + '_permute_'
else:
permute_layer = src + '_permute_' + snk
keras_permute = _keras.layers.Permute(dims=(3,1,2,0)) # assume w = 1, switch seq and w back
self._insert_layer_between(src, snk, permute_layer, keras_permute) | [
"def",
"insert_1d_permute_layers",
"(",
"self",
")",
":",
"idx",
",",
"nb_layers",
"=",
"0",
",",
"len",
"(",
"self",
".",
"layer_list",
")",
"in_edges",
",",
"out_edges",
"=",
"self",
".",
"_get_1d_interface_edges",
"(",
")",
"# Hacky Warning: (1) use a 4-D permute, which is not likely to happen in Keras,",
"# to represent actual permutation needed for (seq, c, h, w) in CoreML",
"# (2) Assume 2-D input shape has meaning (seq, c), and during CoreML runtime,",
"# it is represented as 4D blob, (seq, c, h, w)",
"for",
"in_edge",
"in",
"in_edges",
":",
"src",
",",
"snk",
"=",
"in_edge",
"if",
"src",
"is",
"None",
":",
"permute_layer",
"=",
"'_permute_'",
"+",
"snk",
"else",
":",
"permute_layer",
"=",
"src",
"+",
"'_permute_'",
"+",
"snk",
"keras_permute",
"=",
"_keras",
".",
"layers",
".",
"Permute",
"(",
"dims",
"=",
"(",
"3",
",",
"1",
",",
"2",
",",
"0",
")",
")",
"# assume w = 1, switch seq and w",
"self",
".",
"_insert_layer_between",
"(",
"src",
",",
"snk",
",",
"permute_layer",
",",
"keras_permute",
")",
"for",
"out_edge",
"in",
"out_edges",
":",
"src",
",",
"snk",
"=",
"out_edge",
"if",
"snk",
"is",
"None",
":",
"permute_layer",
"=",
"src",
"+",
"'_permute_'",
"else",
":",
"permute_layer",
"=",
"src",
"+",
"'_permute_'",
"+",
"snk",
"keras_permute",
"=",
"_keras",
".",
"layers",
".",
"Permute",
"(",
"dims",
"=",
"(",
"3",
",",
"1",
",",
"2",
",",
"0",
")",
")",
"# assume w = 1, switch seq and w back",
"self",
".",
"_insert_layer_between",
"(",
"src",
",",
"snk",
",",
"permute_layer",
",",
"keras_permute",
")"
] | Insert permutation layers before a 1D start point or after 1D end point | [
"Insert",
"permutation",
"layers",
"before",
"a",
"1D",
"start",
"point",
"or",
"after",
"1D",
"end",
"point"
] | python | train |
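A rough numpy analogue of the axis swap the inserted Permute layers perform (swap seq and w, keep c and h); this shows the shape change only, not the Keras call itself:

import numpy as np

x = np.zeros((5, 3, 2, 1))          # (seq, c, h, w), with w == 1
y = np.transpose(x, (3, 1, 2, 0))   # same axis order as dims=(3,1,2,0)
assert y.shape == (1, 3, 2, 5)      # seq and w swapped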
vijayvarma392/surfinBH | surfinBH/_fit_evaluators/fit_7dq2.py | https://github.com/vijayvarma392/surfinBH/blob/9f2d25d00f894ee2ce9ffbb02f4e4a41fa7989eb/surfinBH/_fit_evaluators/fit_7dq2.py#L298-L363 | def _eval_wrapper(self, fit_key, q, chiA, chiB, **kwargs):
"""Evaluates the surfinBH7dq2 model.
"""
chiA = np.array(chiA)
chiB = np.array(chiB)
# Warn/Exit if extrapolating
allow_extrap = kwargs.pop('allow_extrap', False)
self._check_param_limits(q, chiA, chiB, allow_extrap)
omega0 = kwargs.pop('omega0', None)
PN_approximant = kwargs.pop('PN_approximant', 'SpinTaylorT4')
PN_dt = kwargs.pop('PN_dt', 0.1)
PN_spin_order = kwargs.pop('PN_spin_order', 7)
PN_phase_order = kwargs.pop('PN_phase_order', 7)
omega_switch = kwargs.pop('omega_switch', 0.018)
self._check_unused_kwargs(kwargs)
if omega0 is None:
# If omega0 is None, assume chiA, chiB are the coorbital frame
# spins at t=-100 M.
x = np.concatenate(([q], chiA, chiB))
else:
# If omega0 is given, evolve the spins from omega0
# to t = -100 M from the peak.
chiA_coorb_fitnode, chiB_coorb_fitnode, quat_fitnode, \
orbphase_fitnode \
= self._evolve_spins(q, chiA, chiB, omega0,
PN_approximant, PN_dt, PN_spin_order,
PN_phase_order, omega_switch)
# x should contain coorbital frame spins at t=-100M
x = np.concatenate(([q], chiA_coorb_fitnode, chiB_coorb_fitnode))
def eval_vector_fit(x, fit_key):
res = self._evaluate_fits(x, fit_key)
fit_val = res.T[0]
fit_err = res.T[1]
if omega0 is not None:
# If spins were given in inertial frame at omega0,
# transform vectors and errors back to the same frame.
fit_val = utils.transform_vector_coorb_to_inertial(fit_val,
orbphase_fitnode, quat_fitnode)
fit_err = utils.transform_error_coorb_to_inertial(fit_val,
fit_err, orbphase_fitnode, quat_fitnode)
return fit_val, fit_err
if fit_key == 'mf' or fit_key == 'all':
mf, mf_err = self._evaluate_fits(x, 'mf')
if fit_key == 'mf':
return mf, mf_err
if fit_key == 'chif' or fit_key == 'all':
chif, chif_err = eval_vector_fit(x, 'chif')
if fit_key == 'chif':
return chif, chif_err
if fit_key == 'vf' or fit_key == 'all':
vf, vf_err = eval_vector_fit(x, 'vf')
if fit_key == 'vf':
return vf, vf_err
if fit_key == 'all':
return mf, chif, vf, mf_err, chif_err, vf_err | [
"def",
"_eval_wrapper",
"(",
"self",
",",
"fit_key",
",",
"q",
",",
"chiA",
",",
"chiB",
",",
"*",
"*",
"kwargs",
")",
":",
"chiA",
"=",
"np",
".",
"array",
"(",
"chiA",
")",
"chiB",
"=",
"np",
".",
"array",
"(",
"chiB",
")",
"# Warn/Exit if extrapolating",
"allow_extrap",
"=",
"kwargs",
".",
"pop",
"(",
"'allow_extrap'",
",",
"False",
")",
"self",
".",
"_check_param_limits",
"(",
"q",
",",
"chiA",
",",
"chiB",
",",
"allow_extrap",
")",
"omega0",
"=",
"kwargs",
".",
"pop",
"(",
"'omega0'",
",",
"None",
")",
"PN_approximant",
"=",
"kwargs",
".",
"pop",
"(",
"'PN_approximant'",
",",
"'SpinTaylorT4'",
")",
"PN_dt",
"=",
"kwargs",
".",
"pop",
"(",
"'PN_dt'",
",",
"0.1",
")",
"PN_spin_order",
"=",
"kwargs",
".",
"pop",
"(",
"'PN_spin_order'",
",",
"7",
")",
"PN_phase_order",
"=",
"kwargs",
".",
"pop",
"(",
"'PN_phase_order'",
",",
"7",
")",
"omega_switch",
"=",
"kwargs",
".",
"pop",
"(",
"'omega_switch'",
",",
"0.018",
")",
"self",
".",
"_check_unused_kwargs",
"(",
"kwargs",
")",
"if",
"omega0",
"is",
"None",
":",
"# If omega0 is given, assume chiA, chiB are the coorbital frame",
"# spins at t=-100 M.",
"x",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"q",
"]",
",",
"chiA",
",",
"chiB",
")",
")",
"else",
":",
"# If omega0 is given, evolve the spins from omega0",
"# to t = -100 M from the peak.",
"chiA_coorb_fitnode",
",",
"chiB_coorb_fitnode",
",",
"quat_fitnode",
",",
"orbphase_fitnode",
"=",
"self",
".",
"_evolve_spins",
"(",
"q",
",",
"chiA",
",",
"chiB",
",",
"omega0",
",",
"PN_approximant",
",",
"PN_dt",
",",
"PN_spin_order",
",",
"PN_phase_order",
",",
"omega_switch",
")",
"# x should contain coorbital frame spins at t=-100M",
"x",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"q",
"]",
",",
"chiA_coorb_fitnode",
",",
"chiB_coorb_fitnode",
")",
")",
"def",
"eval_vector_fit",
"(",
"x",
",",
"fit_key",
")",
":",
"res",
"=",
"self",
".",
"_evaluate_fits",
"(",
"x",
",",
"fit_key",
")",
"fit_val",
"=",
"res",
".",
"T",
"[",
"0",
"]",
"fit_err",
"=",
"res",
".",
"T",
"[",
"1",
"]",
"if",
"omega0",
"is",
"not",
"None",
":",
"# If spins were given in inertial frame at omega0,",
"# transform vectors and errors back to the same frame.",
"fit_val",
"=",
"utils",
".",
"transform_vector_coorb_to_inertial",
"(",
"fit_val",
",",
"orbphase_fitnode",
",",
"quat_fitnode",
")",
"fit_err",
"=",
"utils",
".",
"transform_error_coorb_to_inertial",
"(",
"fit_val",
",",
"fit_err",
",",
"orbphase_fitnode",
",",
"quat_fitnode",
")",
"return",
"fit_val",
",",
"fit_err",
"if",
"fit_key",
"==",
"'mf'",
"or",
"fit_key",
"==",
"'all'",
":",
"mf",
",",
"mf_err",
"=",
"self",
".",
"_evaluate_fits",
"(",
"x",
",",
"'mf'",
")",
"if",
"fit_key",
"==",
"'mf'",
":",
"return",
"mf",
",",
"mf_err",
"if",
"fit_key",
"==",
"'chif'",
"or",
"fit_key",
"==",
"'all'",
":",
"chif",
",",
"chif_err",
"=",
"eval_vector_fit",
"(",
"x",
",",
"'chif'",
")",
"if",
"fit_key",
"==",
"'chif'",
":",
"return",
"chif",
",",
"chif_err",
"if",
"fit_key",
"==",
"'vf'",
"or",
"fit_key",
"==",
"'all'",
":",
"vf",
",",
"vf_err",
"=",
"eval_vector_fit",
"(",
"x",
",",
"'vf'",
")",
"if",
"fit_key",
"==",
"'vf'",
":",
"return",
"vf",
",",
"vf_err",
"if",
"fit_key",
"==",
"'all'",
":",
"return",
"mf",
",",
"chif",
",",
"vf",
",",
"mf_err",
",",
"chif_err",
",",
"vf_err"
] | Evaluates the surfinBH7dq2 model. | [
"Evaluates",
"the",
"surfinBH7dq2",
"model",
"."
] | python | train |
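Hedged sketch of the public fit interface this wrapper backs; assumes the surfinBH package with the 'surfinBH7dq2' fit data available (LoadFits is the package's documented entry point):

import surfinBH

fit = surfinBH.LoadFits('surfinBH7dq2')
q, chiA, chiB = 1.2, [0.1, 0.2, 0.3], [0.0, 0.0, 0.1]
mf, mf_err = fit.mf(q, chiA, chiB)        # remnant mass and fit error
chif, chif_err = fit.chif(q, chiA, chiB)  # remnant spin vector
vf, vf_err = fit.vf(q, chiA, chiB)        # recoil kick vector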
altair-viz/vega_datasets | vega_datasets/utils.py | https://github.com/altair-viz/vega_datasets/blob/391243aa0ff8eecd7ec7082c747dc0a67de0ccc7/vega_datasets/utils.py#L5-L18 | def connection_ok():
"""Check web connection.
Returns True if web connection is OK, False otherwise.
"""
try:
urlopen(Dataset.base_url, timeout=1)
# if an index page is ever added, this will pass through
return True
except HTTPError:
# There's no index for BASE_URL so Error 404 is expected
return True
except URLError:
# This is raised if there is no internet connection
return False | [
"def",
"connection_ok",
"(",
")",
":",
"try",
":",
"urlopen",
"(",
"Dataset",
".",
"base_url",
",",
"timeout",
"=",
"1",
")",
"# if an index page is ever added, this will pass through",
"return",
"True",
"except",
"HTTPError",
":",
"# There's no index for BASE_URL so Error 404 is expected",
"return",
"True",
"except",
"URLError",
":",
"# This is raised if there is no internet connection",
"return",
"False"
] | Check web connection.
Returns True if web connection is OK, False otherwise. | [
"Check",
"web",
"connection",
".",
"Returns",
"True",
"if",
"web",
"connection",
"is",
"OK",
"False",
"otherwise",
"."
] | python | train |
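Typical guard around network-dependent work:

from vega_datasets.utils import connection_ok

if not connection_ok():
    raise RuntimeError('No web connection; cannot fetch remote datasets.')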
materialsproject/pymatgen | pymatgen/io/abinit/tasks.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L109-L126 | def from_node(cls, task):
"""Initialize an instance from an :class:`AbinitTask` instance."""
new = super().from_node(task)
new.update(
executable=task.executable,
#executable_version:
#task_events=
pseudos=[p.as_dict() for p in task.input.pseudos],
#input=task.input
)
new.register_gridfs_files(
run_abi=(task.input_file.path, "t"),
run_abo=(task.output_file.path, "t"),
)
return new | [
"def",
"from_node",
"(",
"cls",
",",
"task",
")",
":",
"new",
"=",
"super",
"(",
")",
".",
"from_node",
"(",
"task",
")",
"new",
".",
"update",
"(",
"executable",
"=",
"task",
".",
"executable",
",",
"#executable_version:",
"#task_events=",
"pseudos",
"=",
"[",
"p",
".",
"as_dict",
"(",
")",
"for",
"p",
"in",
"task",
".",
"input",
".",
"pseudos",
"]",
",",
"#input=task.input",
")",
"new",
".",
"register_gridfs_files",
"(",
"run_abi",
"=",
"(",
"task",
".",
"input_file",
".",
"path",
",",
"\"t\"",
")",
",",
"run_abo",
"=",
"(",
"task",
".",
"output_file",
".",
"path",
",",
"\"t\"",
")",
",",
")",
"return",
"new"
] | Initialize an instance from an :class:`AbinitTask` instance. | [
"Initialize",
"an",
"instance",
"from",
"an",
":",
"class",
":",
"AbinitTask",
"instance",
"."
] | python | train |
saltstack/salt | salt/states/rbac_solaris.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rbac_solaris.py#L51-L184 | def managed(name, roles=None, profiles=None, authorizations=None):
'''
Manage RBAC properties for user
name : string
username
roles : list
list of roles for user
profiles : list
list of profiles for user
authorizations : list
list of authorizations for user
.. warning::
All existing roles, profiles and authorizations will be replaced!
An empty list will remove everything.
Set the property to `None` to not manage it.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
## check properties
if name not in __salt__['user.list_users']():
ret['result'] = False
ret['comment'] = 'User {0} does not exist!'.format(name)
return ret
if roles and not isinstance(roles, (list)):
ret['result'] = False
ret['comment'] = 'Property roles is not None or list!'
return ret
if profiles and not isinstance(profiles, (list)):
ret['result'] = False
ret['comment'] = 'Property profiles is not None or list!'
return ret
if authorizations and not isinstance(authorizations, (list)):
ret['result'] = False
ret['comment'] = 'Property authorizations is not None or list!'
return ret
log.debug('rbac.managed - roles=%s, profiles=%s, authorizations=%s',
roles, profiles, authorizations
)
## update roles
if isinstance(roles, (list)):
# compute changed
roles_current = __salt__['rbac.role_get'](name)
roles_add = [r for r in roles if r not in roles_current]
roles_rm = [r for r in roles_current if r not in roles]
# execute and verify changes
if roles_add:
res_roles_add = __salt__['rbac.role_add'](name, ','.join(roles_add).strip())
roles_current = __salt__['rbac.role_get'](name)
for role in roles_add:
if 'roles' not in ret['changes']:
ret['changes']['roles'] = {}
ret['changes']['roles'][role] = 'Added' if role in roles_current else 'Failed'
if ret['changes']['roles'][role] == 'Failed':
ret['result'] = False
if roles_rm:
res_roles_rm = __salt__['rbac.role_rm'](name, ','.join(roles_rm).strip())
roles_current = __salt__['rbac.role_get'](name)
for role in roles_rm:
if 'roles' not in ret['changes']:
ret['changes']['roles'] = {}
ret['changes']['roles'][role] = 'Removed' if role not in roles_current else 'Failed'
if ret['changes']['roles'][role] == 'Failed':
ret['result'] = False
## update profiles
if isinstance(profiles, (list)):
# compute changed
profiles_current = __salt__['rbac.profile_get'](name)
profiles_add = [r for r in profiles if r not in profiles_current]
profiles_rm = [r for r in profiles_current if r not in profiles]
# execute and verify changes
if profiles_add:
res_profiles_add = __salt__['rbac.profile_add'](name, ','.join(profiles_add).strip())
profiles_current = __salt__['rbac.profile_get'](name)
for profile in profiles_add:
if 'profiles' not in ret['changes']:
ret['changes']['profiles'] = {}
ret['changes']['profiles'][profile] = 'Added' if profile in profiles_current else 'Failed'
if ret['changes']['profiles'][profile] == 'Failed':
ret['result'] = False
if profiles_rm:
res_profiles_rm = __salt__['rbac.profile_rm'](name, ','.join(profiles_rm).strip())
profiles_current = __salt__['rbac.profile_get'](name)
for profile in profiles_rm:
if 'profiles' not in ret['changes']:
ret['changes']['profiles'] = {}
ret['changes']['profiles'][profile] = 'Removed' if profile not in profiles_current else 'Failed'
if ret['changes']['profiles'][profile] == 'Failed':
ret['result'] = False
## update auths
if isinstance(authorizations, (list)):
# compute changed
auths_current = __salt__['rbac.auth_get'](name, False)
auths_add = [r for r in authorizations if r not in auths_current]
auths_rm = [r for r in auths_current if r not in authorizations]
# execute and verify changes
if auths_add:
res_auths_add = __salt__['rbac.auth_add'](name, ','.join(auths_add).strip())
auths_current = __salt__['rbac.auth_get'](name)
for auth in auths_add:
if 'authorizations' not in ret['changes']:
ret['changes']['authorizations'] = {}
ret['changes']['authorizations'][auth] = 'Added' if auth in auths_current else 'Failed'
if ret['changes']['authorizations'][auth] == 'Failed':
ret['result'] = False
if auths_rm:
res_auths_rm = __salt__['rbac.auth_rm'](name, ','.join(auths_rm).strip())
auths_current = __salt__['rbac.auth_get'](name)
for auth in auths_rm:
if 'authorizations' not in ret['changes']:
ret['changes']['authorizations'] = {}
ret['changes']['authorizations'][auth] = 'Removed' if auth not in auths_current else 'Failed'
if ret['changes']['authorizations'][auth] == 'Failed':
ret['result'] = False
return ret | [
"def",
"managed",
"(",
"name",
",",
"roles",
"=",
"None",
",",
"profiles",
"=",
"None",
",",
"authorizations",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
"}",
"## check properties",
"if",
"name",
"not",
"in",
"__salt__",
"[",
"'user.list_users'",
"]",
"(",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'User {0} does not exist!'",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"roles",
"and",
"not",
"isinstance",
"(",
"roles",
",",
"(",
"list",
")",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Property roles is not None or list!'",
"return",
"ret",
"if",
"profiles",
"and",
"not",
"isinstance",
"(",
"profiles",
",",
"(",
"list",
")",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Property profiles is not None or list!'",
"return",
"ret",
"if",
"authorizations",
"and",
"not",
"isinstance",
"(",
"authorizations",
",",
"(",
"list",
")",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Property authorizations is not None or list!'",
"return",
"ret",
"log",
".",
"debug",
"(",
"'rbac.managed - roles=%s, profiles=%s, authorizations=%s'",
",",
"roles",
",",
"profiles",
",",
"authorizations",
")",
"## update roles",
"if",
"isinstance",
"(",
"roles",
",",
"(",
"list",
")",
")",
":",
"# compute changed",
"roles_current",
"=",
"__salt__",
"[",
"'rbac.role_get'",
"]",
"(",
"name",
")",
"roles_add",
"=",
"[",
"r",
"for",
"r",
"in",
"roles",
"if",
"r",
"not",
"in",
"roles_current",
"]",
"roles_rm",
"=",
"[",
"r",
"for",
"r",
"in",
"roles_current",
"if",
"r",
"not",
"in",
"roles",
"]",
"# execute and verify changes",
"if",
"roles_add",
":",
"res_roles_add",
"=",
"__salt__",
"[",
"'rbac.role_add'",
"]",
"(",
"name",
",",
"','",
".",
"join",
"(",
"roles_add",
")",
".",
"strip",
"(",
")",
")",
"roles_current",
"=",
"__salt__",
"[",
"'rbac.role_get'",
"]",
"(",
"name",
")",
"for",
"role",
"in",
"roles_add",
":",
"if",
"'roles'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"[",
"role",
"]",
"=",
"'Added'",
"if",
"role",
"in",
"roles_current",
"else",
"'Failed'",
"if",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"[",
"role",
"]",
"==",
"'Failed'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"if",
"roles_rm",
":",
"res_roles_rm",
"=",
"__salt__",
"[",
"'rbac.role_rm'",
"]",
"(",
"name",
",",
"','",
".",
"join",
"(",
"roles_rm",
")",
".",
"strip",
"(",
")",
")",
"roles_current",
"=",
"__salt__",
"[",
"'rbac.role_get'",
"]",
"(",
"name",
")",
"for",
"role",
"in",
"roles_rm",
":",
"if",
"'roles'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"[",
"role",
"]",
"=",
"'Removed'",
"if",
"role",
"not",
"in",
"roles_current",
"else",
"'Failed'",
"if",
"ret",
"[",
"'changes'",
"]",
"[",
"'roles'",
"]",
"[",
"role",
"]",
"==",
"'Failed'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"## update profiles",
"if",
"isinstance",
"(",
"profiles",
",",
"(",
"list",
")",
")",
":",
"# compute changed",
"profiles_current",
"=",
"__salt__",
"[",
"'rbac.profile_get'",
"]",
"(",
"name",
")",
"profiles_add",
"=",
"[",
"r",
"for",
"r",
"in",
"profiles",
"if",
"r",
"not",
"in",
"profiles_current",
"]",
"profiles_rm",
"=",
"[",
"r",
"for",
"r",
"in",
"profiles_current",
"if",
"r",
"not",
"in",
"profiles",
"]",
"# execute and verify changes",
"if",
"profiles_add",
":",
"res_profiles_add",
"=",
"__salt__",
"[",
"'rbac.profile_add'",
"]",
"(",
"name",
",",
"','",
".",
"join",
"(",
"profiles_add",
")",
".",
"strip",
"(",
")",
")",
"profiles_current",
"=",
"__salt__",
"[",
"'rbac.profile_get'",
"]",
"(",
"name",
")",
"for",
"profile",
"in",
"profiles_add",
":",
"if",
"'profiles'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'profiles'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'profiles'",
"]",
"[",
"profile",
"]",
"=",
"'Added'",
"if",
"profile",
"in",
"profiles_current",
"else",
"'Failed'",
"if",
"ret",
"[",
"'changes'",
"]",
"[",
"'profiles'",
"]",
"[",
"profile",
"]",
"==",
"'Failed'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"if",
"profiles_rm",
":",
"res_profiles_rm",
"=",
"__salt__",
"[",
"'rbac.profile_rm'",
"]",
"(",
"name",
",",
"','",
".",
"join",
"(",
"profiles_rm",
")",
".",
"strip",
"(",
")",
")",
"profiles_current",
"=",
"__salt__",
"[",
"'rbac.profile_get'",
"]",
"(",
"name",
")",
"for",
"profile",
"in",
"profiles_rm",
":",
"if",
"'profiles'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'profiles'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'profiles'",
"]",
"[",
"profile",
"]",
"=",
"'Removed'",
"if",
"profile",
"not",
"in",
"profiles_current",
"else",
"'Failed'",
"if",
"ret",
"[",
"'changes'",
"]",
"[",
"'profiles'",
"]",
"[",
"profile",
"]",
"==",
"'Failed'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"## update auths",
"if",
"isinstance",
"(",
"authorizations",
",",
"(",
"list",
")",
")",
":",
"# compute changed",
"auths_current",
"=",
"__salt__",
"[",
"'rbac.auth_get'",
"]",
"(",
"name",
",",
"False",
")",
"auths_add",
"=",
"[",
"r",
"for",
"r",
"in",
"authorizations",
"if",
"r",
"not",
"in",
"auths_current",
"]",
"auths_rm",
"=",
"[",
"r",
"for",
"r",
"in",
"auths_current",
"if",
"r",
"not",
"in",
"authorizations",
"]",
"# execute and verify changes",
"if",
"auths_add",
":",
"res_auths_add",
"=",
"__salt__",
"[",
"'rbac.auth_add'",
"]",
"(",
"name",
",",
"','",
".",
"join",
"(",
"auths_add",
")",
".",
"strip",
"(",
")",
")",
"auths_current",
"=",
"__salt__",
"[",
"'rbac.auth_get'",
"]",
"(",
"name",
")",
"for",
"auth",
"in",
"auths_add",
":",
"if",
"'authorizations'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'authorizations'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'authorizations'",
"]",
"[",
"auth",
"]",
"=",
"'Added'",
"if",
"auth",
"in",
"auths_current",
"else",
"'Failed'",
"if",
"ret",
"[",
"'changes'",
"]",
"[",
"'authorizations'",
"]",
"[",
"auth",
"]",
"==",
"'Failed'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"if",
"auths_rm",
":",
"res_auths_rm",
"=",
"__salt__",
"[",
"'rbac.auth_rm'",
"]",
"(",
"name",
",",
"','",
".",
"join",
"(",
"auths_rm",
")",
".",
"strip",
"(",
")",
")",
"auths_current",
"=",
"__salt__",
"[",
"'rbac.auth_get'",
"]",
"(",
"name",
")",
"for",
"auth",
"in",
"auths_rm",
":",
"if",
"'authorizations'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'authorizations'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'authorizations'",
"]",
"[",
"auth",
"]",
"=",
"'Removed'",
"if",
"auth",
"not",
"in",
"auths_current",
"else",
"'Failed'",
"if",
"ret",
"[",
"'changes'",
"]",
"[",
"'authorizations'",
"]",
"[",
"auth",
"]",
"==",
"'Failed'",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret"
] | Manage RBAC properties for user
name : string
username
roles : list
list of roles for user
profiles : list
list of profiles for user
authorizations : list
list of authorizations for user
.. warning::
All existing roles, profiles and authorizations will be replaced!
An empty list will remove everything.
Set the property to `None` to not manage it. | [
"Manage",
"RBAC",
"properties",
"for",
"user"
] | python | train |
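The dict returned above follows Salt's standard state contract; an illustrative (not captured) successful run looks like:

ret = {
    'name': 'jdoe',
    'result': True,                  # flips to False if any change 'Failed'
    'comment': '',
    'changes': {
        'roles': {'operator': 'Added'},
        'profiles': {'Basic Solaris User': 'Removed'},
    },
}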
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Builder.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Builder.py#L874-L882 | def is_a_Builder(obj):
""""Returns True if the specified obj is one of our Builder classes.
The test is complicated a bit by the fact that CompositeBuilder
is a proxy, not a subclass of BuilderBase.
"""
return (isinstance(obj, BuilderBase)
or isinstance(obj, CompositeBuilder)
or callable(obj)) | [
"def",
"is_a_Builder",
"(",
"obj",
")",
":",
"return",
"(",
"isinstance",
"(",
"obj",
",",
"BuilderBase",
")",
"or",
"isinstance",
"(",
"obj",
",",
"CompositeBuilder",
")",
"or",
"callable",
"(",
"obj",
")",
")"
] | Returns True if the specified obj is one of our Builder classes.
The test is complicated a bit by the fact that CompositeBuilder
is a proxy, not a subclass of BuilderBase. | [
"Returns",
"True",
"if",
"the",
"specified",
"obj",
"is",
"one",
"of",
"our",
"Builder",
"classes",
"."
] | python | train |
coinbase/coinbase-python | coinbase/wallet/client.py | https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L326-L329 | def get_address(self, account_id, address_id, **params):
"""https://developers.coinbase.com/api/v2#show-addresss"""
response = self._get('v2', 'accounts', account_id, 'addresses', address_id, params=params)
return self._make_api_object(response, Address) | [
"def",
"get_address",
"(",
"self",
",",
"account_id",
",",
"address_id",
",",
"*",
"*",
"params",
")",
":",
"response",
"=",
"self",
".",
"_get",
"(",
"'v2'",
",",
"'accounts'",
",",
"account_id",
",",
"'addresses'",
",",
"address_id",
",",
"params",
"=",
"params",
")",
"return",
"self",
".",
"_make_api_object",
"(",
"response",
",",
"Address",
")"
] | https://developers.coinbase.com/api/v2#show-addresss | [
"https",
":",
"//",
"developers",
".",
"coinbase",
".",
"com",
"/",
"api",
"/",
"v2#show",
"-",
"addresss"
] | python | train |
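Hedged usage sketch; Client is this SDK's entry point and the credentials/IDs are placeholders:

from coinbase.wallet.client import Client

client = Client('API_KEY', 'API_SECRET')          # placeholder credentials
address = client.get_address('ACCOUNT_ID', 'ADDRESS_ID')
print(address)                                    # APIObject, prints as JSON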
mozilla/treeherder | treeherder/log_parser/parsers.py | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/log_parser/parsers.py#L436-L472 | def parse_line(self, line, lineno):
"""Check a single line for an error. Keeps track of the linenumber"""
# TaskCluster logs are a bit wonky.
#
# TaskCluster logs begin with output coming from TaskCluster itself,
# before it has transitioned control of the task to the configured
# process. These "internal" logs look like the following:
#
# [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b
#
# If an error occurs during this "setup" phase, TaskCluster may emit
# lines beginning with ``[taskcluster:error]``.
#
# Once control has transitioned from TaskCluster to the configured
# task process, lines can be whatever the configured process emits.
# The popular ``run-task`` wrapper prefixes output to emulate
# TaskCluster's "internal" logs. e.g.
#
# [vcs 2016-09-09T17:45:02.842230Z] adding changesets
#
# This prefixing can confuse error parsing. So, we strip it.
#
# Because regular expression matching and string manipulation can be
# expensive when performed on every line, we only strip the TaskCluster
# log prefix if we know we're in a TaskCluster log.
# First line of TaskCluster logs almost certainly has this.
if line.startswith('[taskcluster '):
self.is_taskcluster = True
# For performance reasons, only do this if we have identified as
# a TC task.
if self.is_taskcluster:
line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line)
if self.is_error_line(line):
self.add(line, lineno) | [
"def",
"parse_line",
"(",
"self",
",",
"line",
",",
"lineno",
")",
":",
"# TaskCluster logs are a bit wonky.",
"#",
"# TaskCluster logs begin with output coming from TaskCluster itself,",
"# before it has transitioned control of the task to the configured",
"# process. These \"internal\" logs look like the following:",
"#",
"# [taskcluster 2016-09-09 17:41:43.544Z] Worker Group: us-west-2b",
"#",
"# If an error occurs during this \"setup\" phase, TaskCluster may emit",
"# lines beginning with ``[taskcluster:error]``.",
"#",
"# Once control has transitioned from TaskCluster to the configured",
"# task process, lines can be whatever the configured process emits.",
"# The popular ``run-task`` wrapper prefixes output to emulate",
"# TaskCluster's \"internal\" logs. e.g.",
"#",
"# [vcs 2016-09-09T17:45:02.842230Z] adding changesets",
"#",
"# This prefixing can confuse error parsing. So, we strip it.",
"#",
"# Because regular expression matching and string manipulation can be",
"# expensive when performed on every line, we only strip the TaskCluster",
"# log prefix if we know we're in a TaskCluster log.",
"# First line of TaskCluster logs almost certainly has this.",
"if",
"line",
".",
"startswith",
"(",
"'[taskcluster '",
")",
":",
"self",
".",
"is_taskcluster",
"=",
"True",
"# For performance reasons, only do this if we have identified as",
"# a TC task.",
"if",
"self",
".",
"is_taskcluster",
":",
"line",
"=",
"re",
".",
"sub",
"(",
"self",
".",
"RE_TASKCLUSTER_NORMAL_PREFIX",
",",
"\"\"",
",",
"line",
")",
"if",
"self",
".",
"is_error_line",
"(",
"line",
")",
":",
"self",
".",
"add",
"(",
"line",
",",
"lineno",
")"
] | Check a single line for an error. Keeps track of the line number | [
"Check",
"a",
"single",
"line",
"for",
"an",
"error",
".",
"Keeps",
"track",
"of",
"the",
"linenumber"
] | python | train |
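Hedged driving sketch; 'parser' stands for an instance of whatever class defines parse_line here:

def scan(parser, log_lines):
    # parser: an instance of the error-parser class defining parse_line
    for lineno, line in enumerate(log_lines):
        parser.parse_line(line, lineno)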
rsgalloway/grit | grit/server/cherrypy/__init__.py | https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/cherrypy/__init__.py#L2004-L2017 | def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk) | [
"def",
"write",
"(",
"self",
",",
"chunk",
")",
":",
"if",
"not",
"self",
".",
"started_response",
":",
"raise",
"AssertionError",
"(",
"\"WSGI write called before start_response.\"",
")",
"if",
"not",
"self",
".",
"req",
".",
"sent_headers",
":",
"self",
".",
"req",
".",
"sent_headers",
"=",
"True",
"self",
".",
"req",
".",
"send_headers",
"(",
")",
"self",
".",
"req",
".",
"write",
"(",
"chunk",
")"
] | WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application). | [
"WSGI",
"callable",
"to",
"write",
"unbuffered",
"data",
"to",
"the",
"client",
".",
"This",
"method",
"is",
"also",
"used",
"internally",
"by",
"start_response",
"(",
"to",
"write",
"data",
"from",
"the",
"iterable",
"returned",
"by",
"the",
"WSGI",
"application",
")",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/phonon/bandstructure.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/phonon/bandstructure.py#L507-L534 | def band_reorder(self):
"""
Re-order the eigenvalues according to the similarity of the eigenvectors
"""
eiv = self.eigendisplacements
eig = self.bands
nphonons,nqpoints = self.bands.shape
order = np.zeros([nqpoints,nphonons],dtype=int)
order[0] = np.array(range(nphonons))
#get the atomic masses
atomic_masses = [ site.specie.atomic_mass for site in self.structure.sites ]
#get order
for nq in range(1,nqpoints):
old_eiv = eigenvectors_from_displacements(eiv[:,nq-1],atomic_masses)
new_eiv = eigenvectors_from_displacements(eiv[:,nq], atomic_masses)
order[nq] = estimate_band_connection(old_eiv.reshape([nphonons,nphonons]).T,
new_eiv.reshape([nphonons,nphonons]).T,
order[nq-1])
#reorder
for nq in range(1,nqpoints):
eivq=eiv[:,nq]
eigq=eig[:,nq]
eiv[:,nq] = eivq[order[nq]]
eig[:,nq] = eigq[order[nq]] | [
"def",
"band_reorder",
"(",
"self",
")",
":",
"eiv",
"=",
"self",
".",
"eigendisplacements",
"eig",
"=",
"self",
".",
"bands",
"nphonons",
",",
"nqpoints",
"=",
"self",
".",
"bands",
".",
"shape",
"order",
"=",
"np",
".",
"zeros",
"(",
"[",
"nqpoints",
",",
"nphonons",
"]",
",",
"dtype",
"=",
"int",
")",
"order",
"[",
"0",
"]",
"=",
"np",
".",
"array",
"(",
"range",
"(",
"nphonons",
")",
")",
"#get the atomic masses",
"atomic_masses",
"=",
"[",
"site",
".",
"specie",
".",
"atomic_mass",
"for",
"site",
"in",
"self",
".",
"structure",
".",
"sites",
"]",
"#get order",
"for",
"nq",
"in",
"range",
"(",
"1",
",",
"nqpoints",
")",
":",
"old_eiv",
"=",
"eigenvectors_from_displacements",
"(",
"eiv",
"[",
":",
",",
"nq",
"-",
"1",
"]",
",",
"atomic_masses",
")",
"new_eiv",
"=",
"eigenvectors_from_displacements",
"(",
"eiv",
"[",
":",
",",
"nq",
"]",
",",
"atomic_masses",
")",
"order",
"[",
"nq",
"]",
"=",
"estimate_band_connection",
"(",
"old_eiv",
".",
"reshape",
"(",
"[",
"nphonons",
",",
"nphonons",
"]",
")",
".",
"T",
",",
"new_eiv",
".",
"reshape",
"(",
"[",
"nphonons",
",",
"nphonons",
"]",
")",
".",
"T",
",",
"order",
"[",
"nq",
"-",
"1",
"]",
")",
"#reorder",
"for",
"nq",
"in",
"range",
"(",
"1",
",",
"nqpoints",
")",
":",
"eivq",
"=",
"eiv",
"[",
":",
",",
"nq",
"]",
"eigq",
"=",
"eig",
"[",
":",
",",
"nq",
"]",
"eiv",
"[",
":",
",",
"nq",
"]",
"=",
"eivq",
"[",
"order",
"[",
"nq",
"]",
"]",
"eig",
"[",
":",
",",
"nq",
"]",
"=",
"eigq",
"[",
"order",
"[",
"nq",
"]",
"]"
] | Re-order the eigenvalues according to the similarity of the eigenvectors | [
"Re",
"-",
"order",
"the",
"eigenvalues",
"according",
"to",
"the",
"similarity",
"of",
"the",
"eigenvectors"
] | python | train |
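Hedged usage: the method mutates bands and eigendisplacements in place and requires eigendisplacements to be populated; loading a serialized band structure via monty is one assumed way to obtain such an object:

from monty.serialization import loadfn

bs = loadfn('phonon_bs.json')   # assumed: MSON-serialized PhononBandStructure
bs.band_reorder()               # in-place; bands now follow eigenvector
                                # similarity across q-points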
DLR-RM/RAFCON | source/rafcon/core/states/container_state.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1810-L1822 | def _check_data_flow_id(self, data_flow):
"""Checks the validity of a data flow id
Checks whether the id of the given data flow is already used by another data flow within the state.
:param rafcon.core.data_flow.DataFlow data_flow: The data flow to be checked
:return bool validity, str message: validity is True, when the data flow is valid, False else. message gives
more information especially if the data flow is not valid
"""
data_flow_id = data_flow.data_flow_id
if data_flow_id in self.data_flows and data_flow is not self.data_flows[data_flow_id]:
return False, "data_flow_id already existing"
return True, "valid" | [
"def",
"_check_data_flow_id",
"(",
"self",
",",
"data_flow",
")",
":",
"data_flow_id",
"=",
"data_flow",
".",
"data_flow_id",
"if",
"data_flow_id",
"in",
"self",
".",
"data_flows",
"and",
"data_flow",
"is",
"not",
"self",
".",
"data_flows",
"[",
"data_flow_id",
"]",
":",
"return",
"False",
",",
"\"data_flow_id already existing\"",
"return",
"True",
",",
"\"valid\""
] | Checks the validity of a data flow id
Checks whether the id of the given data flow is already used by another data flow within the state.
:param rafcon.core.data_flow.DataFlow data_flow: The data flow to be checked
:return bool validity, str message: validity is True when the data flow is valid, False otherwise. message gives
more information, especially if the data flow is not valid | [
"Checks",
"the",
"validity",
"of",
"a",
"data",
"flow",
"id"
] | python | train |
edx/edx-enterprise | enterprise/api_client/discovery.py | https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api_client/discovery.py#L294-L312 | def get_program_by_title(self, program_title):
"""
Return single program by name, or None if not found.
Arguments:
program_title(string): Program title as seen by students and in Course Catalog Admin
Returns:
dict: Program data provided by Course Catalog API
"""
all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[])
matching_programs = [program for program in all_programs if program.get('title') == program_title]
if len(matching_programs) > 1:
raise MultipleProgramMatchError(len(matching_programs))
elif len(matching_programs) == 1:
return matching_programs[0]
else:
return None | [
"def",
"get_program_by_title",
"(",
"self",
",",
"program_title",
")",
":",
"all_programs",
"=",
"self",
".",
"_load_data",
"(",
"self",
".",
"PROGRAMS_ENDPOINT",
",",
"default",
"=",
"[",
"]",
")",
"matching_programs",
"=",
"[",
"program",
"for",
"program",
"in",
"all_programs",
"if",
"program",
".",
"get",
"(",
"'title'",
")",
"==",
"program_title",
"]",
"if",
"len",
"(",
"matching_programs",
")",
">",
"1",
":",
"raise",
"MultipleProgramMatchError",
"(",
"len",
"(",
"matching_programs",
")",
")",
"elif",
"len",
"(",
"matching_programs",
")",
"==",
"1",
":",
"return",
"matching_programs",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] | Return single program by name, or None if not found.
Arguments:
program_title(string): Program title as seen by students and in Course Catalog Admin
Returns:
dict: Program data provided by Course Catalog API | [
"Return",
"single",
"program",
"by",
"name",
"or",
"None",
"if",
"not",
"found",
"."
] | python | valid |
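Hedged sketch; CourseCatalogApiClient is assumed to be the class this method belongs to in this module, and the user object and title are placeholders:

client = CourseCatalogApiClient(user)            # assumed class/constructor
program = client.get_program_by_title('Data Science')
if program is not None:
    print(program.get('uuid'))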
lehins/python-wepay | wepay/calls/account.py | https://github.com/lehins/python-wepay/blob/414d25a1a8d0ecb22a3ddd1f16c60b805bb52a1f/wepay/calls/account.py#L151-L173 | def __get_update_uri(self, account_id, **kwargs):
"""Call documentation: `/account/get_update_uri
<https://www.wepay.com/developer/reference/account#update_uri>`_, plus extra
keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'account_id': account_id
}
return self.make_call(self.__get_update_uri, params, kwargs) | [
"def",
"__get_update_uri",
"(",
"self",
",",
"account_id",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'account_id'",
":",
"account_id",
"}",
"return",
"self",
".",
"make_call",
"(",
"self",
".",
"__get_update_uri",
",",
"params",
",",
"kwargs",
")"
] | Call documentation: `/account/get_update_uri
<https://www.wepay.com/developer/reference/account#update_uri>`_, plus extra
keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` | [
"Call",
"documentation",
":",
"/",
"account",
"/",
"get_update_uri",
"<https",
":",
"//",
"www",
".",
"wepay",
".",
"com",
"/",
"developer",
"/",
"reference",
"/",
"account#update_uri",
">",
"_",
"plus",
"extra",
"keyword",
"parameters",
":",
":",
"keyword",
"str",
"access_token",
":",
"will",
"be",
"used",
"instead",
"of",
"instance",
"s",
"access_token",
"with",
"batch_mode",
"=",
"True",
"will",
"set",
"authorization",
"param",
"to",
"it",
"s",
"value",
"."
] | python | train |
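Despite the name-mangled definition, this SDK exposes calls as public attributes; a hedged sketch with a placeholder token and account id (the api.account attribute path is assumed from the library's conventions):

from wepay import WePay

api = WePay(production=False, access_token='ACCESS_TOKEN')
response = api.account.get_update_uri(12345)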
zabuldon/teslajsonpy | teslajsonpy/gps.py | https://github.com/zabuldon/teslajsonpy/blob/673ecdb5c9483160fb1b97e30e62f2c863761c39/teslajsonpy/gps.py#L53-L64 | def update(self):
"""Update the current GPS location."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_drive_params(self._id)
if data:
self.__longitude = data['longitude']
self.__latitude = data['latitude']
self.__heading = data['heading']
if self.__longitude and self.__latitude and self.__heading:
self.__location = {'longitude': self.__longitude,
'latitude': self.__latitude,
'heading': self.__heading} | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"_controller",
".",
"update",
"(",
"self",
".",
"_id",
",",
"wake_if_asleep",
"=",
"False",
")",
"data",
"=",
"self",
".",
"_controller",
".",
"get_drive_params",
"(",
"self",
".",
"_id",
")",
"if",
"data",
":",
"self",
".",
"__longitude",
"=",
"data",
"[",
"'longitude'",
"]",
"self",
".",
"__latitude",
"=",
"data",
"[",
"'latitude'",
"]",
"self",
".",
"__heading",
"=",
"data",
"[",
"'heading'",
"]",
"if",
"self",
".",
"__longitude",
"and",
"self",
".",
"__latitude",
"and",
"self",
".",
"__heading",
":",
"self",
".",
"__location",
"=",
"{",
"'longitude'",
":",
"self",
".",
"__longitude",
",",
"'latitude'",
":",
"self",
".",
"__latitude",
",",
"'heading'",
":",
"self",
".",
"__heading",
"}"
] | Update the current GPS location. | [
"Update",
"the",
"current",
"GPS",
"location",
"."
] | python | train |
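Hedged polling sketch; gps_sensor is a GPS instance created by the library's Controller, and get_location() is assumed to be the public accessor for the name-mangled __location:

def refresh_location(gps_sensor):
    gps_sensor.update()               # refresh cached drive params
    return gps_sensor.get_location()  # assumed accessor; returns the dict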
nephila/djangocms-installer | djangocms_installer/utils.py | https://github.com/nephila/djangocms-installer/blob/9fec66d5f8b1e9a0f3c0ec66dd777db578fab07e/djangocms_installer/utils.py#L126-L136 | def format_val(val):
"""
Returns val as integer or as escaped string according to its value
:param val: any value
:return: formatted string
"""
val = text_type(val)
if val.isdigit():
return int(val)
else:
return '\'{0}\''.format(val) | [
"def",
"format_val",
"(",
"val",
")",
":",
"val",
"=",
"text_type",
"(",
"val",
")",
"if",
"val",
".",
"isdigit",
"(",
")",
":",
"return",
"int",
"(",
"val",
")",
"else",
":",
"return",
"'\\'{0}\\''",
".",
"format",
"(",
"val",
")"
] | Returns val as integer or as escaped string according to its value
:param val: any value
:return: formatted string | [
"Returns",
"val",
"as",
"integer",
"or",
"as",
"escaped",
"string",
"according",
"to",
"its",
"value",
":",
"param",
"val",
":",
"any",
"value",
":",
"return",
":",
"formatted",
"string"
] | python | valid |
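Runnable illustration of both branches:

from djangocms_installer.utils import format_val

assert format_val('42') == 42      # digit strings become ints
assert format_val(3) == 3          # non-strings are coerced first
assert format_val('en') == "'en'"  # everything else is single-quoted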
Azure/azure-uamqp-python | uamqp/client.py | https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/client.py#L728-L748 | def send_all_messages(self, close_on_done=True):
"""Send all pending messages in the queue. This will return a list
of the send result of all the pending messages so it can be
determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState]
"""
self.open()
running = True
try:
messages = self._pending_messages[:]
running = self.wait()
results = [m.state for m in messages]
return results
finally:
if close_on_done or not running:
self.close() | [
"def",
"send_all_messages",
"(",
"self",
",",
"close_on_done",
"=",
"True",
")",
":",
"self",
".",
"open",
"(",
")",
"running",
"=",
"True",
"try",
":",
"messages",
"=",
"self",
".",
"_pending_messages",
"[",
":",
"]",
"running",
"=",
"self",
".",
"wait",
"(",
")",
"results",
"=",
"[",
"m",
".",
"state",
"for",
"m",
"in",
"messages",
"]",
"return",
"results",
"finally",
":",
"if",
"close_on_done",
"or",
"not",
"running",
":",
"self",
".",
"close",
"(",
")"
] | Send all pending messages in the queue. This will return a list
of the send result of all the pending messages so it can be
determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState] | [
"Send",
"all",
"pending",
"messages",
"in",
"the",
"queue",
".",
"This",
"will",
"return",
"a",
"list",
"of",
"the",
"send",
"result",
"of",
"all",
"the",
"pending",
"messages",
"so",
"it",
"can",
"be",
"determined",
"if",
"any",
"messages",
"failed",
"to",
"send",
".",
"This",
"function",
"will",
"open",
"the",
"client",
"if",
"it",
"is",
"not",
"already",
"open",
"."
] | python | train |
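Hedged usage with the library's SendClient; the target URI is a placeholder and authentication is omitted:

from uamqp import SendClient, Message

client = SendClient('amqps://<namespace>.servicebus.windows.net/<queue>')
client.queue_message(Message(b'payload'))
results = client.send_all_messages()   # one MessageState per queued message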
MediaFire/mediafire-python-open-sdk | mediafire/client.py | https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/client.py#L558-L582 | def update_file_metadata(self, uri, filename=None, description=None,
mtime=None, privacy=None):
"""Update file metadata.
uri -- MediaFire file URI
Supplying the following keyword arguments would change the
metadata on the server side:
filename -- rename file
description -- set file description string
mtime -- set file modification time
privacy -- set file privacy - 'private' or 'public'
"""
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, File):
raise ValueError('Expected File, got {}'.format(type(resource)))
result = self.api.file_update(resource['quickkey'], filename=filename,
description=description,
mtime=mtime, privacy=privacy)
return result | [
"def",
"update_file_metadata",
"(",
"self",
",",
"uri",
",",
"filename",
"=",
"None",
",",
"description",
"=",
"None",
",",
"mtime",
"=",
"None",
",",
"privacy",
"=",
"None",
")",
":",
"resource",
"=",
"self",
".",
"get_resource_by_uri",
"(",
"uri",
")",
"if",
"not",
"isinstance",
"(",
"resource",
",",
"File",
")",
":",
"raise",
"ValueError",
"(",
"'Expected File, got {}'",
".",
"format",
"(",
"type",
"(",
"resource",
")",
")",
")",
"result",
"=",
"self",
".",
"api",
".",
"file_update",
"(",
"resource",
"[",
"'quickkey'",
"]",
",",
"filename",
"=",
"filename",
",",
"description",
"=",
"description",
",",
"mtime",
"=",
"mtime",
",",
"privacy",
"=",
"privacy",
")",
"return",
"result"
] | Update file metadata.
uri -- MediaFire file URI
Supplying the following keyword arguments would change the
metadata on the server side:
filename -- rename file
description -- set file description string
mtime -- set file modification time
privacy -- set file privacy - 'private' or 'public' | [
"Update",
"file",
"metadata",
"."
] | python | train |
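Hedged sketch; MediaFireClient and the mf: URI scheme follow this SDK's conventions, the login signature is assumed, and all credentials are placeholders:

from mediafire.client import MediaFireClient

client = MediaFireClient()
client.login(email='[email protected]', password='secret',
             app_id='APP_ID')          # signature assumed; placeholders
client.update_file_metadata('mf:/Documents/report.txt', privacy='private')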
AntagonistHQ/openprovider.py | openprovider/modules/customer.py | https://github.com/AntagonistHQ/openprovider.py/blob/5871c3d5b3661e23667f147f49f20389c817a0a4/openprovider/modules/customer.py#L45-L75 | def create_customer(self, name, gender, address, phone, email, vat=None, fax=None,
company_name=None, additional_data=None, extension_additional_data=None):
"""Create a customer"""
response = self.request(E.createCustomerRequest(
E.companyName(company_name),
E.vat(vat),
E.name(
E.initials(name.initials),
E.firstName(name.first_name),
E.prefix(name.prefix or ''),
E.lastName(name.last_name),
),
E.gender(gender),
_get_phone_xml('phone', phone),
_get_phone_xml('fax', fax),
E.address(
E.street(address.street),
E.number(address.number),
E.suffix(address.suffix or ''),
E.zipcode(address.zipcode),
E.city(address.city),
E.state(address.state or ''),
E.country(address.country),
),
E.email(email),
_additional_data(additional_data),
_extension_additional_data(extension_additional_data),
))
return str(response.data.handle) | [
"def",
"create_customer",
"(",
"self",
",",
"name",
",",
"gender",
",",
"address",
",",
"phone",
",",
"email",
",",
"vat",
"=",
"None",
",",
"fax",
"=",
"None",
",",
"company_name",
"=",
"None",
",",
"additional_data",
"=",
"None",
",",
"extension_additional_data",
"=",
"None",
")",
":",
"response",
"=",
"self",
".",
"request",
"(",
"E",
".",
"createCustomerRequest",
"(",
"E",
".",
"companyName",
"(",
"company_name",
")",
",",
"E",
".",
"vat",
"(",
"vat",
")",
",",
"E",
".",
"name",
"(",
"E",
".",
"initials",
"(",
"name",
".",
"initials",
")",
",",
"E",
".",
"firstName",
"(",
"name",
".",
"first_name",
")",
",",
"E",
".",
"prefix",
"(",
"name",
".",
"prefix",
"or",
"''",
")",
",",
"E",
".",
"lastName",
"(",
"name",
".",
"last_name",
")",
",",
")",
",",
"E",
".",
"gender",
"(",
"gender",
")",
",",
"_get_phone_xml",
"(",
"'phone'",
",",
"phone",
")",
",",
"_get_phone_xml",
"(",
"'fax'",
",",
"fax",
")",
",",
"E",
".",
"address",
"(",
"E",
".",
"street",
"(",
"address",
".",
"street",
")",
",",
"E",
".",
"number",
"(",
"address",
".",
"number",
")",
",",
"E",
".",
"suffix",
"(",
"address",
".",
"suffix",
"or",
"''",
")",
",",
"E",
".",
"zipcode",
"(",
"address",
".",
"zipcode",
")",
",",
"E",
".",
"city",
"(",
"address",
".",
"city",
")",
",",
"E",
".",
"state",
"(",
"address",
".",
"state",
"or",
"''",
")",
",",
"E",
".",
"country",
"(",
"address",
".",
"country",
")",
",",
")",
",",
"E",
".",
"email",
"(",
"email",
")",
",",
"_additional_data",
"(",
"additional_data",
")",
",",
"_extension_additional_data",
"(",
"extension_additional_data",
")",
",",
")",
")",
"return",
"str",
"(",
"response",
".",
"data",
".",
"handle",
")"
] | Create a customer | [
"Create",
"a",
"customer"
] | python | train |
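Hedged call sketch; the api.customers attribute path and the value-object parameters are assumed from this SDK's structure (their fields mirror the XML built above):

def register(api, name, address, phone):
    # name/address/phone: this SDK's value objects; api: an authenticated
    # OpenProvider client. All of these are assumptions, not confirmed here.
    return api.customers.create_customer(
        name=name, gender='M', address=address,
        phone=phone, email='[email protected]',
    )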
alecthomas/importmagic | importmagic/index.py | https://github.com/alecthomas/importmagic/blob/c00f2b282d933e0a9780146a20792f9e31fc8e6f/importmagic/index.py#L302-L312 | def find(self, path):
"""Return the node for a path, or None."""
path = path.split('.')
node = self
while node._parent:
node = node._parent
for name in path:
node = node._tree.get(name, None)
if node is None or type(node) is float:
return None
return node | [
"def",
"find",
"(",
"self",
",",
"path",
")",
":",
"path",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"node",
"=",
"self",
"while",
"node",
".",
"_parent",
":",
"node",
"=",
"node",
".",
"_parent",
"for",
"name",
"in",
"path",
":",
"node",
"=",
"node",
".",
"_tree",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"node",
"is",
"None",
"or",
"type",
"(",
"node",
")",
"is",
"float",
":",
"return",
"None",
"return",
"node"
] | Return the node for a path, or None. | [
"Return",
"the",
"node",
"for",
"a",
"path",
"or",
"None",
"."
] | python | train |
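Usage per the library's documented flow — build an index over sys.path, then look dotted paths up:

import sys
from importmagic.index import SymbolIndex

index = SymbolIndex()
index.build_index(sys.path)
node = index.find('os.path.join')   # None if the path is unknown
print(node)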
GNS3/gns3-server | gns3server/compute/iou/iou_vm.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/iou/iou_vm.py#L1053-L1077 | def private_config_content(self, private_config):
"""
Update the private config
:param private_config: content of the private configuration file
"""
try:
private_config_path = os.path.join(self.working_dir, "private-config.cfg")
if private_config is None:
private_config = ''
# We disallow erasing the private config file
if len(private_config) == 0 and os.path.exists(private_config_path):
return
with open(private_config_path, 'w+', encoding='utf-8') as f:
if len(private_config) == 0:
f.write('')
else:
private_config = private_config.replace("%h", self._name)
f.write(private_config)
except OSError as e:
raise IOUError("Can't write private-config file '{}': {}".format(private_config_path, e)) | [
"def",
"private_config_content",
"(",
"self",
",",
"private_config",
")",
":",
"try",
":",
"private_config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"working_dir",
",",
"\"private-config.cfg\"",
")",
"if",
"private_config",
"is",
"None",
":",
"private_config",
"=",
"''",
"# We disallow erasing the private config file",
"if",
"len",
"(",
"private_config",
")",
"==",
"0",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"private_config_path",
")",
":",
"return",
"with",
"open",
"(",
"private_config_path",
",",
"'w+'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"if",
"len",
"(",
"private_config",
")",
"==",
"0",
":",
"f",
".",
"write",
"(",
"''",
")",
"else",
":",
"private_config",
"=",
"private_config",
".",
"replace",
"(",
"\"%h\"",
",",
"self",
".",
"_name",
")",
"f",
".",
"write",
"(",
"private_config",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"IOUError",
"(",
"\"Can't write private-config file '{}': {}\"",
".",
"format",
"(",
"private_config_path",
",",
"e",
")",
")"
] | Update the private config
:param private_config: content of the private configuration file | [
"Update",
"the",
"private",
"config"
] | python | train |
Contraz/demosys-py | demosys/scene/programs.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/scene/programs.py#L17-L28 | def draw(self, mesh, projection_matrix=None, view_matrix=None, camera_matrix=None, time=0):
"""
Draw code for the mesh. Should be overridden.
:param projection_matrix: projection_matrix (bytes)
:param view_matrix: view_matrix (bytes)
:param camera_matrix: camera_matrix (bytes)
:param time: The current time
"""
self.program["m_proj"].write(projection_matrix)
self.program["m_mv"].write(view_matrix)
mesh.vao.render(self.program) | [
"def",
"draw",
"(",
"self",
",",
"mesh",
",",
"projection_matrix",
"=",
"None",
",",
"view_matrix",
"=",
"None",
",",
"camera_matrix",
"=",
"None",
",",
"time",
"=",
"0",
")",
":",
"self",
".",
"program",
"[",
"\"m_proj\"",
"]",
".",
"write",
"(",
"projection_matrix",
")",
"self",
".",
"program",
"[",
"\"m_mv\"",
"]",
".",
"write",
"(",
"view_matrix",
")",
"mesh",
".",
"vao",
".",
"render",
"(",
"self",
".",
"program",
")"
] | Draw code for the mesh. Should be overridden.
:param projection_matrix: projection_matrix (bytes)
:param view_matrix: view_matrix (bytes)
:param camera_matrix: camera_matrix (bytes)
:param time: The current time | [
"Draw",
"code",
"for",
"the",
"mesh",
".",
"Should",
"be",
"overriden",
"."
] | python | valid |
sci-bots/mpm | mpm/api.py | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L36-L54 | def _islinklike(dir_path):
'''
Parameters
----------
dir_path : str
Directory path.
Returns
-------
bool
``True`` if :data:`dir_path` is a link *or* junction.
'''
dir_path = ph.path(dir_path)
if platform.system() == 'Windows':
if dir_path.isjunction():
return True
elif dir_path.islink():
return True
return False | [
"def",
"_islinklike",
"(",
"dir_path",
")",
":",
"dir_path",
"=",
"ph",
".",
"path",
"(",
"dir_path",
")",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"if",
"dir_path",
".",
"isjunction",
"(",
")",
":",
"return",
"True",
"elif",
"dir_path",
".",
"islink",
"(",
")",
":",
"return",
"True",
"return",
"False"
] | Parameters
----------
dir_path : str
Directory path.
Returns
-------
bool
``True`` if :data:`dir_path` is a link *or* junction. | [
"Parameters",
"----------",
"dir_path",
":",
"str",
"Directory",
"path",
"."
] | python | train |
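POSIX behavior sketch (on Windows the junction check runs first); _islinklike is module-private and imported here for illustration only:

import os
import tempfile
from mpm.api import _islinklike     # private helper, illustration only

with tempfile.TemporaryDirectory() as d:
    real = os.path.join(d, 'real')
    os.mkdir(real)
    link = os.path.join(d, 'link')
    os.symlink(real, link)          # POSIX; needs privileges on Windows
    assert _islinklike(link) and not _islinklike(real)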
opentok/Opentok-Python-SDK | opentok/opentok.py | https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L497-L531 | def signal(self, session_id, payload, connection_id=None):
"""
Send signals to all participants in an active OpenTok session or to a specific client
connected to that session.
:param String session_id: The session ID of the OpenTok session that receives the signal
:param Dictionary payload: Structure that contains both the type and data fields. These
correspond to the type and data parameters passed in the client signal received handlers
:param String connection_id: The connection_id parameter is an optional string used to
specify the connection ID of a client connected to the session. If you specify this value,
the signal is sent to the specified client. Otherwise, the signal is sent to all clients
connected to the session
"""
response = requests.post(
self.endpoints.signaling_url(session_id, connection_id),
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 204:
pass
elif response.status_code == 400:
raise SignalingError('One of the signal properties - data, type, sessionId or connectionId - is invalid.')
elif response.status_code == 403:
raise AuthError('You are not authorized to send the signal. Check your authentication credentials.')
elif response.status_code == 404:
raise SignalingError('The client specified by the connectionId property is not connected to the session.')
elif response.status_code == 413:
raise SignalingError('The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).')
else:
raise RequestError('An unexpected error occurred', response.status_code) | [
"def",
"signal",
"(",
"self",
",",
"session_id",
",",
"payload",
",",
"connection_id",
"=",
"None",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"endpoints",
".",
"signaling_url",
"(",
"session_id",
",",
"connection_id",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"payload",
")",
",",
"headers",
"=",
"self",
".",
"json_headers",
"(",
")",
",",
"proxies",
"=",
"self",
".",
"proxies",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"pass",
"elif",
"response",
".",
"status_code",
"==",
"400",
":",
"raise",
"SignalingError",
"(",
"'One of the signal properties - data, type, sessionId or connectionId - is invalid.'",
")",
"elif",
"response",
".",
"status_code",
"==",
"403",
":",
"raise",
"AuthError",
"(",
"'You are not authorized to send the signal. Check your authentication credentials.'",
")",
"elif",
"response",
".",
"status_code",
"==",
"404",
":",
"raise",
"SignalingError",
"(",
"'The client specified by the connectionId property is not connected to the session.'",
")",
"elif",
"response",
".",
"status_code",
"==",
"413",
":",
"raise",
"SignalingError",
"(",
"'The type string exceeds the maximum length (128 bytes), or the data string exceeds the maximum size (8 kB).'",
")",
"else",
":",
"raise",
"RequestError",
"(",
"'An unexpected error occurred'",
",",
"response",
".",
"status_code",
")"
] | Send signals to all participants in an active OpenTok session or to a specific client
connected to that session.
:param String session_id: The session ID of the OpenTok session that receives the signal
:param Dictionary payload: Structure that contains both the type and data fields. These
correspond to the type and data parameters passed in the client signal received handlers
:param String connection_id: The connection_id parameter is an optional string used to
specify the connection ID of a client connected to the session. If you specify this value,
the signal is sent to the specified client. Otherwise, the signal is sent to all clients
connected to the session | [
"Send",
"signals",
"to",
"all",
"participants",
"in",
"an",
"active",
"OpenTok",
"session",
"or",
"to",
"a",
"specific",
"client",
"connected",
"to",
"that",
"session",
"."
] | python | train |
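A usage sketch for signal(); the OpenTok constructor is the standard one from the SDK, while the key, secret, session ID, connection ID, and payload values are all placeholders.

from opentok import OpenTok

opentok = OpenTok('my_api_key', 'my_api_secret')
payload = {'type': 'chat', 'data': 'hello from the server'}
# Broadcast to every client connected to the session
opentok.signal('my_session_id', payload)
# Or target a single client via its connection ID
opentok.signal('my_session_id', payload, connection_id='my_connection_id')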
Erotemic/utool | utool/util_grabdata.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L1135-L1148 | def list_remote(remote_uri, verbose=False):
"""
remote_uri = '[email protected]'
"""
remote_uri1, remote_dpath = remote_uri.split(':')
if not remote_dpath:
remote_dpath = '.'
import utool as ut
out = ut.cmd('ssh', remote_uri1, 'ls -l %s' % (remote_dpath,), verbose=verbose)
import re
# Find lines that look like ls output
split_lines = [re.split(r'\s+', t) for t in out[0].split('\n')]
paths = [' '.join(t2[8:]) for t2 in split_lines if len(t2) > 8]
return paths | [
"def",
"list_remote",
"(",
"remote_uri",
",",
"verbose",
"=",
"False",
")",
":",
"remote_uri1",
",",
"remote_dpath",
"=",
"remote_uri",
".",
"split",
"(",
"':'",
")",
"if",
"not",
"remote_dpath",
":",
"remote_dpath",
"=",
"'.'",
"import",
"utool",
"as",
"ut",
"out",
"=",
"ut",
".",
"cmd",
"(",
"'ssh'",
",",
"remote_uri1",
",",
"'ls -l %s'",
"%",
"(",
"remote_dpath",
",",
")",
",",
"verbose",
"=",
"verbose",
")",
"import",
"re",
"# Find lines that look like ls output",
"split_lines",
"=",
"[",
"re",
".",
"split",
"(",
"r'\\s+'",
",",
"t",
")",
"for",
"t",
"in",
"out",
"[",
"0",
"]",
".",
"split",
"(",
"'\\n'",
")",
"]",
"paths",
"=",
"[",
"' '",
".",
"join",
"(",
"t2",
"[",
"8",
":",
"]",
")",
"for",
"t2",
"in",
"split_lines",
"if",
"len",
"(",
"t2",
")",
">",
"8",
"]",
"return",
"paths"
] | remote_uri = '[email protected]' | [
"remote_uri",
"=",
"user"
] | python | train |
django-auth-ldap/django-auth-ldap | django_auth_ldap/backend.py | https://github.com/django-auth-ldap/django-auth-ldap/blob/9ce3c2825527f8faa1793958b041816e63d839af/django_auth_ldap/backend.py#L822-L833 | def _bind_as(self, bind_dn, bind_password, sticky=False):
"""
Binds to the LDAP server with the given credentials. This does not trap
exceptions.
If sticky is True, then we will consider the connection to be bound for
the life of this object. If False, then the caller only wishes to test
the credentials, after which the connection will be considered unbound.
"""
self._get_connection().simple_bind_s(bind_dn, bind_password)
self._connection_bound = sticky | [
"def",
"_bind_as",
"(",
"self",
",",
"bind_dn",
",",
"bind_password",
",",
"sticky",
"=",
"False",
")",
":",
"self",
".",
"_get_connection",
"(",
")",
".",
"simple_bind_s",
"(",
"bind_dn",
",",
"bind_password",
")",
"self",
".",
"_connection_bound",
"=",
"sticky"
] | Binds to the LDAP server with the given credentials. This does not trap
exceptions.
If sticky is True, then we will consider the connection to be bound for
the life of this object. If False, then the caller only wishes to test
the credentials, after which the connection will be considered unbound. | [
"Binds",
"to",
"the",
"LDAP",
"server",
"with",
"the",
"given",
"credentials",
".",
"This",
"does",
"not",
"trap",
"exceptions",
"."
] | python | train |
bxlab/bx-python | lib/bx/bitset_builders.py | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/bitset_builders.py#L100-L128 | def binned_bitsets_proximity( f, chrom_col=0, start_col=1, end_col=2, strand_col=5, upstream=0, downstream=0 ):
"""Read a file into a dictionary of bitsets"""
last_chrom = None
last_bitset = None
bitsets = dict()
for line in f:
if line.startswith("#"): continue
# print "input=%s" % ( line ),
fields = line.split()
strand = "+"
if len(fields) >= strand_col + 1:
if fields[strand_col] == "-": strand = "-"
chrom = fields[chrom_col]
if chrom != last_chrom:
if chrom not in bitsets:
bitsets[chrom] = BinnedBitSet( MAX )
last_chrom = chrom
last_bitset = bitsets[chrom]
start, end = int( fields[start_col] ), int( fields[end_col] )
if strand == "+":
if upstream: start = max( 0, start - upstream )
if downstream: end = min( MAX, end + downstream )
if strand == "-":
if upstream: end = min( MAX, end + upstream )
if downstream: start = max( 0, start - downstream )
# print "set: start=%d\tend=%d" % ( start, end )
if end-start > 0:
last_bitset.set_range( start, end-start )
return bitsets | [
"def",
"binned_bitsets_proximity",
"(",
"f",
",",
"chrom_col",
"=",
"0",
",",
"start_col",
"=",
"1",
",",
"end_col",
"=",
"2",
",",
"strand_col",
"=",
"5",
",",
"upstream",
"=",
"0",
",",
"downstream",
"=",
"0",
")",
":",
"last_chrom",
"=",
"None",
"last_bitset",
"=",
"None",
"bitsets",
"=",
"dict",
"(",
")",
"for",
"line",
"in",
"f",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"continue",
"# print \"input=%s\" % ( line ),",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"strand",
"=",
"\"+\"",
"if",
"len",
"(",
"fields",
")",
">=",
"strand_col",
"+",
"1",
":",
"if",
"fields",
"[",
"strand_col",
"]",
"==",
"\"-\"",
":",
"strand",
"=",
"\"-\"",
"chrom",
"=",
"fields",
"[",
"chrom_col",
"]",
"if",
"chrom",
"!=",
"last_chrom",
":",
"if",
"chrom",
"not",
"in",
"bitsets",
":",
"bitsets",
"[",
"chrom",
"]",
"=",
"BinnedBitSet",
"(",
"MAX",
")",
"last_chrom",
"=",
"chrom",
"last_bitset",
"=",
"bitsets",
"[",
"chrom",
"]",
"start",
",",
"end",
"=",
"int",
"(",
"fields",
"[",
"start_col",
"]",
")",
",",
"int",
"(",
"fields",
"[",
"end_col",
"]",
")",
"if",
"strand",
"==",
"\"+\"",
":",
"if",
"upstream",
":",
"start",
"=",
"max",
"(",
"0",
",",
"start",
"-",
"upstream",
")",
"if",
"downstream",
":",
"end",
"=",
"min",
"(",
"MAX",
",",
"end",
"+",
"downstream",
")",
"if",
"strand",
"==",
"\"-\"",
":",
"if",
"upstream",
":",
"end",
"=",
"min",
"(",
"MAX",
",",
"end",
"+",
"upstream",
")",
"if",
"downstream",
":",
"start",
"=",
"max",
"(",
"0",
",",
"start",
"-",
"downstream",
")",
"# print \"set: start=%d\\tend=%d\" % ( start, end )",
"if",
"end",
"-",
"start",
">",
"0",
":",
"last_bitset",
".",
"set_range",
"(",
"start",
",",
"end",
"-",
"start",
")",
"return",
"bitsets"
] | Read a file into a dictionary of bitsets | [
"Read",
"a",
"file",
"into",
"a",
"dictionary",
"of",
"bitsets"
] | python | train |
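A minimal sketch of running the reader above over a BED-like file, padding each interval 500 bases upstream of its strand; the file name is a placeholder, and count_range/size are the usual bx BinnedBitSet accessors.

from bx.bitset_builders import binned_bitsets_proximity

with open('genes.bed') as f:
    bitsets = binned_bitsets_proximity(f, upstream=500)
for chrom, bits in bitsets.items():
    # Total number of covered bases per chromosome
    print(chrom, bits.count_range(0, bits.size))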
XuShaohua/bcloud | bcloud/pcs.py | https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/pcs.py#L43-L54 | def get_user_uk(cookie, tokens):
'''获取用户的uk'''
url = 'http://yun.baidu.com'
req = net.urlopen(url, headers={'Cookie': cookie.header_output()})
if req:
content = req.data.decode()
match = re.findall('/share/home\?uk=(\d+)" target=', content)
if len(match) == 1:
return match[0]
else:
logger.warn('pcs.get_user_uk(), failed to parse uk, %s' % url)
return None | [
"def",
"get_user_uk",
"(",
"cookie",
",",
"tokens",
")",
":",
"url",
"=",
"'http://yun.baidu.com'",
"req",
"=",
"net",
".",
"urlopen",
"(",
"url",
",",
"headers",
"=",
"{",
"'Cookie'",
":",
"cookie",
".",
"header_output",
"(",
")",
"}",
")",
"if",
"req",
":",
"content",
"=",
"req",
".",
"data",
".",
"decode",
"(",
")",
"match",
"=",
"re",
".",
"findall",
"(",
"'/share/home\\?uk=(\\d+)\" target='",
",",
"content",
")",
"if",
"len",
"(",
"match",
")",
"==",
"1",
":",
"return",
"match",
"[",
"0",
"]",
"else",
":",
"logger",
".",
"warn",
"(",
"'pcs.get_user_uk(), failed to parse uk, %s'",
"%",
"url",
")",
"return",
"None"
] | Get the uk of the user | [
"Get",
"the",
"uk",
"of",
"the",
"user"
] | python | train |
mitsei/dlkit | dlkit/handcar/repository/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L2187-L2219 | def get_asset_notification_session_for_repository(self, asset_receiver, repository_id, proxy):
"""Gets the asset notification session for the given repository.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetNotificationSession) - an
AssetNotificationSession
raise: NotFound - repository_id not found
raise: NullArgument - asset_receiver or repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_notification() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_notification() and
supports_visible_federation() are true.
"""
if not repository_id or not asset_receiver:
raise NullArgument()
if not self.supports_asset_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed('import error')
proxy = self._convert_proxy(proxy)
try:
session = sessions.AssetAdminSession(asset_receiver, repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed('attribute error')
return session | [
"def",
"get_asset_notification_session_for_repository",
"(",
"self",
",",
"asset_receiver",
",",
"repository_id",
",",
"proxy",
")",
":",
"if",
"not",
"repository_id",
"or",
"not",
"asset_receiver",
":",
"raise",
"NullArgument",
"(",
")",
"if",
"not",
"self",
".",
"supports_asset_lookup",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"OperationFailed",
"(",
"'import error'",
")",
"proxy",
"=",
"self",
".",
"_convert_proxy",
"(",
"proxy",
")",
"try",
":",
"session",
"=",
"sessions",
".",
"AssetAdminSession",
"(",
"asset_receiver",
",",
"repository_id",
",",
"proxy",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"except",
"AttributeError",
":",
"raise",
"OperationFailed",
"(",
"'attribute error'",
")",
"return",
"session"
] | Gets the asset notification session for the given repository.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetNotificationSession) - an
AssetNotificationSession
raise: NotFound - repository_id not found
raise: NullArgument - asset_receiver or repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_asset_notification() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_asset_notification() and
supports_visible_federation() are true. | [
"Gets",
"the",
"asset",
"notification",
"session",
"for",
"the",
"given",
"repository",
"."
] | python | train |
F-Secure/see | see/observer.py | https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/see/observer.py#L128-L134 | def asynchronous(function, event):
"""
Runs the function asynchronously taking care of exceptions.
"""
thread = Thread(target=synchronous, args=(function, event))
thread.daemon = True
thread.start() | [
"def",
"asynchronous",
"(",
"function",
",",
"event",
")",
":",
"thread",
"=",
"Thread",
"(",
"target",
"=",
"synchronous",
",",
"args",
"=",
"(",
"function",
",",
"event",
")",
")",
"thread",
".",
"daemon",
"=",
"True",
"thread",
".",
"start",
"(",
")"
] | Runs the function asynchronously taking care of exceptions. | [
"Runs",
"the",
"function",
"asynchronously",
"taking",
"care",
"of",
"exceptions",
"."
] | python | train |
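A small sketch of the fire-and-forget pattern above; the handler and event values are illustrative stand-ins.

def handler(event):
    print('observed', event)

asynchronous(handler, 'vm_started')
# Returns immediately: a daemon thread runs handler('vm_started'),
# and the synchronous() wrapper is what traps any exception it raises.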
saltstack/salt | salt/modules/restartcheck.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/restartcheck.py#L243-L278 | def _kernel_versions_debian():
'''
Last installed kernel name, for Debian based systems.
Returns:
List with possible names of last installed kernel
as they are probably interpreted in output of `uname -a` command.
'''
kernel_get_selections = __salt__['cmd.run']('dpkg --get-selections linux-image-*')
kernels = []
kernel_versions = []
for line in kernel_get_selections.splitlines():
kernels.append(line)
try:
kernel = kernels[-2]
except IndexError:
kernel = kernels[0]
kernel = kernel.rstrip('\t\tinstall')
kernel_get_version = __salt__['cmd.run']('apt-cache policy ' + kernel)
for line in kernel_get_version.splitlines():
if line.startswith(' Installed: '):
kernel_v = line.strip(' Installed: ')
kernel_versions.append(kernel_v)
break
if __grains__['os'] == 'Ubuntu':
kernel_v = kernel_versions[0].rsplit('.', 1)
kernel_ubuntu_generic = kernel_v[0] + '-generic #' + kernel_v[1]
kernel_ubuntu_lowlatency = kernel_v[0] + '-lowlatency #' + kernel_v[1]
kernel_versions.extend([kernel_ubuntu_generic, kernel_ubuntu_lowlatency])
return kernel_versions | [
"def",
"_kernel_versions_debian",
"(",
")",
":",
"kernel_get_selections",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'dpkg --get-selections linux-image-*'",
")",
"kernels",
"=",
"[",
"]",
"kernel_versions",
"=",
"[",
"]",
"for",
"line",
"in",
"kernel_get_selections",
".",
"splitlines",
"(",
")",
":",
"kernels",
".",
"append",
"(",
"line",
")",
"try",
":",
"kernel",
"=",
"kernels",
"[",
"-",
"2",
"]",
"except",
"IndexError",
":",
"kernel",
"=",
"kernels",
"[",
"0",
"]",
"kernel",
"=",
"kernel",
".",
"rstrip",
"(",
"'\\t\\tinstall'",
")",
"kernel_get_version",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'apt-cache policy '",
"+",
"kernel",
")",
"for",
"line",
"in",
"kernel_get_version",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"' Installed: '",
")",
":",
"kernel_v",
"=",
"line",
".",
"strip",
"(",
"' Installed: '",
")",
"kernel_versions",
".",
"append",
"(",
"kernel_v",
")",
"break",
"if",
"__grains__",
"[",
"'os'",
"]",
"==",
"'Ubuntu'",
":",
"kernel_v",
"=",
"kernel_versions",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"kernel_ubuntu_generic",
"=",
"kernel_v",
"[",
"0",
"]",
"+",
"'-generic #'",
"+",
"kernel_v",
"[",
"1",
"]",
"kernel_ubuntu_lowlatency",
"=",
"kernel_v",
"[",
"0",
"]",
"+",
"'-lowlatency #'",
"+",
"kernel_v",
"[",
"1",
"]",
"kernel_versions",
".",
"extend",
"(",
"[",
"kernel_ubuntu_generic",
",",
"kernel_ubuntu_lowlatency",
"]",
")",
"return",
"kernel_versions"
] | Last installed kernel name, for Debian based systems.
Returns:
List with possible names of last installed kernel
as they are probably interpreted in output of `uname -a` command. | [
"Last",
"installed",
"kernel",
"name",
"for",
"Debian",
"based",
"systems",
"."
] | python | train |
ebu/PlugIt | plugit_proxy/views.py | https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L936-L944 | def setUser(request):
"""In standalone mode, change the current user"""
if not settings.PIAPI_STANDALONE or settings.PIAPI_REALUSERS:
raise Http404
request.session['plugit-standalone-usermode'] = request.GET.get('mode')
return HttpResponse('') | [
"def",
"setUser",
"(",
"request",
")",
":",
"if",
"not",
"settings",
".",
"PIAPI_STANDALONE",
"or",
"settings",
".",
"PIAPI_REALUSERS",
":",
"raise",
"Http404",
"request",
".",
"session",
"[",
"'plugit-standalone-usermode'",
"]",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'mode'",
")",
"return",
"HttpResponse",
"(",
"''",
")"
] | In standalone mode, change the current user | [
"In",
"standalone",
"mode",
"change",
"the",
"current",
"user"
] | python | train |
yahoo/TensorFlowOnSpark | examples/imagenet/inception/image_processing.py | https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/imagenet/inception/image_processing.py#L164-L195 | def distort_color(image, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for name_scope.
Returns:
color-distorted image
"""
with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image | [
"def",
"distort_color",
"(",
"image",
",",
"thread_id",
"=",
"0",
",",
"scope",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"values",
"=",
"[",
"image",
"]",
",",
"name",
"=",
"scope",
",",
"default_name",
"=",
"'distort_color'",
")",
":",
"color_ordering",
"=",
"thread_id",
"%",
"2",
"if",
"color_ordering",
"==",
"0",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"image",
",",
"max_delta",
"=",
"32.",
"/",
"255.",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"image",
",",
"max_delta",
"=",
"0.2",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"elif",
"color_ordering",
"==",
"1",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_brightness",
"(",
"image",
",",
"max_delta",
"=",
"32.",
"/",
"255.",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_contrast",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_saturation",
"(",
"image",
",",
"lower",
"=",
"0.5",
",",
"upper",
"=",
"1.5",
")",
"image",
"=",
"tf",
".",
"image",
".",
"random_hue",
"(",
"image",
",",
"max_delta",
"=",
"0.2",
")",
"# The random_* ops do not necessarily clamp.",
"image",
"=",
"tf",
".",
"clip_by_value",
"(",
"image",
",",
"0.0",
",",
"1.0",
")",
"return",
"image"
] | Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for name_scope.
Returns:
color-distorted image | [
"Distort",
"the",
"color",
"of",
"the",
"image",
"."
] | python | train |
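A hedged sketch of calling distort_color inside a TF 1.x preprocessing graph, in the same style as the record; the input tensor is a stand-in for a decoded image.

import tensorflow as tf

raw_image = tf.zeros([224, 224, 3], dtype=tf.uint8)  # stand-in decoded image
image = tf.image.convert_image_dtype(raw_image, tf.float32)  # floats in [0, 1]
# Even/odd thread IDs select the two different op orderings
image = distort_color(image, thread_id=1)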
redcap-tools/PyCap | redcap/project.py | https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L637-L671 | def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0] | [
"def",
"export_users",
"(",
"self",
",",
"format",
"=",
"'json'",
")",
":",
"pl",
"=",
"self",
".",
"__basepl",
"(",
"content",
"=",
"'user'",
",",
"format",
"=",
"format",
")",
"return",
"self",
".",
"_call_api",
"(",
"pl",
",",
"'exp_user'",
")",
"[",
"0",
"]"
] | Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string | [
"Export",
"the",
"users",
"of",
"the",
"Project"
] | python | train |
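A usage sketch, assuming PyCap's usual Project constructor; the URL and token are placeholders.

from redcap import Project

project = Project('https://redcap.example.org/api/', 'MY_API_TOKEN')
users = project.export_users()  # default format='json' -> list of dicts
for user in users:
    print(user['username'], user['email'], user['data_export'])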
skelsec/minidump | minidump/minidumpreader.py | https://github.com/skelsec/minidump/blob/0c4dcabe6f11d7a403440919ffa9e3c9889c5212/minidump/minidumpreader.py#L87-L101 | def align(self, alignment = None):
"""
Repositions the current reader to match architecture alignment
"""
if alignment is None:
if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
alignment = 8
else:
alignment = 4
offset = self.current_position % alignment
if offset == 0:
return
offset_to_aligned = (alignment - offset) % alignment
self.seek(offset_to_aligned, 1)
return | [
"def",
"align",
"(",
"self",
",",
"alignment",
"=",
"None",
")",
":",
"if",
"alignment",
"is",
"None",
":",
"if",
"self",
".",
"reader",
".",
"sysinfo",
".",
"ProcessorArchitecture",
"==",
"PROCESSOR_ARCHITECTURE",
".",
"AMD64",
":",
"alignment",
"=",
"8",
"else",
":",
"alignment",
"=",
"4",
"offset",
"=",
"self",
".",
"current_position",
"%",
"alignment",
"if",
"offset",
"==",
"0",
":",
"return",
"offset_to_aligned",
"=",
"(",
"alignment",
"-",
"offset",
")",
"%",
"alignment",
"self",
".",
"seek",
"(",
"offset_to_aligned",
",",
"1",
")",
"return"
] | Repositions the current reader to match architecture alignment | [
"Repositions",
"the",
"current",
"reader",
"to",
"match",
"architecture",
"alignment"
] | python | train |
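The realignment above is plain modular arithmetic; here is a standalone sketch of the same computation for the 8-byte (AMD64) case:

position = 13
alignment = 8
offset = position % alignment                         # 5
offset_to_aligned = (alignment - offset) % alignment  # 3
print(position + offset_to_aligned)                   # 16, the next aligned offset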
markovmodel/msmtools | msmtools/estimation/dense/covariance.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/dense/covariance.py#L31-L73 | def tmatrix_cov(C, row=None):
r"""Covariance tensor for the non-reversible transition matrix ensemble
Normally the covariance tensor cov(p_ij, p_kl) would carry four indices
(i,j,k,l). In the non-reversible case rows are independent so that
cov(p_ij, p_kl)=0 for i not equal to k. Therefore the function will only
return cov(p_ij, p_ik).
Parameters
----------
C : (M, M) ndarray
Count matrix
row : int (optional)
If row is given return covariance matrix for specified row only
Returns
-------
cov : (M, M, M) ndarray
Covariance tensor
"""
if row is None:
alpha = C + 1.0 # Dirichlet parameters
alpha0 = alpha.sum(axis=1) # Sum of parameters (per row)
norm = alpha0 ** 2 * (alpha0 + 1.0)
"""Non-normalized covariance tensor"""
Z = -alpha[:, :, np.newaxis] * alpha[:, np.newaxis, :]
"""Correct-diagonal"""
ind = np.diag_indices(C.shape[0])
Z[:, ind[0], ind[1]] += alpha0[:, np.newaxis] * alpha
"""Covariance matrix"""
cov = Z / norm[:, np.newaxis, np.newaxis]
return cov
else:
alpha = C[row, :] + 1.0
return dirichlet_covariance(alpha) | [
"def",
"tmatrix_cov",
"(",
"C",
",",
"row",
"=",
"None",
")",
":",
"if",
"row",
"is",
"None",
":",
"alpha",
"=",
"C",
"+",
"1.0",
"# Dirichlet parameters",
"alpha0",
"=",
"alpha",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"# Sum of paramters (per row)",
"norm",
"=",
"alpha0",
"**",
"2",
"*",
"(",
"alpha0",
"+",
"1.0",
")",
"\"\"\"Non-normalized covariance tensor\"\"\"",
"Z",
"=",
"-",
"alpha",
"[",
":",
",",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"alpha",
"[",
":",
",",
"np",
".",
"newaxis",
",",
":",
"]",
"\"\"\"Correct-diagonal\"\"\"",
"ind",
"=",
"np",
".",
"diag_indices",
"(",
"C",
".",
"shape",
"[",
"0",
"]",
")",
"Z",
"[",
":",
",",
"ind",
"[",
"0",
"]",
",",
"ind",
"[",
"1",
"]",
"]",
"+=",
"alpha0",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"alpha",
"\"\"\"Covariance matrix\"\"\"",
"cov",
"=",
"Z",
"/",
"norm",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"return",
"cov",
"else",
":",
"alpha",
"=",
"C",
"[",
"row",
",",
":",
"]",
"+",
"1.0",
"return",
"dirichlet_covariance",
"(",
"alpha",
")"
] | r"""Covariance tensor for the non-reversible transition matrix ensemble
Normally the covariance tensor cov(p_ij, p_kl) would carry four indices
(i,j,k,l). In the non-reversible case rows are independent so that
cov(p_ij, p_kl)=0 for i not equal to k. Therefore the function will only
return cov(p_ij, p_ik).
Parameters
----------
C : (M, M) ndarray
Count matrix
row : int (optional)
If row is given return covariance matrix for specified row only
Returns
-------
cov : (M, M, M) ndarray
Covariance tensor | [
"r",
"Covariance",
"tensor",
"for",
"the",
"non",
"-",
"reversible",
"transition",
"matrix",
"ensemble"
] | python | train |
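A minimal sketch of calling tmatrix_cov on a 2x2 count matrix; the counts are illustrative.

import numpy as np
from msmtools.estimation.dense.covariance import tmatrix_cov

C = np.array([[5, 2], [1, 8]])
cov = tmatrix_cov(C)              # shape (2, 2, 2): one covariance matrix per row
cov_row0 = tmatrix_cov(C, row=0)  # Dirichlet covariance of the first row only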
SeattleTestbed/seash | pyreadline/console/console.py | https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/console/console.py#L508-L523 | def getkeypress(self):
u'''Return next key press event from the queue, ignoring others.'''
while 1:
e = self.get()
if e.type == u'KeyPress' and e.keycode not in key_modifiers:
log(u"console.getkeypress %s"%e)
if e.keyinfo.keyname == u'next':
self.scroll_window(12)
elif e.keyinfo.keyname == u'prior':
self.scroll_window(-12)
else:
return e
elif ((e.type == u'KeyRelease') and
(e.keyinfo == KeyPress('S', False, True, False, 'S'))):
log(u"getKeypress:%s,%s,%s"%(e.keyinfo, e.keycode, e.type))
return e | [
"def",
"getkeypress",
"(",
"self",
")",
":",
"while",
"1",
":",
"e",
"=",
"self",
".",
"get",
"(",
")",
"if",
"e",
".",
"type",
"==",
"u'KeyPress'",
"and",
"e",
".",
"keycode",
"not",
"in",
"key_modifiers",
":",
"log",
"(",
"u\"console.getkeypress %s\"",
"%",
"e",
")",
"if",
"e",
".",
"keyinfo",
".",
"keyname",
"==",
"u'next'",
":",
"self",
".",
"scroll_window",
"(",
"12",
")",
"elif",
"e",
".",
"keyinfo",
".",
"keyname",
"==",
"u'prior'",
":",
"self",
".",
"scroll_window",
"(",
"-",
"12",
")",
"else",
":",
"return",
"e",
"elif",
"(",
"(",
"e",
".",
"type",
"==",
"u'KeyRelease'",
")",
"and",
"(",
"e",
".",
"keyinfo",
"==",
"KeyPress",
"(",
"'S'",
",",
"False",
",",
"True",
",",
"False",
",",
"'S'",
")",
")",
")",
":",
"log",
"(",
"u\"getKeypress:%s,%s,%s\"",
"%",
"(",
"e",
".",
"keyinfo",
",",
"e",
".",
"keycode",
",",
"e",
".",
"type",
")",
")",
"return",
"e"
] | u'''Return next key press event from the queue, ignoring others. | [
"u",
"Return",
"next",
"key",
"press",
"event",
"from",
"the",
"queue",
"ignoring",
"others",
"."
] | python | train |
twilio/twilio-python | twilio/rest/api/v2010/account/call/feedback_summary.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/call/feedback_summary.py#L118-L127 | def get_instance(self, payload):
"""
Build an instance of FeedbackSummaryInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance
"""
return FeedbackSummaryInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | [
"def",
"get_instance",
"(",
"self",
",",
"payload",
")",
":",
"return",
"FeedbackSummaryInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
")"
] | Build an instance of FeedbackSummaryInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance | [
"Build",
"an",
"instance",
"of",
"FeedbackSummaryInstance"
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/eos.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/eos.py#L220-L265 | def plot_ax(self, ax=None, fontsize=12, **kwargs):
"""
Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
text (str): Legend text (options)
Returns:
Matplotlib figure object.
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = ["Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa" %
(self.b0, self.b0_GPa),
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
ax.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
ax.grid(True)
ax.set_xlabel("Volume $\\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(loc="best", shadow=True)
# Add text with fit parameters.
ax.text(0.5, 0.5, text, fontsize=fontsize, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
return fig | [
"def",
"plot_ax",
"(",
"self",
",",
"ax",
"=",
"None",
",",
"fontsize",
"=",
"12",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
",",
"fig",
",",
"plt",
"=",
"get_ax_fig_plt",
"(",
"ax",
"=",
"ax",
")",
"color",
"=",
"kwargs",
".",
"get",
"(",
"\"color\"",
",",
"\"r\"",
")",
"label",
"=",
"kwargs",
".",
"get",
"(",
"\"label\"",
",",
"\"{} fit\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"lines",
"=",
"[",
"\"Equation of State: %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
",",
"\"Minimum energy = %1.2f eV\"",
"%",
"self",
".",
"e0",
",",
"\"Minimum or reference volume = %1.2f Ang^3\"",
"%",
"self",
".",
"v0",
",",
"\"Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa\"",
"%",
"(",
"self",
".",
"b0",
",",
"self",
".",
"b0_GPa",
")",
",",
"\"Derivative of bulk modulus wrt pressure = %1.2f\"",
"%",
"self",
".",
"b1",
"]",
"text",
"=",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
"text",
"=",
"kwargs",
".",
"get",
"(",
"\"text\"",
",",
"text",
")",
"# Plot input data.",
"ax",
".",
"plot",
"(",
"self",
".",
"volumes",
",",
"self",
".",
"energies",
",",
"linestyle",
"=",
"\"None\"",
",",
"marker",
"=",
"\"o\"",
",",
"color",
"=",
"color",
")",
"# Plot eos fit.",
"vmin",
",",
"vmax",
"=",
"min",
"(",
"self",
".",
"volumes",
")",
",",
"max",
"(",
"self",
".",
"volumes",
")",
"vmin",
",",
"vmax",
"=",
"(",
"vmin",
"-",
"0.01",
"*",
"abs",
"(",
"vmin",
")",
",",
"vmax",
"+",
"0.01",
"*",
"abs",
"(",
"vmax",
")",
")",
"vfit",
"=",
"np",
".",
"linspace",
"(",
"vmin",
",",
"vmax",
",",
"100",
")",
"ax",
".",
"plot",
"(",
"vfit",
",",
"self",
".",
"func",
"(",
"vfit",
")",
",",
"linestyle",
"=",
"\"dashed\"",
",",
"color",
"=",
"color",
",",
"label",
"=",
"label",
")",
"ax",
".",
"grid",
"(",
"True",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Volume $\\\\AA^3$\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"Energy (eV)\"",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"\"best\"",
",",
"shadow",
"=",
"True",
")",
"# Add text with fit parameters.",
"ax",
".",
"text",
"(",
"0.5",
",",
"0.5",
",",
"text",
",",
"fontsize",
"=",
"fontsize",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
")",
"return",
"fig"
] | Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
text (str): Legend text (options)
Returns:
Matplotlib figure object. | [
"Plot",
"the",
"equation",
"of",
"state",
"on",
"axis",
"ax"
] | python | train |
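A hedged usage sketch via pymatgen's public EOS wrapper, which is the usual route to a fitted object exposing plot_ax; the volume and energy numbers are made up.

from pymatgen.analysis.eos import EOS

volumes = [12.0, 12.5, 13.0, 13.5, 14.0]
energies = [-56.2, -56.6, -56.7, -56.5, -56.1]
fit = EOS(eos_name='birch_murnaghan').fit(volumes, energies)
fig = fit.plot_ax(color='b', fontsize=10)
fig.savefig('eos_fit.png')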
Clinical-Genomics/scout | scout/adapter/mongo/panel.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/panel.py#L52-L115 | def load_omim_panel(self, api_key, institute=None):
"""Create and load the OMIM-AUTO panel"""
existing_panel = self.gene_panel(panel_id='OMIM-AUTO')
if not existing_panel:
LOG.warning("OMIM-AUTO does not exists in database")
LOG.info('Creating a first version')
version = 1.0
if existing_panel:
version = float(math.floor(existing_panel['version']) + 1)
LOG.info("Setting version to %s", version)
try:
mim_files = fetch_mim_files(api_key=api_key, genemap2=True, mim2genes=True)
except Exception as err:
raise err
date_string = None
# Get the correct date when omim files where released
for line in mim_files['genemap2']:
if 'Generated' in line:
date_string = line.split(':')[-1].lstrip().rstrip()
date_obj = get_date(date_string)
if existing_panel:
if existing_panel['date'] == date_obj:
LOG.warning("There is no new version of OMIM")
return
panel_data = {}
panel_data['path'] = None
panel_data['type'] = 'clinical'
panel_data['date'] = date_obj
panel_data['panel_id'] = 'OMIM-AUTO'
panel_data['institute'] = institute or 'cust002'
panel_data['version'] = version
panel_data['display_name'] = 'OMIM-AUTO'
panel_data['genes'] = []
alias_genes = self.genes_by_alias()
genes = get_omim_panel_genes(
genemap2_lines = mim_files['genemap2'],
mim2gene_lines = mim_files['mim2genes'],
alias_genes = alias_genes,
)
for gene in genes:
panel_data['genes'].append(gene)
panel_obj = build_panel(panel_data, self)
if existing_panel:
new_genes = self.compare_mim_panels(existing_panel, panel_obj)
if new_genes:
self.update_mim_version(new_genes, panel_obj, old_version=existing_panel['version'])
else:
LOG.info("The new version of omim does not differ from the old one")
LOG.info("No update is added")
return
self.add_gene_panel(panel_obj) | [
"def",
"load_omim_panel",
"(",
"self",
",",
"api_key",
",",
"institute",
"=",
"None",
")",
":",
"existing_panel",
"=",
"self",
".",
"gene_panel",
"(",
"panel_id",
"=",
"'OMIM-AUTO'",
")",
"if",
"not",
"existing_panel",
":",
"LOG",
".",
"warning",
"(",
"\"OMIM-AUTO does not exists in database\"",
")",
"LOG",
".",
"info",
"(",
"'Creating a first version'",
")",
"version",
"=",
"1.0",
"if",
"existing_panel",
":",
"version",
"=",
"float",
"(",
"math",
".",
"floor",
"(",
"existing_panel",
"[",
"'version'",
"]",
")",
"+",
"1",
")",
"LOG",
".",
"info",
"(",
"\"Setting version to %s\"",
",",
"version",
")",
"try",
":",
"mim_files",
"=",
"fetch_mim_files",
"(",
"api_key",
"=",
"api_key",
",",
"genemap2",
"=",
"True",
",",
"mim2genes",
"=",
"True",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"err",
"date_string",
"=",
"None",
"# Get the correct date when omim files where released",
"for",
"line",
"in",
"mim_files",
"[",
"'genemap2'",
"]",
":",
"if",
"'Generated'",
"in",
"line",
":",
"date_string",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
".",
"lstrip",
"(",
")",
".",
"rstrip",
"(",
")",
"date_obj",
"=",
"get_date",
"(",
"date_string",
")",
"if",
"existing_panel",
":",
"if",
"existing_panel",
"[",
"'date'",
"]",
"==",
"date_obj",
":",
"LOG",
".",
"warning",
"(",
"\"There is no new version of OMIM\"",
")",
"return",
"panel_data",
"=",
"{",
"}",
"panel_data",
"[",
"'path'",
"]",
"=",
"None",
"panel_data",
"[",
"'type'",
"]",
"=",
"'clinical'",
"panel_data",
"[",
"'date'",
"]",
"=",
"date_obj",
"panel_data",
"[",
"'panel_id'",
"]",
"=",
"'OMIM-AUTO'",
"panel_data",
"[",
"'institute'",
"]",
"=",
"institute",
"or",
"'cust002'",
"panel_data",
"[",
"'version'",
"]",
"=",
"version",
"panel_data",
"[",
"'display_name'",
"]",
"=",
"'OMIM-AUTO'",
"panel_data",
"[",
"'genes'",
"]",
"=",
"[",
"]",
"alias_genes",
"=",
"self",
".",
"genes_by_alias",
"(",
")",
"genes",
"=",
"get_omim_panel_genes",
"(",
"genemap2_lines",
"=",
"mim_files",
"[",
"'genemap2'",
"]",
",",
"mim2gene_lines",
"=",
"mim_files",
"[",
"'mim2genes'",
"]",
",",
"alias_genes",
"=",
"alias_genes",
",",
")",
"for",
"gene",
"in",
"genes",
":",
"panel_data",
"[",
"'genes'",
"]",
".",
"append",
"(",
"gene",
")",
"panel_obj",
"=",
"build_panel",
"(",
"panel_data",
",",
"self",
")",
"if",
"existing_panel",
":",
"new_genes",
"=",
"self",
".",
"compare_mim_panels",
"(",
"existing_panel",
",",
"panel_obj",
")",
"if",
"new_genes",
":",
"self",
".",
"update_mim_version",
"(",
"new_genes",
",",
"panel_obj",
",",
"old_version",
"=",
"existing_panel",
"[",
"'version'",
"]",
")",
"else",
":",
"LOG",
".",
"info",
"(",
"\"The new version of omim does not differ from the old one\"",
")",
"LOG",
".",
"info",
"(",
"\"No update is added\"",
")",
"return",
"self",
".",
"add_gene_panel",
"(",
"panel_obj",
")"
] | Create and load the OMIM-AUTO panel | [
"Create",
"and",
"load",
"the",
"OMIM",
"-",
"AUTO",
"panel"
] | python | test |
gwastro/pycbc | pycbc/population/scale_injections.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/scale_injections.py#L238-L270 | def process_injections(hdffile):
"""Function to read in the injection file and
extract the found injections and all injections
Parameters
----------
hdffile: hdf file
File for which injections are to be processed
Returns
-------
data: dictionary
Dictionary containing injection read from the input file
"""
data = {}
with h5py.File(hdffile, 'r') as inp:
found_index = inp['found_after_vetoes/injection_index'][:]
for param in _save_params:
data[param] = inp['injections/'+param][:]
ifar = np.zeros_like(data[_save_params[0]])
ifar[found_index] = inp['found_after_vetoes/ifar'][:]
data['ifar'] = ifar
stat = np.zeros_like(data[_save_params[0]])
stat[found_index] = inp['found_after_vetoes/stat'][:]
data['stat'] = stat
return data | [
"def",
"process_injections",
"(",
"hdffile",
")",
":",
"data",
"=",
"{",
"}",
"with",
"h5py",
".",
"File",
"(",
"hdffile",
",",
"'r'",
")",
"as",
"inp",
":",
"found_index",
"=",
"inp",
"[",
"'found_after_vetoes/injection_index'",
"]",
"[",
":",
"]",
"for",
"param",
"in",
"_save_params",
":",
"data",
"[",
"param",
"]",
"=",
"inp",
"[",
"'injections/'",
"+",
"param",
"]",
"[",
":",
"]",
"ifar",
"=",
"np",
".",
"zeros_like",
"(",
"data",
"[",
"_save_params",
"[",
"0",
"]",
"]",
")",
"ifar",
"[",
"found_index",
"]",
"=",
"inp",
"[",
"'found_after_vetoes/ifar'",
"]",
"[",
":",
"]",
"data",
"[",
"'ifar'",
"]",
"=",
"ifar",
"stat",
"=",
"np",
".",
"zeros_like",
"(",
"data",
"[",
"_save_params",
"[",
"0",
"]",
"]",
")",
"stat",
"[",
"found_index",
"]",
"=",
"inp",
"[",
"'found_after_vetoes/stat'",
"]",
"[",
":",
"]",
"data",
"[",
"'stat'",
"]",
"=",
"stat",
"return",
"data"
] | Function to read in the injection file and
extract the found injections and all injections
Parameters
----------
hdffile: hdf file
File for which injections are to be processed
Returns
-------
data: dictionary
Dictionary containing injection read from the input file | [
"Function",
"to",
"read",
"in",
"the",
"injection",
"file",
"and",
"extract",
"the",
"found",
"injections",
"and",
"all",
"injections"
] | python | train |
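A usage sketch for the reader above; the file name is a placeholder and the IFAR threshold is just an example cut.

from pycbc.population.scale_injections import process_injections

data = process_injections('INJFIND.hdf')  # placeholder injection-find file
found = data['ifar'] > 1.0                # example inverse-FAR threshold
print(found.sum(), 'of', len(data['ifar']), 'injections pass the cut')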
inveniosoftware-contrib/json-merger | json_merger/contrib/inspirehep/author_util.py | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/contrib/inspirehep/author_util.py#L71-L82 | def simple_tokenize(name):
"""Simple tokenizer function to be used with the normalizers."""
last_names, first_names = name.split(',')
last_names = _RE_NAME_TOKEN_SEPARATOR.split(last_names)
first_names = _RE_NAME_TOKEN_SEPARATOR.split(first_names)
first_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
for n in first_names if n]
last_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
for n in last_names if n]
return {'lastnames': last_names,
'nonlastnames': first_names} | [
"def",
"simple_tokenize",
"(",
"name",
")",
":",
"last_names",
",",
"first_names",
"=",
"name",
".",
"split",
"(",
"','",
")",
"last_names",
"=",
"_RE_NAME_TOKEN_SEPARATOR",
".",
"split",
"(",
"last_names",
")",
"first_names",
"=",
"_RE_NAME_TOKEN_SEPARATOR",
".",
"split",
"(",
"first_names",
")",
"first_names",
"=",
"[",
"NameToken",
"(",
"n",
")",
"if",
"len",
"(",
"n",
")",
">",
"1",
"else",
"NameInitial",
"(",
"n",
")",
"for",
"n",
"in",
"first_names",
"if",
"n",
"]",
"last_names",
"=",
"[",
"NameToken",
"(",
"n",
")",
"if",
"len",
"(",
"n",
")",
">",
"1",
"else",
"NameInitial",
"(",
"n",
")",
"for",
"n",
"in",
"last_names",
"if",
"n",
"]",
"return",
"{",
"'lastnames'",
":",
"last_names",
",",
"'nonlastnames'",
":",
"first_names",
"}"
] | Simple tokenizer function to be used with the normalizers. | [
"Simple",
"tokenizer",
"function",
"to",
"be",
"used",
"with",
"the",
"normalizers",
"."
] | python | train |
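A worked sketch of the tokenizer above on a "Last, First M"-style name; the expected split into tokens and initials is shown in the comments.

from json_merger.contrib.inspirehep.author_util import simple_tokenize

tokens = simple_tokenize('Smith, John A')
# tokens['lastnames']    -> [NameToken('Smith')]
# tokens['nonlastnames'] -> [NameToken('John'), NameInitial('A')]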
ssato/python-anyconfig | src/anyconfig/backend/xml.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L474-L492 | def load_from_string(self, content, container, **opts):
"""
Load config from XML snippet (a string 'content').
:param content:
XML snippet string of str (python 2) or bytes (python 3) type
:param container: callable to make a container object
:param opts: optional keyword parameters passed to
:return: Dict-like object holding config parameters
"""
root = ET.fromstring(content)
if anyconfig.compat.IS_PYTHON_3:
stream = BytesIO(content)
else:
stream = anyconfig.compat.StringIO(content)
nspaces = _namespaces_from_file(stream)
return root_to_container(root, container=container,
nspaces=nspaces, **opts) | [
"def",
"load_from_string",
"(",
"self",
",",
"content",
",",
"container",
",",
"*",
"*",
"opts",
")",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"content",
")",
"if",
"anyconfig",
".",
"compat",
".",
"IS_PYTHON_3",
":",
"stream",
"=",
"BytesIO",
"(",
"content",
")",
"else",
":",
"stream",
"=",
"anyconfig",
".",
"compat",
".",
"StringIO",
"(",
"content",
")",
"nspaces",
"=",
"_namespaces_from_file",
"(",
"stream",
")",
"return",
"root_to_container",
"(",
"root",
",",
"container",
"=",
"container",
",",
"nspaces",
"=",
"nspaces",
",",
"*",
"*",
"opts",
")"
] | Load config from XML snippet (a string 'content').
:param content:
XML snippet string of str (python 2) or bytes (python 3) type
:param container: callable to make a container object
:param opts: optional keyword parameters passed to
:return: Dict-like object holding config parameters | [
"Load",
"config",
"from",
"XML",
"snippet",
"(",
"a",
"string",
"content",
")",
"."
] | python | train |
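A hedged sketch of feeding an XML snippet to the backend above; the Parser class name follows anyconfig's backend convention, and the snippet and container choice are illustrative.

from collections import OrderedDict
from anyconfig.backend.xml import Parser

psr = Parser()
data = psr.load_from_string(b'<config><a>1</a></config>', OrderedDict)
# -> a dict-like object keyed by the root tag, e.g. {'config': {'a': '1'}}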