repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (list) | docstring (stringlengths 1-46.9k) | docstring_tokens (list) | language (stringclasses, 1 value) | partition (stringclasses, 3 values)
---|---|---|---|---|---|---|---|---|
bbengfort/confire | confire/config.py | https://github.com/bbengfort/confire/blob/0879aea2516b39a438e202dcc0c6882ca64eb613/confire/config.py#L166-L178 | def options(self):
"""
Returns an iterable of sorted option names in order to loop
through all the configuration directives specified in the class.
"""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys.keys())
for opt in keys:
val = self.get(opt)
if val is not None:
yield opt, val | [
"def",
"options",
"(",
"self",
")",
":",
"keys",
"=",
"self",
".",
"__class__",
".",
"__dict__",
".",
"copy",
"(",
")",
"keys",
".",
"update",
"(",
"self",
".",
"__dict__",
")",
"keys",
"=",
"sorted",
"(",
"keys",
".",
"keys",
"(",
")",
")",
"for",
"opt",
"in",
"keys",
":",
"val",
"=",
"self",
".",
"get",
"(",
"opt",
")",
"if",
"val",
"is",
"not",
"None",
":",
"yield",
"opt",
",",
"val"
]
| Returns an iterable of sorted option names in order to loop
through all the configuration directives specified in the class. | [
"Returns",
"an",
"iterable",
"of",
"sorted",
"option",
"names",
"in",
"order",
"to",
"loop",
"through",
"all",
"the",
"configuration",
"directives",
"specified",
"in",
"the",
"class",
"."
]
| python | train |
bwesterb/mirte | src/mirteFile.py | https://github.com/bwesterb/mirte/blob/c58db8c993cd15ffdc64b52703cd466213913200/src/mirteFile.py#L112-L156 | def _load_mirteFile(d, m):
""" Loads the dictionary from the mirteFile into <m> """
defs = d['definitions'] if 'definitions' in d else {}
insts = d['instances'] if 'instances' in d else {}
# Filter out existing instances
insts_to_skip = []
for k in insts:
if k in m.insts:
m.update_instance(k, dict(insts[k]))
insts_to_skip.append(k)
for k in insts_to_skip:
del(insts[k])
# Sort module definitions by dependency
it = sort_by_successors(
six.viewkeys(defs),
dual_cover(
six.viewkeys(defs),
restricted_cover(
six.viewkeys(defs),
depsOf_of_mirteFile_module_definition(defs)
)
)
)
# Add module definitions
for k in it:
m.add_module_definition(
k,
module_definition_from_mirteFile_dict(m, defs[k])
)
# Sort instance declarations by dependency
it = sort_by_successors(
six.viewkeys(insts),
dual_cover(
six.viewkeys(insts),
restricted_cover(
six.viewkeys(insts),
depsOf_of_mirteFile_instance_definition(m, insts)
)
)
)
# Create instances
for k in it:
settings = dict(insts[k])
del(settings['module'])
m.create_instance(k, insts[k]['module'], settings) | [
"def",
"_load_mirteFile",
"(",
"d",
",",
"m",
")",
":",
"defs",
"=",
"d",
"[",
"'definitions'",
"]",
"if",
"'definitions'",
"in",
"d",
"else",
"{",
"}",
"insts",
"=",
"d",
"[",
"'instances'",
"]",
"if",
"'instances'",
"in",
"d",
"else",
"{",
"}",
"# Filter out existing instances",
"insts_to_skip",
"=",
"[",
"]",
"for",
"k",
"in",
"insts",
":",
"if",
"k",
"in",
"m",
".",
"insts",
":",
"m",
".",
"update_instance",
"(",
"k",
",",
"dict",
"(",
"insts",
"[",
"k",
"]",
")",
")",
"insts_to_skip",
".",
"append",
"(",
"k",
")",
"for",
"k",
"in",
"insts_to_skip",
":",
"del",
"(",
"insts",
"[",
"k",
"]",
")",
"# Sort module definitions by dependency",
"it",
"=",
"sort_by_successors",
"(",
"six",
".",
"viewkeys",
"(",
"defs",
")",
",",
"dual_cover",
"(",
"six",
".",
"viewkeys",
"(",
"defs",
")",
",",
"restricted_cover",
"(",
"six",
".",
"viewkeys",
"(",
"defs",
")",
",",
"depsOf_of_mirteFile_module_definition",
"(",
"defs",
")",
")",
")",
")",
"# Add module definitions",
"for",
"k",
"in",
"it",
":",
"m",
".",
"add_module_definition",
"(",
"k",
",",
"module_definition_from_mirteFile_dict",
"(",
"m",
",",
"defs",
"[",
"k",
"]",
")",
")",
"# Sort instance declarations by dependency",
"it",
"=",
"sort_by_successors",
"(",
"six",
".",
"viewkeys",
"(",
"insts",
")",
",",
"dual_cover",
"(",
"six",
".",
"viewkeys",
"(",
"insts",
")",
",",
"restricted_cover",
"(",
"six",
".",
"viewkeys",
"(",
"insts",
")",
",",
"depsOf_of_mirteFile_instance_definition",
"(",
"m",
",",
"insts",
")",
")",
")",
")",
"# Create instances",
"for",
"k",
"in",
"it",
":",
"settings",
"=",
"dict",
"(",
"insts",
"[",
"k",
"]",
")",
"del",
"(",
"settings",
"[",
"'module'",
"]",
")",
"m",
".",
"create_instance",
"(",
"k",
",",
"insts",
"[",
"k",
"]",
"[",
"'module'",
"]",
",",
"settings",
")"
]
| Loads the dictionary from the mirteFile into <m> | [
"Loads",
"the",
"dictionary",
"from",
"the",
"mirteFile",
"into",
"<m",
">"
]
| python | train |
mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L635-L641 | def read_memory_block32(self, addr, size):
"""
read a block of aligned words in memory. Returns
an array of word values
"""
data = self.ap.read_memory_block32(addr, size)
return self.bp_manager.filter_memory_aligned_32(addr, size, data) | [
"def",
"read_memory_block32",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"data",
"=",
"self",
".",
"ap",
".",
"read_memory_block32",
"(",
"addr",
",",
"size",
")",
"return",
"self",
".",
"bp_manager",
".",
"filter_memory_aligned_32",
"(",
"addr",
",",
"size",
",",
"data",
")"
]
| read a block of aligned words in memory. Returns
an array of word values | [
"read",
"a",
"block",
"of",
"aligned",
"words",
"in",
"memory",
".",
"Returns",
"an",
"array",
"of",
"word",
"values"
]
| python | train |
jim-easterbrook/pyctools | src/pyctools/core/qt.py | https://github.com/jim-easterbrook/pyctools/blob/2a958665326892f45f249bebe62c2c23f306732b/src/pyctools/core/qt.py#L72-L86 | def queue_command(self, command):
"""Put a command on the queue to be called in the component's
thread.
:param callable command: the method to be invoked, e.g.
:py:meth:`~Component.new_frame_event`.
"""
if self._running:
# queue event normally
QtCore.QCoreApplication.postEvent(
self, ActionEvent(command), QtCore.Qt.LowEventPriority)
else:
# save event until we are started
self._incoming.append(command) | [
"def",
"queue_command",
"(",
"self",
",",
"command",
")",
":",
"if",
"self",
".",
"_running",
":",
"# queue event normally",
"QtCore",
".",
"QCoreApplication",
".",
"postEvent",
"(",
"self",
",",
"ActionEvent",
"(",
"command",
")",
",",
"QtCore",
".",
"Qt",
".",
"LowEventPriority",
")",
"else",
":",
"# save event until we are started",
"self",
".",
"_incoming",
".",
"append",
"(",
"command",
")"
]
| Put a command on the queue to be called in the component's
thread.
:param callable command: the method to be invoked, e.g.
:py:meth:`~Component.new_frame_event`. | [
"Put",
"a",
"command",
"on",
"the",
"queue",
"to",
"be",
"called",
"in",
"the",
"component",
"s",
"thread",
"."
]
| python | train |
exhuma/python-cluster | cluster/matrix.py | https://github.com/exhuma/python-cluster/blob/4c0ac14d9beafcd51f0d849151514083c296402f/cluster/matrix.py#L25-L59 | def _encapsulate_item_for_combinfunc(item):
"""
This function has been extracted in order to
make Github issue #28 easier to investigate.
It replaces the following two lines of code,
which occur twice in method genmatrix, just
before the invocation of combinfunc.
if not hasattr(item, '__iter__') or isinstance(item, tuple):
item = [item]
Logging was added to the original two lines
and shows that the outcome of this snippet
has changed between Python2.7 and Python3.5.
This logging showed that the difference in
outcome consisted of the handling of the builtin
str class, which was encapsulated into a list in
Python2.7 but returned naked in Python3.5.
Adding a test for this specific class to the
set of conditions appears to give correct behaviour
under both versions.
"""
encapsulated_item = None
if (
not hasattr(item, '__iter__') or
isinstance(item, tuple) or
isinstance(item, str)
):
encapsulated_item = [item]
else:
encapsulated_item = item
logging.debug(
"item class:%s encapsulated as:%s ",
item.__class__.__name__,
encapsulated_item.__class__.__name__
)
return encapsulated_item | [
"def",
"_encapsulate_item_for_combinfunc",
"(",
"item",
")",
":",
"encapsulated_item",
"=",
"None",
"if",
"(",
"not",
"hasattr",
"(",
"item",
",",
"'__iter__'",
")",
"or",
"isinstance",
"(",
"item",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"item",
",",
"str",
")",
")",
":",
"encapsulated_item",
"=",
"[",
"item",
"]",
"else",
":",
"encapsulated_item",
"=",
"item",
"logging",
".",
"debug",
"(",
"\"item class:%s encapsulated as:%s \"",
",",
"item",
".",
"__class__",
".",
"__name__",
",",
"encapsulated_item",
".",
"__class__",
".",
"__name__",
")",
"return",
"encapsulated_item"
]
| This function has been extracted in order to
make Github issue #28 easier to investigate.
It replaces the following two lines of code,
which occur twice in method genmatrix, just
before the invocation of combinfunc.
if not hasattr(item, '__iter__') or isinstance(item, tuple):
item = [item]
Logging was added to the original two lines
and shows that the outcome of this snippet
has changed between Python2.7 and Python3.5.
This logging showed that the difference in
outcome consisted of the handling of the builtin
str class, which was encapsulated into a list in
Python2.7 but returned naked in Python3.5.
Adding a test for this specific class to the
set of conditions appears to give correct behaviour
under both versions. | [
"This",
"function",
"has",
"been",
"extracted",
"in",
"order",
"to",
"make",
"Github",
"issue",
"#28",
"easier",
"to",
"investigate",
".",
"It",
"replaces",
"the",
"following",
"two",
"lines",
"of",
"code",
"which",
"occur",
"twice",
"in",
"method",
"genmatrix",
"just",
"before",
"the",
"invocation",
"of",
"combinfunc",
".",
"if",
"not",
"hasattr",
"(",
"item",
"__iter__",
")",
"or",
"isinstance",
"(",
"item",
"tuple",
")",
":",
"item",
"=",
"[",
"item",
"]",
"Logging",
"was",
"added",
"to",
"the",
"original",
"two",
"lines",
"and",
"shows",
"that",
"the",
"outcome",
"of",
"this",
"snippet",
"has",
"changed",
"between",
"Python2",
".",
"7",
"and",
"Python3",
".",
"5",
".",
"This",
"logging",
"showed",
"that",
"the",
"difference",
"in",
"outcome",
"consisted",
"of",
"the",
"handling",
"of",
"the",
"builtin",
"str",
"class",
"which",
"was",
"encapsulated",
"into",
"a",
"list",
"in",
"Python2",
".",
"7",
"but",
"returned",
"naked",
"in",
"Python3",
".",
"5",
".",
"Adding",
"a",
"test",
"for",
"this",
"specific",
"class",
"to",
"the",
"set",
"of",
"conditions",
"appears",
"to",
"give",
"correct",
"behaviour",
"under",
"both",
"versions",
"."
]
| python | train |
victorlei/smop | smop/parse.py | https://github.com/victorlei/smop/blob/bdad96b715d1dd75ce8ab4724f76b9b1bb1f61cd/smop/parse.py#L266-L274 | def p_expr1(p):
"""expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
| PLUSPLUS ident
| MINUSMINUS ident
"""
p[0] = node.expr(op=p[1], args=node.expr_list([p[2]])) | [
"def",
"p_expr1",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"node",
".",
"expr",
"(",
"op",
"=",
"p",
"[",
"1",
"]",
",",
"args",
"=",
"node",
".",
"expr_list",
"(",
"[",
"p",
"[",
"2",
"]",
"]",
")",
")"
]
| expr1 : MINUS expr %prec UMINUS
| PLUS expr %prec UMINUS
| NEG expr
| HANDLE ident
| PLUSPLUS ident
| MINUSMINUS ident | [
"expr1",
":",
"MINUS",
"expr",
"%prec",
"UMINUS",
"|",
"PLUS",
"expr",
"%prec",
"UMINUS",
"|",
"NEG",
"expr",
"|",
"HANDLE",
"ident",
"|",
"PLUSPLUS",
"ident",
"|",
"MINUSMINUS",
"ident"
]
| python | train |
JarryShaw/PyPCAPKit | src/protocols/internet/ipv6_frag.py | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/ipv6_frag.py#L87-L128 | def read_ipv6_frag(self, length, extension):
"""Read Fragment Header for IPv6.
Structure of IPv6-Frag header [RFC 8200]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Reserved | Fragment Offset |Res|M|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 frag.next Next Header
1 8 - Reserved
2 16 frag.offset Fragment Offset
3 29 - Reserved
3 31 frag.mf More Flag
4 32 frag.id Identification
"""
if length is None:
length = len(self)
_next = self._read_protos(1)
_temp = self._read_fileng(1)
_offm = self._read_binary(2)
_ipid = self._read_unpack(4)
ipv6_frag = dict(
next=_next,
length=8,
offset=int(_offm[:13], base=2),
mf=True if int(_offm[15], base=2) else False,
id=_ipid,
)
length -= ipv6_frag['length']
ipv6_frag['packet'] = self._read_packet(header=8, payload=length)
if extension:
self._protos = None
return ipv6_frag
return self._decode_next_layer(ipv6_frag, _next, length) | [
"def",
"read_ipv6_frag",
"(",
"self",
",",
"length",
",",
"extension",
")",
":",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"len",
"(",
"self",
")",
"_next",
"=",
"self",
".",
"_read_protos",
"(",
"1",
")",
"_temp",
"=",
"self",
".",
"_read_fileng",
"(",
"1",
")",
"_offm",
"=",
"self",
".",
"_read_binary",
"(",
"2",
")",
"_ipid",
"=",
"self",
".",
"_read_unpack",
"(",
"4",
")",
"ipv6_frag",
"=",
"dict",
"(",
"next",
"=",
"_next",
",",
"length",
"=",
"8",
",",
"offset",
"=",
"int",
"(",
"_offm",
"[",
":",
"13",
"]",
",",
"base",
"=",
"2",
")",
",",
"mf",
"=",
"True",
"if",
"int",
"(",
"_offm",
"[",
"15",
"]",
",",
"base",
"=",
"2",
")",
"else",
"False",
",",
"id",
"=",
"_ipid",
",",
")",
"length",
"-=",
"ipv6_frag",
"[",
"'length'",
"]",
"ipv6_frag",
"[",
"'packet'",
"]",
"=",
"self",
".",
"_read_packet",
"(",
"header",
"=",
"8",
",",
"payload",
"=",
"length",
")",
"if",
"extension",
":",
"self",
".",
"_protos",
"=",
"None",
"return",
"ipv6_frag",
"return",
"self",
".",
"_decode_next_layer",
"(",
"ipv6_frag",
",",
"_next",
",",
"length",
")"
]
| Read Fragment Header for IPv6.
Structure of IPv6-Frag header [RFC 8200]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Reserved | Fragment Offset |Res|M|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Identification |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 frag.next Next Header
1 8 - Reserved
2 16 frag.offset Fragment Offset
3 29 - Reserved
3 31 frag.mf More Flag
4 32 frag.id Identification | [
"Read",
"Fragment",
"Header",
"for",
"IPv6",
"."
]
| python | train |
wonambi-python/wonambi | wonambi/widgets/channels.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/channels.py#L536-L548 | def reset(self):
"""Reset all the information of this widget."""
self.filename = None
self.groups = []
self.tabs.clear()
self.setEnabled(False)
self.button_color.setEnabled(False)
self.button_del.setEnabled(False)
self.button_apply.setEnabled(False)
self.action['load_channels'].setEnabled(False)
self.action['save_channels'].setEnabled(False) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"filename",
"=",
"None",
"self",
".",
"groups",
"=",
"[",
"]",
"self",
".",
"tabs",
".",
"clear",
"(",
")",
"self",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"button_color",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"button_del",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"button_apply",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"action",
"[",
"'load_channels'",
"]",
".",
"setEnabled",
"(",
"False",
")",
"self",
".",
"action",
"[",
"'save_channels'",
"]",
".",
"setEnabled",
"(",
"False",
")"
]
| Reset all the information of this widget. | [
"Reset",
"all",
"the",
"information",
"of",
"this",
"widget",
"."
]
| python | train |
secdev/scapy | scapy/contrib/http2.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/http2.py#L1330-L1337 | def self_build(self, field_pos_list=None):
    # type: (Any) -> str
    """self_build is overridden because type and len are determined at
    build time, based on the "data" field internal type
    """
    if self.getfieldval('type') is None:
        self.type = 1 if isinstance(self.getfieldval('data'), HPackZString) else 0 # noqa: E501
    return super(HPackHdrString, self).self_build(field_pos_list) | [
"def",
"self_build",
"(",
"self",
",",
"field_pos_list",
"=",
"None",
")",
":",
"# type: (Any) -> str",
"if",
"self",
".",
"getfieldval",
"(",
"'type'",
")",
"is",
"None",
":",
"self",
".",
"type",
"=",
"1",
"if",
"isinstance",
"(",
"self",
".",
"getfieldval",
"(",
"'data'",
")",
",",
"HPackZString",
")",
"else",
"0",
"# noqa: E501",
"return",
"super",
"(",
"HPackHdrString",
",",
"self",
")",
".",
"self_build",
"(",
"field_pos_list",
")"
]
| self_build is overridden because type and len are determined at
build time, based on the "data" field internal type | [
"self_build",
"is",
"overridden",
"because",
"type",
"and",
"len",
"are",
"determined",
"at",
"build",
"time",
"based",
"on",
"the",
"data",
"field",
"internal",
"type"
]
| python | train |
openstack/horizon | openstack_auth/user.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/user.py#L282-L289 | def is_superuser(self):
"""Evaluates whether this user has admin privileges.
:returns: ``True`` or ``False``.
"""
admin_roles = utils.get_admin_roles()
user_roles = {role['name'].lower() for role in self.roles}
return not admin_roles.isdisjoint(user_roles) | [
"def",
"is_superuser",
"(",
"self",
")",
":",
"admin_roles",
"=",
"utils",
".",
"get_admin_roles",
"(",
")",
"user_roles",
"=",
"{",
"role",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
"for",
"role",
"in",
"self",
".",
"roles",
"}",
"return",
"not",
"admin_roles",
".",
"isdisjoint",
"(",
"user_roles",
")"
]
| Evaluates whether this user has admin privileges.
:returns: ``True`` or ``False``. | [
"Evaluates",
"whether",
"this",
"user",
"has",
"admin",
"privileges",
"."
]
| python | train |
sffjunkie/astral | src/astral.py | https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L1709-L1729 | def _get_timezone(self, location):
"""Query the timezone information with the latitude and longitude of
the specified `location`.
This function assumes the timezone of the location has always been
the same as it is now by using time() in the query string.
"""
url = self._timezone_query_base % (
location.latitude,
location.longitude,
int(time()),
)
if self.api_key != "":
url += "&key=%s" % self.api_key
data = self._read_from_url(url)
response = json.loads(data)
if response["status"] == "OK":
location.timezone = response["timeZoneId"]
else:
location.timezone = "UTC" | [
"def",
"_get_timezone",
"(",
"self",
",",
"location",
")",
":",
"url",
"=",
"self",
".",
"_timezone_query_base",
"%",
"(",
"location",
".",
"latitude",
",",
"location",
".",
"longitude",
",",
"int",
"(",
"time",
"(",
")",
")",
",",
")",
"if",
"self",
".",
"api_key",
"!=",
"\"\"",
":",
"url",
"+=",
"\"&key=%s\"",
"%",
"self",
".",
"api_key",
"data",
"=",
"self",
".",
"_read_from_url",
"(",
"url",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"if",
"response",
"[",
"\"status\"",
"]",
"==",
"\"OK\"",
":",
"location",
".",
"timezone",
"=",
"response",
"[",
"\"timeZoneId\"",
"]",
"else",
":",
"location",
".",
"timezone",
"=",
"\"UTC\""
]
| Query the timezone information with the latitude and longitude of
the specified `location`.
This function assumes the timezone of the location has always been
the same as it is now by using time() in the query string. | [
"Query",
"the",
"timezone",
"information",
"with",
"the",
"latitude",
"and",
"longitude",
"of",
"the",
"specified",
"location",
"."
]
| python | train |
zyga/libpci | libpci/_native.py | https://github.com/zyga/libpci/blob/5da0cf464192afff2fae8687c9133329897ec631/libpci/_native.py#L90-L131 | def Function(
        library: CDLL,
        name_or_ordinal: 'Union[str, int, None]'=None,
        proto_factory: ('Union[ctypes.CFUNCTYPE, ctypes.WINFUNCTYPE,'
                        ' ctypes.PYFUNCTYPE]')=CFUNCTYPE,
        use_errno: bool=False,
        use_last_error: bool=False,
) -> 'Callable':
    """
    Decorator factory for creating callables for native functions.
    Decorator factory for constructing relatively-nicely-looking callables that
    call into existing native functions exposed from a dynamically-linkable
    library.
    :param library:
        The library to look at
    :param name_or_ordinal:
        Typically the name of the symbol to load from the library. In rare
        cases it may also be the index of the function inside the library.
    :param proto_factory:
        The prototype factory.
    :param use_last_error:
        Passed directly to the prototype factory.
    :param use_last_error:
        Passed directly to the prototype factory.
    :returns:
        A decorator for a function with particular, special annotations.
    .. note::
        Since nested functions have hard-to-reach documentation, the
        documentation of the function returned from ``native()`` is documented
        below.
    """
    def decorator(fn: 'Callable') -> 'Callable':
        metadata = _ctypes_metadata(fn)
        prototype = proto_factory(
            metadata.restype, *metadata.argtypes,
            use_errno=use_errno, use_last_error=use_last_error)
        func_spec = (name_or_ordinal or fn.__name__, library)
        return prototype(func_spec, metadata.paramflags)
    return decorator | [
"def",
"Function",
"(",
"library",
":",
"CDLL",
",",
"name_or_ordinal",
":",
"'Union[str, int, None]'",
"=",
"None",
",",
"proto_factory",
":",
"(",
"'Union[ctypes.CFUNCTYPE, ctypes.WINFUNCTYPE,'",
"' ctypes.PYFUNCTYPE]'",
")",
"=",
"CFUNCTYPE",
",",
"use_errno",
":",
"bool",
"=",
"False",
",",
"use_last_error",
":",
"bool",
"=",
"False",
",",
")",
"->",
"'Callable'",
":",
"def",
"decorator",
"(",
"fn",
":",
"'Callable'",
")",
"->",
"'Callable'",
":",
"metadata",
"=",
"_ctypes_metadata",
"(",
"fn",
")",
"prototype",
"=",
"proto_factory",
"(",
"metadata",
".",
"restype",
",",
"*",
"metadata",
".",
"argtypes",
",",
"use_errno",
"=",
"use_errno",
",",
"use_last_error",
"=",
"use_last_error",
")",
"func_spec",
"=",
"(",
"name_or_ordinal",
"or",
"fn",
".",
"__name__",
",",
"library",
")",
"return",
"prototype",
"(",
"func_spec",
",",
"metadata",
".",
"paramflags",
")",
"return",
"decorator"
]
| Decorator factory for creating callables for native functions.
Decorator factory for constructing relatively-nicely-looking callables that
call into existing native functions exposed from a dynamically-linkable
library.
:param library:
The library to look at
:param name_or_ordinal:
Typically the name of the symbol to load from the library. In rare
cases it may also be the index of the function inside the library.
:param proto_factory:
The prototype factory.
:param use_last_error:
Passed directly to the prototype factory.
:param use_last_error:
Passed directly to the prototype factory.
:returns:
A decorator for a function with particular, special annotations.
.. note::
Since nested functions have hard-to-reach documentation, the
documentation of the function returned from ``native()`` is documented
below. | [
"Decorator",
"factory",
"for",
"creating",
"callables",
"for",
"native",
"functions",
"."
]
| python | train |
MaxStrange/AudioSegment | audiosegment.py | https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/audiosegment.py#L1057-L1072 | def zero_extend(self, duration_s=None, num_samples=None):
"""
Adds a number of zeros (digital silence) to the AudioSegment (returning a new one).
:param duration_s: The number of seconds of zeros to add. If this is specified, `num_samples` must be None.
:param num_samples: The number of zeros to add. If this is specified, `duration_s` must be None.
:returns: A new AudioSegment object that has been zero extended.
:raises: ValueError if duration_s and num_samples are both specified.
"""
if duration_s is not None and num_samples is not None:
raise ValueError("`duration_s` and `num_samples` cannot both be specified.")
elif duration_s is not None:
num_samples = self.frame_rate * duration_s
seg = AudioSegment(self.seg, self.name)
zeros = silent(duration=num_samples / self.frame_rate, frame_rate=self.frame_rate)
return zeros.overlay(seg) | [
"def",
"zero_extend",
"(",
"self",
",",
"duration_s",
"=",
"None",
",",
"num_samples",
"=",
"None",
")",
":",
"if",
"duration_s",
"is",
"not",
"None",
"and",
"num_samples",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"`duration_s` and `num_samples` cannot both be specified.\"",
")",
"elif",
"duration_s",
"is",
"not",
"None",
":",
"num_samples",
"=",
"self",
".",
"frame_rate",
"*",
"duration_s",
"seg",
"=",
"AudioSegment",
"(",
"self",
".",
"seg",
",",
"self",
".",
"name",
")",
"zeros",
"=",
"silent",
"(",
"duration",
"=",
"num_samples",
"/",
"self",
".",
"frame_rate",
",",
"frame_rate",
"=",
"self",
".",
"frame_rate",
")",
"return",
"zeros",
".",
"overlay",
"(",
"seg",
")"
]
| Adds a number of zeros (digital silence) to the AudioSegment (returning a new one).
:param duration_s: The number of seconds of zeros to add. If this is specified, `num_samples` must be None.
:param num_samples: The number of zeros to add. If this is specified, `duration_s` must be None.
:returns: A new AudioSegment object that has been zero extended.
:raises: ValueError if duration_s and num_samples are both specified. | [
"Adds",
"a",
"number",
"of",
"zeros",
"(",
"digital",
"silence",
")",
"to",
"the",
"AudioSegment",
"(",
"returning",
"a",
"new",
"one",
")",
"."
]
| python | test |
kennethreitz/clint | clint/textui/cols.py | https://github.com/kennethreitz/clint/blob/9d3693d644b8587d985972b6075d970096f6439e/clint/textui/cols.py#L56-L71 | def console_width(kwargs):
""""Determine console_width."""
if sys.platform.startswith('win'):
console_width = _find_windows_console_width()
else:
console_width = _find_unix_console_width()
_width = kwargs.get('width', None)
if _width:
console_width = _width
else:
if not console_width:
console_width = 80
return console_width | [
"def",
"console_width",
"(",
"kwargs",
")",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"console_width",
"=",
"_find_windows_console_width",
"(",
")",
"else",
":",
"console_width",
"=",
"_find_unix_console_width",
"(",
")",
"_width",
"=",
"kwargs",
".",
"get",
"(",
"'width'",
",",
"None",
")",
"if",
"_width",
":",
"console_width",
"=",
"_width",
"else",
":",
"if",
"not",
"console_width",
":",
"console_width",
"=",
"80",
"return",
"console_width"
]
| Determine console_width. | [
"Determine",
"console_width",
"."
]
| python | train |
irgangla/icalevents | icalevents/icaldownload.py | https://github.com/irgangla/icalevents/blob/bb34c770ae6ffbf4c793e09dc5176344b285f386/icalevents/icaldownload.py#L63-L77 | def data_from_file(self, file, apple_fix=False):
"""
Read iCal data from file.
:param file: file to read
:param apple_fix: fix wrong Apple tzdata in iCal
:return: decoded (and fixed) iCal data
"""
with open(file, mode='rb') as f:
content = f.read()
if not content:
raise IOError("File %f is not readable or is empty!" % file)
return self.decode(content, apple_fix=apple_fix) | [
"def",
"data_from_file",
"(",
"self",
",",
"file",
",",
"apple_fix",
"=",
"False",
")",
":",
"with",
"open",
"(",
"file",
",",
"mode",
"=",
"'rb'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"if",
"not",
"content",
":",
"raise",
"IOError",
"(",
"\"File %f is not readable or is empty!\"",
"%",
"file",
")",
"return",
"self",
".",
"decode",
"(",
"content",
",",
"apple_fix",
"=",
"apple_fix",
")"
]
| Read iCal data from file.
:param file: file to read
:param apple_fix: fix wrong Apple tzdata in iCal
:return: decoded (and fixed) iCal data | [
"Read",
"iCal",
"data",
"from",
"file",
"."
]
| python | train |
PlaidWeb/Pushl | pushl/utils.py | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/utils.py#L109-L112 | async def retry_get(config, url, *args, **kwargs):
""" aiohttp wrapper for GET """
return await _retry_do(config.session.get, url, *args,
**_make_headers(config, kwargs)) | [
"async",
"def",
"retry_get",
"(",
"config",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"_retry_do",
"(",
"config",
".",
"session",
".",
"get",
",",
"url",
",",
"*",
"args",
",",
"*",
"*",
"_make_headers",
"(",
"config",
",",
"kwargs",
")",
")"
]
| aiohttp wrapper for GET | [
"aiohttp",
"wrapper",
"for",
"GET"
]
| python | train |
nfcpy/nfcpy | src/nfc/tag/tt1_broadcom.py | https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt1_broadcom.py#L68-L77 | def protect(self, password=None, read_protect=False, protect_from=0):
"""In addtion to :meth:`nfc.tag.tt1.Type1Tag.protect` this method
tries to set the lock bits to irreversibly protect the tag
memory. However, it appears that tags sold have the lock bytes
write protected, so this additional effort most likely doesn't
have any effect.
"""
return super(Topaz, self).protect(
password, read_protect, protect_from) | [
"def",
"protect",
"(",
"self",
",",
"password",
"=",
"None",
",",
"read_protect",
"=",
"False",
",",
"protect_from",
"=",
"0",
")",
":",
"return",
"super",
"(",
"Topaz",
",",
"self",
")",
".",
"protect",
"(",
"password",
",",
"read_protect",
",",
"protect_from",
")"
]
| In addtion to :meth:`nfc.tag.tt1.Type1Tag.protect` this method
tries to set the lock bits to irreversibly protect the tag
memory. However, it appears that tags sold have the lock bytes
write protected, so this additional effort most likely doesn't
have any effect. | [
"In",
"addtion",
"to",
":",
"meth",
":",
"nfc",
".",
"tag",
".",
"tt1",
".",
"Type1Tag",
".",
"protect",
"this",
"method",
"tries",
"to",
"set",
"the",
"lock",
"bits",
"to",
"irreversibly",
"protect",
"the",
"tag",
"memory",
".",
"However",
"it",
"appears",
"that",
"tags",
"sold",
"have",
"the",
"lock",
"bytes",
"write",
"protected",
"so",
"this",
"additional",
"effort",
"most",
"likely",
"doesn",
"t",
"have",
"any",
"effect",
"."
]
| python | train |
NASA-AMMOS/AIT-Core | ait/core/val.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/val.py#L514-L520 | def validate(self, ymldata=None, messages=None):
"""Validates the Telemetry Dictionary definitions"""
schema_val = self.schema_val(messages)
if len(messages) == 0:
content_val = self.content_val(ymldata, messages)
return schema_val and content_val | [
"def",
"validate",
"(",
"self",
",",
"ymldata",
"=",
"None",
",",
"messages",
"=",
"None",
")",
":",
"schema_val",
"=",
"self",
".",
"schema_val",
"(",
"messages",
")",
"if",
"len",
"(",
"messages",
")",
"==",
"0",
":",
"content_val",
"=",
"self",
".",
"content_val",
"(",
"ymldata",
",",
"messages",
")",
"return",
"schema_val",
"and",
"content_val"
]
| Validates the Telemetry Dictionary definitions | [
"Validates",
"the",
"Telemetry",
"Dictionary",
"definitions"
]
| python | train |
apacha/OMR-Datasets | omrdatasettools/image_generators/HomusImageGenerator.py | https://github.com/apacha/OMR-Datasets/blob/d0a22a03ae35caeef211729efa340e1ec0e01ea5/omrdatasettools/image_generators/HomusImageGenerator.py#L13-L105 | def create_images(raw_data_directory: str,
                  destination_directory: str,
                  stroke_thicknesses: List[int],
                  canvas_width: int = None,
                  canvas_height: int = None,
                  staff_line_spacing: int = 14,
                  staff_line_vertical_offsets: List[int] = None,
                  random_position_on_canvas: bool = False) -> dict:
    """
    Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
    by the parameters by drawing lines that connect the points from each stroke of each symbol.
    Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
    :param raw_data_directory: The directory, that contains the text-files that contain the textual representation
        of the music symbols
    :param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
        symbol category will be generated automatically
    :param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
        specified, multiple images will be generated that have a different suffix, e.g.
        1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
    :param canvas_width: The width of the canvas, that each image will be drawn upon, regardless of the original size of
        the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
        provided None here.
    :param canvas_height: The height of the canvas, that each image will be drawn upon, regardless of the original size of
        the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
        provided None here
    :param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
    :param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
        the drawn images. If None is provided, no staff-lines will be superimposed.
        If multiple values are provided, multiple versions of each symbol will be
        generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
        1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
        3 and staff-line offsets 70 and 77 pixels from the top.
    :param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
        False, if the symbols should be centered in the fixed canvas.
        Note that this flag only has an effect, if fixed canvas sizes are used.
    :return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
        of each symbol.
    """
    all_symbol_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.txt'))]
    staff_line_multiplier = 1
    if staff_line_vertical_offsets is not None and staff_line_vertical_offsets:
        staff_line_multiplier = len(staff_line_vertical_offsets)
    total_number_of_symbols = len(all_symbol_files) * len(stroke_thicknesses) * staff_line_multiplier
    output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
        total_number_of_symbols, len(all_symbol_files), len(stroke_thicknesses), stroke_thicknesses)
    if staff_line_vertical_offsets is not None:
        output += " and with staff-lines with {0} different offsets from the top ({1})".format(
            staff_line_multiplier, staff_line_vertical_offsets)
    if canvas_width is not None and canvas_height is not None:
        if random_position_on_canvas is False:
            output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
                                                                                                    canvas_height)
        else:
            output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)".format(canvas_width,
                                                                                                     canvas_height)
    print(output)
    print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
    bounding_boxes = dict()
    progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25)
    for symbol_file in all_symbol_files:
        with open(symbol_file) as file:
            content = file.read()
        symbol = HomusSymbol.initialize_from_string(content)
        target_directory = os.path.join(destination_directory, symbol.symbol_class)
        os.makedirs(target_directory, exist_ok=True)
        raw_file_name_without_extension = os.path.splitext(os.path.basename(symbol_file))[0]
        for stroke_thickness in stroke_thicknesses:
            export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
                                     'png', stroke_thickness)
            if canvas_width is None and canvas_height is None:
                symbol.draw_into_bitmap(export_path, stroke_thickness, margin=2)
            else:
                symbol.draw_onto_canvas(export_path, stroke_thickness, 0, canvas_width,
                                        canvas_height, staff_line_spacing, staff_line_vertical_offsets,
                                        bounding_boxes, random_position_on_canvas)
            progress_bar.update(1 * staff_line_multiplier)
    progress_bar.close()
    return bounding_boxes | [
"def",
"create_images",
"(",
"raw_data_directory",
":",
"str",
",",
"destination_directory",
":",
"str",
",",
"stroke_thicknesses",
":",
"List",
"[",
"int",
"]",
",",
"canvas_width",
":",
"int",
"=",
"None",
",",
"canvas_height",
":",
"int",
"=",
"None",
",",
"staff_line_spacing",
":",
"int",
"=",
"14",
",",
"staff_line_vertical_offsets",
":",
"List",
"[",
"int",
"]",
"=",
"None",
",",
"random_position_on_canvas",
":",
"bool",
"=",
"False",
")",
"->",
"dict",
":",
"all_symbol_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"raw_data_directory",
")",
"for",
"y",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
"[",
"0",
"]",
",",
"'*.txt'",
")",
")",
"]",
"staff_line_multiplier",
"=",
"1",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
"and",
"staff_line_vertical_offsets",
":",
"staff_line_multiplier",
"=",
"len",
"(",
"staff_line_vertical_offsets",
")",
"total_number_of_symbols",
"=",
"len",
"(",
"all_symbol_files",
")",
"*",
"len",
"(",
"stroke_thicknesses",
")",
"*",
"staff_line_multiplier",
"output",
"=",
"\"Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})\"",
".",
"format",
"(",
"total_number_of_symbols",
",",
"len",
"(",
"all_symbol_files",
")",
",",
"len",
"(",
"stroke_thicknesses",
")",
",",
"stroke_thicknesses",
")",
"if",
"staff_line_vertical_offsets",
"is",
"not",
"None",
":",
"output",
"+=",
"\" and with staff-lines with {0} different offsets from the top ({1})\"",
".",
"format",
"(",
"staff_line_multiplier",
",",
"staff_line_vertical_offsets",
")",
"if",
"canvas_width",
"is",
"not",
"None",
"and",
"canvas_height",
"is",
"not",
"None",
":",
"if",
"random_position_on_canvas",
"is",
"False",
":",
"output",
"+=",
"\"\\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)\"",
".",
"format",
"(",
"canvas_width",
",",
"canvas_height",
")",
"else",
":",
"output",
"+=",
"\"\\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)\"",
".",
"format",
"(",
"canvas_width",
",",
"canvas_height",
")",
"print",
"(",
"output",
")",
"print",
"(",
"\"In directory {0}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"destination_directory",
")",
")",
",",
"flush",
"=",
"True",
")",
"bounding_boxes",
"=",
"dict",
"(",
")",
"progress_bar",
"=",
"tqdm",
"(",
"total",
"=",
"total_number_of_symbols",
",",
"mininterval",
"=",
"0.25",
")",
"for",
"symbol_file",
"in",
"all_symbol_files",
":",
"with",
"open",
"(",
"symbol_file",
")",
"as",
"file",
":",
"content",
"=",
"file",
".",
"read",
"(",
")",
"symbol",
"=",
"HomusSymbol",
".",
"initialize_from_string",
"(",
"content",
")",
"target_directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
")",
"os",
".",
"makedirs",
"(",
"target_directory",
",",
"exist_ok",
"=",
"True",
")",
"raw_file_name_without_extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"symbol_file",
")",
")",
"[",
"0",
"]",
"for",
"stroke_thickness",
"in",
"stroke_thicknesses",
":",
"export_path",
"=",
"ExportPath",
"(",
"destination_directory",
",",
"symbol",
".",
"symbol_class",
",",
"raw_file_name_without_extension",
",",
"'png'",
",",
"stroke_thickness",
")",
"if",
"canvas_width",
"is",
"None",
"and",
"canvas_height",
"is",
"None",
":",
"symbol",
".",
"draw_into_bitmap",
"(",
"export_path",
",",
"stroke_thickness",
",",
"margin",
"=",
"2",
")",
"else",
":",
"symbol",
".",
"draw_onto_canvas",
"(",
"export_path",
",",
"stroke_thickness",
",",
"0",
",",
"canvas_width",
",",
"canvas_height",
",",
"staff_line_spacing",
",",
"staff_line_vertical_offsets",
",",
"bounding_boxes",
",",
"random_position_on_canvas",
")",
"progress_bar",
".",
"update",
"(",
"1",
"*",
"staff_line_multiplier",
")",
"progress_bar",
".",
"close",
"(",
")",
"return",
"bounding_boxes"
]
| Creates a visual representation of the Homus Dataset by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
Each symbol will be drawn in the center of a fixed canvas, specified by width and height.
:param raw_data_directory: The directory, that contains the text-files that contain the textual representation
of the music symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
:param canvas_width: The width of the canvas, that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
provided None here.
:param canvas_height: The height of the canvas, that each image will be drawn upon, regardless of the original size of
the symbol. Larger symbols will be cropped. If the original size of the symbol should be used,
provided None here
:param staff_line_spacing: Number of pixels spacing between each of the five staff-lines
:param staff_line_vertical_offsets: List of vertical offsets, where the staff-lines will be superimposed over
the drawn images. If None is provided, no staff-lines will be superimposed.
If multiple values are provided, multiple versions of each symbol will be
generated with the appropriate staff-lines, e.g. 1-5_3_offset_70.png and
1-5_3_offset_77.png for two versions of the symbol 1-5 with stroke thickness
3 and staff-line offsets 70 and 77 pixels from the top.
:param random_position_on_canvas: True, if the symbols should be randomly placed on the fixed canvas.
False, if the symbols should be centered in the fixed canvas.
Note that this flag only has an effect, if fixed canvas sizes are used.
:return: A dictionary that contains the file-names of all generated symbols and the respective bounding-boxes
of each symbol. | [
"Creates",
"a",
"visual",
"representation",
"of",
"the",
"Homus",
"Dataset",
"by",
"parsing",
"all",
"text",
"-",
"files",
"and",
"the",
"symbols",
"as",
"specified",
"by",
"the",
"parameters",
"by",
"drawing",
"lines",
"that",
"connect",
"the",
"points",
"from",
"each",
"stroke",
"of",
"each",
"symbol",
"."
]
| python | train |
wal-e/wal-e | wal_e/worker/pg/psql_worker.py | https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/psql_worker.py#L34-L71 | def psql_csv_run(sql_command, error_handler=None):
"""
Runs psql and returns a CSVReader object from the query
This CSVReader includes header names as the first record in all
situations. The output is fully buffered into Python.
"""
csv_query = ('COPY ({query}) TO STDOUT WITH CSV HEADER;'
.format(query=sql_command))
new_env = os.environ.copy()
new_env.setdefault('PGOPTIONS', '')
new_env["PGOPTIONS"] += ' --statement-timeout=0'
psql_proc = popen_nonblock([PSQL_BIN, '-d', 'postgres', '--no-password',
'--no-psqlrc', '-c', csv_query],
stdout=PIPE,
env=new_env)
stdout = psql_proc.communicate()[0].decode('utf-8')
if psql_proc.returncode != 0:
if error_handler is not None:
error_handler(psql_proc)
else:
assert error_handler is None
raise UserException(
'could not csv-execute a query successfully via psql',
'Query was "{query}".'.format(sql_command),
'You may have to set some libpq environment '
'variables if you are sure the server is running.')
# Previous code must raise any desired exceptions for non-zero
# exit codes
assert psql_proc.returncode == 0
# Fake enough iterator interface to get a CSV Reader object
# that works.
return csv.reader(iter(stdout.strip().split('\n'))) | [
"def",
"psql_csv_run",
"(",
"sql_command",
",",
"error_handler",
"=",
"None",
")",
":",
"csv_query",
"=",
"(",
"'COPY ({query}) TO STDOUT WITH CSV HEADER;'",
".",
"format",
"(",
"query",
"=",
"sql_command",
")",
")",
"new_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"new_env",
".",
"setdefault",
"(",
"'PGOPTIONS'",
",",
"''",
")",
"new_env",
"[",
"\"PGOPTIONS\"",
"]",
"+=",
"' --statement-timeout=0'",
"psql_proc",
"=",
"popen_nonblock",
"(",
"[",
"PSQL_BIN",
",",
"'-d'",
",",
"'postgres'",
",",
"'--no-password'",
",",
"'--no-psqlrc'",
",",
"'-c'",
",",
"csv_query",
"]",
",",
"stdout",
"=",
"PIPE",
",",
"env",
"=",
"new_env",
")",
"stdout",
"=",
"psql_proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"psql_proc",
".",
"returncode",
"!=",
"0",
":",
"if",
"error_handler",
"is",
"not",
"None",
":",
"error_handler",
"(",
"psql_proc",
")",
"else",
":",
"assert",
"error_handler",
"is",
"None",
"raise",
"UserException",
"(",
"'could not csv-execute a query successfully via psql'",
",",
"'Query was \"{query}\".'",
".",
"format",
"(",
"sql_command",
")",
",",
"'You may have to set some libpq environment '",
"'variables if you are sure the server is running.'",
")",
"# Previous code must raise any desired exceptions for non-zero",
"# exit codes",
"assert",
"psql_proc",
".",
"returncode",
"==",
"0",
"# Fake enough iterator interface to get a CSV Reader object",
"# that works.",
"return",
"csv",
".",
"reader",
"(",
"iter",
"(",
"stdout",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
")",
")"
]
| Runs psql and returns a CSVReader object from the query
This CSVReader includes header names as the first record in all
situations. The output is fully buffered into Python. | [
"Runs",
"psql",
"and",
"returns",
"a",
"CSVReader",
"object",
"from",
"the",
"query"
]
| python | train |
carpedm20/fbchat | fbchat/_client.py | https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L3189-L3216 | def onMessage(
    self,
    mid=None,
    author_id=None,
    message=None,
    message_object=None,
    thread_id=None,
    thread_type=ThreadType.USER,
    ts=None,
    metadata=None,
    msg=None,
):
    """
    Called when the client is listening, and somebody sends a message
    :param mid: The message ID
    :param author_id: The ID of the author
    :param message: (deprecated. Use `message_object.text` instead)
    :param message_object: The message (As a `Message` object)
    :param thread_id: Thread ID that the message was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the message was sent to. See :ref:`intro_threads`
    :param ts: The timestamp of the message
    :param metadata: Extra metadata about the message
    :param msg: A full set of the data recieved
    :type message_object: models.Message
    :type thread_type: models.ThreadType
    """
    log.info("{} from {} in {}".format(message_object, thread_id, thread_type.name)) | [
"def",
"onMessage",
"(",
"self",
",",
"mid",
"=",
"None",
",",
"author_id",
"=",
"None",
",",
"message",
"=",
"None",
",",
"message_object",
"=",
"None",
",",
"thread_id",
"=",
"None",
",",
"thread_type",
"=",
"ThreadType",
".",
"USER",
",",
"ts",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"msg",
"=",
"None",
",",
")",
":",
"log",
".",
"info",
"(",
"\"{} from {} in {}\"",
".",
"format",
"(",
"message_object",
",",
"thread_id",
",",
"thread_type",
".",
"name",
")",
")"
]
| Called when the client is listening, and somebody sends a message
:param mid: The message ID
:param author_id: The ID of the author
:param message: (deprecated. Use `message_object.text` instead)
:param message_object: The message (As a `Message` object)
:param thread_id: Thread ID that the message was sent to. See :ref:`intro_threads`
:param thread_type: Type of thread that the message was sent to. See :ref:`intro_threads`
:param ts: The timestamp of the message
:param metadata: Extra metadata about the message
:param msg: A full set of the data recieved
:type message_object: models.Message
:type thread_type: models.ThreadType | [
"Called",
"when",
"the",
"client",
"is",
"listening",
"and",
"somebody",
"sends",
"a",
"message"
]
| python | train |
wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/__init__.py | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L842-L861 | def _upgrade(self):
"""
Upgrade the serialized object if necessary.
Raises:
FutureVersionError: file was written by a future version of the
software.
"""
logging.debug("[FeedbackResultsSeries]._upgrade()")
version = Version.fromstring(self.version)
logging.debug('[FeedbackResultsSeries] version=%s, class_version=%s',
str(version), self.class_version)
if version > Version.fromstring(self.class_version):
logging.debug('[FeedbackResultsSeries] version>class_version')
raise FutureVersionError(Version.fromstring(self.class_version),
version)
elif version < Version.fromstring(self.class_version):
if version < Version(0, 1):
self.time = [None]*len(self.data)
self.version = str(Version(0, 1)) | [
"def",
"_upgrade",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"\"[FeedbackResultsSeries]._upgrade()\"",
")",
"version",
"=",
"Version",
".",
"fromstring",
"(",
"self",
".",
"version",
")",
"logging",
".",
"debug",
"(",
"'[FeedbackResultsSeries] version=%s, class_version=%s'",
",",
"str",
"(",
"version",
")",
",",
"self",
".",
"class_version",
")",
"if",
"version",
">",
"Version",
".",
"fromstring",
"(",
"self",
".",
"class_version",
")",
":",
"logging",
".",
"debug",
"(",
"'[FeedbackResultsSeries] version>class_version'",
")",
"raise",
"FutureVersionError",
"(",
"Version",
".",
"fromstring",
"(",
"self",
".",
"class_version",
")",
",",
"version",
")",
"elif",
"version",
"<",
"Version",
".",
"fromstring",
"(",
"self",
".",
"class_version",
")",
":",
"if",
"version",
"<",
"Version",
"(",
"0",
",",
"1",
")",
":",
"self",
".",
"time",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"self",
".",
"data",
")",
"self",
".",
"version",
"=",
"str",
"(",
"Version",
"(",
"0",
",",
"1",
")",
")"
]
| Upgrade the serialized object if necessary.
Raises:
FutureVersionError: file was written by a future version of the
software. | [
"Upgrade",
"the",
"serialized",
"object",
"if",
"necessary",
"."
]
| python | train |
MeaningCloud/meaningcloud-python | meaningcloud/Response.py | https://github.com/MeaningCloud/meaningcloud-python/blob/1dd76ecabeedd80c9bb14a1716d39657d645775f/meaningcloud/Response.py#L54-L66 | def getStatusMsg(self):
"""
Returns the message of the status or an empty string if it does not exist
:return:
Status message of the response
"""
if 'status' in self._response.keys():
if (self._response['status'] is not None) and ('msg' in self._response['status'].keys()) and (self._response['status']['msg'] is not None):
return self._response['status']['msg']
else:
return '' | [
"def",
"getStatusMsg",
"(",
"self",
")",
":",
"if",
"'status'",
"in",
"self",
".",
"_response",
".",
"keys",
"(",
")",
":",
"if",
"(",
"self",
".",
"_response",
"[",
"'status'",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"'msg'",
"in",
"self",
".",
"_response",
"[",
"'status'",
"]",
".",
"keys",
"(",
")",
")",
"and",
"(",
"self",
".",
"_response",
"[",
"'status'",
"]",
"[",
"'msg'",
"]",
"is",
"not",
"None",
")",
":",
"return",
"self",
".",
"_response",
"[",
"'status'",
"]",
"[",
"'msg'",
"]",
"else",
":",
"return",
"''"
]
| Returns the message of the status or an empty string if it does not exist
:return:
Status message of the response | [
"Returns",
"the",
"message",
"of",
"the",
"status",
"or",
"an",
"empty",
"string",
"if",
"it",
"does",
"not",
"exist"
]
| python | train |
materialsproject/pymatgen | pymatgen/io/vasp/inputs.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L526-L532 | def write_file(self, filename, **kwargs):
"""
Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly.
"""
with zopen(filename, "wt") as f:
f.write(self.get_string(**kwargs)) | [
"def",
"write_file",
"(",
"self",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"zopen",
"(",
"filename",
",",
"\"wt\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"get_string",
"(",
"*",
"*",
"kwargs",
")",
")"
]
| Writes POSCAR to a file. The supported kwargs are the same as those for
the Poscar.get_string method and are passed through directly. | [
"Writes",
"POSCAR",
"to",
"a",
"file",
".",
"The",
"supported",
"kwargs",
"are",
"the",
"same",
"as",
"those",
"for",
"the",
"Poscar",
".",
"get_string",
"method",
"and",
"are",
"passed",
"through",
"directly",
"."
]
| python | train |
mitsei/dlkit | dlkit/json_/resource/objects.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/objects.py#L255-L269 | def set_avatar(self, asset_id):
"""Sets the avatar asset.
arg: asset_id (osid.id.Id): an asset ``Id``
raise: InvalidArgument - ``asset_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_avatar_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(asset_id):
raise errors.InvalidArgument()
self._my_map['avatarId'] = str(asset_id) | [
"def",
"set_avatar",
"(",
"self",
",",
"asset_id",
")",
":",
"# Implemented from template for osid.resource.ResourceForm.set_avatar_template",
"if",
"self",
".",
"get_avatar_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"errors",
".",
"NoAccess",
"(",
")",
"if",
"not",
"self",
".",
"_is_valid_id",
"(",
"asset_id",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
")",
"self",
".",
"_my_map",
"[",
"'avatarId'",
"]",
"=",
"str",
"(",
"asset_id",
")"
]
| Sets the avatar asset.
arg: asset_id (osid.id.Id): an asset ``Id``
raise: InvalidArgument - ``asset_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | [
"Sets",
"the",
"avatar",
"asset",
"."
]
| python | train |
google/grumpy | third_party/stdlib/traceback.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/traceback.py#L16-L25 | def print_list(extracted_list, file=None):
"""Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file."""
if file is None:
file = sys.stderr
for filename, lineno, name, line in extracted_list:
_print(file,
' File "%s", line %d, in %s' % (filename,lineno,name))
if line:
_print(file, ' %s' % line.strip()) | [
"def",
"print_list",
"(",
"extracted_list",
",",
"file",
"=",
"None",
")",
":",
"if",
"file",
"is",
"None",
":",
"file",
"=",
"sys",
".",
"stderr",
"for",
"filename",
",",
"lineno",
",",
"name",
",",
"line",
"in",
"extracted_list",
":",
"_print",
"(",
"file",
",",
"' File \"%s\", line %d, in %s'",
"%",
"(",
"filename",
",",
"lineno",
",",
"name",
")",
")",
"if",
"line",
":",
"_print",
"(",
"file",
",",
"' %s'",
"%",
"line",
".",
"strip",
"(",
")",
")"
]
| Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file. | [
"Print",
"the",
"list",
"of",
"tuples",
"as",
"returned",
"by",
"extract_tb",
"()",
"or",
"extract_stack",
"()",
"as",
"a",
"formatted",
"stack",
"trace",
"to",
"the",
"given",
"file",
"."
]
| python | valid |
xzased/lvm2py | lvm2py/vg.py | https://github.com/xzased/lvm2py/blob/34ce69304531a474c2fe4a4009ca445a8c103cd6/lvm2py/vg.py#L137-L144 | def extent_count(self):
"""
Returns the volume group extent count.
"""
self.open()
count = lvm_vg_get_extent_count(self.handle)
self.close()
return count | [
"def",
"extent_count",
"(",
"self",
")",
":",
"self",
".",
"open",
"(",
")",
"count",
"=",
"lvm_vg_get_extent_count",
"(",
"self",
".",
"handle",
")",
"self",
".",
"close",
"(",
")",
"return",
"count"
]
| Returns the volume group extent count. | [
"Returns",
"the",
"volume",
"group",
"extent",
"count",
"."
]
| python | train |
tjkessler/PyGenetics | pygenetics/ga_core.py | https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/ga_core.py#L145-L155 | def ave_cost_fn_val(self):
    '''Returns average cost function return value for all members'''
    if len(self.__members) != 0:
        if self.__num_processes > 1:
            members = [m.get() for m in self.__members]
        else:
            members = self.__members
        return sum(m.cost_fn_val for m in members) / len(members)
    else:
        return None | [
"def",
"ave_cost_fn_val",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"__members",
")",
"!=",
"0",
":",
"if",
"self",
".",
"__num_processes",
">",
"1",
":",
"members",
"=",
"[",
"m",
".",
"get",
"(",
")",
"for",
"m",
"in",
"self",
".",
"__members",
"]",
"else",
":",
"members",
"=",
"self",
".",
"__members",
"return",
"sum",
"(",
"m",
".",
"cost_fn_val",
"for",
"m",
"in",
"members",
")",
"/",
"len",
"(",
"members",
")",
"else",
":",
"return",
"None"
]
| Returns average cost function return value for all members | [
"Returns",
"average",
"cost",
"function",
"return",
"value",
"for",
"all",
"members"
]
| python | test |
Bogdanp/dramatiq | dramatiq/results/backend.py | https://github.com/Bogdanp/dramatiq/blob/a8cc2728478e794952a5a50c3fb19ec455fe91b6/dramatiq/results/backend.py#L136-L143 | def _store(self, message_key: str, result: Result, ttl: int) -> None: # pragma: no cover
"""Store a result in the backend. Subclasses may implement
this method if they want to use the default implementation of
set_result.
"""
raise NotImplementedError("%(classname)r does not implement _store()" % {
"classname": type(self).__name__,
}) | [
"def",
"_store",
"(",
"self",
",",
"message_key",
":",
"str",
",",
"result",
":",
"Result",
",",
"ttl",
":",
"int",
")",
"->",
"None",
":",
"# pragma: no cover",
"raise",
"NotImplementedError",
"(",
"\"%(classname)r does not implement _store()\"",
"%",
"{",
"\"classname\"",
":",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"}",
")"
]
| Store a result in the backend. Subclasses may implement
this method if they want to use the default implementation of
set_result. | [
"Store",
"a",
"result",
"in",
"the",
"backend",
".",
"Subclasses",
"may",
"implement",
"this",
"method",
"if",
"they",
"want",
"to",
"use",
"the",
"default",
"implementation",
"of",
"set_result",
"."
]
| python | train |
twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_cumulative_statistics.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_cumulative_statistics.py#L93-L107 | def get_instance(self, payload):
"""
Build an instance of TaskQueueCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
"""
return TaskQueueCumulativeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_queue_sid=self._solution['task_queue_sid'],
) | [
"def",
"get_instance",
"(",
"self",
",",
"payload",
")",
":",
"return",
"TaskQueueCumulativeStatisticsInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"workspace_sid",
"=",
"self",
".",
"_solution",
"[",
"'workspace_sid'",
"]",
",",
"task_queue_sid",
"=",
"self",
".",
"_solution",
"[",
"'task_queue_sid'",
"]",
",",
")"
]
| Build an instance of TaskQueueCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance | [
"Build",
"an",
"instance",
"of",
"TaskQueueCumulativeStatisticsInstance"
]
| python | train |
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py#L2667-L2681 | def confd_state_internal_callpoints_authorization_callbacks_registration_type_range_path(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
callpoints = ET.SubElement(internal, "callpoints")
authorization_callbacks = ET.SubElement(callpoints, "authorization-callbacks")
registration_type = ET.SubElement(authorization_callbacks, "registration-type")
range = ET.SubElement(registration_type, "range")
path = ET.SubElement(range, "path")
path.text = kwargs.pop('path')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"confd_state_internal_callpoints_authorization_callbacks_registration_type_range_path",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"confd_state",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"confd-state\"",
",",
"xmlns",
"=",
"\"http://tail-f.com/yang/confd-monitoring\"",
")",
"internal",
"=",
"ET",
".",
"SubElement",
"(",
"confd_state",
",",
"\"internal\"",
")",
"callpoints",
"=",
"ET",
".",
"SubElement",
"(",
"internal",
",",
"\"callpoints\"",
")",
"authorization_callbacks",
"=",
"ET",
".",
"SubElement",
"(",
"callpoints",
",",
"\"authorization-callbacks\"",
")",
"registration_type",
"=",
"ET",
".",
"SubElement",
"(",
"authorization_callbacks",
",",
"\"registration-type\"",
")",
"range",
"=",
"ET",
".",
"SubElement",
"(",
"registration_type",
",",
"\"range\"",
")",
"path",
"=",
"ET",
".",
"SubElement",
"(",
"range",
",",
"\"path\"",
")",
"path",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'path'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| Auto Generated Code | [
"Auto",
"Generated",
"Code"
]
| python | train |
DataBiosphere/toil | src/toil/utils/toilStatus.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/utils/toilStatus.py#L239-L254 | def fetchUserJobs(self, jobs):
"""
Takes a user input array of jobs, verifies that they are in the jobStore
and returns the array of jobsToReport.
:param list jobs: A list of jobs to be verified.
:returns jobsToReport: A list of jobs which are verified to be in the jobStore.
"""
jobsToReport = []
for jobID in jobs:
try:
jobsToReport.append(self.jobStore.load(jobID))
except JobException:
print('The job %s could not be found.' % jobID, file=sys.stderr)
raise
return jobsToReport | [
"def",
"fetchUserJobs",
"(",
"self",
",",
"jobs",
")",
":",
"jobsToReport",
"=",
"[",
"]",
"for",
"jobID",
"in",
"jobs",
":",
"try",
":",
"jobsToReport",
".",
"append",
"(",
"self",
".",
"jobStore",
".",
"load",
"(",
"jobID",
")",
")",
"except",
"JobException",
":",
"print",
"(",
"'The job %s could not be found.'",
"%",
"jobID",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"raise",
"return",
"jobsToReport"
]
| Takes a user input array of jobs, verifies that they are in the jobStore
and returns the array of jobsToReport.
:param list jobs: A list of jobs to be verified.
:returns jobsToReport: A list of jobs which are verified to be in the jobStore. | [
"Takes",
"a",
"user",
"input",
"array",
"of",
"jobs",
"verifies",
"that",
"they",
"are",
"in",
"the",
"jobStore",
"and",
"returns",
"the",
"array",
"of",
"jobsToReport",
"."
]
| python | train |
guaix-ucm/numina | numina/modeling/gaussbox.py | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/modeling/gaussbox.py#L32-L47 | def gauss_box_model_deriv(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
"""Derivative of the integral of a Gaussian profile."""
z = (x - mean) / stddev
z2 = z + hpix / stddev
z1 = z - hpix / stddev
da = norm.cdf(z2) - norm.cdf(z1)
fp2 = norm_pdf_t(z2)
fp1 = norm_pdf_t(z1)
dl = -amplitude / stddev * (fp2 - fp1)
ds = -amplitude / stddev * (fp2 * z2 - fp1 * z1)
dd = amplitude / stddev * (fp2 + fp1)
return da, dl, ds, dd | [
"def",
"gauss_box_model_deriv",
"(",
"x",
",",
"amplitude",
"=",
"1.0",
",",
"mean",
"=",
"0.0",
",",
"stddev",
"=",
"1.0",
",",
"hpix",
"=",
"0.5",
")",
":",
"z",
"=",
"(",
"x",
"-",
"mean",
")",
"/",
"stddev",
"z2",
"=",
"z",
"+",
"hpix",
"/",
"stddev",
"z1",
"=",
"z",
"-",
"hpix",
"/",
"stddev",
"da",
"=",
"norm",
".",
"cdf",
"(",
"z2",
")",
"-",
"norm",
".",
"cdf",
"(",
"z1",
")",
"fp2",
"=",
"norm_pdf_t",
"(",
"z2",
")",
"fp1",
"=",
"norm_pdf_t",
"(",
"z1",
")",
"dl",
"=",
"-",
"amplitude",
"/",
"stddev",
"*",
"(",
"fp2",
"-",
"fp1",
")",
"ds",
"=",
"-",
"amplitude",
"/",
"stddev",
"*",
"(",
"fp2",
"*",
"z2",
"-",
"fp1",
"*",
"z1",
")",
"dd",
"=",
"amplitude",
"/",
"stddev",
"*",
"(",
"fp2",
"+",
"fp1",
")",
"return",
"da",
",",
"dl",
",",
"ds",
",",
"dd"
]
| Derivative of the integral of a Gaussian profile. | [
"Derivative",
"of",
"the",
"integral",
"of",
"a",
"Gaussian",
"profile",
"."
]
| python | train |
VonStruddle/PyHunter | pyhunter/pyhunter.py | https://github.com/VonStruddle/PyHunter/blob/e14882d22527102515458cddeb8e0aa1c02da549/pyhunter/pyhunter.py#L255-L309 | def create_lead(self, first_name, last_name, email=None, position=None,
company=None, company_industry=None, company_size=None,
confidence_score=None, website=None, country_code=None,
postal_code=None, source=None, linkedin_url=None,
phone_number=None, twitter=None, leads_list_id=None):
"""
Create a lead on your account.
:param first_name: The first name of the lead to create. Must be
defined.
:param last_name: The last name of the lead to create. Must be defined.
:param email: The email of the lead to create.
:param position: The professional position of the lead to create.
:param company: The company of the lead to create.
:param company_industry: The type of industry of the company where the
lead works.
:param company_size: The size of the company where the lead works.
:param confidence_score: The confidence score of the lead's email.
:param website: The website of the lead's company.
:param country_code: The country code of the lead's company.
:param postal_code: The postal code of the lead's company.
:param source: The source of the lead's email.
:param linkedin_url: The URL of the lead's LinkedIn profile.
:param phone_number: The phone number of the lead to create.
:param twitter: The lead's Twitter account.
:param leads_list_id: The id of the leads list where to save the new
lead.
:return: The newly created lead as a dict.
"""
args = locals()
payload = dict((key, value) for key, value in args.items() if value
is not None)
payload.pop('self')
params = self.base_params
endpoint = self.base_endpoint.format('leads')
return self._query_hunter(endpoint, params, 'post', payload) | [
"def",
"create_lead",
"(",
"self",
",",
"first_name",
",",
"last_name",
",",
"email",
"=",
"None",
",",
"position",
"=",
"None",
",",
"company",
"=",
"None",
",",
"company_industry",
"=",
"None",
",",
"company_size",
"=",
"None",
",",
"confidence_score",
"=",
"None",
",",
"website",
"=",
"None",
",",
"country_code",
"=",
"None",
",",
"postal_code",
"=",
"None",
",",
"source",
"=",
"None",
",",
"linkedin_url",
"=",
"None",
",",
"phone_number",
"=",
"None",
",",
"twitter",
"=",
"None",
",",
"leads_list_id",
"=",
"None",
")",
":",
"args",
"=",
"locals",
"(",
")",
"payload",
"=",
"dict",
"(",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"args",
".",
"items",
"(",
")",
"if",
"value",
"is",
"not",
"None",
")",
"payload",
".",
"pop",
"(",
"'self'",
")",
"params",
"=",
"self",
".",
"base_params",
"endpoint",
"=",
"self",
".",
"base_endpoint",
".",
"format",
"(",
"'leads'",
")",
"return",
"self",
".",
"_query_hunter",
"(",
"endpoint",
",",
"params",
",",
"'post'",
",",
"payload",
")"
]
| Create a lead on your account.
:param first_name: The first name of the lead to create. Must be
defined.
:param last_name: The last name of the lead to create. Must be defined.
:param email: The email of the lead to create.
:param position: The professional position of the lead to create.
:param company: The company of the lead to create.
:param company_industry: The type of industry of the company where the
lead works.
:param company_size: The size of the company where the lead works.
:param confidence_score: The confidence score of the lead's email.
:param website: The website of the lead's company.
:param country_code: The country code of the lead's company.
:param postal_code: The postal code of the lead's company.
:param source: The source of the lead's email.
:param linkedin_url: The URL of the lead's LinkedIn profile.
:param phone_number: The phone number of the lead to create.
:param twitter: The lead's Twitter account.
:param leads_list_id: The id of the leads list where to save the new
lead.
:return: The newly created lead as a dict. | [
"Create",
"a",
"lead",
"on",
"your",
"account",
"."
]
| python | train |
klmitch/turnstile | turnstile/remote.py | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/remote.py#L171-L210 | def remote(func):
"""
Decorator to mark a function as invoking a remote procedure call.
When invoked in server mode, the function will be called; when
invoked in client mode, an RPC will be initiated.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.mode == 'server':
# In server mode, call the function
return func(self, *args, **kwargs)
# Make sure we're connected
if not self.conn:
self.connect()
# Call the remote function
self.conn.send('CALL', func.__name__, args, kwargs)
# Receive the response
cmd, payload = self.conn.recv()
if cmd == 'ERR':
self.close()
raise Exception("Catastrophic error from server: %s" %
payload[0])
elif cmd == 'EXC':
exc_type = utils.find_entrypoint(None, payload[0])
raise exc_type(payload[1])
elif cmd != 'RES':
self.close()
raise Exception("Invalid command response from server: %s" % cmd)
return payload[0]
# Mark it a callable
wrapper._remote = True
# Return the wrapped function
return wrapper | [
"def",
"remote",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'server'",
":",
"# In server mode, call the function",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Make sure we're connected",
"if",
"not",
"self",
".",
"conn",
":",
"self",
".",
"connect",
"(",
")",
"# Call the remote function",
"self",
".",
"conn",
".",
"send",
"(",
"'CALL'",
",",
"func",
".",
"__name__",
",",
"args",
",",
"kwargs",
")",
"# Receive the response",
"cmd",
",",
"payload",
"=",
"self",
".",
"conn",
".",
"recv",
"(",
")",
"if",
"cmd",
"==",
"'ERR'",
":",
"self",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"\"Catastrophic error from server: %s\"",
"%",
"payload",
"[",
"0",
"]",
")",
"elif",
"cmd",
"==",
"'EXC'",
":",
"exc_type",
"=",
"utils",
".",
"find_entrypoint",
"(",
"None",
",",
"payload",
"[",
"0",
"]",
")",
"raise",
"exc_type",
"(",
"payload",
"[",
"1",
"]",
")",
"elif",
"cmd",
"!=",
"'RES'",
":",
"self",
".",
"close",
"(",
")",
"raise",
"Exception",
"(",
"\"Invalid command response from server: %s\"",
"%",
"cmd",
")",
"return",
"payload",
"[",
"0",
"]",
"# Mark it a callable",
"wrapper",
".",
"_remote",
"=",
"True",
"# Return the wrapped function",
"return",
"wrapper"
]
| Decorator to mark a function as invoking a remote procedure call.
When invoked in server mode, the function will be called; when
invoked in client mode, an RPC will be initiated. | [
"Decorator",
"to",
"mark",
"a",
"function",
"as",
"invoking",
"a",
"remote",
"procedure",
"call",
".",
"When",
"invoked",
"in",
"server",
"mode",
"the",
"function",
"will",
"be",
"called",
";",
"when",
"invoked",
"in",
"client",
"mode",
"an",
"RPC",
"will",
"be",
"initiated",
"."
]
| python | train |
riga/tfdeploy | tfdeploy.py | https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1840-L1845 | def Mean(a, axis, keep_dims):
"""
Mean reduction op.
"""
return np.mean(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis),
keepdims=keep_dims), | [
"def",
"Mean",
"(",
"a",
",",
"axis",
",",
"keep_dims",
")",
":",
"return",
"np",
".",
"mean",
"(",
"a",
",",
"axis",
"=",
"axis",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"np",
".",
"ndarray",
")",
"else",
"tuple",
"(",
"axis",
")",
",",
"keepdims",
"=",
"keep_dims",
")",
","
]
| Mean reduction op. | [
"Mean",
"reduction",
"op",
"."
]
| python | train |
fastai/fastai | fastai/vision/image.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L175-L178 | def coord(self, func:CoordFunc, *args, **kwargs)->'Image':
"Equivalent to `image.flow = func(image.flow, image.size)`."
self.flow = func(self.flow, *args, **kwargs)
return self | [
"def",
"coord",
"(",
"self",
",",
"func",
":",
"CoordFunc",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"'Image'",
":",
"self",
".",
"flow",
"=",
"func",
"(",
"self",
".",
"flow",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"self"
]
| Equivalent to `image.flow = func(image.flow, image.size)`. | [
"Equivalent",
"to",
"image",
".",
"flow",
"=",
"func",
"(",
"image",
".",
"flow",
"image",
".",
"size",
")",
"."
]
| python | train |
Shoobx/xmldiff | xmldiff/_diff_match_patch_py3.py | https://github.com/Shoobx/xmldiff/blob/ec7835bce9ba69ff4ce03ab6c11397183b6f8411/xmldiff/_diff_match_patch_py3.py#L1242-L1346 | def match_bitap(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Python doesn't have a maxint limit, so ignore this check.
#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
# raise ValueError("Pattern too long for this application.")
# Initialise the alphabet.
s = self.match_alphabet(pattern)
def match_bitapScore(e, x):
"""Compute and return the score for a match with e errors and x location.
Accesses loc and pattern through being a closure.
Args:
e: Number of errors in match.
x: Location of match.
Returns:
Overall score for match (0.0 = good, 1.0 = bad).
"""
accuracy = float(e) / len(pattern)
proximity = abs(loc - x)
if not self.Match_Distance:
# Dodge divide by zero error.
return proximity and 1.0 or accuracy
return accuracy + (proximity / float(self.Match_Distance))
# Highest score beyond which we give up.
score_threshold = self.Match_Threshold
# Is there a nearby exact match? (speedup)
best_loc = text.find(pattern, loc)
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# What about in the other direction? (speedup)
best_loc = text.rfind(pattern, loc + len(pattern))
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# Initialise the bit arrays.
matchmask = 1 << (len(pattern) - 1)
best_loc = -1
bin_max = len(pattern) + len(text)
# Empty initialization added to appease pychecker.
last_rd = None
for d in range(len(pattern)):
# Scan for the best match each iteration allows for one more error.
# Run a binary search to determine how far from 'loc' we can stray at
# this error level.
bin_min = 0
bin_mid = bin_max
while bin_min < bin_mid:
if match_bitapScore(d, loc + bin_mid) <= score_threshold:
bin_min = bin_mid
else:
bin_max = bin_mid
bin_mid = (bin_max - bin_min) // 2 + bin_min
# Use the result from this iteration as the maximum for the next.
bin_max = bin_mid
start = max(1, loc - bin_mid + 1)
finish = min(loc + bin_mid, len(text)) + len(pattern)
rd = [0] * (finish + 2)
rd[finish + 1] = (1 << d) - 1
for j in range(finish, start - 1, -1):
if len(text) <= j - 1:
# Out of range.
charMatch = 0
else:
charMatch = s.get(text[j - 1], 0)
if d == 0: # First pass: exact match.
rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
else: # Subsequent passes: fuzzy match.
rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
if rd[j] & matchmask:
score = match_bitapScore(d, j - 1)
# This match will almost certainly be better than any existing match.
# But check anyway.
if score <= score_threshold:
# Told you so.
score_threshold = score
best_loc = j - 1
if best_loc > loc:
# When passing loc, don't exceed our current distance from loc.
start = max(1, 2 * loc - best_loc)
else:
# Already passed loc, downhill from here on in.
break
# No hope for a (better) match at greater error levels.
if match_bitapScore(d + 1, loc) > score_threshold:
break
last_rd = rd
return best_loc | [
"def",
"match_bitap",
"(",
"self",
",",
"text",
",",
"pattern",
",",
"loc",
")",
":",
"# Python doesn't have a maxint limit, so ignore this check.",
"#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:",
"# raise ValueError(\"Pattern too long for this application.\")",
"# Initialise the alphabet.",
"s",
"=",
"self",
".",
"match_alphabet",
"(",
"pattern",
")",
"def",
"match_bitapScore",
"(",
"e",
",",
"x",
")",
":",
"\"\"\"Compute and return the score for a match with e errors and x location.\n Accesses loc and pattern through being a closure.\n\n Args:\n e: Number of errors in match.\n x: Location of match.\n\n Returns:\n Overall score for match (0.0 = good, 1.0 = bad).\n \"\"\"",
"accuracy",
"=",
"float",
"(",
"e",
")",
"/",
"len",
"(",
"pattern",
")",
"proximity",
"=",
"abs",
"(",
"loc",
"-",
"x",
")",
"if",
"not",
"self",
".",
"Match_Distance",
":",
"# Dodge divide by zero error.",
"return",
"proximity",
"and",
"1.0",
"or",
"accuracy",
"return",
"accuracy",
"+",
"(",
"proximity",
"/",
"float",
"(",
"self",
".",
"Match_Distance",
")",
")",
"# Highest score beyond which we give up.",
"score_threshold",
"=",
"self",
".",
"Match_Threshold",
"# Is there a nearby exact match? (speedup)",
"best_loc",
"=",
"text",
".",
"find",
"(",
"pattern",
",",
"loc",
")",
"if",
"best_loc",
"!=",
"-",
"1",
":",
"score_threshold",
"=",
"min",
"(",
"match_bitapScore",
"(",
"0",
",",
"best_loc",
")",
",",
"score_threshold",
")",
"# What about in the other direction? (speedup)",
"best_loc",
"=",
"text",
".",
"rfind",
"(",
"pattern",
",",
"loc",
"+",
"len",
"(",
"pattern",
")",
")",
"if",
"best_loc",
"!=",
"-",
"1",
":",
"score_threshold",
"=",
"min",
"(",
"match_bitapScore",
"(",
"0",
",",
"best_loc",
")",
",",
"score_threshold",
")",
"# Initialise the bit arrays.",
"matchmask",
"=",
"1",
"<<",
"(",
"len",
"(",
"pattern",
")",
"-",
"1",
")",
"best_loc",
"=",
"-",
"1",
"bin_max",
"=",
"len",
"(",
"pattern",
")",
"+",
"len",
"(",
"text",
")",
"# Empty initialization added to appease pychecker.",
"last_rd",
"=",
"None",
"for",
"d",
"in",
"range",
"(",
"len",
"(",
"pattern",
")",
")",
":",
"# Scan for the best match each iteration allows for one more error.",
"# Run a binary search to determine how far from 'loc' we can stray at",
"# this error level.",
"bin_min",
"=",
"0",
"bin_mid",
"=",
"bin_max",
"while",
"bin_min",
"<",
"bin_mid",
":",
"if",
"match_bitapScore",
"(",
"d",
",",
"loc",
"+",
"bin_mid",
")",
"<=",
"score_threshold",
":",
"bin_min",
"=",
"bin_mid",
"else",
":",
"bin_max",
"=",
"bin_mid",
"bin_mid",
"=",
"(",
"bin_max",
"-",
"bin_min",
")",
"//",
"2",
"+",
"bin_min",
"# Use the result from this iteration as the maximum for the next.",
"bin_max",
"=",
"bin_mid",
"start",
"=",
"max",
"(",
"1",
",",
"loc",
"-",
"bin_mid",
"+",
"1",
")",
"finish",
"=",
"min",
"(",
"loc",
"+",
"bin_mid",
",",
"len",
"(",
"text",
")",
")",
"+",
"len",
"(",
"pattern",
")",
"rd",
"=",
"[",
"0",
"]",
"*",
"(",
"finish",
"+",
"2",
")",
"rd",
"[",
"finish",
"+",
"1",
"]",
"=",
"(",
"1",
"<<",
"d",
")",
"-",
"1",
"for",
"j",
"in",
"range",
"(",
"finish",
",",
"start",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"len",
"(",
"text",
")",
"<=",
"j",
"-",
"1",
":",
"# Out of range.",
"charMatch",
"=",
"0",
"else",
":",
"charMatch",
"=",
"s",
".",
"get",
"(",
"text",
"[",
"j",
"-",
"1",
"]",
",",
"0",
")",
"if",
"d",
"==",
"0",
":",
"# First pass: exact match.",
"rd",
"[",
"j",
"]",
"=",
"(",
"(",
"rd",
"[",
"j",
"+",
"1",
"]",
"<<",
"1",
")",
"|",
"1",
")",
"&",
"charMatch",
"else",
":",
"# Subsequent passes: fuzzy match.",
"rd",
"[",
"j",
"]",
"=",
"(",
"(",
"(",
"rd",
"[",
"j",
"+",
"1",
"]",
"<<",
"1",
")",
"|",
"1",
")",
"&",
"charMatch",
")",
"|",
"(",
"(",
"(",
"last_rd",
"[",
"j",
"+",
"1",
"]",
"|",
"last_rd",
"[",
"j",
"]",
")",
"<<",
"1",
")",
"|",
"1",
")",
"|",
"last_rd",
"[",
"j",
"+",
"1",
"]",
"if",
"rd",
"[",
"j",
"]",
"&",
"matchmask",
":",
"score",
"=",
"match_bitapScore",
"(",
"d",
",",
"j",
"-",
"1",
")",
"# This match will almost certainly be better than any existing match.",
"# But check anyway.",
"if",
"score",
"<=",
"score_threshold",
":",
"# Told you so.",
"score_threshold",
"=",
"score",
"best_loc",
"=",
"j",
"-",
"1",
"if",
"best_loc",
">",
"loc",
":",
"# When passing loc, don't exceed our current distance from loc.",
"start",
"=",
"max",
"(",
"1",
",",
"2",
"*",
"loc",
"-",
"best_loc",
")",
"else",
":",
"# Already passed loc, downhill from here on in.",
"break",
"# No hope for a (better) match at greater error levels.",
"if",
"match_bitapScore",
"(",
"d",
"+",
"1",
",",
"loc",
")",
">",
"score_threshold",
":",
"break",
"last_rd",
"=",
"rd",
"return",
"best_loc"
]
| Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1. | [
"Locate",
"the",
"best",
"instance",
"of",
"pattern",
"in",
"text",
"near",
"loc",
"using",
"the",
"Bitap",
"algorithm",
"."
]
| python | train |
noxdafox/clipspy | clips/classes.py | https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L204-L219 | def make_instance(self, command):
"""Create and initialize an instance of a user-defined class.
command must be a string in the form:
(<instance-name> of <class-name> <slot-override>*)
<slot-override> :== (<slot-name> <constant>*)
Python equivalent of the CLIPS make-instance command.
"""
ist = lib.EnvMakeInstance(self._env, command.encode())
if ist == ffi.NULL:
raise CLIPSError(self._env)
return Instance(self._env, ist) | [
"def",
"make_instance",
"(",
"self",
",",
"command",
")",
":",
"ist",
"=",
"lib",
".",
"EnvMakeInstance",
"(",
"self",
".",
"_env",
",",
"command",
".",
"encode",
"(",
")",
")",
"if",
"ist",
"==",
"ffi",
".",
"NULL",
":",
"raise",
"CLIPSError",
"(",
"self",
".",
"_env",
")",
"return",
"Instance",
"(",
"self",
".",
"_env",
",",
"ist",
")"
]
| Create and initialize an instance of a user-defined class.
command must be a string in the form:
(<instance-name> of <class-name> <slot-override>*)
<slot-override> :== (<slot-name> <constant>*)
Python equivalent of the CLIPS make-instance command. | [
"Create",
"and",
"initialize",
"an",
"instance",
"of",
"a",
"user",
"-",
"defined",
"class",
"."
]
| python | train |
wbond/asn1crypto | asn1crypto/core.py | https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L2620-L2634 | def _copy(self, other, copy_func):
"""
Copies the contents of another ParsableOctetString object to itself
:param object:
Another instance of the same class
:param copy_func:
    A reference of copy.copy() or copy.deepcopy() to use when copying
lists, dicts and objects
"""
super(ParsableOctetString, self)._copy(other, copy_func)
self._bytes = other._bytes
self._parsed = copy_func(other._parsed) | [
"def",
"_copy",
"(",
"self",
",",
"other",
",",
"copy_func",
")",
":",
"super",
"(",
"ParsableOctetString",
",",
"self",
")",
".",
"_copy",
"(",
"other",
",",
"copy_func",
")",
"self",
".",
"_bytes",
"=",
"other",
".",
"_bytes",
"self",
".",
"_parsed",
"=",
"copy_func",
"(",
"other",
".",
"_parsed",
")"
]
| Copies the contents of another ParsableOctetString object to itself
:param object:
Another instance of the same class
:param copy_func:
    A reference of copy.copy() or copy.deepcopy() to use when copying
lists, dicts and objects | [
"Copies",
"the",
"contents",
"of",
"another",
"ParsableOctetString",
"object",
"to",
"itself"
]
| python | train |
kevin-brown/drf-json-api | rest_framework_json_api/renderers.py | https://github.com/kevin-brown/drf-json-api/blob/664643bd02c0d92eadbd1f8c9d8507adf0538df6/rest_framework_json_api/renderers.py#L121-L169 | def wrap_field_error(self, data, renderer_context):
"""
Convert field error native data to the JSON API Error format
See the note about the JSON API Error format on `wrap_error`.
The native format for field errors is a dictionary where the keys are
field names (or 'non_field_errors' for additional errors) and the
values are a list of error strings:
{
"min": [
"min must be greater than 0.",
"min must be an even number."
],
"max": ["max must be a positive number."],
"non_field_errors": [
"Select either a range or an enumeration, not both."]
}
It is rendered into this JSON API error format:
{
"errors": [{
"status": "400",
"path": "/min",
"detail": "min must be greater than 0."
},{
"status": "400",
"path": "/min",
"detail": "min must be an even number."
},{
"status": "400",
"path": "/max",
"detail": "max must be a positive number."
},{
"status": "400",
"path": "/-",
"detail": "Select either a range or an enumeration, not both."
}]
}
"""
response = renderer_context.get("response", None)
status_code = response and response.status_code
if status_code != 400:
raise WrapperNotApplicable('Status code must be 400.')
return self.wrap_error(
data, renderer_context, keys_are_fields=True, issue_is_title=False) | [
"def",
"wrap_field_error",
"(",
"self",
",",
"data",
",",
"renderer_context",
")",
":",
"response",
"=",
"renderer_context",
".",
"get",
"(",
"\"response\"",
",",
"None",
")",
"status_code",
"=",
"response",
"and",
"response",
".",
"status_code",
"if",
"status_code",
"!=",
"400",
":",
"raise",
"WrapperNotApplicable",
"(",
"'Status code must be 400.'",
")",
"return",
"self",
".",
"wrap_error",
"(",
"data",
",",
"renderer_context",
",",
"keys_are_fields",
"=",
"True",
",",
"issue_is_title",
"=",
"False",
")"
]
| Convert field error native data to the JSON API Error format
See the note about the JSON API Error format on `wrap_error`.
The native format for field errors is a dictionary where the keys are
field names (or 'non_field_errors' for additional errors) and the
values are a list of error strings:
{
"min": [
"min must be greater than 0.",
"min must be an even number."
],
"max": ["max must be a positive number."],
"non_field_errors": [
"Select either a range or an enumeration, not both."]
}
It is rendered into this JSON API error format:
{
"errors": [{
"status": "400",
"path": "/min",
"detail": "min must be greater than 0."
},{
"status": "400",
"path": "/min",
"detail": "min must be an even number."
},{
"status": "400",
"path": "/max",
"detail": "max must be a positive number."
},{
"status": "400",
"path": "/-",
"detail": "Select either a range or an enumeration, not both."
}]
} | [
"Convert",
"field",
"error",
"native",
"data",
"to",
"the",
"JSON",
"API",
"Error",
"format"
]
| python | train |
manns/pyspread | pyspread/src/gui/_dialogs.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_dialogs.py#L580-L590 | def get_digest_keys(self):
"""Returns a list of the type choices"""
digest_keys = []
for col in xrange(self.GetNumberCols()):
digest_key = self.GetCellValue(self.has_header, col)
if digest_key == "":
digest_key = self.digest_types.keys()[0]
digest_keys.append(digest_key)
return digest_keys | [
"def",
"get_digest_keys",
"(",
"self",
")",
":",
"digest_keys",
"=",
"[",
"]",
"for",
"col",
"in",
"xrange",
"(",
"self",
".",
"GetNumberCols",
"(",
")",
")",
":",
"digest_key",
"=",
"self",
".",
"GetCellValue",
"(",
"self",
".",
"has_header",
",",
"col",
")",
"if",
"digest_key",
"==",
"\"\"",
":",
"digest_key",
"=",
"self",
".",
"digest_types",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"digest_keys",
".",
"append",
"(",
"digest_key",
")",
"return",
"digest_keys"
]
| Returns a list of the type choices | [
"Returns",
"a",
"list",
"of",
"the",
"type",
"choices"
]
| python | train |
materialsproject/pymatgen | pymatgen/io/abinit/abitimer.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abitimer.py#L63-L81 | def walk(cls, top=".", ext=".abo"):
"""
Scan directory tree starting from top, look for files with extension `ext` and
parse timing data.
Return: (parser, paths, okfiles)
where `parser` is the new object, `paths` is the list of files found and `okfiles`
is the list of files that have been parsed successfully.
(okfiles == paths) if all files have been parsed.
"""
paths = []
for root, dirs, files in os.walk(top):
for f in files:
if f.endswith(ext):
paths.append(os.path.join(root, f))
parser = cls()
okfiles = parser.parse(paths)
return parser, paths, okfiles | [
"def",
"walk",
"(",
"cls",
",",
"top",
"=",
"\".\"",
",",
"ext",
"=",
"\".abo\"",
")",
":",
"paths",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"top",
")",
":",
"for",
"f",
"in",
"files",
":",
"if",
"f",
".",
"endswith",
"(",
"ext",
")",
":",
"paths",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
")",
"parser",
"=",
"cls",
"(",
")",
"okfiles",
"=",
"parser",
".",
"parse",
"(",
"paths",
")",
"return",
"parser",
",",
"paths",
",",
"okfiles"
]
| Scan directory tree starting from top, look for files with extension `ext` and
parse timing data.
Return: (parser, paths, okfiles)
where `parser` is the new object, `paths` is the list of files found and `okfiles`
is the list of files that have been parsed successfully.
(okfiles == paths) if all files have been parsed. | [
"Scan",
"directory",
"tree",
"starting",
"from",
"top",
"look",
"for",
"files",
"with",
"extension",
"ext",
"and",
"parse",
"timing",
"data",
"."
]
| python | train |
saltstack/salt | salt/runners/smartos_vmadm.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/smartos_vmadm.py#L36-L133 | def _action(action='get', search=None, one=True, force=False):
'''
Multi action helper for start, stop, get, ...
'''
vms = {}
matched_vms = []
client = salt.client.get_local_client(__opts__['conf_file'])
## lookup vms
try:
vmadm_args = {}
vmadm_args['order'] = 'uuid,alias,hostname,state'
if '=' in search:
vmadm_args['search'] = search
for cn in client.cmd_iter('G@virtual:physical and G@os:smartos',
'vmadm.list', kwarg=vmadm_args,
tgt_type='compound'):
if not cn:
continue
node = next(six.iterkeys(cn))
if not isinstance(cn[node], dict) or \
'ret' not in cn[node] or \
not isinstance(cn[node]['ret'], dict):
continue
for vm in cn[node]['ret']:
vmcfg = cn[node]['ret'][vm]
vmcfg['node'] = node
vms[vm] = vmcfg
except SaltClientError as client_error:
pass
## check if we have vms
if not vms:
return {'Error': 'No vms found.'}
## simple search
if '=' not in search:
loop_pass = 0
while loop_pass < 3:
## each pass will try a different field
if loop_pass == 0:
field = 'uuid'
elif loop_pass == 1:
field = 'hostname'
else:
field = 'alias'
## loop vms and try to match
for vm in vms:
if field == 'uuid' and vm == search:
matched_vms.append(vm)
break # exit for on uuid match (max = 1)
elif field in vms[vm] and vms[vm][field] == search:
matched_vms.append(vm)
## exit on match(es) or try again
if matched_vms:
break
else:
loop_pass += 1
else:
for vm in vms:
matched_vms.append(vm)
## check if we have vms
if not matched_vms:
return {'Error': 'No vms matched.'}
## multiple allowed?
if one and len(matched_vms) > 1:
return {
'Error': 'Matched {0} vms, only one allowed!'.format(len(matched_vms)),
'Matches': matched_vms
}
## perform action
ret = {}
if action in ['start', 'stop', 'reboot', 'get']:
for vm in matched_vms:
vmadm_args = {
'key': 'uuid',
'vm': vm
}
try:
for vmadm_res in client.cmd_iter(vms[vm]['node'], 'vmadm.{0}'.format(action), kwarg=vmadm_args):
if not vmadm_res:
continue
if vms[vm]['node'] in vmadm_res:
ret[vm] = vmadm_res[vms[vm]['node']]['ret']
except SaltClientError as client_error:
ret[vm] = False
elif action in ['is_running']:
ret = True
for vm in matched_vms:
if vms[vm]['state'] != 'running':
ret = False
break
return ret | [
"def",
"_action",
"(",
"action",
"=",
"'get'",
",",
"search",
"=",
"None",
",",
"one",
"=",
"True",
",",
"force",
"=",
"False",
")",
":",
"vms",
"=",
"{",
"}",
"matched_vms",
"=",
"[",
"]",
"client",
"=",
"salt",
".",
"client",
".",
"get_local_client",
"(",
"__opts__",
"[",
"'conf_file'",
"]",
")",
"## lookup vms",
"try",
":",
"vmadm_args",
"=",
"{",
"}",
"vmadm_args",
"[",
"'order'",
"]",
"=",
"'uuid,alias,hostname,state'",
"if",
"'='",
"in",
"search",
":",
"vmadm_args",
"[",
"'search'",
"]",
"=",
"search",
"for",
"cn",
"in",
"client",
".",
"cmd_iter",
"(",
"'G@virtual:physical and G@os:smartos'",
",",
"'vmadm.list'",
",",
"kwarg",
"=",
"vmadm_args",
",",
"tgt_type",
"=",
"'compound'",
")",
":",
"if",
"not",
"cn",
":",
"continue",
"node",
"=",
"next",
"(",
"six",
".",
"iterkeys",
"(",
"cn",
")",
")",
"if",
"not",
"isinstance",
"(",
"cn",
"[",
"node",
"]",
",",
"dict",
")",
"or",
"'ret'",
"not",
"in",
"cn",
"[",
"node",
"]",
"or",
"not",
"isinstance",
"(",
"cn",
"[",
"node",
"]",
"[",
"'ret'",
"]",
",",
"dict",
")",
":",
"continue",
"for",
"vm",
"in",
"cn",
"[",
"node",
"]",
"[",
"'ret'",
"]",
":",
"vmcfg",
"=",
"cn",
"[",
"node",
"]",
"[",
"'ret'",
"]",
"[",
"vm",
"]",
"vmcfg",
"[",
"'node'",
"]",
"=",
"node",
"vms",
"[",
"vm",
"]",
"=",
"vmcfg",
"except",
"SaltClientError",
"as",
"client_error",
":",
"pass",
"## check if we have vms",
"if",
"not",
"vms",
":",
"return",
"{",
"'Error'",
":",
"'No vms found.'",
"}",
"## simple search",
"if",
"'='",
"not",
"in",
"search",
":",
"loop_pass",
"=",
"0",
"while",
"loop_pass",
"<",
"3",
":",
"## each pass will try a different field",
"if",
"loop_pass",
"==",
"0",
":",
"field",
"=",
"'uuid'",
"elif",
"loop_pass",
"==",
"1",
":",
"field",
"=",
"'hostname'",
"else",
":",
"field",
"=",
"'alias'",
"## loop vms and try to match",
"for",
"vm",
"in",
"vms",
":",
"if",
"field",
"==",
"'uuid'",
"and",
"vm",
"==",
"search",
":",
"matched_vms",
".",
"append",
"(",
"vm",
")",
"break",
"# exit for on uuid match (max = 1)",
"elif",
"field",
"in",
"vms",
"[",
"vm",
"]",
"and",
"vms",
"[",
"vm",
"]",
"[",
"field",
"]",
"==",
"search",
":",
"matched_vms",
".",
"append",
"(",
"vm",
")",
"## exit on match(es) or try again",
"if",
"matched_vms",
":",
"break",
"else",
":",
"loop_pass",
"+=",
"1",
"else",
":",
"for",
"vm",
"in",
"vms",
":",
"matched_vms",
".",
"append",
"(",
"vm",
")",
"## check if we have vms",
"if",
"not",
"matched_vms",
":",
"return",
"{",
"'Error'",
":",
"'No vms matched.'",
"}",
"## multiple allowed?",
"if",
"one",
"and",
"len",
"(",
"matched_vms",
")",
">",
"1",
":",
"return",
"{",
"'Error'",
":",
"'Matched {0} vms, only one allowed!'",
".",
"format",
"(",
"len",
"(",
"matched_vms",
")",
")",
",",
"'Matches'",
":",
"matched_vms",
"}",
"## perform action",
"ret",
"=",
"{",
"}",
"if",
"action",
"in",
"[",
"'start'",
",",
"'stop'",
",",
"'reboot'",
",",
"'get'",
"]",
":",
"for",
"vm",
"in",
"matched_vms",
":",
"vmadm_args",
"=",
"{",
"'key'",
":",
"'uuid'",
",",
"'vm'",
":",
"vm",
"}",
"try",
":",
"for",
"vmadm_res",
"in",
"client",
".",
"cmd_iter",
"(",
"vms",
"[",
"vm",
"]",
"[",
"'node'",
"]",
",",
"'vmadm.{0}'",
".",
"format",
"(",
"action",
")",
",",
"kwarg",
"=",
"vmadm_args",
")",
":",
"if",
"not",
"vmadm_res",
":",
"continue",
"if",
"vms",
"[",
"vm",
"]",
"[",
"'node'",
"]",
"in",
"vmadm_res",
":",
"ret",
"[",
"vm",
"]",
"=",
"vmadm_res",
"[",
"vms",
"[",
"vm",
"]",
"[",
"'node'",
"]",
"]",
"[",
"'ret'",
"]",
"except",
"SaltClientError",
"as",
"client_error",
":",
"ret",
"[",
"vm",
"]",
"=",
"False",
"elif",
"action",
"in",
"[",
"'is_running'",
"]",
":",
"ret",
"=",
"True",
"for",
"vm",
"in",
"matched_vms",
":",
"if",
"vms",
"[",
"vm",
"]",
"[",
"'state'",
"]",
"!=",
"'running'",
":",
"ret",
"=",
"False",
"break",
"return",
"ret"
]
| Multi action helper for start, stop, get, ... | [
"Multi",
"action",
"helper",
"for",
"start",
"stop",
"get",
"..."
]
| python | train |
tensorflow/tensor2tensor | tensor2tensor/models/transformer.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2677-L2694 | def transformer_wikitext103_l16k_memory_v0():
"""HParams for training languagemodel_wikitext103_l16k with memory."""
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.max_length = 16384
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
return hparams | [
"def",
"transformer_wikitext103_l16k_memory_v0",
"(",
")",
":",
"hparams",
"=",
"transformer_wikitext103_l4k_memory_v0",
"(",
")",
"hparams",
".",
"max_length",
"=",
"16384",
"hparams",
".",
"split_targets_chunk_length",
"=",
"64",
"hparams",
".",
"split_targets_max_chunks",
"=",
"int",
"(",
"hparams",
".",
"max_length",
"/",
"hparams",
".",
"split_targets_chunk_length",
")",
"# The hparams specify batch size *before* chunking, but we want to have a",
"# consistent 4K batch size *after* chunking to fully utilize the hardware.",
"target_tokens_per_batch",
"=",
"4096",
"hparams",
".",
"batch_size",
"=",
"int",
"(",
"target_tokens_per_batch",
"*",
"(",
"hparams",
".",
"max_length",
"/",
"hparams",
".",
"split_targets_chunk_length",
")",
")",
"hparams",
".",
"max_relative_position",
"=",
"2",
"*",
"hparams",
".",
"split_targets_chunk_length",
"return",
"hparams"
]
| HParams for training languagemodel_wikitext103_l16k with memory. | [
"HParams",
"for",
"training",
"languagemodel_wikitext103_l16k",
"with",
"memory",
"."
]
| python | train |
clalancette/pycdlib | pycdlib/rockridge.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L2798-L2816 | def get_file_mode(self):
# type: () -> int
'''
Get the POSIX file mode bits for this Rock Ridge entry.
Parameters:
None.
Returns:
The POSIX file mode bits for this Rock Ridge entry.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')
if self.dr_entries.px_record is None:
if self.ce_entries.px_record is None:
raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file mode')
return self.ce_entries.px_record.posix_file_mode
return self.dr_entries.px_record.posix_file_mode | [
"def",
"get_file_mode",
"(",
"self",
")",
":",
"# type: () -> int",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalError",
"(",
"'Rock Ridge extension not yet initialized'",
")",
"if",
"self",
".",
"dr_entries",
".",
"px_record",
"is",
"None",
":",
"if",
"self",
".",
"ce_entries",
".",
"px_record",
"is",
"None",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidInput",
"(",
"'No Rock Ridge file mode'",
")",
"return",
"self",
".",
"ce_entries",
".",
"px_record",
".",
"posix_file_mode",
"return",
"self",
".",
"dr_entries",
".",
"px_record",
".",
"posix_file_mode"
]
| Get the POSIX file mode bits for this Rock Ridge entry.
Parameters:
None.
Returns:
The POSIX file mode bits for this Rock Ridge entry. | [
"Get",
"the",
"POSIX",
"file",
"mode",
"bits",
"for",
"this",
"Rock",
"Ridge",
"entry",
"."
]
| python | train |
PMEAL/OpenPNM | openpnm/algorithms/ReactiveTransport.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/ReactiveTransport.py#L200-L220 | def _update_physics(self):
"""r
Update physics using the current value of 'quantity'
Notes
-----
The algorithm directly writes the value of 'quantity' into the phase.
This method was implemented relaxing one of the OpenPNM rules of
algorithms not being able to write into phases.
"""
phase = self.project.phases()[self.settings['phase']]
physics = self.project.find_physics(phase=phase)
for item in self.settings['sources']:
# Regenerate models with new guess
quantity = self.settings['quantity']
# Put quantity on phase so physics finds it when regenerating
phase[quantity] = self[quantity]
# Regenerate models, on either phase or physics
phase.regenerate_models(propnames=item)
for phys in physics:
phys.regenerate_models(propnames=item) | [
"def",
"_update_physics",
"(",
"self",
")",
":",
"phase",
"=",
"self",
".",
"project",
".",
"phases",
"(",
")",
"[",
"self",
".",
"settings",
"[",
"'phase'",
"]",
"]",
"physics",
"=",
"self",
".",
"project",
".",
"find_physics",
"(",
"phase",
"=",
"phase",
")",
"for",
"item",
"in",
"self",
".",
"settings",
"[",
"'sources'",
"]",
":",
"# Regenerate models with new guess",
"quantity",
"=",
"self",
".",
"settings",
"[",
"'quantity'",
"]",
"# Put quantity on phase so physics finds it when regenerating",
"phase",
"[",
"quantity",
"]",
"=",
"self",
"[",
"quantity",
"]",
"# Regenerate models, on either phase or physics",
"phase",
".",
"regenerate_models",
"(",
"propnames",
"=",
"item",
")",
"for",
"phys",
"in",
"physics",
":",
"phys",
".",
"regenerate_models",
"(",
"propnames",
"=",
"item",
")"
]
| r
Update physics using the current value of 'quantity'
Notes
-----
The algorithm directly writes the value of 'quantity' into the phase.
This method was implemented relaxing one of the OpenPNM rules of
algorithms not being able to write into phases. | [
"r",
"Update",
"physics",
"using",
"the",
"current",
"value",
"of",
"quantity"
]
| python | train |
nathforge/pydentifier | src/pydentifier/__init__.py | https://github.com/nathforge/pydentifier/blob/b8d27076254c65cfd7893c1401e2a198abd6afb4/src/pydentifier/__init__.py#L11-L30 | def lower_underscore(string, prefix='', suffix=''):
"""
Generate an underscore-separated lower-case identifier, given English text,
a prefix, and an optional suffix.
Useful for function names and variable names.
`prefix` can be set to `''`, though be careful - without a prefix, the
function will throw `InvalidIdentifier` when your string starts with a
number.
Example:
>>> lower_underscore("This is an identifier", prefix='')
'this_is_an_identifier'
"""
return require_valid(append_underscore_if_keyword('_'.join(
word.lower()
for word in en.words(' '.join([prefix, string, suffix])))
)) | [
"def",
"lower_underscore",
"(",
"string",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
")",
":",
"return",
"require_valid",
"(",
"append_underscore_if_keyword",
"(",
"'_'",
".",
"join",
"(",
"word",
".",
"lower",
"(",
")",
"for",
"word",
"in",
"en",
".",
"words",
"(",
"' '",
".",
"join",
"(",
"[",
"prefix",
",",
"string",
",",
"suffix",
"]",
")",
")",
")",
")",
")"
]
| Generate an underscore-separated lower-case identifier, given English text,
a prefix, and an optional suffix.
Useful for function names and variable names.
`prefix` can be set to `''`, though be careful - without a prefix, the
function will throw `InvalidIdentifier` when your string starts with a
number.
Example:
>>> lower_underscore("This is an identifier", prefix='')
'this_is_an_identifier' | [
"Generate",
"an",
"underscore",
"-",
"separated",
"lower",
"-",
"case",
"identifier",
"given",
"English",
"text",
"a",
"prefix",
"and",
"an",
"optional",
"suffix",
"."
]
| python | train |
rackerlabs/simpl | simpl/git.py | https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/git.py#L369-L378 | def git_ls_tree(repo_dir, treeish='HEAD'):
"""Run git ls-tree."""
command = ['git', 'ls-tree', '-r', '--full-tree', treeish]
raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
output = [l.strip() for l in raw if l.strip()]
# <mode> <type> <object> <file>
# make a list of lists with clean elements of equal length
breakout = [k.split(None, 3) for k in output]
headers = ['mode', 'type', 'object', 'file']
return [dict(zip(headers, vals)) for vals in breakout] | [
"def",
"git_ls_tree",
"(",
"repo_dir",
",",
"treeish",
"=",
"'HEAD'",
")",
":",
"command",
"=",
"[",
"'git'",
",",
"'ls-tree'",
",",
"'-r'",
",",
"'--full-tree'",
",",
"treeish",
"]",
"raw",
"=",
"execute_git_command",
"(",
"command",
",",
"repo_dir",
"=",
"repo_dir",
")",
".",
"splitlines",
"(",
")",
"output",
"=",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"raw",
"if",
"l",
".",
"strip",
"(",
")",
"]",
"# <mode> <type> <object> <file>",
"# make a list of lists with clean elements of equal length",
"breakout",
"=",
"[",
"k",
".",
"split",
"(",
"None",
",",
"3",
")",
"for",
"k",
"in",
"output",
"]",
"headers",
"=",
"[",
"'mode'",
",",
"'type'",
",",
"'object'",
",",
"'file'",
"]",
"return",
"[",
"dict",
"(",
"zip",
"(",
"headers",
",",
"vals",
")",
")",
"for",
"vals",
"in",
"breakout",
"]"
]
| Run git ls-tree. | [
"Run",
"git",
"ls",
"-",
"tree",
"."
]
| python | train |
aio-libs/janus | janus/__init__.py | https://github.com/aio-libs/janus/blob/8dc80530db1144fbd1dba75d4a1c1a54bb520c21/janus/__init__.py#L478-L499 | def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue.
"""
self._parent._check_closing()
with self._parent._all_tasks_done:
if self._parent._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._parent._unfinished_tasks -= 1
if self._parent._unfinished_tasks == 0:
self._parent._finished.set()
self._parent._all_tasks_done.notify_all() | [
"def",
"task_done",
"(",
"self",
")",
":",
"self",
".",
"_parent",
".",
"_check_closing",
"(",
")",
"with",
"self",
".",
"_parent",
".",
"_all_tasks_done",
":",
"if",
"self",
".",
"_parent",
".",
"_unfinished_tasks",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'task_done() called too many times'",
")",
"self",
".",
"_parent",
".",
"_unfinished_tasks",
"-=",
"1",
"if",
"self",
".",
"_parent",
".",
"_unfinished_tasks",
"==",
"0",
":",
"self",
".",
"_parent",
".",
"_finished",
".",
"set",
"(",
")",
"self",
".",
"_parent",
".",
"_all_tasks_done",
".",
"notify_all",
"(",
")"
]
| Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue. | [
"Indicate",
"that",
"a",
"formerly",
"enqueued",
"task",
"is",
"complete",
"."
]
| python | train |
cloudera/cm_api | python/src/cm_api/endpoints/clusters.py | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L152-L161 | def create_service(self, name, service_type):
"""
Create a service.
@param name: Service name
@param service_type: Service type
@return: An ApiService object
"""
return services.create_service(self._get_resource_root(), name,
service_type, self.name) | [
"def",
"create_service",
"(",
"self",
",",
"name",
",",
"service_type",
")",
":",
"return",
"services",
".",
"create_service",
"(",
"self",
".",
"_get_resource_root",
"(",
")",
",",
"name",
",",
"service_type",
",",
"self",
".",
"name",
")"
]
| Create a service.
@param name: Service name
@param service_type: Service type
@return: An ApiService object | [
"Create",
"a",
"service",
"."
]
| python | train |
toumorokoshi/miura | miura/__init__.py | https://github.com/toumorokoshi/miura/blob/f23e270a9507e5946798b1e897220c9fb1b8d5fa/miura/__init__.py#L74-L81 | def _create_stdout_logger():
""" create a logger to stdout """
log = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO) | [
"def",
"_create_stdout_logger",
"(",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"out_hdlr",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"out_hdlr",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'%(message)s'",
")",
")",
"out_hdlr",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"log",
".",
"addHandler",
"(",
"out_hdlr",
")",
"log",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")"
]
| create a logger to stdout | [
"create",
"a",
"logger",
"to",
"stdout"
]
| python | train |
aheadley/python-crunchyroll | crunchyroll/apis/scraper.py | https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/scraper.py#L34-L53 | def get_media_formats(self, media_id):
"""CR doesn't seem to provide the video_format and video_quality params
through any of the APIs so we have to scrape the video page
"""
url = (SCRAPER.API_URL + 'media-' + media_id).format(
protocol=SCRAPER.PROTOCOL_INSECURE)
format_pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN)
formats = {}
for format, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS):
resp = self._connector.get(url, params={param: '1'})
if not resp.ok:
continue
try:
match = format_pattern.search(resp.content)
except TypeError:
match = format_pattern.search(resp.text)
if match:
formats[format] = (int(match.group(1)), int(match.group(2)))
return formats | [
"def",
"get_media_formats",
"(",
"self",
",",
"media_id",
")",
":",
"url",
"=",
"(",
"SCRAPER",
".",
"API_URL",
"+",
"'media-'",
"+",
"media_id",
")",
".",
"format",
"(",
"protocol",
"=",
"SCRAPER",
".",
"PROTOCOL_INSECURE",
")",
"format_pattern",
"=",
"re",
".",
"compile",
"(",
"SCRAPER",
".",
"VIDEO",
".",
"FORMAT_PATTERN",
")",
"formats",
"=",
"{",
"}",
"for",
"format",
",",
"param",
"in",
"iteritems",
"(",
"SCRAPER",
".",
"VIDEO",
".",
"FORMAT_PARAMS",
")",
":",
"resp",
"=",
"self",
".",
"_connector",
".",
"get",
"(",
"url",
",",
"params",
"=",
"{",
"param",
":",
"'1'",
"}",
")",
"if",
"not",
"resp",
".",
"ok",
":",
"continue",
"try",
":",
"match",
"=",
"format_pattern",
".",
"search",
"(",
"resp",
".",
"content",
")",
"except",
"TypeError",
":",
"match",
"=",
"format_pattern",
".",
"search",
"(",
"resp",
".",
"text",
")",
"if",
"match",
":",
"formats",
"[",
"format",
"]",
"=",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
",",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"return",
"formats"
]
| CR doesn't seem to provide the video_format and video_quality params
through any of the APIs so we have to scrape the video page | [
"CR",
"doesn",
"t",
"seem",
"to",
"provide",
"the",
"video_format",
"and",
"video_quality",
"params",
"through",
"any",
"of",
"the",
"APIs",
"so",
"we",
"have",
"to",
"scrape",
"the",
"video",
"page"
]
| python | train |
tensorflow/tensor2tensor | tensor2tensor/models/research/glow_ops.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L1279-L1297 | def uniform_binning_correction(x, n_bits=8):
"""Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)).
"""
n_bins = 2**n_bits
batch_size, height, width, n_channels = common_layers.shape_list(x)
hwc = float(height * width * n_channels)
x = x + tf.random_uniform(
shape=(batch_size, height, width, n_channels),
minval=0.0, maxval=1.0/n_bins)
objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
return x, objective | [
"def",
"uniform_binning_correction",
"(",
"x",
",",
"n_bits",
"=",
"8",
")",
":",
"n_bins",
"=",
"2",
"**",
"n_bits",
"batch_size",
",",
"height",
",",
"width",
",",
"n_channels",
"=",
"common_layers",
".",
"shape_list",
"(",
"x",
")",
"hwc",
"=",
"float",
"(",
"height",
"*",
"width",
"*",
"n_channels",
")",
"x",
"=",
"x",
"+",
"tf",
".",
"random_uniform",
"(",
"shape",
"=",
"(",
"batch_size",
",",
"height",
",",
"width",
",",
"n_channels",
")",
",",
"minval",
"=",
"0.0",
",",
"maxval",
"=",
"1.0",
"/",
"n_bins",
")",
"objective",
"=",
"-",
"np",
".",
"log",
"(",
"n_bins",
")",
"*",
"hwc",
"*",
"tf",
".",
"ones",
"(",
"batch_size",
")",
"return",
"x",
",",
"objective"
]
| Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)). | [
"Replaces",
"x^i",
"with",
"q^i",
"(",
"x",
")",
"=",
"U",
"(",
"x",
"x",
"+",
"1",
".",
"0",
"/",
"256",
".",
"0",
")",
"."
]
| python | train |
IBMStreams/pypi.streamsx | streamsx/rest.py | https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest.py#L154-L161 | def _get_element_by_id(self, resource_name, eclass, id):
"""Get a single element matching an id"""
elements = self._get_elements(resource_name, eclass, id=id)
if not elements:
raise ValueError("No resource matching: {0}".format(id))
if len(elements) == 1:
return elements[0]
raise ValueError("Multiple resources matching: {0}".format(id)) | [
"def",
"_get_element_by_id",
"(",
"self",
",",
"resource_name",
",",
"eclass",
",",
"id",
")",
":",
"elements",
"=",
"self",
".",
"_get_elements",
"(",
"resource_name",
",",
"eclass",
",",
"id",
"=",
"id",
")",
"if",
"not",
"elements",
":",
"raise",
"ValueError",
"(",
"\"No resource matching: {0}\"",
".",
"format",
"(",
"id",
")",
")",
"if",
"len",
"(",
"elements",
")",
"==",
"1",
":",
"return",
"elements",
"[",
"0",
"]",
"raise",
"ValueError",
"(",
"\"Multiple resources matching: {0}\"",
".",
"format",
"(",
"id",
")",
")"
]
| Get a single element matching an id | [
"Get",
"a",
"single",
"element",
"matching",
"an",
"id"
]
| python | train |
bachiraoun/pysimplelog | SimpleLog.py | https://github.com/bachiraoun/pysimplelog/blob/2681ed5b1b8d7e66c3fff3ec3cca2b14ac571238/SimpleLog.py#L1141-L1179 | def force_log(self, logType, message, data=None, tback=None, stdout=True, file=True):
"""
Force logging a message of a certain logtype whether logtype level is allowed or not.
:Parameters:
#. logType (string): A defined logging type.
#. message (string): Any message to log.
#. tback (None, str, list): Stack traceback to print and/or write to
log file. In general, this should be traceback.extract_stack
#. stdout (boolean): Whether to force logging to standard output.
#. file (boolean): Whether to force logging to file.
"""
# log to stdout
log = self._format_message(logType=logType, message=message, data=data, tback=tback)
if stdout:
self.__log_to_stdout(self.__logTypeFormat[logType][0] + log + self.__logTypeFormat[logType][1] + "\n")
try:
self.__stdout.flush()
except:
pass
try:
os.fsync(self.__stdout.fileno())
except:
pass
if file:
# log to file
self.__log_to_file(log)
self.__log_to_file("\n")
try:
self.__logFileStream.flush()
except:
pass
try:
os.fsync(self.__logFileStream.fileno())
except:
pass
# set last logged message
self.__lastLogged[logType] = log
self.__lastLogged[-1] = log | [
"def",
"force_log",
"(",
"self",
",",
"logType",
",",
"message",
",",
"data",
"=",
"None",
",",
"tback",
"=",
"None",
",",
"stdout",
"=",
"True",
",",
"file",
"=",
"True",
")",
":",
"# log to stdout",
"log",
"=",
"self",
".",
"_format_message",
"(",
"logType",
"=",
"logType",
",",
"message",
"=",
"message",
",",
"data",
"=",
"data",
",",
"tback",
"=",
"tback",
")",
"if",
"stdout",
":",
"self",
".",
"__log_to_stdout",
"(",
"self",
".",
"__logTypeFormat",
"[",
"logType",
"]",
"[",
"0",
"]",
"+",
"log",
"+",
"self",
".",
"__logTypeFormat",
"[",
"logType",
"]",
"[",
"1",
"]",
"+",
"\"\\n\"",
")",
"try",
":",
"self",
".",
"__stdout",
".",
"flush",
"(",
")",
"except",
":",
"pass",
"try",
":",
"os",
".",
"fsync",
"(",
"self",
".",
"__stdout",
".",
"fileno",
"(",
")",
")",
"except",
":",
"pass",
"if",
"file",
":",
"# log to file",
"self",
".",
"__log_to_file",
"(",
"log",
")",
"self",
".",
"__log_to_file",
"(",
"\"\\n\"",
")",
"try",
":",
"self",
".",
"__logFileStream",
".",
"flush",
"(",
")",
"except",
":",
"pass",
"try",
":",
"os",
".",
"fsync",
"(",
"self",
".",
"__logFileStream",
".",
"fileno",
"(",
")",
")",
"except",
":",
"pass",
"# set last logged message",
"self",
".",
"__lastLogged",
"[",
"logType",
"]",
"=",
"log",
"self",
".",
"__lastLogged",
"[",
"-",
"1",
"]",
"=",
"log"
]
| Force logging a message of a certain logtype whether logtype level is allowed or not.
:Parameters:
#. logType (string): A defined logging type.
#. message (string): Any message to log.
#. tback (None, str, list): Stack traceback to print and/or write to
log file. In general, this should be traceback.extract_stack
#. stdout (boolean): Whether to force logging to standard output.
#. file (boolean): Whether to force logging to file. | [
"Force",
"logging",
"a",
"message",
"of",
"a",
"certain",
"logtype",
"whether",
"logtype",
"level",
"is",
"allowed",
"or",
"not",
"."
]
| python | train |
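
The flush-then-fsync calls in force_log are what make the write durable. A standalone sketch of that pattern (the file name is just an example):

    import os

    def append_durably(path, text):
        """Append text, flush Python's buffer, then ask the OS to sync to disk."""
        with open(path, "a") as fh:
            fh.write(text)
            fh.flush()                # empty the userspace buffer
            os.fsync(fh.fileno())     # push the OS page cache to stable storage

    append_durably("example.log", "force-logged line\n")
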
wbond/oscrypto | oscrypto/trust_list.py | https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/trust_list.py#L303-L330 | def _cached_path_needs_update(ca_path, cache_length):
"""
Checks to see if a cache file needs to be refreshed
:param ca_path:
A unicode string of the path to the cache file
:param cache_length:
An integer representing the number of hours the cache is valid for
:return:
A boolean - True if the cache needs to be updated, False if the file
is up-to-date
"""
exists = os.path.exists(ca_path)
if not exists:
return True
stats = os.stat(ca_path)
if stats.st_mtime < time.time() - cache_length * 60 * 60:
return True
if stats.st_size == 0:
return True
return False | [
"def",
"_cached_path_needs_update",
"(",
"ca_path",
",",
"cache_length",
")",
":",
"exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"ca_path",
")",
"if",
"not",
"exists",
":",
"return",
"True",
"stats",
"=",
"os",
".",
"stat",
"(",
"ca_path",
")",
"if",
"stats",
".",
"st_mtime",
"<",
"time",
".",
"time",
"(",
")",
"-",
"cache_length",
"*",
"60",
"*",
"60",
":",
"return",
"True",
"if",
"stats",
".",
"st_size",
"==",
"0",
":",
"return",
"True",
"return",
"False"
]
| Checks to see if a cache file needs to be refreshed
:param ca_path:
A unicode string of the path to the cache file
:param cache_length:
An integer representing the number of hours the cache is valid for
:return:
A boolean - True if the cache needs to be updated, False if the file
is up-to-date | [
"Checks",
"to",
"see",
"if",
"a",
"cache",
"file",
"needs",
"to",
"be",
"refreshed"
]
| python | valid |
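
The same mtime/size freshness test can be written as a self-contained helper; a small sketch (path and maximum age are illustrative):

    import os
    import time

    def cache_is_stale(path, max_age_hours):
        """True when the file is missing, empty, or older than max_age_hours."""
        if not os.path.exists(path):
            return True
        stats = os.stat(path)
        if stats.st_mtime < time.time() - max_age_hours * 60 * 60:
            return True
        return stats.st_size == 0

    print(cache_is_stale("ca-bundle.crt", 24))    # True unless a fresh file exists
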
mwouts/jupytext | jupytext/contentsmanager.py | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/contentsmanager.py#L439-L468 | def rename_file(self, old_path, new_path):
"""Rename the current notebook, as well as its alternative representations"""
if old_path not in self.paired_notebooks:
try:
# we do not know yet if this is a paired notebook (#190)
# -> to get this information we open the notebook
self.get(old_path, content=True)
except Exception:
pass
if old_path not in self.paired_notebooks:
super(TextFileContentsManager, self).rename_file(old_path, new_path)
return
fmt, formats = self.paired_notebooks.get(old_path)
old_alt_paths = paired_paths(old_path, fmt, formats)
# Is the new file name consistent with suffix?
try:
new_base = base_path(new_path, fmt)
except Exception as err:
raise HTTPError(400, str(err))
for old_alt_path, alt_fmt in old_alt_paths:
new_alt_path = full_path(new_base, alt_fmt)
if self.exists(old_alt_path):
super(TextFileContentsManager, self).rename_file(old_alt_path, new_alt_path)
self.drop_paired_notebook(old_path)
self.update_paired_notebooks(new_path, fmt, formats) | [
"def",
"rename_file",
"(",
"self",
",",
"old_path",
",",
"new_path",
")",
":",
"if",
"old_path",
"not",
"in",
"self",
".",
"paired_notebooks",
":",
"try",
":",
"# we do not know yet if this is a paired notebook (#190)",
"# -> to get this information we open the notebook",
"self",
".",
"get",
"(",
"old_path",
",",
"content",
"=",
"True",
")",
"except",
"Exception",
":",
"pass",
"if",
"old_path",
"not",
"in",
"self",
".",
"paired_notebooks",
":",
"super",
"(",
"TextFileContentsManager",
",",
"self",
")",
".",
"rename_file",
"(",
"old_path",
",",
"new_path",
")",
"return",
"fmt",
",",
"formats",
"=",
"self",
".",
"paired_notebooks",
".",
"get",
"(",
"old_path",
")",
"old_alt_paths",
"=",
"paired_paths",
"(",
"old_path",
",",
"fmt",
",",
"formats",
")",
"# Is the new file name consistent with suffix?",
"try",
":",
"new_base",
"=",
"base_path",
"(",
"new_path",
",",
"fmt",
")",
"except",
"Exception",
"as",
"err",
":",
"raise",
"HTTPError",
"(",
"400",
",",
"str",
"(",
"err",
")",
")",
"for",
"old_alt_path",
",",
"alt_fmt",
"in",
"old_alt_paths",
":",
"new_alt_path",
"=",
"full_path",
"(",
"new_base",
",",
"alt_fmt",
")",
"if",
"self",
".",
"exists",
"(",
"old_alt_path",
")",
":",
"super",
"(",
"TextFileContentsManager",
",",
"self",
")",
".",
"rename_file",
"(",
"old_alt_path",
",",
"new_alt_path",
")",
"self",
".",
"drop_paired_notebook",
"(",
"old_path",
")",
"self",
".",
"update_paired_notebooks",
"(",
"new_path",
",",
"fmt",
",",
"formats",
")"
]
| Rename the current notebook, as well as its alternative representations | [
"Rename",
"the",
"current",
"notebook",
"as",
"well",
"as",
"its",
"alternative",
"representations"
]
| python | train |
uw-it-aca/uw-restclients-iasystem | uw_iasystem/__init__.py | https://github.com/uw-it-aca/uw-restclients-iasystem/blob/f65f169d54b0d39e2d732cba529ccd8b6cb49f8a/uw_iasystem/__init__.py#L33-L57 | def __get_resource(dao, url):
"""
Issue a GET request to IASystem with the given url
and return a response in Collection+json format.
:returns: http response with content in json
"""
headers = {"Accept": "application/vnd.collection+json"}
response = dao.getURL(url, headers)
status = response.status
logger.debug("{} ==status==> {}".format(url, status))
if status != 200:
message = str(response.data)
if status == 404:
# the URL not exists on the specific domain
return None
if status == 400:
if "Term is out of range" in message:
raise TermEvalNotCreated(url, status, message)
raise DataFailureException(url, status, message)
return json.loads(response.data) | [
"def",
"__get_resource",
"(",
"dao",
",",
"url",
")",
":",
"headers",
"=",
"{",
"\"Accept\"",
":",
"\"application/vnd.collection+json\"",
"}",
"response",
"=",
"dao",
".",
"getURL",
"(",
"url",
",",
"headers",
")",
"status",
"=",
"response",
".",
"status",
"logger",
".",
"debug",
"(",
"\"{} ==status==> {}\"",
".",
"format",
"(",
"url",
",",
"status",
")",
")",
"if",
"status",
"!=",
"200",
":",
"message",
"=",
"str",
"(",
"response",
".",
"data",
")",
"if",
"status",
"==",
"404",
":",
"# the URL not exists on the specific domain",
"return",
"None",
"if",
"status",
"==",
"400",
":",
"if",
"\"Term is out of range\"",
"in",
"message",
":",
"raise",
"TermEvalNotCreated",
"(",
"url",
",",
"status",
",",
"message",
")",
"raise",
"DataFailureException",
"(",
"url",
",",
"status",
",",
"message",
")",
"return",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")"
]
| Issue a GET request to IASystem with the given url
and return a response in Collection+json format.
:returns: http response with content in json | [
"Issue",
"a",
"GET",
"request",
"to",
"IASystem",
"with",
"the",
"given",
"url",
"and",
"return",
"a",
"response",
"in",
"Collection",
"+",
"json",
"format",
".",
":",
"returns",
":",
"http",
"response",
"with",
"content",
"in",
"json"
]
| python | train |
opencobra/memote | memote/support/matrix.py | https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/matrix.py#L46-L62 | def number_independent_conservation_relations(model):
"""
Return the number of conserved metabolite pools.
This number is given by the left null space of the stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
s_matrix, _, _ = con_helpers.stoichiometry_matrix(
model.metabolites, model.reactions
)
ln_matrix = con_helpers.nullspace(s_matrix.T)
return ln_matrix.shape[1] | [
"def",
"number_independent_conservation_relations",
"(",
"model",
")",
":",
"s_matrix",
",",
"_",
",",
"_",
"=",
"con_helpers",
".",
"stoichiometry_matrix",
"(",
"model",
".",
"metabolites",
",",
"model",
".",
"reactions",
")",
"ln_matrix",
"=",
"con_helpers",
".",
"nullspace",
"(",
"s_matrix",
".",
"T",
")",
"return",
"ln_matrix",
".",
"shape",
"[",
"1",
"]"
]
| Return the number of conserved metabolite pools.
This number is given by the left null space of the stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | [
"Return",
"the",
"number",
"of",
"conserved",
"metabolite",
"pools",
"."
]
| python | train |
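
The conserved-pool count is the dimension of the left null space of S. A small SciPy sketch on a toy A -> B -> C network (assumes scipy.linalg.null_space is available, SciPy >= 1.1):

    import numpy as np
    from scipy.linalg import null_space

    # Rows are metabolites A, B, C; columns are reactions A->B and B->C.
    S = np.array([[-1.0,  0.0],
                  [ 1.0, -1.0],
                  [ 0.0,  1.0]])

    left_null = null_space(S.T)       # vectors y with y.T @ S = 0
    print(left_null.shape[1])         # 1 conserved pool: A + B + C is constant
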
spookylukey/django-paypal | paypal/pro/creditcard.py | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/creditcard.py#L44-L52 | def is_mod10(self):
"""Returns True if number is valid according to mod10."""
double = 0
total = 0
for i in range(len(self.number) - 1, -1, -1):
for c in str((double + 1) * int(self.number[i])):
total = total + int(c)
double = (double + 1) % 2
return (total % 10) == 0 | [
"def",
"is_mod10",
"(",
"self",
")",
":",
"double",
"=",
"0",
"total",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"number",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"for",
"c",
"in",
"str",
"(",
"(",
"double",
"+",
"1",
")",
"*",
"int",
"(",
"self",
".",
"number",
"[",
"i",
"]",
")",
")",
":",
"total",
"=",
"total",
"+",
"int",
"(",
"c",
")",
"double",
"=",
"(",
"double",
"+",
"1",
")",
"%",
"2",
"return",
"(",
"total",
"%",
"10",
")",
"==",
"0"
]
| Returns True if number is valid according to mod10. | [
"Returns",
"True",
"if",
"number",
"is",
"valid",
"according",
"to",
"mod10",
"."
]
| python | train |
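
The method above is the Luhn (mod 10) check. An equivalent standalone version, exercised on a well-known test card number:

    def luhn_ok(number):
        """Mod-10 check: double every second digit from the right, sum all digits."""
        total, double = 0, 0
        for digit in reversed(number):
            for c in str((double + 1) * int(digit)):   # 8 doubled -> "16" -> 1 + 6
                total += int(c)
            double = (double + 1) % 2
        return total % 10 == 0

    print(luhn_ok("4111111111111111"))   # True
    print(luhn_ok("4111111111111112"))   # False
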
klahnakoski/pyLibrary | mo_threads/signal.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/signal.py#L57-L77 | def wait(self):
"""
PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED
"""
if self._go:
return True
with self.lock:
if self._go:
return True
stopper = _allocate_lock()
stopper.acquire()
if not self.waiting_threads:
self.waiting_threads = [stopper]
else:
self.waiting_threads.append(stopper)
DEBUG and self._name and Log.note("wait for go {{name|quote}}", name=self.name)
stopper.acquire()
DEBUG and self._name and Log.note("GOing! {{name|quote}}", name=self.name)
return True | [
"def",
"wait",
"(",
"self",
")",
":",
"if",
"self",
".",
"_go",
":",
"return",
"True",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"_go",
":",
"return",
"True",
"stopper",
"=",
"_allocate_lock",
"(",
")",
"stopper",
".",
"acquire",
"(",
")",
"if",
"not",
"self",
".",
"waiting_threads",
":",
"self",
".",
"waiting_threads",
"=",
"[",
"stopper",
"]",
"else",
":",
"self",
".",
"waiting_threads",
".",
"append",
"(",
"stopper",
")",
"DEBUG",
"and",
"self",
".",
"_name",
"and",
"Log",
".",
"note",
"(",
"\"wait for go {{name|quote}}\"",
",",
"name",
"=",
"self",
".",
"name",
")",
"stopper",
".",
"acquire",
"(",
")",
"DEBUG",
"and",
"self",
".",
"_name",
"and",
"Log",
".",
"note",
"(",
"\"GOing! {{name|quote}}\"",
",",
"name",
"=",
"self",
".",
"name",
")",
"return",
"True"
]
| PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED | [
"PUT",
"THREAD",
"IN",
"WAIT",
"STATE",
"UNTIL",
"SIGNAL",
"IS",
"ACTIVATED"
]
| python | train |
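
wait() parks the caller on a lock that is acquired twice and released by the signal. A minimal sketch of that gate pattern with plain threading primitives (not part of mo_threads):

    import threading
    import time

    gate = threading.Lock()
    gate.acquire()                       # start "closed": the next acquire will block

    def waiter():
        gate.acquire()                   # parks here until another thread releases
        print("go signal received")

    t = threading.Thread(target=waiter)
    t.start()
    time.sleep(0.1)
    gate.release()                       # "go": unparks the waiting thread
    t.join()
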
saltstack/salt | salt/netapi/rest_tornado/saltnado.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L618-L655 | def get(self):
'''
All logins are done over post, this is a parked endpoint
.. http:get:: /login
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 401 Unauthorized
Content-Type: application/json
Content-Length: 58
{"status": "401 Unauthorized", "return": "Please log in"}
'''
self.set_status(401)
self.set_header('WWW-Authenticate', 'Session')
ret = {'status': '401 Unauthorized',
'return': 'Please log in'}
self.write(self.serialize(ret)) | [
"def",
"get",
"(",
"self",
")",
":",
"self",
".",
"set_status",
"(",
"401",
")",
"self",
".",
"set_header",
"(",
"'WWW-Authenticate'",
",",
"'Session'",
")",
"ret",
"=",
"{",
"'status'",
":",
"'401 Unauthorized'",
",",
"'return'",
":",
"'Please log in'",
"}",
"self",
".",
"write",
"(",
"self",
".",
"serialize",
"(",
"ret",
")",
")"
]
| All logins are done over post, this is a parked endpoint
.. http:get:: /login
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 401 Unauthorized
Content-Type: application/json
Content-Length: 58
{"status": "401 Unauthorized", "return": "Please log in"} | [
"All",
"logins",
"are",
"done",
"over",
"post",
"this",
"is",
"a",
"parked",
"endpoint"
]
| python | train |
pandas-dev/pandas | pandas/core/arrays/datetimes.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1112-L1171 | def to_period(self, freq=None):
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]', freq='M')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]', freq='D')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn("Converting to PeriodArray/Index representation "
"will drop timezone information.", UserWarning)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError("You must pass a freq argument as "
"current index has none.")
freq = get_period_alias(freq)
return PeriodArray._from_datetime64(self._data, freq, tz=self.tz) | [
"def",
"to_period",
"(",
"self",
",",
"freq",
"=",
"None",
")",
":",
"from",
"pandas",
".",
"core",
".",
"arrays",
"import",
"PeriodArray",
"if",
"self",
".",
"tz",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"Converting to PeriodArray/Index representation \"",
"\"will drop timezone information.\"",
",",
"UserWarning",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"self",
".",
"freqstr",
"or",
"self",
".",
"inferred_freq",
"if",
"freq",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"You must pass a freq argument as \"",
"\"current index has none.\"",
")",
"freq",
"=",
"get_period_alias",
"(",
"freq",
")",
"return",
"PeriodArray",
".",
"_from_datetime64",
"(",
"self",
".",
"_data",
",",
"freq",
",",
"tz",
"=",
"self",
".",
"tz",
")"
]
| Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]', freq='M')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]', freq='D') | [
"Cast",
"to",
"PeriodArray",
"/",
"Index",
"at",
"a",
"particular",
"frequency",
"."
]
| python | train |
llazzaro/django-scheduler | schedule/templatetags/scheduletags.py | https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/templatetags/scheduletags.py#L54-L71 | def daily_table(context, day, start=8, end=20, increment=30):
"""
Display a nice table with occurrences and action buttons.
Arguments:
start - hour at which the day starts
end - hour at which the day ends
increment - size of a time slot (in minutes)
"""
user = context['request'].user
addable = CHECK_EVENT_PERM_FUNC(None, user)
if 'calendar' in context:
addable = addable and CHECK_CALENDAR_PERM_FUNC(context['calendar'], user)
context['addable'] = addable
day_part = day.get_time_slot(day.start + datetime.timedelta(hours=start), day.start + datetime.timedelta(hours=end))
# get slots to display on the left
slots = _cook_slots(day_part, increment)
context['slots'] = slots
return context | [
"def",
"daily_table",
"(",
"context",
",",
"day",
",",
"start",
"=",
"8",
",",
"end",
"=",
"20",
",",
"increment",
"=",
"30",
")",
":",
"user",
"=",
"context",
"[",
"'request'",
"]",
".",
"user",
"addable",
"=",
"CHECK_EVENT_PERM_FUNC",
"(",
"None",
",",
"user",
")",
"if",
"'calendar'",
"in",
"context",
":",
"addable",
"=",
"addable",
"and",
"CHECK_CALENDAR_PERM_FUNC",
"(",
"context",
"[",
"'calendar'",
"]",
",",
"user",
")",
"context",
"[",
"'addable'",
"]",
"=",
"addable",
"day_part",
"=",
"day",
".",
"get_time_slot",
"(",
"day",
".",
"start",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"start",
")",
",",
"day",
".",
"start",
"+",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"end",
")",
")",
"# get slots to display on the left",
"slots",
"=",
"_cook_slots",
"(",
"day_part",
",",
"increment",
")",
"context",
"[",
"'slots'",
"]",
"=",
"slots",
"return",
"context"
]
| Display a nice table with occurrences and action buttons.
Arguments:
start - hour at which the day starts
end - hour at which the day ends
increment - size of a time slot (in minutes) | [
"Display",
"a",
"nice",
"table",
"with",
"occurrences",
"and",
"action",
"buttons",
".",
"Arguments",
":",
"start",
"-",
"hour",
"at",
"which",
"the",
"day",
"starts",
"end",
"-",
"hour",
"at",
"which",
"the",
"day",
"ends",
"increment",
"-",
"size",
"of",
"a",
"time",
"slot",
"(",
"in",
"minutes",
")"
]
| python | train |
OCR-D/core | ocrd/ocrd/cli/workspace.py | https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/cli/workspace.py#L32-L36 | def workspace_cli(ctx, directory, mets_basename, backup):
"""
Working with workspace
"""
ctx.obj = WorkspaceCtx(os.path.abspath(directory), mets_basename, automatic_backup=backup) | [
"def",
"workspace_cli",
"(",
"ctx",
",",
"directory",
",",
"mets_basename",
",",
"backup",
")",
":",
"ctx",
".",
"obj",
"=",
"WorkspaceCtx",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
",",
"mets_basename",
",",
"automatic_backup",
"=",
"backup",
")"
]
| Working with workspace | [
"Working",
"with",
"workspace"
]
| python | train |
NatLibFi/Skosify | skosify/skosify.py | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/skosify.py#L77-L101 | def get_concept_scheme(rdf):
"""Return a skos:ConceptScheme contained in the model.
Returns None if no skos:ConceptScheme is present.
"""
# add explicit type
for s, o in rdf.subject_objects(SKOS.inScheme):
if not isinstance(o, Literal):
rdf.add((o, RDF.type, SKOS.ConceptScheme))
else:
logging.warning(
"Literal value %s for skos:inScheme detected, ignoring.", o)
css = list(rdf.subjects(RDF.type, SKOS.ConceptScheme))
if len(css) > 1:
css.sort()
cs = css[0]
logging.warning(
"Multiple concept schemes found. "
"Selecting %s as default concept scheme.", cs)
elif len(css) == 1:
cs = css[0]
else:
cs = None
return cs | [
"def",
"get_concept_scheme",
"(",
"rdf",
")",
":",
"# add explicit type",
"for",
"s",
",",
"o",
"in",
"rdf",
".",
"subject_objects",
"(",
"SKOS",
".",
"inScheme",
")",
":",
"if",
"not",
"isinstance",
"(",
"o",
",",
"Literal",
")",
":",
"rdf",
".",
"add",
"(",
"(",
"o",
",",
"RDF",
".",
"type",
",",
"SKOS",
".",
"ConceptScheme",
")",
")",
"else",
":",
"logging",
".",
"warning",
"(",
"\"Literal value %s for skos:inScheme detected, ignoring.\"",
",",
"o",
")",
"css",
"=",
"list",
"(",
"rdf",
".",
"subjects",
"(",
"RDF",
".",
"type",
",",
"SKOS",
".",
"ConceptScheme",
")",
")",
"if",
"len",
"(",
"css",
")",
">",
"1",
":",
"css",
".",
"sort",
"(",
")",
"cs",
"=",
"css",
"[",
"0",
"]",
"logging",
".",
"warning",
"(",
"\"Multiple concept schemes found. \"",
"\"Selecting %s as default concept scheme.\"",
",",
"cs",
")",
"elif",
"len",
"(",
"css",
")",
"==",
"1",
":",
"cs",
"=",
"css",
"[",
"0",
"]",
"else",
":",
"cs",
"=",
"None",
"return",
"cs"
]
| Return a skos:ConceptScheme contained in the model.
Returns None if no skos:ConceptScheme is present. | [
"Return",
"a",
"skos",
":",
"ConceptScheme",
"contained",
"in",
"the",
"model",
"."
]
| python | train |
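
The lookup above can be reproduced on a toy graph; a sketch assuming rdflib is installed (the URIs are made up):

    from rdflib import Graph, URIRef
    from rdflib.namespace import RDF, SKOS

    g = Graph()
    scheme = URIRef("http://example.org/scheme")
    concept = URIRef("http://example.org/concepts/c1")
    g.add((concept, SKOS.inScheme, scheme))
    g.add((scheme, RDF.type, SKOS.ConceptScheme))     # the explicit type the function adds

    schemes = sorted(g.subjects(RDF.type, SKOS.ConceptScheme))
    print(schemes[0] if schemes else None)            # http://example.org/scheme
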
nooperpudd/weibopy | weibopy/weibo.py | https://github.com/nooperpudd/weibopy/blob/61f3fb0502c1f07a591388aaa7526e74c63eaeb1/weibopy/weibo.py#L66-L80 | def get(self, suffix, params=None):
"""
request weibo api
:param suffix: str,
:param params: dict, url query parameters
:return:
"""
url = self.base + suffix
params = filter_params(params)
response = self.session.get(url=url, params=params)
return self._handler_response(response) | [
"def",
"get",
"(",
"self",
",",
"suffix",
",",
"params",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"base",
"+",
"suffix",
"params",
"=",
"filter_params",
"(",
"params",
")",
"response",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
"=",
"url",
",",
"params",
"=",
"params",
")",
"return",
"self",
".",
"_handler_response",
"(",
"response",
")"
]
| request weibo api
:param suffix: str,
:param params: dict, url query parameters
:return: | [
"request",
"weibo",
"api",
":",
"param",
"suffix",
":",
"str",
":",
"param",
"params",
":",
"dict",
"url",
"query",
"parameters",
":",
"return",
":"
]
| python | train |
agile-geoscience/striplog | striplog/legend.py | https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/legend.py#L696-L726 | def get_decor(self, c, match_only=None):
"""
Get the decor for a component.
Args:
c (component): The component to look up.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
Decor. The matching Decor from the Legend, or None if not found.
"""
if isinstance(c, Component):
if c:
if match_only:
# Filter the component only those attributes
c = Component({k: getattr(c, k, None) for k in match_only})
for decor in self.__list:
try:
if c == decor.component:
return decor
except AttributeError:
continue
else:
for decor in self.__list:
try:
if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:
return decor
except AttributeError:
continue
return Decor({'colour': '#eeeeee', 'component': Component()}) | [
"def",
"get_decor",
"(",
"self",
",",
"c",
",",
"match_only",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"c",
",",
"Component",
")",
":",
"if",
"c",
":",
"if",
"match_only",
":",
"# Filter the component only those attributes",
"c",
"=",
"Component",
"(",
"{",
"k",
":",
"getattr",
"(",
"c",
",",
"k",
",",
"None",
")",
"for",
"k",
"in",
"match_only",
"}",
")",
"for",
"decor",
"in",
"self",
".",
"__list",
":",
"try",
":",
"if",
"c",
"==",
"decor",
".",
"component",
":",
"return",
"decor",
"except",
"AttributeError",
":",
"continue",
"else",
":",
"for",
"decor",
"in",
"self",
".",
"__list",
":",
"try",
":",
"if",
"getattr",
"(",
"c",
",",
"'mnemonic'",
")",
".",
"lower",
"(",
")",
"==",
"decor",
".",
"curve",
".",
"mnemonic",
":",
"return",
"decor",
"except",
"AttributeError",
":",
"continue",
"return",
"Decor",
"(",
"{",
"'colour'",
":",
"'#eeeeee'",
",",
"'component'",
":",
"Component",
"(",
")",
"}",
")"
]
| Get the decor for a component.
Args:
c (component): The component to look up.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
Decor. The matching Decor from the Legend, or None if not found. | [
"Get",
"the",
"decor",
"for",
"a",
"component",
"."
]
| python | test |
sibirrer/lenstronomy | lenstronomy/LensModel/single_plane.py | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/single_plane.py#L191-L209 | def mass_2d(self, r, kwargs, bool_list=None):
"""
computes the mass enclosed a projected (2d) radius r
:param r: radius (in angular units)
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param bool_list: list of bools that are part of the output
:return: projected mass (in angular units, modulo epsilon_crit)
"""
bool_list = self._bool_list(bool_list)
mass_2d = 0
for i, func in enumerate(self.func_list):
if bool_list[i] is True:
kwargs_i = {k: v for k, v in kwargs[i].items() if not k in ['center_x', 'center_y']}
mass_2d_i = func.mass_2d_lens(r, **kwargs_i)
mass_2d += mass_2d_i
#except:
# raise ValueError('Lens profile %s does not support a 2d mass function!' % self.model_list[i])
return mass_2d | [
"def",
"mass_2d",
"(",
"self",
",",
"r",
",",
"kwargs",
",",
"bool_list",
"=",
"None",
")",
":",
"bool_list",
"=",
"self",
".",
"_bool_list",
"(",
"bool_list",
")",
"mass_2d",
"=",
"0",
"for",
"i",
",",
"func",
"in",
"enumerate",
"(",
"self",
".",
"func_list",
")",
":",
"if",
"bool_list",
"[",
"i",
"]",
"is",
"True",
":",
"kwargs_i",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
"[",
"i",
"]",
".",
"items",
"(",
")",
"if",
"not",
"k",
"in",
"[",
"'center_x'",
",",
"'center_y'",
"]",
"}",
"mass_2d_i",
"=",
"func",
".",
"mass_2d_lens",
"(",
"r",
",",
"*",
"*",
"kwargs_i",
")",
"mass_2d",
"+=",
"mass_2d_i",
"#except:",
"# raise ValueError('Lens profile %s does not support a 2d mass function!' % self.model_list[i])",
"return",
"mass_2d"
]
| computes the mass enclosed a projected (2d) radius r
:param r: radius (in angular units)
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param bool_list: list of bools that are part of the output
:return: projected mass (in angular units, modulo epsilon_crit) | [
"computes",
"the",
"mass",
"enclosed",
"a",
"projected",
"(",
"2d",
")",
"radius",
"r"
]
| python | train |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAARP/QAAccount.py | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QAAccount.py#L429-L489 | def message(self):
'the standard message which can be transfer'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
} | [
"def",
"message",
"(",
"self",
")",
":",
"return",
"{",
"'source'",
":",
"'account'",
",",
"'frequence'",
":",
"self",
".",
"frequence",
",",
"'account_cookie'",
":",
"self",
".",
"account_cookie",
",",
"'portfolio_cookie'",
":",
"self",
".",
"portfolio_cookie",
",",
"'user_cookie'",
":",
"self",
".",
"user_cookie",
",",
"'broker'",
":",
"self",
".",
"broker",
",",
"'market_type'",
":",
"self",
".",
"market_type",
",",
"'strategy_name'",
":",
"self",
".",
"strategy_name",
",",
"'current_time'",
":",
"str",
"(",
"self",
".",
"_currenttime",
")",
",",
"'allow_sellopen'",
":",
"self",
".",
"allow_sellopen",
",",
"'allow_margin'",
":",
"self",
".",
"allow_margin",
",",
"'allow_t0'",
":",
"self",
".",
"allow_t0",
",",
"'margin_level'",
":",
"self",
".",
"margin_level",
",",
"'init_assets'",
":",
"self",
".",
"init_assets",
",",
"'init_cash'",
":",
"self",
".",
"init_cash",
",",
"'init_hold'",
":",
"self",
".",
"init_hold",
".",
"to_dict",
"(",
")",
",",
"'commission_coeff'",
":",
"self",
".",
"commission_coeff",
",",
"'tax_coeff'",
":",
"self",
".",
"tax_coeff",
",",
"'cash'",
":",
"self",
".",
"cash",
",",
"'history'",
":",
"self",
".",
"history",
",",
"'trade_index'",
":",
"self",
".",
"time_index_max",
",",
"'running_time'",
":",
"str",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
"if",
"self",
".",
"running_time",
"is",
"None",
"else",
"str",
"(",
"self",
".",
"running_time",
")",
",",
"'quantaxis_version'",
":",
"self",
".",
"quantaxis_version",
",",
"'running_environment'",
":",
"self",
".",
"running_environment",
",",
"'start_date'",
":",
"self",
".",
"start_date",
",",
"'end_date'",
":",
"self",
".",
"end_date",
",",
"'frozen'",
":",
"self",
".",
"frozen",
",",
"'finished_id'",
":",
"self",
".",
"finishedOrderid",
"}"
]
| the standard message which can be transfer | [
"the",
"standard",
"message",
"which",
"can",
"be",
"transfer"
]
| python | train |
CalebBell/thermo | thermo/critical.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/critical.py#L1510-L1570 | def modified_Wilson_Tc(zs, Tcs, Aijs):
r'''Calculates critical temperature of a mixture according to
mixing rules in [1]_. Equation
.. math::
T_{cm} = \sum_i x_i T_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)T_{ref}
For a binary mixture, this simplifies to:
.. math::
T_{cm} = x_1 T_{c1} + x_2 T_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})]
Parameters
----------
zs : float
Mole fractions of all components
Tcs : float
Critical temperatures of all components, [K]
Aijs : matrix
Interaction parameters
Returns
-------
Tcm : float
Critical temperatures of the mixture, [K]
Notes
-----
The equation and original article has been reviewed.
[1]_ has 75 binary systems, and additional multicomponent mixture parameters.
All parameters, even if zero, must be given to this function.
2nd example is from [2]_, for:
butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
Its result is identical to that calculated in the article.
Examples
--------
>>> modified_Wilson_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
... [[0, 1.174450, 1.274390], [0.835914, 0, 1.21038],
... [0.746878, 0.80677, 0]])
450.0305966823031
References
----------
.. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the
Calculation of Gas-Liquid Critical Temperatures and Pressures of
Multicomponent Mixtures." Industrial & Engineering Chemistry Process
Design and Development 22, no. 4 (1983): 672-76.
.. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
"Prediction of True Critical Temperature of Multi-Component Mixtures:
Extending Fast Estimation Methods." Fluid Phase Equilibria 392
(April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
'''
if not none_and_length_check([zs, Tcs]):
raise Exception('Function inputs are incorrect format')
C = -2500
Tcm = sum(zs[i]*Tcs[i] for i in range(len(zs)))
for i in range(len(zs)):
Tcm += C*zs[i]*log(zs[i] + sum(zs[j]*Aijs[i][j] for j in range(len(zs))))
return Tcm | [
"def",
"modified_Wilson_Tc",
"(",
"zs",
",",
"Tcs",
",",
"Aijs",
")",
":",
"if",
"not",
"none_and_length_check",
"(",
"[",
"zs",
",",
"Tcs",
"]",
")",
":",
"raise",
"Exception",
"(",
"'Function inputs are incorrect format'",
")",
"C",
"=",
"-",
"2500",
"Tcm",
"=",
"sum",
"(",
"zs",
"[",
"i",
"]",
"*",
"Tcs",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"zs",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"zs",
")",
")",
":",
"Tcm",
"+=",
"C",
"*",
"zs",
"[",
"i",
"]",
"*",
"log",
"(",
"zs",
"[",
"i",
"]",
"+",
"sum",
"(",
"zs",
"[",
"j",
"]",
"*",
"Aijs",
"[",
"i",
"]",
"[",
"j",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"zs",
")",
")",
")",
")",
"return",
"Tcm"
]
| r'''Calculates critical temperature of a mixture according to
mixing rules in [1]_. Equation
.. math::
T_{cm} = \sum_i x_i T_{ci} + C\sum_i x_i \ln \left(x_i + \sum_j x_j A_{ij}\right)T_{ref}
For a binary mixture, this simplifies to:
.. math::
T_{cm} = x_1 T_{c1} + x_2 T_{c2} + C[x_1 \ln(x_1 + x_2A_{12}) + x_2\ln(x_2 + x_1 A_{21})]
Parameters
----------
zs : float
Mole fractions of all components
Tcs : float
Critical temperatures of all components, [K]
Aijs : matrix
Interaction parameters
Returns
-------
Tcm : float
Critical temperatures of the mixture, [K]
Notes
-----
The equation and original article has been reviewed.
[1]_ has 75 binary systems, and additional multicomponent mixture parameters.
All parameters, even if zero, must be given to this function.
2nd example is from [2]_, for:
butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
Its result is identical to that calculated in the article.
Examples
--------
>>> modified_Wilson_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
... [[0, 1.174450, 1.274390], [0.835914, 0, 1.21038],
... [0.746878, 0.80677, 0]])
450.0305966823031
References
----------
.. [1] Teja, Amyn S., Kul B. Garg, and Richard L. Smith. "A Method for the
Calculation of Gas-Liquid Critical Temperatures and Pressures of
Multicomponent Mixtures." Industrial & Engineering Chemistry Process
Design and Development 22, no. 4 (1983): 672-76.
.. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
"Prediction of True Critical Temperature of Multi-Component Mixtures:
Extending Fast Estimation Methods." Fluid Phase Equilibria 392
(April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001. | [
"r",
"Calculates",
"critical",
"temperature",
"of",
"a",
"mixture",
"according",
"to",
"mixing",
"rules",
"in",
"[",
"1",
"]",
"_",
".",
"Equation"
]
| python | valid |
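
The mixing rule reduces to a few lines of plain Python; this sketch re-derives the docstring's butane/pentane/hexane example and should print roughly 450.03 K:

    from math import log

    def modified_wilson_tc(zs, Tcs, Aijs, C=-2500.0):
        """Tcm = sum(zi*Tci) + C * sum(zi * ln(zi + sum_j zj*Aij))."""
        Tcm = sum(zi * Tci for zi, Tci in zip(zs, Tcs))
        for i, zi in enumerate(zs):
            Tcm += C * zi * log(zi + sum(zj * Aijs[i][j] for j, zj in enumerate(zs)))
        return Tcm

    zs = [0.6449, 0.2359, 0.1192]
    Tcs = [425.12, 469.7, 507.6]
    Aijs = [[0, 1.174450, 1.274390],
            [0.835914, 0, 1.21038],
            [0.746878, 0.80677, 0]]
    print(modified_wilson_tc(zs, Tcs, Aijs))   # ~450.03 K
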
gwastro/pycbc | pycbc/filter/matchedfilter.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/matchedfilter.py#L1814-L1892 | def compute_followup_snr_series(data_reader, htilde, trig_time,
duration=0.095, check_state=True,
coinc_window=0.05):
"""Given a StrainBuffer, a template frequency series and a trigger time,
compute a portion of the SNR time series centered on the trigger for its
rapid sky localization and followup.
If the trigger time is too close to the boundary of the valid data segment
the SNR series is calculated anyway and might be slightly contaminated by
filter and wrap-around effects. For reasonable durations this will only
affect a small fraction of the triggers and probably in a negligible way.
Parameters
----------
data_reader : StrainBuffer
The StrainBuffer object to read strain data from.
htilde : FrequencySeries
The frequency series containing the template waveform.
trig_time : {float, lal.LIGOTimeGPS}
The trigger time.
duration : float (optional)
Duration of the computed SNR series in seconds. If omitted, it defaults
to twice the Earth light travel time plus 10 ms of timing uncertainty.
check_state : boolean
If True, and the detector was offline or flagged for bad data quality
at any point during the inspiral, then return (None, None) instead.
coinc_window : float (optional)
Maximum possible time between coincident triggers at different
detectors. This is needed to properly determine data padding.
Returns
-------
snr : TimeSeries
The portion of SNR around the trigger. None if the detector is offline
or has bad data quality, and check_state is True.
"""
if check_state:
# was the detector observing for the full amount of involved data?
state_start_time = trig_time - duration / 2 - htilde.length_in_time
state_end_time = trig_time + duration / 2
state_duration = state_end_time - state_start_time
if data_reader.state is not None:
if not data_reader.state.is_extent_valid(state_start_time,
state_duration):
return None
# was the data quality ok for the full amount of involved data?
dq_start_time = state_start_time - data_reader.dq_padding
dq_duration = state_duration + 2 * data_reader.dq_padding
if data_reader.dq is not None:
if not data_reader.dq.is_extent_valid(dq_start_time, dq_duration):
return None
stilde = data_reader.overwhitened_data(htilde.delta_f)
snr, _, norm = matched_filter_core(htilde, stilde,
h_norm=htilde.sigmasq(stilde.psd))
valid_end = int(len(snr) - data_reader.trim_padding)
valid_start = int(valid_end - data_reader.blocksize * snr.sample_rate)
half_dur_samples = int(snr.sample_rate * duration / 2)
coinc_samples = int(snr.sample_rate * coinc_window)
valid_start -= half_dur_samples + coinc_samples
valid_end += half_dur_samples
if valid_start < 0 or valid_end > len(snr)-1:
raise ValueError(('Requested SNR duration ({0} s)'
' too long').format(duration))
# Onsource slice for Bayestar followup
onsource_idx = float(trig_time - snr.start_time) * snr.sample_rate
onsource_idx = int(round(onsource_idx))
onsource_slice = slice(onsource_idx - half_dur_samples,
onsource_idx + half_dur_samples + 1)
return snr[onsource_slice] * norm | [
"def",
"compute_followup_snr_series",
"(",
"data_reader",
",",
"htilde",
",",
"trig_time",
",",
"duration",
"=",
"0.095",
",",
"check_state",
"=",
"True",
",",
"coinc_window",
"=",
"0.05",
")",
":",
"if",
"check_state",
":",
"# was the detector observing for the full amount of involved data?",
"state_start_time",
"=",
"trig_time",
"-",
"duration",
"/",
"2",
"-",
"htilde",
".",
"length_in_time",
"state_end_time",
"=",
"trig_time",
"+",
"duration",
"/",
"2",
"state_duration",
"=",
"state_end_time",
"-",
"state_start_time",
"if",
"data_reader",
".",
"state",
"is",
"not",
"None",
":",
"if",
"not",
"data_reader",
".",
"state",
".",
"is_extent_valid",
"(",
"state_start_time",
",",
"state_duration",
")",
":",
"return",
"None",
"# was the data quality ok for the full amount of involved data?",
"dq_start_time",
"=",
"state_start_time",
"-",
"data_reader",
".",
"dq_padding",
"dq_duration",
"=",
"state_duration",
"+",
"2",
"*",
"data_reader",
".",
"dq_padding",
"if",
"data_reader",
".",
"dq",
"is",
"not",
"None",
":",
"if",
"not",
"data_reader",
".",
"dq",
".",
"is_extent_valid",
"(",
"dq_start_time",
",",
"dq_duration",
")",
":",
"return",
"None",
"stilde",
"=",
"data_reader",
".",
"overwhitened_data",
"(",
"htilde",
".",
"delta_f",
")",
"snr",
",",
"_",
",",
"norm",
"=",
"matched_filter_core",
"(",
"htilde",
",",
"stilde",
",",
"h_norm",
"=",
"htilde",
".",
"sigmasq",
"(",
"stilde",
".",
"psd",
")",
")",
"valid_end",
"=",
"int",
"(",
"len",
"(",
"snr",
")",
"-",
"data_reader",
".",
"trim_padding",
")",
"valid_start",
"=",
"int",
"(",
"valid_end",
"-",
"data_reader",
".",
"blocksize",
"*",
"snr",
".",
"sample_rate",
")",
"half_dur_samples",
"=",
"int",
"(",
"snr",
".",
"sample_rate",
"*",
"duration",
"/",
"2",
")",
"coinc_samples",
"=",
"int",
"(",
"snr",
".",
"sample_rate",
"*",
"coinc_window",
")",
"valid_start",
"-=",
"half_dur_samples",
"+",
"coinc_samples",
"valid_end",
"+=",
"half_dur_samples",
"if",
"valid_start",
"<",
"0",
"or",
"valid_end",
">",
"len",
"(",
"snr",
")",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"(",
"'Requested SNR duration ({0} s)'",
"' too long'",
")",
".",
"format",
"(",
"duration",
")",
")",
"# Onsource slice for Bayestar followup",
"onsource_idx",
"=",
"float",
"(",
"trig_time",
"-",
"snr",
".",
"start_time",
")",
"*",
"snr",
".",
"sample_rate",
"onsource_idx",
"=",
"int",
"(",
"round",
"(",
"onsource_idx",
")",
")",
"onsource_slice",
"=",
"slice",
"(",
"onsource_idx",
"-",
"half_dur_samples",
",",
"onsource_idx",
"+",
"half_dur_samples",
"+",
"1",
")",
"return",
"snr",
"[",
"onsource_slice",
"]",
"*",
"norm"
]
| Given a StrainBuffer, a template frequency series and a trigger time,
compute a portion of the SNR time series centered on the trigger for its
rapid sky localization and followup.
If the trigger time is too close to the boundary of the valid data segment
the SNR series is calculated anyway and might be slightly contaminated by
filter and wrap-around effects. For reasonable durations this will only
affect a small fraction of the triggers and probably in a negligible way.
Parameters
----------
data_reader : StrainBuffer
The StrainBuffer object to read strain data from.
htilde : FrequencySeries
The frequency series containing the template waveform.
trig_time : {float, lal.LIGOTimeGPS}
The trigger time.
duration : float (optional)
Duration of the computed SNR series in seconds. If omitted, it defaults
to twice the Earth light travel time plus 10 ms of timing uncertainty.
check_state : boolean
If True, and the detector was offline or flagged for bad data quality
at any point during the inspiral, then return (None, None) instead.
coinc_window : float (optional)
Maximum possible time between coincident triggers at different
detectors. This is needed to properly determine data padding.
Returns
-------
snr : TimeSeries
The portion of SNR around the trigger. None if the detector is offline
or has bad data quality, and check_state is True. | [
"Given",
"a",
"StrainBuffer",
"a",
"template",
"frequency",
"series",
"and",
"a",
"trigger",
"time",
"compute",
"a",
"portion",
"of",
"the",
"SNR",
"time",
"series",
"centered",
"on",
"the",
"trigger",
"for",
"its",
"rapid",
"sky",
"localization",
"and",
"followup",
"."
]
| python | train |
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2711-L2725 | def enable_host_check(self, host):
"""Enable checks for a host
Format of the line that triggers function call::
ENABLE_HOST_CHECK;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
if not host.active_checks_enabled:
host.active_checks_enabled = True
host.modified_attributes |= \
DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.send_an_element(host.get_update_status_brok()) | [
"def",
"enable_host_check",
"(",
"self",
",",
"host",
")",
":",
"if",
"not",
"host",
".",
"active_checks_enabled",
":",
"host",
".",
"active_checks_enabled",
"=",
"True",
"host",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_ACTIVE_CHECKS_ENABLED\"",
"]",
".",
"value",
"self",
".",
"send_an_element",
"(",
"host",
".",
"get_update_status_brok",
"(",
")",
")"
]
| Enable checks for a host
Format of the line that triggers function call::
ENABLE_HOST_CHECK;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None | [
"Enable",
"checks",
"for",
"a",
"host",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
]
| python | train |
HHammond/PrettyPandas | prettypandas/formatters.py | https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/formatters.py#L12-L24 | def _surpress_formatting_errors(fn):
"""
I know this is dangerous and the wrong way to solve the problem, but when
using both row and columns summaries it's easier to just swallow errors
so users can format their tables how they need.
"""
@wraps(fn)
def inner(*args, **kwargs):
try:
return fn(*args, **kwargs)
except ValueError:
return ""
return inner | [
"def",
"_surpress_formatting_errors",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"ValueError",
":",
"return",
"\"\"",
"return",
"inner"
]
| I know this is dangerous and the wrong way to solve the problem, but when
using both row and columns summaries it's easier to just swallow errors
so users can format their tables how they need. | [
"I",
"know",
"this",
"is",
"dangerous",
"and",
"the",
"wrong",
"way",
"to",
"solve",
"the",
"problem",
"but",
"when",
"using",
"both",
"row",
"and",
"columns",
"summaries",
"it",
"s",
"easier",
"to",
"just",
"swallow",
"errors",
"so",
"users",
"can",
"format",
"their",
"tables",
"how",
"they",
"need",
"."
]
| python | train |
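
The decorator simply trades a ValueError for an empty cell; a small usage sketch of the same pattern (the formatter function is hypothetical):

    from functools import wraps

    def swallow_value_errors(fn):
        """Return '' instead of raising ValueError, keeping the wrapped signature."""
        @wraps(fn)
        def inner(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except ValueError:
                return ""
        return inner

    @swallow_value_errors
    def as_percent(value):
        return "{:.1%}".format(float(value))

    print(as_percent(0.25))     # '25.0%'
    print(as_percent("n/a"))    # '' rather than a ValueError
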
paramiko/paramiko | paramiko/ssh_gss.py | https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/ssh_gss.py#L532-L560 | def ssh_check_mic(self, mic_token, session_id, username=None):
"""
Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``sspi.error`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
if username is not None:
# server mode
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
# Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_srv_ctxt.verify(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
# Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_ctxt.verify(self._session_id, mic_token) | [
"def",
"ssh_check_mic",
"(",
"self",
",",
"mic_token",
",",
"session_id",
",",
"username",
"=",
"None",
")",
":",
"self",
".",
"_session_id",
"=",
"session_id",
"self",
".",
"_username",
"=",
"username",
"if",
"username",
"is",
"not",
"None",
":",
"# server mode",
"mic_field",
"=",
"self",
".",
"_ssh_build_mic",
"(",
"self",
".",
"_session_id",
",",
"self",
".",
"_username",
",",
"self",
".",
"_service",
",",
"self",
".",
"_auth_method",
",",
")",
"# Verifies data and its signature. If verification fails, an",
"# sspi.error will be raised.",
"self",
".",
"_gss_srv_ctxt",
".",
"verify",
"(",
"mic_field",
",",
"mic_token",
")",
"else",
":",
"# for key exchange with gssapi-keyex",
"# client mode",
"# Verifies data and its signature. If verification fails, an",
"# sspi.error will be raised.",
"self",
".",
"_gss_ctxt",
".",
"verify",
"(",
"self",
".",
"_session_id",
",",
"mic_token",
")"
]
| Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``sspi.error`` -- if the MIC check failed | [
"Verify",
"the",
"MIC",
"token",
"for",
"a",
"SSH2",
"message",
"."
]
| python | train |
pescadores/pescador | pescador/core.py | https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/core.py#L169-L177 | def _activate(self):
"""Activates the stream."""
if six.callable(self.streamer):
# If it's a function, create the stream.
self.stream_ = self.streamer(*(self.args), **(self.kwargs))
else:
# If it's iterable, use it directly.
self.stream_ = iter(self.streamer) | [
"def",
"_activate",
"(",
"self",
")",
":",
"if",
"six",
".",
"callable",
"(",
"self",
".",
"streamer",
")",
":",
"# If it's a function, create the stream.",
"self",
".",
"stream_",
"=",
"self",
".",
"streamer",
"(",
"*",
"(",
"self",
".",
"args",
")",
",",
"*",
"*",
"(",
"self",
".",
"kwargs",
")",
")",
"else",
":",
"# If it's iterable, use it directly.",
"self",
".",
"stream_",
"=",
"iter",
"(",
"self",
".",
"streamer",
")"
]
| Activates the stream. | [
"Activates",
"the",
"stream",
"."
]
| python | train |
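
The callable-versus-iterable dispatch is the whole trick; a standalone sketch of the same behaviour (names are illustrative, not the pescador API):

    def activate(streamer, *args, **kwargs):
        """Call streamer if it is callable, otherwise iterate it directly."""
        if callable(streamer):
            return streamer(*args, **kwargs)
        return iter(streamer)

    def counter(n):
        for i in range(n):
            yield i

    print(list(activate(counter, 3)))       # generator function -> [0, 1, 2]
    print(list(activate([10, 20, 30])))     # plain iterable     -> [10, 20, 30]
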
openstax/cnx-archive | cnxarchive/views/exports.py | https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/exports.py#L50-L89 | def get_export(request):
"""Retrieve an export file."""
settings = get_current_registry().settings
exports_dirs = settings['exports-directories'].split()
args = request.matchdict
ident_hash, type = args['ident_hash'], args['type']
id, version = split_ident_hash(ident_hash)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
try:
results = get_export_files(cursor, id, version, [type],
exports_dirs, read_file=True)
if not results:
raise httpexceptions.HTTPNotFound()
filename, mimetype, size, modtime, state, file_content \
= results[0]
except ExportError as e:
logger.debug(str(e))
raise httpexceptions.HTTPNotFound()
if state == 'missing':
raise httpexceptions.HTTPNotFound()
encoded_filename = urllib.quote(filename.encode('utf-8'))
resp = request.response
resp.status = "200 OK"
resp.content_type = mimetype
# Need both filename and filename* below for various browsers
# See: https://fastmail.blog/2011/06/24/download-non-english-filenames/
resp.content_disposition = "attachment; filename={fname};" \
" filename*=UTF-8''{fname}".format(
fname=encoded_filename)
resp.body = file_content
# Remove version and extension from filename, to recover title slug
slug_title = '-'.join(encoded_filename.split('-')[:-1])
resp.headerlist.append(
('Link', '<https://{}/contents/{}/{}> ;rel="Canonical"'.format(
request.host, id, slug_title)))
return resp | [
"def",
"get_export",
"(",
"request",
")",
":",
"settings",
"=",
"get_current_registry",
"(",
")",
".",
"settings",
"exports_dirs",
"=",
"settings",
"[",
"'exports-directories'",
"]",
".",
"split",
"(",
")",
"args",
"=",
"request",
".",
"matchdict",
"ident_hash",
",",
"type",
"=",
"args",
"[",
"'ident_hash'",
"]",
",",
"args",
"[",
"'type'",
"]",
"id",
",",
"version",
"=",
"split_ident_hash",
"(",
"ident_hash",
")",
"with",
"db_connect",
"(",
")",
"as",
"db_connection",
":",
"with",
"db_connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"try",
":",
"results",
"=",
"get_export_files",
"(",
"cursor",
",",
"id",
",",
"version",
",",
"[",
"type",
"]",
",",
"exports_dirs",
",",
"read_file",
"=",
"True",
")",
"if",
"not",
"results",
":",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"filename",
",",
"mimetype",
",",
"size",
",",
"modtime",
",",
"state",
",",
"file_content",
"=",
"results",
"[",
"0",
"]",
"except",
"ExportError",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"str",
"(",
"e",
")",
")",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"if",
"state",
"==",
"'missing'",
":",
"raise",
"httpexceptions",
".",
"HTTPNotFound",
"(",
")",
"encoded_filename",
"=",
"urllib",
".",
"quote",
"(",
"filename",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"resp",
"=",
"request",
".",
"response",
"resp",
".",
"status",
"=",
"\"200 OK\"",
"resp",
".",
"content_type",
"=",
"mimetype",
"# Need both filename and filename* below for various browsers",
"# See: https://fastmail.blog/2011/06/24/download-non-english-filenames/",
"resp",
".",
"content_disposition",
"=",
"\"attachment; filename={fname};\"",
"\" filename*=UTF-8''{fname}\"",
".",
"format",
"(",
"fname",
"=",
"encoded_filename",
")",
"resp",
".",
"body",
"=",
"file_content",
"# Remove version and extension from filename, to recover title slug",
"slug_title",
"=",
"'-'",
".",
"join",
"(",
"encoded_filename",
".",
"split",
"(",
"'-'",
")",
"[",
":",
"-",
"1",
"]",
")",
"resp",
".",
"headerlist",
".",
"append",
"(",
"(",
"'Link'",
",",
"'<https://{}/contents/{}/{}> ;rel=\"Canonical\"'",
".",
"format",
"(",
"request",
".",
"host",
",",
"id",
",",
"slug_title",
")",
")",
")",
"return",
"resp"
]
| Retrieve an export file. | [
"Retrieve",
"an",
"export",
"file",
"."
]
| python | train |
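
The dual filename/filename* header is the part worth remembering for non-ASCII downloads; a Python 3 sketch of the same construction (the view above uses the Python 2 urllib.quote):

    from urllib.parse import quote

    def content_disposition(filename):
        """Build "attachment; filename=...; filename*=UTF-8''..." for a download."""
        encoded = quote(filename.encode("utf-8"))
        return "attachment; filename={fname}; filename*=UTF-8''{fname}".format(fname=encoded)

    print(content_disposition("física-1.2.pdf"))   # percent-encoded in both fields
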
basvandenbroek/gcloud_taskqueue | gcloud_taskqueue/taskqueue.py | https://github.com/basvandenbroek/gcloud_taskqueue/blob/b147b57f7c0ad9e8030ee9797d6526a448aa5007/gcloud_taskqueue/taskqueue.py#L51-L61 | def get_items_from_response(self, response):
"""Yield :class:`.taskqueue.task.Task` items from response.
:type response: dict
:param response: The JSON API response for a page of tasks.
"""
for item in response.get('items', []):
id = item.get('id')
task = Task(id, taskqueue=self.taskqueue)
task._set_properties(item)
yield task | [
"def",
"get_items_from_response",
"(",
"self",
",",
"response",
")",
":",
"for",
"item",
"in",
"response",
".",
"get",
"(",
"'items'",
",",
"[",
"]",
")",
":",
"id",
"=",
"item",
".",
"get",
"(",
"'id'",
")",
"task",
"=",
"Task",
"(",
"id",
",",
"taskqueue",
"=",
"self",
".",
"taskqueue",
")",
"task",
".",
"_set_properties",
"(",
"item",
")",
"yield",
"task"
]
| Yield :class:`.taskqueue.task.Task` items from response.
:type response: dict
:param response: The JSON API response for a page of tasks. | [
"Yield",
":",
"class",
":",
".",
"taskqueue",
".",
"task",
".",
"Task",
"items",
"from",
"response",
"."
]
| python | train |
instacart/jardin | jardin/model.py | https://github.com/instacart/jardin/blob/007e283b9ccd621b60b86679148cacd9eab7c4e3/jardin/model.py#L378-L387 | def delete(self, **kwargs):
"""
Performs a DELETE statement on the model's table in the master database.
:param where: The WHERE clause. This can be a plain string, a dict or an array.
:type where: string, dict, array
"""
kwargs['stack'] = self.stack_mark(inspect.stack())
return self.db_adapter(role='master').delete(**kwargs) | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'stack'",
"]",
"=",
"self",
".",
"stack_mark",
"(",
"inspect",
".",
"stack",
"(",
")",
")",
"return",
"self",
".",
"db_adapter",
"(",
"role",
"=",
"'master'",
")",
".",
"delete",
"(",
"*",
"*",
"kwargs",
")"
]
| Performs a DELETE statement on the model's table in the master database.
:param where: The WHERE clause. This can be a plain string, a dict or an array.
:type where: string, dict, array | [
"Performs",
"a",
"DELETE",
"statement",
"on",
"the",
"model",
"s",
"table",
"in",
"the",
"master",
"database",
"."
]
| python | train |
ctuning/ck | ck/repo/module/repo/module.py | https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/repo/module/repo/module.py#L1632-L1767 | def get_and_unzip_archive(i):
"""
Input: {
zip - zip filename or URL
path - path to extract
(overwrite) - if 'yes', overwrite files when unarchiving
(path_to_remove) - if !='', remove this part of the path from extracted archive
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
zp=i['zip']
p=i['path']
pr=i.get('path_to_remove','')
overwrite=i.get('overwrite','')
# If zip, get (download) and unzip file ...
rm_zip=False
if zp.find('://')>=0:
if o=='con':
ck.out('Downloading CK archive ('+zp+') - it may take some time ...')
rm_zip=True
# Generate tmp file
import tempfile
fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-') # suffix is important - CK will delete such file!
os.close(fd)
os.remove(fn)
# Import modules compatible with Python 2.x and 3.x
import urllib
try:
import urllib.request as urllib2
except:
import urllib2
# Prepare request
request = urllib2.Request(zp)
# Connect
try:
f=urllib2.urlopen(request)
except Exception as e:
return {'return':1, 'error':'Failed downloading CK archive ('+format(e)+')'}
import time
t = time.time()
t0 = t
chunk=32767
size=0
try:
fo=open(fn, 'wb')
except Exception as e:
return {'return':1, 'error':'problem opening file='+fn+' ('+format(e)+')'}
# Read from Internet
try:
while True:
s=f.read(chunk)
if not s: break
fo.write(s)
size+=len(s)
if o=='con' and (time.time()-t)>3:
speed='%.1d' % (size/(1000*(time.time()-t0)))
ck.out(' Downloaded '+str(int(size/1000))+' KB ('+speed+' KB/sec.) ...')
t=time.time()
f.close()
except Exception as e:
return {'return':1, 'error':'Failed downlading CK archive ('+format(e)+')'}
fo.close()
zp=fn
# Unzip if zip
if zp!='':
if o=='con':
ck.out(' Extracting to '+p+' ...')
import zipfile
f=open(zp,'rb')
z=zipfile.ZipFile(f)
# First, try to find .ckr.json
xprefix=''
for dx in z.namelist():
if pr!='' and dx.startswith(pr): dx=dx[len(pr):]
if dx.endswith(ck.cfg['repo_file']):
xprefix=dx[:-len(ck.cfg['repo_file'])]
break
# Second, extract files
for dx in z.namelist():
dx1=dx
if pr!=''and dx1.startswith(pr): dx1=dx1[len(pr):]
if xprefix!='' and dx1.startswith(xprefix): dx1=dx1[len(xprefix):]
if dx1!='':
pp=os.path.join(p,dx1)
if dx.endswith('/'):
# create directory
if not os.path.exists(pp): os.makedirs(pp)
else:
# extract file
ppd=os.path.dirname(pp)
if not os.path.exists(ppd): os.makedirs(ppd)
if os.path.isfile(pp) and overwrite!='yes':
if o=='con':
ck.out('File '+dx+' already exists in the entry - skipping ...')
else:
fo=open(pp, 'wb')
fo.write(z.read(dx))
fo.close()
f.close()
if rm_zip:
os.remove(zp)
return {'return':0} | [
"def",
"get_and_unzip_archive",
"(",
"i",
")",
":",
"o",
"=",
"i",
".",
"get",
"(",
"'out'",
",",
"''",
")",
"zp",
"=",
"i",
"[",
"'zip'",
"]",
"p",
"=",
"i",
"[",
"'path'",
"]",
"pr",
"=",
"i",
".",
"get",
"(",
"'path_to_remove'",
",",
"''",
")",
"overwrite",
"=",
"i",
".",
"get",
"(",
"'overwrite'",
",",
"''",
")",
"# If zip, get (download) and unzip file ...",
"rm_zip",
"=",
"False",
"if",
"zp",
".",
"find",
"(",
"'://'",
")",
">=",
"0",
":",
"if",
"o",
"==",
"'con'",
":",
"ck",
".",
"out",
"(",
"'Downloading CK archive ('",
"+",
"zp",
"+",
"') - it may take some time ...'",
")",
"rm_zip",
"=",
"True",
"# Generate tmp file",
"import",
"tempfile",
"fd",
",",
"fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.tmp'",
",",
"prefix",
"=",
"'ck-'",
")",
"# suffix is important - CK will delete such file!",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"remove",
"(",
"fn",
")",
"# Import modules compatible with Python 2.x and 3.x",
"import",
"urllib",
"try",
":",
"import",
"urllib",
".",
"request",
"as",
"urllib2",
"except",
":",
"import",
"urllib2",
"# Prepare request",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"zp",
")",
"# Connect",
"try",
":",
"f",
"=",
"urllib2",
".",
"urlopen",
"(",
"request",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'Failed downloading CK archive ('",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"import",
"time",
"t",
"=",
"time",
".",
"time",
"(",
")",
"t0",
"=",
"t",
"chunk",
"=",
"32767",
"size",
"=",
"0",
"try",
":",
"fo",
"=",
"open",
"(",
"fn",
",",
"'wb'",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'problem opening file='",
"+",
"fn",
"+",
"' ('",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"# Read from Internet",
"try",
":",
"while",
"True",
":",
"s",
"=",
"f",
".",
"read",
"(",
"chunk",
")",
"if",
"not",
"s",
":",
"break",
"fo",
".",
"write",
"(",
"s",
")",
"size",
"+=",
"len",
"(",
"s",
")",
"if",
"o",
"==",
"'con'",
"and",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t",
")",
">",
"3",
":",
"speed",
"=",
"'%.1d'",
"%",
"(",
"size",
"/",
"(",
"1000",
"*",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t0",
")",
")",
")",
"ck",
".",
"out",
"(",
"' Downloaded '",
"+",
"str",
"(",
"int",
"(",
"size",
"/",
"1000",
")",
")",
"+",
"' KB ('",
"+",
"speed",
"+",
"' KB/sec.) ...'",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"f",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"{",
"'return'",
":",
"1",
",",
"'error'",
":",
"'Failed downlading CK archive ('",
"+",
"format",
"(",
"e",
")",
"+",
"')'",
"}",
"fo",
".",
"close",
"(",
")",
"zp",
"=",
"fn",
"# Unzip if zip",
"if",
"zp",
"!=",
"''",
":",
"if",
"o",
"==",
"'con'",
":",
"ck",
".",
"out",
"(",
"' Extracting to '",
"+",
"p",
"+",
"' ...'",
")",
"import",
"zipfile",
"f",
"=",
"open",
"(",
"zp",
",",
"'rb'",
")",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"f",
")",
"# First, try to find .ckr.json",
"xprefix",
"=",
"''",
"for",
"dx",
"in",
"z",
".",
"namelist",
"(",
")",
":",
"if",
"pr",
"!=",
"''",
"and",
"dx",
".",
"startswith",
"(",
"pr",
")",
":",
"dx",
"=",
"dx",
"[",
"len",
"(",
"pr",
")",
":",
"]",
"if",
"dx",
".",
"endswith",
"(",
"ck",
".",
"cfg",
"[",
"'repo_file'",
"]",
")",
":",
"xprefix",
"=",
"dx",
"[",
":",
"-",
"len",
"(",
"ck",
".",
"cfg",
"[",
"'repo_file'",
"]",
")",
"]",
"break",
"# Second, extract files",
"for",
"dx",
"in",
"z",
".",
"namelist",
"(",
")",
":",
"dx1",
"=",
"dx",
"if",
"pr",
"!=",
"''",
"and",
"dx1",
".",
"startswith",
"(",
"pr",
")",
":",
"dx1",
"=",
"dx1",
"[",
"len",
"(",
"pr",
")",
":",
"]",
"if",
"xprefix",
"!=",
"''",
"and",
"dx1",
".",
"startswith",
"(",
"xprefix",
")",
":",
"dx1",
"=",
"dx1",
"[",
"len",
"(",
"xprefix",
")",
":",
"]",
"if",
"dx1",
"!=",
"''",
":",
"pp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"dx1",
")",
"if",
"dx",
".",
"endswith",
"(",
"'/'",
")",
":",
"# create directory ",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"pp",
")",
":",
"os",
".",
"makedirs",
"(",
"pp",
")",
"else",
":",
"# extract file",
"ppd",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"pp",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"ppd",
")",
":",
"os",
".",
"makedirs",
"(",
"ppd",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"pp",
")",
"and",
"overwrite",
"!=",
"'yes'",
":",
"if",
"o",
"==",
"'con'",
":",
"ck",
".",
"out",
"(",
"'File '",
"+",
"dx",
"+",
"' already exists in the entry - skipping ...'",
")",
"else",
":",
"fo",
"=",
"open",
"(",
"pp",
",",
"'wb'",
")",
"fo",
".",
"write",
"(",
"z",
".",
"read",
"(",
"dx",
")",
")",
"fo",
".",
"close",
"(",
")",
"f",
".",
"close",
"(",
")",
"if",
"rm_zip",
":",
"os",
".",
"remove",
"(",
"zp",
")",
"return",
"{",
"'return'",
":",
"0",
"}"
]
| Input: {
zip - zip filename or URL
path - path to extract
(overwrite) - if 'yes', overwrite files when unarchiving
(path_to_remove) - if !='', remove this part of the path from extracted archive
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
} | [
"Input",
":",
"{",
"zip",
"-",
"zip",
"filename",
"or",
"URL",
"path",
"-",
"path",
"to",
"extract",
"(",
"overwrite",
")",
"-",
"if",
"yes",
"overwrite",
"files",
"when",
"unarchiving",
"(",
"path_to_remove",
")",
"-",
"if",
"!",
"=",
"remove",
"this",
"part",
"of",
"the",
"path",
"from",
"extracted",
"archive",
"}"
]
| python | train |
fabioz/PyDev.Debugger | third_party/pep8/lib2to3/lib2to3/pytree.py | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L513-L521 | def match_seq(self, nodes, results=None):
"""
Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.
"""
if len(nodes) != 1:
return False
return self.match(nodes[0], results) | [
"def",
"match_seq",
"(",
"self",
",",
"nodes",
",",
"results",
"=",
"None",
")",
":",
"if",
"len",
"(",
"nodes",
")",
"!=",
"1",
":",
"return",
"False",
"return",
"self",
".",
"match",
"(",
"nodes",
"[",
"0",
"]",
",",
"results",
")"
]
| Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns. | [
"Does",
"this",
"pattern",
"exactly",
"match",
"a",
"sequence",
"of",
"nodes?"
]
| python | train |
shazow/unstdlib.py | unstdlib/standard/datetime_.py | https://github.com/shazow/unstdlib.py/blob/e0632fe165cfbfdb5a7e4bc7b412c9d6f2ebad83/unstdlib/standard/datetime_.py#L87-L100 | def to_timezone(dt, timezone):
"""
Return an aware datetime which is ``dt`` converted to ``timezone``.
If ``dt`` is naive, it is assumed to be UTC.
For example, if ``dt`` is "06:00 UTC+0000" and ``timezone`` is "EDT-0400",
then the result will be "02:00 EDT-0400".
This method follows the guidelines in http://pytz.sourceforge.net/
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=_UTC)
return timezone.normalize(dt.astimezone(timezone)) | [
"def",
"to_timezone",
"(",
"dt",
",",
"timezone",
")",
":",
"if",
"dt",
".",
"tzinfo",
"is",
"None",
":",
"dt",
"=",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"_UTC",
")",
"return",
"timezone",
".",
"normalize",
"(",
"dt",
".",
"astimezone",
"(",
"timezone",
")",
")"
]
| Return an aware datetime which is ``dt`` converted to ``timezone``.
If ``dt`` is naive, it is assumed to be UTC.
For example, if ``dt`` is "06:00 UTC+0000" and ``timezone`` is "EDT-0400",
then the result will be "02:00 EDT-0400".
This method follows the guidelines in http://pytz.sourceforge.net/ | [
"Return",
"an",
"aware",
"datetime",
"which",
"is",
"dt",
"converted",
"to",
"timezone",
"."
]
| python | train |
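A minimal usage sketch for the to_timezone helper recorded above. It assumes pytz is installed and that the function is importable from unstdlib.standard.datetime_ (the module path given in this record); the date mirrors the docstring's 06:00 UTC example.

import datetime

import pytz
from unstdlib.standard.datetime_ import to_timezone

# A naive datetime is treated as UTC, per the docstring above.
naive_utc = datetime.datetime(2019, 6, 1, 6, 0, 0)
eastern = pytz.timezone('US/Eastern')

# 06:00 UTC corresponds to 02:00 EDT (UTC-0400) during daylight saving time.
local = to_timezone(naive_utc, eastern)
print(local.isoformat())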
fhs/pyhdf | pyhdf/SD.py | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1651-L1689 | def attributes(self, full=0):
"""Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionnary if no global attribute defined
Otherwise, dictionnary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of global attributes.
nsds, natts = self.info()
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res | [
"def",
"attributes",
"(",
"self",
",",
"full",
"=",
"0",
")",
":",
"# Get the number of global attributes.",
"nsds",
",",
"natts",
"=",
"self",
".",
"info",
"(",
")",
"# Inquire each attribute",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"natts",
")",
":",
"a",
"=",
"self",
".",
"attr",
"(",
"n",
")",
"name",
",",
"aType",
",",
"nVal",
"=",
"a",
".",
"info",
"(",
")",
"if",
"full",
":",
"res",
"[",
"name",
"]",
"=",
"(",
"a",
".",
"get",
"(",
")",
",",
"a",
".",
"index",
"(",
")",
",",
"aType",
",",
"nVal",
")",
"else",
":",
"res",
"[",
"name",
"]",
"=",
"a",
".",
"get",
"(",
")",
"return",
"res"
]
| Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionnary if no global attribute defined
Otherwise, dictionnary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"every",
"global",
"attribute",
"attached",
"to",
"the",
"SD",
"interface",
"."
]
| python | train |
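A short usage sketch for the attributes method recorded above. The file name is a placeholder, and the pyhdf.SD import path, the SDC.READ flag, and the end() call are assumptions about the rest of the pyhdf API, which is not shown in this record.

from pyhdf.SD import SD, SDC

sd = SD('example.hdf', SDC.READ)          # placeholder file name

# Default: each key maps directly to the attribute value.
print(sd.attributes())

# With full=1 each key maps to (value, index, type, length),
# as described in the docstring above.
for name, (value, index, attr_type, length) in sd.attributes(full=1).items():
    print(name, value, attr_type, length)

sd.end()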
unbservices/clams | clams/__init__.py | https://github.com/unbservices/clams/blob/2ae0a36eb8f82a153d27f74ef37688f976952789/clams/__init__.py#L381-L398 | def _init(self, parser):
"""Initialize/Build the ``argparse.ArgumentParser`` and subparsers.
This internal version of ``init`` is used to ensure that all
subcommands have a properly initialized parser.
Args
----
parser : argparse.ArgumentParser
The parser for this command.
"""
assert isinstance(parser, argparse.ArgumentParser)
self._init_parser(parser)
self._attach_arguments()
self._attach_subcommands()
self.initialized = True | [
"def",
"_init",
"(",
"self",
",",
"parser",
")",
":",
"assert",
"isinstance",
"(",
"parser",
",",
"argparse",
".",
"ArgumentParser",
")",
"self",
".",
"_init_parser",
"(",
"parser",
")",
"self",
".",
"_attach_arguments",
"(",
")",
"self",
".",
"_attach_subcommands",
"(",
")",
"self",
".",
"initialized",
"=",
"True"
]
| Initialize/Build the ``argparse.ArgumentParser`` and subparsers.
This internal version of ``init`` is used to ensure that all
subcommands have a properly initialized parser.
Args
----
parser : argparse.ArgumentParser
The parser for this command. | [
"Initialize",
"/",
"Build",
"the",
"argparse",
".",
"ArgumentParser",
"and",
"subparsers",
"."
]
| python | train |
edx/edx-val | edxval/models.py | https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/models.py#L129-L138 | def get_or_none(cls, **filter_kwargs):
"""
Returns a video or None.
"""
try:
video = cls.objects.get(**filter_kwargs)
except cls.DoesNotExist:
video = None
return video | [
"def",
"get_or_none",
"(",
"cls",
",",
"*",
"*",
"filter_kwargs",
")",
":",
"try",
":",
"video",
"=",
"cls",
".",
"objects",
".",
"get",
"(",
"*",
"*",
"filter_kwargs",
")",
"except",
"cls",
".",
"DoesNotExist",
":",
"video",
"=",
"None",
"return",
"video"
]
| Returns a video or None. | [
"Returns",
"a",
"video",
"or",
"None",
"."
]
| python | train |
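An illustrative call of get_or_none recorded above. The Video model and the edx_video_id filter field are assumptions about edx-val's models module, which is not shown in this record.

from edxval.models import Video

# Returns the matching Video, or None instead of raising DoesNotExist.
video = Video.get_or_none(edx_video_id='example-video-id')
if video is None:
    print('No such video')
else:
    print(video.pk)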
lxc/python2-lxc | lxc/__init__.py | https://github.com/lxc/python2-lxc/blob/b7ec757d2bea1e5787c3e65b1359b8893491ef90/lxc/__init__.py#L364-L413 | def set_config_item(self, key, value):
"""
Set a config key to a provided value.
The value can be a list for the keys supporting multiple values.
"""
try:
old_value = self.get_config_item(key)
except KeyError:
old_value = None
# Get everything to unicode with python2
if isinstance(value, str):
value = value.decode()
elif isinstance(value, list):
for i in range(len(value)):
if isinstance(value[i], str):
value[i] = value[i].decode()
# Check if it's a list
def set_key(key, value):
self.clear_config_item(key)
if isinstance(value, list):
for entry in value:
if not _lxc.Container.set_config_item(self, key, entry):
return False
else:
_lxc.Container.set_config_item(self, key, value)
set_key(key, value)
new_value = self.get_config_item(key)
# loglevel is special and won't match the string we set
if key == "lxc.loglevel":
new_value = value
if (isinstance(value, unicode) and isinstance(new_value, unicode) and
value == new_value):
return True
elif (isinstance(value, list) and isinstance(new_value, list) and
set(value) == set(new_value)):
return True
elif (isinstance(value, unicode) and isinstance(new_value, list) and
set([value]) == set(new_value)):
return True
elif old_value:
set_key(key, old_value)
return False
else:
self.clear_config_item(key)
return False | [
"def",
"set_config_item",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"try",
":",
"old_value",
"=",
"self",
".",
"get_config_item",
"(",
"key",
")",
"except",
"KeyError",
":",
"old_value",
"=",
"None",
"# Get everything to unicode with python2",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"value",
".",
"decode",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"value",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
"[",
"i",
"]",
",",
"str",
")",
":",
"value",
"[",
"i",
"]",
"=",
"value",
"[",
"i",
"]",
".",
"decode",
"(",
")",
"# Check if it's a list",
"def",
"set_key",
"(",
"key",
",",
"value",
")",
":",
"self",
".",
"clear_config_item",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"entry",
"in",
"value",
":",
"if",
"not",
"_lxc",
".",
"Container",
".",
"set_config_item",
"(",
"self",
",",
"key",
",",
"entry",
")",
":",
"return",
"False",
"else",
":",
"_lxc",
".",
"Container",
".",
"set_config_item",
"(",
"self",
",",
"key",
",",
"value",
")",
"set_key",
"(",
"key",
",",
"value",
")",
"new_value",
"=",
"self",
".",
"get_config_item",
"(",
"key",
")",
"# loglevel is special and won't match the string we set",
"if",
"key",
"==",
"\"lxc.loglevel\"",
":",
"new_value",
"=",
"value",
"if",
"(",
"isinstance",
"(",
"value",
",",
"unicode",
")",
"and",
"isinstance",
"(",
"new_value",
",",
"unicode",
")",
"and",
"value",
"==",
"new_value",
")",
":",
"return",
"True",
"elif",
"(",
"isinstance",
"(",
"value",
",",
"list",
")",
"and",
"isinstance",
"(",
"new_value",
",",
"list",
")",
"and",
"set",
"(",
"value",
")",
"==",
"set",
"(",
"new_value",
")",
")",
":",
"return",
"True",
"elif",
"(",
"isinstance",
"(",
"value",
",",
"unicode",
")",
"and",
"isinstance",
"(",
"new_value",
",",
"list",
")",
"and",
"set",
"(",
"[",
"value",
"]",
")",
"==",
"set",
"(",
"new_value",
")",
")",
":",
"return",
"True",
"elif",
"old_value",
":",
"set_key",
"(",
"key",
",",
"old_value",
")",
"return",
"False",
"else",
":",
"self",
".",
"clear_config_item",
"(",
"key",
")",
"return",
"False"
]
| Set a config key to a provided value.
The value can be a list for the keys supporting multiple values. | [
"Set",
"a",
"config",
"key",
"to",
"a",
"provided",
"value",
".",
"The",
"value",
"can",
"be",
"a",
"list",
"for",
"the",
"keys",
"supporting",
"multiple",
"values",
"."
]
| python | train |
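A small usage sketch for set_config_item recorded above. The container name and config keys are placeholders, the defined/save_config calls are assumptions about the rest of the python2-lxc Container API, and the example expects a container that already exists on the host.

import lxc

c = lxc.Container('mycontainer')                     # placeholder container name
if c.defined:
    # Single-valued key.
    c.set_config_item('lxc.utsname', 'myhost')
    # Multi-valued keys accept a list, per the docstring above.
    c.set_config_item('lxc.cap.drop', ['sys_module', 'mac_admin'])
    c.save_config()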
voidpp/python-tools | voidpp_tools/daemon.py | https://github.com/voidpp/python-tools/blob/0fc7460c827b02d8914411cedddadc23ccb3cc73/voidpp_tools/daemon.py#L118-L144 | def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
pid = self.get_pid()
if not pid:
message = "Pidfile %s does not exist. Daemon not running?" % self.pidfile
self.logger.error(message)
return message # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
self.logger.error(err)
sys.exit(1)
return 'Daemon is stopped' | [
"def",
"stop",
"(",
"self",
")",
":",
"# Get the pid from the pidfile",
"pid",
"=",
"self",
".",
"get_pid",
"(",
")",
"if",
"not",
"pid",
":",
"message",
"=",
"\"Pidfile %s does not exist. Daemon not running?\"",
"%",
"self",
".",
"pidfile",
"self",
".",
"logger",
".",
"error",
"(",
"message",
")",
"return",
"message",
"# not an error in a restart",
"# Try killing the daemon process",
"try",
":",
"while",
"1",
":",
"os",
".",
"kill",
"(",
"pid",
",",
"SIGTERM",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"except",
"OSError",
"as",
"err",
":",
"err",
"=",
"str",
"(",
"err",
")",
"if",
"err",
".",
"find",
"(",
"\"No such process\"",
")",
">",
"0",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"pidfile",
")",
":",
"os",
".",
"remove",
"(",
"self",
".",
"pidfile",
")",
"else",
":",
"self",
".",
"logger",
".",
"error",
"(",
"err",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"'Daemon is stopped'"
]
| Stop the daemon | [
"Stop",
"the",
"daemon"
]
| python | train |
pypa/pipenv | pipenv/vendor/delegator.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/delegator.py#L266-L287 | def pipe(self, command, timeout=None, cwd=None):
"""Runs the current command and passes its output to the next
given process.
"""
if not timeout:
timeout = self.timeout
if not self.was_run:
self.run(block=False, cwd=cwd)
data = self.out
if timeout:
c = Command(command, timeout)
else:
c = Command(command)
c.run(block=False, cwd=cwd)
if data:
c.send(data)
c.block()
return c | [
"def",
"pipe",
"(",
"self",
",",
"command",
",",
"timeout",
"=",
"None",
",",
"cwd",
"=",
"None",
")",
":",
"if",
"not",
"timeout",
":",
"timeout",
"=",
"self",
".",
"timeout",
"if",
"not",
"self",
".",
"was_run",
":",
"self",
".",
"run",
"(",
"block",
"=",
"False",
",",
"cwd",
"=",
"cwd",
")",
"data",
"=",
"self",
".",
"out",
"if",
"timeout",
":",
"c",
"=",
"Command",
"(",
"command",
",",
"timeout",
")",
"else",
":",
"c",
"=",
"Command",
"(",
"command",
")",
"c",
".",
"run",
"(",
"block",
"=",
"False",
",",
"cwd",
"=",
"cwd",
")",
"if",
"data",
":",
"c",
".",
"send",
"(",
"data",
")",
"c",
".",
"block",
"(",
")",
"return",
"c"
]
| Runs the current command and passes its output to the next
given process. | [
"Runs",
"the",
"current",
"command",
"and",
"passes",
"its",
"output",
"to",
"the",
"next",
"given",
"process",
"."
]
| python | train |
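A minimal sketch of the pipe method recorded above, using delegator's module-level run helper; the import path (standalone delegator rather than the pipenv-vendored copy) and the shell commands are assumptions.

import delegator

# Run one command, then feed its captured output to a second command.
c = delegator.run('cat setup.py')
piped = c.pipe('grep import')      # the method above sends c.out to grep's stdin
print(piped.out)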
iotile/coretools | iotilecore/iotile/core/utilities/intelhex/__init__.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/intelhex/__init__.py#L289-L295 | def frombytes(self, bytes, offset=0):
"""Load data from array or list of bytes.
Similar to loadbin() method but works directly with iterable bytes.
"""
for b in bytes:
self._buf[offset] = b
offset += 1 | [
"def",
"frombytes",
"(",
"self",
",",
"bytes",
",",
"offset",
"=",
"0",
")",
":",
"for",
"b",
"in",
"bytes",
":",
"self",
".",
"_buf",
"[",
"offset",
"]",
"=",
"b",
"offset",
"+=",
"1"
]
| Load data from array or list of bytes.
Similar to loadbin() method but works directly with iterable bytes. | [
"Load",
"data",
"from",
"array",
"or",
"list",
"of",
"bytes",
".",
"Similar",
"to",
"loadbin",
"()",
"method",
"but",
"works",
"directly",
"with",
"iterable",
"bytes",
"."
]
| python | train |
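A brief sketch for frombytes recorded above. It assumes the standalone intelhex package layout (IntelHex importable from intelhex, with a tofile writer), whereas this record shows the copy vendored inside iotile-core.

from intelhex import IntelHex

ih = IntelHex()

# Place four bytes starting at address 0x1000.
ih.frombytes([0xDE, 0xAD, 0xBE, 0xEF], offset=0x1000)

# Write the result out as Intel HEX text.
ih.tofile('out.hex', format='hex')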
google/grr | grr/server/grr_response_server/databases/mysql.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql.py#L138-L146 | def _CheckDatabaseEncoding(cursor):
"""Enforces a sane UTF-8 encoding for the database."""
cur_character_set = _ReadVariable("character_set_database", cursor)
if cur_character_set != CHARACTER_SET:
raise EncodingEnforcementError(
"Require MySQL character_set_database of {}, got {}."
" To create your database, use: {}".format(CHARACTER_SET,
cur_character_set,
CREATE_DATABASE_QUERY)) | [
"def",
"_CheckDatabaseEncoding",
"(",
"cursor",
")",
":",
"cur_character_set",
"=",
"_ReadVariable",
"(",
"\"character_set_database\"",
",",
"cursor",
")",
"if",
"cur_character_set",
"!=",
"CHARACTER_SET",
":",
"raise",
"EncodingEnforcementError",
"(",
"\"Require MySQL character_set_database of {}, got {}.\"",
"\" To create your database, use: {}\"",
".",
"format",
"(",
"CHARACTER_SET",
",",
"cur_character_set",
",",
"CREATE_DATABASE_QUERY",
")",
")"
]
| Enforces a sane UTF-8 encoding for the database. | [
"Enforces",
"a",
"sane",
"UTF",
"-",
"8",
"encoding",
"for",
"the",
"database",
"."
]
| python | train |
DataDog/integrations-core | mcache/datadog_checks/mcache/mcache.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/mcache/datadog_checks/mcache/mcache.py#L112-L123 | def _process_response(cls, response):
"""
Examine the response and raise an error is something is off
"""
if len(response) != 1:
raise BadResponseError("Malformed response: {}".format(response))
stats = list(itervalues(response))[0]
if not len(stats):
raise BadResponseError("Malformed response for host: {}".format(stats))
return stats | [
"def",
"_process_response",
"(",
"cls",
",",
"response",
")",
":",
"if",
"len",
"(",
"response",
")",
"!=",
"1",
":",
"raise",
"BadResponseError",
"(",
"\"Malformed response: {}\"",
".",
"format",
"(",
"response",
")",
")",
"stats",
"=",
"list",
"(",
"itervalues",
"(",
"response",
")",
")",
"[",
"0",
"]",
"if",
"not",
"len",
"(",
"stats",
")",
":",
"raise",
"BadResponseError",
"(",
"\"Malformed response for host: {}\"",
".",
"format",
"(",
"stats",
")",
")",
"return",
"stats"
]
| Examine the response and raise an error is something is off | [
"Examine",
"the",
"response",
"and",
"raise",
"an",
"error",
"is",
"something",
"is",
"off"
]
| python | train |
yv/pathconfig | py_src/pathconfig/factory.py | https://github.com/yv/pathconfig/blob/ae13901773b8465061e2aa93b2a53fd436ab6c69/py_src/pathconfig/factory.py#L132-L143 | def open_by_pat(self, name, mode='r', **kwargs):
'''
opens the file for the pattern given by *name*,
substituting the object's properties and the
additional keyword arguments given.
'''
fname = self.fname_by_pat(name, **kwargs)
if mode == 'w':
print >>sys.stderr, "Write[%s]: %s" % (name, fname)
else:
print >>sys.stderr, "Open[%s]: %s" % (name, fname)
return file(fname, mode) | [
"def",
"open_by_pat",
"(",
"self",
",",
"name",
",",
"mode",
"=",
"'r'",
",",
"*",
"*",
"kwargs",
")",
":",
"fname",
"=",
"self",
".",
"fname_by_pat",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
"if",
"mode",
"==",
"'w'",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Write[%s]: %s\"",
"%",
"(",
"name",
",",
"fname",
")",
"else",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Open[%s]: %s\"",
"%",
"(",
"name",
",",
"fname",
")",
"return",
"file",
"(",
"fname",
",",
"mode",
")"
]
| opens the file for the pattern given by *name*,
substituting the object's properties and the
additional keyword arguments given. | [
"opens",
"the",
"file",
"for",
"the",
"pattern",
"given",
"by",
"*",
"name",
"*",
"substituting",
"the",
"object",
"s",
"properties",
"and",
"the",
"additional",
"keyword",
"arguments",
"given",
"."
]
| python | train |
GoogleCloudPlatform/appengine-pipelines | python/src/pipeline/pipeline.py | https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2803-L2867 | def run_callback(self):
"""Runs the callback for the pipeline specified in the request.
Raises:
_CallbackTaskError if something was wrong with the request parameters.
"""
pipeline_id = self.request.get('pipeline_id')
if not pipeline_id:
raise _CallbackTaskError('"pipeline_id" parameter missing.')
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
raise _CallbackTaskError(
'Pipeline ID "%s" for callback does not exist.' % pipeline_id)
params = pipeline_record.params
real_class_path = params['class_path']
try:
pipeline_func_class = mr_util.for_name(real_class_path)
except ImportError, e:
raise _CallbackTaskError(
'Cannot load class named "%s" for pipeline ID "%s".'
% (real_class_path, pipeline_id))
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
if pipeline_func_class.public_callbacks:
pass
elif pipeline_func_class.admin_callbacks:
if not users.is_current_user_admin():
raise _CallbackTaskError(
'Unauthorized callback for admin-only pipeline ID "%s"'
% pipeline_id)
else:
raise _CallbackTaskError(
'External callback for internal-only pipeline ID "%s"'
% pipeline_id)
kwargs = {}
for key in self.request.arguments():
if key != 'pipeline_id':
kwargs[str(key)] = self.request.get(key)
def perform_callback():
stage = pipeline_func_class.from_id(pipeline_id)
if stage is None:
raise _CallbackTaskError(
'Pipeline ID "%s" deleted during callback' % pipeline_id)
return stage._callback_internal(kwargs)
# callback_xg_transaction is a 3-valued setting (None=no trans,
# False=1-eg-trans, True=xg-trans)
if pipeline_func_class._callback_xg_transaction is not None:
transaction_options = db.create_transaction_options(
xg=pipeline_func_class._callback_xg_transaction)
callback_result = db.run_in_transaction_options(transaction_options,
perform_callback)
else:
callback_result = perform_callback()
if callback_result is not None:
status_code, content_type, content = callback_result
self.response.set_status(status_code)
self.response.headers['Content-Type'] = content_type
self.response.out.write(content) | [
"def",
"run_callback",
"(",
"self",
")",
":",
"pipeline_id",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'pipeline_id'",
")",
"if",
"not",
"pipeline_id",
":",
"raise",
"_CallbackTaskError",
"(",
"'\"pipeline_id\" parameter missing.'",
")",
"pipeline_key",
"=",
"db",
".",
"Key",
".",
"from_path",
"(",
"_PipelineRecord",
".",
"kind",
"(",
")",
",",
"pipeline_id",
")",
"pipeline_record",
"=",
"db",
".",
"get",
"(",
"pipeline_key",
")",
"if",
"pipeline_record",
"is",
"None",
":",
"raise",
"_CallbackTaskError",
"(",
"'Pipeline ID \"%s\" for callback does not exist.'",
"%",
"pipeline_id",
")",
"params",
"=",
"pipeline_record",
".",
"params",
"real_class_path",
"=",
"params",
"[",
"'class_path'",
"]",
"try",
":",
"pipeline_func_class",
"=",
"mr_util",
".",
"for_name",
"(",
"real_class_path",
")",
"except",
"ImportError",
",",
"e",
":",
"raise",
"_CallbackTaskError",
"(",
"'Cannot load class named \"%s\" for pipeline ID \"%s\".'",
"%",
"(",
"real_class_path",
",",
"pipeline_id",
")",
")",
"if",
"'HTTP_X_APPENGINE_TASKNAME'",
"not",
"in",
"self",
".",
"request",
".",
"environ",
":",
"if",
"pipeline_func_class",
".",
"public_callbacks",
":",
"pass",
"elif",
"pipeline_func_class",
".",
"admin_callbacks",
":",
"if",
"not",
"users",
".",
"is_current_user_admin",
"(",
")",
":",
"raise",
"_CallbackTaskError",
"(",
"'Unauthorized callback for admin-only pipeline ID \"%s\"'",
"%",
"pipeline_id",
")",
"else",
":",
"raise",
"_CallbackTaskError",
"(",
"'External callback for internal-only pipeline ID \"%s\"'",
"%",
"pipeline_id",
")",
"kwargs",
"=",
"{",
"}",
"for",
"key",
"in",
"self",
".",
"request",
".",
"arguments",
"(",
")",
":",
"if",
"key",
"!=",
"'pipeline_id'",
":",
"kwargs",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"self",
".",
"request",
".",
"get",
"(",
"key",
")",
"def",
"perform_callback",
"(",
")",
":",
"stage",
"=",
"pipeline_func_class",
".",
"from_id",
"(",
"pipeline_id",
")",
"if",
"stage",
"is",
"None",
":",
"raise",
"_CallbackTaskError",
"(",
"'Pipeline ID \"%s\" deleted during callback'",
"%",
"pipeline_id",
")",
"return",
"stage",
".",
"_callback_internal",
"(",
"kwargs",
")",
"# callback_xg_transaction is a 3-valued setting (None=no trans,",
"# False=1-eg-trans, True=xg-trans)",
"if",
"pipeline_func_class",
".",
"_callback_xg_transaction",
"is",
"not",
"None",
":",
"transaction_options",
"=",
"db",
".",
"create_transaction_options",
"(",
"xg",
"=",
"pipeline_func_class",
".",
"_callback_xg_transaction",
")",
"callback_result",
"=",
"db",
".",
"run_in_transaction_options",
"(",
"transaction_options",
",",
"perform_callback",
")",
"else",
":",
"callback_result",
"=",
"perform_callback",
"(",
")",
"if",
"callback_result",
"is",
"not",
"None",
":",
"status_code",
",",
"content_type",
",",
"content",
"=",
"callback_result",
"self",
".",
"response",
".",
"set_status",
"(",
"status_code",
")",
"self",
".",
"response",
".",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"content_type",
"self",
".",
"response",
".",
"out",
".",
"write",
"(",
"content",
")"
]
| Runs the callback for the pipeline specified in the request.
Raises:
_CallbackTaskError if something was wrong with the request parameters. | [
"Runs",
"the",
"callback",
"for",
"the",
"pipeline",
"specified",
"in",
"the",
"request",
"."
]
| python | train |
pymacaron/pymacaron-core | pymacaron_core/swagger/spec.py | https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L142-L213 | def call_on_each_endpoint(self, callback):
"""Find all server endpoints defined in the swagger spec and calls 'callback' for each,
with an instance of EndpointData as argument.
"""
if 'paths' not in self.swagger_dict:
return
for path, d in list(self.swagger_dict['paths'].items()):
for method, op_spec in list(d.items()):
data = EndpointData(path, method)
# Which server method handles this endpoint?
if 'x-bind-server' not in op_spec:
if 'x-no-bind-server' in op_spec:
# That route should not be auto-generated
log.info("Skipping generation of %s %s" % (method, path))
continue
else:
raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path))
data.handler_server = op_spec['x-bind-server']
# Make sure that endpoint only produces 'application/json'
if 'produces' not in op_spec:
raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path))
if len(op_spec['produces']) != 1:
raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path))
if op_spec['produces'][0] == 'application/json':
data.produces_json = True
elif op_spec['produces'][0] == 'text/html':
data.produces_html = True
else:
raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path))
# Which client method handles this endpoint?
if 'x-bind-client' in op_spec:
data.handler_client = op_spec['x-bind-client']
# Should we decorate the server handler?
if 'x-decorate-server' in op_spec:
data.decorate_server = op_spec['x-decorate-server']
# Should we manipulate the requests parameters?
if 'x-decorate-request' in op_spec:
data.decorate_request = op_spec['x-decorate-request']
# Generate a bravado-core operation object
data.operation = Operation.from_spec(self.spec, path, method, op_spec)
# Figure out how parameters are passed: one json in body? one or
# more values in query?
if 'parameters' in op_spec:
params = op_spec['parameters']
for p in params:
if p['in'] == 'body':
data.param_in_body = True
if p['in'] == 'query':
data.param_in_query = True
if p['in'] == 'path':
data.param_in_path = True
if data.param_in_path:
# Substitute {...} with <...> in path, to make a Flask friendly path
data.path = data.path.replace('{', '<').replace('}', '>')
if data.param_in_body and data.param_in_query:
raise Exception("Cannot support params in both body and param (%s %s)" % (method, path))
else:
data.no_params = True
callback(data) | [
"def",
"call_on_each_endpoint",
"(",
"self",
",",
"callback",
")",
":",
"if",
"'paths'",
"not",
"in",
"self",
".",
"swagger_dict",
":",
"return",
"for",
"path",
",",
"d",
"in",
"list",
"(",
"self",
".",
"swagger_dict",
"[",
"'paths'",
"]",
".",
"items",
"(",
")",
")",
":",
"for",
"method",
",",
"op_spec",
"in",
"list",
"(",
"d",
".",
"items",
"(",
")",
")",
":",
"data",
"=",
"EndpointData",
"(",
"path",
",",
"method",
")",
"# Which server method handles this endpoint?",
"if",
"'x-bind-server'",
"not",
"in",
"op_spec",
":",
"if",
"'x-no-bind-server'",
"in",
"op_spec",
":",
"# That route should not be auto-generated",
"log",
".",
"info",
"(",
"\"Skipping generation of %s %s\"",
"%",
"(",
"method",
",",
"path",
")",
")",
"continue",
"else",
":",
"raise",
"Exception",
"(",
"\"Swagger api defines no x-bind-server for %s %s\"",
"%",
"(",
"method",
",",
"path",
")",
")",
"data",
".",
"handler_server",
"=",
"op_spec",
"[",
"'x-bind-server'",
"]",
"# Make sure that endpoint only produces 'application/json'",
"if",
"'produces'",
"not",
"in",
"op_spec",
":",
"raise",
"Exception",
"(",
"\"Swagger api has no 'produces' section for %s %s\"",
"%",
"(",
"method",
",",
"path",
")",
")",
"if",
"len",
"(",
"op_spec",
"[",
"'produces'",
"]",
")",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"\"Expecting only one type under 'produces' for %s %s\"",
"%",
"(",
"method",
",",
"path",
")",
")",
"if",
"op_spec",
"[",
"'produces'",
"]",
"[",
"0",
"]",
"==",
"'application/json'",
":",
"data",
".",
"produces_json",
"=",
"True",
"elif",
"op_spec",
"[",
"'produces'",
"]",
"[",
"0",
"]",
"==",
"'text/html'",
":",
"data",
".",
"produces_html",
"=",
"True",
"else",
":",
"raise",
"Exception",
"(",
"\"Only 'application/json' or 'text/html' are supported. See %s %s\"",
"%",
"(",
"method",
",",
"path",
")",
")",
"# Which client method handles this endpoint?",
"if",
"'x-bind-client'",
"in",
"op_spec",
":",
"data",
".",
"handler_client",
"=",
"op_spec",
"[",
"'x-bind-client'",
"]",
"# Should we decorate the server handler?",
"if",
"'x-decorate-server'",
"in",
"op_spec",
":",
"data",
".",
"decorate_server",
"=",
"op_spec",
"[",
"'x-decorate-server'",
"]",
"# Should we manipulate the requests parameters?",
"if",
"'x-decorate-request'",
"in",
"op_spec",
":",
"data",
".",
"decorate_request",
"=",
"op_spec",
"[",
"'x-decorate-request'",
"]",
"# Generate a bravado-core operation object",
"data",
".",
"operation",
"=",
"Operation",
".",
"from_spec",
"(",
"self",
".",
"spec",
",",
"path",
",",
"method",
",",
"op_spec",
")",
"# Figure out how parameters are passed: one json in body? one or",
"# more values in query?",
"if",
"'parameters'",
"in",
"op_spec",
":",
"params",
"=",
"op_spec",
"[",
"'parameters'",
"]",
"for",
"p",
"in",
"params",
":",
"if",
"p",
"[",
"'in'",
"]",
"==",
"'body'",
":",
"data",
".",
"param_in_body",
"=",
"True",
"if",
"p",
"[",
"'in'",
"]",
"==",
"'query'",
":",
"data",
".",
"param_in_query",
"=",
"True",
"if",
"p",
"[",
"'in'",
"]",
"==",
"'path'",
":",
"data",
".",
"param_in_path",
"=",
"True",
"if",
"data",
".",
"param_in_path",
":",
"# Substitute {...} with <...> in path, to make a Flask friendly path",
"data",
".",
"path",
"=",
"data",
".",
"path",
".",
"replace",
"(",
"'{'",
",",
"'<'",
")",
".",
"replace",
"(",
"'}'",
",",
"'>'",
")",
"if",
"data",
".",
"param_in_body",
"and",
"data",
".",
"param_in_query",
":",
"raise",
"Exception",
"(",
"\"Cannot support params in both body and param (%s %s)\"",
"%",
"(",
"method",
",",
"path",
")",
")",
"else",
":",
"data",
".",
"no_params",
"=",
"True",
"callback",
"(",
"data",
")"
]
| Find all server endpoints defined in the swagger spec and calls 'callback' for each,
with an instance of EndpointData as argument. | [
"Find",
"all",
"server",
"endpoints",
"defined",
"in",
"the",
"swagger",
"spec",
"and",
"calls",
"callback",
"for",
"each",
"with",
"an",
"instance",
"of",
"EndpointData",
"as",
"argument",
"."
]
| python | train |
CI-WATER/mapkit | mapkit/RasterLoader.py | https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/RasterLoader.py#L114-L178 | def grassAsciiRasterToWKB(cls, session, grassRasterPath, srid, noData=0):
"""
Load GRASS ASCII rasters directly using the makeSingleBandWKBRaster method. Do this to eliminate the raster2pgsql
dependency.
"""
# Constants
NUM_HEADER_LINES = 6
# Defaults
north = 0.0
east = 0.0
west = 0.0
rows = 0
columns = 0
if grassRasterPath is not None:
# If the path to the file is given, open the file and extract contents.
with open(grassRasterPath, 'r') as f:
rasterLines = f.readlines()
else:
print("RASTER LOAD ERROR: Must provide the path the raster.")
raise
# Extract the headers from the file and derive metadata
for line in rasterLines[0:NUM_HEADER_LINES]:
spline = line.split()
if 'north' in spline[0].lower():
north = float(spline[1])
elif 'east' in spline[0].lower():
east = float(spline[1])
elif 'west' in spline[0].lower():
west = float(spline[1])
elif 'rows' in spline[0].lower():
rows = int(spline[1])
elif 'cols' in spline[0].lower():
columns = int(spline[1])
# Define raster metadata from headers
width = columns
height = rows
upperLeftX = west
upperLeftY = north
cellSizeX = int(abs(west - east) / columns)
cellSizeY = -1 * cellSizeX
# Assemble the data array string
dataArrayList = []
for line in rasterLines[NUM_HEADER_LINES:len(rasterLines)]:
dataArrayList.append('[{0}]'.format(', '.join(line.split())))
dataArrayString = '[{0}]'.format(', '.join(dataArrayList))
# Create well known binary raster
wellKnownBinary = cls.makeSingleBandWKBRaster(session=session,
width=width, height=height,
upperLeftX=upperLeftX, upperLeftY=upperLeftY,
cellSizeX=cellSizeX, cellSizeY=cellSizeY,
skewX=0, skewY=0,
srid=srid,
dataArray=dataArrayString,
noDataValue=noData)
return wellKnownBinary | [
"def",
"grassAsciiRasterToWKB",
"(",
"cls",
",",
"session",
",",
"grassRasterPath",
",",
"srid",
",",
"noData",
"=",
"0",
")",
":",
"# Constants",
"NUM_HEADER_LINES",
"=",
"6",
"# Defaults",
"north",
"=",
"0.0",
"east",
"=",
"0.0",
"west",
"=",
"0.0",
"rows",
"=",
"0",
"columns",
"=",
"0",
"if",
"grassRasterPath",
"is",
"not",
"None",
":",
"# If the path to the file is given, open the file and extract contents.",
"with",
"open",
"(",
"grassRasterPath",
",",
"'r'",
")",
"as",
"f",
":",
"rasterLines",
"=",
"f",
".",
"readlines",
"(",
")",
"else",
":",
"print",
"(",
"\"RASTER LOAD ERROR: Must provide the path the raster.\"",
")",
"raise",
"# Extract the headers from the file and derive metadata",
"for",
"line",
"in",
"rasterLines",
"[",
"0",
":",
"NUM_HEADER_LINES",
"]",
":",
"spline",
"=",
"line",
".",
"split",
"(",
")",
"if",
"'north'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"north",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'east'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"east",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'west'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"west",
"=",
"float",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'rows'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"rows",
"=",
"int",
"(",
"spline",
"[",
"1",
"]",
")",
"elif",
"'cols'",
"in",
"spline",
"[",
"0",
"]",
".",
"lower",
"(",
")",
":",
"columns",
"=",
"int",
"(",
"spline",
"[",
"1",
"]",
")",
"# Define raster metadata from headers",
"width",
"=",
"columns",
"height",
"=",
"rows",
"upperLeftX",
"=",
"west",
"upperLeftY",
"=",
"north",
"cellSizeX",
"=",
"int",
"(",
"abs",
"(",
"west",
"-",
"east",
")",
"/",
"columns",
")",
"cellSizeY",
"=",
"-",
"1",
"*",
"cellSizeX",
"# Assemble the data array string",
"dataArrayList",
"=",
"[",
"]",
"for",
"line",
"in",
"rasterLines",
"[",
"NUM_HEADER_LINES",
":",
"len",
"(",
"rasterLines",
")",
"]",
":",
"dataArrayList",
".",
"append",
"(",
"'[{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"line",
".",
"split",
"(",
")",
")",
")",
")",
"dataArrayString",
"=",
"'[{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"dataArrayList",
")",
")",
"# Create well known binary raster",
"wellKnownBinary",
"=",
"cls",
".",
"makeSingleBandWKBRaster",
"(",
"session",
"=",
"session",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
",",
"upperLeftX",
"=",
"upperLeftX",
",",
"upperLeftY",
"=",
"upperLeftY",
",",
"cellSizeX",
"=",
"cellSizeX",
",",
"cellSizeY",
"=",
"cellSizeY",
",",
"skewX",
"=",
"0",
",",
"skewY",
"=",
"0",
",",
"srid",
"=",
"srid",
",",
"dataArray",
"=",
"dataArrayString",
",",
"noDataValue",
"=",
"noData",
")",
"return",
"wellKnownBinary"
]
| Load GRASS ASCII rasters directly using the makeSingleBandWKBRaster method. Do this to eliminate the raster2pgsql
dependency. | [
"Load",
"GRASS",
"ASCII",
"rasters",
"directly",
"using",
"the",
"makeSingleBandWKBRaster",
"method",
".",
"Do",
"this",
"to",
"eliminate",
"the",
"raster2pgsql",
"dependency",
"."
]
| python | train |
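A sketch of the GRASS ASCII layout that the header-parsing loop above expects: six header lines (north, south, east, west, rows, cols) followed by rows of cell values. The file name and coordinate values are placeholders, chosen so the integer cell-size division in the code comes out whole.

grass_ascii = """north: 100.0
south: 40.0
east: 120.0
west: 60.0
rows: 6
cols: 6
1 1 0 0 2 2
1 1 0 0 2 2
0 0 0 0 0 0
0 0 0 0 0 0
3 3 0 0 4 4
3 3 0 0 4 4
"""

with open('example.asc', 'w') as f:       # placeholder path
    f.write(grass_ascii)

# grassAsciiRasterToWKB(session, 'example.asc', srid=4326) would then derive:
#   width=6, height=6, upperLeftX=60.0, upperLeftY=100.0, cellSizeX=10, cellSizeY=-10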
tensorflow/datasets | tensorflow_datasets/text/cnn_dailymail.py | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L163-L207 | def _get_art_abs(story_file):
"""Get abstract (highlights) and article from a story file path."""
# Based on https://github.com/abisee/cnn-dailymail/blob/master/
# make_datafiles.py
lines = _read_text_file(story_file)
# Lowercase everything
lines = [line.lower() for line in lines]
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
def fix_missing_period(line):
"""Adds a period to a line that is missing a period."""
if '@highlight' in line: return line
if not line: return line
if line[-1] in END_TOKENS: return line
return line + ' .'
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith('@highlight'):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = ' '.join(article_lines)
# Make abstract into a single string, putting <s> and </s> tags around
# the sentences.
abstract = ' '.join(['%s %s %s' % (SENTENCE_START, sent,
SENTENCE_END) for sent in highlights])
return article, abstract | [
"def",
"_get_art_abs",
"(",
"story_file",
")",
":",
"# Based on https://github.com/abisee/cnn-dailymail/blob/master/",
"# make_datafiles.py",
"lines",
"=",
"_read_text_file",
"(",
"story_file",
")",
"# Lowercase everything",
"lines",
"=",
"[",
"line",
".",
"lower",
"(",
")",
"for",
"line",
"in",
"lines",
"]",
"# Put periods on the ends of lines that are missing them",
"# (this is a problem in the dataset because many image captions don't end in",
"# periods; consequently they end up in the body of the article as run-on",
"# sentences)",
"def",
"fix_missing_period",
"(",
"line",
")",
":",
"\"\"\"Adds a period to a line that is missing a period.\"\"\"",
"if",
"'@highlight'",
"in",
"line",
":",
"return",
"line",
"if",
"not",
"line",
":",
"return",
"line",
"if",
"line",
"[",
"-",
"1",
"]",
"in",
"END_TOKENS",
":",
"return",
"line",
"return",
"line",
"+",
"' .'",
"lines",
"=",
"[",
"fix_missing_period",
"(",
"line",
")",
"for",
"line",
"in",
"lines",
"]",
"# Separate out article and abstract sentences",
"article_lines",
"=",
"[",
"]",
"highlights",
"=",
"[",
"]",
"next_is_highlight",
"=",
"False",
"for",
"line",
"in",
"lines",
":",
"if",
"not",
"line",
":",
"continue",
"# empty line",
"elif",
"line",
".",
"startswith",
"(",
"'@highlight'",
")",
":",
"next_is_highlight",
"=",
"True",
"elif",
"next_is_highlight",
":",
"highlights",
".",
"append",
"(",
"line",
")",
"else",
":",
"article_lines",
".",
"append",
"(",
"line",
")",
"# Make article into a single string",
"article",
"=",
"' '",
".",
"join",
"(",
"article_lines",
")",
"# Make abstract into a single string, putting <s> and </s> tags around",
"# the sentences.",
"abstract",
"=",
"' '",
".",
"join",
"(",
"[",
"'%s %s %s'",
"%",
"(",
"SENTENCE_START",
",",
"sent",
",",
"SENTENCE_END",
")",
"for",
"sent",
"in",
"highlights",
"]",
")",
"return",
"article",
",",
"abstract"
]
| Get abstract (highlights) and article from a story file path. | [
"Get",
"abstract",
"(",
"highlights",
")",
"and",
"article",
"from",
"a",
"story",
"file",
"path",
"."
]
| python | train |
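A small sketch of the story-file layout that _get_art_abs above expects: article sentences first, then each summary sentence introduced by an @highlight marker. The text is invented, and the '<s>'/'</s>' markers are an assumption about the module-level SENTENCE_START/SENTENCE_END constants, which are not shown in this record.

story_text = """new york (cnn) -- the quick brown fox jumped over the lazy dog

@highlight

fox jumps over dog

@highlight

nothing else happened
"""

# Fed through the function above (after period fixing) this yields roughly:
#   article  = "new york (cnn) -- the quick brown fox jumped over the lazy dog ."
#   abstract = "<s> fox jumps over dog . </s> <s> nothing else happened . </s>"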
datajoint/datajoint-python | datajoint/schema.py | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/schema.py#L102-L144 | def spawn_missing_classes(self, context=None):
"""
Creates the appropriate python user relation classes from tables in the schema and places them
in the context.
:param context: alternative context to place the missing classes into, e.g. locals()
"""
if context is None:
if self.context is not None:
context = self.context
else:
# if context is missing, use the calling namespace
frame = inspect.currentframe().f_back
context = frame.f_locals
del frame
tables = [
row[0] for row in self.connection.query('SHOW TABLES in `%s`' % self.database)
if lookup_class_name('`{db}`.`{tab}`'.format(db=self.database, tab=row[0]), context, 0) is None]
master_classes = (Lookup, Manual, Imported, Computed)
part_tables = []
for table_name in tables:
class_name = to_camel_case(table_name)
if class_name not in context:
try:
cls = next(cls for cls in master_classes if re.fullmatch(cls.tier_regexp, table_name))
except StopIteration:
if re.fullmatch(Part.tier_regexp, table_name):
part_tables.append(table_name)
else:
# declare and decorate master relation classes
context[class_name] = self(type(class_name, (cls,), dict()))
# attach parts to masters
for table_name in part_tables:
groups = re.fullmatch(Part.tier_regexp, table_name).groupdict()
class_name = to_camel_case(groups['part'])
try:
master_class = context[to_camel_case(groups['master'])]
except KeyError:
raise DataJointError('The table %s does not follow DataJoint naming conventions' % table_name)
part_class = type(class_name, (Part,), dict(definition=...))
part_class._master = master_class
self.process_relation_class(part_class, context=context, assert_declared=True)
setattr(master_class, class_name, part_class) | [
"def",
"spawn_missing_classes",
"(",
"self",
",",
"context",
"=",
"None",
")",
":",
"if",
"context",
"is",
"None",
":",
"if",
"self",
".",
"context",
"is",
"not",
"None",
":",
"context",
"=",
"self",
".",
"context",
"else",
":",
"# if context is missing, use the calling namespace",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
".",
"f_back",
"context",
"=",
"frame",
".",
"f_locals",
"del",
"frame",
"tables",
"=",
"[",
"row",
"[",
"0",
"]",
"for",
"row",
"in",
"self",
".",
"connection",
".",
"query",
"(",
"'SHOW TABLES in `%s`'",
"%",
"self",
".",
"database",
")",
"if",
"lookup_class_name",
"(",
"'`{db}`.`{tab}`'",
".",
"format",
"(",
"db",
"=",
"self",
".",
"database",
",",
"tab",
"=",
"row",
"[",
"0",
"]",
")",
",",
"context",
",",
"0",
")",
"is",
"None",
"]",
"master_classes",
"=",
"(",
"Lookup",
",",
"Manual",
",",
"Imported",
",",
"Computed",
")",
"part_tables",
"=",
"[",
"]",
"for",
"table_name",
"in",
"tables",
":",
"class_name",
"=",
"to_camel_case",
"(",
"table_name",
")",
"if",
"class_name",
"not",
"in",
"context",
":",
"try",
":",
"cls",
"=",
"next",
"(",
"cls",
"for",
"cls",
"in",
"master_classes",
"if",
"re",
".",
"fullmatch",
"(",
"cls",
".",
"tier_regexp",
",",
"table_name",
")",
")",
"except",
"StopIteration",
":",
"if",
"re",
".",
"fullmatch",
"(",
"Part",
".",
"tier_regexp",
",",
"table_name",
")",
":",
"part_tables",
".",
"append",
"(",
"table_name",
")",
"else",
":",
"# declare and decorate master relation classes",
"context",
"[",
"class_name",
"]",
"=",
"self",
"(",
"type",
"(",
"class_name",
",",
"(",
"cls",
",",
")",
",",
"dict",
"(",
")",
")",
")",
"# attach parts to masters",
"for",
"table_name",
"in",
"part_tables",
":",
"groups",
"=",
"re",
".",
"fullmatch",
"(",
"Part",
".",
"tier_regexp",
",",
"table_name",
")",
".",
"groupdict",
"(",
")",
"class_name",
"=",
"to_camel_case",
"(",
"groups",
"[",
"'part'",
"]",
")",
"try",
":",
"master_class",
"=",
"context",
"[",
"to_camel_case",
"(",
"groups",
"[",
"'master'",
"]",
")",
"]",
"except",
"KeyError",
":",
"raise",
"DataJointError",
"(",
"'The table %s does not follow DataJoint naming conventions'",
"%",
"table_name",
")",
"part_class",
"=",
"type",
"(",
"class_name",
",",
"(",
"Part",
",",
")",
",",
"dict",
"(",
"definition",
"=",
"...",
")",
")",
"part_class",
".",
"_master",
"=",
"master_class",
"self",
".",
"process_relation_class",
"(",
"part_class",
",",
"context",
"=",
"context",
",",
"assert_declared",
"=",
"True",
")",
"setattr",
"(",
"master_class",
",",
"class_name",
",",
"part_class",
")"
]
| Creates the appropriate python user relation classes from tables in the schema and places them
in the context.
:param context: alternative context to place the missing classes into, e.g. locals() | [
"Creates",
"the",
"appropriate",
"python",
"user",
"relation",
"classes",
"from",
"tables",
"in",
"the",
"schema",
"and",
"places",
"them",
"in",
"the",
"context",
".",
":",
"param",
"context",
":",
"alternative",
"context",
"to",
"place",
"the",
"missing",
"classes",
"into",
"e",
".",
"g",
".",
"locals",
"()"
]
| python | train |
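A typical call pattern for spawn_missing_classes recorded above. It assumes DataJoint is already configured with connection credentials and that dj.schema can be constructed from just a database name in this version; the database name is a placeholder.

import datajoint as dj

schema = dj.schema('my_database')            # placeholder database name

# Generate classes for any tables in the schema that have no class in this namespace yet.
schema.spawn_missing_classes(context=locals())

# Spawned classes are then reachable by their CamelCase table names,
# e.g. locals().get('Session') for a table named `session`.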
KnowledgeLinks/rdfframework | rdfframework/datatypes/xsdtypes.py | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/xsdtypes.py#L342-L353 | def _internal_add(self, other):
""" Used for specifing addition methods for
__add__, __iadd__, __radd__
"""
if hasattr(other, "datatype"):
if other.datatype == "xsd:decimal":
rtn_val = self.value + Decimal(str(other.value))
else:
rtn_val = self.value + Decimal(str(other.value))
else:
rtn_val = self.value + Decimal(str(other))
return XsdDecimal(str(float(rtn_val))) | [
"def",
"_internal_add",
"(",
"self",
",",
"other",
")",
":",
"if",
"hasattr",
"(",
"other",
",",
"\"datatype\"",
")",
":",
"if",
"other",
".",
"datatype",
"==",
"\"xsd:decimal\"",
":",
"rtn_val",
"=",
"self",
".",
"value",
"+",
"Decimal",
"(",
"str",
"(",
"other",
".",
"value",
")",
")",
"else",
":",
"rtn_val",
"=",
"self",
".",
"value",
"+",
"Decimal",
"(",
"str",
"(",
"other",
".",
"value",
")",
")",
"else",
":",
"rtn_val",
"=",
"self",
".",
"value",
"+",
"Decimal",
"(",
"str",
"(",
"other",
")",
")",
"return",
"XsdDecimal",
"(",
"str",
"(",
"float",
"(",
"rtn_val",
")",
")",
")"
]
| Used for specifing addition methods for
__add__, __iadd__, __radd__ | [
"Used",
"for",
"specifing",
"addition",
"methods",
"for",
"__add__",
"__iadd__",
"__radd__"
]
| python | train |
pzs741/TEDT | TEDT/corpus_decision_model.py | https://github.com/pzs741/TEDT/blob/6b6663227b755005fe1a1e3e807a05bdb521e066/TEDT/corpus_decision_model.py#L68-L81 | def get_spn(unit):
"""获取文本行中非中文字符数的个数
Keyword arguments:
unit -- 文本行
Return:
spn -- 特殊字符数
"""
spn = 0
match_re = re.findall(no_chinese, unit)
if match_re:
string = ''.join(match_re)
spn = len(string)
return int(spn) | [
"def",
"get_spn",
"(",
"unit",
")",
":",
"spn",
"=",
"0",
"match_re",
"=",
"re",
".",
"findall",
"(",
"no_chinese",
",",
"unit",
")",
"if",
"match_re",
":",
"string",
"=",
"''",
".",
"join",
"(",
"match_re",
")",
"spn",
"=",
"len",
"(",
"string",
")",
"return",
"int",
"(",
"spn",
")"
]
| 获取文本行中非中文字符数的个数
Keyword arguments:
unit -- 文本行
Return:
spn -- 特殊字符数 | [
"获取文本行中非中文字符数的个数"
]
| python | train |