nwo
stringlengths 5
106
| sha
stringlengths 40
40
| path
stringlengths 4
174
| language
stringclasses 1
value | identifier
stringlengths 1
140
| parameters
stringlengths 0
87.7k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
426k
| docstring
stringlengths 0
64.3k
| docstring_summary
stringlengths 0
26.3k
| docstring_tokens
list | function
stringlengths 18
4.83M
| function_tokens
list | url
stringlengths 83
304
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
slgobinath/SafeEyes
|
7ef644e6a2ef8541d929d5355a38185ccd138cbd
|
safeeyes/safeeyes.py
|
python
|
SafeEyes.start_break
|
(self, break_obj)
|
Pass the break information to break screen.
|
Pass the break information to break screen.
|
[
"Pass",
"the",
"break",
"information",
"to",
"break",
"screen",
"."
] |
def start_break(self, break_obj):
    """Forward the break details to the break screen.

    Collects the plugin-provided widgets and tray actions for this break
    and hands everything to the break screen for display.
    """
    plugins = self.plugins_manager
    # Plugin-supplied HTML widget content and tray actions for this break.
    widget = plugins.get_break_screen_widgets(break_obj)
    actions = plugins.get_break_screen_tray_actions(break_obj)
    self.break_screen.show_message(break_obj, widget, actions)
|
[
"def",
"start_break",
"(",
"self",
",",
"break_obj",
")",
":",
"# Get the HTML widgets content from plugins",
"widget",
"=",
"self",
".",
"plugins_manager",
".",
"get_break_screen_widgets",
"(",
"break_obj",
")",
"actions",
"=",
"self",
".",
"plugins_manager",
".",
"get_break_screen_tray_actions",
"(",
"break_obj",
")",
"self",
".",
"break_screen",
".",
"show_message",
"(",
"break_obj",
",",
"widget",
",",
"actions",
")"
] |
https://github.com/slgobinath/SafeEyes/blob/7ef644e6a2ef8541d929d5355a38185ccd138cbd/safeeyes/safeeyes.py#L262-L269
|
||
mgear-dev/mgear
|
06ddc26c5adb5eab07ca470c7fafa77404c8a1de
|
scripts/mgear/maya/shifter/component/guide.py
|
python
|
ComponentGuide.addDispCurve
|
(self, name, centers=[], degree=1)
|
return icon.connection_display_curve(self.getName(name),
centers,
degree)
|
Add a display curve object to the guide.
Display curve object is a simple curve to show the connection between
different guide element..
Args:
name (str): Local name of the element.
centers (list of dagNode): List of object to define the curve.
degree (int): Curve degree. Default 1 = linear.
Returns:
dagNode: The newly created curve.
|
Add a display curve object to the guide.
|
[
"Add",
"a",
"display",
"curve",
"object",
"to",
"the",
"guide",
"."
] |
def addDispCurve(self, name, centers=None, degree=1):
    """Add a display curve object to the guide.

    Display curve object is a simple curve to show the connection between
    different guide elements.

    Args:
        name (str): Local name of the element.
        centers (list of dagNode): List of objects to define the curve.
            Defaults to an empty list.
        degree (int): Curve degree. Default 1 = linear.

    Returns:
        dagNode: The newly created curve.
    """
    # Avoid the shared mutable default-argument pitfall: the original
    # signature used `centers=[]`, which is a single list object reused
    # across all calls. Build a fresh list per call instead; passing an
    # explicit list behaves exactly as before.
    if centers is None:
        centers = []
    return icon.connection_display_curve(self.getName(name),
                                         centers,
                                         degree)
|
[
"def",
"addDispCurve",
"(",
"self",
",",
"name",
",",
"centers",
"=",
"[",
"]",
",",
"degree",
"=",
"1",
")",
":",
"return",
"icon",
".",
"connection_display_curve",
"(",
"self",
".",
"getName",
"(",
"name",
")",
",",
"centers",
",",
"degree",
")"
] |
https://github.com/mgear-dev/mgear/blob/06ddc26c5adb5eab07ca470c7fafa77404c8a1de/scripts/mgear/maya/shifter/component/guide.py#L591-L608
|
|
isocpp/CppCoreGuidelines
|
171fda35972cb0678c26274547be3d5dfaf73156
|
scripts/python/cpplint.py
|
python
|
FlagCxx14Features
|
(filename, clean_lines, linenum, error)
|
Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
Flag those C++14 features that we restrict.
|
[
"Flag",
"those",
"C",
"++",
"14",
"features",
"that",
"we",
"restrict",
"."
] |
def FlagCxx14Features(filename, clean_lines, linenum, error):
    """Flag those C++14 features that we restrict.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Flag unapproved C++14 headers.
    include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
    if not include:
        return
    header = include.group(1)
    if header in ('scoped_allocator', 'shared_mutex'):
        error(filename, linenum, 'build/c++14', 5,
              ('<%s> is an unapproved C++14 header.') % header)
|
[
"def",
"FlagCxx14Features",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"include",
"=",
"Match",
"(",
"r'\\s*#\\s*include\\s+[<\"]([^<\"]+)[\">]'",
",",
"line",
")",
"# Flag unapproved C++14 headers.",
"if",
"include",
"and",
"include",
".",
"group",
"(",
"1",
")",
"in",
"(",
"'scoped_allocator'",
",",
"'shared_mutex'",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/c++14'",
",",
"5",
",",
"(",
"'<%s> is an unapproved C++14 header.'",
")",
"%",
"include",
".",
"group",
"(",
"1",
")",
")"
] |
https://github.com/isocpp/CppCoreGuidelines/blob/171fda35972cb0678c26274547be3d5dfaf73156/scripts/python/cpplint.py#L6036-L6052
|
||
facebookresearch/detectron2
|
cb92ae1763cd7d3777c243f07749574cdaec6cb8
|
detectron2/modeling/roi_heads/keypoint_head.py
|
python
|
BaseKeypointRCNNHead.layers
|
(self, x)
|
Neural network layers that makes predictions from regional input features.
|
Neural network layers that makes predictions from regional input features.
|
[
"Neural",
"network",
"layers",
"that",
"makes",
"predictions",
"from",
"regional",
"input",
"features",
"."
] |
def layers(self, x):
    """
    Neural network layers that make predictions from regional input features.

    Subclasses are expected to override this method.
    """
    raise NotImplementedError
|
[
"def",
"layers",
"(",
"self",
",",
"x",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/facebookresearch/detectron2/blob/cb92ae1763cd7d3777c243f07749574cdaec6cb8/detectron2/modeling/roi_heads/keypoint_head.py#L207-L211
|
||
exaile/exaile
|
a7b58996c5c15b3aa7b9975ac13ee8f784ef4689
|
xlgui/main.py
|
python
|
MainWindow._update_track_information
|
(self)
|
Sets track information
|
Sets track information
|
[
"Sets",
"track",
"information"
] |
def _update_track_information(self):
    """
    Sets track information
    """
    # Nothing to show if the player has no current track.
    current = player.PLAYER.current
    if not current:
        return
    title = self.title_formatter.format(current)
    self.window.set_title(title)
|
[
"def",
"_update_track_information",
"(",
"self",
")",
":",
"track",
"=",
"player",
".",
"PLAYER",
".",
"current",
"if",
"not",
"track",
":",
"return",
"self",
".",
"window",
".",
"set_title",
"(",
"self",
".",
"title_formatter",
".",
"format",
"(",
"track",
")",
")"
] |
https://github.com/exaile/exaile/blob/a7b58996c5c15b3aa7b9975ac13ee8f784ef4689/xlgui/main.py#L1020-L1029
|
||
roglew/guppy-proxy
|
01df16be71dd9f23d7de415a315821659c29bc63
|
guppyproxy/proxy.py
|
python
|
ProxyConnection.generate_pem_certificates
|
(self)
|
return ret
|
[] |
def generate_pem_certificates(self):
    """Ask the proxy to generate a fresh PEM key/certificate pair."""
    response = self.reqrsp_cmd({
        "Command": "GenPEMCerts",
    })
    return GenPemCertsResult(response["KeyPEMData"],
                             response["CertificatePEMData"])
|
[
"def",
"generate_pem_certificates",
"(",
"self",
")",
":",
"cmd",
"=",
"{",
"\"Command\"",
":",
"\"GenPEMCerts\"",
",",
"}",
"result",
"=",
"self",
".",
"reqrsp_cmd",
"(",
"cmd",
")",
"ret",
"=",
"GenPemCertsResult",
"(",
"result",
"[",
"\"KeyPEMData\"",
"]",
",",
"result",
"[",
"\"CertificatePEMData\"",
"]",
")",
"return",
"ret"
] |
https://github.com/roglew/guppy-proxy/blob/01df16be71dd9f23d7de415a315821659c29bc63/guppyproxy/proxy.py#L960-L966
|
|||
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/boto/route53/record.py
|
python
|
ResourceRecordSets.__iter__
|
(self)
|
Override the next function to support paging
|
Override the next function to support paging
|
[
"Override",
"the",
"next",
"function",
"to",
"support",
"paging"
] |
def __iter__(self):
    """Override the next function to support paging"""
    # Yield the records fetched so far, then keep requesting further
    # pages while the service reports the result set as truncated.
    results = super(ResourceRecordSets, self).__iter__()
    # Remember the original truncation flag so it can be restored after
    # iteration; the paging loop below temporarily clears it.
    truncated = self.is_truncated
    while results:
        for obj in results:
            yield obj
        if self.is_truncated:
            self.is_truncated = False
            # Fetch the next page, resuming from the name/type/identifier
            # markers returned with the previous (truncated) response.
            results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name,
                                                     type=self.next_record_type,
                                                     identifier=self.next_record_identifier)
        else:
            results = None
    # Restore the caller-visible truncation state.
    self.is_truncated = truncated
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"results",
"=",
"super",
"(",
"ResourceRecordSets",
",",
"self",
")",
".",
"__iter__",
"(",
")",
"truncated",
"=",
"self",
".",
"is_truncated",
"while",
"results",
":",
"for",
"obj",
"in",
"results",
":",
"yield",
"obj",
"if",
"self",
".",
"is_truncated",
":",
"self",
".",
"is_truncated",
"=",
"False",
"results",
"=",
"self",
".",
"connection",
".",
"get_all_rrsets",
"(",
"self",
".",
"hosted_zone_id",
",",
"name",
"=",
"self",
".",
"next_record_name",
",",
"type",
"=",
"self",
".",
"next_record_type",
",",
"identifier",
"=",
"self",
".",
"next_record_identifier",
")",
"else",
":",
"results",
"=",
"None",
"self",
".",
"is_truncated",
"=",
"truncated"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/boto/route53/record.py#L182-L196
|
||
nipy/mindboggle
|
bc10812979d42e94b8a01ad8f98b4ceae33169e5
|
mindboggle/guts/utilities.py
|
python
|
list_strings
|
(string1='', string2='', string3='', string4='')
|
return string_list
|
Put strings in a list.
Parameters
----------
string1 : string
string2 : string
string3 : string
string4 : string
Returns
-------
string_list : list of strings
Examples
--------
>>> from mindboggle.guts.utilities import list_strings
>>> string1 = 'a b c'
>>> string2 = 'd e f'
>>> string3 = ''
>>> string4 = 'j k l'
>>> string_list = list_strings(string1, string2, string3, string4)
>>> string_list
['a b c', 'd e f', 'j k l']
|
Put strings in a list.
|
[
"Put",
"strings",
"in",
"a",
"list",
"."
] |
def list_strings(string1='', string2='', string3='', string4=''):
    """
    Put strings in a list.

    Only arguments that are nonempty strings are included in the result;
    empty or non-string arguments are skipped.

    Parameters
    ----------
    string1 : string
    string2 : string
    string3 : string
    string4 : string

    Returns
    -------
    string_list : list of strings

    Examples
    --------
    >>> from mindboggle.guts.utilities import list_strings
    >>> string1 = 'a b c'
    >>> string2 = 'd e f'
    >>> string3 = ''
    >>> string4 = 'j k l'
    >>> string_list = list_strings(string1, string2, string3, string4)
    >>> string_list
    ['a b c', 'd e f', 'j k l']
    """
    # Bug fix: each candidate is type-checked itself -- the original code
    # tested isinstance(string1, str) for all four arguments, so a
    # non-string string2/3/4 could slip into the result.
    string_list = [s for s in (string1, string2, string3, string4)
                   if s and isinstance(s, str)]

    return string_list
|
[
"def",
"list_strings",
"(",
"string1",
"=",
"''",
",",
"string2",
"=",
"''",
",",
"string3",
"=",
"''",
",",
"string4",
"=",
"''",
")",
":",
"string_list",
"=",
"[",
"]",
"if",
"string1",
"and",
"isinstance",
"(",
"string1",
",",
"str",
")",
":",
"string_list",
".",
"append",
"(",
"string1",
")",
"if",
"string2",
"and",
"isinstance",
"(",
"string1",
",",
"str",
")",
":",
"string_list",
".",
"append",
"(",
"string2",
")",
"if",
"string3",
"and",
"isinstance",
"(",
"string1",
",",
"str",
")",
":",
"string_list",
".",
"append",
"(",
"string3",
")",
"if",
"string4",
"and",
"isinstance",
"(",
"string1",
",",
"str",
")",
":",
"string_list",
".",
"append",
"(",
"string4",
")",
"return",
"string_list"
] |
https://github.com/nipy/mindboggle/blob/bc10812979d42e94b8a01ad8f98b4ceae33169e5/mindboggle/guts/utilities.py#L75-L113
|
|
openedx/edx-platform
|
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
|
lms/djangoapps/discussion/apps.py
|
python
|
DiscussionConfig.ready
|
(self)
|
Connect handlers to send notifications about discussions.
|
Connect handlers to send notifications about discussions.
|
[
"Connect",
"handlers",
"to",
"send",
"notifications",
"about",
"discussions",
"."
] |
def ready(self):
    """
    Connect handlers to send notifications about discussions.
    """
    # Importing the module registers its signal receivers as a side
    # effect; the imported name itself is intentionally unused.
    from .signals import handlers
|
[
"def",
"ready",
"(",
"self",
")",
":",
"from",
".",
"signals",
"import",
"handlers"
] |
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/lms/djangoapps/discussion/apps.py#L39-L43
|
||
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/networkx/generators/small.py
|
python
|
tutte_graph
|
(create_using=None)
|
return G
|
Return the Tutte graph.
|
Return the Tutte graph.
|
[
"Return",
"the",
"Tutte",
"graph",
"."
] |
def tutte_graph(create_using=None):
    """Return the Tutte graph."""
    # Graph description in "adjacencylist" format: format tag, graph name,
    # node count (46), then one adjacency list per node using 1-based
    # labels.  Apparently each edge is listed once from one endpoint only,
    # which is why some lists are empty -- TODO confirm against
    # make_small_undirected_graph's expectations.
    description = [
        "adjacencylist",
        "Tutte's Graph",
        46,
        [[2, 3, 4], [5, 27], [11, 12], [19, 20], [6, 34],
         [7, 30], [8, 28], [9, 15], [10, 39], [11, 38],
         [40], [13, 40], [14, 36], [15, 16], [35],
         [17, 23], [18, 45], [19, 44], [46], [21, 46],
         [22, 42], [23, 24], [41], [25, 28], [26, 33],
         [27, 32], [34], [29], [30, 33], [31],
         [32, 34], [33], [], [], [36, 39],
         [37], [38, 40], [39], [], [],
         [42, 45], [43], [44, 46], [45], [], []]
    ]
    G = make_small_undirected_graph(description, create_using)
    return G
|
[
"def",
"tutte_graph",
"(",
"create_using",
"=",
"None",
")",
":",
"description",
"=",
"[",
"\"adjacencylist\"",
",",
"\"Tutte's Graph\"",
",",
"46",
",",
"[",
"[",
"2",
",",
"3",
",",
"4",
"]",
",",
"[",
"5",
",",
"27",
"]",
",",
"[",
"11",
",",
"12",
"]",
",",
"[",
"19",
",",
"20",
"]",
",",
"[",
"6",
",",
"34",
"]",
",",
"[",
"7",
",",
"30",
"]",
",",
"[",
"8",
",",
"28",
"]",
",",
"[",
"9",
",",
"15",
"]",
",",
"[",
"10",
",",
"39",
"]",
",",
"[",
"11",
",",
"38",
"]",
",",
"[",
"40",
"]",
",",
"[",
"13",
",",
"40",
"]",
",",
"[",
"14",
",",
"36",
"]",
",",
"[",
"15",
",",
"16",
"]",
",",
"[",
"35",
"]",
",",
"[",
"17",
",",
"23",
"]",
",",
"[",
"18",
",",
"45",
"]",
",",
"[",
"19",
",",
"44",
"]",
",",
"[",
"46",
"]",
",",
"[",
"21",
",",
"46",
"]",
",",
"[",
"22",
",",
"42",
"]",
",",
"[",
"23",
",",
"24",
"]",
",",
"[",
"41",
"]",
",",
"[",
"25",
",",
"28",
"]",
",",
"[",
"26",
",",
"33",
"]",
",",
"[",
"27",
",",
"32",
"]",
",",
"[",
"34",
"]",
",",
"[",
"29",
"]",
",",
"[",
"30",
",",
"33",
"]",
",",
"[",
"31",
"]",
",",
"[",
"32",
",",
"34",
"]",
",",
"[",
"33",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"36",
",",
"39",
"]",
",",
"[",
"37",
"]",
",",
"[",
"38",
",",
"40",
"]",
",",
"[",
"39",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"42",
",",
"45",
"]",
",",
"[",
"43",
"]",
",",
"[",
"44",
",",
"46",
"]",
",",
"[",
"45",
"]",
",",
"[",
"]",
",",
"[",
"]",
"]",
"]",
"G",
"=",
"make_small_undirected_graph",
"(",
"description",
",",
"create_using",
")",
"return",
"G"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/networkx/generators/small.py#L433-L450
|
|
landlab/landlab
|
a5dd80b8ebfd03d1ba87ef6c4368c409485f222c
|
landlab/ca/boundaries/hex_lattice_tectonicizer.py
|
python
|
LatticeNormalFault._setup_links_to_update_after_offset
|
(self, in_footwall)
|
Create and store array with IDs of links for which to update
transitions after fault offset.
These are: all active boundary links with at least one node in the
footwall, plus the lowest non-boundary links, including the
next-to-lowest vertical links and those angling that are below them,
plus the fault-crossing links.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid((5, 5), orientation="vertical", node_layout="rect")
>>> lu = LatticeNormalFault(fault_x_intercept=-0.01, grid=hg)
>>> lu.first_link_shifted_to
35
>>> lu.links_to_update
array([ 5, 8, 9, 11, 12, 13, 14, 15, 16, 18, 20, 21, 22, 23, 24, 25, 27,
28, 29, 31, 34, 36, 40, 42, 44, 48, 49, 51])
|
Create and store array with IDs of links for which to update
transitions after fault offset.
|
[
"Create",
"and",
"store",
"array",
"with",
"IDs",
"of",
"links",
"for",
"which",
"to",
"update",
"transitions",
"after",
"fault",
"offset",
"."
] |
def _setup_links_to_update_after_offset(self, in_footwall):
    """Create and store array with IDs of links for which to update
    transitions after fault offset.
    These are: all active boundary links with at least one node in the
    footwall, plus the lowest non-boundary links, including the
    next-to-lowest vertical links and those angling that are below them,
    plus the fault-crossing links.
    Examples
    --------
    >>> from landlab import HexModelGrid
    >>> hg = HexModelGrid((5, 5), orientation="vertical", node_layout="rect")
    >>> lu = LatticeNormalFault(fault_x_intercept=-0.01, grid=hg)
    >>> lu.first_link_shifted_to
    35
    >>> lu.links_to_update
    array([ 5,  8,  9, 11, 12, 13, 14, 15, 16, 18, 20, 21, 22, 23, 24, 25, 27,
           28, 29, 31, 34, 36, 40, 42, 44, 48, 49, 51])
    """
    g = self.grid
    # Active links with an ID below the first shifted link ("low" links).
    lower_active = logical_and(
        arange(g.number_of_links) < self.first_link_shifted_to,
        g.status_at_link == LinkStatus.ACTIVE,
    )
    # Links with at least one endpoint node in the footwall.
    link_in_fw = logical_or(
        in_footwall[g.node_at_link_tail], in_footwall[g.node_at_link_head]
    )
    # Low active links touching the footwall.
    lower_active_fw = logical_and(lower_active, link_in_fw)
    # Active links with at least one non-core (boundary) endpoint node.
    active_bnd = logical_and(
        g.status_at_link == LinkStatus.ACTIVE,
        logical_or(
            g.status_at_node[g.node_at_link_tail] != 0,
            g.status_at_node[g.node_at_link_head] != 0,
        ),
    )
    # Active boundary links touching the footwall.
    active_bnd_fw = logical_and(active_bnd, link_in_fw)
    # Active links that cross the fault: exactly one endpoint in the
    # footwall (hence logical_xor).
    crosses_fw = logical_and(
        g.status_at_link == LinkStatus.ACTIVE,
        logical_xor(
            in_footwall[g.node_at_link_tail], in_footwall[g.node_at_link_head]
        ),
    )
    # Union of the three categories, stored as an ID array.
    update = logical_or(logical_or(lower_active_fw, active_bnd_fw), crosses_fw)
    self.links_to_update = as_id_array(where(update)[0])
|
[
"def",
"_setup_links_to_update_after_offset",
"(",
"self",
",",
"in_footwall",
")",
":",
"g",
"=",
"self",
".",
"grid",
"lower_active",
"=",
"logical_and",
"(",
"arange",
"(",
"g",
".",
"number_of_links",
")",
"<",
"self",
".",
"first_link_shifted_to",
",",
"g",
".",
"status_at_link",
"==",
"LinkStatus",
".",
"ACTIVE",
",",
")",
"link_in_fw",
"=",
"logical_or",
"(",
"in_footwall",
"[",
"g",
".",
"node_at_link_tail",
"]",
",",
"in_footwall",
"[",
"g",
".",
"node_at_link_head",
"]",
")",
"lower_active_fw",
"=",
"logical_and",
"(",
"lower_active",
",",
"link_in_fw",
")",
"active_bnd",
"=",
"logical_and",
"(",
"g",
".",
"status_at_link",
"==",
"LinkStatus",
".",
"ACTIVE",
",",
"logical_or",
"(",
"g",
".",
"status_at_node",
"[",
"g",
".",
"node_at_link_tail",
"]",
"!=",
"0",
",",
"g",
".",
"status_at_node",
"[",
"g",
".",
"node_at_link_head",
"]",
"!=",
"0",
",",
")",
",",
")",
"active_bnd_fw",
"=",
"logical_and",
"(",
"active_bnd",
",",
"link_in_fw",
")",
"crosses_fw",
"=",
"logical_and",
"(",
"g",
".",
"status_at_link",
"==",
"LinkStatus",
".",
"ACTIVE",
",",
"logical_xor",
"(",
"in_footwall",
"[",
"g",
".",
"node_at_link_tail",
"]",
",",
"in_footwall",
"[",
"g",
".",
"node_at_link_head",
"]",
")",
",",
")",
"update",
"=",
"logical_or",
"(",
"logical_or",
"(",
"lower_active_fw",
",",
"active_bnd_fw",
")",
",",
"crosses_fw",
")",
"self",
".",
"links_to_update",
"=",
"as_id_array",
"(",
"where",
"(",
"update",
")",
"[",
"0",
"]",
")"
] |
https://github.com/landlab/landlab/blob/a5dd80b8ebfd03d1ba87ef6c4368c409485f222c/landlab/ca/boundaries/hex_lattice_tectonicizer.py#L512-L556
|
||
psychopy/psychopy
|
01b674094f38d0e0bd51c45a6f66f671d7041696
|
psychopy/sound/audioclip.py
|
python
|
AudioClip.convertToWAV
|
(self)
|
return np.asarray(
self._samples * ((1 << 15) - 1), dtype=np.int16).tobytes()
|
Get a copy of stored audio samples in WAV PCM format.
Returns
-------
ndarray
Array with the same shapes as `.samples` but in 16-bit WAV PCM
format.
|
Get a copy of stored audio samples in WAV PCM format.
|
[
"Get",
"a",
"copy",
"of",
"stored",
"audio",
"samples",
"in",
"WAV",
"PCM",
"format",
"."
] |
def convertToWAV(self):
    """Get a copy of stored audio samples in WAV PCM format.

    Returns
    -------
    bytes
        Stored samples rescaled to 16-bit signed PCM and serialized as
        raw bytes.
    """
    # Scale normalized samples to the signed 16-bit range, then emit the
    # raw little-to-native-endian PCM byte stream.
    peak = (1 << 15) - 1
    scaled = self._samples * peak
    return np.asarray(scaled, dtype=np.int16).tobytes()
|
[
"def",
"convertToWAV",
"(",
"self",
")",
":",
"return",
"np",
".",
"asarray",
"(",
"self",
".",
"_samples",
"*",
"(",
"(",
"1",
"<<",
"15",
")",
"-",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"int16",
")",
".",
"tobytes",
"(",
")"
] |
https://github.com/psychopy/psychopy/blob/01b674094f38d0e0bd51c45a6f66f671d7041696/psychopy/sound/audioclip.py#L645-L656
|
|
rougier/freetype-py
|
7cf83720274806aad190e1ea04a5f2c36bcc2501
|
freetype/__init__.py
|
python
|
Face.select_charmap
|
( self, encoding )
|
Select a given charmap by its encoding tag (as listed in 'freetype.h').
**Note**:
This function returns an error if no charmap in the face corresponds to
the encoding queried here.
Because many fonts contain more than a single cmap for Unicode
encoding, this function has some special code to select the one which
covers Unicode best ('best' in the sense that a UCS-4 cmap is preferred
to a UCS-2 cmap). It is thus preferable to FT_Set_Charmap in this case.
|
Select a given charmap by its encoding tag (as listed in 'freetype.h').
|
[
"Select",
"a",
"given",
"charmap",
"by",
"its",
"encoding",
"tag",
"(",
"as",
"listed",
"in",
"freetype",
".",
"h",
")",
"."
] |
def select_charmap( self, encoding ):
    '''
    Select a given charmap by its encoding tag (as listed in 'freetype.h').

    **Note**:

    This function returns an error if no charmap in the face corresponds to
    the encoding queried here.

    Because many fonts contain more than a single cmap for Unicode
    encoding, this function has some special code to select the one which
    covers Unicode best ('best' in the sense that a UCS-4 cmap is preferred
    to a UCS-2 cmap). It is thus preferable to FT_Set_Charmap in this case.
    '''
    status = FT_Select_Charmap( self._FT_Face, encoding )
    if status:
        raise FT_Exception( status )
|
[
"def",
"select_charmap",
"(",
"self",
",",
"encoding",
")",
":",
"error",
"=",
"FT_Select_Charmap",
"(",
"self",
".",
"_FT_Face",
",",
"encoding",
")",
"if",
"error",
":",
"raise",
"FT_Exception",
"(",
"error",
")"
] |
https://github.com/rougier/freetype-py/blob/7cf83720274806aad190e1ea04a5f2c36bcc2501/freetype/__init__.py#L1312-L1327
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/openshift_logging/library/openshift_logging_facts.py
|
python
|
OpenshiftLoggingFacts.facts_for_configmaps
|
(self, namespace)
|
Gathers facts for configmaps in logging namespace
|
Gathers facts for configmaps in logging namespace
|
[
"Gathers",
"facts",
"for",
"configmaps",
"in",
"logging",
"namespace"
] |
def facts_for_configmaps(self, namespace):
    ''' Gathers facts for configmaps in logging namespace '''
    self.default_keys_for("configmaps")
    found = self.oc_command("get", "configmaps", namespace=namespace)
    items = found["items"]
    if len(items) == 0:
        return
    for entry in items:
        cm_name = entry["metadata"]["name"]
        component = self.comp(cm_name)
        # Ignore configmaps that do not map to a known logging component.
        if component is None:
            continue
        data = entry["data"]
        self.add_facts_for(component, "configmaps", cm_name, dict(data))
        # Elasticsearch configmaps get per-key fact extraction as well.
        if component in ["elasticsearch", "elasticsearch_ops"]:
            for key in data:
                self.facts_from_configmap(component, "configmaps", cm_name, key, data[key])
|
[
"def",
"facts_for_configmaps",
"(",
"self",
",",
"namespace",
")",
":",
"self",
".",
"default_keys_for",
"(",
"\"configmaps\"",
")",
"a_list",
"=",
"self",
".",
"oc_command",
"(",
"\"get\"",
",",
"\"configmaps\"",
",",
"namespace",
"=",
"namespace",
")",
"if",
"len",
"(",
"a_list",
"[",
"\"items\"",
"]",
")",
"==",
"0",
":",
"return",
"for",
"item",
"in",
"a_list",
"[",
"\"items\"",
"]",
":",
"name",
"=",
"item",
"[",
"\"metadata\"",
"]",
"[",
"\"name\"",
"]",
"comp",
"=",
"self",
".",
"comp",
"(",
"name",
")",
"if",
"comp",
"is",
"not",
"None",
":",
"self",
".",
"add_facts_for",
"(",
"comp",
",",
"\"configmaps\"",
",",
"name",
",",
"dict",
"(",
"item",
"[",
"\"data\"",
"]",
")",
")",
"if",
"comp",
"in",
"[",
"\"elasticsearch\"",
",",
"\"elasticsearch_ops\"",
"]",
":",
"for",
"config_key",
"in",
"item",
"[",
"\"data\"",
"]",
":",
"self",
".",
"facts_from_configmap",
"(",
"comp",
",",
"\"configmaps\"",
",",
"name",
",",
"config_key",
",",
"item",
"[",
"\"data\"",
"]",
"[",
"config_key",
"]",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/openshift_logging/library/openshift_logging_facts.py#L218-L231
|
||
wbond/packagecontrol.io
|
9f5eb7e3392e6bc2ad979ad32d3dd27ef9c00b20
|
app/lib/package_control/deps/asn1crypto/x509.py
|
python
|
Certificate.ocsp_no_check_value
|
(self)
|
return self._ocsp_no_check_value
|
This extension is used on certificates of OCSP responders, indicating
that revocation information for the certificate should never need to
be verified, thus preventing possible loops in path validation.
:return:
None or a Null object (if present)
|
This extension is used on certificates of OCSP responders, indicating
that revocation information for the certificate should never need to
be verified, thus preventing possible loops in path validation.
|
[
"This",
"extension",
"is",
"used",
"on",
"certificates",
"of",
"OCSP",
"responders",
"indicating",
"that",
"revocation",
"information",
"for",
"the",
"certificate",
"should",
"never",
"need",
"to",
"be",
"verified",
"thus",
"preventing",
"possible",
"loops",
"in",
"path",
"validation",
"."
] |
def ocsp_no_check_value(self):
    """
    This extension is used on certificates of OCSP responders, indicating
    that revocation information for the certificate should never need to
    be verified, thus preventing possible loops in path validation.
    :return:
        None or a Null object (if present)
    """
    # Extensions are parsed lazily; ensure they have been processed
    # before reading the cached value.
    if not self._processed_extensions:
        self._set_extensions()
    return self._ocsp_no_check_value
|
[
"def",
"ocsp_no_check_value",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_processed_extensions",
":",
"self",
".",
"_set_extensions",
"(",
")",
"return",
"self",
".",
"_ocsp_no_check_value"
] |
https://github.com/wbond/packagecontrol.io/blob/9f5eb7e3392e6bc2ad979ad32d3dd27ef9c00b20/app/lib/package_control/deps/asn1crypto/x509.py#L2484-L2496
|
|
OmniSharp/omnisharp-sublime
|
19baf3c4d350193af0f8da1ae5a8df0fa6cded61
|
lib/urllib3/util/timeout.py
|
python
|
Timeout.start_connect
|
(self)
|
return self._start_connect
|
Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
|
Start the timeout clock, used during a connect() attempt
|
[
"Start",
"the",
"timeout",
"clock",
"used",
"during",
"a",
"connect",
"()",
"attempt"
] |
def start_connect(self):
    """ Start the timeout clock, used during a connect() attempt

    :raises urllib3.exceptions.TimeoutStateError: if you attempt
        to start a timer that has been started already.
    """
    # The clock may only be started once per connection attempt.
    already_running = self._start_connect is not None
    if already_running:
        raise TimeoutStateError("Timeout timer has already been started.")
    self._start_connect = current_time()
    return self._start_connect
|
[
"def",
"start_connect",
"(",
"self",
")",
":",
"if",
"self",
".",
"_start_connect",
"is",
"not",
"None",
":",
"raise",
"TimeoutStateError",
"(",
"\"Timeout timer has already been started.\"",
")",
"self",
".",
"_start_connect",
"=",
"current_time",
"(",
")",
"return",
"self",
".",
"_start_connect"
] |
https://github.com/OmniSharp/omnisharp-sublime/blob/19baf3c4d350193af0f8da1ae5a8df0fa6cded61/lib/urllib3/util/timeout.py#L169-L178
|
|
mdiazcl/fuzzbunch-debian
|
2b76c2249ade83a389ae3badb12a1bd09901fd2c
|
windows/fuzzbunch/plugin.py
|
python
|
Plugin.isHiddenParameter
|
(self, name)
|
return False
|
Given a parameter name, is that parameter hidden?
|
Given a parameter name, is that parameter hidden?
|
[
"Given",
"a",
"parameter",
"name",
"is",
"that",
"parameter",
"hidden?"
] |
def isHiddenParameter(self, name):
    """Given a parameter name, is that parameter hidden?

    Returns False when the name is not a known parameter.
    """
    # Fix: check the name is a valid parameter *before* fetching it.
    # The original fetched unconditionally, doing a pointless (and
    # potentially unsafe) lookup for unknown names.
    if self.isParameter(name):
        return self.getParameter(name).isHidden()
    return False
|
[
"def",
"isHiddenParameter",
"(",
"self",
",",
"name",
")",
":",
"param",
"=",
"self",
".",
"getParameter",
"(",
"name",
")",
"if",
"self",
".",
"isParameter",
"(",
"name",
")",
":",
"return",
"param",
".",
"isHidden",
"(",
")",
"return",
"False"
] |
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/fuzzbunch/plugin.py#L189-L194
|
|
Drakkar-Software/OctoBot
|
c80ed2270e5d085994213955c0f56b9e3b70b476
|
octobot/logger.py
|
python
|
init_octobot_chan_logger
|
(bot_id: str)
|
[] |
async def init_octobot_chan_logger(bot_id: str):
    """Register the logger callback as a consumer of this bot's OctoBot channel.

    Subscribes octobot_channel_callback at LOGGER_PRIORITY_LEVEL, limited to
    NOTIFICATION and ERROR subjects.
    """
    await channel_instances.get_chan_at_id(constants.OCTOBOT_CHANNEL, bot_id).new_consumer(
        octobot_channel_callback,
        priority_level=LOGGER_PRIORITY_LEVEL,
        bot_id=bot_id,
        subject=[enums.OctoBotChannelSubjects.NOTIFICATION.value, enums.OctoBotChannelSubjects.ERROR.value]
    )
|
[
"async",
"def",
"init_octobot_chan_logger",
"(",
"bot_id",
":",
"str",
")",
":",
"await",
"channel_instances",
".",
"get_chan_at_id",
"(",
"constants",
".",
"OCTOBOT_CHANNEL",
",",
"bot_id",
")",
".",
"new_consumer",
"(",
"octobot_channel_callback",
",",
"priority_level",
"=",
"LOGGER_PRIORITY_LEVEL",
",",
"bot_id",
"=",
"bot_id",
",",
"subject",
"=",
"[",
"enums",
".",
"OctoBotChannelSubjects",
".",
"NOTIFICATION",
".",
"value",
",",
"enums",
".",
"OctoBotChannelSubjects",
".",
"ERROR",
".",
"value",
"]",
")"
] |
https://github.com/Drakkar-Software/OctoBot/blob/c80ed2270e5d085994213955c0f56b9e3b70b476/octobot/logger.py#L170-L176
|
||||
ethereum/trinity
|
6383280c5044feb06695ac2f7bc1100b7bcf4fe0
|
trinity/config.py
|
python
|
TrinityConfig.__init__
|
(self,
network_id: int,
app_identifier: str = "",
genesis_config: Dict[str, Any] = None,
max_peers: int = 25,
trinity_root_dir: Path = None,
trinity_tmp_root_dir: bool = False,
data_dir: Path = None,
nodekey_path: Path = None,
nodekey: PrivateKey = None,
port: int = 30303,
preferred_nodes: Tuple[KademliaNode, ...] = None,
bootstrap_nodes: Tuple[KademliaNode, ...] = None)
|
[] |
def __init__(self,
network_id: int,
app_identifier: str = "",
genesis_config: Dict[str, Any] = None,
max_peers: int = 25,
trinity_root_dir: Path = None,
trinity_tmp_root_dir: bool = False,
data_dir: Path = None,
nodekey_path: Path = None,
nodekey: PrivateKey = None,
port: int = 30303,
preferred_nodes: Tuple[KademliaNode, ...] = None,
bootstrap_nodes: Tuple[KademliaNode, ...] = None) -> None:
self.app_identifier = app_identifier
self.network_id = network_id
self.max_peers = max_peers
self.port = port
self._app_configs = {}
if genesis_config is not None:
self.genesis_config = genesis_config
elif network_id in PRECONFIGURED_NETWORKS:
self.genesis_config = _load_preconfigured_genesis_config(network_id)
else:
raise TypeError(
"No `genesis_config` was provided and the `network_id` is not "
"in the known preconfigured networks. Cannot initialize "
"ChainConfig"
)
if trinity_root_dir is not None:
self.trinity_root_dir = trinity_root_dir
self.trinity_tmp_root_dir = trinity_tmp_root_dir
if not preferred_nodes and self.network_id in DEFAULT_PREFERRED_NODES:
self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
else:
self.preferred_nodes = preferred_nodes
if bootstrap_nodes is None:
if self.network_id in PRECONFIGURED_NETWORKS:
bootnodes = PRECONFIGURED_NETWORKS[self.network_id].bootnodes
self.bootstrap_nodes = tuple(
KademliaNode.from_uri(enode) for enode in bootnodes
)
else:
self.bootstrap_nodes = tuple()
else:
self.bootstrap_nodes = bootstrap_nodes
if data_dir is not None:
self.data_dir = data_dir
if nodekey is not None and nodekey_path is not None:
raise ValueError("It is invalid to provide both a `nodekey` and a `nodekey_path`")
elif nodekey_path is not None:
self.nodekey_path = nodekey_path
elif nodekey is not None:
self.nodekey = nodekey
|
[
"def",
"__init__",
"(",
"self",
",",
"network_id",
":",
"int",
",",
"app_identifier",
":",
"str",
"=",
"\"\"",
",",
"genesis_config",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"max_peers",
":",
"int",
"=",
"25",
",",
"trinity_root_dir",
":",
"Path",
"=",
"None",
",",
"trinity_tmp_root_dir",
":",
"bool",
"=",
"False",
",",
"data_dir",
":",
"Path",
"=",
"None",
",",
"nodekey_path",
":",
"Path",
"=",
"None",
",",
"nodekey",
":",
"PrivateKey",
"=",
"None",
",",
"port",
":",
"int",
"=",
"30303",
",",
"preferred_nodes",
":",
"Tuple",
"[",
"KademliaNode",
",",
"...",
"]",
"=",
"None",
",",
"bootstrap_nodes",
":",
"Tuple",
"[",
"KademliaNode",
",",
"...",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"app_identifier",
"=",
"app_identifier",
"self",
".",
"network_id",
"=",
"network_id",
"self",
".",
"max_peers",
"=",
"max_peers",
"self",
".",
"port",
"=",
"port",
"self",
".",
"_app_configs",
"=",
"{",
"}",
"if",
"genesis_config",
"is",
"not",
"None",
":",
"self",
".",
"genesis_config",
"=",
"genesis_config",
"elif",
"network_id",
"in",
"PRECONFIGURED_NETWORKS",
":",
"self",
".",
"genesis_config",
"=",
"_load_preconfigured_genesis_config",
"(",
"network_id",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"No `genesis_config` was provided and the `network_id` is not \"",
"\"in the known preconfigured networks. Cannot initialize \"",
"\"ChainConfig\"",
")",
"if",
"trinity_root_dir",
"is",
"not",
"None",
":",
"self",
".",
"trinity_root_dir",
"=",
"trinity_root_dir",
"self",
".",
"trinity_tmp_root_dir",
"=",
"trinity_tmp_root_dir",
"if",
"not",
"preferred_nodes",
"and",
"self",
".",
"network_id",
"in",
"DEFAULT_PREFERRED_NODES",
":",
"self",
".",
"preferred_nodes",
"=",
"DEFAULT_PREFERRED_NODES",
"[",
"self",
".",
"network_id",
"]",
"else",
":",
"self",
".",
"preferred_nodes",
"=",
"preferred_nodes",
"if",
"bootstrap_nodes",
"is",
"None",
":",
"if",
"self",
".",
"network_id",
"in",
"PRECONFIGURED_NETWORKS",
":",
"bootnodes",
"=",
"PRECONFIGURED_NETWORKS",
"[",
"self",
".",
"network_id",
"]",
".",
"bootnodes",
"self",
".",
"bootstrap_nodes",
"=",
"tuple",
"(",
"KademliaNode",
".",
"from_uri",
"(",
"enode",
")",
"for",
"enode",
"in",
"bootnodes",
")",
"else",
":",
"self",
".",
"bootstrap_nodes",
"=",
"tuple",
"(",
")",
"else",
":",
"self",
".",
"bootstrap_nodes",
"=",
"bootstrap_nodes",
"if",
"data_dir",
"is",
"not",
"None",
":",
"self",
".",
"data_dir",
"=",
"data_dir",
"if",
"nodekey",
"is",
"not",
"None",
"and",
"nodekey_path",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"It is invalid to provide both a `nodekey` and a `nodekey_path`\"",
")",
"elif",
"nodekey_path",
"is",
"not",
"None",
":",
"self",
".",
"nodekey_path",
"=",
"nodekey_path",
"elif",
"nodekey",
"is",
"not",
"None",
":",
"self",
".",
"nodekey",
"=",
"nodekey"
] |
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/trinity/config.py#L278-L336
|
||||
PrefectHQ/prefect
|
67bdc94e2211726d99561f6f52614bec8970e981
|
src/prefect/_version.py
|
python
|
render
|
(pieces, style)
|
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
|
Render the given version pieces into the requested style.
|
Render the given version pieces into the requested style.
|
[
"Render",
"the",
"given",
"version",
"pieces",
"into",
"the",
"requested",
"style",
"."
] |
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
|
[
"def",
"render",
"(",
"pieces",
",",
"style",
")",
":",
"if",
"pieces",
"[",
"\"error\"",
"]",
":",
"return",
"{",
"\"version\"",
":",
"\"unknown\"",
",",
"\"full-revisionid\"",
":",
"pieces",
".",
"get",
"(",
"\"long\"",
")",
",",
"\"dirty\"",
":",
"None",
",",
"\"error\"",
":",
"pieces",
"[",
"\"error\"",
"]",
",",
"\"date\"",
":",
"None",
",",
"}",
"if",
"not",
"style",
"or",
"style",
"==",
"\"default\"",
":",
"style",
"=",
"\"pep440\"",
"# the default",
"if",
"style",
"==",
"\"pep440\"",
":",
"rendered",
"=",
"render_pep440",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-pre\"",
":",
"rendered",
"=",
"render_pep440_pre",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-post\"",
":",
"rendered",
"=",
"render_pep440_post",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"pep440-old\"",
":",
"rendered",
"=",
"render_pep440_old",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"git-describe\"",
":",
"rendered",
"=",
"render_git_describe",
"(",
"pieces",
")",
"elif",
"style",
"==",
"\"git-describe-long\"",
":",
"rendered",
"=",
"render_git_describe_long",
"(",
"pieces",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown style '%s'\"",
"%",
"style",
")",
"return",
"{",
"\"version\"",
":",
"rendered",
",",
"\"full-revisionid\"",
":",
"pieces",
"[",
"\"long\"",
"]",
",",
"\"dirty\"",
":",
"pieces",
"[",
"\"dirty\"",
"]",
",",
"\"error\"",
":",
"None",
",",
"\"date\"",
":",
"pieces",
".",
"get",
"(",
"\"date\"",
")",
",",
"}"
] |
https://github.com/PrefectHQ/prefect/blob/67bdc94e2211726d99561f6f52614bec8970e981/src/prefect/_version.py#L469-L504
|
|
zzzeek/sqlalchemy
|
fc5c54fcd4d868c2a4c7ac19668d72f506fe821e
|
lib/sqlalchemy/ext/asyncio/result.py
|
python
|
AsyncMappingResult.one
|
(self)
|
return await greenlet_spawn(self._only_one_row, True, True, False)
|
Return exactly one object or raise an exception.
Equivalent to :meth:`_asyncio.AsyncResult.one` except that
mapping values, rather than :class:`_result.Row` objects,
are returned.
|
Return exactly one object or raise an exception.
|
[
"Return",
"exactly",
"one",
"object",
"or",
"raise",
"an",
"exception",
"."
] |
async def one(self):
"""Return exactly one object or raise an exception.
Equivalent to :meth:`_asyncio.AsyncResult.one` except that
mapping values, rather than :class:`_result.Row` objects,
are returned.
"""
return await greenlet_spawn(self._only_one_row, True, True, False)
|
[
"async",
"def",
"one",
"(",
"self",
")",
":",
"return",
"await",
"greenlet_spawn",
"(",
"self",
".",
"_only_one_row",
",",
"True",
",",
"True",
",",
"False",
")"
] |
https://github.com/zzzeek/sqlalchemy/blob/fc5c54fcd4d868c2a4c7ac19668d72f506fe821e/lib/sqlalchemy/ext/asyncio/result.py#L640-L648
|
|
EtienneCmb/visbrain
|
b599038e095919dc193b12d5e502d127de7d03c9
|
visbrain/objects/source_obj.py
|
python
|
SourceObj.symbol
|
(self, value)
|
Set symbol value.
|
Set symbol value.
|
[
"Set",
"symbol",
"value",
"."
] |
def symbol(self, value):
"""Set symbol value."""
assert isinstance(value, str)
self._sources.symbol = value
self._sources.update()
|
[
"def",
"symbol",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"str",
")",
"self",
".",
"_sources",
".",
"symbol",
"=",
"value",
"self",
".",
"_sources",
".",
"update",
"(",
")"
] |
https://github.com/EtienneCmb/visbrain/blob/b599038e095919dc193b12d5e502d127de7d03c9/visbrain/objects/source_obj.py#L652-L656
|
||
richshaw2015/oh-my-rss
|
68b9284e0acaf44ea389d675b71949177f9f3256
|
web/utils.py
|
python
|
set_active_rss
|
(feeds)
|
访问过的源,设置一个标识,3天缓存
:param feeds:
:return:
|
访问过的源,设置一个标识,3天缓存
:param feeds:
:return:
|
[
"访问过的源,设置一个标识,3天缓存",
":",
"param",
"feeds",
":",
":",
"return",
":"
] |
def set_active_rss(feeds):
"""
访问过的源,设置一个标识,3天缓存
:param feeds:
:return:
"""
with R.pipeline(transaction=False) as p:
for feed in feeds:
p.set(settings.REDIS_ACTIVE_RSS_KEY % feed, 1, 3*24*3600)
p.execute()
|
[
"def",
"set_active_rss",
"(",
"feeds",
")",
":",
"with",
"R",
".",
"pipeline",
"(",
"transaction",
"=",
"False",
")",
"as",
"p",
":",
"for",
"feed",
"in",
"feeds",
":",
"p",
".",
"set",
"(",
"settings",
".",
"REDIS_ACTIVE_RSS_KEY",
"%",
"feed",
",",
"1",
",",
"3",
"*",
"24",
"*",
"3600",
")",
"p",
".",
"execute",
"(",
")"
] |
https://github.com/richshaw2015/oh-my-rss/blob/68b9284e0acaf44ea389d675b71949177f9f3256/web/utils.py#L124-L133
|
||
bleachbit/bleachbit
|
88fc4452936d02b56a76f07ce2142306bb47262b
|
bleachbit/GUI.py
|
python
|
Bleachbit.cb_shred_clipboard
|
(self, action, param)
|
Callback for menu option: shred paths from clipboard
|
Callback for menu option: shred paths from clipboard
|
[
"Callback",
"for",
"menu",
"option",
":",
"shred",
"paths",
"from",
"clipboard"
] |
def cb_shred_clipboard(self, action, param):
"""Callback for menu option: shred paths from clipboard"""
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
clipboard.request_targets(self.cb_clipboard_uri_received)
|
[
"def",
"cb_shred_clipboard",
"(",
"self",
",",
"action",
",",
"param",
")",
":",
"clipboard",
"=",
"Gtk",
".",
"Clipboard",
".",
"get",
"(",
"Gdk",
".",
"SELECTION_CLIPBOARD",
")",
"clipboard",
".",
"request_targets",
"(",
"self",
".",
"cb_clipboard_uri_received",
")"
] |
https://github.com/bleachbit/bleachbit/blob/88fc4452936d02b56a76f07ce2142306bb47262b/bleachbit/GUI.py#L225-L228
|
||
pyg-team/pytorch_geometric
|
b920e9a3a64e22c8356be55301c88444ff051cae
|
torch_geometric/nn/dense/dense_sage_conv.py
|
python
|
DenseSAGEConv.reset_parameters
|
(self)
|
[] |
def reset_parameters(self):
self.lin_rel.reset_parameters()
self.lin_root.reset_parameters()
|
[
"def",
"reset_parameters",
"(",
"self",
")",
":",
"self",
".",
"lin_rel",
".",
"reset_parameters",
"(",
")",
"self",
".",
"lin_root",
".",
"reset_parameters",
"(",
")"
] |
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/torch_geometric/nn/dense/dense_sage_conv.py#L29-L31
|
||||
tensorflow/graphics
|
86997957324bfbdd85848daae989b4c02588faa0
|
tensorflow_graphics/projects/local_implicit_grid/core/model_g2v.py
|
python
|
ResBlock3D.__init__
|
(self, neck_channels, out_channels)
|
Initialization.
Args:
neck_channels: int, number of channels in bottleneck layer.
out_channels: int, number of output channels.
|
Initialization.
|
[
"Initialization",
"."
] |
def __init__(self, neck_channels, out_channels):
"""Initialization.
Args:
neck_channels: int, number of channels in bottleneck layer.
out_channels: int, number of output channels.
"""
super(ResBlock3D, self).__init__()
self.neck_channels = neck_channels
self.out_channels = out_channels
self.conv1 = layers.Conv3D(neck_channels, kernel_size=1, strides=1)
self.conv2 = layers.Conv3D(
neck_channels, kernel_size=3, strides=1, padding="same")
self.conv3 = layers.Conv3D(out_channels, kernel_size=1, strides=1)
self.bn1 = layers.BatchNormalization(axis=-1)
self.bn2 = layers.BatchNormalization(axis=-1)
self.bn3 = layers.BatchNormalization(axis=-1)
self.shortcut = layers.Conv3D(out_channels, kernel_size=1, strides=1)
|
[
"def",
"__init__",
"(",
"self",
",",
"neck_channels",
",",
"out_channels",
")",
":",
"super",
"(",
"ResBlock3D",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"neck_channels",
"=",
"neck_channels",
"self",
".",
"out_channels",
"=",
"out_channels",
"self",
".",
"conv1",
"=",
"layers",
".",
"Conv3D",
"(",
"neck_channels",
",",
"kernel_size",
"=",
"1",
",",
"strides",
"=",
"1",
")",
"self",
".",
"conv2",
"=",
"layers",
".",
"Conv3D",
"(",
"neck_channels",
",",
"kernel_size",
"=",
"3",
",",
"strides",
"=",
"1",
",",
"padding",
"=",
"\"same\"",
")",
"self",
".",
"conv3",
"=",
"layers",
".",
"Conv3D",
"(",
"out_channels",
",",
"kernel_size",
"=",
"1",
",",
"strides",
"=",
"1",
")",
"self",
".",
"bn1",
"=",
"layers",
".",
"BatchNormalization",
"(",
"axis",
"=",
"-",
"1",
")",
"self",
".",
"bn2",
"=",
"layers",
".",
"BatchNormalization",
"(",
"axis",
"=",
"-",
"1",
")",
"self",
".",
"bn3",
"=",
"layers",
".",
"BatchNormalization",
"(",
"axis",
"=",
"-",
"1",
")",
"self",
".",
"shortcut",
"=",
"layers",
".",
"Conv3D",
"(",
"out_channels",
",",
"kernel_size",
"=",
"1",
",",
"strides",
"=",
"1",
")"
] |
https://github.com/tensorflow/graphics/blob/86997957324bfbdd85848daae989b4c02588faa0/tensorflow_graphics/projects/local_implicit_grid/core/model_g2v.py#L32-L50
|
||
apache/incubator-spot
|
2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb
|
spot-ingest/pipelines/proxy/bluecoat.py
|
python
|
spot_decoder
|
(s)
|
return s
|
Dummy decoder function.
:param s: input to decode
:returns: s
|
Dummy decoder function.
|
[
"Dummy",
"decoder",
"function",
"."
] |
def spot_decoder(s):
"""
Dummy decoder function.
:param s: input to decode
:returns: s
"""
if s is None:
return None
return s
|
[
"def",
"spot_decoder",
"(",
"s",
")",
":",
"if",
"s",
"is",
"None",
":",
"return",
"None",
"return",
"s"
] |
https://github.com/apache/incubator-spot/blob/2d60a2adae7608b43e90ce1b9ec0adf24f6cc8eb/spot-ingest/pipelines/proxy/bluecoat.py#L94-L103
|
|
CedricGuillemet/Imogen
|
ee417b42747ed5b46cb11b02ef0c3630000085b3
|
bin/Lib/logging/config.py
|
python
|
stopListening
|
()
|
Stop the listening server which was created with a call to listen().
|
Stop the listening server which was created with a call to listen().
|
[
"Stop",
"the",
"listening",
"server",
"which",
"was",
"created",
"with",
"a",
"call",
"to",
"listen",
"()",
"."
] |
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
|
[
"def",
"stopListening",
"(",
")",
":",
"global",
"_listener",
"logging",
".",
"_acquireLock",
"(",
")",
"try",
":",
"if",
"_listener",
":",
"_listener",
".",
"abort",
"=",
"1",
"_listener",
"=",
"None",
"finally",
":",
"logging",
".",
"_releaseLock",
"(",
")"
] |
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/logging/config.py#L924-L935
|
||
stratosphereips/StratosphereLinuxIPS
|
985ac0f141dd71fe9c6faa8307bcf95a3754951d
|
modules/virustotal/virustotal.py
|
python
|
Module.set_url_data_in_URLInfo
|
(self,url,cached_data)
|
Function to set VirusTotal data of the URL in the URLInfo.
|
Function to set VirusTotal data of the URL in the URLInfo.
|
[
"Function",
"to",
"set",
"VirusTotal",
"data",
"of",
"the",
"URL",
"in",
"the",
"URLInfo",
"."
] |
def set_url_data_in_URLInfo(self,url,cached_data):
"""
Function to set VirusTotal data of the URL in the URLInfo.
"""
score = self.get_url_vt_data(url)
# Score of this url didn't change
vtdata = {"URL" : score,
"timestamp": time.time()}
data = {"VirusTotal" : vtdata}
__database__.setInfoForURLs(url, data)
|
[
"def",
"set_url_data_in_URLInfo",
"(",
"self",
",",
"url",
",",
"cached_data",
")",
":",
"score",
"=",
"self",
".",
"get_url_vt_data",
"(",
"url",
")",
"# Score of this url didn't change",
"vtdata",
"=",
"{",
"\"URL\"",
":",
"score",
",",
"\"timestamp\"",
":",
"time",
".",
"time",
"(",
")",
"}",
"data",
"=",
"{",
"\"VirusTotal\"",
":",
"vtdata",
"}",
"__database__",
".",
"setInfoForURLs",
"(",
"url",
",",
"data",
")"
] |
https://github.com/stratosphereips/StratosphereLinuxIPS/blob/985ac0f141dd71fe9c6faa8307bcf95a3754951d/modules/virustotal/virustotal.py#L159-L168
|
||
oracle/graalpython
|
577e02da9755d916056184ec441c26e00b70145c
|
graalpython/lib-python/3/typing.py
|
python
|
Generic.__init_subclass__
|
(cls, *args, **kwargs)
|
[] |
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
tvars = []
if '__orig_bases__' in cls.__dict__:
error = Generic in cls.__orig_bases__
else:
error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
if error:
raise TypeError("Cannot inherit from plain Generic")
if '__orig_bases__' in cls.__dict__:
tvars = _collect_type_vars(cls.__orig_bases__)
# Look for Generic[T1, ..., Tn].
# If found, tvars must be a subset of it.
# If not found, tvars is it.
# Also check for and reject plain Generic,
# and reject multiple Generic[...].
gvars = None
for base in cls.__orig_bases__:
if (isinstance(base, _GenericAlias) and
base.__origin__ is Generic):
if gvars is not None:
raise TypeError(
"Cannot inherit from Generic[...] multiple types.")
gvars = base.__parameters__
if gvars is not None:
tvarset = set(tvars)
gvarset = set(gvars)
if not tvarset <= gvarset:
s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
s_args = ', '.join(str(g) for g in gvars)
raise TypeError(f"Some type variables ({s_vars}) are"
f" not listed in Generic[{s_args}]")
tvars = gvars
cls.__parameters__ = tuple(tvars)
|
[
"def",
"__init_subclass__",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init_subclass__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"tvars",
"=",
"[",
"]",
"if",
"'__orig_bases__'",
"in",
"cls",
".",
"__dict__",
":",
"error",
"=",
"Generic",
"in",
"cls",
".",
"__orig_bases__",
"else",
":",
"error",
"=",
"Generic",
"in",
"cls",
".",
"__bases__",
"and",
"cls",
".",
"__name__",
"!=",
"'Protocol'",
"if",
"error",
":",
"raise",
"TypeError",
"(",
"\"Cannot inherit from plain Generic\"",
")",
"if",
"'__orig_bases__'",
"in",
"cls",
".",
"__dict__",
":",
"tvars",
"=",
"_collect_type_vars",
"(",
"cls",
".",
"__orig_bases__",
")",
"# Look for Generic[T1, ..., Tn].",
"# If found, tvars must be a subset of it.",
"# If not found, tvars is it.",
"# Also check for and reject plain Generic,",
"# and reject multiple Generic[...].",
"gvars",
"=",
"None",
"for",
"base",
"in",
"cls",
".",
"__orig_bases__",
":",
"if",
"(",
"isinstance",
"(",
"base",
",",
"_GenericAlias",
")",
"and",
"base",
".",
"__origin__",
"is",
"Generic",
")",
":",
"if",
"gvars",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"\"Cannot inherit from Generic[...] multiple types.\"",
")",
"gvars",
"=",
"base",
".",
"__parameters__",
"if",
"gvars",
"is",
"not",
"None",
":",
"tvarset",
"=",
"set",
"(",
"tvars",
")",
"gvarset",
"=",
"set",
"(",
"gvars",
")",
"if",
"not",
"tvarset",
"<=",
"gvarset",
":",
"s_vars",
"=",
"', '",
".",
"join",
"(",
"str",
"(",
"t",
")",
"for",
"t",
"in",
"tvars",
"if",
"t",
"not",
"in",
"gvarset",
")",
"s_args",
"=",
"', '",
".",
"join",
"(",
"str",
"(",
"g",
")",
"for",
"g",
"in",
"gvars",
")",
"raise",
"TypeError",
"(",
"f\"Some type variables ({s_vars}) are\"",
"f\" not listed in Generic[{s_args}]\"",
")",
"tvars",
"=",
"gvars",
"cls",
".",
"__parameters__",
"=",
"tuple",
"(",
"tvars",
")"
] |
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/typing.py#L900-L933
|
||||
osmr/imgclsmob
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
tensorflow_/tensorflowcv/models/seresnet.py
|
python
|
get_seresnet
|
(blocks,
bottleneck=None,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs)
|
return net
|
Create SE-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
|
Create SE-ResNet model with specific parameters.
|
[
"Create",
"SE",
"-",
"ResNet",
"model",
"with",
"specific",
"parameters",
"."
] |
def get_seresnet(blocks,
bottleneck=None,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create SE-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
elif blocks == 14 and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = SEResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_state_dict
net.state_dict, net.file_path = download_state_dict(
model_name=model_name,
local_model_store_dir_path=root)
else:
net.state_dict = None
net.file_path = None
return net
|
[
"def",
"get_seresnet",
"(",
"blocks",
",",
"bottleneck",
"=",
"None",
",",
"conv1_stride",
"=",
"True",
",",
"model_name",
"=",
"None",
",",
"pretrained",
"=",
"False",
",",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"~\"",
",",
"\".tensorflow\"",
",",
"\"models\"",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"bottleneck",
"is",
"None",
":",
"bottleneck",
"=",
"(",
"blocks",
">=",
"50",
")",
"if",
"blocks",
"==",
"10",
":",
"layers",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
"elif",
"blocks",
"==",
"12",
":",
"layers",
"=",
"[",
"2",
",",
"1",
",",
"1",
",",
"1",
"]",
"elif",
"blocks",
"==",
"14",
"and",
"not",
"bottleneck",
":",
"layers",
"=",
"[",
"2",
",",
"2",
",",
"1",
",",
"1",
"]",
"elif",
"(",
"blocks",
"==",
"14",
")",
"and",
"bottleneck",
":",
"layers",
"=",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
"elif",
"blocks",
"==",
"16",
":",
"layers",
"=",
"[",
"2",
",",
"2",
",",
"2",
",",
"1",
"]",
"elif",
"blocks",
"==",
"18",
":",
"layers",
"=",
"[",
"2",
",",
"2",
",",
"2",
",",
"2",
"]",
"elif",
"(",
"blocks",
"==",
"26",
")",
"and",
"not",
"bottleneck",
":",
"layers",
"=",
"[",
"3",
",",
"3",
",",
"3",
",",
"3",
"]",
"elif",
"(",
"blocks",
"==",
"26",
")",
"and",
"bottleneck",
":",
"layers",
"=",
"[",
"2",
",",
"2",
",",
"2",
",",
"2",
"]",
"elif",
"blocks",
"==",
"34",
":",
"layers",
"=",
"[",
"3",
",",
"4",
",",
"6",
",",
"3",
"]",
"elif",
"(",
"blocks",
"==",
"38",
")",
"and",
"bottleneck",
":",
"layers",
"=",
"[",
"3",
",",
"3",
",",
"3",
",",
"3",
"]",
"elif",
"blocks",
"==",
"50",
":",
"layers",
"=",
"[",
"3",
",",
"4",
",",
"6",
",",
"3",
"]",
"elif",
"blocks",
"==",
"101",
":",
"layers",
"=",
"[",
"3",
",",
"4",
",",
"23",
",",
"3",
"]",
"elif",
"blocks",
"==",
"152",
":",
"layers",
"=",
"[",
"3",
",",
"8",
",",
"36",
",",
"3",
"]",
"elif",
"blocks",
"==",
"200",
":",
"layers",
"=",
"[",
"3",
",",
"24",
",",
"36",
",",
"3",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported SE-ResNet with number of blocks: {}\"",
".",
"format",
"(",
"blocks",
")",
")",
"if",
"bottleneck",
":",
"assert",
"(",
"sum",
"(",
"layers",
")",
"*",
"3",
"+",
"2",
"==",
"blocks",
")",
"else",
":",
"assert",
"(",
"sum",
"(",
"layers",
")",
"*",
"2",
"+",
"2",
"==",
"blocks",
")",
"init_block_channels",
"=",
"64",
"channels_per_layers",
"=",
"[",
"64",
",",
"128",
",",
"256",
",",
"512",
"]",
"if",
"bottleneck",
":",
"bottleneck_factor",
"=",
"4",
"channels_per_layers",
"=",
"[",
"ci",
"*",
"bottleneck_factor",
"for",
"ci",
"in",
"channels_per_layers",
"]",
"channels",
"=",
"[",
"[",
"ci",
"]",
"*",
"li",
"for",
"(",
"ci",
",",
"li",
")",
"in",
"zip",
"(",
"channels_per_layers",
",",
"layers",
")",
"]",
"net",
"=",
"SEResNet",
"(",
"channels",
"=",
"channels",
",",
"init_block_channels",
"=",
"init_block_channels",
",",
"bottleneck",
"=",
"bottleneck",
",",
"conv1_stride",
"=",
"conv1_stride",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"if",
"(",
"model_name",
"is",
"None",
")",
"or",
"(",
"not",
"model_name",
")",
":",
"raise",
"ValueError",
"(",
"\"Parameter `model_name` should be properly initialized for loading pretrained model.\"",
")",
"from",
".",
"model_store",
"import",
"download_state_dict",
"net",
".",
"state_dict",
",",
"net",
".",
"file_path",
"=",
"download_state_dict",
"(",
"model_name",
"=",
"model_name",
",",
"local_model_store_dir_path",
"=",
"root",
")",
"else",
":",
"net",
".",
"state_dict",
"=",
"None",
"net",
".",
"file_path",
"=",
"None",
"return",
"net"
] |
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/tensorflow_/tensorflowcv/models/seresnet.py#L202-L298
|
|
PowerScript/KatanaFramework
|
0f6ad90a88de865d58ec26941cb4460501e75496
|
lib/setuptools/setuptools/command/easy_install.py
|
python
|
CommandSpec.from_string
|
(cls, string)
|
return cls(items)
|
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
|
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
|
[
"Construct",
"a",
"command",
"spec",
"from",
"a",
"simple",
"string",
"representing",
"a",
"command",
"line",
"parseable",
"by",
"shlex",
".",
"split",
"."
] |
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
|
[
"def",
"from_string",
"(",
"cls",
",",
"string",
")",
":",
"items",
"=",
"shlex",
".",
"split",
"(",
"string",
",",
"*",
"*",
"cls",
".",
"split_args",
")",
"return",
"cls",
"(",
"items",
")"
] |
https://github.com/PowerScript/KatanaFramework/blob/0f6ad90a88de865d58ec26941cb4460501e75496/lib/setuptools/setuptools/command/easy_install.py#L1955-L1961
|
|
JohnHammond/katana
|
4f58537428dc6776b0dcb8a852f7ccdde87dbebe
|
katana/units/raw/strings.py
|
python
|
Unit.evaluate
|
(self, case: Any)
|
Evaluate the target. Run ``strings`` on the target and
recurse on any newfound information.
:param case: A case returned by ``enumerate``. For this unit,\
the ``enumerate`` function is not used.
:return: None. This function should not return any data.
|
Evaluate the target. Run ``strings`` on the target and
recurse on any newfound information.
|
[
"Evaluate",
"the",
"target",
".",
"Run",
"strings",
"on",
"the",
"target",
"and",
"recurse",
"on",
"any",
"newfound",
"information",
"."
] |
def evaluate(self, case: Any):
"""
Evaluate the target. Run ``strings`` on the target and
recurse on any newfound information.
:param case: A case returned by ``enumerate``. For this unit,\
the ``enumerate`` function is not used.
:return: None. This function should not return any data.
"""
# Run the process.
command = ["strings", self.target.path, "-n", self.get("length", "10")]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Queuing recursion and registering data can be slow on large files.
# Look for flags first
lines = []
for line in p.stdout:
self.manager.find_flag(self, line)
lines.append(line)
for line in lines:
self.manager.register_data(self, line.rstrip(b"\n"))
|
[
"def",
"evaluate",
"(",
"self",
",",
"case",
":",
"Any",
")",
":",
"# Run the process.",
"command",
"=",
"[",
"\"strings\"",
",",
"self",
".",
"target",
".",
"path",
",",
"\"-n\"",
",",
"self",
".",
"get",
"(",
"\"length\"",
",",
"\"10\"",
")",
"]",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"# Queuing recursion and registering data can be slow on large files.",
"# Look for flags first",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"p",
".",
"stdout",
":",
"self",
".",
"manager",
".",
"find_flag",
"(",
"self",
",",
"line",
")",
"lines",
".",
"append",
"(",
"line",
")",
"for",
"line",
"in",
"lines",
":",
"self",
".",
"manager",
".",
"register_data",
"(",
"self",
",",
"line",
".",
"rstrip",
"(",
"b\"\\n\"",
")",
")"
] |
https://github.com/JohnHammond/katana/blob/4f58537428dc6776b0dcb8a852f7ccdde87dbebe/katana/units/raw/strings.py#L48-L71
|
||
XX-net/XX-Net
|
a9898cfcf0084195fb7e69b6bc834e59aecdf14f
|
python3.8.2/Lib/statistics.py
|
python
|
NormalDist.__rsub__
|
(x1, x2)
|
return -(x1 - x2)
|
Subtract a NormalDist from a constant or another NormalDist.
|
Subtract a NormalDist from a constant or another NormalDist.
|
[
"Subtract",
"a",
"NormalDist",
"from",
"a",
"constant",
"or",
"another",
"NormalDist",
"."
] |
def __rsub__(x1, x2):
"Subtract a NormalDist from a constant or another NormalDist."
return -(x1 - x2)
|
[
"def",
"__rsub__",
"(",
"x1",
",",
"x2",
")",
":",
"return",
"-",
"(",
"x1",
"-",
"x2",
")"
] |
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/statistics.py#L1085-L1087
|
|
iclavera/learning_to_adapt
|
bd7d99ba402521c96631e7d09714128f549db0f1
|
learning_to_adapt/mujoco_py/mjtypes.py
|
python
|
MjVisualWrapper.headlight
|
(self)
|
return self._wrapped.contents.headlight
|
[] |
def headlight(self):
return self._wrapped.contents.headlight
|
[
"def",
"headlight",
"(",
"self",
")",
":",
"return",
"self",
".",
"_wrapped",
".",
"contents",
".",
"headlight"
] |
https://github.com/iclavera/learning_to_adapt/blob/bd7d99ba402521c96631e7d09714128f549db0f1/learning_to_adapt/mujoco_py/mjtypes.py#L2070-L2071
|
|||
brendano/tweetmotif
|
1b0b1e3a941745cd5a26eba01f554688b7c4b27e
|
everything_else/djfrontend/django-1.0.2/contrib/gis/geos/libgeos.py
|
python
|
get_pointer_arr
|
(n)
|
return GeomArr()
|
Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer.
|
Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer.
|
[
"Gets",
"a",
"ctypes",
"pointer",
"array",
"(",
"of",
"length",
"n",
")",
"for",
"GEOSGeom_t",
"opaque",
"pointer",
"."
] |
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
|
[
"def",
"get_pointer_arr",
"(",
"n",
")",
":",
"GeomArr",
"=",
"GEOM_PTR",
"*",
"n",
"return",
"GeomArr",
"(",
")"
] |
https://github.com/brendano/tweetmotif/blob/1b0b1e3a941745cd5a26eba01f554688b7c4b27e/everything_else/djfrontend/django-1.0.2/contrib/gis/geos/libgeos.py#L99-L102
|
|
Robot-Will/Stino
|
a94831cd1bf40a59587a7b6cc2e9b5c4306b1bf2
|
libs/serial/urlhandler/protocol_socket.py
|
python
|
Serial.close
|
(self)
|
Close port
|
Close port
|
[
"Close",
"port"
] |
def close(self):
"""Close port"""
if self.is_open:
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
self._socket = None
self.is_open = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_open",
":",
"if",
"self",
".",
"_socket",
":",
"try",
":",
"self",
".",
"_socket",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"self",
".",
"_socket",
".",
"close",
"(",
")",
"except",
":",
"# ignore errors.",
"pass",
"self",
".",
"_socket",
"=",
"None",
"self",
".",
"is_open",
"=",
"False",
"# in case of quick reconnects, give the server some time",
"time",
".",
"sleep",
"(",
"0.3",
")"
] |
https://github.com/Robot-Will/Stino/blob/a94831cd1bf40a59587a7b6cc2e9b5c4306b1bf2/libs/serial/urlhandler/protocol_socket.py#L86-L99
|
||
enthought/traitsui
|
b7c38c7a47bf6ae7971f9ddab70c8a358647dd25
|
traitsui/wx/range_editor.py
|
python
|
LargeRangeSliderEditor.update_range_ui
|
(self)
|
Updates the slider range controls.
|
Updates the slider range controls.
|
[
"Updates",
"the",
"slider",
"range",
"controls",
"."
] |
def update_range_ui(self):
"""Updates the slider range controls."""
low, high = self.cur_low, self.cur_high
value = self.value
self._set_format()
self.control.label_lo.SetLabel(self._format % low)
self.control.label_hi.SetLabel(self._format % high)
if high > low:
ivalue = int((float(value - low) / (high - low)) * 10000.0)
else:
ivalue = low
self.control.slider.SetValue(ivalue)
text = self._format % self.value
self.control.text.SetValue(text)
factory = self.factory
f_low, f_high = self.low, self.high
if low == f_low:
self.control.button_lo.Disable()
else:
self.control.button_lo.Enable()
if high == f_high:
self.control.button_hi.Disable()
else:
self.control.button_hi.Enable()
|
[
"def",
"update_range_ui",
"(",
"self",
")",
":",
"low",
",",
"high",
"=",
"self",
".",
"cur_low",
",",
"self",
".",
"cur_high",
"value",
"=",
"self",
".",
"value",
"self",
".",
"_set_format",
"(",
")",
"self",
".",
"control",
".",
"label_lo",
".",
"SetLabel",
"(",
"self",
".",
"_format",
"%",
"low",
")",
"self",
".",
"control",
".",
"label_hi",
".",
"SetLabel",
"(",
"self",
".",
"_format",
"%",
"high",
")",
"if",
"high",
">",
"low",
":",
"ivalue",
"=",
"int",
"(",
"(",
"float",
"(",
"value",
"-",
"low",
")",
"/",
"(",
"high",
"-",
"low",
")",
")",
"*",
"10000.0",
")",
"else",
":",
"ivalue",
"=",
"low",
"self",
".",
"control",
".",
"slider",
".",
"SetValue",
"(",
"ivalue",
")",
"text",
"=",
"self",
".",
"_format",
"%",
"self",
".",
"value",
"self",
".",
"control",
".",
"text",
".",
"SetValue",
"(",
"text",
")",
"factory",
"=",
"self",
".",
"factory",
"f_low",
",",
"f_high",
"=",
"self",
".",
"low",
",",
"self",
".",
"high",
"if",
"low",
"==",
"f_low",
":",
"self",
".",
"control",
".",
"button_lo",
".",
"Disable",
"(",
")",
"else",
":",
"self",
".",
"control",
".",
"button_lo",
".",
"Enable",
"(",
")",
"if",
"high",
"==",
"f_high",
":",
"self",
".",
"control",
".",
"button_hi",
".",
"Disable",
"(",
")",
"else",
":",
"self",
".",
"control",
".",
"button_hi",
".",
"Enable",
"(",
")"
] |
https://github.com/enthought/traitsui/blob/b7c38c7a47bf6ae7971f9ddab70c8a358647dd25/traitsui/wx/range_editor.py#L572-L597
|
||
LMFDB/lmfdb
|
6cf48a4c18a96e6298da6ae43f587f96845bcb43
|
lmfdb/verify/char_dir_orbits.py
|
python
|
char_dir_orbits.check_char_dir_values_agg
|
(self)
|
return self.check_crosstable_count('char_dir_values', 'char_degree', 'orbit_label')
|
The number of entries in char_dir_values matching a given orbit_label should be char_degree
|
The number of entries in char_dir_values matching a given orbit_label should be char_degree
|
[
"The",
"number",
"of",
"entries",
"in",
"char_dir_values",
"matching",
"a",
"given",
"orbit_label",
"should",
"be",
"char_degree"
] |
def check_char_dir_values_agg(self):
"""
The number of entries in char_dir_values matching a given orbit_label should be char_degree
"""
# TIME about 750s
return self.check_crosstable_count('char_dir_values', 'char_degree', 'orbit_label')
|
[
"def",
"check_char_dir_values_agg",
"(",
"self",
")",
":",
"# TIME about 750s",
"return",
"self",
".",
"check_crosstable_count",
"(",
"'char_dir_values'",
",",
"'char_degree'",
",",
"'orbit_label'",
")"
] |
https://github.com/LMFDB/lmfdb/blob/6cf48a4c18a96e6298da6ae43f587f96845bcb43/lmfdb/verify/char_dir_orbits.py#L63-L68
|
|
CastagnaIT/plugin.video.netflix
|
5cf5fa436eb9956576c0f62aa31a4c7d6c5b8a4a
|
resources/lib/navigation/actions.py
|
python
|
AddonActionExecutor.change_watched_status
|
(self, videoid)
|
Change the watched status of a video, only when sync of watched status with NF is enabled
|
Change the watched status of a video, only when sync of watched status with NF is enabled
|
[
"Change",
"the",
"watched",
"status",
"of",
"a",
"video",
"only",
"when",
"sync",
"of",
"watched",
"status",
"with",
"NF",
"is",
"enabled"
] |
def change_watched_status(self, videoid):
"""Change the watched status of a video, only when sync of watched status with NF is enabled"""
change_watched_status_locally(videoid)
|
[
"def",
"change_watched_status",
"(",
"self",
",",
"videoid",
")",
":",
"change_watched_status_locally",
"(",
"videoid",
")"
] |
https://github.com/CastagnaIT/plugin.video.netflix/blob/5cf5fa436eb9956576c0f62aa31a4c7d6c5b8a4a/resources/lib/navigation/actions.py#L180-L182
|
||
Trusted-AI/adversarial-robustness-toolbox
|
9fabffdbb92947efa1ecc5d825d634d30dfbaf29
|
art/estimators/mxnet.py
|
python
|
MXEstimator.__init__
|
(self, **kwargs)
|
Estimator class for MXNet Gluon models.
|
Estimator class for MXNet Gluon models.
|
[
"Estimator",
"class",
"for",
"MXNet",
"Gluon",
"models",
"."
] |
def __init__(self, **kwargs) -> None:
"""
Estimator class for MXNet Gluon models.
"""
super().__init__(**kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")"
] |
https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/estimators/mxnet.py#L43-L47
|
||
Komodo/KomodoEdit
|
61edab75dce2bdb03943b387b0608ea36f548e8e
|
src/codeintel/play/core.py
|
python
|
Window.PopupMenuXY
|
(*args, **kwargs)
|
return _core.Window_PopupMenuXY(*args, **kwargs)
|
PopupMenuXY(Menu menu, int x, int y) -> bool
Pops up the given menu at the specified coordinates, relative to this
window, and returns control when the user has dismissed the menu. If a
menu item is selected, the corresponding menu event is generated and
will be processed as usual.
|
PopupMenuXY(Menu menu, int x, int y) -> bool
|
[
"PopupMenuXY",
"(",
"Menu",
"menu",
"int",
"x",
"int",
"y",
")",
"-",
">",
"bool"
] |
def PopupMenuXY(*args, **kwargs):
"""
PopupMenuXY(Menu menu, int x, int y) -> bool
Pops up the given menu at the specified coordinates, relative to this
window, and returns control when the user has dismissed the menu. If a
menu item is selected, the corresponding menu event is generated and
will be processed as usual.
"""
return _core.Window_PopupMenuXY(*args, **kwargs)
|
[
"def",
"PopupMenuXY",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core",
".",
"Window_PopupMenuXY",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/codeintel/play/core.py#L6718-L6727
|
|
FederatedAI/FATE
|
32540492623568ecd1afcb367360133616e02fa3
|
python/federatedml/feature/binning/bin_result.py
|
python
|
MultiClassBinResult.generated_pb_list
|
(self, split_points=None)
|
return res
|
[] |
def generated_pb_list(self, split_points=None):
res = []
for br in self.bin_results:
res.append(br.generated_pb(split_points))
return res
|
[
"def",
"generated_pb_list",
"(",
"self",
",",
"split_points",
"=",
"None",
")",
":",
"res",
"=",
"[",
"]",
"for",
"br",
"in",
"self",
".",
"bin_results",
":",
"res",
".",
"append",
"(",
"br",
".",
"generated_pb",
"(",
"split_points",
")",
")",
"return",
"res"
] |
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/feature/binning/bin_result.py#L263-L267
|
|||
lutris/lutris
|
66675a4d5537f6b2a2ba2b6df0b3cdf8924c823a
|
lutris/util/graphics/displayconfig.py
|
python
|
MutterDisplayManager.get_current_resolution
|
(self)
|
return str(current_mode.width), str(current_mode.height)
|
Return the current resolution for the primary display
|
Return the current resolution for the primary display
|
[
"Return",
"the",
"current",
"resolution",
"for",
"the",
"primary",
"display"
] |
def get_current_resolution(self):
"""Return the current resolution for the primary display"""
logger.debug("Retrieving current resolution")
current_mode = self.display_config.current_state.get_current_mode()
if not current_mode:
logger.error("Could not retrieve the current display mode")
return "", ""
return str(current_mode.width), str(current_mode.height)
|
[
"def",
"get_current_resolution",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Retrieving current resolution\"",
")",
"current_mode",
"=",
"self",
".",
"display_config",
".",
"current_state",
".",
"get_current_mode",
"(",
")",
"if",
"not",
"current_mode",
":",
"logger",
".",
"error",
"(",
"\"Could not retrieve the current display mode\"",
")",
"return",
"\"\"",
",",
"\"\"",
"return",
"str",
"(",
"current_mode",
".",
"width",
")",
",",
"str",
"(",
"current_mode",
".",
"height",
")"
] |
https://github.com/lutris/lutris/blob/66675a4d5537f6b2a2ba2b6df0b3cdf8924c823a/lutris/util/graphics/displayconfig.py#L622-L629
|
|
airnotifier/airnotifier
|
27207bc9e8ee8f3ac9db428a456020d9e55eabda
|
pushservices/wns.py
|
python
|
WNSClient.request_token
|
(self)
|
return accesstoken
|
[] |
def request_token(self):
payload = {
"grant_type": "client_credentials",
"client_id": self.clientid,
"client_secret": self.clientsecret,
"scope": "notify.windows.com",
}
response = requests.post(WNSACCESSTOKEN_URL, data=payload)
responsedata = response.json()
accesstoken = responsedata["access_token"]
self.app["wnsaccesstoken"] = accesstoken
self.app["wnstokenexpiry"] = int(responsedata["expires_in"]) + int(time.time())
self.masterdb.applications.update(
{"shortname": self.app["shortname"]}, self.app
)
return accesstoken
|
[
"def",
"request_token",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"\"grant_type\"",
":",
"\"client_credentials\"",
",",
"\"client_id\"",
":",
"self",
".",
"clientid",
",",
"\"client_secret\"",
":",
"self",
".",
"clientsecret",
",",
"\"scope\"",
":",
"\"notify.windows.com\"",
",",
"}",
"response",
"=",
"requests",
".",
"post",
"(",
"WNSACCESSTOKEN_URL",
",",
"data",
"=",
"payload",
")",
"responsedata",
"=",
"response",
".",
"json",
"(",
")",
"accesstoken",
"=",
"responsedata",
"[",
"\"access_token\"",
"]",
"self",
".",
"app",
"[",
"\"wnsaccesstoken\"",
"]",
"=",
"accesstoken",
"self",
".",
"app",
"[",
"\"wnstokenexpiry\"",
"]",
"=",
"int",
"(",
"responsedata",
"[",
"\"expires_in\"",
"]",
")",
"+",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"self",
".",
"masterdb",
".",
"applications",
".",
"update",
"(",
"{",
"\"shortname\"",
":",
"self",
".",
"app",
"[",
"\"shortname\"",
"]",
"}",
",",
"self",
".",
"app",
")",
"return",
"accesstoken"
] |
https://github.com/airnotifier/airnotifier/blob/27207bc9e8ee8f3ac9db428a456020d9e55eabda/pushservices/wns.py#L100-L115
|
|||
ninthDevilHAUNSTER/ArknightsAutoHelper
|
a27a930502d6e432368d9f62595a1d69a992f4e6
|
vendor/penguin_client/penguin_client/models/stage.py
|
python
|
Stage.is_gacha
|
(self, is_gacha)
|
Sets the is_gacha of this Stage.
:param is_gacha: The is_gacha of this Stage. # noqa: E501
:type: bool
|
Sets the is_gacha of this Stage.
|
[
"Sets",
"the",
"is_gacha",
"of",
"this",
"Stage",
"."
] |
def is_gacha(self, is_gacha):
"""Sets the is_gacha of this Stage.
:param is_gacha: The is_gacha of this Stage. # noqa: E501
:type: bool
"""
self._is_gacha = is_gacha
|
[
"def",
"is_gacha",
"(",
"self",
",",
"is_gacha",
")",
":",
"self",
".",
"_is_gacha",
"=",
"is_gacha"
] |
https://github.com/ninthDevilHAUNSTER/ArknightsAutoHelper/blob/a27a930502d6e432368d9f62595a1d69a992f4e6/vendor/penguin_client/penguin_client/models/stage.py#L216-L224
|
||
secdev/scapy
|
65089071da1acf54622df0b4fa7fc7673d47d3cd
|
scapy/layers/inet.py
|
python
|
fragment
|
(pkt, fragsize=1480)
|
return lst
|
Fragment a big IP datagram
|
Fragment a big IP datagram
|
[
"Fragment",
"a",
"big",
"IP",
"datagram"
] |
def fragment(pkt, fragsize=1480):
"""Fragment a big IP datagram"""
lastfragsz = fragsize
fragsize -= fragsize % 8
lst = []
for p in pkt:
s = raw(p[IP].payload)
nb = (len(s) - lastfragsz + fragsize - 1) // fragsize + 1
for i in range(nb):
q = p.copy()
del(q[IP].payload)
del(q[IP].chksum)
del(q[IP].len)
if i != nb - 1:
q[IP].flags |= 1
fragend = (i + 1) * fragsize
else:
fragend = i * fragsize + lastfragsz
q[IP].frag += i * fragsize // 8
r = conf.raw_layer(load=s[i * fragsize:fragend])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
|
[
"def",
"fragment",
"(",
"pkt",
",",
"fragsize",
"=",
"1480",
")",
":",
"lastfragsz",
"=",
"fragsize",
"fragsize",
"-=",
"fragsize",
"%",
"8",
"lst",
"=",
"[",
"]",
"for",
"p",
"in",
"pkt",
":",
"s",
"=",
"raw",
"(",
"p",
"[",
"IP",
"]",
".",
"payload",
")",
"nb",
"=",
"(",
"len",
"(",
"s",
")",
"-",
"lastfragsz",
"+",
"fragsize",
"-",
"1",
")",
"//",
"fragsize",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"nb",
")",
":",
"q",
"=",
"p",
".",
"copy",
"(",
")",
"del",
"(",
"q",
"[",
"IP",
"]",
".",
"payload",
")",
"del",
"(",
"q",
"[",
"IP",
"]",
".",
"chksum",
")",
"del",
"(",
"q",
"[",
"IP",
"]",
".",
"len",
")",
"if",
"i",
"!=",
"nb",
"-",
"1",
":",
"q",
"[",
"IP",
"]",
".",
"flags",
"|=",
"1",
"fragend",
"=",
"(",
"i",
"+",
"1",
")",
"*",
"fragsize",
"else",
":",
"fragend",
"=",
"i",
"*",
"fragsize",
"+",
"lastfragsz",
"q",
"[",
"IP",
"]",
".",
"frag",
"+=",
"i",
"*",
"fragsize",
"//",
"8",
"r",
"=",
"conf",
".",
"raw_layer",
"(",
"load",
"=",
"s",
"[",
"i",
"*",
"fragsize",
":",
"fragend",
"]",
")",
"r",
".",
"overload_fields",
"=",
"p",
"[",
"IP",
"]",
".",
"payload",
".",
"overload_fields",
".",
"copy",
"(",
")",
"q",
".",
"add_payload",
"(",
"r",
")",
"lst",
".",
"append",
"(",
"q",
")",
"return",
"lst"
] |
https://github.com/secdev/scapy/blob/65089071da1acf54622df0b4fa7fc7673d47d3cd/scapy/layers/inet.py#L1060-L1083
|
|
czq142857/BSP-NET-original
|
f69862990b1e9d95718053fc229d3b35aebfb511
|
modelSVR.py
|
python
|
BSP_SVR.__init__
|
(self, sess, phase, sample_vox_size, is_training = False, prev_ef_dim=32, ef_dim=64, c_dim=256, p_dim=4096, dataset_name='default', checkpoint_dir=None, sample_dir=None, data_dir='./data')
|
Args:
too lazy to explain
|
Args:
too lazy to explain
|
[
"Args",
":",
"too",
"lazy",
"to",
"explain"
] |
def __init__(self, sess, phase, sample_vox_size, is_training = False, prev_ef_dim=32, ef_dim=64, c_dim=256, p_dim=4096, dataset_name='default', checkpoint_dir=None, sample_dir=None, data_dir='./data'):
"""
Args:
too lazy to explain
"""
self.sess = sess
#progressive training
#1-- (16, 16*16*16)
#2-- (32, 16*16*16)
#3-- (64, 16*16*16*4)
self.sample_vox_size = sample_vox_size
if self.sample_vox_size==16:
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==32:
self.point_batch_size = 16*16*16
self.shape_batch_size = 32
elif self.sample_vox_size==64:
self.point_batch_size = 16*16*16*4
self.shape_batch_size = 8
self.input_size = 64 #input voxel grid size
#actual batch size
self.shape_batch_size = 64
self.view_size = 137
self.crop_size = 128
self.view_num = 24
self.crop_edge = self.view_size-self.crop_size
self.test_idx = 23
self.p_dim = p_dim
self.ef_dim = ef_dim
self.c_dim = c_dim
self.prev_ef_dim = prev_ef_dim
self.z_dim = prev_ef_dim*8
self.dataset_name = dataset_name
self.dataset_load = dataset_name + '_train'
if not is_training:
self.dataset_load = dataset_name + '_test'
self.checkpoint_dir = checkpoint_dir
self.data_dir = data_dir
data_hdf5_name = self.data_dir+'/'+self.dataset_load+'.hdf5'
if os.path.exists(data_hdf5_name):
data_dict = h5py.File(data_hdf5_name, 'r')
offset_x = int(self.crop_edge/2)
offset_y = int(self.crop_edge/2)
self.data_pixels = np.reshape(data_dict['pixels'][:,:,offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size], [-1,self.view_num,self.crop_size,self.crop_size,1])
else:
print("error: cannot load "+data_hdf5_name)
exit(0)
dataz_hdf5_name = self.checkpoint_dir+'/'+self.modelAE_dir+'/'+self.dataset_name+'_train_z.hdf5'
if os.path.exists(dataz_hdf5_name):
dataz_dict = h5py.File(dataz_hdf5_name, 'r')
self.data_zs = dataz_dict['zs'][:]
else:
print("warning: cannot load "+dataz_hdf5_name)
self.real_size = 64 #output point-value voxel grid size in testing
self.test_size = 64 #related to testing batch_size, adjust according to gpu memory size
test_point_batch_size = self.test_size*self.test_size*self.test_size #do not change
#get coords
dima = self.test_size
dim = self.real_size
self.aux_x = np.zeros([dima,dima,dima],np.uint8)
self.aux_y = np.zeros([dima,dima,dima],np.uint8)
self.aux_z = np.zeros([dima,dima,dima],np.uint8)
multiplier = int(dim/dima)
multiplier2 = multiplier*multiplier
multiplier3 = multiplier*multiplier*multiplier
for i in range(dima):
for j in range(dima):
for k in range(dima):
self.aux_x[i,j,k] = i*multiplier
self.aux_y[i,j,k] = j*multiplier
self.aux_z[i,j,k] = k*multiplier
self.coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
for i in range(multiplier):
for j in range(multiplier):
for k in range(multiplier):
self.coords[i*multiplier2+j*multiplier+k,:,:,:,0] = self.aux_x+i
self.coords[i*multiplier2+j*multiplier+k,:,:,:,1] = self.aux_y+j
self.coords[i*multiplier2+j*multiplier+k,:,:,:,2] = self.aux_z+k
self.coords = (self.coords+0.5)/dim-0.5
self.coords = np.reshape(self.coords,[multiplier3,test_point_batch_size,3])
self.build_model(phase)
|
[
"def",
"__init__",
"(",
"self",
",",
"sess",
",",
"phase",
",",
"sample_vox_size",
",",
"is_training",
"=",
"False",
",",
"prev_ef_dim",
"=",
"32",
",",
"ef_dim",
"=",
"64",
",",
"c_dim",
"=",
"256",
",",
"p_dim",
"=",
"4096",
",",
"dataset_name",
"=",
"'default'",
",",
"checkpoint_dir",
"=",
"None",
",",
"sample_dir",
"=",
"None",
",",
"data_dir",
"=",
"'./data'",
")",
":",
"self",
".",
"sess",
"=",
"sess",
"#progressive training",
"#1-- (16, 16*16*16)",
"#2-- (32, 16*16*16)",
"#3-- (64, 16*16*16*4)",
"self",
".",
"sample_vox_size",
"=",
"sample_vox_size",
"if",
"self",
".",
"sample_vox_size",
"==",
"16",
":",
"self",
".",
"point_batch_size",
"=",
"16",
"*",
"16",
"*",
"16",
"self",
".",
"shape_batch_size",
"=",
"32",
"elif",
"self",
".",
"sample_vox_size",
"==",
"32",
":",
"self",
".",
"point_batch_size",
"=",
"16",
"*",
"16",
"*",
"16",
"self",
".",
"shape_batch_size",
"=",
"32",
"elif",
"self",
".",
"sample_vox_size",
"==",
"64",
":",
"self",
".",
"point_batch_size",
"=",
"16",
"*",
"16",
"*",
"16",
"*",
"4",
"self",
".",
"shape_batch_size",
"=",
"8",
"self",
".",
"input_size",
"=",
"64",
"#input voxel grid size",
"#actual batch size",
"self",
".",
"shape_batch_size",
"=",
"64",
"self",
".",
"view_size",
"=",
"137",
"self",
".",
"crop_size",
"=",
"128",
"self",
".",
"view_num",
"=",
"24",
"self",
".",
"crop_edge",
"=",
"self",
".",
"view_size",
"-",
"self",
".",
"crop_size",
"self",
".",
"test_idx",
"=",
"23",
"self",
".",
"p_dim",
"=",
"p_dim",
"self",
".",
"ef_dim",
"=",
"ef_dim",
"self",
".",
"c_dim",
"=",
"c_dim",
"self",
".",
"prev_ef_dim",
"=",
"prev_ef_dim",
"self",
".",
"z_dim",
"=",
"prev_ef_dim",
"*",
"8",
"self",
".",
"dataset_name",
"=",
"dataset_name",
"self",
".",
"dataset_load",
"=",
"dataset_name",
"+",
"'_train'",
"if",
"not",
"is_training",
":",
"self",
".",
"dataset_load",
"=",
"dataset_name",
"+",
"'_test'",
"self",
".",
"checkpoint_dir",
"=",
"checkpoint_dir",
"self",
".",
"data_dir",
"=",
"data_dir",
"data_hdf5_name",
"=",
"self",
".",
"data_dir",
"+",
"'/'",
"+",
"self",
".",
"dataset_load",
"+",
"'.hdf5'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"data_hdf5_name",
")",
":",
"data_dict",
"=",
"h5py",
".",
"File",
"(",
"data_hdf5_name",
",",
"'r'",
")",
"offset_x",
"=",
"int",
"(",
"self",
".",
"crop_edge",
"/",
"2",
")",
"offset_y",
"=",
"int",
"(",
"self",
".",
"crop_edge",
"/",
"2",
")",
"self",
".",
"data_pixels",
"=",
"np",
".",
"reshape",
"(",
"data_dict",
"[",
"'pixels'",
"]",
"[",
":",
",",
":",
",",
"offset_y",
":",
"offset_y",
"+",
"self",
".",
"crop_size",
",",
"offset_x",
":",
"offset_x",
"+",
"self",
".",
"crop_size",
"]",
",",
"[",
"-",
"1",
",",
"self",
".",
"view_num",
",",
"self",
".",
"crop_size",
",",
"self",
".",
"crop_size",
",",
"1",
"]",
")",
"else",
":",
"print",
"(",
"\"error: cannot load \"",
"+",
"data_hdf5_name",
")",
"exit",
"(",
"0",
")",
"dataz_hdf5_name",
"=",
"self",
".",
"checkpoint_dir",
"+",
"'/'",
"+",
"self",
".",
"modelAE_dir",
"+",
"'/'",
"+",
"self",
".",
"dataset_name",
"+",
"'_train_z.hdf5'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dataz_hdf5_name",
")",
":",
"dataz_dict",
"=",
"h5py",
".",
"File",
"(",
"dataz_hdf5_name",
",",
"'r'",
")",
"self",
".",
"data_zs",
"=",
"dataz_dict",
"[",
"'zs'",
"]",
"[",
":",
"]",
"else",
":",
"print",
"(",
"\"warning: cannot load \"",
"+",
"dataz_hdf5_name",
")",
"self",
".",
"real_size",
"=",
"64",
"#output point-value voxel grid size in testing",
"self",
".",
"test_size",
"=",
"64",
"#related to testing batch_size, adjust according to gpu memory size",
"test_point_batch_size",
"=",
"self",
".",
"test_size",
"*",
"self",
".",
"test_size",
"*",
"self",
".",
"test_size",
"#do not change",
"#get coords",
"dima",
"=",
"self",
".",
"test_size",
"dim",
"=",
"self",
".",
"real_size",
"self",
".",
"aux_x",
"=",
"np",
".",
"zeros",
"(",
"[",
"dima",
",",
"dima",
",",
"dima",
"]",
",",
"np",
".",
"uint8",
")",
"self",
".",
"aux_y",
"=",
"np",
".",
"zeros",
"(",
"[",
"dima",
",",
"dima",
",",
"dima",
"]",
",",
"np",
".",
"uint8",
")",
"self",
".",
"aux_z",
"=",
"np",
".",
"zeros",
"(",
"[",
"dima",
",",
"dima",
",",
"dima",
"]",
",",
"np",
".",
"uint8",
")",
"multiplier",
"=",
"int",
"(",
"dim",
"/",
"dima",
")",
"multiplier2",
"=",
"multiplier",
"*",
"multiplier",
"multiplier3",
"=",
"multiplier",
"*",
"multiplier",
"*",
"multiplier",
"for",
"i",
"in",
"range",
"(",
"dima",
")",
":",
"for",
"j",
"in",
"range",
"(",
"dima",
")",
":",
"for",
"k",
"in",
"range",
"(",
"dima",
")",
":",
"self",
".",
"aux_x",
"[",
"i",
",",
"j",
",",
"k",
"]",
"=",
"i",
"*",
"multiplier",
"self",
".",
"aux_y",
"[",
"i",
",",
"j",
",",
"k",
"]",
"=",
"j",
"*",
"multiplier",
"self",
".",
"aux_z",
"[",
"i",
",",
"j",
",",
"k",
"]",
"=",
"k",
"*",
"multiplier",
"self",
".",
"coords",
"=",
"np",
".",
"zeros",
"(",
"[",
"multiplier3",
",",
"dima",
",",
"dima",
",",
"dima",
",",
"3",
"]",
",",
"np",
".",
"float32",
")",
"for",
"i",
"in",
"range",
"(",
"multiplier",
")",
":",
"for",
"j",
"in",
"range",
"(",
"multiplier",
")",
":",
"for",
"k",
"in",
"range",
"(",
"multiplier",
")",
":",
"self",
".",
"coords",
"[",
"i",
"*",
"multiplier2",
"+",
"j",
"*",
"multiplier",
"+",
"k",
",",
":",
",",
":",
",",
":",
",",
"0",
"]",
"=",
"self",
".",
"aux_x",
"+",
"i",
"self",
".",
"coords",
"[",
"i",
"*",
"multiplier2",
"+",
"j",
"*",
"multiplier",
"+",
"k",
",",
":",
",",
":",
",",
":",
",",
"1",
"]",
"=",
"self",
".",
"aux_y",
"+",
"j",
"self",
".",
"coords",
"[",
"i",
"*",
"multiplier2",
"+",
"j",
"*",
"multiplier",
"+",
"k",
",",
":",
",",
":",
",",
":",
",",
"2",
"]",
"=",
"self",
".",
"aux_z",
"+",
"k",
"self",
".",
"coords",
"=",
"(",
"self",
".",
"coords",
"+",
"0.5",
")",
"/",
"dim",
"-",
"0.5",
"self",
".",
"coords",
"=",
"np",
".",
"reshape",
"(",
"self",
".",
"coords",
",",
"[",
"multiplier3",
",",
"test_point_batch_size",
",",
"3",
"]",
")",
"self",
".",
"build_model",
"(",
"phase",
")"
] |
https://github.com/czq142857/BSP-NET-original/blob/f69862990b1e9d95718053fc229d3b35aebfb511/modelSVR.py#L16-L106
|
||
1040003585/WebScrapingWithPython
|
a770fa5b03894076c8c9539b1ffff34424ffc016
|
portia_examle/lib/python2.7/site-packages/wheel/bdist_wheel.py
|
python
|
bdist_wheel.wheel_dist_name
|
(self)
|
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version())))
|
Return distribution full name with - replaced with _
|
Return distribution full name with - replaced with _
|
[
"Return",
"distribution",
"full",
"name",
"with",
"-",
"replaced",
"with",
"_"
] |
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version())))
|
[
"def",
"wheel_dist_name",
"(",
"self",
")",
":",
"return",
"'-'",
".",
"join",
"(",
"(",
"safer_name",
"(",
"self",
".",
"distribution",
".",
"get_name",
"(",
")",
")",
",",
"safer_version",
"(",
"self",
".",
"distribution",
".",
"get_version",
"(",
")",
")",
")",
")"
] |
https://github.com/1040003585/WebScrapingWithPython/blob/a770fa5b03894076c8c9539b1ffff34424ffc016/portia_examle/lib/python2.7/site-packages/wheel/bdist_wheel.py#L128-L131
|
|
learningequality/kolibri
|
d056dbc477aaf651ab843caa141a6a1e0a491046
|
kolibri/core/auth/management/commands/bulkimportusers.py
|
python
|
Validator.validate
|
(self, data)
|
Validate `data` and return an iterator over errors found.
|
Validate `data` and return an iterator over errors found.
|
[
"Validate",
"data",
"and",
"return",
"an",
"iterator",
"over",
"errors",
"found",
"."
] |
def validate(self, data):
"""
Validate `data` and return an iterator over errors found.
"""
for index, row in enumerate(data):
error_flag = False
username = self.get_username(row)
if not username:
error = {
"row": index + 1,
"message": MESSAGES[DUPLICATED_USERNAME],
"field": "USERNAME",
"value": row.get(self.header_translation["USERNAME"]),
}
error_flag = True
yield error
for header_name, check, message in self._checks:
value = row[self.header_translation[header_name]]
try:
check(value)
except ValueError:
error = {
"row": index + 1,
"message": message,
"field": header_name,
"value": value,
}
error_flag = True
yield error
except Exception as e:
error = {
"row": index + 1,
"message": MESSAGES[UNEXPECTED_EXCEPTION].format(
(e.__class__.__name__, e)
),
"field": header_name,
"value": value,
}
error_flag = True
yield error
# if there aren't any errors, let's add the user and classes
if not error_flag:
self.check_classroom(row, username)
row["position"] = index + 1
self.users[username] = row
|
[
"def",
"validate",
"(",
"self",
",",
"data",
")",
":",
"for",
"index",
",",
"row",
"in",
"enumerate",
"(",
"data",
")",
":",
"error_flag",
"=",
"False",
"username",
"=",
"self",
".",
"get_username",
"(",
"row",
")",
"if",
"not",
"username",
":",
"error",
"=",
"{",
"\"row\"",
":",
"index",
"+",
"1",
",",
"\"message\"",
":",
"MESSAGES",
"[",
"DUPLICATED_USERNAME",
"]",
",",
"\"field\"",
":",
"\"USERNAME\"",
",",
"\"value\"",
":",
"row",
".",
"get",
"(",
"self",
".",
"header_translation",
"[",
"\"USERNAME\"",
"]",
")",
",",
"}",
"error_flag",
"=",
"True",
"yield",
"error",
"for",
"header_name",
",",
"check",
",",
"message",
"in",
"self",
".",
"_checks",
":",
"value",
"=",
"row",
"[",
"self",
".",
"header_translation",
"[",
"header_name",
"]",
"]",
"try",
":",
"check",
"(",
"value",
")",
"except",
"ValueError",
":",
"error",
"=",
"{",
"\"row\"",
":",
"index",
"+",
"1",
",",
"\"message\"",
":",
"message",
",",
"\"field\"",
":",
"header_name",
",",
"\"value\"",
":",
"value",
",",
"}",
"error_flag",
"=",
"True",
"yield",
"error",
"except",
"Exception",
"as",
"e",
":",
"error",
"=",
"{",
"\"row\"",
":",
"index",
"+",
"1",
",",
"\"message\"",
":",
"MESSAGES",
"[",
"UNEXPECTED_EXCEPTION",
"]",
".",
"format",
"(",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"e",
")",
")",
",",
"\"field\"",
":",
"header_name",
",",
"\"value\"",
":",
"value",
",",
"}",
"error_flag",
"=",
"True",
"yield",
"error",
"# if there aren't any errors, let's add the user and classes",
"if",
"not",
"error_flag",
":",
"self",
".",
"check_classroom",
"(",
"row",
",",
"username",
")",
"row",
"[",
"\"position\"",
"]",
"=",
"index",
"+",
"1",
"self",
".",
"users",
"[",
"username",
"]",
"=",
"row"
] |
https://github.com/learningequality/kolibri/blob/d056dbc477aaf651ab843caa141a6a1e0a491046/kolibri/core/auth/management/commands/bulkimportusers.py#L306-L351
|
||
google/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
grr/server/grr_response_server/databases/db.py
|
python
|
Database.CountHuntResults
|
(self, hunt_id, with_tag=None, with_type=None)
|
Counts hunt results of a given hunt using given query options.
If both with_tag and with_type arguments are provided, they will be applied
using AND boolean operator.
Args:
hunt_id: The id of the hunt to count results for.
with_tag: (Optional) When specified, should be a string. Only results
having specified tag will be accounted for.
with_type: (Optional) When specified, should be a string. Only results of
a specified type will be accounted for.
Returns:
A number of hunt results of a given hunt matching given query options.
|
Counts hunt results of a given hunt using given query options.
|
[
"Counts",
"hunt",
"results",
"of",
"a",
"given",
"hunt",
"using",
"given",
"query",
"options",
"."
] |
def CountHuntResults(self, hunt_id, with_tag=None, with_type=None):
"""Counts hunt results of a given hunt using given query options.
If both with_tag and with_type arguments are provided, they will be applied
using AND boolean operator.
Args:
hunt_id: The id of the hunt to count results for.
with_tag: (Optional) When specified, should be a string. Only results
having specified tag will be accounted for.
with_type: (Optional) When specified, should be a string. Only results of
a specified type will be accounted for.
Returns:
A number of hunt results of a given hunt matching given query options.
"""
|
[
"def",
"CountHuntResults",
"(",
"self",
",",
"hunt_id",
",",
"with_tag",
"=",
"None",
",",
"with_type",
"=",
"None",
")",
":"
] |
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/databases/db.py#L2693-L2708
|
||
zorkian/nagios-api
|
15706da83aa78ef0a196ecaad1738e9a011141e0
|
nagios/core.py
|
python
|
Nagios.__init__
|
(self, statusfile=None)
|
Create a new Nagios state store. One argument, statusfile, is used to
indicate where the status file is. This object is intended to be read-only
once it has been created.
|
Create a new Nagios state store. One argument, statusfile, is used to
indicate where the status file is. This object is intended to be read-only
once it has been created.
|
[
"Create",
"a",
"new",
"Nagios",
"state",
"store",
".",
"One",
"argument",
"statusfile",
"is",
"used",
"to",
"indicate",
"where",
"the",
"status",
"file",
"is",
".",
"This",
"object",
"is",
"intended",
"to",
"be",
"read",
"-",
"only",
"once",
"it",
"has",
"been",
"created",
"."
] |
def __init__(self, statusfile=None):
'''Create a new Nagios state store. One argument, statusfile, is used to
indicate where the status file is. This object is intended to be read-only
once it has been created.
'''
self.info = Info({})
self.program = Program({})
self.hosts = {}
self.services = {}
self.comments = {}
self.downtimes = {}
if statusfile is not None:
self._update(statusfile)
|
[
"def",
"__init__",
"(",
"self",
",",
"statusfile",
"=",
"None",
")",
":",
"self",
".",
"info",
"=",
"Info",
"(",
"{",
"}",
")",
"self",
".",
"program",
"=",
"Program",
"(",
"{",
"}",
")",
"self",
".",
"hosts",
"=",
"{",
"}",
"self",
".",
"services",
"=",
"{",
"}",
"self",
".",
"comments",
"=",
"{",
"}",
"self",
".",
"downtimes",
"=",
"{",
"}",
"if",
"statusfile",
"is",
"not",
"None",
":",
"self",
".",
"_update",
"(",
"statusfile",
")"
] |
https://github.com/zorkian/nagios-api/blob/15706da83aa78ef0a196ecaad1738e9a011141e0/nagios/core.py#L11-L24
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/whoosh/filedb/gae.py
|
python
|
DatastoreStorage.total_size
|
(self)
|
return sum(self.file_length(f) for f in self.list())
|
[] |
def total_size(self):
return sum(self.file_length(f) for f in self.list())
|
[
"def",
"total_size",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"self",
".",
"file_length",
"(",
"f",
")",
"for",
"f",
"in",
"self",
".",
"list",
"(",
")",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/filedb/gae.py#L127-L128
|
|||
AndrewAnnex/SpiceyPy
|
9f8b626338f119bacd39ef2ba94a6f71bd6341c0
|
src/spiceypy/spiceypy.py
|
python
|
irfnum
|
(name: str)
|
return index.value
|
Return the index of one of the standard inertial reference
frames supported by :func:`irfrot`
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/spicelib/irfnum.html
:param name: Name of standard inertial reference frame.
:return: is the index of the frame.
|
Return the index of one of the standard inertial reference
frames supported by :func:`irfrot`
|
[
"Return",
"the",
"index",
"of",
"one",
"of",
"the",
"standard",
"inertial",
"reference",
"frames",
"supported",
"by",
":",
"func",
":",
"irfrot"
] |
def irfnum(name: str) -> int:
"""
Return the index of one of the standard inertial reference
frames supported by :func:`irfrot`
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/spicelib/irfnum.html
:param name: Name of standard inertial reference frame.
:return: is the index of the frame.
"""
index = ctypes.c_int()
name_len = ctypes.c_int(len(name))
name = stypes.string_to_char_p(name)
libspice.irfnum_(name, ctypes.byref(index), name_len)
return index.value
|
[
"def",
"irfnum",
"(",
"name",
":",
"str",
")",
"->",
"int",
":",
"index",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"name_len",
"=",
"ctypes",
".",
"c_int",
"(",
"len",
"(",
"name",
")",
")",
"name",
"=",
"stypes",
".",
"string_to_char_p",
"(",
"name",
")",
"libspice",
".",
"irfnum_",
"(",
"name",
",",
"ctypes",
".",
"byref",
"(",
"index",
")",
",",
"name_len",
")",
"return",
"index",
".",
"value"
] |
https://github.com/AndrewAnnex/SpiceyPy/blob/9f8b626338f119bacd39ef2ba94a6f71bd6341c0/src/spiceypy/spiceypy.py#L7548-L7562
|
|
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/pandas/io/pytables.py
|
python
|
AppendableMultiSeriesTable.write
|
(self, obj, **kwargs)
|
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
|
we are going to write this as a frame table
|
we are going to write this as a frame table
|
[
"we",
"are",
"going",
"to",
"write",
"this",
"as",
"a",
"frame",
"table"
] |
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
|
[
"def",
"write",
"(",
"self",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"obj",
".",
"name",
"or",
"'values'",
"obj",
",",
"self",
".",
"levels",
"=",
"self",
".",
"validate_multiindex",
"(",
"obj",
")",
"cols",
"=",
"list",
"(",
"self",
".",
"levels",
")",
"cols",
".",
"append",
"(",
"name",
")",
"obj",
".",
"columns",
"=",
"cols",
"return",
"super",
"(",
"AppendableMultiSeriesTable",
",",
"self",
")",
".",
"write",
"(",
"obj",
"=",
"obj",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pandas/io/pytables.py#L4198-L4205
|
|
nosmokingbandit/Watcher3
|
0217e75158b563bdefc8e01c3be7620008cf3977
|
lib/requests/packages/urllib3/connectionpool.py
|
python
|
HTTPConnectionPool._get_timeout
|
(self, timeout)
|
Helper that always returns a :class:`urllib3.util.Timeout`
|
Helper that always returns a :class:`urllib3.util.Timeout`
|
[
"Helper",
"that",
"always",
"returns",
"a",
":",
"class",
":",
"urllib3",
".",
"util",
".",
"Timeout"
] |
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
|
[
"def",
"_get_timeout",
"(",
"self",
",",
"timeout",
")",
":",
"if",
"timeout",
"is",
"_Default",
":",
"return",
"self",
".",
"timeout",
".",
"clone",
"(",
")",
"if",
"isinstance",
"(",
"timeout",
",",
"Timeout",
")",
":",
"return",
"timeout",
".",
"clone",
"(",
")",
"else",
":",
"# User passed us an int/float. This is for backwards compatibility,",
"# can be removed later",
"return",
"Timeout",
".",
"from_float",
"(",
"timeout",
")"
] |
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/requests/packages/urllib3/connectionpool.py#L292-L302
|
||
InvestmentSystems/static-frame
|
0b19d6969bf6c17fb0599871aca79eb3b52cf2ed
|
static_frame/core/type_blocks.py
|
python
|
TypeBlocks.fillfalsy_backward
|
(self,
limit: int = 0,
*,
axis: int = 0)
|
Return a new ``TypeBlocks`` after feeding backward the last non-falsy observation across contiguous missing values. Backward, axis 0 fills columns, going from bottom to top. Backward axis 1 fills rows, going from right to left.
|
Return a new ``TypeBlocks`` after feeding backward the last non-falsy observation across contiguous missing values. Backward, axis 0 fills columns, going from bottom to top. Backward axis 1 fills rows, going from right to left.
|
[
"Return",
"a",
"new",
"TypeBlocks",
"after",
"feeding",
"backward",
"the",
"last",
"non",
"-",
"falsy",
"observation",
"across",
"contiguous",
"missing",
"values",
".",
"Backward",
"axis",
"0",
"fills",
"columns",
"going",
"from",
"bottom",
"to",
"top",
".",
"Backward",
"axis",
"1",
"fills",
"rows",
"going",
"from",
"right",
"to",
"left",
"."
] |
def fillfalsy_backward(self,
limit: int = 0,
*,
axis: int = 0) -> 'TypeBlocks':
'''Return a new ``TypeBlocks`` after feeding backward the last non-falsy observation across contiguous missing values. Backward, axis 0 fills columns, going from bottom to top. Backward axis 1 fills rows, going from right to left.
'''
if axis == 0:
return self.from_blocks(self._fill_missing_directional_axis_0(
blocks=self._blocks,
directional_forward=False,
func_target=isfalsy_array,
limit=limit
))
elif axis == 1:
blocks = reversed(tuple(self._fill_missing_directional_axis_1(
blocks=self._blocks,
directional_forward=False,
func_target=isfalsy_array,
limit=limit
)))
return self.from_blocks(blocks)
raise AxisInvalid(f'no support for axis {axis}')
|
[
"def",
"fillfalsy_backward",
"(",
"self",
",",
"limit",
":",
"int",
"=",
"0",
",",
"*",
",",
"axis",
":",
"int",
"=",
"0",
")",
"->",
"'TypeBlocks'",
":",
"if",
"axis",
"==",
"0",
":",
"return",
"self",
".",
"from_blocks",
"(",
"self",
".",
"_fill_missing_directional_axis_0",
"(",
"blocks",
"=",
"self",
".",
"_blocks",
",",
"directional_forward",
"=",
"False",
",",
"func_target",
"=",
"isfalsy_array",
",",
"limit",
"=",
"limit",
")",
")",
"elif",
"axis",
"==",
"1",
":",
"blocks",
"=",
"reversed",
"(",
"tuple",
"(",
"self",
".",
"_fill_missing_directional_axis_1",
"(",
"blocks",
"=",
"self",
".",
"_blocks",
",",
"directional_forward",
"=",
"False",
",",
"func_target",
"=",
"isfalsy_array",
",",
"limit",
"=",
"limit",
")",
")",
")",
"return",
"self",
".",
"from_blocks",
"(",
"blocks",
")",
"raise",
"AxisInvalid",
"(",
"f'no support for axis {axis}'",
")"
] |
https://github.com/InvestmentSystems/static-frame/blob/0b19d6969bf6c17fb0599871aca79eb3b52cf2ed/static_frame/core/type_blocks.py#L3383-L3405
|
||
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
|
cb692f527e4e819b6c228187c5702d990a180043
|
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/email/utils.py
|
python
|
formatdate
|
(timeval=None, localtime=False, usegmt=False)
|
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
now[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
now[0], now[3], now[4], now[5],
zone)
|
Returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Optional timeval if given is a floating point time value as accepted by
gmtime() and localtime(), otherwise the current time is used.
Optional localtime is a flag that when True, interprets timeval, and
returns a date relative to the local timezone instead of UTC, properly
taking daylight savings time into account.
Optional argument usegmt means that the timezone is written out as
an ascii string, not numeric one (so "GMT" instead of "+0000"). This
is needed for HTTP, and is only used when localtime==False.
|
Returns a date string as specified by RFC 2822, e.g.:
|
[
"Returns",
"a",
"date",
"string",
"as",
"specified",
"by",
"RFC",
"2822",
"e",
".",
"g",
".",
":"
] |
def formatdate(timeval=None, localtime=False, usegmt=False):
"""Returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Optional timeval if given is a floating point time value as accepted by
gmtime() and localtime(), otherwise the current time is used.
Optional localtime is a flag that when True, interprets timeval, and
returns a date relative to the local timezone instead of UTC, properly
taking daylight savings time into account.
Optional argument usegmt means that the timezone is written out as
an ascii string, not numeric one (so "GMT" instead of "+0000"). This
is needed for HTTP, and is only used when localtime==False.
"""
# Note: we cannot use strftime() because that honors the locale and RFC
# 2822 requires that day and month names be the English abbreviations.
if timeval is None:
timeval = time.time()
if localtime:
now = time.localtime(timeval)
# Calculate timezone offset, based on whether the local zone has
# daylight savings time, and whether DST is in effect.
if time.daylight and now[-1]:
offset = time.altzone
else:
offset = time.timezone
hours, minutes = divmod(abs(offset), 3600)
# Remember offset is in seconds west of UTC, but the timezone is in
# minutes east of UTC, so the signs differ.
if offset > 0:
sign = '-'
else:
sign = '+'
zone = '%s%02d%02d' % (sign, hours, minutes // 60)
else:
now = time.gmtime(timeval)
# Timezone offset is always -0000
if usegmt:
zone = 'GMT'
else:
zone = '-0000'
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
now[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
now[0], now[3], now[4], now[5],
zone)
|
[
"def",
"formatdate",
"(",
"timeval",
"=",
"None",
",",
"localtime",
"=",
"False",
",",
"usegmt",
"=",
"False",
")",
":",
"# Note: we cannot use strftime() because that honors the locale and RFC",
"# 2822 requires that day and month names be the English abbreviations.",
"if",
"timeval",
"is",
"None",
":",
"timeval",
"=",
"time",
".",
"time",
"(",
")",
"if",
"localtime",
":",
"now",
"=",
"time",
".",
"localtime",
"(",
"timeval",
")",
"# Calculate timezone offset, based on whether the local zone has",
"# daylight savings time, and whether DST is in effect.",
"if",
"time",
".",
"daylight",
"and",
"now",
"[",
"-",
"1",
"]",
":",
"offset",
"=",
"time",
".",
"altzone",
"else",
":",
"offset",
"=",
"time",
".",
"timezone",
"hours",
",",
"minutes",
"=",
"divmod",
"(",
"abs",
"(",
"offset",
")",
",",
"3600",
")",
"# Remember offset is in seconds west of UTC, but the timezone is in",
"# minutes east of UTC, so the signs differ.",
"if",
"offset",
">",
"0",
":",
"sign",
"=",
"'-'",
"else",
":",
"sign",
"=",
"'+'",
"zone",
"=",
"'%s%02d%02d'",
"%",
"(",
"sign",
",",
"hours",
",",
"minutes",
"//",
"60",
")",
"else",
":",
"now",
"=",
"time",
".",
"gmtime",
"(",
"timeval",
")",
"# Timezone offset is always -0000",
"if",
"usegmt",
":",
"zone",
"=",
"'GMT'",
"else",
":",
"zone",
"=",
"'-0000'",
"return",
"'%s, %02d %s %04d %02d:%02d:%02d %s'",
"%",
"(",
"[",
"'Mon'",
",",
"'Tue'",
",",
"'Wed'",
",",
"'Thu'",
",",
"'Fri'",
",",
"'Sat'",
",",
"'Sun'",
"]",
"[",
"now",
"[",
"6",
"]",
"]",
",",
"now",
"[",
"2",
"]",
",",
"[",
"'Jan'",
",",
"'Feb'",
",",
"'Mar'",
",",
"'Apr'",
",",
"'May'",
",",
"'Jun'",
",",
"'Jul'",
",",
"'Aug'",
",",
"'Sep'",
",",
"'Oct'",
",",
"'Nov'",
",",
"'Dec'",
"]",
"[",
"now",
"[",
"1",
"]",
"-",
"1",
"]",
",",
"now",
"[",
"0",
"]",
",",
"now",
"[",
"3",
"]",
",",
"now",
"[",
"4",
"]",
",",
"now",
"[",
"5",
"]",
",",
"zone",
")"
] |
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/email/utils.py#L124-L173
|
|
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/mako/util.py
|
python
|
LRUCache.__getitem__
|
(self, key)
|
return item.value
|
[] |
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item.timestamp = timeit.default_timer()
return item.value
|
[
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"item",
"=",
"dict",
".",
"__getitem__",
"(",
"self",
",",
"key",
")",
"item",
".",
"timestamp",
"=",
"timeit",
".",
"default_timer",
"(",
")",
"return",
"item",
".",
"value"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/mako/util.py#L195-L198
|
|||
google/grr
|
8ad8a4d2c5a93c92729206b7771af19d92d4f915
|
grr/client/grr_response_client/comms.py
|
python
|
SizeLimitedQueue.Size
|
(self)
|
return self._total_size
|
[] |
def Size(self):
return self._total_size
|
[
"def",
"Size",
"(",
"self",
")",
":",
"return",
"self",
".",
"_total_size"
] |
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/client/grr_response_client/comms.py#L911-L912
|
|||
chubin/cheat.sh
|
46d1a5f73c6b88da15d809154245dbf234e9479e
|
lib/search.py
|
python
|
_parse_options
|
(options)
|
return search_options
|
Parse search options string into optiond_dict
|
Parse search options string into optiond_dict
|
[
"Parse",
"search",
"options",
"string",
"into",
"optiond_dict"
] |
def _parse_options(options):
"""Parse search options string into optiond_dict
"""
if options is None:
return {}
search_options = {
'insensitive': 'i' in options,
'word_boundaries': 'b' in options,
'recursive': 'r' in options,
}
return search_options
|
[
"def",
"_parse_options",
"(",
"options",
")",
":",
"if",
"options",
"is",
"None",
":",
"return",
"{",
"}",
"search_options",
"=",
"{",
"'insensitive'",
":",
"'i'",
"in",
"options",
",",
"'word_boundaries'",
":",
"'b'",
"in",
"options",
",",
"'recursive'",
":",
"'r'",
"in",
"options",
",",
"}",
"return",
"search_options"
] |
https://github.com/chubin/cheat.sh/blob/46d1a5f73c6b88da15d809154245dbf234e9479e/lib/search.py#L35-L47
|
|
asyncio-docs/asyncio-doc
|
8ac6b818fd060163b265da42842a6fd1c5d34181
|
examples/simple_server.py
|
python
|
MyRequestHandler.do_GET
|
(self)
|
Respond after seconds given in path.
|
Respond after seconds given in path.
|
[
"Respond",
"after",
"seconds",
"given",
"in",
"path",
"."
] |
def do_GET(self): # pylint: disable=invalid-name
"""Respond after seconds given in path.
"""
try:
seconds = float(self.path[1:])
except ValueError:
seconds = 0.0
if seconds < 0:
seconds = 0.0
text = "Waited for {:4.2f} seconds.\nThat's all.\n"
msg = text.format(seconds).encode(ENCODING)
time.sleep(seconds)
self.send_response(200)
self.send_header("Content-type", 'text/plain; charset=utf-8')
self.send_header("Content-length", str(len(msg)))
self.end_headers()
self.wfile.write(msg)
|
[
"def",
"do_GET",
"(",
"self",
")",
":",
"# pylint: disable=invalid-name",
"try",
":",
"seconds",
"=",
"float",
"(",
"self",
".",
"path",
"[",
"1",
":",
"]",
")",
"except",
"ValueError",
":",
"seconds",
"=",
"0.0",
"if",
"seconds",
"<",
"0",
":",
"seconds",
"=",
"0.0",
"text",
"=",
"\"Waited for {:4.2f} seconds.\\nThat's all.\\n\"",
"msg",
"=",
"text",
".",
"format",
"(",
"seconds",
")",
".",
"encode",
"(",
"ENCODING",
")",
"time",
".",
"sleep",
"(",
"seconds",
")",
"self",
".",
"send_response",
"(",
"200",
")",
"self",
".",
"send_header",
"(",
"\"Content-type\"",
",",
"'text/plain; charset=utf-8'",
")",
"self",
".",
"send_header",
"(",
"\"Content-length\"",
",",
"str",
"(",
"len",
"(",
"msg",
")",
")",
")",
"self",
".",
"end_headers",
"(",
")",
"self",
".",
"wfile",
".",
"write",
"(",
"msg",
")"
] |
https://github.com/asyncio-docs/asyncio-doc/blob/8ac6b818fd060163b265da42842a6fd1c5d34181/examples/simple_server.py#L24-L40
|
||
miketeo/pysmb
|
fc3faca073385b8abc4a503bb4439f849840f94c
|
python3/smb/base.py
|
python
|
SharedFile.isDirectory
|
(self)
|
return bool(self.file_attributes & ATTR_DIRECTORY)
|
A convenience property to return True if this file resource is a directory on the remote server
|
A convenience property to return True if this file resource is a directory on the remote server
|
[
"A",
"convenience",
"property",
"to",
"return",
"True",
"if",
"this",
"file",
"resource",
"is",
"a",
"directory",
"on",
"the",
"remote",
"server"
] |
def isDirectory(self):
"""A convenience property to return True if this file resource is a directory on the remote server"""
return bool(self.file_attributes & ATTR_DIRECTORY)
|
[
"def",
"isDirectory",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"file_attributes",
"&",
"ATTR_DIRECTORY",
")"
] |
https://github.com/miketeo/pysmb/blob/fc3faca073385b8abc4a503bb4439f849840f94c/python3/smb/base.py#L3010-L3012
|
|
pydicom/pydicom
|
935de3b4ac94a5f520f3c91b42220ff0f13bce54
|
pydicom/benchmarks/bench_handler_numpy.py
|
python
|
TimeGetPixelData.time_32bit_3sample_1frame
|
(self)
|
Time retrieval of 32-bit, 3 sample/pixel, 1 frame.
|
Time retrieval of 32-bit, 3 sample/pixel, 1 frame.
|
[
"Time",
"retrieval",
"of",
"32",
"-",
"bit",
"3",
"sample",
"/",
"pixel",
"1",
"frame",
"."
] |
def time_32bit_3sample_1frame(self):
"""Time retrieval of 32-bit, 3 sample/pixel, 1 frame."""
for ii in range(self.no_runs):
get_pixeldata(self.ds_32_3_1)
|
[
"def",
"time_32bit_3sample_1frame",
"(",
"self",
")",
":",
"for",
"ii",
"in",
"range",
"(",
"self",
".",
"no_runs",
")",
":",
"get_pixeldata",
"(",
"self",
".",
"ds_32_3_1",
")"
] |
https://github.com/pydicom/pydicom/blob/935de3b4ac94a5f520f3c91b42220ff0f13bce54/pydicom/benchmarks/bench_handler_numpy.py#L203-L206
|
||
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/twisted/twisted/trial/reporter.py
|
python
|
TestResult.startTest
|
(self, test)
|
This must be called before the given test is commenced.
@type test: L{pyunit.TestCase}
|
This must be called before the given test is commenced.
|
[
"This",
"must",
"be",
"called",
"before",
"the",
"given",
"test",
"is",
"commenced",
"."
] |
def startTest(self, test):
"""
This must be called before the given test is commenced.
@type test: L{pyunit.TestCase}
"""
super(TestResult, self).startTest(test)
self._testStarted = self._getTime()
|
[
"def",
"startTest",
"(",
"self",
",",
"test",
")",
":",
"super",
"(",
"TestResult",
",",
"self",
")",
".",
"startTest",
"(",
"test",
")",
"self",
".",
"_testStarted",
"=",
"self",
".",
"_getTime",
"(",
")"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/trial/reporter.py#L88-L95
|
||
zopefoundation/Zope
|
ea04dd670d1a48d4d5c879d3db38fc2e9b4330bb
|
src/OFS/interfaces.py
|
python
|
IManageable.tabs_path_default
|
(REQUEST)
|
[] |
def tabs_path_default(REQUEST):
"""
"""
|
[
"def",
"tabs_path_default",
"(",
"REQUEST",
")",
":"
] |
https://github.com/zopefoundation/Zope/blob/ea04dd670d1a48d4d5c879d3db38fc2e9b4330bb/src/OFS/interfaces.py#L293-L295
|
||||
djblets/djblets
|
0496e1ec49e43d43d776768c9fc5b6f8af56ec2c
|
djblets/datagrid/grids.py
|
python
|
DataGrid.precompute_objects
|
(self, render_context=None)
|
Pre-compute all objects used to render the datagrid.
This builds the queryset and stores the list of objects for use in
rendering the datagrid. It takes into consideration sorting,
the current page, and augmented queries from columns.
Args:
render_context (Context):
The common template variable context to render on the datagrid,
provided in the constructor.
|
Pre-compute all objects used to render the datagrid.
|
[
"Pre",
"-",
"compute",
"all",
"objects",
"used",
"to",
"render",
"the",
"datagrid",
"."
] |
def precompute_objects(self, render_context=None):
"""Pre-compute all objects used to render the datagrid.
This builds the queryset and stores the list of objects for use in
rendering the datagrid. It takes into consideration sorting,
the current page, and augmented queries from columns.
Args:
render_context (Context):
The common template variable context to render on the datagrid,
provided in the constructor.
"""
query = self.queryset
use_select_related = False
# Generate the actual list of fields we'll be sorting by
sort_list = []
for sort_item in self.sort_list:
if sort_item[0] == '-':
base_sort_item = sort_item[1:]
prefix = '-'
else:
base_sort_item = sort_item
prefix = ''
if sort_item:
column = self.get_column(base_sort_item)
if not column:
logger.warning('Skipping non-existing sort column "%s"',
base_sort_item,
request=self.request)
continue
elif not column.sortable:
logger.warning('Skipping column "%s" which is not '
'sortable',
base_sort_item,
request=self.request.user.username)
continue
stateful_column = self.get_stateful_column(column)
if stateful_column:
try:
sort_field = stateful_column.get_sort_field()
except Exception as e:
logger.exception('Error when calling get_sort_field '
'for DataGrid Column %r: %s',
column, e,
request=self.request)
continue
if sort_field:
sort_list.append(prefix + sort_field)
# Lookups spanning tables require that we query from those
# tables. In order to keep things simple, we'll just use
# select_related so that we don't have to figure out the
# table relationships. We only do this if we have a lookup
# spanning tables.
if '.' in sort_field:
use_select_related = True
if sort_list:
query = query.order_by(*sort_list)
query = self.post_process_queryset(query)
if hasattr(query, 'distinct'):
query = query.distinct()
self.paginator = self.build_paginator(query)
page_num = self.request.GET.get('page', 1)
# Accept either "last" or a valid page number.
if page_num == "last":
page_num = self.paginator.num_pages
try:
self.page = self.paginator.page(page_num)
except InvalidPage:
raise Http404
self.id_list = []
if self.optimize_sorts and len(sort_list) > 0:
# This can be slow when sorting by multiple columns. If we
# have multiple items in the sort list, we'll request just the
# IDs and then fetch the actual details from that.
if hasattr(self.page.object_list, 'values_list'):
# This is a standard QuerySet.
self.id_list = list(self.page.object_list.values_list(
'pk', flat=True))
else:
# This is something more custom. Perhaps a Haystack
# SearchQuerySet. It must have a 'pk' or it won't work.
self.id_list = [int(obj.pk) for obj in self.page.object_list]
# Make sure to unset the order. We can't meaningfully order these
# results in the query, as what we really want is to keep it in
# the order specified in id_list, and we certainly don't want
# the database to do any special ordering (possibly slowing things
# down). We'll set the order properly in a minute.
self.page.object_list = self.post_process_queryset(
self.model.objects.filter(pk__in=self.id_list).order_by())
if use_select_related:
self.page.object_list = \
self.page.object_list.select_related(depth=1)
if self.id_list:
# The database will give us the items in a more or less random
# order, since it doesn't know to keep it in the order provided by
# the ID list. This will place the results back in the order we
# expect.
index = dict([(id, pos) for (pos, id) in enumerate(self.id_list)])
object_list = [None] * len(self.id_list)
for obj in list(self.page.object_list):
object_list[index[obj.pk]] = obj
else:
# Grab the whole list at once. We know it won't be too large,
# and it will prevent one query per row.
object_list = list(self.page.object_list)
for column in self.columns:
column.collect_objects(object_list)
if render_context is None:
render_context = self._build_render_context()
try:
self.rows = []
for obj in object_list:
if obj is None:
continue
if hasattr(obj, 'get_absolute_url'):
obj_url = obj.get_absolute_url()
else:
obj_url = None
render_context['_datagrid_object_url'] = obj_url
self.rows.append({
'object': obj,
'cells': [column.render_cell(obj, render_context)
for column in self.columns],
'url': obj_url,
})
except Exception as e:
logger.exception('Error when calling render_cell for DataGrid '
'Column %r: %s',
column, e)
|
[
"def",
"precompute_objects",
"(",
"self",
",",
"render_context",
"=",
"None",
")",
":",
"query",
"=",
"self",
".",
"queryset",
"use_select_related",
"=",
"False",
"# Generate the actual list of fields we'll be sorting by",
"sort_list",
"=",
"[",
"]",
"for",
"sort_item",
"in",
"self",
".",
"sort_list",
":",
"if",
"sort_item",
"[",
"0",
"]",
"==",
"'-'",
":",
"base_sort_item",
"=",
"sort_item",
"[",
"1",
":",
"]",
"prefix",
"=",
"'-'",
"else",
":",
"base_sort_item",
"=",
"sort_item",
"prefix",
"=",
"''",
"if",
"sort_item",
":",
"column",
"=",
"self",
".",
"get_column",
"(",
"base_sort_item",
")",
"if",
"not",
"column",
":",
"logger",
".",
"warning",
"(",
"'Skipping non-existing sort column \"%s\"'",
",",
"base_sort_item",
",",
"request",
"=",
"self",
".",
"request",
")",
"continue",
"elif",
"not",
"column",
".",
"sortable",
":",
"logger",
".",
"warning",
"(",
"'Skipping column \"%s\" which is not '",
"'sortable'",
",",
"base_sort_item",
",",
"request",
"=",
"self",
".",
"request",
".",
"user",
".",
"username",
")",
"continue",
"stateful_column",
"=",
"self",
".",
"get_stateful_column",
"(",
"column",
")",
"if",
"stateful_column",
":",
"try",
":",
"sort_field",
"=",
"stateful_column",
".",
"get_sort_field",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"'Error when calling get_sort_field '",
"'for DataGrid Column %r: %s'",
",",
"column",
",",
"e",
",",
"request",
"=",
"self",
".",
"request",
")",
"continue",
"if",
"sort_field",
":",
"sort_list",
".",
"append",
"(",
"prefix",
"+",
"sort_field",
")",
"# Lookups spanning tables require that we query from those",
"# tables. In order to keep things simple, we'll just use",
"# select_related so that we don't have to figure out the",
"# table relationships. We only do this if we have a lookup",
"# spanning tables.",
"if",
"'.'",
"in",
"sort_field",
":",
"use_select_related",
"=",
"True",
"if",
"sort_list",
":",
"query",
"=",
"query",
".",
"order_by",
"(",
"*",
"sort_list",
")",
"query",
"=",
"self",
".",
"post_process_queryset",
"(",
"query",
")",
"if",
"hasattr",
"(",
"query",
",",
"'distinct'",
")",
":",
"query",
"=",
"query",
".",
"distinct",
"(",
")",
"self",
".",
"paginator",
"=",
"self",
".",
"build_paginator",
"(",
"query",
")",
"page_num",
"=",
"self",
".",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
",",
"1",
")",
"# Accept either \"last\" or a valid page number.",
"if",
"page_num",
"==",
"\"last\"",
":",
"page_num",
"=",
"self",
".",
"paginator",
".",
"num_pages",
"try",
":",
"self",
".",
"page",
"=",
"self",
".",
"paginator",
".",
"page",
"(",
"page_num",
")",
"except",
"InvalidPage",
":",
"raise",
"Http404",
"self",
".",
"id_list",
"=",
"[",
"]",
"if",
"self",
".",
"optimize_sorts",
"and",
"len",
"(",
"sort_list",
")",
">",
"0",
":",
"# This can be slow when sorting by multiple columns. If we",
"# have multiple items in the sort list, we'll request just the",
"# IDs and then fetch the actual details from that.",
"if",
"hasattr",
"(",
"self",
".",
"page",
".",
"object_list",
",",
"'values_list'",
")",
":",
"# This is a standard QuerySet.",
"self",
".",
"id_list",
"=",
"list",
"(",
"self",
".",
"page",
".",
"object_list",
".",
"values_list",
"(",
"'pk'",
",",
"flat",
"=",
"True",
")",
")",
"else",
":",
"# This is something more custom. Perhaps a Haystack",
"# SearchQuerySet. It must have a 'pk' or it won't work.",
"self",
".",
"id_list",
"=",
"[",
"int",
"(",
"obj",
".",
"pk",
")",
"for",
"obj",
"in",
"self",
".",
"page",
".",
"object_list",
"]",
"# Make sure to unset the order. We can't meaningfully order these",
"# results in the query, as what we really want is to keep it in",
"# the order specified in id_list, and we certainly don't want",
"# the database to do any special ordering (possibly slowing things",
"# down). We'll set the order properly in a minute.",
"self",
".",
"page",
".",
"object_list",
"=",
"self",
".",
"post_process_queryset",
"(",
"self",
".",
"model",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"self",
".",
"id_list",
")",
".",
"order_by",
"(",
")",
")",
"if",
"use_select_related",
":",
"self",
".",
"page",
".",
"object_list",
"=",
"self",
".",
"page",
".",
"object_list",
".",
"select_related",
"(",
"depth",
"=",
"1",
")",
"if",
"self",
".",
"id_list",
":",
"# The database will give us the items in a more or less random",
"# order, since it doesn't know to keep it in the order provided by",
"# the ID list. This will place the results back in the order we",
"# expect.",
"index",
"=",
"dict",
"(",
"[",
"(",
"id",
",",
"pos",
")",
"for",
"(",
"pos",
",",
"id",
")",
"in",
"enumerate",
"(",
"self",
".",
"id_list",
")",
"]",
")",
"object_list",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"self",
".",
"id_list",
")",
"for",
"obj",
"in",
"list",
"(",
"self",
".",
"page",
".",
"object_list",
")",
":",
"object_list",
"[",
"index",
"[",
"obj",
".",
"pk",
"]",
"]",
"=",
"obj",
"else",
":",
"# Grab the whole list at once. We know it won't be too large,",
"# and it will prevent one query per row.",
"object_list",
"=",
"list",
"(",
"self",
".",
"page",
".",
"object_list",
")",
"for",
"column",
"in",
"self",
".",
"columns",
":",
"column",
".",
"collect_objects",
"(",
"object_list",
")",
"if",
"render_context",
"is",
"None",
":",
"render_context",
"=",
"self",
".",
"_build_render_context",
"(",
")",
"try",
":",
"self",
".",
"rows",
"=",
"[",
"]",
"for",
"obj",
"in",
"object_list",
":",
"if",
"obj",
"is",
"None",
":",
"continue",
"if",
"hasattr",
"(",
"obj",
",",
"'get_absolute_url'",
")",
":",
"obj_url",
"=",
"obj",
".",
"get_absolute_url",
"(",
")",
"else",
":",
"obj_url",
"=",
"None",
"render_context",
"[",
"'_datagrid_object_url'",
"]",
"=",
"obj_url",
"self",
".",
"rows",
".",
"append",
"(",
"{",
"'object'",
":",
"obj",
",",
"'cells'",
":",
"[",
"column",
".",
"render_cell",
"(",
"obj",
",",
"render_context",
")",
"for",
"column",
"in",
"self",
".",
"columns",
"]",
",",
"'url'",
":",
"obj_url",
",",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"'Error when calling render_cell for DataGrid '",
"'Column %r: %s'",
",",
"column",
",",
"e",
")"
] |
https://github.com/djblets/djblets/blob/0496e1ec49e43d43d776768c9fc5b6f8af56ec2c/djblets/datagrid/grids.py#L1215-L1371
|
||
isce-framework/isce2
|
0e5114a8bede3caf1d533d98e44dfe4b983e3f48
|
components/isceobj/TopsProc/runPrepESD.py
|
python
|
multilook
|
(intname, alks=5, rlks=15)
|
return outFile
|
Take looks.
|
Take looks.
|
[
"Take",
"looks",
"."
] |
def multilook(intname, alks=5, rlks=15):
'''
Take looks.
'''
from mroipac.looks.Looks import Looks
inimg = isceobj.createImage()
inimg.load(intname + '.xml')
spl = os.path.splitext(intname)
ext = '.{0}alks_{1}rlks'.format(alks, rlks)
outFile = spl[0] + ext + spl[1]
lkObj = Looks()
lkObj.setDownLooks(alks)
lkObj.setAcrossLooks(rlks)
lkObj.setInputImage(inimg)
lkObj.setOutputFilename(outFile)
lkObj.looks()
print('Output: ', outFile)
return outFile
|
[
"def",
"multilook",
"(",
"intname",
",",
"alks",
"=",
"5",
",",
"rlks",
"=",
"15",
")",
":",
"from",
"mroipac",
".",
"looks",
".",
"Looks",
"import",
"Looks",
"inimg",
"=",
"isceobj",
".",
"createImage",
"(",
")",
"inimg",
".",
"load",
"(",
"intname",
"+",
"'.xml'",
")",
"spl",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"intname",
")",
"ext",
"=",
"'.{0}alks_{1}rlks'",
".",
"format",
"(",
"alks",
",",
"rlks",
")",
"outFile",
"=",
"spl",
"[",
"0",
"]",
"+",
"ext",
"+",
"spl",
"[",
"1",
"]",
"lkObj",
"=",
"Looks",
"(",
")",
"lkObj",
".",
"setDownLooks",
"(",
"alks",
")",
"lkObj",
".",
"setAcrossLooks",
"(",
"rlks",
")",
"lkObj",
".",
"setInputImage",
"(",
"inimg",
")",
"lkObj",
".",
"setOutputFilename",
"(",
"outFile",
")",
"lkObj",
".",
"looks",
"(",
")",
"print",
"(",
"'Output: '",
",",
"outFile",
")",
"return",
"outFile"
] |
https://github.com/isce-framework/isce2/blob/0e5114a8bede3caf1d533d98e44dfe4b983e3f48/components/isceobj/TopsProc/runPrepESD.py#L16-L39
|
|
kuri65536/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
python-modules/twisted/twisted/words/protocols/msn.py
|
python
|
SwitchboardClient.sendTypingNotification
|
(self)
|
used to send a typing notification. Upon receiving this
message the official client will display a 'user is typing'
message to all other users in the chat session for 10 seconds.
The official client sends one of these every 5 seconds (I think)
as long as you continue to type.
|
used to send a typing notification. Upon receiving this
message the official client will display a 'user is typing'
message to all other users in the chat session for 10 seconds.
The official client sends one of these every 5 seconds (I think)
as long as you continue to type.
|
[
"used",
"to",
"send",
"a",
"typing",
"notification",
".",
"Upon",
"receiving",
"this",
"message",
"the",
"official",
"client",
"will",
"display",
"a",
"user",
"is",
"typing",
"message",
"to",
"all",
"other",
"users",
"in",
"the",
"chat",
"session",
"for",
"10",
"seconds",
".",
"The",
"official",
"client",
"sends",
"one",
"of",
"these",
"every",
"5",
"seconds",
"(",
"I",
"think",
")",
"as",
"long",
"as",
"you",
"continue",
"to",
"type",
"."
] |
def sendTypingNotification(self):
"""
used to send a typing notification. Upon receiving this
message the official client will display a 'user is typing'
message to all other users in the chat session for 10 seconds.
The official client sends one of these every 5 seconds (I think)
as long as you continue to type.
"""
m = MSNMessage()
m.ack = m.MESSAGE_ACK_NONE
m.setHeader('Content-Type', 'text/x-msmsgscontrol')
m.setHeader('TypingUser', self.userHandle)
m.message = "\r\n"
self.sendMessage(m)
|
[
"def",
"sendTypingNotification",
"(",
"self",
")",
":",
"m",
"=",
"MSNMessage",
"(",
")",
"m",
".",
"ack",
"=",
"m",
".",
"MESSAGE_ACK_NONE",
"m",
".",
"setHeader",
"(",
"'Content-Type'",
",",
"'text/x-msmsgscontrol'",
")",
"m",
".",
"setHeader",
"(",
"'TypingUser'",
",",
"self",
".",
"userHandle",
")",
"m",
".",
"message",
"=",
"\"\\r\\n\"",
"self",
".",
"sendMessage",
"(",
"m",
")"
] |
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/words/protocols/msn.py#L1977-L1990
|
||
sensepost/objection
|
658675f0e7716bd0899c9f6e9c45d25f38a699d8
|
objection/commands/plugin_manager.py
|
python
|
load_plugin
|
(args: list = None)
|
Loads an objection plugin.
:param args:
:return:
|
Loads an objection plugin.
|
[
"Loads",
"an",
"objection",
"plugin",
"."
] |
def load_plugin(args: list = None) -> None:
"""
Loads an objection plugin.
:param args:
:return:
"""
if len(args) <= 0:
click.secho('Usage: plugin load <plugin path> (<plugin namespace>)', bold=True)
return
path = os.path.abspath(args[0])
if os.path.isdir(path):
path = os.path.join(path, '__init__.py')
if not os.path.exists(path):
click.secho('[plugin] {0} does not appear to be a valid plugin. Missing __init__.py'.format(
os.path.dirname(path)), fg='red', dim=True)
return
spec = importlib.util.spec_from_file_location(str(uuid.uuid4())[:8], path)
plugin = spec.loader.load_module()
spec.loader.exec_module(plugin)
namespace = plugin.namespace
if len(args) >= 2:
namespace = args[1]
plugin.__name__ = namespace
# try and load the plugin (aka: run its __init__)
try:
instance = plugin.plugin(namespace)
assert isinstance(instance, PluginType)
except AssertionError:
click.secho('Failed to load plugin \'{0}\'. Invalid plugin type.'.format(namespace), fg='red', bold=True)
return
except Exception as e:
click.secho('Failed to load plugin \'{0}\' with error: {1}'.format(namespace, str(e)), fg='red', bold=True)
click.secho('{0}'.format(traceback.format_exc()), dim=True)
return
from ..console import commands
commands.COMMANDS['plugin']['commands'][instance.namespace] = instance.implementation
click.secho('Loaded plugin: {0}'.format(plugin.__name__), bold=True)
|
[
"def",
"load_plugin",
"(",
"args",
":",
"list",
"=",
"None",
")",
"->",
"None",
":",
"if",
"len",
"(",
"args",
")",
"<=",
"0",
":",
"click",
".",
"secho",
"(",
"'Usage: plugin load <plugin path> (<plugin namespace>)'",
",",
"bold",
"=",
"True",
")",
"return",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"args",
"[",
"0",
"]",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'__init__.py'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"click",
".",
"secho",
"(",
"'[plugin] {0} does not appear to be a valid plugin. Missing __init__.py'",
".",
"format",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
",",
"fg",
"=",
"'red'",
",",
"dim",
"=",
"True",
")",
"return",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"[",
":",
"8",
"]",
",",
"path",
")",
"plugin",
"=",
"spec",
".",
"loader",
".",
"load_module",
"(",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"plugin",
")",
"namespace",
"=",
"plugin",
".",
"namespace",
"if",
"len",
"(",
"args",
")",
">=",
"2",
":",
"namespace",
"=",
"args",
"[",
"1",
"]",
"plugin",
".",
"__name__",
"=",
"namespace",
"# try and load the plugin (aka: run its __init__)",
"try",
":",
"instance",
"=",
"plugin",
".",
"plugin",
"(",
"namespace",
")",
"assert",
"isinstance",
"(",
"instance",
",",
"PluginType",
")",
"except",
"AssertionError",
":",
"click",
".",
"secho",
"(",
"'Failed to load plugin \\'{0}\\'. Invalid plugin type.'",
".",
"format",
"(",
"namespace",
")",
",",
"fg",
"=",
"'red'",
",",
"bold",
"=",
"True",
")",
"return",
"except",
"Exception",
"as",
"e",
":",
"click",
".",
"secho",
"(",
"'Failed to load plugin \\'{0}\\' with error: {1}'",
".",
"format",
"(",
"namespace",
",",
"str",
"(",
"e",
")",
")",
",",
"fg",
"=",
"'red'",
",",
"bold",
"=",
"True",
")",
"click",
".",
"secho",
"(",
"'{0}'",
".",
"format",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"dim",
"=",
"True",
")",
"return",
"from",
".",
".",
"console",
"import",
"commands",
"commands",
".",
"COMMANDS",
"[",
"'plugin'",
"]",
"[",
"'commands'",
"]",
"[",
"instance",
".",
"namespace",
"]",
"=",
"instance",
".",
"implementation",
"click",
".",
"secho",
"(",
"'Loaded plugin: {0}'",
".",
"format",
"(",
"plugin",
".",
"__name__",
")",
",",
"bold",
"=",
"True",
")"
] |
https://github.com/sensepost/objection/blob/658675f0e7716bd0899c9f6e9c45d25f38a699d8/objection/commands/plugin_manager.py#L11-L59
|
||
enkore/i3pystatus
|
34af13547dfb8407d9cf47473b8068e681dcc55f
|
i3pystatus/timer.py
|
python
|
Timer.start
|
(self, seconds=300)
|
Starts timer.
If timer is already running it will increase remaining time instead.
:param int seconds: Initial time.
|
Starts timer.
If timer is already running it will increase remaining time instead.
|
[
"Starts",
"timer",
".",
"If",
"timer",
"is",
"already",
"running",
"it",
"will",
"increase",
"remaining",
"time",
"instead",
"."
] |
def start(self, seconds=300):
"""
Starts timer.
If timer is already running it will increase remaining time instead.
:param int seconds: Initial time.
"""
if self.state is TimerState.stopped:
self.compare = time.time() + abs(seconds)
self.state = TimerState.running
elif self.state is TimerState.running:
self.increase(seconds)
|
[
"def",
"start",
"(",
"self",
",",
"seconds",
"=",
"300",
")",
":",
"if",
"self",
".",
"state",
"is",
"TimerState",
".",
"stopped",
":",
"self",
".",
"compare",
"=",
"time",
".",
"time",
"(",
")",
"+",
"abs",
"(",
"seconds",
")",
"self",
".",
"state",
"=",
"TimerState",
".",
"running",
"elif",
"self",
".",
"state",
"is",
"TimerState",
".",
"running",
":",
"self",
".",
"increase",
"(",
"seconds",
")"
] |
https://github.com/enkore/i3pystatus/blob/34af13547dfb8407d9cf47473b8068e681dcc55f/i3pystatus/timer.py#L147-L158
|
||
biopython/biopython
|
2dd97e71762af7b046d7f7f8a4f1e38db6b06c86
|
Bio/UniGene/__init__.py
|
python
|
STSLine.__init__
|
(self, text=None)
|
Initialize the class.
|
Initialize the class.
|
[
"Initialize",
"the",
"class",
"."
] |
def __init__(self, text=None):
"""Initialize the class."""
self.acc = ""
self.unists = ""
if text is not None:
self.text = text
self._init_from_text(text)
|
[
"def",
"__init__",
"(",
"self",
",",
"text",
"=",
"None",
")",
":",
"self",
".",
"acc",
"=",
"\"\"",
"self",
".",
"unists",
"=",
"\"\"",
"if",
"text",
"is",
"not",
"None",
":",
"self",
".",
"text",
"=",
"text",
"self",
".",
"_init_from_text",
"(",
"text",
")"
] |
https://github.com/biopython/biopython/blob/2dd97e71762af7b046d7f7f8a4f1e38db6b06c86/Bio/UniGene/__init__.py#L184-L190
|
||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/cme/v20191029/models.py
|
python
|
DescribeClassRequest.__init__
|
(self)
|
r"""
:param Platform: 平台名称,指定访问的平台。
:type Platform: str
:param Owner: 归属者。
:type Owner: :class:`tencentcloud.cme.v20191029.models.Entity`
:param Operator: 操作者。填写用户的 Id,用于标识调用者及校验操作权限。
:type Operator: str
|
r"""
:param Platform: 平台名称,指定访问的平台。
:type Platform: str
:param Owner: 归属者。
:type Owner: :class:`tencentcloud.cme.v20191029.models.Entity`
:param Operator: 操作者。填写用户的 Id,用于标识调用者及校验操作权限。
:type Operator: str
|
[
"r",
":",
"param",
"Platform",
":",
"平台名称,指定访问的平台。",
":",
"type",
"Platform",
":",
"str",
":",
"param",
"Owner",
":",
"归属者。",
":",
"type",
"Owner",
":",
":",
"class",
":",
"tencentcloud",
".",
"cme",
".",
"v20191029",
".",
"models",
".",
"Entity",
":",
"param",
"Operator",
":",
"操作者。填写用户的",
"Id,用于标识调用者及校验操作权限。",
":",
"type",
"Operator",
":",
"str"
] |
def __init__(self):
r"""
:param Platform: 平台名称,指定访问的平台。
:type Platform: str
:param Owner: 归属者。
:type Owner: :class:`tencentcloud.cme.v20191029.models.Entity`
:param Operator: 操作者。填写用户的 Id,用于标识调用者及校验操作权限。
:type Operator: str
"""
self.Platform = None
self.Owner = None
self.Operator = None
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"Platform",
"=",
"None",
"self",
".",
"Owner",
"=",
"None",
"self",
".",
"Operator",
"=",
"None"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/cme/v20191029/models.py#L1350-L1361
|
||
titusjan/argos
|
5a9c31a8a9a2ca825bbf821aa1e685740e3682d7
|
argos/inspector/pgplugins/pgctis.py
|
python
|
PgColorMapCtiEditor.getData
|
(self)
|
return self.selectionWidget.getCurrentColorMap()
|
Gets data from the editor widget.
|
Gets data from the editor widget.
|
[
"Gets",
"data",
"from",
"the",
"editor",
"widget",
"."
] |
def getData(self):
""" Gets data from the editor widget.
"""
return self.selectionWidget.getCurrentColorMap()
|
[
"def",
"getData",
"(",
"self",
")",
":",
"return",
"self",
".",
"selectionWidget",
".",
"getCurrentColorMap",
"(",
")"
] |
https://github.com/titusjan/argos/blob/5a9c31a8a9a2ca825bbf821aa1e685740e3682d7/argos/inspector/pgplugins/pgctis.py#L1053-L1056
|
|
huggingface/transformers
|
623b4f7c63f60cce917677ee704d6c93ee960b4b
|
src/transformers/models/electra/modeling_flax_electra.py
|
python
|
FlaxElectraTiedDense.setup
|
(self)
|
[] |
def setup(self):
self.bias = self.param("bias", self.bias_init, (self.embedding_size,))
|
[
"def",
"setup",
"(",
"self",
")",
":",
"self",
".",
"bias",
"=",
"self",
".",
"param",
"(",
"\"bias\"",
",",
"self",
".",
"bias_init",
",",
"(",
"self",
".",
"embedding_size",
",",
")",
")"
] |
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/electra/modeling_flax_electra.py#L678-L679
|
||||
plotly/plotly.py
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
packages/python/plotly/plotly/graph_objs/densitymapbox/_colorbar.py
|
python
|
ColorBar.ticks
|
(self)
|
return self["ticks"]
|
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
|
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
|
[
"Determines",
"whether",
"ticks",
"are",
"drawn",
"or",
"not",
".",
"If",
"this",
"axis",
"ticks",
"are",
"not",
"drawn",
".",
"If",
"outside",
"(",
"inside",
")",
"this",
"axis",
"are",
"drawn",
"outside",
"(",
"inside",
")",
"the",
"axis",
"lines",
".",
"The",
"ticks",
"property",
"is",
"an",
"enumeration",
"that",
"may",
"be",
"specified",
"as",
":",
"-",
"One",
"of",
"the",
"following",
"enumeration",
"values",
":",
"[",
"outside",
"inside",
"]"
] |
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
|
[
"def",
"ticks",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"ticks\"",
"]"
] |
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/densitymapbox/_colorbar.py#L996-L1010
|
|
seemoo-lab/internalblue
|
ba6ba0b99f835964395d6dd1b1eb7dd850398fd6
|
internalblue/fw/fw.py
|
python
|
Firmware._module_to_firmware_definition
|
(self, fw: ModuleType)
|
Wrap existing usages where the module was used and extract the new FirmwareDefinition class
:param fw:
:return:
|
Wrap existing usages where the module was used and extract the new FirmwareDefinition class
|
[
"Wrap",
"existing",
"usages",
"where",
"the",
"module",
"was",
"used",
"and",
"extract",
"the",
"new",
"FirmwareDefinition",
"class"
] |
def _module_to_firmware_definition(self, fw: ModuleType) -> FirmwareDefinition:
"""
Wrap existing usages where the module was used and extract the new FirmwareDefinition class
:param fw:
:return:
"""
_types = {
name: cls
for name, cls in fw.__dict__.items()
if isinstance(cls, type)
and issubclass(cls, FirmwareDefinition)
and cls is not FirmwareDefinition
}
if len(_types) == 1:
return list(_types.values())[0]
|
[
"def",
"_module_to_firmware_definition",
"(",
"self",
",",
"fw",
":",
"ModuleType",
")",
"->",
"FirmwareDefinition",
":",
"_types",
"=",
"{",
"name",
":",
"cls",
"for",
"name",
",",
"cls",
"in",
"fw",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"cls",
",",
"type",
")",
"and",
"issubclass",
"(",
"cls",
",",
"FirmwareDefinition",
")",
"and",
"cls",
"is",
"not",
"FirmwareDefinition",
"}",
"if",
"len",
"(",
"_types",
")",
"==",
"1",
":",
"return",
"list",
"(",
"_types",
".",
"values",
"(",
")",
")",
"[",
"0",
"]"
] |
https://github.com/seemoo-lab/internalblue/blob/ba6ba0b99f835964395d6dd1b1eb7dd850398fd6/internalblue/fw/fw.py#L152-L168
|
||
NVlabs/STEP
|
59da38af240869fa6f1bc565803cff34aafdaa99
|
external/ActivityNet/Evaluation/ava/np_box_list.py
|
python
|
BoxList.get_field
|
(self, field)
|
return self.data[field]
|
Accesses data associated with the specified field in the box collection.
Args:
field: a string parameter used to speficy a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
|
Accesses data associated with the specified field in the box collection.
|
[
"Accesses",
"data",
"associated",
"with",
"the",
"specified",
"field",
"in",
"the",
"box",
"collection",
"."
] |
def get_field(self, field):
"""Accesses data associated with the specified field in the box collection.
Args:
field: a string parameter used to speficy a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
|
[
"def",
"get_field",
"(",
"self",
",",
"field",
")",
":",
"if",
"not",
"self",
".",
"has_field",
"(",
"field",
")",
":",
"raise",
"ValueError",
"(",
"'field {} does not exist'",
".",
"format",
"(",
"field",
")",
")",
"return",
"self",
".",
"data",
"[",
"field",
"]"
] |
https://github.com/NVlabs/STEP/blob/59da38af240869fa6f1bc565803cff34aafdaa99/external/ActivityNet/Evaluation/ava/np_box_list.py#L90-L104
|
|
ring04h/wyportmap
|
c4201e2313504e780a7f25238eba2a2d3223e739
|
sqlalchemy/sql/operators.py
|
python
|
ColumnOperators.concat
|
(self, other)
|
return self.operate(concat_op, other)
|
Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
|
Implement the 'concat' operator.
|
[
"Implement",
"the",
"concat",
"operator",
"."
] |
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
|
[
"def",
"concat",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
".",
"operate",
"(",
"concat_op",
",",
"other",
")"
] |
https://github.com/ring04h/wyportmap/blob/c4201e2313504e780a7f25238eba2a2d3223e739/sqlalchemy/sql/operators.py#L360-L367
|
|
fossasia/pslab-python
|
bb53a334b729d0956ed9f4ce6899903f3e4868ef
|
pslab/external/display.py
|
python
|
SSD1306.draw_horizontal_line
|
(self, x0, y0, length, color, update: bool = True)
|
Draw a horizontal line.
|
Draw a horizontal line.
|
[
"Draw",
"a",
"horizontal",
"line",
"."
] |
def draw_horizontal_line(self, x0, y0, length, color, update: bool = True):
"""Draw a horizontal line."""
self.draw_line(x0, y0, x0 + length - 1, y0, color, update)
|
[
"def",
"draw_horizontal_line",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"length",
",",
"color",
",",
"update",
":",
"bool",
"=",
"True",
")",
":",
"self",
".",
"draw_line",
"(",
"x0",
",",
"y0",
",",
"x0",
"+",
"length",
"-",
"1",
",",
"y0",
",",
"color",
",",
"update",
")"
] |
https://github.com/fossasia/pslab-python/blob/bb53a334b729d0956ed9f4ce6899903f3e4868ef/pslab/external/display.py#L294-L296
|
||
Shizmob/pydle
|
9798eb3db6665622fdd1391cdb563ebdb3014c6c
|
pydle/__init__.py
|
python
|
featurize
|
(*features)
|
return type(name, tuple(sorted_features), {})
|
Put features into proper MRO order.
|
Put features into proper MRO order.
|
[
"Put",
"features",
"into",
"proper",
"MRO",
"order",
"."
] |
def featurize(*features):
""" Put features into proper MRO order. """
from functools import cmp_to_key
def compare_subclass(left, right):
if issubclass(left, right):
return -1
elif issubclass(right, left):
return 1
return 0
sorted_features = sorted(features, key=cmp_to_key(compare_subclass))
name = 'FeaturizedClient[{features}]'.format(
features=', '.join(feature.__name__ for feature in sorted_features))
return type(name, tuple(sorted_features), {})
|
[
"def",
"featurize",
"(",
"*",
"features",
")",
":",
"from",
"functools",
"import",
"cmp_to_key",
"def",
"compare_subclass",
"(",
"left",
",",
"right",
")",
":",
"if",
"issubclass",
"(",
"left",
",",
"right",
")",
":",
"return",
"-",
"1",
"elif",
"issubclass",
"(",
"right",
",",
"left",
")",
":",
"return",
"1",
"return",
"0",
"sorted_features",
"=",
"sorted",
"(",
"features",
",",
"key",
"=",
"cmp_to_key",
"(",
"compare_subclass",
")",
")",
"name",
"=",
"'FeaturizedClient[{features}]'",
".",
"format",
"(",
"features",
"=",
"', '",
".",
"join",
"(",
"feature",
".",
"__name__",
"for",
"feature",
"in",
"sorted_features",
")",
")",
"return",
"type",
"(",
"name",
",",
"tuple",
"(",
"sorted_features",
")",
",",
"{",
"}",
")"
] |
https://github.com/Shizmob/pydle/blob/9798eb3db6665622fdd1391cdb563ebdb3014c6c/pydle/__init__.py#L16-L30
|
|
NeuromorphicProcessorProject/snn_toolbox
|
a85ada7b5d060500703285ef8a68f06ea1ffda65
|
snntoolbox/simulation/utils.py
|
python
|
AbstractSNN.add_layer
|
(self, layer)
|
Do anything that concerns adding any layer independently of its
type.
Parameters
----------
layer: keras.layers.Layer | keras.layers.Conv
Layer
|
Do anything that concerns adding any layer independently of its
type.
|
[
"Do",
"anything",
"that",
"concerns",
"adding",
"any",
"layer",
"independently",
"of",
"its",
"type",
"."
] |
def add_layer(self, layer):
"""Do anything that concerns adding any layer independently of its
type.
Parameters
----------
layer: keras.layers.Layer | keras.layers.Conv
Layer
"""
pass
|
[
"def",
"add_layer",
"(",
"self",
",",
"layer",
")",
":",
"pass"
] |
https://github.com/NeuromorphicProcessorProject/snn_toolbox/blob/a85ada7b5d060500703285ef8a68f06ea1ffda65/snntoolbox/simulation/utils.py#L212-L223
|
||
taomujian/linbing
|
fe772a58f41e3b046b51a866bdb7e4655abaf51a
|
python/app/thirdparty/oneforall/modules/check/sitemap.py
|
python
|
Sitemap.check
|
(self)
|
正则匹配域名的sitemap文件中的子域
|
正则匹配域名的sitemap文件中的子域
|
[
"正则匹配域名的sitemap文件中的子域"
] |
def check(self):
"""
正则匹配域名的sitemap文件中的子域
"""
filenames = {'sitemap.xml', 'sitemap.txt', 'sitemap.html', 'sitemapindex.xml'}
self.to_check(filenames)
|
[
"def",
"check",
"(",
"self",
")",
":",
"filenames",
"=",
"{",
"'sitemap.xml'",
",",
"'sitemap.txt'",
",",
"'sitemap.html'",
",",
"'sitemapindex.xml'",
"}",
"self",
".",
"to_check",
"(",
"filenames",
")"
] |
https://github.com/taomujian/linbing/blob/fe772a58f41e3b046b51a866bdb7e4655abaf51a/python/app/thirdparty/oneforall/modules/check/sitemap.py#L14-L19
|
||
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/conversations/v1/service/conversation/webhook.py
|
python
|
WebhookInstance._proxy
|
(self)
|
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WebhookContext for this WebhookInstance
:rtype: twilio.rest.conversations.v1.service.conversation.webhook.WebhookContext
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
|
[
"Generate",
"an",
"instance",
"context",
"for",
"the",
"instance",
"the",
"context",
"is",
"capable",
"of",
"performing",
"various",
"actions",
".",
"All",
"instance",
"actions",
"are",
"proxied",
"to",
"the",
"context"
] |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WebhookContext for this WebhookInstance
:rtype: twilio.rest.conversations.v1.service.conversation.webhook.WebhookContext
"""
if self._context is None:
self._context = WebhookContext(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
conversation_sid=self._solution['conversation_sid'],
sid=self._solution['sid'],
)
return self._context
|
[
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"WebhookContext",
"(",
"self",
".",
"_version",
",",
"chat_service_sid",
"=",
"self",
".",
"_solution",
"[",
"'chat_service_sid'",
"]",
",",
"conversation_sid",
"=",
"self",
".",
"_solution",
"[",
"'conversation_sid'",
"]",
",",
"sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")",
"return",
"self",
".",
"_context"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/conversations/v1/service/conversation/webhook.py#L379-L394
|
|
steeve/xbmctorrent
|
e6bcb1037668959e1e3cb5ba8cf3e379c6638da9
|
resources/site-packages/concurrent/futures/process.py
|
python
|
_python_exit
|
()
|
[] |
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
|
[
"def",
"_python_exit",
"(",
")",
":",
"global",
"_shutdown",
"_shutdown",
"=",
"True",
"items",
"=",
"list",
"(",
"_threads_queues",
".",
"items",
"(",
")",
")",
"for",
"t",
",",
"q",
"in",
"items",
":",
"q",
".",
"put",
"(",
"None",
")",
"for",
"t",
",",
"q",
"in",
"items",
":",
"t",
".",
"join",
"(",
")"
] |
https://github.com/steeve/xbmctorrent/blob/e6bcb1037668959e1e3cb5ba8cf3e379c6638da9/resources/site-packages/concurrent/futures/process.py#L79-L86
|
||||
mesalock-linux/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
pypy/module/_cffi_backend/ffi_obj.py
|
python
|
W_FFIObject.descr_addressof
|
(self, w_arg, args_w)
|
return W_CData(space, cdata, w_ctypeptr)
|
\
Limited equivalent to the '&' operator in C:
1. ffi.addressof(<cdata 'struct-or-union'>) returns a cdata that is a
pointer to this struct or union.
2. ffi.addressof(<cdata>, field-or-index...) returns the address of a
field or array item inside the given structure or array, recursively
in case of nested structures or arrays.
3. ffi.addressof(<library>, "name") returns the address of the named
function or global variable.
|
\
Limited equivalent to the '&' operator in C:
|
[
"\\",
"Limited",
"equivalent",
"to",
"the",
"&",
"operator",
"in",
"C",
":"
] |
def descr_addressof(self, w_arg, args_w):
"""\
Limited equivalent to the '&' operator in C:
1. ffi.addressof(<cdata 'struct-or-union'>) returns a cdata that is a
pointer to this struct or union.
2. ffi.addressof(<cdata>, field-or-index...) returns the address of a
field or array item inside the given structure or array, recursively
in case of nested structures or arrays.
3. ffi.addressof(<library>, "name") returns the address of the named
function or global variable."""
#
from pypy.module._cffi_backend.lib_obj import W_LibObject
space = self.space
if isinstance(w_arg, W_LibObject) and len(args_w) == 1:
# case 3 in the docstring
return w_arg.address_of_func_or_global_var(space.text_w(args_w[0]))
#
w_ctype = self.ffi_type(w_arg, ACCEPT_CDATA)
if len(args_w) == 0:
# case 1 in the docstring
if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) and
not isinstance(w_ctype, ctypearray.W_CTypeArray)):
raise oefmt(space.w_TypeError,
"expected a cdata struct/union/array object")
offset = 0
else:
# case 2 in the docstring
if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) and
not isinstance(w_ctype, ctypearray.W_CTypeArray) and
not isinstance(w_ctype, ctypeptr.W_CTypePointer)):
raise oefmt(space.w_TypeError,
"expected a cdata struct/union/array/pointer object")
if len(args_w) == 1:
w_ctype, offset = w_ctype.direct_typeoffsetof(args_w[0], False)
else:
w_ctype, offset = self._more_addressof(args_w, w_ctype)
#
assert isinstance(w_arg, W_CData)
cdata = w_arg.unsafe_escaping_ptr()
cdata = rffi.ptradd(cdata, offset)
w_ctypeptr = newtype.new_pointer_type(space, w_ctype)
return W_CData(space, cdata, w_ctypeptr)
|
[
"def",
"descr_addressof",
"(",
"self",
",",
"w_arg",
",",
"args_w",
")",
":",
"#",
"from",
"pypy",
".",
"module",
".",
"_cffi_backend",
".",
"lib_obj",
"import",
"W_LibObject",
"space",
"=",
"self",
".",
"space",
"if",
"isinstance",
"(",
"w_arg",
",",
"W_LibObject",
")",
"and",
"len",
"(",
"args_w",
")",
"==",
"1",
":",
"# case 3 in the docstring",
"return",
"w_arg",
".",
"address_of_func_or_global_var",
"(",
"space",
".",
"text_w",
"(",
"args_w",
"[",
"0",
"]",
")",
")",
"#",
"w_ctype",
"=",
"self",
".",
"ffi_type",
"(",
"w_arg",
",",
"ACCEPT_CDATA",
")",
"if",
"len",
"(",
"args_w",
")",
"==",
"0",
":",
"# case 1 in the docstring",
"if",
"(",
"not",
"isinstance",
"(",
"w_ctype",
",",
"ctypestruct",
".",
"W_CTypeStructOrUnion",
")",
"and",
"not",
"isinstance",
"(",
"w_ctype",
",",
"ctypearray",
".",
"W_CTypeArray",
")",
")",
":",
"raise",
"oefmt",
"(",
"space",
".",
"w_TypeError",
",",
"\"expected a cdata struct/union/array object\"",
")",
"offset",
"=",
"0",
"else",
":",
"# case 2 in the docstring",
"if",
"(",
"not",
"isinstance",
"(",
"w_ctype",
",",
"ctypestruct",
".",
"W_CTypeStructOrUnion",
")",
"and",
"not",
"isinstance",
"(",
"w_ctype",
",",
"ctypearray",
".",
"W_CTypeArray",
")",
"and",
"not",
"isinstance",
"(",
"w_ctype",
",",
"ctypeptr",
".",
"W_CTypePointer",
")",
")",
":",
"raise",
"oefmt",
"(",
"space",
".",
"w_TypeError",
",",
"\"expected a cdata struct/union/array/pointer object\"",
")",
"if",
"len",
"(",
"args_w",
")",
"==",
"1",
":",
"w_ctype",
",",
"offset",
"=",
"w_ctype",
".",
"direct_typeoffsetof",
"(",
"args_w",
"[",
"0",
"]",
",",
"False",
")",
"else",
":",
"w_ctype",
",",
"offset",
"=",
"self",
".",
"_more_addressof",
"(",
"args_w",
",",
"w_ctype",
")",
"#",
"assert",
"isinstance",
"(",
"w_arg",
",",
"W_CData",
")",
"cdata",
"=",
"w_arg",
".",
"unsafe_escaping_ptr",
"(",
")",
"cdata",
"=",
"rffi",
".",
"ptradd",
"(",
"cdata",
",",
"offset",
")",
"w_ctypeptr",
"=",
"newtype",
".",
"new_pointer_type",
"(",
"space",
",",
"w_ctype",
")",
"return",
"W_CData",
"(",
"space",
",",
"cdata",
",",
"w_ctypeptr",
")"
] |
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/pypy/module/_cffi_backend/ffi_obj.py#L211-L255
|
|
WikidPad/WikidPad
|
558109638807bc76b4672922686e416ab2d5f79c
|
WikidPad/lib/pwiki/WindowLayout.py
|
python
|
StorablePerspective.getStoredPerspective
|
(self)
|
Returns a unistring describing the contents of the window for
later recreation or None if this window can't be stored.
If a window doesn't need to store additional perspective data,
return empty unistring
|
Returns a unistring describing the contents of the window for
later recreation or None if this window can't be stored.
If a window doesn't need to store additional perspective data,
return empty unistring
|
[
"Returns",
"a",
"unistring",
"describing",
"the",
"contents",
"of",
"the",
"window",
"for",
"later",
"recreation",
"or",
"None",
"if",
"this",
"window",
"can",
"t",
"be",
"stored",
".",
"If",
"a",
"window",
"doesn",
"t",
"need",
"to",
"store",
"additional",
"perspective",
"data",
"return",
"empty",
"unistring"
] |
def getStoredPerspective(self):
"""
Returns a unistring describing the contents of the window for
later recreation or None if this window can't be stored.
If a window doesn't need to store additional perspective data,
return empty unistring
"""
raise NotImplementedError
|
[
"def",
"getStoredPerspective",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/WikidPad/WikidPad/blob/558109638807bc76b4672922686e416ab2d5f79c/WikidPad/lib/pwiki/WindowLayout.py#L984-L991
|
||
mitshell/libmich
|
fe63c12d1d9466f2c39824625e63ac05b712a3a6
|
libmich/asn1/parsers.py
|
python
|
parse_set
|
(Obj, text='')
|
return text
|
parses any set assigned to an ASN.1 type or class
set a dict {'root': list of values, 'ext': list of values or None} in Obj['val'],
each list containing ASN.1 values corresponding to the Obj type
returns the rest of the text
|
parses any set assigned to an ASN.1 type or class
set a dict {'root': list of values, 'ext': list of values or None} in Obj['val'],
each list containing ASN.1 values corresponding to the Obj type
returns the rest of the text
|
[
"parses",
"any",
"set",
"assigned",
"to",
"an",
"ASN",
".",
"1",
"type",
"or",
"class",
"set",
"a",
"dict",
"{",
"root",
":",
"list",
"of",
"values",
"ext",
":",
"list",
"of",
"values",
"or",
"None",
"}",
"in",
"Obj",
"[",
"val",
"]",
"each",
"list",
"containing",
"ASN",
".",
"1",
"values",
"corresponding",
"to",
"the",
"Obj",
"type",
"returns",
"the",
"rest",
"of",
"the",
"text"
] |
def parse_set(Obj, text=''):
'''
parses any set assigned to an ASN.1 type or class
set a dict {'root': list of values, 'ext': list of values or None} in Obj['val'],
each list containing ASN.1 values corresponding to the Obj type
returns the rest of the text
'''
text, text_set = extract_curlybrack(text)
if text_set is None:
raise(ASN1_PROC_TEXT('%s: invalid set: %s'\
% (Obj.get_fullname(), text)))
#
# check coma for extension marker
coma_offsets = [-1] + search_top_lvl_sep(text_set, ',') + [len(text_set)]
sets = map(stripper, [text_set[coma_offsets[i]+1:coma_offsets[i+1]] \
for i in range(len(coma_offsets)-1)])
#
Root, Ext = [], None
if len(sets) == 1:
# rootSet or "..."
if sets[0] == '...':
Ext = []
else:
Root = parse_set_elements(Obj, sets[0])
elif len(sets) == 2:
# rootSet + "..." or "..." + extSet
if sets[0] == '...':
Ext = parse_set_elements(Obj, sets[1])
elif sets[1] == '...':
Ext = []
Root = parse_set_elements(Obj, sets[0])
else:
raise(ASN1_PROC_TEXT('%s: invalid set: %s'\
% (Obj.get_fullname(), text_set)))
elif len(sets) == 3:
# rootSet + "..." + extSet
if sets[1] != '...':
raise(ASN1_PROC_TEXT('%s: invalid set: %s'\
% (Obj.get_fullname(), text_set)))
else:
Root = parse_set_elements(Obj, sets[0])
Ext = parse_set_elements(Obj, sets[2])
else:
raise(ASN1_PROC_TEXT('%s: invalid set: %s'\
% (Obj.get_fullname(), text_set)))
#
Obj['val'] = {'root':Root, 'ext':Ext}
return text
|
[
"def",
"parse_set",
"(",
"Obj",
",",
"text",
"=",
"''",
")",
":",
"text",
",",
"text_set",
"=",
"extract_curlybrack",
"(",
"text",
")",
"if",
"text_set",
"is",
"None",
":",
"raise",
"(",
"ASN1_PROC_TEXT",
"(",
"'%s: invalid set: %s'",
"%",
"(",
"Obj",
".",
"get_fullname",
"(",
")",
",",
"text",
")",
")",
")",
"#",
"# check coma for extension marker",
"coma_offsets",
"=",
"[",
"-",
"1",
"]",
"+",
"search_top_lvl_sep",
"(",
"text_set",
",",
"','",
")",
"+",
"[",
"len",
"(",
"text_set",
")",
"]",
"sets",
"=",
"map",
"(",
"stripper",
",",
"[",
"text_set",
"[",
"coma_offsets",
"[",
"i",
"]",
"+",
"1",
":",
"coma_offsets",
"[",
"i",
"+",
"1",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"coma_offsets",
")",
"-",
"1",
")",
"]",
")",
"#",
"Root",
",",
"Ext",
"=",
"[",
"]",
",",
"None",
"if",
"len",
"(",
"sets",
")",
"==",
"1",
":",
"# rootSet or \"...\"",
"if",
"sets",
"[",
"0",
"]",
"==",
"'...'",
":",
"Ext",
"=",
"[",
"]",
"else",
":",
"Root",
"=",
"parse_set_elements",
"(",
"Obj",
",",
"sets",
"[",
"0",
"]",
")",
"elif",
"len",
"(",
"sets",
")",
"==",
"2",
":",
"# rootSet + \"...\" or \"...\" + extSet",
"if",
"sets",
"[",
"0",
"]",
"==",
"'...'",
":",
"Ext",
"=",
"parse_set_elements",
"(",
"Obj",
",",
"sets",
"[",
"1",
"]",
")",
"elif",
"sets",
"[",
"1",
"]",
"==",
"'...'",
":",
"Ext",
"=",
"[",
"]",
"Root",
"=",
"parse_set_elements",
"(",
"Obj",
",",
"sets",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"(",
"ASN1_PROC_TEXT",
"(",
"'%s: invalid set: %s'",
"%",
"(",
"Obj",
".",
"get_fullname",
"(",
")",
",",
"text_set",
")",
")",
")",
"elif",
"len",
"(",
"sets",
")",
"==",
"3",
":",
"# rootSet + \"...\" + extSet",
"if",
"sets",
"[",
"1",
"]",
"!=",
"'...'",
":",
"raise",
"(",
"ASN1_PROC_TEXT",
"(",
"'%s: invalid set: %s'",
"%",
"(",
"Obj",
".",
"get_fullname",
"(",
")",
",",
"text_set",
")",
")",
")",
"else",
":",
"Root",
"=",
"parse_set_elements",
"(",
"Obj",
",",
"sets",
"[",
"0",
"]",
")",
"Ext",
"=",
"parse_set_elements",
"(",
"Obj",
",",
"sets",
"[",
"2",
"]",
")",
"else",
":",
"raise",
"(",
"ASN1_PROC_TEXT",
"(",
"'%s: invalid set: %s'",
"%",
"(",
"Obj",
".",
"get_fullname",
"(",
")",
",",
"text_set",
")",
")",
")",
"#",
"Obj",
"[",
"'val'",
"]",
"=",
"{",
"'root'",
":",
"Root",
",",
"'ext'",
":",
"Ext",
"}",
"return",
"text"
] |
https://github.com/mitshell/libmich/blob/fe63c12d1d9466f2c39824625e63ac05b712a3a6/libmich/asn1/parsers.py#L1444-L1493
|
|
Jenyay/outwiker
|
50530cf7b3f71480bb075b2829bc0669773b835b
|
plugins/datagraph/datagraph/libs/dateutil/tz/tz.py
|
python
|
tzutc.is_ambiguous
|
(self, dt)
|
return False
|
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
|
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
|
[
"Whether",
"or",
"not",
"the",
"wall",
"time",
"of",
"a",
"given",
"datetime",
"is",
"ambiguous",
"in",
"this",
"zone",
"."
] |
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
return False
|
[
"def",
"is_ambiguous",
"(",
"self",
",",
"dt",
")",
":",
"return",
"False"
] |
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/plugins/datagraph/datagraph/libs/dateutil/tz/tz.py#L46-L60
|
|
datalad/datalad
|
d8c8383d878a207bb586415314219a60c345f732
|
datalad/support/parallel.py
|
python
|
ProducerConsumer._producer_iter
|
(self)
|
return self.producer() if inspect.isgeneratorfunction(self.producer) else self.producer
|
A little helper to also support generator functions
|
A little helper to also support generator functions
|
[
"A",
"little",
"helper",
"to",
"also",
"support",
"generator",
"functions"
] |
def _producer_iter(self):
"""A little helper to also support generator functions"""
return self.producer() if inspect.isgeneratorfunction(self.producer) else self.producer
|
[
"def",
"_producer_iter",
"(",
"self",
")",
":",
"return",
"self",
".",
"producer",
"(",
")",
"if",
"inspect",
".",
"isgeneratorfunction",
"(",
"self",
".",
"producer",
")",
"else",
"self",
".",
"producer"
] |
https://github.com/datalad/datalad/blob/d8c8383d878a207bb586415314219a60c345f732/datalad/support/parallel.py#L309-L311
|
|
WerWolv/EdiZon_CheatsConfigsAndScripts
|
d16d36c7509c01dca770f402babd83ff2e9ae6e7
|
Scripts/lib/python3.5/poplib.py
|
python
|
POP3.user
|
(self, user)
|
return self._shortcmd('USER %s' % user)
|
Send user name, return response
(should indicate password required).
|
Send user name, return response
|
[
"Send",
"user",
"name",
"return",
"response"
] |
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
|
[
"def",
"user",
"(",
"self",
",",
"user",
")",
":",
"return",
"self",
".",
"_shortcmd",
"(",
"'USER %s'",
"%",
"user",
")"
] |
https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/poplib.py#L198-L203
|
|
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/cloudinary/api.py
|
python
|
resources_by_asset_ids
|
(asset_ids, **options)
|
return call_api("get", uri, params, **options)
|
Retrieves the resources (assets) indicated in the asset IDs.
This method does not return deleted assets even if they have been backed up.
See: `Get resources by context API reference
<https://cloudinary.com/documentation/admin_api#get_resources>`_
:param asset_ids: The requested asset IDs.
:type asset_ids: list[str]
:param options: Additional options
:type options: dict, optional
:return: Resources (assets) as indicated in the asset IDs
:rtype: Response
|
Retrieves the resources (assets) indicated in the asset IDs.
This method does not return deleted assets even if they have been backed up.
|
[
"Retrieves",
"the",
"resources",
"(",
"assets",
")",
"indicated",
"in",
"the",
"asset",
"IDs",
".",
"This",
"method",
"does",
"not",
"return",
"deleted",
"assets",
"even",
"if",
"they",
"have",
"been",
"backed",
"up",
"."
] |
def resources_by_asset_ids(asset_ids, **options):
"""Retrieves the resources (assets) indicated in the asset IDs.
This method does not return deleted assets even if they have been backed up.
See: `Get resources by context API reference
<https://cloudinary.com/documentation/admin_api#get_resources>`_
:param asset_ids: The requested asset IDs.
:type asset_ids: list[str]
:param options: Additional options
:type options: dict, optional
:return: Resources (assets) as indicated in the asset IDs
:rtype: Response
"""
uri = ["resources", 'by_asset_ids']
params = dict(only(options, "tags", "moderations", "context"), asset_ids=asset_ids)
return call_api("get", uri, params, **options)
|
[
"def",
"resources_by_asset_ids",
"(",
"asset_ids",
",",
"*",
"*",
"options",
")",
":",
"uri",
"=",
"[",
"\"resources\"",
",",
"'by_asset_ids'",
"]",
"params",
"=",
"dict",
"(",
"only",
"(",
"options",
",",
"\"tags\"",
",",
"\"moderations\"",
",",
"\"context\"",
")",
",",
"asset_ids",
"=",
"asset_ids",
")",
"return",
"call_api",
"(",
"\"get\"",
",",
"uri",
",",
"params",
",",
"*",
"*",
"options",
")"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/cloudinary/api.py#L96-L112
|
|
ynhacler/RedKindle
|
7c970920dc840f869e38cbda480d630cc2e7b200
|
rq2/compat/dictconfig.py
|
python
|
DictConfigurator.add_handlers
|
(self, logger, handlers)
|
Add handlers to a logger from a list of names.
|
Add handlers to a logger from a list of names.
|
[
"Add",
"handlers",
"to",
"a",
"logger",
"from",
"a",
"list",
"of",
"names",
"."
] |
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
|
[
"def",
"add_handlers",
"(",
"self",
",",
"logger",
",",
"handlers",
")",
":",
"for",
"h",
"in",
"handlers",
":",
"try",
":",
"logger",
".",
"addHandler",
"(",
"self",
".",
"config",
"[",
"'handlers'",
"]",
"[",
"h",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'Unable to add handler %r: %s'",
"%",
"(",
"h",
",",
"e",
")",
")"
] |
https://github.com/ynhacler/RedKindle/blob/7c970920dc840f869e38cbda480d630cc2e7b200/rq2/compat/dictconfig.py#L510-L516
|
||
OCA/l10n-spain
|
99050907670a70307fcd8cdfb6f3400d9e120df4
|
l10n_es_ticketbai_api/models/ticketbai_invoice_customer.py
|
python
|
TicketBaiInvoiceCustomer._check_zip
|
(self)
|
[] |
def _check_zip(self):
for record in self:
if record.zip and 20 < len(record.zip):
raise exceptions.ValidationError(
_(
"TicketBAI Invoice %s:\n"
"Customer %s ZIP Code %s longer than expected. "
"Should be 20 characters max.!"
)
% (record.tbai_invoice_id.name, record.name, record.zip)
)
|
[
"def",
"_check_zip",
"(",
"self",
")",
":",
"for",
"record",
"in",
"self",
":",
"if",
"record",
".",
"zip",
"and",
"20",
"<",
"len",
"(",
"record",
".",
"zip",
")",
":",
"raise",
"exceptions",
".",
"ValidationError",
"(",
"_",
"(",
"\"TicketBAI Invoice %s:\\n\"",
"\"Customer %s ZIP Code %s longer than expected. \"",
"\"Should be 20 characters max.!\"",
")",
"%",
"(",
"record",
".",
"tbai_invoice_id",
".",
"name",
",",
"record",
".",
"name",
",",
"record",
".",
"zip",
")",
")"
] |
https://github.com/OCA/l10n-spain/blob/99050907670a70307fcd8cdfb6f3400d9e120df4/l10n_es_ticketbai_api/models/ticketbai_invoice_customer.py#L144-L154
|
||||
glue-viz/glue
|
840b4c1364b0fa63bf67c914540c93dd71df41e1
|
glue/core/hub.py
|
python
|
Hub.subscribe
|
(self, subscriber, message_class,
handler=None,
filter=lambda x: True)
|
Subscribe an object to a type of message class.
:param subscriber: The subscribing object
:type subscriber: :class:`~glue.core.hub.HubListener`
:param message_class: A :class:`~glue.core.message.Message` class
to subscribe to
:param handler:
An optional function of the form handler(message) that will
receive the message on behalf of the subscriber. If not provided,
this defaults to the HubListener's notify method
:param filter:
An optional function of the form filter(message). Messages
are only passed to the subscriber if filter(message) == True.
The default is to always pass messages.
Raises:
InvalidMessage: If the input class isn't a
:class:`~glue.core.message.Message` class
InvalidSubscriber: If the input subscriber isn't a
HubListener object.
|
Subscribe an object to a type of message class.
|
[
"Subscribe",
"an",
"object",
"to",
"a",
"type",
"of",
"message",
"class",
"."
] |
def subscribe(self, subscriber, message_class,
handler=None,
filter=lambda x: True):
"""Subscribe an object to a type of message class.
:param subscriber: The subscribing object
:type subscriber: :class:`~glue.core.hub.HubListener`
:param message_class: A :class:`~glue.core.message.Message` class
to subscribe to
:param handler:
An optional function of the form handler(message) that will
receive the message on behalf of the subscriber. If not provided,
this defaults to the HubListener's notify method
:param filter:
An optional function of the form filter(message). Messages
are only passed to the subscriber if filter(message) == True.
The default is to always pass messages.
Raises:
InvalidMessage: If the input class isn't a
:class:`~glue.core.message.Message` class
InvalidSubscriber: If the input subscriber isn't a
HubListener object.
"""
if not isinstance(subscriber, HubListener):
raise InvalidSubscriber("Subscriber must be a HubListener: %s" %
type(subscriber))
if not isinstance(message_class, type) or \
not issubclass(message_class, Message):
raise InvalidMessage("message class must be a subclass of "
"glue.Message: %s" % type(message_class))
logging.getLogger(__name__).info("Subscribing %s to %s",
subscriber, message_class.__name__)
if not handler:
handler = subscriber.notify
if subscriber not in self._subscriptions:
self._subscriptions[subscriber] = HubCallbackContainer()
self._subscriptions[subscriber][message_class] = handler, filter
|
[
"def",
"subscribe",
"(",
"self",
",",
"subscriber",
",",
"message_class",
",",
"handler",
"=",
"None",
",",
"filter",
"=",
"lambda",
"x",
":",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"subscriber",
",",
"HubListener",
")",
":",
"raise",
"InvalidSubscriber",
"(",
"\"Subscriber must be a HubListener: %s\"",
"%",
"type",
"(",
"subscriber",
")",
")",
"if",
"not",
"isinstance",
"(",
"message_class",
",",
"type",
")",
"or",
"not",
"issubclass",
"(",
"message_class",
",",
"Message",
")",
":",
"raise",
"InvalidMessage",
"(",
"\"message class must be a subclass of \"",
"\"glue.Message: %s\"",
"%",
"type",
"(",
"message_class",
")",
")",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"info",
"(",
"\"Subscribing %s to %s\"",
",",
"subscriber",
",",
"message_class",
".",
"__name__",
")",
"if",
"not",
"handler",
":",
"handler",
"=",
"subscriber",
".",
"notify",
"if",
"subscriber",
"not",
"in",
"self",
".",
"_subscriptions",
":",
"self",
".",
"_subscriptions",
"[",
"subscriber",
"]",
"=",
"HubCallbackContainer",
"(",
")",
"self",
".",
"_subscriptions",
"[",
"subscriber",
"]",
"[",
"message_class",
"]",
"=",
"handler",
",",
"filter"
] |
https://github.com/glue-viz/glue/blob/840b4c1364b0fa63bf67c914540c93dd71df41e1/glue/core/hub.py#L69-L116
|
||
IJDykeman/wangTiles
|
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
|
experimental_code/tiles_3d/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
|
python
|
_Stream.seek
|
(self, pos=0)
|
return self.pos
|
Set the stream's file pointer to pos. Negative seeking
is forbidden.
|
Set the stream's file pointer to pos. Negative seeking
is forbidden.
|
[
"Set",
"the",
"stream",
"s",
"file",
"pointer",
"to",
"pos",
".",
"Negative",
"seeking",
"is",
"forbidden",
"."
] |
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
|
[
"def",
"seek",
"(",
"self",
",",
"pos",
"=",
"0",
")",
":",
"if",
"pos",
"-",
"self",
".",
"pos",
">=",
"0",
":",
"blocks",
",",
"remainder",
"=",
"divmod",
"(",
"pos",
"-",
"self",
".",
"pos",
",",
"self",
".",
"bufsize",
")",
"for",
"i",
"in",
"range",
"(",
"blocks",
")",
":",
"self",
".",
"read",
"(",
"self",
".",
"bufsize",
")",
"self",
".",
"read",
"(",
"remainder",
")",
"else",
":",
"raise",
"StreamError",
"(",
"\"seeking backwards is not allowed\"",
")",
"return",
"self",
".",
"pos"
] |
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L552-L563
|
|
wistbean/learn_python3_spider
|
73c873f4845f4385f097e5057407d03dd37a117b
|
stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/serializer.py
|
python
|
HTMLSerializer.render
|
(self, treewalker, encoding=None)
|
Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
|
Serializes the stream from the treewalker into a string
|
[
"Serializes",
"the",
"stream",
"from",
"the",
"treewalker",
"into",
"a",
"string"
] |
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
|
[
"def",
"render",
"(",
"self",
",",
"treewalker",
",",
"encoding",
"=",
"None",
")",
":",
"if",
"encoding",
":",
"return",
"b\"\"",
".",
"join",
"(",
"list",
"(",
"self",
".",
"serialize",
"(",
"treewalker",
",",
"encoding",
")",
")",
")",
"else",
":",
"return",
"\"\"",
".",
"join",
"(",
"list",
"(",
"self",
".",
"serialize",
"(",
"treewalker",
")",
")",
")"
] |
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/serializer.py#L375-L398
|
||
googlefonts/nototools
|
903a218f62256a286cde48c76b3051703f8a1de5
|
nototools/generate_samples.py
|
python
|
_canonicalize_escapes
|
(definition_lines)
|
return out_lines
|
Replace each escape of a reserved character with a unicode escape.
|
Replace each escape of a reserved character with a unicode escape.
|
[
"Replace",
"each",
"escape",
"of",
"a",
"reserved",
"character",
"with",
"a",
"unicode",
"escape",
"."
] |
def _canonicalize_escapes(definition_lines):
"""Replace each escape of a reserved character with a unicode escape."""
out_lines = []
for line in definition_lines:
if "\\" in line:
for old, new in _ESCAPES:
line = line.replace(old, new)
out_lines.append(line)
return out_lines
|
[
"def",
"_canonicalize_escapes",
"(",
"definition_lines",
")",
":",
"out_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"definition_lines",
":",
"if",
"\"\\\\\"",
"in",
"line",
":",
"for",
"old",
",",
"new",
"in",
"_ESCAPES",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"old",
",",
"new",
")",
"out_lines",
".",
"append",
"(",
"line",
")",
"return",
"out_lines"
] |
https://github.com/googlefonts/nototools/blob/903a218f62256a286cde48c76b3051703f8a1de5/nototools/generate_samples.py#L260-L268
|
|
keiffster/program-y
|
8c99b56f8c32f01a7b9887b5daae9465619d0385
|
src/programy/storage/stores/sql/dao/node.py
|
python
|
PatternNode.__repr__
|
(self)
|
return "<Pattern Node(id='%s', name='%s', node_class='%s')>" % (
DAOUtils.valid_id(self.id), self.name, self.node_class)
|
[] |
def __repr__(self):
return "<Pattern Node(id='%s', name='%s', node_class='%s')>" % (
DAOUtils.valid_id(self.id), self.name, self.node_class)
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"\"<Pattern Node(id='%s', name='%s', node_class='%s')>\"",
"%",
"(",
"DAOUtils",
".",
"valid_id",
"(",
"self",
".",
"id",
")",
",",
"self",
".",
"name",
",",
"self",
".",
"node_class",
")"
] |
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/storage/stores/sql/dao/node.py#L33-L35
|
|||
bamtercelboo/cnn-lstm-bilstm-deepcnn-clstm-in-pytorch
|
71b8c3f21b6dbc39562a3f8f221e90a1c7d9592f
|
main.py
|
python
|
start_train
|
(model, train_iter, dev_iter, test_iter)
|
:function:start train
:param model:
:param train_iter:
:param dev_iter:
:param test_iter:
:return:
|
:function:start train
:param model:
:param train_iter:
:param dev_iter:
:param test_iter:
:return:
|
[
":",
"function:start",
"train",
":",
"param",
"model",
":",
":",
"param",
"train_iter",
":",
":",
"param",
"dev_iter",
":",
":",
"param",
"test_iter",
":",
":",
"return",
":"
] |
def start_train(model, train_iter, dev_iter, test_iter):
"""
:function:start train
:param model:
:param train_iter:
:param dev_iter:
:param test_iter:
:return:
"""
if config.predict is not None:
label = train_ALL_CNN.predict(config.predict, model, config.text_field, config.label_field)
print('\n[Text] {}[Label] {}\n'.format(config.predict, label))
elif config.test:
try:
print(test_iter)
train_ALL_CNN.test_eval(test_iter, model, config)
except Exception as e:
print("\nSorry. The test dataset doesn't exist.\n")
else:
print("\n cpu_count \n", mu.cpu_count())
torch.set_num_threads(config.num_threads)
if os.path.exists("./Test_Result.txt"):
os.remove("./Test_Result.txt")
if config.CNN:
print("CNN training start......")
model_count = train_ALL_CNN.train(train_iter, dev_iter, test_iter, model, config)
elif config.DEEP_CNN:
print("DEEP_CNN training start......")
model_count = train_ALL_CNN.train(train_iter, dev_iter, test_iter, model, config)
elif config.LSTM:
print("LSTM training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.GRU:
print("GRU training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.BiLSTM:
print("BiLSTM training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.BiLSTM_1:
print("BiLSTM_1 training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CNN_LSTM:
print("CNN_LSTM training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CLSTM:
print("CLSTM training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CBiLSTM:
print("CBiLSTM training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CGRU:
print("CGRU training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CNN_BiLSTM:
print("CNN_BiLSTM training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.BiGRU:
print("BiGRU training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CNN_BiGRU:
print("CNN_BiGRU training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
elif config.CNN_MUI:
print("CNN_MUI training start......")
model_count = train_ALL_CNN.train(train_iter, dev_iter, test_iter, model, config)
elif config.DEEP_CNN_MUI:
print("DEEP_CNN_MUI training start......")
model_count = train_ALL_CNN.train(train_iter, dev_iter, test_iter, model, config)
elif config.HighWay_CNN is True:
print("HighWay_CNN training start......")
model_count = train_ALL_CNN.train(train_iter, dev_iter, test_iter, model, config)
elif config.HighWay_BiLSTM_1 is True:
print("HighWay_BiLSTM_1 training start......")
model_count = train_ALL_LSTM.train(train_iter, dev_iter, test_iter, model, config)
print("Model_count", model_count)
resultlist = []
if os.path.exists("./Test_Result.txt"):
file = open("./Test_Result.txt")
for line in file.readlines():
if line[:10] == "Evaluation":
resultlist.append(float(line[34:41]))
result = sorted(resultlist)
file.close()
file = open("./Test_Result.txt", "a")
file.write("\nThe Best Result is : " + str(result[len(result) - 1]))
file.write("\n")
file.close()
shutil.copy("./Test_Result.txt", "./snapshot/" + config.mulu + "/Test_Result.txt")
|
[
"def",
"start_train",
"(",
"model",
",",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
")",
":",
"if",
"config",
".",
"predict",
"is",
"not",
"None",
":",
"label",
"=",
"train_ALL_CNN",
".",
"predict",
"(",
"config",
".",
"predict",
",",
"model",
",",
"config",
".",
"text_field",
",",
"config",
".",
"label_field",
")",
"print",
"(",
"'\\n[Text] {}[Label] {}\\n'",
".",
"format",
"(",
"config",
".",
"predict",
",",
"label",
")",
")",
"elif",
"config",
".",
"test",
":",
"try",
":",
"print",
"(",
"test_iter",
")",
"train_ALL_CNN",
".",
"test_eval",
"(",
"test_iter",
",",
"model",
",",
"config",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"\\nSorry. The test dataset doesn't exist.\\n\"",
")",
"else",
":",
"print",
"(",
"\"\\n cpu_count \\n\"",
",",
"mu",
".",
"cpu_count",
"(",
")",
")",
"torch",
".",
"set_num_threads",
"(",
"config",
".",
"num_threads",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"\"./Test_Result.txt\"",
")",
":",
"os",
".",
"remove",
"(",
"\"./Test_Result.txt\"",
")",
"if",
"config",
".",
"CNN",
":",
"print",
"(",
"\"CNN training start......\"",
")",
"model_count",
"=",
"train_ALL_CNN",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"DEEP_CNN",
":",
"print",
"(",
"\"DEEP_CNN training start......\"",
")",
"model_count",
"=",
"train_ALL_CNN",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"LSTM",
":",
"print",
"(",
"\"LSTM training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"GRU",
":",
"print",
"(",
"\"GRU training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"BiLSTM",
":",
"print",
"(",
"\"BiLSTM training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"BiLSTM_1",
":",
"print",
"(",
"\"BiLSTM_1 training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CNN_LSTM",
":",
"print",
"(",
"\"CNN_LSTM training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CLSTM",
":",
"print",
"(",
"\"CLSTM training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CBiLSTM",
":",
"print",
"(",
"\"CBiLSTM training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CGRU",
":",
"print",
"(",
"\"CGRU training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CNN_BiLSTM",
":",
"print",
"(",
"\"CNN_BiLSTM training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"BiGRU",
":",
"print",
"(",
"\"BiGRU training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CNN_BiGRU",
":",
"print",
"(",
"\"CNN_BiGRU training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"CNN_MUI",
":",
"print",
"(",
"\"CNN_MUI training start......\"",
")",
"model_count",
"=",
"train_ALL_CNN",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"DEEP_CNN_MUI",
":",
"print",
"(",
"\"DEEP_CNN_MUI training start......\"",
")",
"model_count",
"=",
"train_ALL_CNN",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"HighWay_CNN",
"is",
"True",
":",
"print",
"(",
"\"HighWay_CNN training start......\"",
")",
"model_count",
"=",
"train_ALL_CNN",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"elif",
"config",
".",
"HighWay_BiLSTM_1",
"is",
"True",
":",
"print",
"(",
"\"HighWay_BiLSTM_1 training start......\"",
")",
"model_count",
"=",
"train_ALL_LSTM",
".",
"train",
"(",
"train_iter",
",",
"dev_iter",
",",
"test_iter",
",",
"model",
",",
"config",
")",
"print",
"(",
"\"Model_count\"",
",",
"model_count",
")",
"resultlist",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"\"./Test_Result.txt\"",
")",
":",
"file",
"=",
"open",
"(",
"\"./Test_Result.txt\"",
")",
"for",
"line",
"in",
"file",
".",
"readlines",
"(",
")",
":",
"if",
"line",
"[",
":",
"10",
"]",
"==",
"\"Evaluation\"",
":",
"resultlist",
".",
"append",
"(",
"float",
"(",
"line",
"[",
"34",
":",
"41",
"]",
")",
")",
"result",
"=",
"sorted",
"(",
"resultlist",
")",
"file",
".",
"close",
"(",
")",
"file",
"=",
"open",
"(",
"\"./Test_Result.txt\"",
",",
"\"a\"",
")",
"file",
".",
"write",
"(",
"\"\\nThe Best Result is : \"",
"+",
"str",
"(",
"result",
"[",
"len",
"(",
"result",
")",
"-",
"1",
"]",
")",
")",
"file",
".",
"write",
"(",
"\"\\n\"",
")",
"file",
".",
"close",
"(",
")",
"shutil",
".",
"copy",
"(",
"\"./Test_Result.txt\"",
",",
"\"./snapshot/\"",
"+",
"config",
".",
"mulu",
"+",
"\"/Test_Result.txt\"",
")"
] |
https://github.com/bamtercelboo/cnn-lstm-bilstm-deepcnn-clstm-in-pytorch/blob/71b8c3f21b6dbc39562a3f8f221e90a1c7d9592f/main.py#L328-L415
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.