nwo
stringlengths 5
86
| sha
stringlengths 40
40
| path
stringlengths 4
189
| language
stringclasses 1
value | identifier
stringlengths 1
94
| parameters
stringlengths 2
4.03k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
11.5k
| docstring
stringlengths 1
33.2k
| docstring_summary
stringlengths 0
5.15k
| docstring_tokens
list | function
stringlengths 34
151k
| function_tokens
list | url
stringlengths 90
278
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Tencent/CMONGO
|
c40380caa14e05509f46993aa8b8da966b09b0b5
|
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Scanner/Dir.py
|
python
|
scan_in_memory
|
(node, env, path=())
|
return [entries[n] for n in entry_list]
|
"Scans" a Node.FS.Dir for its in-memory entries.
|
"Scans" a Node.FS.Dir for its in-memory entries.
|
[
"Scans",
"a",
"Node",
".",
"FS",
".",
"Dir",
"for",
"its",
"in",
"-",
"memory",
"entries",
"."
] |
def scan_in_memory(node, env, path=()):
    """
    "Scans" a Node.FS.Dir for its in-memory entries.

    Returns the entry nodes (sorted by name) that pass the module-level
    do_not_scan filter, or an empty list when the node has no .entries
    attribute.
    """
    try:
        entries = node.entries
    except AttributeError:
        # It's not a Node.FS.Dir (or doesn't look enough like one for
        # our purposes), which can happen if a target list containing
        # mixed Node types (Dirs and Files, for example) has a Dir as
        # the first entry.
        return []
    # filter() accepts any iterable, so the intermediate list() around
    # entries.keys() was a needless materialization and is dropped.
    entry_list = sorted(filter(do_not_scan, entries.keys()))
    return [entries[n] for n in entry_list]
|
[
"def",
"scan_in_memory",
"(",
"node",
",",
"env",
",",
"path",
"=",
"(",
")",
")",
":",
"try",
":",
"entries",
"=",
"node",
".",
"entries",
"except",
"AttributeError",
":",
"# It's not a Node.FS.Dir (or doesn't look enough like one for",
"# our purposes), which can happen if a target list containing",
"# mixed Node types (Dirs and Files, for example) has a Dir as",
"# the first entry.",
"return",
"[",
"]",
"entry_list",
"=",
"sorted",
"(",
"filter",
"(",
"do_not_scan",
",",
"list",
"(",
"entries",
".",
"keys",
"(",
")",
")",
")",
")",
"return",
"[",
"entries",
"[",
"n",
"]",
"for",
"n",
"in",
"entry_list",
"]"
] |
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Scanner/Dir.py#L90-L103
|
|
FreeCAD/FreeCAD
|
ba42231b9c6889b89e064d6d563448ed81e376ec
|
src/Mod/PartDesign/WizardShaft/SegmentFunction.py
|
python
|
SegmentFunction.findSegment
|
(self, xval)
|
return self.segments[len(self.segments)]
|
Find segment valid for the given xval
|
Find segment valid for the given xval
|
[
"Find",
"segment",
"valid",
"for",
"the",
"given",
"xval"
] |
def findSegment(self, xval):
    """Find segment valid for the given xval.

    Returns the first segment whose start is <= xval. If no segment
    qualifies, returns the last segment as a fallback. Raises
    IndexError if self.segments is empty.
    """
    for segment in self.segments:
        if segment.start <= xval:
            return segment
    # Bug fix: the original fallthrough indexed
    # self.segments[len(self.segments)], which is always one past the
    # end and raised IndexError unconditionally. Return the final
    # segment instead, which is the evident intent.
    return self.segments[-1]
|
[
"def",
"findSegment",
"(",
"self",
",",
"xval",
")",
":",
"for",
"s",
"in",
"self",
".",
"segments",
":",
"if",
"s",
".",
"start",
"<=",
"xval",
":",
"return",
"s",
"return",
"self",
".",
"segments",
"[",
"len",
"(",
"self",
".",
"segments",
")",
"]"
] |
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/PartDesign/WizardShaft/SegmentFunction.py#L80-L85
|
|
google/or-tools
|
2cb85b4eead4c38e1c54b48044f92087cf165bce
|
examples/python/appointments.py
|
python
|
AllSolutionCollector.combinations
|
(self)
|
return self.__collect
|
Returns all collected combinations.
|
Returns all collected combinations.
|
[
"Returns",
"all",
"collected",
"combinations",
"."
] |
def combinations(self):
    """Returns all collected combinations."""
    collected = self.__collect
    return collected
|
[
"def",
"combinations",
"(",
"self",
")",
":",
"return",
"self",
".",
"__collect"
] |
https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/examples/python/appointments.py#L49-L51
|
|
BitMEX/api-connectors
|
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
|
auto-generated/python/swagger_client/models/stats_history.py
|
python
|
StatsHistory.root_symbol
|
(self, root_symbol)
|
Sets the root_symbol of this StatsHistory.
:param root_symbol: The root_symbol of this StatsHistory. # noqa: E501
:type: str
|
Sets the root_symbol of this StatsHistory.
|
[
"Sets",
"the",
"root_symbol",
"of",
"this",
"StatsHistory",
"."
] |
def root_symbol(self, root_symbol):
    """Set the root_symbol of this StatsHistory.

    :param root_symbol: The root_symbol of this StatsHistory.
    :type: str
    :raises ValueError: if ``root_symbol`` is ``None``.
    """
    if root_symbol is not None:
        self._root_symbol = root_symbol
        return
    raise ValueError("Invalid value for `root_symbol`, must not be `None`")  # noqa: E501
|
[
"def",
"root_symbol",
"(",
"self",
",",
"root_symbol",
")",
":",
"if",
"root_symbol",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `root_symbol`, must not be `None`\"",
")",
"# noqa: E501",
"self",
".",
"_root_symbol",
"=",
"root_symbol"
] |
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/stats_history.py#L102-L112
|
||
trailofbits/llvm-sanitizer-tutorial
|
d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99
|
llvm/tools/clang/docs/tools/dump_ast_matchers.py
|
python
|
extract_result_types
|
(comment)
|
Extracts a list of result types from the given comment.
We allow annotations in the comment of the matcher to specify what
nodes a matcher can match on. Those comments have the form:
Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])
Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
Returns the empty list if no 'Usable as' specification could be
parsed.
|
Extracts a list of result types from the given comment.
|
[
"Extracts",
"a",
"list",
"of",
"result",
"types",
"from",
"the",
"given",
"comment",
"."
] |
def extract_result_types(comment):
    """Extract the list of result types annotated in a matcher comment.

    Comments may declare which nodes a matcher applies to via a trailing
    annotation of the form:

      Usable as: Any Matcher | (Matcher<T1>[, Matcher<T2>[, ...]])

    Returns ['*'] for 'Any Matcher'; otherwise the bound types, peeled
    off right-to-left (so 'Matcher<A>, Matcher<B>' yields ['B', 'A']).
    Returns [] for a bare 'Usable as:' with no matchers, and None when
    no 'Usable as' annotation could be parsed at all.
    """
    if re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S):
        return ['*']
    result_types = []
    while True:
        match = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
        if match is None:
            # No more trailing Matcher<...> tokens; the annotation is
            # valid only if what remains ends with the marker itself.
            if re.search(r'Usable as:\s*$', comment):
                return result_types
            return None
        result_types.append(match.group(2))
        comment = match.group(1)
|
[
"def",
"extract_result_types",
"(",
"comment",
")",
":",
"result_types",
"=",
"[",
"]",
"m",
"=",
"re",
".",
"search",
"(",
"r'Usable as: Any Matcher[\\s\\n]*$'",
",",
"comment",
",",
"re",
".",
"S",
")",
"if",
"m",
":",
"return",
"[",
"'*'",
"]",
"while",
"True",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'^(.*)Matcher<([^>]+)>\\s*,?[\\s\\n]*$'",
",",
"comment",
",",
"re",
".",
"S",
")",
"if",
"not",
"m",
":",
"if",
"re",
".",
"search",
"(",
"r'Usable as:\\s*$'",
",",
"comment",
")",
":",
"return",
"result_types",
"else",
":",
"return",
"None",
"result_types",
"+=",
"[",
"m",
".",
"group",
"(",
"2",
")",
"]",
"comment",
"=",
"m",
".",
"group",
"(",
"1",
")"
] |
https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/tools/clang/docs/tools/dump_ast_matchers.py#L60-L83
|
||
krishauser/Klampt
|
972cc83ea5befac3f653c1ba20f80155768ad519
|
Python/python2_version/klampt/robotsim.py
|
python
|
RobotModel.numDrivers
|
(self)
|
return _robotsim.RobotModel_numDrivers(self)
|
numDrivers(RobotModel self) -> int
Returns the number of drivers.
|
numDrivers(RobotModel self) -> int
|
[
"numDrivers",
"(",
"RobotModel",
"self",
")",
"-",
">",
"int"
] |
def numDrivers(self):
    """numDrivers(RobotModel self) -> int

    Return the number of drivers by delegating to the SWIG-wrapped
    native implementation.
    """
    driver_count = _robotsim.RobotModel_numDrivers(self)
    return driver_count
|
[
"def",
"numDrivers",
"(",
"self",
")",
":",
"return",
"_robotsim",
".",
"RobotModel_numDrivers",
"(",
"self",
")"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/robotsim.py#L4591-L4600
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/nntplib.py
|
python
|
NNTP.quit
|
(self)
|
return resp
|
Process a QUIT command and close the socket. Returns:
- resp: server response if successful
|
Process a QUIT command and close the socket. Returns:
- resp: server response if successful
|
[
"Process",
"a",
"QUIT",
"command",
"and",
"close",
"the",
"socket",
".",
"Returns",
":",
"-",
"resp",
":",
"server",
"response",
"if",
"successful"
] |
def quit(self):
    """Process a QUIT command and close the socket.

    Returns:
      resp: server response if successful
    """
    response = self.shortcmd('QUIT')
    # Release both halves of the connection, file first, then drop the
    # attributes so further use fails loudly.
    for closable in (self.file, self.sock):
        closable.close()
    del self.file, self.sock
    return response
|
[
"def",
"quit",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"shortcmd",
"(",
"'QUIT'",
")",
"self",
".",
"file",
".",
"close",
"(",
")",
"self",
".",
"sock",
".",
"close",
"(",
")",
"del",
"self",
".",
"file",
",",
"self",
".",
"sock",
"return",
"resp"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/nntplib.py#L595-L603
|
|
tkn-tub/ns3-gym
|
19bfe0a583e641142609939a090a09dfc63a095f
|
src/visualizer/visualizer/plugins/ipv4_routing_table.py
|
python
|
ShowIpv4RoutingTable.__init__
|
(self, visualizer, node_index)
|
Initializer
@param self this object
@param visualizer visualizer object
@param node_index the node index
@return the statistics
|
Initializer
|
[
"Initializer"
] |
def __init__(self, visualizer, node_index):
    """
    Build the IPv4-routing-table dialog for one node and register it
    with the visualizer.

    @param self this object
    @param visualizer visualizer object
    @param node_index the node index
    """
    InformationWindow.__init__(self)
    # Dialog is owned by the main visualizer window; the close button
    # is handled via _response_cb.
    self.win = Gtk.Dialog(parent=visualizer.window,
                          flags=Gtk.DialogFlags.DESTROY_WITH_PARENT,
                          buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
    self.win.connect("response", self._response_cb)
    self.win.set_title("IPv4 routing table for node %i" % node_index)
    self.visualizer = visualizer
    self.node_index = node_index
    # One row per route: four string columns plus an int column; the
    # order must match the COLUMN_* indices used below.
    self.table_model = Gtk.ListStore(str, str, str, str, int)
    treeview = Gtk.TreeView(self.table_model)
    treeview.show()
    # Scrolled container so long tables stay usable.
    sw = Gtk.ScrolledWindow()
    sw.set_properties(hscrollbar_policy=Gtk.PolicyType.AUTOMATIC,
                      vscrollbar_policy=Gtk.PolicyType.AUTOMATIC)
    sw.show()
    sw.add(treeview)
    self.win.vbox.add(sw)
    self.win.set_default_size(600, 300)
    # Dest.
    column = Gtk.TreeViewColumn('Destination', Gtk.CellRendererText(),
                                text=self.COLUMN_DESTINATION)
    treeview.append_column(column)
    # Next hop
    column = Gtk.TreeViewColumn('Next hop', Gtk.CellRendererText(),
                                text=self.COLUMN_NEXT_HOP)
    treeview.append_column(column)
    # Interface
    column = Gtk.TreeViewColumn('Interface', Gtk.CellRendererText(),
                                text=self.COLUMN_INTERFACE)
    treeview.append_column(column)
    # Type
    column = Gtk.TreeViewColumn('Type', Gtk.CellRendererText(),
                                text=self.COLUMN_TYPE)
    treeview.append_column(column)
    # Prio
    column = Gtk.TreeViewColumn('Prio', Gtk.CellRendererText(),
                                text=self.COLUMN_PRIO)
    treeview.append_column(column)
    # Register with the visualizer so it participates in refreshes,
    # then show the dialog.
    self.visualizer.add_information_window(self)
    self.win.show()
|
[
"def",
"__init__",
"(",
"self",
",",
"visualizer",
",",
"node_index",
")",
":",
"InformationWindow",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"win",
"=",
"Gtk",
".",
"Dialog",
"(",
"parent",
"=",
"visualizer",
".",
"window",
",",
"flags",
"=",
"Gtk",
".",
"DialogFlags",
".",
"DESTROY_WITH_PARENT",
",",
"buttons",
"=",
"(",
"Gtk",
".",
"STOCK_CLOSE",
",",
"Gtk",
".",
"ResponseType",
".",
"CLOSE",
")",
")",
"self",
".",
"win",
".",
"connect",
"(",
"\"response\"",
",",
"self",
".",
"_response_cb",
")",
"self",
".",
"win",
".",
"set_title",
"(",
"\"IPv4 routing table for node %i\"",
"%",
"node_index",
")",
"self",
".",
"visualizer",
"=",
"visualizer",
"self",
".",
"node_index",
"=",
"node_index",
"self",
".",
"table_model",
"=",
"Gtk",
".",
"ListStore",
"(",
"str",
",",
"str",
",",
"str",
",",
"str",
",",
"int",
")",
"treeview",
"=",
"Gtk",
".",
"TreeView",
"(",
"self",
".",
"table_model",
")",
"treeview",
".",
"show",
"(",
")",
"sw",
"=",
"Gtk",
".",
"ScrolledWindow",
"(",
")",
"sw",
".",
"set_properties",
"(",
"hscrollbar_policy",
"=",
"Gtk",
".",
"PolicyType",
".",
"AUTOMATIC",
",",
"vscrollbar_policy",
"=",
"Gtk",
".",
"PolicyType",
".",
"AUTOMATIC",
")",
"sw",
".",
"show",
"(",
")",
"sw",
".",
"add",
"(",
"treeview",
")",
"self",
".",
"win",
".",
"vbox",
".",
"add",
"(",
"sw",
")",
"self",
".",
"win",
".",
"set_default_size",
"(",
"600",
",",
"300",
")",
"# Dest.",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Destination'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_DESTINATION",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Next hop",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Next hop'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_NEXT_HOP",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Interface",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Interface'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_INTERFACE",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Type",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Type'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_TYPE",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Prio",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Prio'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_PRIO",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"self",
".",
"visualizer",
".",
"add_information_window",
"(",
"self",
")",
"self",
".",
"win",
".",
"show",
"(",
")"
] |
https://github.com/tkn-tub/ns3-gym/blob/19bfe0a583e641142609939a090a09dfc63a095f/src/visualizer/visualizer/plugins/ipv4_routing_table.py#L27-L82
|
||
apache/mesos
|
97d9a4063332aae3825d78de71611657e05cf5e2
|
src/python/cli_new/lib/cli/util.py
|
python
|
Table.__init__
|
(self, columns)
|
Initialize a table with a list of column names
to act as headers for each column in the table.
|
Initialize a table with a list of column names
to act as headers for each column in the table.
|
[
"Initialize",
"a",
"table",
"with",
"a",
"list",
"of",
"column",
"names",
"to",
"act",
"as",
"headers",
"for",
"each",
"column",
"in",
"the",
"table",
"."
] |
def __init__(self, columns):
    """
    Initialize a table with a list of column names
    to act as headers for each column in the table.

    Raises CLIException when `columns` is not a list or a header
    contains a run of repeated whitespace.
    """
    if not isinstance(columns, list):
        raise CLIException("Column headers must be supplied as a list")
    # NOTE(review): the pattern flags 3+ identical consecutive
    # whitespace characters, while the message says "more than one
    # space" — confirm which is intended before changing either.
    for header in columns:
        if re.search(r"(\s)\1{2,}", header):
            raise CLIException("Column headers cannot have more"
                               " than one space between words")
    self.table = [columns]
    self.padding = [len(header) for header in columns]
|
[
"def",
"__init__",
"(",
"self",
",",
"columns",
")",
":",
"if",
"not",
"isinstance",
"(",
"columns",
",",
"list",
")",
":",
"raise",
"CLIException",
"(",
"\"Column headers must be supplied as a list\"",
")",
"for",
"column",
"in",
"columns",
":",
"if",
"re",
".",
"search",
"(",
"r\"(\\s)\\1{2,}\"",
",",
"column",
")",
":",
"raise",
"CLIException",
"(",
"\"Column headers cannot have more\"",
"\" than one space between words\"",
")",
"self",
".",
"table",
"=",
"[",
"columns",
"]",
"self",
".",
"padding",
"=",
"[",
"len",
"(",
"column",
")",
"for",
"column",
"in",
"columns",
"]"
] |
https://github.com/apache/mesos/blob/97d9a4063332aae3825d78de71611657e05cf5e2/src/python/cli_new/lib/cli/util.py#L307-L321
|
||
panda3d/panda3d
|
833ad89ebad58395d0af0b7ec08538e5e4308265
|
direct/src/showbase/BufferViewer.py
|
python
|
BufferViewer.advanceCard
|
(self)
|
Only useful when using setLayout('cycle'). Increments the index
that selects which card to display. The index is taken modulo
the actual number of cards.
|
Only useful when using setLayout('cycle'). Increments the index
that selects which card to display. The index is taken modulo
the actual number of cards.
|
[
"Only",
"useful",
"when",
"using",
"setLayout",
"(",
"cycle",
")",
".",
"Increments",
"the",
"index",
"that",
"selects",
"which",
"card",
"to",
"display",
".",
"The",
"index",
"is",
"taken",
"modulo",
"the",
"actual",
"number",
"of",
"cards",
"."
] |
def advanceCard(self):
    """Only useful when using setLayout('cycle').  Advance the index
    selecting which card to display; consumers interpret the index
    modulo the actual number of cards, so no wrapping is done here.
    Marks the viewer dirty so the display is rebuilt.
    """
    self.cardindex = self.cardindex + 1
    self.dirty = 1
|
[
"def",
"advanceCard",
"(",
"self",
")",
":",
"self",
".",
"cardindex",
"+=",
"1",
"self",
".",
"dirty",
"=",
"1"
] |
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/showbase/BufferViewer.py#L158-L163
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/generator.py
|
python
|
RtfColorTbl.GetColorIndex
|
(self, si_color)
|
Gets the index of a particular style items color
definition from the color table. Returns -1 if item is
not found.
@param si_color: style item color to find index in table for
@return: the colors index in the table
|
Gets the index of a particular style items color
definition from the color table. Returns -1 if item is
not found.
@param si_color: style item color to find index in table for
@return: the colors index in the table
|
[
"Gets",
"the",
"index",
"of",
"a",
"particular",
"style",
"items",
"color",
"definition",
"from",
"the",
"color",
"table",
".",
"Returns",
"-",
"1",
"if",
"item",
"is",
"not",
"found",
".",
"@param",
"si_color",
":",
"style",
"item",
"color",
"to",
"find",
"index",
"in",
"table",
"for",
"@return",
":",
"the",
"colors",
"index",
"in",
"the",
"table"
] |
def GetColorIndex(self, si_color):
    """Get the index of a particular style item's color definition
    in the color table.

    @param si_color: style item color to find index in table for
    @return: the color's index in the table, or -1 if not found
    """
    # EAFP: one scan via index() instead of the original membership
    # test followed by a second scan.
    try:
        return self._index.index(si_color)
    except ValueError:
        return -1
|
[
"def",
"GetColorIndex",
"(",
"self",
",",
"si_color",
")",
":",
"if",
"si_color",
"in",
"self",
".",
"_index",
":",
"return",
"self",
".",
"_index",
".",
"index",
"(",
"si_color",
")",
"else",
":",
"return",
"-",
"1"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/generator.py#L928-L939
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/ultimatelistctrl.py
|
python
|
UltimateListItem.OnSetFocus
|
(self, event)
|
Handles the ``wx.EVT_SET_FOCUS`` event for the window associated to an item.
:param `event`: a :class:`FocusEvent` event to be processed.
|
Handles the ``wx.EVT_SET_FOCUS`` event for the window associated to an item.
|
[
"Handles",
"the",
"wx",
".",
"EVT_SET_FOCUS",
"event",
"for",
"the",
"window",
"associated",
"to",
"an",
"item",
"."
] |
def OnSetFocus(self, event):
    """
    Handles the ``wx.EVT_SET_FOCUS`` event for the window associated
    to an item.

    :param `event`: a :class:`FocusEvent` event to be processed.
    """
    listCtrl = self._wnd.GetParent()
    selected = listCtrl.GetItemState(self._itemId, ULC_STATE_SELECTED)
    # Keep the focus with the list only while the owning item is
    # selected; otherwise record that the list no longer has it.
    if selected:
        listCtrl._hasFocus = True
        listCtrl.SetFocus()
    else:
        listCtrl._hasFocus = False
    event.Skip()
|
[
"def",
"OnSetFocus",
"(",
"self",
",",
"event",
")",
":",
"listCtrl",
"=",
"self",
".",
"_wnd",
".",
"GetParent",
"(",
")",
"select",
"=",
"listCtrl",
".",
"GetItemState",
"(",
"self",
".",
"_itemId",
",",
"ULC_STATE_SELECTED",
")",
"# If the window is associated to an item that currently is selected",
"# (has focus) we don't kill the focus. Otherwise we do it.",
"if",
"not",
"select",
":",
"listCtrl",
".",
"_hasFocus",
"=",
"False",
"else",
":",
"listCtrl",
".",
"_hasFocus",
"=",
"True",
"listCtrl",
".",
"SetFocus",
"(",
")",
"event",
".",
"Skip",
"(",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ultimatelistctrl.py#L2248-L2267
|
||
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
tools/android/loading/content_classification_lens.py
|
python
|
ContentClassificationLens.__init__
|
(self, trace, ad_rules, tracking_rules)
|
Initializes an instance of ContentClassificationLens.
Args:
trace: (LoadingTrace) loading trace.
ad_rules: ([str]) List of Adblock+ compatible rules used to classify ads.
tracking_rules: ([str]) List of Adblock+ compatible rules used to
classify tracking and analytics.
|
Initializes an instance of ContentClassificationLens.
|
[
"Initializes",
"an",
"instance",
"of",
"ContentClassificationLens",
"."
] |
def __init__(self, trace, ad_rules, tracking_rules):
    """Initializes an instance of ContentClassificationLens.

    Args:
      trace: (LoadingTrace) loading trace.
      ad_rules: ([str]) List of Adblock+ compatible rules used to classify ads.
      tracking_rules: ([str]) List of Adblock+ compatible rules used to
        classify tracking and analytics.
    """
    self._trace = trace
    self._requests = trace.request_track.GetEvents()
    # Index requests by id for O(1) lookup.
    self._requests_by_id = {r.request_id: r for r in self._requests}
    # Takes the frame id from the first page event — assumes the trace
    # has at least one page event; TODO confirm for all traces.
    self._main_frame_id = trace.page_track.GetEvents()[0]['frame_id']
    self._frame_to_requests = collections.defaultdict(list)
    # Presumably populated by _LabelRequests() below — verify.
    self._ad_requests = set()
    self._tracking_requests = set()
    self._ad_matcher = _RulesMatcher(ad_rules, True)
    self._tracking_matcher = _RulesMatcher(tracking_rules, True)
    self._document_url = self._GetDocumentUrl()
    # NOTE(review): statement order looks significant — grouping
    # presumably feeds labeling; confirm before reordering.
    self._GroupRequestsByFrameId()
    self._LabelRequests()
|
[
"def",
"__init__",
"(",
"self",
",",
"trace",
",",
"ad_rules",
",",
"tracking_rules",
")",
":",
"self",
".",
"_trace",
"=",
"trace",
"self",
".",
"_requests",
"=",
"trace",
".",
"request_track",
".",
"GetEvents",
"(",
")",
"self",
".",
"_requests_by_id",
"=",
"{",
"r",
".",
"request_id",
":",
"r",
"for",
"r",
"in",
"self",
".",
"_requests",
"}",
"self",
".",
"_main_frame_id",
"=",
"trace",
".",
"page_track",
".",
"GetEvents",
"(",
")",
"[",
"0",
"]",
"[",
"'frame_id'",
"]",
"self",
".",
"_frame_to_requests",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"self",
".",
"_ad_requests",
"=",
"set",
"(",
")",
"self",
".",
"_tracking_requests",
"=",
"set",
"(",
")",
"self",
".",
"_ad_matcher",
"=",
"_RulesMatcher",
"(",
"ad_rules",
",",
"True",
")",
"self",
".",
"_tracking_matcher",
"=",
"_RulesMatcher",
"(",
"tracking_rules",
",",
"True",
")",
"self",
".",
"_document_url",
"=",
"self",
".",
"_GetDocumentUrl",
"(",
")",
"self",
".",
"_GroupRequestsByFrameId",
"(",
")",
"self",
".",
"_LabelRequests",
"(",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/android/loading/content_classification_lens.py#L19-L39
|
||
adobe/chromium
|
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
|
tools/generate_stubs/generate_stubs.py
|
python
|
ParseSignatures
|
(infile)
|
return signatures
|
Parses function signatures in the input file.
This function parses a file of signatures into a list of dictionaries that
represent the function signatures in the input file. Each dictionary has
the following keys:
return_type: A string with the return type.
name: A string with the name of the function.
params: A list of each function parameter declaration (type + name)
The format of the input file is one C-style function signature per line, no
trailing semicolon. Empty lines are allowed. An empty line is a line that
consists purely of whitespace. Lines that begin with a # are considered
comment lines and are ignored.
We assume that "int foo(void)" is the same as "int foo()", which is not
true in C where "int foo()" is equivalent to "int foo(...)". Our generated
code is C++, and we do not handle varargs, so this is a case that can be
ignored for now.
Args:
infile: File object holding a text file of function signatures.
Returns:
A list of dictionaries, where each dictionary represents one function
signature.
Raises:
BadSignatureError: A line could not be parsed as a signature.
|
Parses function signatures in the input file.
|
[
"Parses",
"function",
"signatures",
"in",
"the",
"input",
"file",
"."
] |
def ParseSignatures(infile):
    """Parses function signatures in the input file.

    Reads one C-style function signature per line (no trailing
    semicolon).  Whitespace-only lines are skipped, as are comment
    lines starting with '#'.  "int foo(void)" is treated the same as
    "int foo()" — the generated code is C++ and varargs are not
    handled, so the C distinction is ignored.

    Args:
      infile: File object holding a text file of function signatures.

    Returns:
      A list of dicts, one per signature, with keys:
        return_type: string return type.
        name: string function name.
        params: list of parameter declarations (type + name).

    Raises:
      BadSignatureError: A line could not be parsed as a signature.
    """
    signatures = []
    for raw_line in infile:
        stripped = raw_line.strip()
        # Skip blank lines and '#' comment lines.
        if not stripped or stripped.startswith('#'):
            continue
        match = SIGNATURE_REGEX.match(stripped)
        if match is None:
            raise BadSignatureError('Unparsable line: %s' % stripped)
        signatures.append({
            'return_type': match.group('return_type').strip(),
            'name': match.group('name').strip(),
            'params': [param.strip()
                       for param in match.group('params').split(',')],
        })
    return signatures
|
[
"def",
"ParseSignatures",
"(",
"infile",
")",
":",
"signatures",
"=",
"[",
"]",
"for",
"line",
"in",
"infile",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"line",
"[",
"0",
"]",
"!=",
"'#'",
":",
"m",
"=",
"SIGNATURE_REGEX",
".",
"match",
"(",
"line",
")",
"if",
"m",
"is",
"None",
":",
"raise",
"BadSignatureError",
"(",
"'Unparsable line: %s'",
"%",
"line",
")",
"signatures",
".",
"append",
"(",
"{",
"'return_type'",
":",
"m",
".",
"group",
"(",
"'return_type'",
")",
".",
"strip",
"(",
")",
",",
"'name'",
":",
"m",
".",
"group",
"(",
"'name'",
")",
".",
"strip",
"(",
")",
",",
"'params'",
":",
"[",
"arg",
".",
"strip",
"(",
")",
"for",
"arg",
"in",
"m",
".",
"group",
"(",
"'params'",
")",
".",
"split",
"(",
"','",
")",
"]",
"}",
")",
"return",
"signatures"
] |
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/generate_stubs/generate_stubs.py#L378-L419
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_core.py
|
python
|
MouseState.SetLeftDown
|
(*args, **kwargs)
|
return _core_.MouseState_SetLeftDown(*args, **kwargs)
|
SetLeftDown(self, bool down)
|
SetLeftDown(self, bool down)
|
[
"SetLeftDown",
"(",
"self",
"bool",
"down",
")"
] |
def SetLeftDown(*args, **kwargs):
    """SetLeftDown(self, bool down)

    Forward to the SWIG-generated native implementation.
    """
    result = _core_.MouseState_SetLeftDown(*args, **kwargs)
    return result
|
[
"def",
"SetLeftDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"MouseState_SetLeftDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L4494-L4496
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
demo/Grid_MegaExample.py
|
python
|
MegaTable.DeleteCols
|
(self, cols)
|
cols -> delete the columns from the dataset
cols hold the column indices
|
cols -> delete the columns from the dataset
cols hold the column indices
|
[
"cols",
"-",
">",
"delete",
"the",
"columns",
"from",
"the",
"dataset",
"cols",
"hold",
"the",
"column",
"indices"
] |
def DeleteCols(self, cols):
    """
    cols -> delete the columns from the dataset
    cols hold the column indices
    """
    # We cheat here: only the column *names* are removed.  The data
    # remains in place but is no longer shown.  Indices are handled in
    # ascending order; each removal shifts the remaining indices left,
    # so subtract the count already deleted.
    for removed, index in enumerate(sorted(cols)):
        self.colnames.pop(index - removed)
    if not self.colnames:
        self.data = []
|
[
"def",
"DeleteCols",
"(",
"self",
",",
"cols",
")",
":",
"# we'll cheat here and just remove the name from the",
"# list of column names. The data will remain but",
"# it won't be shown",
"deleteCount",
"=",
"0",
"cols",
"=",
"cols",
"[",
":",
"]",
"cols",
".",
"sort",
"(",
")",
"for",
"i",
"in",
"cols",
":",
"self",
".",
"colnames",
".",
"pop",
"(",
"i",
"-",
"deleteCount",
")",
"# we need to advance the delete count",
"# to make sure we delete the right columns",
"deleteCount",
"+=",
"1",
"if",
"not",
"len",
"(",
"self",
".",
"colnames",
")",
":",
"self",
".",
"data",
"=",
"[",
"]"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/demo/Grid_MegaExample.py#L130-L149
|
||
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mailbox.py
|
python
|
_mboxMMDF.get_file
|
(self, key, from_=False)
|
return _PartialFile(self._file, self._file.tell(), stop)
|
Return a file-like representation or raise a KeyError.
|
Return a file-like representation or raise a KeyError.
|
[
"Return",
"a",
"file",
"-",
"like",
"representation",
"or",
"raise",
"a",
"KeyError",
"."
] |
def get_file(self, key, from_=False):
    """Return a file-like representation or raise a KeyError."""
    start, stop = self._lookup(key)
    handle = self._file
    handle.seek(start)
    if not from_:
        # Consume the message's first line (presumably the 'From '
        # envelope line) unless the caller asked to keep it.
        handle.readline()
    return _PartialFile(handle, handle.tell(), stop)
|
[
"def",
"get_file",
"(",
"self",
",",
"key",
",",
"from_",
"=",
"False",
")",
":",
"start",
",",
"stop",
"=",
"self",
".",
"_lookup",
"(",
"key",
")",
"self",
".",
"_file",
".",
"seek",
"(",
"start",
")",
"if",
"not",
"from_",
":",
"self",
".",
"_file",
".",
"readline",
"(",
")",
"return",
"_PartialFile",
"(",
"self",
".",
"_file",
",",
"self",
".",
"_file",
".",
"tell",
"(",
")",
",",
"stop",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mailbox.py#L778-L784
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_gdi.py
|
python
|
DC._DrawLineList
|
(*args, **kwargs)
|
return _gdi_.DC__DrawLineList(*args, **kwargs)
|
_DrawLineList(self, PyObject pyCoords, PyObject pyPens, PyObject pyBrushes) -> PyObject
|
_DrawLineList(self, PyObject pyCoords, PyObject pyPens, PyObject pyBrushes) -> PyObject
|
[
"_DrawLineList",
"(",
"self",
"PyObject",
"pyCoords",
"PyObject",
"pyPens",
"PyObject",
"pyBrushes",
")",
"-",
">",
"PyObject"
] |
def _DrawLineList(*args, **kwargs):
    """_DrawLineList(self, PyObject pyCoords, PyObject pyPens, PyObject pyBrushes) -> PyObject

    Forward to the SWIG-generated native implementation.
    """
    result = _gdi_.DC__DrawLineList(*args, **kwargs)
    return result
|
[
"def",
"_DrawLineList",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"DC__DrawLineList",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L4741-L4743
|
|
openthread/openthread
|
9fcdbed9c526c70f1556d1ed84099c1535c7cd32
|
tools/otci/otci/otci.py
|
python
|
OTCI.srp_client_enable_service_key
|
(self)
|
Enable SRP client "service key record inclusion" mode.
|
Enable SRP client "service key record inclusion" mode.
|
[
"Enable",
"SRP",
"client",
"service",
"key",
"record",
"inclusion",
"mode",
"."
] |
def srp_client_enable_service_key(self):
    """Enable SRP client "service key record inclusion" mode."""
    command = 'srp client service key enable'
    self.execute_command(command)
|
[
"def",
"srp_client_enable_service_key",
"(",
"self",
")",
":",
"self",
".",
"execute_command",
"(",
"'srp client service key enable'",
")"
] |
https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/otci/otci/otci.py#L1169-L1171
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_core.py
|
python
|
Sizer.ShowItems
|
(*args, **kwargs)
|
return _core_.Sizer_ShowItems(*args, **kwargs)
|
ShowItems(self, bool show)
Recursively call `wx.SizerItem.Show` on all sizer items.
|
ShowItems(self, bool show)
|
[
"ShowItems",
"(",
"self",
"bool",
"show",
")"
] |
def ShowItems(*args, **kwargs):
"""
ShowItems(self, bool show)
Recursively call `wx.SizerItem.Show` on all sizer items.
"""
return _core_.Sizer_ShowItems(*args, **kwargs)
|
[
"def",
"ShowItems",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Sizer_ShowItems",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L14995-L15001
|
|
sc0ty/subsync
|
be5390d00ff475b6543eb0140c7e65b34317d95b
|
subsync/assets/__init__.py
|
python
|
getAsset
|
(assetId, params=None)
|
return assetManager().getAsset(assetId, params)
|
Get asset, alias to `assetManager().getAsset`.
|
Get asset, alias to `assetManager().getAsset`.
|
[
"Get",
"asset",
"alias",
"to",
"assetManager",
"()",
".",
"getAsset",
"."
] |
def getAsset(assetId, params=None):
"""Get asset, alias to `assetManager().getAsset`."""
return assetManager().getAsset(assetId, params)
|
[
"def",
"getAsset",
"(",
"assetId",
",",
"params",
"=",
"None",
")",
":",
"return",
"assetManager",
"(",
")",
".",
"getAsset",
"(",
"assetId",
",",
"params",
")"
] |
https://github.com/sc0ty/subsync/blob/be5390d00ff475b6543eb0140c7e65b34317d95b/subsync/assets/__init__.py#L13-L15
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/xlsgrid.py
|
python
|
XLSText.CombineAttr
|
(self, attr)
|
Combines the input attribute `attr` with the features of the :class:`XLSText` class.
:param `attr`: an instance of :class:`grid.GridCellAttr`.
|
Combines the input attribute `attr` with the features of the :class:`XLSText` class.
|
[
"Combines",
"the",
"input",
"attribute",
"attr",
"with",
"the",
"features",
"of",
"the",
":",
"class",
":",
"XLSText",
"class",
"."
] |
def CombineAttr(self, attr):
"""
Combines the input attribute `attr` with the features of the :class:`XLSText` class.
:param `attr`: an instance of :class:`grid.GridCellAttr`.
"""
attr.SetAlignment(self.horizontal_alignment, self.vertical_alignment)
attr.SetTextColour(self.text_colour)
attr.SetFont(self.font)
|
[
"def",
"CombineAttr",
"(",
"self",
",",
"attr",
")",
":",
"attr",
".",
"SetAlignment",
"(",
"self",
".",
"horizontal_alignment",
",",
"self",
".",
"vertical_alignment",
")",
"attr",
".",
"SetTextColour",
"(",
"self",
".",
"text_colour",
")",
"attr",
".",
"SetFont",
"(",
"self",
".",
"font",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/xlsgrid.py#L882-L891
|
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/hmac.py
|
python
|
HMAC.digest
|
(self)
|
return h.digest()
|
Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
|
Return the hash value of this hashing object.
|
[
"Return",
"the",
"hash",
"value",
"of",
"this",
"hashing",
"object",
"."
] |
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
|
[
"def",
"digest",
"(",
"self",
")",
":",
"h",
"=",
"self",
".",
"_current",
"(",
")",
"return",
"h",
".",
"digest",
"(",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/hmac.py#L126-L134
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/stc.py
|
python
|
StyledTextCtrl.StyleGetItalic
|
(*args, **kwargs)
|
return _stc.StyledTextCtrl_StyleGetItalic(*args, **kwargs)
|
StyleGetItalic(self, int style) -> bool
Get is a style italic or not.
|
StyleGetItalic(self, int style) -> bool
|
[
"StyleGetItalic",
"(",
"self",
"int",
"style",
")",
"-",
">",
"bool"
] |
def StyleGetItalic(*args, **kwargs):
"""
StyleGetItalic(self, int style) -> bool
Get is a style italic or not.
"""
return _stc.StyledTextCtrl_StyleGetItalic(*args, **kwargs)
|
[
"def",
"StyleGetItalic",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_StyleGetItalic",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L2610-L2616
|
|
thalium/icebox
|
99d147d5b9269222225443ce171b4fd46d8985d4
|
third_party/virtualbox/src/VBox/Devices/EFI/Firmware/AppPkg/Applications/Python/PyMod-2.7.2/Lib/pydoc.py
|
python
|
TextDoc.docclass
|
(self, object, name=None, mod=None, *ignored)
|
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
|
Produce text documentation for a given class object.
|
Produce text documentation for a given class object.
|
[
"Produce",
"text",
"documentation",
"for",
"a",
"given",
"class",
"object",
"."
] |
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
|
[
"def",
"docclass",
"(",
"self",
",",
"object",
",",
"name",
"=",
"None",
",",
"mod",
"=",
"None",
",",
"*",
"ignored",
")",
":",
"realname",
"=",
"object",
".",
"__name__",
"name",
"=",
"name",
"or",
"realname",
"bases",
"=",
"object",
".",
"__bases__",
"def",
"makename",
"(",
"c",
",",
"m",
"=",
"object",
".",
"__module__",
")",
":",
"return",
"classname",
"(",
"c",
",",
"m",
")",
"if",
"name",
"==",
"realname",
":",
"title",
"=",
"'class '",
"+",
"self",
".",
"bold",
"(",
"realname",
")",
"else",
":",
"title",
"=",
"self",
".",
"bold",
"(",
"name",
")",
"+",
"' = class '",
"+",
"realname",
"if",
"bases",
":",
"parents",
"=",
"map",
"(",
"makename",
",",
"bases",
")",
"title",
"=",
"title",
"+",
"'(%s)'",
"%",
"join",
"(",
"parents",
",",
"', '",
")",
"doc",
"=",
"getdoc",
"(",
"object",
")",
"contents",
"=",
"doc",
"and",
"[",
"doc",
"+",
"'\\n'",
"]",
"or",
"[",
"]",
"push",
"=",
"contents",
".",
"append",
"# List the mro, if non-trivial.",
"mro",
"=",
"deque",
"(",
"inspect",
".",
"getmro",
"(",
"object",
")",
")",
"if",
"len",
"(",
"mro",
")",
">",
"2",
":",
"push",
"(",
"\"Method resolution order:\"",
")",
"for",
"base",
"in",
"mro",
":",
"push",
"(",
"' '",
"+",
"makename",
"(",
"base",
")",
")",
"push",
"(",
"''",
")",
"# Cute little class to pump out a horizontal rule between sections.",
"class",
"HorizontalRule",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"needone",
"=",
"0",
"def",
"maybe",
"(",
"self",
")",
":",
"if",
"self",
".",
"needone",
":",
"push",
"(",
"'-'",
"*",
"70",
")",
"self",
".",
"needone",
"=",
"1",
"hr",
"=",
"HorizontalRule",
"(",
")",
"def",
"spill",
"(",
"msg",
",",
"attrs",
",",
"predicate",
")",
":",
"ok",
",",
"attrs",
"=",
"_split_list",
"(",
"attrs",
",",
"predicate",
")",
"if",
"ok",
":",
"hr",
".",
"maybe",
"(",
")",
"push",
"(",
"msg",
")",
"for",
"name",
",",
"kind",
",",
"homecls",
",",
"value",
"in",
"ok",
":",
"push",
"(",
"self",
".",
"document",
"(",
"getattr",
"(",
"object",
",",
"name",
")",
",",
"name",
",",
"mod",
",",
"object",
")",
")",
"return",
"attrs",
"def",
"spilldescriptors",
"(",
"msg",
",",
"attrs",
",",
"predicate",
")",
":",
"ok",
",",
"attrs",
"=",
"_split_list",
"(",
"attrs",
",",
"predicate",
")",
"if",
"ok",
":",
"hr",
".",
"maybe",
"(",
")",
"push",
"(",
"msg",
")",
"for",
"name",
",",
"kind",
",",
"homecls",
",",
"value",
"in",
"ok",
":",
"push",
"(",
"self",
".",
"_docdescriptor",
"(",
"name",
",",
"value",
",",
"mod",
")",
")",
"return",
"attrs",
"def",
"spilldata",
"(",
"msg",
",",
"attrs",
",",
"predicate",
")",
":",
"ok",
",",
"attrs",
"=",
"_split_list",
"(",
"attrs",
",",
"predicate",
")",
"if",
"ok",
":",
"hr",
".",
"maybe",
"(",
")",
"push",
"(",
"msg",
")",
"for",
"name",
",",
"kind",
",",
"homecls",
",",
"value",
"in",
"ok",
":",
"if",
"(",
"hasattr",
"(",
"value",
",",
"'__call__'",
")",
"or",
"inspect",
".",
"isdatadescriptor",
"(",
"value",
")",
")",
":",
"doc",
"=",
"getdoc",
"(",
"value",
")",
"else",
":",
"doc",
"=",
"None",
"push",
"(",
"self",
".",
"docother",
"(",
"getattr",
"(",
"object",
",",
"name",
")",
",",
"name",
",",
"mod",
",",
"maxlen",
"=",
"70",
",",
"doc",
"=",
"doc",
")",
"+",
"'\\n'",
")",
"return",
"attrs",
"attrs",
"=",
"filter",
"(",
"lambda",
"data",
":",
"visiblename",
"(",
"data",
"[",
"0",
"]",
",",
"obj",
"=",
"object",
")",
",",
"classify_class_attrs",
"(",
"object",
")",
")",
"while",
"attrs",
":",
"if",
"mro",
":",
"thisclass",
"=",
"mro",
".",
"popleft",
"(",
")",
"else",
":",
"thisclass",
"=",
"attrs",
"[",
"0",
"]",
"[",
"2",
"]",
"attrs",
",",
"inherited",
"=",
"_split_list",
"(",
"attrs",
",",
"lambda",
"t",
":",
"t",
"[",
"2",
"]",
"is",
"thisclass",
")",
"if",
"thisclass",
"is",
"__builtin__",
".",
"object",
":",
"attrs",
"=",
"inherited",
"continue",
"elif",
"thisclass",
"is",
"object",
":",
"tag",
"=",
"\"defined here\"",
"else",
":",
"tag",
"=",
"\"inherited from %s\"",
"%",
"classname",
"(",
"thisclass",
",",
"object",
".",
"__module__",
")",
"# Sort attrs by name.",
"attrs",
".",
"sort",
"(",
")",
"# Pump out the attrs, segregated by kind.",
"attrs",
"=",
"spill",
"(",
"\"Methods %s:\\n\"",
"%",
"tag",
",",
"attrs",
",",
"lambda",
"t",
":",
"t",
"[",
"1",
"]",
"==",
"'method'",
")",
"attrs",
"=",
"spill",
"(",
"\"Class methods %s:\\n\"",
"%",
"tag",
",",
"attrs",
",",
"lambda",
"t",
":",
"t",
"[",
"1",
"]",
"==",
"'class method'",
")",
"attrs",
"=",
"spill",
"(",
"\"Static methods %s:\\n\"",
"%",
"tag",
",",
"attrs",
",",
"lambda",
"t",
":",
"t",
"[",
"1",
"]",
"==",
"'static method'",
")",
"attrs",
"=",
"spilldescriptors",
"(",
"\"Data descriptors %s:\\n\"",
"%",
"tag",
",",
"attrs",
",",
"lambda",
"t",
":",
"t",
"[",
"1",
"]",
"==",
"'data descriptor'",
")",
"attrs",
"=",
"spilldata",
"(",
"\"Data and other attributes %s:\\n\"",
"%",
"tag",
",",
"attrs",
",",
"lambda",
"t",
":",
"t",
"[",
"1",
"]",
"==",
"'data'",
")",
"assert",
"attrs",
"==",
"[",
"]",
"attrs",
"=",
"inherited",
"contents",
"=",
"'\\n'",
".",
"join",
"(",
"contents",
")",
"if",
"not",
"contents",
":",
"return",
"title",
"+",
"'\\n'",
"return",
"title",
"+",
"'\\n'",
"+",
"self",
".",
"indent",
"(",
"rstrip",
"(",
"contents",
")",
",",
"' | '",
")",
"+",
"'\\n'"
] |
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/VBox/Devices/EFI/Firmware/AppPkg/Applications/Python/PyMod-2.7.2/Lib/pydoc.py#L1119-L1230
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/http/cookiejar.py
|
python
|
FileCookieJar.save
|
(self, filename=None, ignore_discard=False, ignore_expires=False)
|
Save cookies to a file.
|
Save cookies to a file.
|
[
"Save",
"cookies",
"to",
"a",
"file",
"."
] |
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file."""
raise NotImplementedError()
|
[
"def",
"save",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"ignore_discard",
"=",
"False",
",",
"ignore_expires",
"=",
"False",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/http/cookiejar.py#L1788-L1790
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_misc.py
|
python
|
ArtProvider.GetIconBundle
|
(*args, **kwargs)
|
return _misc_.ArtProvider_GetIconBundle(*args, **kwargs)
|
GetIconBundle(wxArtID id, wxArtClient client=wxART_OTHER) -> wxIconBundle
Query the providers for iconbundle with given ID and return it. Return
wx.NullIconBundle if no provider provides it.
|
GetIconBundle(wxArtID id, wxArtClient client=wxART_OTHER) -> wxIconBundle
|
[
"GetIconBundle",
"(",
"wxArtID",
"id",
"wxArtClient",
"client",
"=",
"wxART_OTHER",
")",
"-",
">",
"wxIconBundle"
] |
def GetIconBundle(*args, **kwargs):
"""
GetIconBundle(wxArtID id, wxArtClient client=wxART_OTHER) -> wxIconBundle
Query the providers for iconbundle with given ID and return it. Return
wx.NullIconBundle if no provider provides it.
"""
return _misc_.ArtProvider_GetIconBundle(*args, **kwargs)
|
[
"def",
"GetIconBundle",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"ArtProvider_GetIconBundle",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L2855-L2862
|
|
NVIDIA/TensorRT
|
42805f078052daad1a98bc5965974fcffaad0960
|
samples/trtexec/prn_utils.py
|
python
|
combineDescriptions
|
(prolog, features, descriptions)
|
return fullDescription
|
Combine features with their descriptions
|
Combine features with their descriptions
|
[
"Combine",
"features",
"with",
"their",
"descriptions"
] |
def combineDescriptions(prolog, features, descriptions):
''' Combine features with their descriptions '''
fullDescription = prolog
sep = ' '
for feature, description in zip(features, descriptions):
fullDescription += sep + feature + ' (' + description + ')'
sep = ', '
return fullDescription
|
[
"def",
"combineDescriptions",
"(",
"prolog",
",",
"features",
",",
"descriptions",
")",
":",
"fullDescription",
"=",
"prolog",
"sep",
"=",
"' '",
"for",
"feature",
",",
"description",
"in",
"zip",
"(",
"features",
",",
"descriptions",
")",
":",
"fullDescription",
"+=",
"sep",
"+",
"feature",
"+",
"' ('",
"+",
"description",
"+",
"')'",
"sep",
"=",
"', '",
"return",
"fullDescription"
] |
https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/samples/trtexec/prn_utils.py#L29-L38
|
|
weolar/miniblink49
|
1c4678db0594a4abde23d3ebbcc7cd13c3170777
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/autopep8.py
|
python
|
match_file
|
(filename, exclude)
|
return True
|
Return True if file is okay for modifying/recursing.
|
Return True if file is okay for modifying/recursing.
|
[
"Return",
"True",
"if",
"file",
"is",
"okay",
"for",
"modifying",
"/",
"recursing",
"."
] |
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True
|
[
"def",
"match_file",
"(",
"filename",
",",
"exclude",
")",
":",
"base_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"if",
"base_name",
".",
"startswith",
"(",
"'.'",
")",
":",
"return",
"False",
"for",
"pattern",
"in",
"exclude",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"base_name",
",",
"pattern",
")",
":",
"return",
"False",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
"and",
"not",
"is_python_file",
"(",
"filename",
")",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/autopep8.py#L3510-L3524
|
|
natanielruiz/android-yolo
|
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
|
jni-build/jni/include/tensorflow/python/summary/event_accumulator.py
|
python
|
_GeneratorFromPath
|
(path)
|
Create an event generator for file or directory at given path string.
|
Create an event generator for file or directory at given path string.
|
[
"Create",
"an",
"event",
"generator",
"for",
"file",
"or",
"directory",
"at",
"given",
"path",
"string",
"."
] |
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
if IsTensorFlowEventsFile(path):
return io_wrapper.CreateFileLoader(path)
else:
return directory_watcher.DirectoryWatcher(path, io_wrapper.CreateFileLoader,
IsTensorFlowEventsFile)
|
[
"def",
"_GeneratorFromPath",
"(",
"path",
")",
":",
"if",
"IsTensorFlowEventsFile",
"(",
"path",
")",
":",
"return",
"io_wrapper",
".",
"CreateFileLoader",
"(",
"path",
")",
"else",
":",
"return",
"directory_watcher",
".",
"DirectoryWatcher",
"(",
"path",
",",
"io_wrapper",
".",
"CreateFileLoader",
",",
"IsTensorFlowEventsFile",
")"
] |
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/summary/event_accumulator.py#L632-L638
|
||
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
scripts/SANS/sans/user_file/user_file_parser.py
|
python
|
FitParser.extract_clear
|
()
|
return {FitId.GENERAL: fit_general(start=None, stop=None, fit_type=FitType.NO_FIT,
data_type=None, polynomial_order=None)}
|
With this we want to clear the fit type settings.
|
With this we want to clear the fit type settings.
|
[
"With",
"this",
"we",
"want",
"to",
"clear",
"the",
"fit",
"type",
"settings",
"."
] |
def extract_clear():
"""
With this we want to clear the fit type settings.
"""
return {FitId.GENERAL: fit_general(start=None, stop=None, fit_type=FitType.NO_FIT,
data_type=None, polynomial_order=None)}
|
[
"def",
"extract_clear",
"(",
")",
":",
"return",
"{",
"FitId",
".",
"GENERAL",
":",
"fit_general",
"(",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"fit_type",
"=",
"FitType",
".",
"NO_FIT",
",",
"data_type",
"=",
"None",
",",
"polynomial_order",
"=",
"None",
")",
"}"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/sans/user_file/user_file_parser.py#L1843-L1848
|
|
htcondor/htcondor
|
4829724575176d1d6c936e4693dfd78a728569b0
|
src/blahp/src/scripts/lsf_status.py
|
python
|
bjobs
|
(jobid="")
|
return result
|
Call bjobs directly for a jobid.
If none is specified, query all jobid's.
Returns a python dictionary with the job info.
|
Call bjobs directly for a jobid.
If none is specified, query all jobid's.
|
[
"Call",
"bjobs",
"directly",
"for",
"a",
"jobid",
".",
"If",
"none",
"is",
"specified",
"query",
"all",
"jobid",
"s",
"."
] |
def bjobs(jobid=""):
"""
Call bjobs directly for a jobid.
If none is specified, query all jobid's.
Returns a python dictionary with the job info.
"""
bjobs = get_bjobs_location()
command = (bjobs, '-V')
bjobs_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
bjobs_version, _ = bjobs_process.communicate()
bjobs_version = to_str(bjobs_version)
log(bjobs_version)
starttime = time.time()
log("Starting bjobs.")
if jobid != "":
bjobs_process = subprocess.Popen(("%s -UF %s" % (bjobs, jobid)), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True)
else:
bjobs_process = subprocess.Popen(("%s -UF -a" % bjobs), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True)
bjobs_process_stdout, bjobs_process_stderr = bjobs_process.communicate()
bjobs_process_stdout = to_str(bjobs_process_stdout)
bjobs_process_stderr = to_str(bjobs_process_stderr)
if bjobs_process_stderr == "":
result = parse_bjobs_fd(bjobs_process_stdout.splitlines())
elif jobid != "":
result = {jobid: {'BatchJobId': '"%s"' % jobid, 'JobStatus': '3', 'ExitCode': ' 0'}}
else:
result = {}
exit_code = bjobs_process.returncode
log("Finished bjobs (time=%f)." % (time.time()-starttime))
if exit_code:
raise Exception("bjobs failed with exit code %s" % str(exit_code))
# If the job has completed...
if jobid != "" and "JobStatus" in result[jobid] and (result[jobid]["JobStatus"] == '4' or result[jobid]["JobStatus"] == '3'):
# Get the finished job stats and update the result
finished_job_stats = get_finished_job_stats(jobid)
result[jobid].update(finished_job_stats)
return result
|
[
"def",
"bjobs",
"(",
"jobid",
"=",
"\"\"",
")",
":",
"bjobs",
"=",
"get_bjobs_location",
"(",
")",
"command",
"=",
"(",
"bjobs",
",",
"'-V'",
")",
"bjobs_process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"bjobs_version",
",",
"_",
"=",
"bjobs_process",
".",
"communicate",
"(",
")",
"bjobs_version",
"=",
"to_str",
"(",
"bjobs_version",
")",
"log",
"(",
"bjobs_version",
")",
"starttime",
"=",
"time",
".",
"time",
"(",
")",
"log",
"(",
"\"Starting bjobs.\"",
")",
"if",
"jobid",
"!=",
"\"\"",
":",
"bjobs_process",
"=",
"subprocess",
".",
"Popen",
"(",
"(",
"\"%s -UF %s\"",
"%",
"(",
"bjobs",
",",
"jobid",
")",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
",",
"shell",
"=",
"True",
")",
"else",
":",
"bjobs_process",
"=",
"subprocess",
".",
"Popen",
"(",
"(",
"\"%s -UF -a\"",
"%",
"bjobs",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"close_fds",
"=",
"True",
",",
"shell",
"=",
"True",
")",
"bjobs_process_stdout",
",",
"bjobs_process_stderr",
"=",
"bjobs_process",
".",
"communicate",
"(",
")",
"bjobs_process_stdout",
"=",
"to_str",
"(",
"bjobs_process_stdout",
")",
"bjobs_process_stderr",
"=",
"to_str",
"(",
"bjobs_process_stderr",
")",
"if",
"bjobs_process_stderr",
"==",
"\"\"",
":",
"result",
"=",
"parse_bjobs_fd",
"(",
"bjobs_process_stdout",
".",
"splitlines",
"(",
")",
")",
"elif",
"jobid",
"!=",
"\"\"",
":",
"result",
"=",
"{",
"jobid",
":",
"{",
"'BatchJobId'",
":",
"'\"%s\"'",
"%",
"jobid",
",",
"'JobStatus'",
":",
"'3'",
",",
"'ExitCode'",
":",
"' 0'",
"}",
"}",
"else",
":",
"result",
"=",
"{",
"}",
"exit_code",
"=",
"bjobs_process",
".",
"returncode",
"log",
"(",
"\"Finished bjobs (time=%f).\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"starttime",
")",
")",
"if",
"exit_code",
":",
"raise",
"Exception",
"(",
"\"bjobs failed with exit code %s\"",
"%",
"str",
"(",
"exit_code",
")",
")",
"# If the job has completed...",
"if",
"jobid",
"!=",
"\"\"",
"and",
"\"JobStatus\"",
"in",
"result",
"[",
"jobid",
"]",
"and",
"(",
"result",
"[",
"jobid",
"]",
"[",
"\"JobStatus\"",
"]",
"==",
"'4'",
"or",
"result",
"[",
"jobid",
"]",
"[",
"\"JobStatus\"",
"]",
"==",
"'3'",
")",
":",
"# Get the finished job stats and update the result",
"finished_job_stats",
"=",
"get_finished_job_stats",
"(",
"jobid",
")",
"result",
"[",
"jobid",
"]",
".",
"update",
"(",
"finished_job_stats",
")",
"return",
"result"
] |
https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/src/blahp/src/scripts/lsf_status.py#L241-L286
|
|
apache/incubator-mxnet
|
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
|
python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
|
python
|
convert_take
|
(node, **kwargs)
|
return nodes
|
Map MXNet's Take operator attributes to onnx's Gather operator.
|
Map MXNet's Take operator attributes to onnx's Gather operator.
|
[
"Map",
"MXNet",
"s",
"Take",
"operator",
"attributes",
"to",
"onnx",
"s",
"Gather",
"operator",
"."
] |
def convert_take(node, **kwargs):
"""Map MXNet's Take operator attributes to onnx's Gather operator.
"""
from onnx.helper import make_node
from onnx import TensorProto
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get('axis', 0))
mode = str(attrs.get('mode', 'clip'))
data = input_nodes[0]
indices = input_nodes[1]
nodes = [
make_node('Cast', [indices], [name+'_indices'], to=int(TensorProto.INT64)),
]
if mode == 'raise':
nodes += [
make_node('Gather', [data, name+'_indices'], [name], axis=axis, name=name)
]
return nodes
create_tensor([-1], name+'_-1', kwargs["initializer"])
nodes += [
make_node('Shape', [data], [name+'_data_shape']),
]
# corner case
if axis == -1:
nodes += [
make_node('Shape', [name+'_data_shape'], [name+'_data_dim']),
make_node('Add', [name+'_data_dim', name+'_-1'], [name+'_axis_max']),
make_node('Slice', [name+'_data_shape', name+'_axis_max', name+'_data_dim'], [name+'_slice0_out']),
]
else:
create_tensor([axis], name+'_axis', kwargs["initializer"])
create_tensor([axis+1], name+'_axis+1', kwargs["initializer"])
nodes += [
make_node('Slice', [name+'_data_shape', name+'_axis', name+'_axis+1'], [name+'_slice0_out']),
]
if mode == 'clip':
create_tensor([0], name+'_0', kwargs["initializer"])
nodes += [
make_node('Add', [name+'_slice0_out', name+'_-1'], [name+'_max']),
make_node('Greater', [name+'_indices', name+'_max'], [name+'_max_mask']),
make_node('Where', [name+'_max_mask', name+'_max', name+'_indices'], [name+'_where0_out']),
make_node('Less', [name+'_indices', name+'_0'], [name+'_min_mask']),
make_node('Where', [name+'_min_mask', name+'_0', name+'_where0_out'], [name+'_where1_out']),
make_node('Gather', [data, name+'_where1_out'], [name], axis=axis, name=name)
]
elif mode == 'wrap':
nodes += [
make_node('Mod', [name+'_indices', name+'_slice0_out'], [name+'_mod0_out']),
make_node('Gather', [data, name+'_mod0_out'], [name], axis=axis, name=name)
]
else:
raise NotImplementedError("mode must be clip, wrap or raise.")
return nodes
|
[
"def",
"convert_take",
"(",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"onnx",
".",
"helper",
"import",
"make_node",
"from",
"onnx",
"import",
"TensorProto",
"name",
",",
"input_nodes",
",",
"attrs",
"=",
"get_inputs",
"(",
"node",
",",
"kwargs",
")",
"axis",
"=",
"int",
"(",
"attrs",
".",
"get",
"(",
"'axis'",
",",
"0",
")",
")",
"mode",
"=",
"str",
"(",
"attrs",
".",
"get",
"(",
"'mode'",
",",
"'clip'",
")",
")",
"data",
"=",
"input_nodes",
"[",
"0",
"]",
"indices",
"=",
"input_nodes",
"[",
"1",
"]",
"nodes",
"=",
"[",
"make_node",
"(",
"'Cast'",
",",
"[",
"indices",
"]",
",",
"[",
"name",
"+",
"'_indices'",
"]",
",",
"to",
"=",
"int",
"(",
"TensorProto",
".",
"INT64",
")",
")",
",",
"]",
"if",
"mode",
"==",
"'raise'",
":",
"nodes",
"+=",
"[",
"make_node",
"(",
"'Gather'",
",",
"[",
"data",
",",
"name",
"+",
"'_indices'",
"]",
",",
"[",
"name",
"]",
",",
"axis",
"=",
"axis",
",",
"name",
"=",
"name",
")",
"]",
"return",
"nodes",
"create_tensor",
"(",
"[",
"-",
"1",
"]",
",",
"name",
"+",
"'_-1'",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"nodes",
"+=",
"[",
"make_node",
"(",
"'Shape'",
",",
"[",
"data",
"]",
",",
"[",
"name",
"+",
"'_data_shape'",
"]",
")",
",",
"]",
"# corner case",
"if",
"axis",
"==",
"-",
"1",
":",
"nodes",
"+=",
"[",
"make_node",
"(",
"'Shape'",
",",
"[",
"name",
"+",
"'_data_shape'",
"]",
",",
"[",
"name",
"+",
"'_data_dim'",
"]",
")",
",",
"make_node",
"(",
"'Add'",
",",
"[",
"name",
"+",
"'_data_dim'",
",",
"name",
"+",
"'_-1'",
"]",
",",
"[",
"name",
"+",
"'_axis_max'",
"]",
")",
",",
"make_node",
"(",
"'Slice'",
",",
"[",
"name",
"+",
"'_data_shape'",
",",
"name",
"+",
"'_axis_max'",
",",
"name",
"+",
"'_data_dim'",
"]",
",",
"[",
"name",
"+",
"'_slice0_out'",
"]",
")",
",",
"]",
"else",
":",
"create_tensor",
"(",
"[",
"axis",
"]",
",",
"name",
"+",
"'_axis'",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"create_tensor",
"(",
"[",
"axis",
"+",
"1",
"]",
",",
"name",
"+",
"'_axis+1'",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"nodes",
"+=",
"[",
"make_node",
"(",
"'Slice'",
",",
"[",
"name",
"+",
"'_data_shape'",
",",
"name",
"+",
"'_axis'",
",",
"name",
"+",
"'_axis+1'",
"]",
",",
"[",
"name",
"+",
"'_slice0_out'",
"]",
")",
",",
"]",
"if",
"mode",
"==",
"'clip'",
":",
"create_tensor",
"(",
"[",
"0",
"]",
",",
"name",
"+",
"'_0'",
",",
"kwargs",
"[",
"\"initializer\"",
"]",
")",
"nodes",
"+=",
"[",
"make_node",
"(",
"'Add'",
",",
"[",
"name",
"+",
"'_slice0_out'",
",",
"name",
"+",
"'_-1'",
"]",
",",
"[",
"name",
"+",
"'_max'",
"]",
")",
",",
"make_node",
"(",
"'Greater'",
",",
"[",
"name",
"+",
"'_indices'",
",",
"name",
"+",
"'_max'",
"]",
",",
"[",
"name",
"+",
"'_max_mask'",
"]",
")",
",",
"make_node",
"(",
"'Where'",
",",
"[",
"name",
"+",
"'_max_mask'",
",",
"name",
"+",
"'_max'",
",",
"name",
"+",
"'_indices'",
"]",
",",
"[",
"name",
"+",
"'_where0_out'",
"]",
")",
",",
"make_node",
"(",
"'Less'",
",",
"[",
"name",
"+",
"'_indices'",
",",
"name",
"+",
"'_0'",
"]",
",",
"[",
"name",
"+",
"'_min_mask'",
"]",
")",
",",
"make_node",
"(",
"'Where'",
",",
"[",
"name",
"+",
"'_min_mask'",
",",
"name",
"+",
"'_0'",
",",
"name",
"+",
"'_where0_out'",
"]",
",",
"[",
"name",
"+",
"'_where1_out'",
"]",
")",
",",
"make_node",
"(",
"'Gather'",
",",
"[",
"data",
",",
"name",
"+",
"'_where1_out'",
"]",
",",
"[",
"name",
"]",
",",
"axis",
"=",
"axis",
",",
"name",
"=",
"name",
")",
"]",
"elif",
"mode",
"==",
"'wrap'",
":",
"nodes",
"+=",
"[",
"make_node",
"(",
"'Mod'",
",",
"[",
"name",
"+",
"'_indices'",
",",
"name",
"+",
"'_slice0_out'",
"]",
",",
"[",
"name",
"+",
"'_mod0_out'",
"]",
")",
",",
"make_node",
"(",
"'Gather'",
",",
"[",
"data",
",",
"name",
"+",
"'_mod0_out'",
"]",
",",
"[",
"name",
"]",
",",
"axis",
"=",
"axis",
",",
"name",
"=",
"name",
")",
"]",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"mode must be clip, wrap or raise.\"",
")",
"return",
"nodes"
] |
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py#L2809-L2872
|
|
ChromiumWebApps/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
tools/json_schema_compiler/dart_generator.py
|
python
|
_Generator._GenerateParameterList
|
(self,
params,
callback=None,
convert_optional=False)
|
return ', '.join(p for p in param_sets if p)
|
Given a list of function parameters, generates their signature (as a
string).
e.g.
[String type]
bool x, void callback([String x])
If convert_optional is True, changes optional parameters to be required.
Useful for callbacks, where optional parameters are treated as required.
|
Given a list of function parameters, generates their signature (as a
string).
|
[
"Given",
"a",
"list",
"of",
"function",
"parameters",
"generates",
"their",
"signature",
"(",
"as",
"a",
"string",
")",
"."
] |
def _GenerateParameterList(self,
params,
callback=None,
convert_optional=False):
"""Given a list of function parameters, generates their signature (as a
string).
e.g.
[String type]
bool x, void callback([String x])
If convert_optional is True, changes optional parameters to be required.
Useful for callbacks, where optional parameters are treated as required.
"""
# Params lists (required & optional), to be joined with commas.
# TODO(sashab): Don't assume optional params always come after required
# ones.
params_req = []
params_opt = []
for param in params:
p_sig = self._GeneratePropertySignature(param)
if param.optional and not convert_optional:
params_opt.append(p_sig)
else:
params_req.append(p_sig)
# Add the callback, if it exists.
if callback:
c_sig = self._GenerateFunctionSignature(callback, convert_optional=True)
if callback.optional:
params_opt.append(c_sig)
else:
params_req.append(c_sig)
# Join the parameters with commas.
# Optional parameters have to be in square brackets, e.g.:
#
# required params | optional params | output
# [] | [] | ''
# [x, y] | [] | 'x, y'
# [] | [a, b] | '[a, b]'
# [x, y] | [a, b] | 'x, y, [a, b]'
if params_opt:
params_opt[0] = '[%s' % params_opt[0]
params_opt[-1] = '%s]' % params_opt[-1]
param_sets = [', '.join(params_req), ', '.join(params_opt)]
# The 'if p' part here is needed to prevent commas where there are no
# parameters of a certain type.
# If there are no optional parameters, this prevents a _trailing_ comma,
# e.g. '(x, y,)'. Similarly, if there are no required parameters, this
# prevents a leading comma, e.g. '(, [a, b])'.
return ', '.join(p for p in param_sets if p)
|
[
"def",
"_GenerateParameterList",
"(",
"self",
",",
"params",
",",
"callback",
"=",
"None",
",",
"convert_optional",
"=",
"False",
")",
":",
"# Params lists (required & optional), to be joined with commas.",
"# TODO(sashab): Don't assume optional params always come after required",
"# ones.",
"params_req",
"=",
"[",
"]",
"params_opt",
"=",
"[",
"]",
"for",
"param",
"in",
"params",
":",
"p_sig",
"=",
"self",
".",
"_GeneratePropertySignature",
"(",
"param",
")",
"if",
"param",
".",
"optional",
"and",
"not",
"convert_optional",
":",
"params_opt",
".",
"append",
"(",
"p_sig",
")",
"else",
":",
"params_req",
".",
"append",
"(",
"p_sig",
")",
"# Add the callback, if it exists.",
"if",
"callback",
":",
"c_sig",
"=",
"self",
".",
"_GenerateFunctionSignature",
"(",
"callback",
",",
"convert_optional",
"=",
"True",
")",
"if",
"callback",
".",
"optional",
":",
"params_opt",
".",
"append",
"(",
"c_sig",
")",
"else",
":",
"params_req",
".",
"append",
"(",
"c_sig",
")",
"# Join the parameters with commas.",
"# Optional parameters have to be in square brackets, e.g.:",
"#",
"# required params | optional params | output",
"# [] | [] | ''",
"# [x, y] | [] | 'x, y'",
"# [] | [a, b] | '[a, b]'",
"# [x, y] | [a, b] | 'x, y, [a, b]'",
"if",
"params_opt",
":",
"params_opt",
"[",
"0",
"]",
"=",
"'[%s'",
"%",
"params_opt",
"[",
"0",
"]",
"params_opt",
"[",
"-",
"1",
"]",
"=",
"'%s]'",
"%",
"params_opt",
"[",
"-",
"1",
"]",
"param_sets",
"=",
"[",
"', '",
".",
"join",
"(",
"params_req",
")",
",",
"', '",
".",
"join",
"(",
"params_opt",
")",
"]",
"# The 'if p' part here is needed to prevent commas where there are no",
"# parameters of a certain type.",
"# If there are no optional parameters, this prevents a _trailing_ comma,",
"# e.g. '(x, y,)'. Similarly, if there are no required parameters, this",
"# prevents a leading comma, e.g. '(, [a, b])'.",
"return",
"', '",
".",
"join",
"(",
"p",
"for",
"p",
"in",
"param_sets",
"if",
"p",
")"
] |
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/json_schema_compiler/dart_generator.py#L558-L610
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_misc.py
|
python
|
DateTime.SetHour
|
(*args, **kwargs)
|
return _misc_.DateTime_SetHour(*args, **kwargs)
|
SetHour(self, int hour) -> DateTime
|
SetHour(self, int hour) -> DateTime
|
[
"SetHour",
"(",
"self",
"int",
"hour",
")",
"-",
">",
"DateTime"
] |
def SetHour(*args, **kwargs):
"""SetHour(self, int hour) -> DateTime"""
return _misc_.DateTime_SetHour(*args, **kwargs)
|
[
"def",
"SetHour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime_SetHour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L3829-L3831
|
|
zeakey/DeepSkeleton
|
dc70170f8fd2ec8ca1157484ce66129981104486
|
python/caffe/pycaffe.py
|
python
|
_Net_params
|
(self)
|
return OrderedDict([(name, lr.blobs)
for name, lr in zip(self._layer_names, self.layers)
if len(lr.blobs) > 0])
|
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
|
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
|
[
"An",
"OrderedDict",
"(",
"bottom",
"to",
"top",
"i",
".",
"e",
".",
"input",
"to",
"output",
")",
"of",
"network",
"parameters",
"indexed",
"by",
"name",
";",
"each",
"is",
"a",
"list",
"of",
"multiple",
"blobs",
"(",
"e",
".",
"g",
".",
"weights",
"and",
"biases",
")"
] |
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
return OrderedDict([(name, lr.blobs)
for name, lr in zip(self._layer_names, self.layers)
if len(lr.blobs) > 0])
|
[
"def",
"_Net_params",
"(",
"self",
")",
":",
"return",
"OrderedDict",
"(",
"[",
"(",
"name",
",",
"lr",
".",
"blobs",
")",
"for",
"name",
",",
"lr",
"in",
"zip",
"(",
"self",
".",
"_layer_names",
",",
"self",
".",
"layers",
")",
"if",
"len",
"(",
"lr",
".",
"blobs",
")",
">",
"0",
"]",
")"
] |
https://github.com/zeakey/DeepSkeleton/blob/dc70170f8fd2ec8ca1157484ce66129981104486/python/caffe/pycaffe.py#L40-L48
|
|
deepmind/open_spiel
|
4ca53bea32bb2875c7385d215424048ae92f78c8
|
open_spiel/python/algorithms/deep_cfr_tf2.py
|
python
|
DeepCFRSolver._sample_action_from_advantage
|
(self, state, player)
|
return advantages.numpy(), matched_regrets.numpy()
|
Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (np-array) Advantage values for info state actions indexed by action.
2. (np-array) Matched regrets, prob for actions indexed by action.
|
Returns an info state policy by applying regret-matching.
|
[
"Returns",
"an",
"info",
"state",
"policy",
"by",
"applying",
"regret",
"-",
"matching",
"."
] |
def _sample_action_from_advantage(self, state, player):
"""Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (np-array) Advantage values for info state actions indexed by action.
2. (np-array) Matched regrets, prob for actions indexed by action.
"""
info_state = tf.constant(
state.information_state_tensor(player), dtype=tf.float32)
legal_actions_mask = tf.constant(
state.legal_actions_mask(player), dtype=tf.float32)
advantages, matched_regrets = self._get_matched_regrets(
info_state, legal_actions_mask, player)
return advantages.numpy(), matched_regrets.numpy()
|
[
"def",
"_sample_action_from_advantage",
"(",
"self",
",",
"state",
",",
"player",
")",
":",
"info_state",
"=",
"tf",
".",
"constant",
"(",
"state",
".",
"information_state_tensor",
"(",
"player",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"legal_actions_mask",
"=",
"tf",
".",
"constant",
"(",
"state",
".",
"legal_actions_mask",
"(",
"player",
")",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"advantages",
",",
"matched_regrets",
"=",
"self",
".",
"_get_matched_regrets",
"(",
"info_state",
",",
"legal_actions_mask",
",",
"player",
")",
"return",
"advantages",
".",
"numpy",
"(",
")",
",",
"matched_regrets",
".",
"numpy",
"(",
")"
] |
https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/algorithms/deep_cfr_tf2.py#L609-L626
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/ftplib.py
|
python
|
FTP.close
|
(self)
|
Close the connection without assuming anything about it.
|
Close the connection without assuming anything about it.
|
[
"Close",
"the",
"connection",
"without",
"assuming",
"anything",
"about",
"it",
"."
] |
def close(self):
'''Close the connection without assuming anything about it.'''
try:
file = self.file
self.file = None
if file is not None:
file.close()
finally:
sock = self.sock
self.sock = None
if sock is not None:
sock.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"try",
":",
"file",
"=",
"self",
".",
"file",
"self",
".",
"file",
"=",
"None",
"if",
"file",
"is",
"not",
"None",
":",
"file",
".",
"close",
"(",
")",
"finally",
":",
"sock",
"=",
"self",
".",
"sock",
"self",
".",
"sock",
"=",
"None",
"if",
"sock",
"is",
"not",
"None",
":",
"sock",
".",
"close",
"(",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/ftplib.py#L607-L618
|
||
tensorflow/tensorflow
|
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
|
tensorflow/python/eager/context.py
|
python
|
Context.device_name
|
(self)
|
return self._thread_local_data.device_name
|
Returns the device name for the current thread.
|
Returns the device name for the current thread.
|
[
"Returns",
"the",
"device",
"name",
"for",
"the",
"current",
"thread",
"."
] |
def device_name(self):
"""Returns the device name for the current thread."""
return self._thread_local_data.device_name
|
[
"def",
"device_name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_thread_local_data",
".",
"device_name"
] |
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/eager/context.py#L982-L984
|
|
cinder/Cinder
|
e83f5bb9c01a63eec20168d02953a0879e5100f7
|
docs/libs/markdown/blockparser.py
|
python
|
State.isstate
|
(self, state)
|
Test that top (current) level is of given state.
|
Test that top (current) level is of given state.
|
[
"Test",
"that",
"top",
"(",
"current",
")",
"level",
"is",
"of",
"given",
"state",
"."
] |
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
|
[
"def",
"isstate",
"(",
"self",
",",
"state",
")",
":",
"if",
"len",
"(",
"self",
")",
":",
"return",
"self",
"[",
"-",
"1",
"]",
"==",
"state",
"else",
":",
"return",
"False"
] |
https://github.com/cinder/Cinder/blob/e83f5bb9c01a63eec20168d02953a0879e5100f7/docs/libs/markdown/blockparser.py#L33-L38
|
||
apple/swift-lldb
|
d74be846ef3e62de946df343e8c234bde93a8912
|
examples/python/gdbremote.py
|
python
|
TerminalColors.magenta
|
(self, fg=True)
|
return ''
|
Set the foreground or background color to magenta.
The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.
|
Set the foreground or background color to magenta.
The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.
|
[
"Set",
"the",
"foreground",
"or",
"background",
"color",
"to",
"magenta",
".",
"The",
"foreground",
"color",
"will",
"be",
"set",
"if",
"fg",
"tests",
"True",
".",
"The",
"background",
"color",
"will",
"be",
"set",
"if",
"fg",
"tests",
"False",
"."
] |
def magenta(self, fg=True):
'''Set the foreground or background color to magenta.
The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.'''
if self.enabled:
if fg:
return "\x1b[35m"
else:
return "\x1b[45m"
return ''
|
[
"def",
"magenta",
"(",
"self",
",",
"fg",
"=",
"True",
")",
":",
"if",
"self",
".",
"enabled",
":",
"if",
"fg",
":",
"return",
"\"\\x1b[35m\"",
"else",
":",
"return",
"\"\\x1b[45m\"",
"return",
"''"
] |
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/examples/python/gdbremote.py#L150-L158
|
|
Z3Prover/z3
|
d745d03afdfdf638d66093e2bfbacaf87187f35b
|
src/api/python/z3/z3.py
|
python
|
Fixedpoint.get_num_levels
|
(self, predicate)
|
return Z3_fixedpoint_get_num_levels(self.ctx.ref(), self.fixedpoint, predicate.ast)
|
Retrieve number of levels used for predicate in PDR engine
|
Retrieve number of levels used for predicate in PDR engine
|
[
"Retrieve",
"number",
"of",
"levels",
"used",
"for",
"predicate",
"in",
"PDR",
"engine"
] |
def get_num_levels(self, predicate):
"""Retrieve number of levels used for predicate in PDR engine"""
return Z3_fixedpoint_get_num_levels(self.ctx.ref(), self.fixedpoint, predicate.ast)
|
[
"def",
"get_num_levels",
"(",
"self",
",",
"predicate",
")",
":",
"return",
"Z3_fixedpoint_get_num_levels",
"(",
"self",
".",
"ctx",
".",
"ref",
"(",
")",
",",
"self",
".",
"fixedpoint",
",",
"predicate",
".",
"ast",
")"
] |
https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L7517-L7519
|
|
anestisb/oatdump_plus
|
ba858c1596598f0d9ae79c14d08c708cecc50af3
|
tools/cpplint.py
|
python
|
GetHeaderGuardCPPVariable
|
(filename)
|
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
|
Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
|
Returns the CPP variable that should be used as a header guard.
|
[
"Returns",
"the",
"CPP",
"variable",
"that",
"should",
"be",
"used",
"as",
"a",
"header",
"guard",
"."
] |
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
|
[
"def",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
":",
"# Restores original filename in case that cpplint is invoked from Emacs's",
"# flymake.",
"filename",
"=",
"re",
".",
"sub",
"(",
"r'_flymake\\.h$'",
",",
"'.h'",
",",
"filename",
")",
"filename",
"=",
"re",
".",
"sub",
"(",
"r'/\\.flymake/([^/]*)$'",
",",
"r'/\\1'",
",",
"filename",
")",
"fileinfo",
"=",
"FileInfo",
"(",
"filename",
")",
"file_path_from_root",
"=",
"fileinfo",
".",
"RepositoryName",
"(",
")",
"if",
"_root",
":",
"file_path_from_root",
"=",
"re",
".",
"sub",
"(",
"'^'",
"+",
"_root",
"+",
"os",
".",
"sep",
",",
"''",
",",
"file_path_from_root",
")",
"return",
"re",
".",
"sub",
"(",
"r'[-./\\s]'",
",",
"'_'",
",",
"file_path_from_root",
")",
".",
"upper",
"(",
")",
"+",
"'_'"
] |
https://github.com/anestisb/oatdump_plus/blob/ba858c1596598f0d9ae79c14d08c708cecc50af3/tools/cpplint.py#L1144-L1165
|
|
gnuradio/gnuradio
|
09c3c4fa4bfb1a02caac74cb5334dfe065391e3b
|
gr-digital/examples/narrowband/uhd_interface.py
|
python
|
uhd_receiver._print_verbage
|
(self)
|
Prints information about the UHD transmitter
|
Prints information about the UHD transmitter
|
[
"Prints",
"information",
"about",
"the",
"UHD",
"transmitter"
] |
def _print_verbage(self):
"""
Prints information about the UHD transmitter
"""
print("\nUHD Receiver:")
print("UHD Args: %s" % (self._args))
print("Freq: %sHz" % (eng_notation.num_to_str(self._freq)))
print("LO Offset: %sHz" %
(eng_notation.num_to_str(self._lo_offset)))
print("Gain: %f dB" % (self._gain))
print("Sample Rate: %ssps" % (eng_notation.num_to_str(self._rate)))
print("Antenna: %s" % (self._ant))
print("Spec: %s" % (self._spec))
print("Clock Source: %s" % (self._clock_source))
|
[
"def",
"_print_verbage",
"(",
"self",
")",
":",
"print",
"(",
"\"\\nUHD Receiver:\"",
")",
"print",
"(",
"\"UHD Args: %s\"",
"%",
"(",
"self",
".",
"_args",
")",
")",
"print",
"(",
"\"Freq: %sHz\"",
"%",
"(",
"eng_notation",
".",
"num_to_str",
"(",
"self",
".",
"_freq",
")",
")",
")",
"print",
"(",
"\"LO Offset: %sHz\"",
"%",
"(",
"eng_notation",
".",
"num_to_str",
"(",
"self",
".",
"_lo_offset",
")",
")",
")",
"print",
"(",
"\"Gain: %f dB\"",
"%",
"(",
"self",
".",
"_gain",
")",
")",
"print",
"(",
"\"Sample Rate: %ssps\"",
"%",
"(",
"eng_notation",
".",
"num_to_str",
"(",
"self",
".",
"_rate",
")",
")",
")",
"print",
"(",
"\"Antenna: %s\"",
"%",
"(",
"self",
".",
"_ant",
")",
")",
"print",
"(",
"\"Spec: %s\"",
"%",
"(",
"self",
".",
"_spec",
")",
")",
"print",
"(",
"\"Clock Source: %s\"",
"%",
"(",
"self",
".",
"_clock_source",
")",
")"
] |
https://github.com/gnuradio/gnuradio/blob/09c3c4fa4bfb1a02caac74cb5334dfe065391e3b/gr-digital/examples/narrowband/uhd_interface.py#L222-L235
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/ogl/_composit.py
|
python
|
CompositeShape.FindConstraint
|
(self, cId)
|
return None
|
Finds the constraint with the given id.
Returns a tuple of the constraint and the actual composite the
constraint was in, in case that composite was a descendant of
this composit.
Returns None if not found.
|
Finds the constraint with the given id.
|
[
"Finds",
"the",
"constraint",
"with",
"the",
"given",
"id",
"."
] |
def FindConstraint(self, cId):
"""Finds the constraint with the given id.
Returns a tuple of the constraint and the actual composite the
constraint was in, in case that composite was a descendant of
this composit.
Returns None if not found.
"""
for constraint in self._constraints:
if constraint._constraintId == cId:
return constraint, self
# If not found, try children
for child in self._children:
if isinstance(child, CompositeShape):
constraint = child.FindConstraint(cId)
if constraint:
return constraint[0], child
return None
|
[
"def",
"FindConstraint",
"(",
"self",
",",
"cId",
")",
":",
"for",
"constraint",
"in",
"self",
".",
"_constraints",
":",
"if",
"constraint",
".",
"_constraintId",
"==",
"cId",
":",
"return",
"constraint",
",",
"self",
"# If not found, try children",
"for",
"child",
"in",
"self",
".",
"_children",
":",
"if",
"isinstance",
"(",
"child",
",",
"CompositeShape",
")",
":",
"constraint",
"=",
"child",
".",
"FindConstraint",
"(",
"cId",
")",
"if",
"constraint",
":",
"return",
"constraint",
"[",
"0",
"]",
",",
"child",
"return",
"None"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/ogl/_composit.py#L613-L633
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/scipy/optimize/_trustregion.py
|
python
|
BaseQuadraticSubproblem.jac
|
(self)
|
return self._g
|
Value of jacobian of objective function at current iteration.
|
Value of jacobian of objective function at current iteration.
|
[
"Value",
"of",
"jacobian",
"of",
"objective",
"function",
"at",
"current",
"iteration",
"."
] |
def jac(self):
"""Value of jacobian of objective function at current iteration."""
if self._g is None:
self._g = self._jac(self._x)
return self._g
|
[
"def",
"jac",
"(",
"self",
")",
":",
"if",
"self",
".",
"_g",
"is",
"None",
":",
"self",
".",
"_g",
"=",
"self",
".",
"_jac",
"(",
"self",
".",
"_x",
")",
"return",
"self",
".",
"_g"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/optimize/_trustregion.py#L48-L52
|
|
yushroom/FishEngine
|
a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9
|
Script/reflect/clang/cindex.py
|
python
|
Cursor.is_default_constructor
|
(self)
|
return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)
|
Returns True if the cursor refers to a C++ default constructor.
|
Returns True if the cursor refers to a C++ default constructor.
|
[
"Returns",
"True",
"if",
"the",
"cursor",
"refers",
"to",
"a",
"C",
"++",
"default",
"constructor",
"."
] |
def is_default_constructor(self):
"""Returns True if the cursor refers to a C++ default constructor.
"""
return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)
|
[
"def",
"is_default_constructor",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_CXXConstructor_isDefaultConstructor",
"(",
"self",
")"
] |
https://github.com/yushroom/FishEngine/blob/a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9/Script/reflect/clang/cindex.py#L1362-L1365
|
|
google/earthenterprise
|
0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
|
earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/globe_cutter_app.py
|
python
|
GlobeBuilder.BuildSearchDb
|
(self, source, polygon)
|
Extracts database info needed for POI search.
|
Extracts database info needed for POI search.
|
[
"Extracts",
"database",
"info",
"needed",
"for",
"POI",
"search",
"."
] |
def BuildSearchDb(self, source, polygon):
"""Extracts database info needed for POI search."""
self.Status("Extract search data ...")
try:
os.makedirs(self.search_dir)
except os.error:
pass # Directory may already exist
# Determine the server and target path (fusion db publish point) from
# the source.
target = ""
server = ""
if source:
server, target = common.utils.GetServerAndPathFromUrl(source)
# Replace the server with advanced configuration host
server = CONFIGS.GetStr("DATABASE_HOST")
target = common.utils.NormalizeTargetPath(target)
base_url = "%s/cgi-bin/globe_cutter_app.py" % server
url = "%s?cmd=POI_IDS&target=%s" % (base_url, target)
self.Status("Querying search poi ids: target=%s" % target)
poi_list = None
try:
data, http_status_code = self.HttpGet(url)
if http_status_code == 200:
poi_list = data.strip()
except Exception as e:
raise Exception("Request failed: cannot connect to server: {0}".format(e))
if poi_list:
# Quote polygon parameter for URI.
polygon_quoted = ""
if polygon:
polygon_quoted = urllib.quote(polygon)
poi_ids = poi_list.split(" ")
for poi_id in poi_ids:
url = ("%s?cmd=SEARCH_FILE&poi_id=%s&polygon=%s" %
(base_url, poi_id, polygon_quoted))
search_file = "%s/gepoi_%s" % (self.search_dir, poi_id)
try:
self.Status("Querying search poi data: poi_id=%s, polygon=%s" %
(poi_id, polygon))
data, http_status_code = self.HttpGet(url)
if http_status_code == 200:
self.Status("Copying search poi data: gepoi_%s to globe" % poi_id)
with open(search_file, "w") as fpw:
fpw.write(data.strip())
fpw.write("\n")
else:
self.StatusWarning(fp.read())
fp.close()
except IOError as e:
self.StatusWarning(
"Unable to write search file: %s. Error: %s" % (search_file, e))
except Exception as e:
self.StatusWarning("Unable to get search data: gepoi_%s. Error: %s" %
(poi_id, e))
else:
self.Status("No search data.")
|
[
"def",
"BuildSearchDb",
"(",
"self",
",",
"source",
",",
"polygon",
")",
":",
"self",
".",
"Status",
"(",
"\"Extract search data ...\"",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"search_dir",
")",
"except",
"os",
".",
"error",
":",
"pass",
"# Directory may already exist",
"# Determine the server and target path (fusion db publish point) from",
"# the source.",
"target",
"=",
"\"\"",
"server",
"=",
"\"\"",
"if",
"source",
":",
"server",
",",
"target",
"=",
"common",
".",
"utils",
".",
"GetServerAndPathFromUrl",
"(",
"source",
")",
"# Replace the server with advanced configuration host",
"server",
"=",
"CONFIGS",
".",
"GetStr",
"(",
"\"DATABASE_HOST\"",
")",
"target",
"=",
"common",
".",
"utils",
".",
"NormalizeTargetPath",
"(",
"target",
")",
"base_url",
"=",
"\"%s/cgi-bin/globe_cutter_app.py\"",
"%",
"server",
"url",
"=",
"\"%s?cmd=POI_IDS&target=%s\"",
"%",
"(",
"base_url",
",",
"target",
")",
"self",
".",
"Status",
"(",
"\"Querying search poi ids: target=%s\"",
"%",
"target",
")",
"poi_list",
"=",
"None",
"try",
":",
"data",
",",
"http_status_code",
"=",
"self",
".",
"HttpGet",
"(",
"url",
")",
"if",
"http_status_code",
"==",
"200",
":",
"poi_list",
"=",
"data",
".",
"strip",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"\"Request failed: cannot connect to server: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"if",
"poi_list",
":",
"# Quote polygon parameter for URI.",
"polygon_quoted",
"=",
"\"\"",
"if",
"polygon",
":",
"polygon_quoted",
"=",
"urllib",
".",
"quote",
"(",
"polygon",
")",
"poi_ids",
"=",
"poi_list",
".",
"split",
"(",
"\" \"",
")",
"for",
"poi_id",
"in",
"poi_ids",
":",
"url",
"=",
"(",
"\"%s?cmd=SEARCH_FILE&poi_id=%s&polygon=%s\"",
"%",
"(",
"base_url",
",",
"poi_id",
",",
"polygon_quoted",
")",
")",
"search_file",
"=",
"\"%s/gepoi_%s\"",
"%",
"(",
"self",
".",
"search_dir",
",",
"poi_id",
")",
"try",
":",
"self",
".",
"Status",
"(",
"\"Querying search poi data: poi_id=%s, polygon=%s\"",
"%",
"(",
"poi_id",
",",
"polygon",
")",
")",
"data",
",",
"http_status_code",
"=",
"self",
".",
"HttpGet",
"(",
"url",
")",
"if",
"http_status_code",
"==",
"200",
":",
"self",
".",
"Status",
"(",
"\"Copying search poi data: gepoi_%s to globe\"",
"%",
"poi_id",
")",
"with",
"open",
"(",
"search_file",
",",
"\"w\"",
")",
"as",
"fpw",
":",
"fpw",
".",
"write",
"(",
"data",
".",
"strip",
"(",
")",
")",
"fpw",
".",
"write",
"(",
"\"\\n\"",
")",
"else",
":",
"self",
".",
"StatusWarning",
"(",
"fp",
".",
"read",
"(",
")",
")",
"fp",
".",
"close",
"(",
")",
"except",
"IOError",
"as",
"e",
":",
"self",
".",
"StatusWarning",
"(",
"\"Unable to write search file: %s. Error: %s\"",
"%",
"(",
"search_file",
",",
"e",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"StatusWarning",
"(",
"\"Unable to get search data: gepoi_%s. Error: %s\"",
"%",
"(",
"poi_id",
",",
"e",
")",
")",
"else",
":",
"self",
".",
"Status",
"(",
"\"No search data.\"",
")"
] |
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/globe_cutter_app.py#L520-L585
|
||
hpi-xnor/BMXNet-v2
|
af2b1859eafc5c721b1397cef02f946aaf2ce20d
|
python/mxnet/model.py
|
python
|
_create_kvstore
|
(kvstore, num_device, arg_params)
|
return (kv, update_on_kvstore)
|
Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
|
Create kvstore
This function select and create a proper kvstore if given the kvstore type.
|
[
"Create",
"kvstore",
"This",
"function",
"select",
"and",
"create",
"a",
"proper",
"kvstore",
"if",
"given",
"the",
"kvstore",
"type",
"."
] |
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
|
[
"def",
"_create_kvstore",
"(",
"kvstore",
",",
"num_device",
",",
"arg_params",
")",
":",
"update_on_kvstore",
"=",
"bool",
"(",
"int",
"(",
"os",
".",
"getenv",
"(",
"'MXNET_UPDATE_ON_KVSTORE'",
",",
"\"1\"",
")",
")",
")",
"if",
"kvstore",
"is",
"None",
":",
"kv",
"=",
"None",
"elif",
"isinstance",
"(",
"kvstore",
",",
"kvs",
".",
"KVStore",
")",
":",
"kv",
"=",
"kvstore",
"elif",
"isinstance",
"(",
"kvstore",
",",
"str",
")",
":",
"# create kvstore using the string type",
"if",
"num_device",
"==",
"1",
"and",
"'dist'",
"not",
"in",
"kvstore",
":",
"# no need to use kv for single device and single machine",
"kv",
"=",
"None",
"else",
":",
"kv",
"=",
"kvs",
".",
"create",
"(",
"kvstore",
")",
"if",
"kvstore",
"==",
"'local'",
":",
"# automatically select a proper local",
"max_size",
"=",
"max",
"(",
"np",
".",
"prod",
"(",
"param",
".",
"shape",
")",
"for",
"param",
"in",
"arg_params",
".",
"values",
"(",
")",
")",
"if",
"max_size",
">",
"1024",
"*",
"1024",
"*",
"16",
":",
"update_on_kvstore",
"=",
"False",
"else",
":",
"raise",
"TypeError",
"(",
"'kvstore must be KVStore, str or None'",
")",
"if",
"kv",
"is",
"None",
":",
"update_on_kvstore",
"=",
"False",
"return",
"(",
"kv",
",",
"update_on_kvstore",
")"
] |
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/model.py#L82-L119
|
|
msftguy/ssh-rd
|
a5f3a79daeac5844edebf01916c9613563f1c390
|
_3rd/boost_1_48_0/tools/build/v2/tools/common.py
|
python
|
prepend_path_variable_command
|
(variable, paths)
|
return path_variable_setting_command(variable,
paths + os.environ.get(variable, "").split(os.pathsep))
|
Returns a command that prepends the given paths to the named path variable on
the current platform.
|
Returns a command that prepends the given paths to the named path variable on
the current platform.
|
[
"Returns",
"a",
"command",
"that",
"prepends",
"the",
"given",
"paths",
"to",
"the",
"named",
"path",
"variable",
"on",
"the",
"current",
"platform",
"."
] |
def prepend_path_variable_command(variable, paths):
"""
Returns a command that prepends the given paths to the named path variable on
the current platform.
"""
return path_variable_setting_command(variable,
paths + os.environ.get(variable, "").split(os.pathsep))
|
[
"def",
"prepend_path_variable_command",
"(",
"variable",
",",
"paths",
")",
":",
"return",
"path_variable_setting_command",
"(",
"variable",
",",
"paths",
"+",
"os",
".",
"environ",
".",
"get",
"(",
"variable",
",",
"\"\"",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
")"
] |
https://github.com/msftguy/ssh-rd/blob/a5f3a79daeac5844edebf01916c9613563f1c390/_3rd/boost_1_48_0/tools/build/v2/tools/common.py#L524-L530
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/io/pytables.py
|
python
|
_set_tz
|
(
values: Union[np.ndarray, Index],
tz: Optional[Union[str, tzinfo]],
coerce: bool = False,
)
|
return values
|
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray or Index
tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
|
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
|
[
"coerce",
"the",
"values",
"to",
"a",
"DatetimeIndex",
"if",
"tz",
"is",
"set",
"preserve",
"the",
"input",
"shape",
"if",
"possible"
] |
def _set_tz(
values: Union[np.ndarray, Index],
tz: Optional[Union[str, tzinfo]],
coerce: bool = False,
) -> Union[np.ndarray, DatetimeIndex]:
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray or Index
tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if isinstance(values, DatetimeIndex):
# If values is tzaware, the tz gets dropped in the values.ravel()
# call below (which returns an ndarray). So we are only non-lossy
# if `tz` matches `values.tz`.
assert values.tz is None or values.tz == tz
if tz is not None:
name = getattr(values, "name", None)
values = values.ravel()
tz = timezones.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
values = values.tz_localize("UTC").tz_convert(tz)
elif coerce:
values = np.asarray(values, dtype="M8[ns]")
return values
|
[
"def",
"_set_tz",
"(",
"values",
":",
"Union",
"[",
"np",
".",
"ndarray",
",",
"Index",
"]",
",",
"tz",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"tzinfo",
"]",
"]",
",",
"coerce",
":",
"bool",
"=",
"False",
",",
")",
"->",
"Union",
"[",
"np",
".",
"ndarray",
",",
"DatetimeIndex",
"]",
":",
"if",
"isinstance",
"(",
"values",
",",
"DatetimeIndex",
")",
":",
"# If values is tzaware, the tz gets dropped in the values.ravel()",
"# call below (which returns an ndarray). So we are only non-lossy",
"# if `tz` matches `values.tz`.",
"assert",
"values",
".",
"tz",
"is",
"None",
"or",
"values",
".",
"tz",
"==",
"tz",
"if",
"tz",
"is",
"not",
"None",
":",
"name",
"=",
"getattr",
"(",
"values",
",",
"\"name\"",
",",
"None",
")",
"values",
"=",
"values",
".",
"ravel",
"(",
")",
"tz",
"=",
"timezones",
".",
"get_timezone",
"(",
"_ensure_decoded",
"(",
"tz",
")",
")",
"values",
"=",
"DatetimeIndex",
"(",
"values",
",",
"name",
"=",
"name",
")",
"values",
"=",
"values",
".",
"tz_localize",
"(",
"\"UTC\"",
")",
".",
"tz_convert",
"(",
"tz",
")",
"elif",
"coerce",
":",
"values",
"=",
"np",
".",
"asarray",
"(",
"values",
",",
"dtype",
"=",
"\"M8[ns]\"",
")",
"return",
"values"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/io/pytables.py#L4646-L4676
|
|
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/profiler/internal/flops_registry.py
|
python
|
_minimum_flops
|
(graph, node)
|
return _binary_per_element_op_flops(graph, node)
|
Compute flops for Minimum operation.
|
Compute flops for Minimum operation.
|
[
"Compute",
"flops",
"for",
"Minimum",
"operation",
"."
] |
def _minimum_flops(graph, node):
"""Compute flops for Minimum operation."""
return _binary_per_element_op_flops(graph, node)
|
[
"def",
"_minimum_flops",
"(",
"graph",
",",
"node",
")",
":",
"return",
"_binary_per_element_op_flops",
"(",
"graph",
",",
"node",
")"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/profiler/internal/flops_registry.py#L178-L180
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py3/scipy/spatial/kdtree.py
|
python
|
Rectangle.split
|
(self, d, split)
|
return less, greater
|
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
|
Produce two hyperrectangles by splitting.
|
[
"Produce",
"two",
"hyperrectangles",
"by",
"splitting",
"."
] |
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
|
[
"def",
"split",
"(",
"self",
",",
"d",
",",
"split",
")",
":",
"mid",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"maxes",
")",
"mid",
"[",
"d",
"]",
"=",
"split",
"less",
"=",
"Rectangle",
"(",
"self",
".",
"mins",
",",
"mid",
")",
"mid",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"mins",
")",
"mid",
"[",
"d",
"]",
"=",
"split",
"greater",
"=",
"Rectangle",
"(",
"mid",
",",
"self",
".",
"maxes",
")",
"return",
"less",
",",
"greater"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/spatial/kdtree.py#L95-L117
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py3/scipy/signal/bsplines.py
|
python
|
qspline1d_eval
|
(cj, newx, dx=1.0, x0=0)
|
return res
|
Evaluate a quadratic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other-words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
|
Evaluate a quadratic spline at the new set of points.
|
[
"Evaluate",
"a",
"quadratic",
"spline",
"at",
"the",
"new",
"set",
"of",
"points",
"."
] |
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other-words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
|
[
"def",
"qspline1d_eval",
"(",
"cj",
",",
"newx",
",",
"dx",
"=",
"1.0",
",",
"x0",
"=",
"0",
")",
":",
"newx",
"=",
"(",
"asarray",
"(",
"newx",
")",
"-",
"x0",
")",
"/",
"dx",
"res",
"=",
"zeros_like",
"(",
"newx",
")",
"if",
"res",
".",
"size",
"==",
"0",
":",
"return",
"res",
"N",
"=",
"len",
"(",
"cj",
")",
"cond1",
"=",
"newx",
"<",
"0",
"cond2",
"=",
"newx",
">",
"(",
"N",
"-",
"1",
")",
"cond3",
"=",
"~",
"(",
"cond1",
"|",
"cond2",
")",
"# handle general mirror-symmetry",
"res",
"[",
"cond1",
"]",
"=",
"qspline1d_eval",
"(",
"cj",
",",
"-",
"newx",
"[",
"cond1",
"]",
")",
"res",
"[",
"cond2",
"]",
"=",
"qspline1d_eval",
"(",
"cj",
",",
"2",
"*",
"(",
"N",
"-",
"1",
")",
"-",
"newx",
"[",
"cond2",
"]",
")",
"newx",
"=",
"newx",
"[",
"cond3",
"]",
"if",
"newx",
".",
"size",
"==",
"0",
":",
"return",
"res",
"result",
"=",
"zeros_like",
"(",
"newx",
")",
"jlower",
"=",
"floor",
"(",
"newx",
"-",
"1.5",
")",
".",
"astype",
"(",
"int",
")",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"thisj",
"=",
"jlower",
"+",
"i",
"indj",
"=",
"thisj",
".",
"clip",
"(",
"0",
",",
"N",
"-",
"1",
")",
"# handle edge cases",
"result",
"+=",
"cj",
"[",
"indj",
"]",
"*",
"quadratic",
"(",
"newx",
"-",
"thisj",
")",
"res",
"[",
"cond3",
"]",
"=",
"result",
"return",
"res"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/signal/bsplines.py#L361-L394
|
|
eventql/eventql
|
7ca0dbb2e683b525620ea30dc40540a22d5eb227
|
deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/mozjar.py
|
python
|
JarReader.is_optimized
|
(self)
|
return self._cdir_end['cdir_offset'] == \
JarStruct.TYPE_MAPPING['uint32'][1]
|
Return whether the jar archive is optimized.
|
Return whether the jar archive is optimized.
|
[
"Return",
"whether",
"the",
"jar",
"archive",
"is",
"optimized",
"."
] |
def is_optimized(self):
'''
Return whether the jar archive is optimized.
'''
# In optimized jars, the central directory is at the beginning of the
# file, after a single 32-bits value, which is the length of data
# preloaded.
return self._cdir_end['cdir_offset'] == \
JarStruct.TYPE_MAPPING['uint32'][1]
|
[
"def",
"is_optimized",
"(",
"self",
")",
":",
"# In optimized jars, the central directory is at the beginning of the",
"# file, after a single 32-bits value, which is the length of data",
"# preloaded.",
"return",
"self",
".",
"_cdir_end",
"[",
"'cdir_offset'",
"]",
"==",
"JarStruct",
".",
"TYPE_MAPPING",
"[",
"'uint32'",
"]",
"[",
"1",
"]"
] |
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/mozjar.py#L395-L403
|
|
kungfu-origin/kungfu
|
90c84b2b590855654cb9a6395ed050e0f7763512
|
core/deps/SQLiteCpp-2.3.0/cpplint.py
|
python
|
ProcessFile
|
(filename, vlevel, extra_check_functions=[])
|
Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
|
Does google-lint on a single file.
|
[
"Does",
"google",
"-",
"lint",
"on",
"a",
"single",
"file",
"."
] |
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
# SRombauts: do not complain for directory in the file list
#sys.stderr.write(
# "Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
valid_extensions = ['cc', 'h', 'cpp', 'cu', 'cuh']
if filename != '-' and file_extension not in valid_extensions:
# SRombauts: do not complain for non C++ files
#sys.stderr.write('cpplint:0: Ignoring %s; not a .cc or .h file\n' % filename)
None
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found:
# SRombauts: never use Windows endline
# Outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 5,
'One or more carriage-return \\r (^M) (Windows endline) found; '
'Use only UNIX endline \\n')
|
[
"def",
"ProcessFile",
"(",
"filename",
",",
"vlevel",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"_SetVerboseLevel",
"(",
"vlevel",
")",
"try",
":",
"# Support the UNIX convention of using \"-\" for stdin. Note that",
"# we are not opening the file with universal newline support",
"# (which codecs doesn't support anyway), so the resulting lines do",
"# contain trailing '\\r' characters if we are reading a file that",
"# has CRLF endings.",
"# If after the split a trailing '\\r' is present, it is removed",
"# below. If it is not expected to be present (i.e. os.linesep !=",
"# '\\r\\n' as in Windows), a warning is issued below if this file",
"# is processed.",
"if",
"filename",
"==",
"'-'",
":",
"lines",
"=",
"codecs",
".",
"StreamReaderWriter",
"(",
"sys",
".",
"stdin",
",",
"codecs",
".",
"getreader",
"(",
"'utf8'",
")",
",",
"codecs",
".",
"getwriter",
"(",
"'utf8'",
")",
",",
"'replace'",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"else",
":",
"lines",
"=",
"codecs",
".",
"open",
"(",
"filename",
",",
"'r'",
",",
"'utf8'",
",",
"'replace'",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"carriage_return_found",
"=",
"False",
"# Remove trailing '\\r'.",
"for",
"linenum",
"in",
"range",
"(",
"len",
"(",
"lines",
")",
")",
":",
"if",
"lines",
"[",
"linenum",
"]",
".",
"endswith",
"(",
"'\\r'",
")",
":",
"lines",
"[",
"linenum",
"]",
"=",
"lines",
"[",
"linenum",
"]",
".",
"rstrip",
"(",
"'\\r'",
")",
"carriage_return_found",
"=",
"True",
"except",
"IOError",
":",
"# SRombauts: do not complain for directory in the file list",
"#sys.stderr.write(",
"# \"Skipping input '%s': Can't open for reading\\n\" % filename)",
"return",
"# Note, if no dot is found, this will give the entire filename as the ext.",
"file_extension",
"=",
"filename",
"[",
"filename",
".",
"rfind",
"(",
"'.'",
")",
"+",
"1",
":",
"]",
"# When reading from stdin, the extension is unknown, so no cpplint tests",
"# should rely on the extension.",
"valid_extensions",
"=",
"[",
"'cc'",
",",
"'h'",
",",
"'cpp'",
",",
"'cu'",
",",
"'cuh'",
"]",
"if",
"filename",
"!=",
"'-'",
"and",
"file_extension",
"not",
"in",
"valid_extensions",
":",
"# SRombauts: do not complain for non C++ files",
"#sys.stderr.write('cpplint:0: Ignoring %s; not a .cc or .h file\\n' % filename)",
"None",
"else",
":",
"ProcessFileData",
"(",
"filename",
",",
"file_extension",
",",
"lines",
",",
"Error",
",",
"extra_check_functions",
")",
"if",
"carriage_return_found",
":",
"# SRombauts: never use Windows endline",
"# Outputting only one error for potentially",
"# several lines.",
"Error",
"(",
"filename",
",",
"0",
",",
"'whitespace/newline'",
",",
"5",
",",
"'One or more carriage-return \\\\r (^M) (Windows endline) found; '",
"'Use only UNIX endline \\\\n'",
")"
] |
https://github.com/kungfu-origin/kungfu/blob/90c84b2b590855654cb9a6395ed050e0f7763512/core/deps/SQLiteCpp-2.3.0/cpplint.py#L4628-L4695
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_gdi.py
|
python
|
FontList.FindOrCreateFont
|
(*args, **kwargs)
|
return _gdi_.FontList_FindOrCreateFont(*args, **kwargs)
|
FindOrCreateFont(self, int point_size, int family, int style, int weight,
bool underline=False, String facename=EmptyString,
int encoding=FONTENCODING_DEFAULT) -> Font
|
FindOrCreateFont(self, int point_size, int family, int style, int weight,
bool underline=False, String facename=EmptyString,
int encoding=FONTENCODING_DEFAULT) -> Font
|
[
"FindOrCreateFont",
"(",
"self",
"int",
"point_size",
"int",
"family",
"int",
"style",
"int",
"weight",
"bool",
"underline",
"=",
"False",
"String",
"facename",
"=",
"EmptyString",
"int",
"encoding",
"=",
"FONTENCODING_DEFAULT",
")",
"-",
">",
"Font"
] |
def FindOrCreateFont(*args, **kwargs):
"""
FindOrCreateFont(self, int point_size, int family, int style, int weight,
bool underline=False, String facename=EmptyString,
int encoding=FONTENCODING_DEFAULT) -> Font
"""
return _gdi_.FontList_FindOrCreateFont(*args, **kwargs)
|
[
"def",
"FindOrCreateFont",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"FontList_FindOrCreateFont",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L7063-L7069
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_core.py
|
python
|
ImageHandler.GetType
|
(*args, **kwargs)
|
return _core_.ImageHandler_GetType(*args, **kwargs)
|
GetType(self) -> int
|
GetType(self) -> int
|
[
"GetType",
"(",
"self",
")",
"-",
">",
"int"
] |
def GetType(*args, **kwargs):
"""GetType(self) -> int"""
return _core_.ImageHandler_GetType(*args, **kwargs)
|
[
"def",
"GetType",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"ImageHandler_GetType",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L2632-L2634
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/html5lib-python/html5lib/html5parser.py
|
python
|
HTMLParser.normalizeToken
|
(self, token)
|
return token
|
HTML5 specific normalizations to the token stream
|
HTML5 specific normalizations to the token stream
|
[
"HTML5",
"specific",
"normalizations",
"to",
"the",
"token",
"stream"
] |
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
|
[
"def",
"normalizeToken",
"(",
"self",
",",
"token",
")",
":",
"if",
"token",
"[",
"\"type\"",
"]",
"==",
"tokenTypes",
"[",
"\"StartTag\"",
"]",
":",
"token",
"[",
"\"data\"",
"]",
"=",
"dict",
"(",
"token",
"[",
"\"data\"",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"return",
"token"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/html5lib-python/html5lib/html5parser.py#L262-L268
|
|
interpretml/interpret
|
29466bffc04505fe4f836a83fcfebfd313ac8454
|
python/interpret-core/interpret/glassbox/linear.py
|
python
|
LogisticRegression.fit
|
(self, X, y)
|
return super().fit(X, y)
|
Fits model to provided instances.
Args:
X: Numpy array for training instances.
y: Numpy array as training labels.
Returns:
Itself.
|
Fits model to provided instances.
|
[
"Fits",
"model",
"to",
"provided",
"instances",
"."
] |
def fit(self, X, y):
""" Fits model to provided instances.
Args:
X: Numpy array for training instances.
y: Numpy array as training labels.
Returns:
Itself.
"""
self.sk_model_ = self.linear_class(**self.kwargs)
return super().fit(X, y)
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"self",
".",
"sk_model_",
"=",
"self",
".",
"linear_class",
"(",
"*",
"*",
"self",
".",
"kwargs",
")",
"return",
"super",
"(",
")",
".",
"fit",
"(",
"X",
",",
"y",
")"
] |
https://github.com/interpretml/interpret/blob/29466bffc04505fe4f836a83fcfebfd313ac8454/python/interpret-core/interpret/glassbox/linear.py#L414-L425
|
|
adobe/brackets-app
|
26dc31087a30bef7754b94e09fe7e74f6da9d699
|
src/mac/tools/change_mach_o_flags.py
|
python
|
HandleFatFile
|
(file, options, fat_offset=0)
|
Seeks the file-like |file| object to |offset| and loops over its
|fat_header| entries, calling HandleMachOFile for each.
|
Seeks the file-like |file| object to |offset| and loops over its
|fat_header| entries, calling HandleMachOFile for each.
|
[
"Seeks",
"the",
"file",
"-",
"like",
"|file|",
"object",
"to",
"|offset|",
"and",
"loops",
"over",
"its",
"|fat_header|",
"entries",
"calling",
"HandleMachOFile",
"for",
"each",
"."
] |
def HandleFatFile(file, options, fat_offset=0):
"""Seeks the file-like |file| object to |offset| and loops over its
|fat_header| entries, calling HandleMachOFile for each."""
CheckedSeek(file, fat_offset)
magic = ReadUInt32(file, '>')
assert magic == FAT_MAGIC
nfat_arch = ReadUInt32(file, '>')
for index in xrange(0, nfat_arch):
cputype, cpusubtype, offset, size, align = ReadFatArch(file)
assert size >= 28
# HandleMachOFile will seek around. Come back here after calling it, in
# case it sought.
fat_arch_offset = file.tell()
HandleMachOFile(file, options, offset)
CheckedSeek(file, fat_arch_offset)
|
[
"def",
"HandleFatFile",
"(",
"file",
",",
"options",
",",
"fat_offset",
"=",
"0",
")",
":",
"CheckedSeek",
"(",
"file",
",",
"fat_offset",
")",
"magic",
"=",
"ReadUInt32",
"(",
"file",
",",
"'>'",
")",
"assert",
"magic",
"==",
"FAT_MAGIC",
"nfat_arch",
"=",
"ReadUInt32",
"(",
"file",
",",
"'>'",
")",
"for",
"index",
"in",
"xrange",
"(",
"0",
",",
"nfat_arch",
")",
":",
"cputype",
",",
"cpusubtype",
",",
"offset",
",",
"size",
",",
"align",
"=",
"ReadFatArch",
"(",
"file",
")",
"assert",
"size",
">=",
"28",
"# HandleMachOFile will seek around. Come back here after calling it, in",
"# case it sought.",
"fat_arch_offset",
"=",
"file",
".",
"tell",
"(",
")",
"HandleMachOFile",
"(",
"file",
",",
"options",
",",
"offset",
")",
"CheckedSeek",
"(",
"file",
",",
"fat_arch_offset",
")"
] |
https://github.com/adobe/brackets-app/blob/26dc31087a30bef7754b94e09fe7e74f6da9d699/src/mac/tools/change_mach_o_flags.py#L221-L239
|
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/smtplib.py
|
python
|
SMTP.getreply
|
(self)
|
return errcode, errmsg
|
Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
|
Get a reply from the server.
|
[
"Get",
"a",
"reply",
"from",
"the",
"server",
"."
] |
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline(_MAXLINE + 1)
except OSError as e:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed: "
+ str(e))
if not line:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
self._print_debug('reply:', repr(line))
if len(line) > _MAXLINE:
self.close()
raise SMTPResponseException(500, "Line too long.")
resp.append(line[4:].strip(b' \t\r\n'))
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != b"-":
break
errmsg = b"\n".join(resp)
if self.debuglevel > 0:
self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg))
return errcode, errmsg
|
[
"def",
"getreply",
"(",
"self",
")",
":",
"resp",
"=",
"[",
"]",
"if",
"self",
".",
"file",
"is",
"None",
":",
"self",
".",
"file",
"=",
"self",
".",
"sock",
".",
"makefile",
"(",
"'rb'",
")",
"while",
"1",
":",
"try",
":",
"line",
"=",
"self",
".",
"file",
".",
"readline",
"(",
"_MAXLINE",
"+",
"1",
")",
"except",
"OSError",
"as",
"e",
":",
"self",
".",
"close",
"(",
")",
"raise",
"SMTPServerDisconnected",
"(",
"\"Connection unexpectedly closed: \"",
"+",
"str",
"(",
"e",
")",
")",
"if",
"not",
"line",
":",
"self",
".",
"close",
"(",
")",
"raise",
"SMTPServerDisconnected",
"(",
"\"Connection unexpectedly closed\"",
")",
"if",
"self",
".",
"debuglevel",
">",
"0",
":",
"self",
".",
"_print_debug",
"(",
"'reply:'",
",",
"repr",
"(",
"line",
")",
")",
"if",
"len",
"(",
"line",
")",
">",
"_MAXLINE",
":",
"self",
".",
"close",
"(",
")",
"raise",
"SMTPResponseException",
"(",
"500",
",",
"\"Line too long.\"",
")",
"resp",
".",
"append",
"(",
"line",
"[",
"4",
":",
"]",
".",
"strip",
"(",
"b' \\t\\r\\n'",
")",
")",
"code",
"=",
"line",
"[",
":",
"3",
"]",
"# Check that the error code is syntactically correct.",
"# Don't attempt to read a continuation line if it is broken.",
"try",
":",
"errcode",
"=",
"int",
"(",
"code",
")",
"except",
"ValueError",
":",
"errcode",
"=",
"-",
"1",
"break",
"# Check if multiline response.",
"if",
"line",
"[",
"3",
":",
"4",
"]",
"!=",
"b\"-\"",
":",
"break",
"errmsg",
"=",
"b\"\\n\"",
".",
"join",
"(",
"resp",
")",
"if",
"self",
".",
"debuglevel",
">",
"0",
":",
"self",
".",
"_print_debug",
"(",
"'reply: retcode (%s); Msg: %a'",
"%",
"(",
"errcode",
",",
"errmsg",
")",
")",
"return",
"errcode",
",",
"errmsg"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/smtplib.py#L369-L416
|
|
klzgrad/naiveproxy
|
ed2c513637c77b18721fe428d7ed395b4d284c83
|
src/third_party/nasm/find_patches.py
|
python
|
write_patches_file
|
(origin_branch, output_file)
|
Write the patches file for |origin_branch| to |output_file|.
|
Write the patches file for |origin_branch| to |output_file|.
|
[
"Write",
"the",
"patches",
"file",
"for",
"|origin_branch|",
"to",
"|output_file|",
"."
] |
def write_patches_file(origin_branch, output_file):
"""Write the patches file for |origin_branch| to |output_file|."""
# Get the latest upstream commit that's reachable from the origin branch.
# We'll use that to compare against.
upstream = run(["git", "merge-base", "upstream/master",
origin_branch]).strip()
if not upstream:
raise Exception("Could not find upstream commit")
# "Everything reachable from |origin_branch| but not |upstream|". In other
# words, all and only chromium changes. Note that there are non-chromium
# authors here, since it will include cherry-picks to origin.
revision_range = "%s..%s" % (upstream, origin_branch)
log("Origin is %s" % origin_branch)
log("Upstream is %s" % upstream)
# Find diffs between the versions, excluding all files that are only on
# origin. We explicitly exclude .gitignore, since it exists in both places.
# Ask for no context, since we ignore it anyway.
diff = run([
"git", "diff", "--diff-filter=a", "-U0", revision_range, PATH,
":!.gitignore"
])
# Set of chromium patch sha1s we've seen.
sha1s = set()
# Map of sha1 to set of files that it affects.
sha1ToFiles = collections.defaultdict(set)
# Mapping of filename to set of lines that were deleted.
files_to_deleted_lines = {}
patch_info = PatchInfo()
filename = None
# Process each diff. Include a dummy line to flush out the last diff.
log("Scanning diffs between origin and upstream")
for line in diff.splitlines() + ["+++ just to handle deleted lines properly"]:
if line.startswith("+++"):
# If the previous patch was delete-only, then we need to search for it
# differently, since we don't get blame entries for deleted lines.
# Add the set of deleted lines to this filename.
deleted_lines = patch_info.interesting_deleted_lines()
if deleted_lines:
files_to_deleted_lines[filename] = deleted_lines
# Update to the new filename.
filename = line[6:]
log("Checking diffs in %s" % filename)
# Start of a new diff. We don't know if it inserts / deletes lines.
patch_info = PatchInfo()
elif line.startswith("@@"):
# @@ -linespec +linespec @@
# linespec is either "line_number,number_of_lines" or "line_number".
# Extract the "+linespec", which is what was added by |origin|.
# If the number of lines is specified as 0, then it's a deletion only.
# If the number of lines is unspecified, then it's 1.
added_linespec = re.sub(r"^.*\+(.*) @@.*", r"\1", line)
# Figure out the lines to blame. This is just "starting_line,+number".
if "," in added_linespec:
# linespec is "line_number,number_of_lines"
added_parts = added_linespec.split(",")
# Skip if this is a deletion.
if added_parts[1] == "0":
continue
blame_range = "%s,+%s" % (added_parts[0], added_parts[1])
else:
# One-line change
blame_range = "%s,+1" % added_linespec
blame = run([
"git", "blame", "-l",
"-L %s" % blame_range, revision_range, "--", filename
])
# Collect sha1 lines, and create a mapping of files that is changed by
# each sha1.
for blame_line in blame.splitlines():
sha1 = blame_line.split(" ", 1)[0]
if sha1:
sha1s.add(sha1)
sha1ToFiles[sha1].add(filename)
elif line.startswith("---"):
# Do nothing. Just avoid matching "---" when we check for "-"
pass
elif line.startswith("-"):
# This diff does delete lines.
patch_info.record_deleted_line(line[1:])
elif line.startswith("+"):
# This diff does insert lines.
patch_info.record_inserted_line(line[1:])
# For all files that have deleted lines, look for the sha1 that deleted them.
# This is heuristic only; we're looking for "commits that contain some text".
for filename, deleted_lines in files_to_deleted_lines.items():
for deleted_line in deleted_lines:
# Make sure that the deleted line is long enough to provide context.
if len(deleted_line) < 4:
continue
log("Checking for deleted lines in %s" % filename)
# Specify "--first-parent" so that we find commits on (presumably) origin.
sha1 = run([
"git", "log", "-1", revision_range, "--format=%H", "-S", deleted_line,
origin_branch, "--", filename
]).strip()
# Add the sha1 to the sets
sha1s.add(sha1)
sha1ToFiles[sha1].add(filename)
# Look up dates from sha1 hashes. We want to output them in a canonical order
# so that we can diff easier. Date order seems more convenient that sha1.
log("Looking up sha1 dates to sort them")
sha1_to_date = {}
for sha1 in sha1s:
date = run(["git", "log", "-1", "--format=%at", "%s" % sha1]).strip()
sha1_to_date[sha1] = date
# Print the patches file.
log("Writing patch file")
print(
"---------------------------------------------------------------------",
file=output_file)
print(
"-- Chromium Patches. Autogenerated by " + os.path.basename(__file__) +
", do not edit --",
file=output_file)
print(
"---------------------------------------------------------------------",
file=output_file)
print("\n", file=output_file)
wd = os.getcwd()
for sha1, date in sorted(sha1_to_date.iteritems(), key=lambda (k, v): v):
print(
"------------------------------------------------------------------",
file=output_file)
for line in run(["git", "log", "-1", "%s" % sha1]).splitlines():
print(line.rstrip(), file=output_file)
print("\nAffects:", file=output_file)
# TODO(liberato): maybe add the lines that were affected.
for file in sorted(sha1ToFiles[sha1]):
relfile = os.path.relpath(file, wd).replace('\\', '/')
print(" " + relfile, file=output_file)
print(file=output_file)
log("Done")
|
[
"def",
"write_patches_file",
"(",
"origin_branch",
",",
"output_file",
")",
":",
"# Get the latest upstream commit that's reachable from the origin branch.",
"# We'll use that to compare against.",
"upstream",
"=",
"run",
"(",
"[",
"\"git\"",
",",
"\"merge-base\"",
",",
"\"upstream/master\"",
",",
"origin_branch",
"]",
")",
".",
"strip",
"(",
")",
"if",
"not",
"upstream",
":",
"raise",
"Exception",
"(",
"\"Could not find upstream commit\"",
")",
"# \"Everything reachable from |origin_branch| but not |upstream|\". In other",
"# words, all and only chromium changes. Note that there are non-chromium",
"# authors here, since it will include cherry-picks to origin.",
"revision_range",
"=",
"\"%s..%s\"",
"%",
"(",
"upstream",
",",
"origin_branch",
")",
"log",
"(",
"\"Origin is %s\"",
"%",
"origin_branch",
")",
"log",
"(",
"\"Upstream is %s\"",
"%",
"upstream",
")",
"# Find diffs between the versions, excluding all files that are only on",
"# origin. We explicitly exclude .gitignore, since it exists in both places.",
"# Ask for no context, since we ignore it anyway.",
"diff",
"=",
"run",
"(",
"[",
"\"git\"",
",",
"\"diff\"",
",",
"\"--diff-filter=a\"",
",",
"\"-U0\"",
",",
"revision_range",
",",
"PATH",
",",
"\":!.gitignore\"",
"]",
")",
"# Set of chromium patch sha1s we've seen.",
"sha1s",
"=",
"set",
"(",
")",
"# Map of sha1 to set of files that it affects.",
"sha1ToFiles",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"# Mapping of filename to set of lines that were deleted.",
"files_to_deleted_lines",
"=",
"{",
"}",
"patch_info",
"=",
"PatchInfo",
"(",
")",
"filename",
"=",
"None",
"# Process each diff. Include a dummy line to flush out the last diff.",
"log",
"(",
"\"Scanning diffs between origin and upstream\"",
")",
"for",
"line",
"in",
"diff",
".",
"splitlines",
"(",
")",
"+",
"[",
"\"+++ just to handle deleted lines properly\"",
"]",
":",
"if",
"line",
".",
"startswith",
"(",
"\"+++\"",
")",
":",
"# If the previous patch was delete-only, then we need to search for it",
"# differently, since we don't get blame entries for deleted lines.",
"# Add the set of deleted lines to this filename.",
"deleted_lines",
"=",
"patch_info",
".",
"interesting_deleted_lines",
"(",
")",
"if",
"deleted_lines",
":",
"files_to_deleted_lines",
"[",
"filename",
"]",
"=",
"deleted_lines",
"# Update to the new filename.",
"filename",
"=",
"line",
"[",
"6",
":",
"]",
"log",
"(",
"\"Checking diffs in %s\"",
"%",
"filename",
")",
"# Start of a new diff. We don't know if it inserts / deletes lines.",
"patch_info",
"=",
"PatchInfo",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"\"@@\"",
")",
":",
"# @@ -linespec +linespec @@",
"# linespec is either \"line_number,number_of_lines\" or \"line_number\".",
"# Extract the \"+linespec\", which is what was added by |origin|.",
"# If the number of lines is specified as 0, then it's a deletion only.",
"# If the number of lines is unspecified, then it's 1.",
"added_linespec",
"=",
"re",
".",
"sub",
"(",
"r\"^.*\\+(.*) @@.*\"",
",",
"r\"\\1\"",
",",
"line",
")",
"# Figure out the lines to blame. This is just \"starting_line,+number\".",
"if",
"\",\"",
"in",
"added_linespec",
":",
"# linespec is \"line_number,number_of_lines\"",
"added_parts",
"=",
"added_linespec",
".",
"split",
"(",
"\",\"",
")",
"# Skip if this is a deletion.",
"if",
"added_parts",
"[",
"1",
"]",
"==",
"\"0\"",
":",
"continue",
"blame_range",
"=",
"\"%s,+%s\"",
"%",
"(",
"added_parts",
"[",
"0",
"]",
",",
"added_parts",
"[",
"1",
"]",
")",
"else",
":",
"# One-line change",
"blame_range",
"=",
"\"%s,+1\"",
"%",
"added_linespec",
"blame",
"=",
"run",
"(",
"[",
"\"git\"",
",",
"\"blame\"",
",",
"\"-l\"",
",",
"\"-L %s\"",
"%",
"blame_range",
",",
"revision_range",
",",
"\"--\"",
",",
"filename",
"]",
")",
"# Collect sha1 lines, and create a mapping of files that is changed by",
"# each sha1.",
"for",
"blame_line",
"in",
"blame",
".",
"splitlines",
"(",
")",
":",
"sha1",
"=",
"blame_line",
".",
"split",
"(",
"\" \"",
",",
"1",
")",
"[",
"0",
"]",
"if",
"sha1",
":",
"sha1s",
".",
"add",
"(",
"sha1",
")",
"sha1ToFiles",
"[",
"sha1",
"]",
".",
"add",
"(",
"filename",
")",
"elif",
"line",
".",
"startswith",
"(",
"\"---\"",
")",
":",
"# Do nothing. Just avoid matching \"---\" when we check for \"-\"",
"pass",
"elif",
"line",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"# This diff does delete lines.",
"patch_info",
".",
"record_deleted_line",
"(",
"line",
"[",
"1",
":",
"]",
")",
"elif",
"line",
".",
"startswith",
"(",
"\"+\"",
")",
":",
"# This diff does insert lines.",
"patch_info",
".",
"record_inserted_line",
"(",
"line",
"[",
"1",
":",
"]",
")",
"# For all files that have deleted lines, look for the sha1 that deleted them.",
"# This is heuristic only; we're looking for \"commits that contain some text\".",
"for",
"filename",
",",
"deleted_lines",
"in",
"files_to_deleted_lines",
".",
"items",
"(",
")",
":",
"for",
"deleted_line",
"in",
"deleted_lines",
":",
"# Make sure that the deleted line is long enough to provide context.",
"if",
"len",
"(",
"deleted_line",
")",
"<",
"4",
":",
"continue",
"log",
"(",
"\"Checking for deleted lines in %s\"",
"%",
"filename",
")",
"# Specify \"--first-parent\" so that we find commits on (presumably) origin.",
"sha1",
"=",
"run",
"(",
"[",
"\"git\"",
",",
"\"log\"",
",",
"\"-1\"",
",",
"revision_range",
",",
"\"--format=%H\"",
",",
"\"-S\"",
",",
"deleted_line",
",",
"origin_branch",
",",
"\"--\"",
",",
"filename",
"]",
")",
".",
"strip",
"(",
")",
"# Add the sha1 to the sets",
"sha1s",
".",
"add",
"(",
"sha1",
")",
"sha1ToFiles",
"[",
"sha1",
"]",
".",
"add",
"(",
"filename",
")",
"# Look up dates from sha1 hashes. We want to output them in a canonical order",
"# so that we can diff easier. Date order seems more convenient that sha1.",
"log",
"(",
"\"Looking up sha1 dates to sort them\"",
")",
"sha1_to_date",
"=",
"{",
"}",
"for",
"sha1",
"in",
"sha1s",
":",
"date",
"=",
"run",
"(",
"[",
"\"git\"",
",",
"\"log\"",
",",
"\"-1\"",
",",
"\"--format=%at\"",
",",
"\"%s\"",
"%",
"sha1",
"]",
")",
".",
"strip",
"(",
")",
"sha1_to_date",
"[",
"sha1",
"]",
"=",
"date",
"# Print the patches file.",
"log",
"(",
"\"Writing patch file\"",
")",
"print",
"(",
"\"---------------------------------------------------------------------\"",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"\"-- Chromium Patches. Autogenerated by \"",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"__file__",
")",
"+",
"\", do not edit --\"",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"\"---------------------------------------------------------------------\"",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"\"\\n\"",
",",
"file",
"=",
"output_file",
")",
"wd",
"=",
"os",
".",
"getcwd",
"(",
")",
"for",
"sha1",
",",
"date",
"in",
"sorted",
"(",
"sha1_to_date",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"lambda",
"(",
"k",
",",
"v",
")",
":",
"v",
")",
":",
"print",
"(",
"\"------------------------------------------------------------------\"",
",",
"file",
"=",
"output_file",
")",
"for",
"line",
"in",
"run",
"(",
"[",
"\"git\"",
",",
"\"log\"",
",",
"\"-1\"",
",",
"\"%s\"",
"%",
"sha1",
"]",
")",
".",
"splitlines",
"(",
")",
":",
"print",
"(",
"line",
".",
"rstrip",
"(",
")",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"\"\\nAffects:\"",
",",
"file",
"=",
"output_file",
")",
"# TODO(liberato): maybe add the lines that were affected.",
"for",
"file",
"in",
"sorted",
"(",
"sha1ToFiles",
"[",
"sha1",
"]",
")",
":",
"relfile",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"file",
",",
"wd",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"print",
"(",
"\" \"",
"+",
"relfile",
",",
"file",
"=",
"output_file",
")",
"print",
"(",
"file",
"=",
"output_file",
")",
"log",
"(",
"\"Done\"",
")"
] |
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/third_party/nasm/find_patches.py#L116-L262
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py2/scipy/_lib/decorator.py
|
python
|
decorator
|
(caller, _func=None)
|
return FunctionMaker.create(
'%s(func)' % name, 'return _decorate_(func, _call_)',
evaldict, doc=doc, module=caller.__module__,
__wrapped__=caller)
|
decorator(caller) converts a caller function into a decorator
|
decorator(caller) converts a caller function into a decorator
|
[
"decorator",
"(",
"caller",
")",
"converts",
"a",
"caller",
"function",
"into",
"a",
"decorator"
] |
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
callerfunc = caller
doc = caller.__doc__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.__func__
doc = caller.__call__.__doc__
evaldict = callerfunc.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_decorate_'] = decorate
return FunctionMaker.create(
'%s(func)' % name, 'return _decorate_(func, _call_)',
evaldict, doc=doc, module=caller.__module__,
__wrapped__=caller)
|
[
"def",
"decorator",
"(",
"caller",
",",
"_func",
"=",
"None",
")",
":",
"if",
"_func",
"is",
"not",
"None",
":",
"# return a decorated function",
"# this is obsolete behavior; you should use decorate instead",
"return",
"decorate",
"(",
"_func",
",",
"caller",
")",
"# else return a decorator function",
"if",
"inspect",
".",
"isclass",
"(",
"caller",
")",
":",
"name",
"=",
"caller",
".",
"__name__",
".",
"lower",
"(",
")",
"callerfunc",
"=",
"get_init",
"(",
"caller",
")",
"doc",
"=",
"'decorator(%s) converts functions/generators into '",
"'factories of %s objects'",
"%",
"(",
"caller",
".",
"__name__",
",",
"caller",
".",
"__name__",
")",
"elif",
"inspect",
".",
"isfunction",
"(",
"caller",
")",
":",
"if",
"caller",
".",
"__name__",
"==",
"'<lambda>'",
":",
"name",
"=",
"'_lambda_'",
"else",
":",
"name",
"=",
"caller",
".",
"__name__",
"callerfunc",
"=",
"caller",
"doc",
"=",
"caller",
".",
"__doc__",
"else",
":",
"# assume caller is an object with a __call__ method",
"name",
"=",
"caller",
".",
"__class__",
".",
"__name__",
".",
"lower",
"(",
")",
"callerfunc",
"=",
"caller",
".",
"__call__",
".",
"__func__",
"doc",
"=",
"caller",
".",
"__call__",
".",
"__doc__",
"evaldict",
"=",
"callerfunc",
".",
"__globals__",
".",
"copy",
"(",
")",
"evaldict",
"[",
"'_call_'",
"]",
"=",
"caller",
"evaldict",
"[",
"'_decorate_'",
"]",
"=",
"decorate",
"return",
"FunctionMaker",
".",
"create",
"(",
"'%s(func)'",
"%",
"name",
",",
"'return _decorate_(func, _call_)'",
",",
"evaldict",
",",
"doc",
"=",
"doc",
",",
"module",
"=",
"caller",
".",
"__module__",
",",
"__wrapped__",
"=",
"caller",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/_lib/decorator.py#L240-L268
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/mailbox.py
|
python
|
MHMessage.set_sequences
|
(self, sequences)
|
Set the list of sequences that include the message.
|
Set the list of sequences that include the message.
|
[
"Set",
"the",
"list",
"of",
"sequences",
"that",
"include",
"the",
"message",
"."
] |
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
|
[
"def",
"set_sequences",
"(",
"self",
",",
"sequences",
")",
":",
"self",
".",
"_sequences",
"=",
"list",
"(",
"sequences",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/mailbox.py#L1715-L1717
|
||
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/contrib/imperative/imperative_mode.py
|
python
|
ImperativeMode.__init__
|
(self, target, parent_graph=None)
|
Initializes an ImperativeMode.
Args:
target: The TensorFlow execution engine to connect to.
parent_graph: (Optional) An ImperativeGraph.
Raises:
UnimplementedError: if non-None parent_graph is not an ImperativeGraph.
|
Initializes an ImperativeMode.
|
[
"Initializes",
"an",
"ImperativeMode",
"."
] |
def __init__(self, target, parent_graph=None):
"""Initializes an ImperativeMode.
Args:
target: The TensorFlow execution engine to connect to.
parent_graph: (Optional) An ImperativeGraph.
Raises:
UnimplementedError: if non-None parent_graph is not an ImperativeGraph.
"""
self._target = target
self._parent_graph = parent_graph
# Create a new graph
self._graph = imperative_graph.ImperativeGraph(
parent_graph=self._parent_graph)
self._default_graph = self._graph.as_default()
# Context manager to record variable inits
self._record_variable_inits = self._graph.record_variable_inits()
if self._parent_graph:
if not isinstance(self._parent_graph, imperative_graph.ImperativeGraph):
raise errors.UnimplementedError(None, None, 'ImperativeMode needs an '
'ImperativeGraph')
# Clone the `_parent_graph` in to the current graph. This is so that
# operations used from the enclosing ImperativeMode context are
# available in the current context.
with self._graph.as_default(), self._graph.return_as_is():
importer.import_graph_def(self._parent_graph.as_graph_def(), name='')
self._session = session.Session(graph=self._graph, target=self._target)
# Override the `_session`'s run, so that variable inits can be
# called before the actual run.
self._old_run = self._session.run
self._session.run = self.run
self._context_managers = [
self._session.as_default(),
self._default_graph,
self._record_variable_inits,
imperative_graph.add_session_attr(ops.Tensor, self._session)]
|
[
"def",
"__init__",
"(",
"self",
",",
"target",
",",
"parent_graph",
"=",
"None",
")",
":",
"self",
".",
"_target",
"=",
"target",
"self",
".",
"_parent_graph",
"=",
"parent_graph",
"# Create a new graph",
"self",
".",
"_graph",
"=",
"imperative_graph",
".",
"ImperativeGraph",
"(",
"parent_graph",
"=",
"self",
".",
"_parent_graph",
")",
"self",
".",
"_default_graph",
"=",
"self",
".",
"_graph",
".",
"as_default",
"(",
")",
"# Context manager to record variable inits",
"self",
".",
"_record_variable_inits",
"=",
"self",
".",
"_graph",
".",
"record_variable_inits",
"(",
")",
"if",
"self",
".",
"_parent_graph",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_parent_graph",
",",
"imperative_graph",
".",
"ImperativeGraph",
")",
":",
"raise",
"errors",
".",
"UnimplementedError",
"(",
"None",
",",
"None",
",",
"'ImperativeMode needs an '",
"'ImperativeGraph'",
")",
"# Clone the `_parent_graph` in to the current graph. This is so that",
"# operations used from the enclosing ImperativeMode context are",
"# available in the current context.",
"with",
"self",
".",
"_graph",
".",
"as_default",
"(",
")",
",",
"self",
".",
"_graph",
".",
"return_as_is",
"(",
")",
":",
"importer",
".",
"import_graph_def",
"(",
"self",
".",
"_parent_graph",
".",
"as_graph_def",
"(",
")",
",",
"name",
"=",
"''",
")",
"self",
".",
"_session",
"=",
"session",
".",
"Session",
"(",
"graph",
"=",
"self",
".",
"_graph",
",",
"target",
"=",
"self",
".",
"_target",
")",
"# Override the `_session`'s run, so that variable inits can be",
"# called before the actual run.",
"self",
".",
"_old_run",
"=",
"self",
".",
"_session",
".",
"run",
"self",
".",
"_session",
".",
"run",
"=",
"self",
".",
"run",
"self",
".",
"_context_managers",
"=",
"[",
"self",
".",
"_session",
".",
"as_default",
"(",
")",
",",
"self",
".",
"_default_graph",
",",
"self",
".",
"_record_variable_inits",
",",
"imperative_graph",
".",
"add_session_attr",
"(",
"ops",
".",
"Tensor",
",",
"self",
".",
"_session",
")",
"]"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/imperative/imperative_mode.py#L78-L114
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/prompt-toolkit/py3/prompt_toolkit/key_binding/bindings/named_commands.py
|
python
|
end_of_line
|
(event: E)
|
Move to the end of the line.
|
Move to the end of the line.
|
[
"Move",
"to",
"the",
"end",
"of",
"the",
"line",
"."
] |
def end_of_line(event: E) -> None:
"""
Move to the end of the line.
"""
buff = event.current_buffer
buff.cursor_position += buff.document.get_end_of_line_position()
|
[
"def",
"end_of_line",
"(",
"event",
":",
"E",
")",
"->",
"None",
":",
"buff",
"=",
"event",
".",
"current_buffer",
"buff",
".",
"cursor_position",
"+=",
"buff",
".",
"document",
".",
"get_end_of_line_position",
"(",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/key_binding/bindings/named_commands.py#L98-L103
|
||
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/x86/toolchain/lib/python2.7/encodings/zlib_codec.py
|
python
|
zlib_encode
|
(input,errors='strict')
|
return (output, len(input))
|
Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
|
Encodes the object input and returns a tuple (output
object, length consumed).
|
[
"Encodes",
"the",
"object",
"input",
"and",
"returns",
"a",
"tuple",
"(",
"output",
"object",
"length",
"consumed",
")",
"."
] |
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
|
[
"def",
"zlib_encode",
"(",
"input",
",",
"errors",
"=",
"'strict'",
")",
":",
"assert",
"errors",
"==",
"'strict'",
"output",
"=",
"zlib",
".",
"compress",
"(",
"input",
")",
"return",
"(",
"output",
",",
"len",
"(",
"input",
")",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/encodings/zlib_codec.py#L14-L26
|
|
klzgrad/naiveproxy
|
ed2c513637c77b18721fe428d7ed395b4d284c83
|
src/tools/grit/grit/node/base.py
|
python
|
Node.SetAllowlistMarkedAsSkip
|
(self, mark_skipped)
|
Sets AllowlistMarkedAsSkip.
|
Sets AllowlistMarkedAsSkip.
|
[
"Sets",
"AllowlistMarkedAsSkip",
"."
] |
def SetAllowlistMarkedAsSkip(self, mark_skipped):
'''Sets AllowlistMarkedAsSkip.
'''
self._allowlist_marked_as_skip = mark_skipped
|
[
"def",
"SetAllowlistMarkedAsSkip",
"(",
"self",
",",
"mark_skipped",
")",
":",
"self",
".",
"_allowlist_marked_as_skip",
"=",
"mark_skipped"
] |
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/tools/grit/grit/node/base.py#L614-L617
|
||
apple/swift-lldb
|
d74be846ef3e62de946df343e8c234bde93a8912
|
scripts/Python/static-binding/lldb.py
|
python
|
SBEvent_GetCStringFromEvent
|
(event)
|
return _lldb.SBEvent_GetCStringFromEvent(event)
|
SBEvent_GetCStringFromEvent(SBEvent event) -> char const *
|
SBEvent_GetCStringFromEvent(SBEvent event) -> char const *
|
[
"SBEvent_GetCStringFromEvent",
"(",
"SBEvent",
"event",
")",
"-",
">",
"char",
"const",
"*"
] |
def SBEvent_GetCStringFromEvent(event):
"""SBEvent_GetCStringFromEvent(SBEvent event) -> char const *"""
return _lldb.SBEvent_GetCStringFromEvent(event)
|
[
"def",
"SBEvent_GetCStringFromEvent",
"(",
"event",
")",
":",
"return",
"_lldb",
".",
"SBEvent_GetCStringFromEvent",
"(",
"event",
")"
] |
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L4840-L4842
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_misc.py
|
python
|
VideoMode.IsOk
|
(*args, **kwargs)
|
return _misc_.VideoMode_IsOk(*args, **kwargs)
|
IsOk(self) -> bool
returns true if the object has been initialized
|
IsOk(self) -> bool
|
[
"IsOk",
"(",
"self",
")",
"-",
">",
"bool"
] |
def IsOk(*args, **kwargs):
"""
IsOk(self) -> bool
returns true if the object has been initialized
"""
return _misc_.VideoMode_IsOk(*args, **kwargs)
|
[
"def",
"IsOk",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"VideoMode_IsOk",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L6050-L6056
|
|
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
deps/src/libxml2-2.9.1/python/libxml2.py
|
python
|
nanoHTTPCleanup
|
()
|
Cleanup the HTTP protocol layer.
|
Cleanup the HTTP protocol layer.
|
[
"Cleanup",
"the",
"HTTP",
"protocol",
"layer",
"."
] |
def nanoHTTPCleanup():
"""Cleanup the HTTP protocol layer. """
libxml2mod.xmlNanoHTTPCleanup()
|
[
"def",
"nanoHTTPCleanup",
"(",
")",
":",
"libxml2mod",
".",
"xmlNanoHTTPCleanup",
"(",
")"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L1249-L1251
|
||
chromiumembedded/cef
|
80caf947f3fe2210e5344713c5281d8af9bdc295
|
tools/make_distrib.py
|
python
|
create_zip_archive
|
(input_dir)
|
Creates a zip archive of the specified input directory.
|
Creates a zip archive of the specified input directory.
|
[
"Creates",
"a",
"zip",
"archive",
"of",
"the",
"specified",
"input",
"directory",
"."
] |
def create_zip_archive(input_dir):
""" Creates a zip archive of the specified input directory. """
zip_file = input_dir + '.zip'
zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED, True)
def addDir(dir):
for f in os.listdir(dir):
full_path = os.path.join(dir, f)
if os.path.isdir(full_path):
addDir(full_path)
else:
zf.write(full_path, os.path.relpath(full_path, \
os.path.join(input_dir, os.pardir)))
addDir(input_dir)
zf.close()
|
[
"def",
"create_zip_archive",
"(",
"input_dir",
")",
":",
"zip_file",
"=",
"input_dir",
"+",
"'.zip'",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"zip_file",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
",",
"True",
")",
"def",
"addDir",
"(",
"dir",
")",
":",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"dir",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"f",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"full_path",
")",
":",
"addDir",
"(",
"full_path",
")",
"else",
":",
"zf",
".",
"write",
"(",
"full_path",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"full_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"input_dir",
",",
"os",
".",
"pardir",
")",
")",
")",
"addDir",
"(",
"input_dir",
")",
"zf",
".",
"close",
"(",
")"
] |
https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/make_distrib.py#L24-L39
|
||
oracle/graaljs
|
36a56e8e993d45fc40939a3a4d9c0c24990720f1
|
graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py
|
python
|
XcodeSettings.GetBundleResourceFolder
|
(self)
|
return os.path.join(self.GetBundleContentsFolderPath(), "Resources")
|
Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles.
|
Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles.
|
[
"Returns",
"the",
"qualified",
"path",
"to",
"the",
"bundle",
"s",
"resource",
"folder",
".",
"E",
".",
"g",
".",
"Chromium",
".",
"app",
"/",
"Contents",
"/",
"Resources",
".",
"Only",
"valid",
"for",
"bundles",
"."
] |
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), "Resources")
|
[
"def",
"GetBundleResourceFolder",
"(",
"self",
")",
":",
"assert",
"self",
".",
"_IsBundle",
"(",
")",
"if",
"self",
".",
"isIOS",
":",
"return",
"self",
".",
"GetBundleContentsFolderPath",
"(",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"GetBundleContentsFolderPath",
"(",
")",
",",
"\"Resources\"",
")"
] |
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py#L317-L323
|
|
Polidea/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
SymbolExtractorAndRenamer/llvm/utils/lit/lit/LitConfig.py
|
python
|
LitConfig.load_config
|
(self, config, path)
|
return config
|
load_config(config, path) - Load a config object from an alternate
path.
|
load_config(config, path) - Load a config object from an alternate
path.
|
[
"load_config",
"(",
"config",
"path",
")",
"-",
"Load",
"a",
"config",
"object",
"from",
"an",
"alternate",
"path",
"."
] |
def load_config(self, config, path):
"""load_config(config, path) - Load a config object from an alternate
path."""
if self.debug:
self.note('load_config from %r' % path)
config.load_from_path(path, self)
return config
|
[
"def",
"load_config",
"(",
"self",
",",
"config",
",",
"path",
")",
":",
"if",
"self",
".",
"debug",
":",
"self",
".",
"note",
"(",
"'load_config from %r'",
"%",
"path",
")",
"config",
".",
"load_from_path",
"(",
"path",
",",
"self",
")",
"return",
"config"
] |
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/llvm/utils/lit/lit/LitConfig.py#L97-L103
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/tools/docs/parser.py
|
python
|
ReferenceResolver._one_ref
|
(self, match, relative_path_to_root)
|
return 'BROKEN_LINK'
|
Return a link for a single "@{symbol}" reference.
|
Return a link for a single "
|
[
"Return",
"a",
"link",
"for",
"a",
"single"
] |
def _one_ref(self, match, relative_path_to_root):
"""Return a link for a single "@{symbol}" reference."""
string = match.group(1)
# Look for link text after $.
dollar = string.rfind('$')
if dollar > 0: # Ignore $ in first character
link_text = string[dollar + 1:]
string = string[:dollar]
manual_link_text = True
else:
link_text = string
manual_link_text = False
# Handle different types of references.
if string.startswith('$'): # Doc reference
return self._doc_link(
string, link_text, manual_link_text, relative_path_to_root)
elif string.startswith('tensorflow::'):
# C++ symbol
return self._cc_link(
string, link_text, manual_link_text, relative_path_to_root)
else:
is_python = False
for py_module_name in self._py_module_names:
if string == py_module_name or string.startswith(py_module_name + '.'):
is_python = True
break
if is_python: # Python symbol
return self.python_link(link_text, string, relative_path_to_root,
code_ref=not manual_link_text)
# Error!
self.add_error('Did not understand "%s"' % match.group(0))
return 'BROKEN_LINK'
|
[
"def",
"_one_ref",
"(",
"self",
",",
"match",
",",
"relative_path_to_root",
")",
":",
"string",
"=",
"match",
".",
"group",
"(",
"1",
")",
"# Look for link text after $.",
"dollar",
"=",
"string",
".",
"rfind",
"(",
"'$'",
")",
"if",
"dollar",
">",
"0",
":",
"# Ignore $ in first character",
"link_text",
"=",
"string",
"[",
"dollar",
"+",
"1",
":",
"]",
"string",
"=",
"string",
"[",
":",
"dollar",
"]",
"manual_link_text",
"=",
"True",
"else",
":",
"link_text",
"=",
"string",
"manual_link_text",
"=",
"False",
"# Handle different types of references.",
"if",
"string",
".",
"startswith",
"(",
"'$'",
")",
":",
"# Doc reference",
"return",
"self",
".",
"_doc_link",
"(",
"string",
",",
"link_text",
",",
"manual_link_text",
",",
"relative_path_to_root",
")",
"elif",
"string",
".",
"startswith",
"(",
"'tensorflow::'",
")",
":",
"# C++ symbol",
"return",
"self",
".",
"_cc_link",
"(",
"string",
",",
"link_text",
",",
"manual_link_text",
",",
"relative_path_to_root",
")",
"else",
":",
"is_python",
"=",
"False",
"for",
"py_module_name",
"in",
"self",
".",
"_py_module_names",
":",
"if",
"string",
"==",
"py_module_name",
"or",
"string",
".",
"startswith",
"(",
"py_module_name",
"+",
"'.'",
")",
":",
"is_python",
"=",
"True",
"break",
"if",
"is_python",
":",
"# Python symbol",
"return",
"self",
".",
"python_link",
"(",
"link_text",
",",
"string",
",",
"relative_path_to_root",
",",
"code_ref",
"=",
"not",
"manual_link_text",
")",
"# Error!",
"self",
".",
"add_error",
"(",
"'Did not understand \"%s\"'",
"%",
"match",
".",
"group",
"(",
"0",
")",
")",
"return",
"'BROKEN_LINK'"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/tools/docs/parser.py#L309-L345
|
|
nasa/meshNetwork
|
ff4bd66e0ca6bd424fd8897a97252bb3925d8b3c
|
python/mesh/generic/meshController.py
|
python
|
MeshController.sendMsg
|
(self, destId, msg)
|
This function receives messages to be sent over the mesh network and queues them for transmission.
|
This function receives messages to be sent over the mesh network and queues them for transmission.
|
[
"This",
"function",
"receives",
"messages",
"to",
"be",
"sent",
"over",
"the",
"mesh",
"network",
"and",
"queues",
"them",
"for",
"transmission",
"."
] |
def sendMsg(self, destId, msg):
"""This function receives messages to be sent over the mesh network and queues them for transmission."""
# Place message in appropriate position in outgoing queue (broadcast messages are stored in the zero position)
if (len(msg) <= self.nodeParams.config.commConfig['msgPayloadMaxLength']): # message meets size requirements
self.comm.meshQueueIn.append(MeshTxMsg(destId, msg))
return True
else:
return False
|
[
"def",
"sendMsg",
"(",
"self",
",",
"destId",
",",
"msg",
")",
":",
"# Place message in appropriate position in outgoing queue (broadcast messages are stored in the zero position) ",
"if",
"(",
"len",
"(",
"msg",
")",
"<=",
"self",
".",
"nodeParams",
".",
"config",
".",
"commConfig",
"[",
"'msgPayloadMaxLength'",
"]",
")",
":",
"# message meets size requirements",
"self",
".",
"comm",
".",
"meshQueueIn",
".",
"append",
"(",
"MeshTxMsg",
"(",
"destId",
",",
"msg",
")",
")",
"return",
"True",
"else",
":",
"return",
"False"
] |
https://github.com/nasa/meshNetwork/blob/ff4bd66e0ca6bd424fd8897a97252bb3925d8b3c/python/mesh/generic/meshController.py#L298-L307
|
||
hfinkel/llvm-project-cxxjit
|
91084ef018240bbb8e24235ff5cd8c355a9c1a1e
|
clang/docs/tools/dump_ast_matchers.py
|
python
|
add_matcher
|
(result_type, name, args, comment, is_dyncast=False)
|
Adds a matcher to one of our categories.
|
Adds a matcher to one of our categories.
|
[
"Adds",
"a",
"matcher",
"to",
"one",
"of",
"our",
"categories",
"."
] |
def add_matcher(result_type, name, args, comment, is_dyncast=False):
"""Adds a matcher to one of our categories."""
if name == 'id':
# FIXME: Figure out whether we want to support the 'id' matcher.
return
matcher_id = '%s%d' % (name, ids[name])
ids[name] += 1
args = unify_arguments(args)
matcher_html = TD_TEMPLATE % {
'result': esc('Matcher<%s>' % result_type),
'name': name,
'args': esc(args),
'comment': esc(strip_doxygen(comment)),
'id': matcher_id,
}
if is_dyncast:
node_matchers[result_type + name] = matcher_html
# Use a heuristic to figure out whether a matcher is a narrowing or
# traversal matcher. By default, matchers that take other matchers as
# arguments (and are not node matchers) do traversal. We specifically
# exclude known narrowing matchers that also take other matchers as
# arguments.
elif ('Matcher<' not in args or
name in ['allOf', 'anyOf', 'anything', 'unless']):
narrowing_matchers[result_type + name + esc(args)] = matcher_html
else:
traversal_matchers[result_type + name + esc(args)] = matcher_html
|
[
"def",
"add_matcher",
"(",
"result_type",
",",
"name",
",",
"args",
",",
"comment",
",",
"is_dyncast",
"=",
"False",
")",
":",
"if",
"name",
"==",
"'id'",
":",
"# FIXME: Figure out whether we want to support the 'id' matcher.",
"return",
"matcher_id",
"=",
"'%s%d'",
"%",
"(",
"name",
",",
"ids",
"[",
"name",
"]",
")",
"ids",
"[",
"name",
"]",
"+=",
"1",
"args",
"=",
"unify_arguments",
"(",
"args",
")",
"matcher_html",
"=",
"TD_TEMPLATE",
"%",
"{",
"'result'",
":",
"esc",
"(",
"'Matcher<%s>'",
"%",
"result_type",
")",
",",
"'name'",
":",
"name",
",",
"'args'",
":",
"esc",
"(",
"args",
")",
",",
"'comment'",
":",
"esc",
"(",
"strip_doxygen",
"(",
"comment",
")",
")",
",",
"'id'",
":",
"matcher_id",
",",
"}",
"if",
"is_dyncast",
":",
"node_matchers",
"[",
"result_type",
"+",
"name",
"]",
"=",
"matcher_html",
"# Use a heuristic to figure out whether a matcher is a narrowing or",
"# traversal matcher. By default, matchers that take other matchers as",
"# arguments (and are not node matchers) do traversal. We specifically",
"# exclude known narrowing matchers that also take other matchers as",
"# arguments.",
"elif",
"(",
"'Matcher<'",
"not",
"in",
"args",
"or",
"name",
"in",
"[",
"'allOf'",
",",
"'anyOf'",
",",
"'anything'",
",",
"'unless'",
"]",
")",
":",
"narrowing_matchers",
"[",
"result_type",
"+",
"name",
"+",
"esc",
"(",
"args",
")",
"]",
"=",
"matcher_html",
"else",
":",
"traversal_matchers",
"[",
"result_type",
"+",
"name",
"+",
"esc",
"(",
"args",
")",
"]",
"=",
"matcher_html"
] |
https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/docs/tools/dump_ast_matchers.py#L106-L132
|
||
KLayout/klayout
|
d764adb1016f74d3e9cc8059cb183f5fc29b2a25
|
src/pymod/distutils_src/klayout/db/pcell_declaration_helper.py
|
python
|
_PCellDeclarationHelper.get_layers
|
(self, parameters)
|
return layers
|
get the layer definitions
|
get the layer definitions
|
[
"get",
"the",
"layer",
"definitions"
] |
def get_layers(self, parameters):
"""
get the layer definitions
"""
layers = []
for i in self._layer_param_index:
layers.append(parameters[i])
return layers
|
[
"def",
"get_layers",
"(",
"self",
",",
"parameters",
")",
":",
"layers",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
".",
"_layer_param_index",
":",
"layers",
".",
"append",
"(",
"parameters",
"[",
"i",
"]",
")",
"return",
"layers"
] |
https://github.com/KLayout/klayout/blob/d764adb1016f74d3e9cc8059cb183f5fc29b2a25/src/pymod/distutils_src/klayout/db/pcell_declaration_helper.py#L162-L169
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/richtext.py
|
python
|
RichTextCtrl.ForceDelayedLayout
|
(*args, **kwargs)
|
return _richtext.RichTextCtrl_ForceDelayedLayout(*args, **kwargs)
|
ForceDelayedLayout(self)
|
ForceDelayedLayout(self)
|
[
"ForceDelayedLayout",
"(",
"self",
")"
] |
def ForceDelayedLayout(*args, **kwargs):
"""ForceDelayedLayout(self)"""
return _richtext.RichTextCtrl_ForceDelayedLayout(*args, **kwargs)
|
[
"def",
"ForceDelayedLayout",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"RichTextCtrl_ForceDelayedLayout",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L2985-L2987
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python3/src/Lib/pyclbr.py
|
python
|
_getname
|
(g)
|
return (".".join(parts), token)
|
Return (dotted-name or None, next-token) tuple for token source g.
|
Return (dotted-name or None, next-token) tuple for token source g.
|
[
"Return",
"(",
"dotted",
"-",
"name",
"or",
"None",
"next",
"-",
"token",
")",
"tuple",
"for",
"token",
"source",
"g",
"."
] |
def _getname(g):
"Return (dotted-name or None, next-token) tuple for token source g."
parts = []
tokentype, token = next(g)[0:2]
if tokentype != NAME and token != '*':
return (None, token)
parts.append(token)
while True:
tokentype, token = next(g)[0:2]
if token != '.':
break
tokentype, token = next(g)[0:2]
if tokentype != NAME:
break
parts.append(token)
return (".".join(parts), token)
|
[
"def",
"_getname",
"(",
"g",
")",
":",
"parts",
"=",
"[",
"]",
"tokentype",
",",
"token",
"=",
"next",
"(",
"g",
")",
"[",
"0",
":",
"2",
"]",
"if",
"tokentype",
"!=",
"NAME",
"and",
"token",
"!=",
"'*'",
":",
"return",
"(",
"None",
",",
"token",
")",
"parts",
".",
"append",
"(",
"token",
")",
"while",
"True",
":",
"tokentype",
",",
"token",
"=",
"next",
"(",
"g",
")",
"[",
"0",
":",
"2",
"]",
"if",
"token",
"!=",
"'.'",
":",
"break",
"tokentype",
",",
"token",
"=",
"next",
"(",
"g",
")",
"[",
"0",
":",
"2",
"]",
"if",
"tokentype",
"!=",
"NAME",
":",
"break",
"parts",
".",
"append",
"(",
"token",
")",
"return",
"(",
"\".\"",
".",
"join",
"(",
"parts",
")",
",",
"token",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/pyclbr.py#L347-L362
|
|
weolar/miniblink49
|
1c4678db0594a4abde23d3ebbcc7cd13c3170777
|
v8_5_1/tools/verify_source_deps.py
|
python
|
iflatten_gyp_file
|
(gyp_file)
|
Overaproximates all values in the gyp file.
Iterates over all string values recursively. Removes '../' path prefixes.
|
Overaproximates all values in the gyp file.
|
[
"Overaproximates",
"all",
"values",
"in",
"the",
"gyp",
"file",
"."
] |
def iflatten_gyp_file(gyp_file):
"""Overaproximates all values in the gyp file.
Iterates over all string values recursively. Removes '../' path prefixes.
"""
with open(gyp_file) as f:
return iflatten(eval(f.read()))
|
[
"def",
"iflatten_gyp_file",
"(",
"gyp_file",
")",
":",
"with",
"open",
"(",
"gyp_file",
")",
"as",
"f",
":",
"return",
"iflatten",
"(",
"eval",
"(",
"f",
".",
"read",
"(",
")",
")",
")"
] |
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/v8_5_1/tools/verify_source_deps.py#L65-L71
|
||
ROCmSoftwarePlatform/hipCaffe
|
4ec5d482515cce532348553b6db6d00d015675d5
|
python/caffe/detector.py
|
python
|
Detector.detect_selective_search
|
(self, image_fnames)
|
return self.detect_windows(zip(image_fnames, windows_list))
|
Do windowed detection over Selective Search proposals by extracting
the crop and warping to the input dimensions of the net.
Parameters
----------
image_fnames: list
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
|
Do windowed detection over Selective Search proposals by extracting
the crop and warping to the input dimensions of the net.
|
[
"Do",
"windowed",
"detection",
"over",
"Selective",
"Search",
"proposals",
"by",
"extracting",
"the",
"crop",
"and",
"warping",
"to",
"the",
"input",
"dimensions",
"of",
"the",
"net",
"."
] |
def detect_selective_search(self, image_fnames):
"""
Do windowed detection over Selective Search proposals by extracting
the crop and warping to the input dimensions of the net.
Parameters
----------
image_fnames: list
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
import selective_search_ijcv_with_python as selective_search
# Make absolute paths so MATLAB can find the files.
image_fnames = [os.path.abspath(f) for f in image_fnames]
windows_list = selective_search.get_windows(
image_fnames,
cmd='selective_search_rcnn'
)
# Run windowed detection on the selective search list.
return self.detect_windows(zip(image_fnames, windows_list))
|
[
"def",
"detect_selective_search",
"(",
"self",
",",
"image_fnames",
")",
":",
"import",
"selective_search_ijcv_with_python",
"as",
"selective_search",
"# Make absolute paths so MATLAB can find the files.",
"image_fnames",
"=",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"f",
")",
"for",
"f",
"in",
"image_fnames",
"]",
"windows_list",
"=",
"selective_search",
".",
"get_windows",
"(",
"image_fnames",
",",
"cmd",
"=",
"'selective_search_rcnn'",
")",
"# Run windowed detection on the selective search list.",
"return",
"self",
".",
"detect_windows",
"(",
"zip",
"(",
"image_fnames",
",",
"windows_list",
")",
")"
] |
https://github.com/ROCmSoftwarePlatform/hipCaffe/blob/4ec5d482515cce532348553b6db6d00d015675d5/python/caffe/detector.py#L101-L123
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py
|
python
|
_setup
|
(_bootstrap_module)
|
Setup the path-based importers for importlib by importing needed
built-in modules and injecting them into the global namespace.
Other components are extracted from the core bootstrap module.
|
Setup the path-based importers for importlib by importing needed
built-in modules and injecting them into the global namespace.
|
[
"Setup",
"the",
"path",
"-",
"based",
"importers",
"for",
"importlib",
"by",
"importing",
"needed",
"built",
"-",
"in",
"modules",
"and",
"injecting",
"them",
"into",
"the",
"global",
"namespace",
"."
] |
def _setup(_bootstrap_module):
"""Setup the path-based importers for importlib by importing needed
built-in modules and injecting them into the global namespace.
Other components are extracted from the core bootstrap module.
"""
global sys, _imp, _bootstrap
_bootstrap = _bootstrap_module
sys = _bootstrap.sys
_imp = _bootstrap._imp
self_module = sys.modules[__name__]
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = _bootstrap._builtin_from_name(builtin_os)
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
setattr(self_module, '_pathseps_with_colon', {f':{s}' for s in path_separators})
# Directly load built-in modules needed during bootstrap.
builtin_names = ['_io', '_warnings', 'marshal']
if builtin_os == 'nt':
builtin_names.append('winreg')
for builtin_name in builtin_names:
if builtin_name not in sys.modules:
builtin_module = _bootstrap._builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
|
[
"def",
"_setup",
"(",
"_bootstrap_module",
")",
":",
"global",
"sys",
",",
"_imp",
",",
"_bootstrap",
"_bootstrap",
"=",
"_bootstrap_module",
"sys",
"=",
"_bootstrap",
".",
"sys",
"_imp",
"=",
"_bootstrap",
".",
"_imp",
"self_module",
"=",
"sys",
".",
"modules",
"[",
"__name__",
"]",
"# Directly load the os module (needed during bootstrap).",
"os_details",
"=",
"(",
"'posix'",
",",
"[",
"'/'",
"]",
")",
",",
"(",
"'nt'",
",",
"[",
"'\\\\'",
",",
"'/'",
"]",
")",
"for",
"builtin_os",
",",
"path_separators",
"in",
"os_details",
":",
"# Assumption made in _path_join()",
"assert",
"all",
"(",
"len",
"(",
"sep",
")",
"==",
"1",
"for",
"sep",
"in",
"path_separators",
")",
"path_sep",
"=",
"path_separators",
"[",
"0",
"]",
"if",
"builtin_os",
"in",
"sys",
".",
"modules",
":",
"os_module",
"=",
"sys",
".",
"modules",
"[",
"builtin_os",
"]",
"break",
"else",
":",
"try",
":",
"os_module",
"=",
"_bootstrap",
".",
"_builtin_from_name",
"(",
"builtin_os",
")",
"break",
"except",
"ImportError",
":",
"continue",
"else",
":",
"raise",
"ImportError",
"(",
"'importlib requires posix or nt'",
")",
"setattr",
"(",
"self_module",
",",
"'_os'",
",",
"os_module",
")",
"setattr",
"(",
"self_module",
",",
"'path_sep'",
",",
"path_sep",
")",
"setattr",
"(",
"self_module",
",",
"'path_separators'",
",",
"''",
".",
"join",
"(",
"path_separators",
")",
")",
"setattr",
"(",
"self_module",
",",
"'_pathseps_with_colon'",
",",
"{",
"f':{s}'",
"for",
"s",
"in",
"path_separators",
"}",
")",
"# Directly load built-in modules needed during bootstrap.",
"builtin_names",
"=",
"[",
"'_io'",
",",
"'_warnings'",
",",
"'marshal'",
"]",
"if",
"builtin_os",
"==",
"'nt'",
":",
"builtin_names",
".",
"append",
"(",
"'winreg'",
")",
"for",
"builtin_name",
"in",
"builtin_names",
":",
"if",
"builtin_name",
"not",
"in",
"sys",
".",
"modules",
":",
"builtin_module",
"=",
"_bootstrap",
".",
"_builtin_from_name",
"(",
"builtin_name",
")",
"else",
":",
"builtin_module",
"=",
"sys",
".",
"modules",
"[",
"builtin_name",
"]",
"setattr",
"(",
"self_module",
",",
"builtin_name",
",",
"builtin_module",
")",
"# Constants",
"setattr",
"(",
"self_module",
",",
"'_relax_case'",
",",
"_make_relax_case",
"(",
")",
")",
"EXTENSION_SUFFIXES",
".",
"extend",
"(",
"_imp",
".",
"extension_suffixes",
"(",
")",
")",
"if",
"builtin_os",
"==",
"'nt'",
":",
"SOURCE_SUFFIXES",
".",
"append",
"(",
"'.pyw'",
")",
"if",
"'_d.pyd'",
"in",
"EXTENSION_SUFFIXES",
":",
"WindowsRegistryFinder",
".",
"DEBUG_BUILD",
"=",
"True"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py#L1645-L1699
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/binhex.py
|
python
|
_Hqxdecoderengine.read
|
(self, totalwtd)
|
return decdata
|
Read at least wtd bytes (or until EOF)
|
Read at least wtd bytes (or until EOF)
|
[
"Read",
"at",
"least",
"wtd",
"bytes",
"(",
"or",
"until",
"EOF",
")"
] |
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
# The loop here is convoluted, since we don't really now how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
|
[
"def",
"read",
"(",
"self",
",",
"totalwtd",
")",
":",
"decdata",
"=",
"''",
"wtd",
"=",
"totalwtd",
"#",
"# The loop here is convoluted, since we don't really now how",
"# much to decode: there may be newlines in the incoming data.",
"while",
"wtd",
">",
"0",
":",
"if",
"self",
".",
"eof",
":",
"return",
"decdata",
"wtd",
"=",
"(",
"(",
"wtd",
"+",
"2",
")",
"//",
"3",
")",
"*",
"4",
"data",
"=",
"self",
".",
"ifp",
".",
"read",
"(",
"wtd",
")",
"#",
"# Next problem: there may not be a complete number of",
"# bytes in what we pass to a2b. Solve by yet another",
"# loop.",
"#",
"while",
"1",
":",
"try",
":",
"decdatacur",
",",
"self",
".",
"eof",
"=",
"binascii",
".",
"a2b_hqx",
"(",
"data",
")",
"break",
"except",
"binascii",
".",
"Incomplete",
":",
"pass",
"newdata",
"=",
"self",
".",
"ifp",
".",
"read",
"(",
"1",
")",
"if",
"not",
"newdata",
":",
"raise",
"Error",
",",
"'Premature EOF on binhex file'",
"data",
"=",
"data",
"+",
"newdata",
"decdata",
"=",
"decdata",
"+",
"decdatacur",
"wtd",
"=",
"totalwtd",
"-",
"len",
"(",
"decdata",
")",
"if",
"not",
"decdata",
"and",
"not",
"self",
".",
"eof",
":",
"raise",
"Error",
",",
"'Premature EOF on binhex file'",
"return",
"decdata"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/binhex.py#L285-L317
|
|
eventql/eventql
|
7ca0dbb2e683b525620ea30dc40540a22d5eb227
|
deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozbuild/makeutil.py
|
python
|
Rule.add_commands
|
(self, commands)
|
return self
|
Add commands to the rule.
|
Add commands to the rule.
|
[
"Add",
"commands",
"to",
"the",
"rule",
"."
] |
def add_commands(self, commands):
'''Add commands to the rule.'''
assert isinstance(commands, Iterable) and not isinstance(commands, StringTypes)
self._commands.extend(commands)
return self
|
[
"def",
"add_commands",
"(",
"self",
",",
"commands",
")",
":",
"assert",
"isinstance",
"(",
"commands",
",",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"commands",
",",
"StringTypes",
")",
"self",
".",
"_commands",
".",
"extend",
"(",
"commands",
")",
"return",
"self"
] |
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozbuild/makeutil.py#L114-L118
|
|
hpi-xnor/BMXNet-v2
|
af2b1859eafc5c721b1397cef02f946aaf2ce20d
|
python/mxnet/module/bucketing_module.py
|
python
|
BucketingModule.get_outputs
|
(self, merge_multi_context=True)
|
return self._curr_module.get_outputs(merge_multi_context=merge_multi_context)
|
Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of numpy arrays or list of list of numpy arrays
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are numpy arrays.
|
Gets outputs from a previous forward computation.
|
[
"Gets",
"outputs",
"from",
"a",
"previous",
"forward",
"computation",
"."
] |
def get_outputs(self, merge_multi_context=True):
"""Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
list of numpy arrays or list of list of numpy arrays
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are numpy arrays.
"""
assert self.binded and self.params_initialized
return self._curr_module.get_outputs(merge_multi_context=merge_multi_context)
|
[
"def",
"get_outputs",
"(",
"self",
",",
"merge_multi_context",
"=",
"True",
")",
":",
"assert",
"self",
".",
"binded",
"and",
"self",
".",
"params_initialized",
"return",
"self",
".",
"_curr_module",
".",
"get_outputs",
"(",
"merge_multi_context",
"=",
"merge_multi_context",
")"
] |
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/module/bucketing_module.py#L481-L500
|
|
llvm-dcpu16/llvm-dcpu16
|
ae6b01fecd03219677e391d4421df5d966d80dcf
|
bindings/python/llvm/core.py
|
python
|
MemoryBuffer.__init__
|
(self, filename=None)
|
Create a new memory buffer.
Currently, we support creating from the contents of a file at the
specified filename.
|
Create a new memory buffer.
|
[
"Create",
"a",
"new",
"memory",
"buffer",
"."
] |
def __init__(self, filename=None):
"""Create a new memory buffer.
Currently, we support creating from the contents of a file at the
specified filename.
"""
if filename is None:
raise Exception("filename argument must be defined")
memory = c_object_p()
out = c_char_p(None)
result = lib.LLVMCreateMemoryBufferWithContentsOfFile(filename,
byref(memory), byref(out))
if result:
raise Exception("Could not create memory buffer: %s" % out.value)
LLVMObject.__init__(self, memory, disposer=lib.LLVMDisposeMemoryBuffer)
|
[
"def",
"__init__",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"filename argument must be defined\"",
")",
"memory",
"=",
"c_object_p",
"(",
")",
"out",
"=",
"c_char_p",
"(",
"None",
")",
"result",
"=",
"lib",
".",
"LLVMCreateMemoryBufferWithContentsOfFile",
"(",
"filename",
",",
"byref",
"(",
"memory",
")",
",",
"byref",
"(",
"out",
")",
")",
"if",
"result",
":",
"raise",
"Exception",
"(",
"\"Could not create memory buffer: %s\"",
"%",
"out",
".",
"value",
")",
"LLVMObject",
".",
"__init__",
"(",
"self",
",",
"memory",
",",
"disposer",
"=",
"lib",
".",
"LLVMDisposeMemoryBuffer",
")"
] |
https://github.com/llvm-dcpu16/llvm-dcpu16/blob/ae6b01fecd03219677e391d4421df5d966d80dcf/bindings/python/llvm/core.py#L66-L84
|
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemDynamicContent/AWS/resource-manager-code/pak_files.py
|
python
|
PakFileArchiver.update_pak
|
(self, file_to_add_path, dest_pak_path)
|
Adds a file to an existing pak file
|
Adds a file to an existing pak file
|
[
"Adds",
"a",
"file",
"to",
"an",
"existing",
"pak",
"file"
] |
def update_pak(self, file_to_add_path, dest_pak_path):
""" Adds a file to an existing pak file """
file_dir_name = os.path.dirname(file_to_add_path)
self.write_pak(file_to_add_path, file_dir_name, dest_pak_path, 'a')
|
[
"def",
"update_pak",
"(",
"self",
",",
"file_to_add_path",
",",
"dest_pak_path",
")",
":",
"file_dir_name",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"file_to_add_path",
")",
"self",
".",
"write_pak",
"(",
"file_to_add_path",
",",
"file_dir_name",
",",
"dest_pak_path",
",",
"'a'",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDynamicContent/AWS/resource-manager-code/pak_files.py#L45-L49
|
||
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
qt/python/mantidqt/mantidqt/widgets/plotconfigdialog/curvestabwidget/__init__.py
|
python
|
CurveProperties._get_line_props_from_curve
|
(curve, props)
|
return props
|
Get a curve's line properties and add to props dict
|
Get a curve's line properties and add to props dict
|
[
"Get",
"a",
"curve",
"s",
"line",
"properties",
"and",
"add",
"to",
"props",
"dict"
] |
def _get_line_props_from_curve(curve, props):
"""Get a curve's line properties and add to props dict"""
if not curve:
props['linestyle'] = 'None'
props['drawstyle'] = 'default'
props['linewidth'] = rcParams['lines.linewidth']
props['color'] = convert_color_to_hex(rcParams['lines.color'])
else:
props['linestyle'] = LINESTYLE_MAP[curve.get_linestyle()]
props['drawstyle'] = curve.get_drawstyle()
props['linewidth'] = curve.get_linewidth()
props['color'] = convert_color_to_hex(curve.get_color())
return props
|
[
"def",
"_get_line_props_from_curve",
"(",
"curve",
",",
"props",
")",
":",
"if",
"not",
"curve",
":",
"props",
"[",
"'linestyle'",
"]",
"=",
"'None'",
"props",
"[",
"'drawstyle'",
"]",
"=",
"'default'",
"props",
"[",
"'linewidth'",
"]",
"=",
"rcParams",
"[",
"'lines.linewidth'",
"]",
"props",
"[",
"'color'",
"]",
"=",
"convert_color_to_hex",
"(",
"rcParams",
"[",
"'lines.color'",
"]",
")",
"else",
":",
"props",
"[",
"'linestyle'",
"]",
"=",
"LINESTYLE_MAP",
"[",
"curve",
".",
"get_linestyle",
"(",
")",
"]",
"props",
"[",
"'drawstyle'",
"]",
"=",
"curve",
".",
"get_drawstyle",
"(",
")",
"props",
"[",
"'linewidth'",
"]",
"=",
"curve",
".",
"get_linewidth",
"(",
")",
"props",
"[",
"'color'",
"]",
"=",
"convert_color_to_hex",
"(",
"curve",
".",
"get_color",
"(",
")",
")",
"return",
"props"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/plotconfigdialog/curvestabwidget/__init__.py#L170-L182
|
|
cms-sw/cmssw
|
fd9de012d503d3405420bcbeec0ec879baa57cf2
|
CondTools/SiStrip/python/o2o_helper.py
|
python
|
exists_iov
|
(dbFile, tag)
|
return len(dataCursor.fetchall()) > 0
|
Check if there exists any IOV for a specific tag in the given sqlite file.
|
Check if there exists any IOV for a specific tag in the given sqlite file.
|
[
"Check",
"if",
"there",
"exists",
"any",
"IOV",
"for",
"a",
"specific",
"tag",
"in",
"the",
"given",
"sqlite",
"file",
"."
] |
def exists_iov(dbFile, tag):
'''Check if there exists any IOV for a specific tag in the given sqlite file.'''
dataConnection = sqlite3.connect(dbFile)
dataCursor = dataConnection.cursor()
dataCursor.execute('select SINCE from IOV where TAG_NAME=:tag_name', {'tag_name' : tag})
return len(dataCursor.fetchall()) > 0
|
[
"def",
"exists_iov",
"(",
"dbFile",
",",
"tag",
")",
":",
"dataConnection",
"=",
"sqlite3",
".",
"connect",
"(",
"dbFile",
")",
"dataCursor",
"=",
"dataConnection",
".",
"cursor",
"(",
")",
"dataCursor",
".",
"execute",
"(",
"'select SINCE from IOV where TAG_NAME=:tag_name'",
",",
"{",
"'tag_name'",
":",
"tag",
"}",
")",
"return",
"len",
"(",
"dataCursor",
".",
"fetchall",
"(",
")",
")",
">",
"0"
] |
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/CondTools/SiStrip/python/o2o_helper.py#L149-L154
|
|
nasa/trick
|
7b85aa66329d62fe8816462627c09a353aac8299
|
share/trick/pymods/trick/variable_server.py
|
python
|
VariableServer.register_error_callback
|
(self, function, args=None, kwargs=None)
|
Call function if an error occurs while sampling variable values.
Registering an aleady-registered function replaces its existing
registration. The order in which functions are called is not
specified. Functions are executed on the asynchronous sampling
thread.
Paramaters
----------
function : callable
The function to call. It must accept a keyword argument
named 'exception' which will contain the error.
args : tuple
The positional arguments to be passed to the function.
kwargs : dict
The keyword arguments to be passed to the function.
|
Call function if an error occurs while sampling variable values.
Registering an aleady-registered function replaces its existing
registration. The order in which functions are called is not
specified. Functions are executed on the asynchronous sampling
thread.
|
[
"Call",
"function",
"if",
"an",
"error",
"occurs",
"while",
"sampling",
"variable",
"values",
".",
"Registering",
"an",
"aleady",
"-",
"registered",
"function",
"replaces",
"its",
"existing",
"registration",
".",
"The",
"order",
"in",
"which",
"functions",
"are",
"called",
"is",
"not",
"specified",
".",
"Functions",
"are",
"executed",
"on",
"the",
"asynchronous",
"sampling",
"thread",
"."
] |
def register_error_callback(self, function, args=None, kwargs=None):
"""
Call function if an error occurs while sampling variable values.
Registering an aleady-registered function replaces its existing
registration. The order in which functions are called is not
specified. Functions are executed on the asynchronous sampling
thread.
Paramaters
----------
function : callable
The function to call. It must accept a keyword argument
named 'exception' which will contain the error.
args : tuple
The positional arguments to be passed to the function.
kwargs : dict
The keyword arguments to be passed to the function.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
self._error_callbacks[function] = args, kwargs
|
[
"def",
"register_error_callback",
"(",
"self",
",",
"function",
",",
"args",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"[",
"]",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"self",
".",
"_error_callbacks",
"[",
"function",
"]",
"=",
"args",
",",
"kwargs"
] |
https://github.com/nasa/trick/blob/7b85aa66329d62fe8816462627c09a353aac8299/share/trick/pymods/trick/variable_server.py#L642-L664
|
||
bh107/bohrium
|
5b83e7117285fefc7779ed0e9acb0f8e74c7e068
|
bridge/npbackend/bohrium/backend_messaging.py
|
python
|
gpu_disable
|
()
|
return _backend_msg("GPU: disable")
|
Disable the GPU backend in the current runtime stack
|
Disable the GPU backend in the current runtime stack
|
[
"Disable",
"the",
"GPU",
"backend",
"in",
"the",
"current",
"runtime",
"stack"
] |
def gpu_disable():
"""Disable the GPU backend in the current runtime stack"""
return _backend_msg("GPU: disable")
|
[
"def",
"gpu_disable",
"(",
")",
":",
"return",
"_backend_msg",
"(",
"\"GPU: disable\"",
")"
] |
https://github.com/bh107/bohrium/blob/5b83e7117285fefc7779ed0e9acb0f8e74c7e068/bridge/npbackend/bohrium/backend_messaging.py#L19-L21
|
|
pmq20/node-packer
|
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
|
current/tools/gyp/pylib/gyp/generator/cmake.py
|
python
|
CMakeStringEscape
|
(a)
|
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
|
Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
|
Escapes the string 'a' for use inside a CMake string.
|
[
"Escapes",
"the",
"string",
"a",
"for",
"use",
"inside",
"a",
"CMake",
"string",
"."
] |
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
|
[
"def",
"CMakeStringEscape",
"(",
"a",
")",
":",
"return",
"a",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
".",
"replace",
"(",
"';'",
",",
"'\\\\;'",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")"
] |
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/tools/gyp/pylib/gyp/generator/cmake.py#L124-L140
|
|
apple/swift-lldb
|
d74be846ef3e62de946df343e8c234bde93a8912
|
scripts/Python/static-binding/lldb.py
|
python
|
SBData.SetByteOrder
|
(self, endian)
|
return _lldb.SBData_SetByteOrder(self, endian)
|
SetByteOrder(SBData self, lldb::ByteOrder endian)
|
SetByteOrder(SBData self, lldb::ByteOrder endian)
|
[
"SetByteOrder",
"(",
"SBData",
"self",
"lldb",
"::",
"ByteOrder",
"endian",
")"
] |
def SetByteOrder(self, endian):
"""SetByteOrder(SBData self, lldb::ByteOrder endian)"""
return _lldb.SBData_SetByteOrder(self, endian)
|
[
"def",
"SetByteOrder",
"(",
"self",
",",
"endian",
")",
":",
"return",
"_lldb",
".",
"SBData_SetByteOrder",
"(",
"self",
",",
"endian",
")"
] |
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L3337-L3339
|
|
envoyproxy/envoy
|
65541accdafe255e72310b4298d646e091da2d80
|
docs/conf.py
|
python
|
SubstitutionCodeBlock.run
|
(self)
|
return list(CodeBlock.run(self))
|
Replace placeholders with given variables.
|
Replace placeholders with given variables.
|
[
"Replace",
"placeholders",
"with",
"given",
"variables",
"."
] |
def run(self):
"""
Replace placeholders with given variables.
"""
app = self.state.document.settings.env.app
new_content = []
existing_content = self.content
for item in existing_content:
for pair in app.config.substitutions:
original, replacement = pair
item = item.replace(original, replacement)
new_content.append(item)
self.content = new_content
return list(CodeBlock.run(self))
|
[
"def",
"run",
"(",
"self",
")",
":",
"app",
"=",
"self",
".",
"state",
".",
"document",
".",
"settings",
".",
"env",
".",
"app",
"new_content",
"=",
"[",
"]",
"existing_content",
"=",
"self",
".",
"content",
"for",
"item",
"in",
"existing_content",
":",
"for",
"pair",
"in",
"app",
".",
"config",
".",
"substitutions",
":",
"original",
",",
"replacement",
"=",
"pair",
"item",
"=",
"item",
".",
"replace",
"(",
"original",
",",
"replacement",
")",
"new_content",
".",
"append",
"(",
"item",
")",
"self",
".",
"content",
"=",
"new_content",
"return",
"list",
"(",
"CodeBlock",
".",
"run",
"(",
"self",
")",
")"
] |
https://github.com/envoyproxy/envoy/blob/65541accdafe255e72310b4298d646e091da2d80/docs/conf.py#L35-L49
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/customtreectrl.py
|
python
|
CustomTreeCtrl.IsExpanded
|
(self, item)
|
return item.IsExpanded()
|
Returns whether the item is expanded or not.
:param `item`: an instance of :class:`GenericTreeItem`.
:return: ``True`` if the item is expanded, ``False`` if it is collapsed.
|
Returns whether the item is expanded or not.
|
[
"Returns",
"whether",
"the",
"item",
"is",
"expanded",
"or",
"not",
"."
] |
def IsExpanded(self, item):
"""
Returns whether the item is expanded or not.
:param `item`: an instance of :class:`GenericTreeItem`.
:return: ``True`` if the item is expanded, ``False`` if it is collapsed.
"""
return item.IsExpanded()
|
[
"def",
"IsExpanded",
"(",
"self",
",",
"item",
")",
":",
"return",
"item",
".",
"IsExpanded",
"(",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L4439-L4448
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/dateutil/dateutil/tz/win.py
|
python
|
tzwinbase.transitions
|
(self, year)
|
return dston, dstoff
|
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
|
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
|
[
"For",
"a",
"given",
"year",
"get",
"the",
"DST",
"on",
"and",
"off",
"transition",
"times",
"expressed",
"always",
"on",
"the",
"standard",
"time",
"side",
".",
"For",
"zones",
"with",
"no",
"transitions",
"this",
"function",
"returns",
"None",
"."
] |
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
# Ambiguous dates default to the STD side
dstoff -= self._dst_base_offset
return dston, dstoff
|
[
"def",
"transitions",
"(",
"self",
",",
"year",
")",
":",
"if",
"not",
"self",
".",
"hasdst",
":",
"return",
"None",
"dston",
"=",
"picknthweekday",
"(",
"year",
",",
"self",
".",
"_dstmonth",
",",
"self",
".",
"_dstdayofweek",
",",
"self",
".",
"_dsthour",
",",
"self",
".",
"_dstminute",
",",
"self",
".",
"_dstweeknumber",
")",
"dstoff",
"=",
"picknthweekday",
"(",
"year",
",",
"self",
".",
"_stdmonth",
",",
"self",
".",
"_stddayofweek",
",",
"self",
".",
"_stdhour",
",",
"self",
".",
"_stdminute",
",",
"self",
".",
"_stdweeknumber",
")",
"# Ambiguous dates default to the STD side",
"dstoff",
"-=",
"self",
".",
"_dst_base_offset",
"return",
"dston",
",",
"dstoff"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/dateutil/dateutil/tz/win.py#L163-L192
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py
|
python
|
ParserElement.setWhitespaceChars
|
( self, chars )
|
return self
|
Overrides the default whitespace chars
|
Overrides the default whitespace chars
|
[
"Overrides",
"the",
"default",
"whitespace",
"chars"
] |
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
|
[
"def",
"setWhitespaceChars",
"(",
"self",
",",
"chars",
")",
":",
"self",
".",
"skipWhitespace",
"=",
"True",
"self",
".",
"whiteChars",
"=",
"chars",
"self",
".",
"copyDefaultWhiteChars",
"=",
"False",
"return",
"self"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py#L2061-L2068
|
|
svn2github/webrtc
|
0e4615a75ed555ec866cd5543bfea586f3385ceb
|
webrtc/tools/barcode_tools/barcode_encoder.py
|
python
|
calculate_frames_number_from_yuv
|
(yuv_width, yuv_height, file_name)
|
return int(file_size/frame_size)
|
Calculates the number of frames of a YUV video.
Args:
yuv_width(int): Width of a frame of the yuv file.
yuv_height(int): Height of a frame of the YUV file.
file_name(string): The name of the YUV file.
Return:
(int): The number of frames in the YUV file.
|
Calculates the number of frames of a YUV video.
|
[
"Calculates",
"the",
"number",
"of",
"frames",
"of",
"a",
"YUV",
"video",
"."
] |
def calculate_frames_number_from_yuv(yuv_width, yuv_height, file_name):
"""Calculates the number of frames of a YUV video.
Args:
yuv_width(int): Width of a frame of the yuv file.
yuv_height(int): Height of a frame of the YUV file.
file_name(string): The name of the YUV file.
Return:
(int): The number of frames in the YUV file.
"""
file_size = os.path.getsize(file_name)
y_plane_size = yuv_width * yuv_height
u_plane_size = (yuv_width/2) * (yuv_height/2) # Equals to V plane size too
frame_size = y_plane_size + (2 * u_plane_size)
return int(file_size/frame_size)
|
[
"def",
"calculate_frames_number_from_yuv",
"(",
"yuv_width",
",",
"yuv_height",
",",
"file_name",
")",
":",
"file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"file_name",
")",
"y_plane_size",
"=",
"yuv_width",
"*",
"yuv_height",
"u_plane_size",
"=",
"(",
"yuv_width",
"/",
"2",
")",
"*",
"(",
"yuv_height",
"/",
"2",
")",
"# Equals to V plane size too",
"frame_size",
"=",
"y_plane_size",
"+",
"(",
"2",
"*",
"u_plane_size",
")",
"return",
"int",
"(",
"file_size",
"/",
"frame_size",
")"
] |
https://github.com/svn2github/webrtc/blob/0e4615a75ed555ec866cd5543bfea586f3385ceb/webrtc/tools/barcode_tools/barcode_encoder.py#L243-L258
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.