Dataset schema (one function record per row; each record below lists its non-empty fields in this order):

    nwo                 stringlengths   5 to 86
    sha                 stringlengths   40 to 40
    path                stringlengths   4 to 189
    language            stringclasses   1 value
    identifier          stringlengths   1 to 94
    parameters          stringlengths   2 to 4.03k
    argument_list       stringclasses   1 value
    return_statement    stringlengths   0 to 11.5k
    docstring           stringlengths   1 to 33.2k
    docstring_summary   stringlengths   0 to 5.15k
    docstring_tokens    list
    function            stringlengths   34 to 151k
    function_tokens     list
    url                 stringlengths   90 to 278
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_controls.py
python
HelpProvider.RemoveHelp
(*args, **kwargs)
return _controls_.HelpProvider_RemoveHelp(*args, **kwargs)
RemoveHelp(self, Window window) Removes the association between the window pointer and the help text. This is called by the wx.Window destructor. Without this, the table of help strings will fill up and when window pointers are reused, the wrong help string will be found.
RemoveHelp(self, Window window)
[ "RemoveHelp", "(", "self", "Window", "window", ")" ]
def RemoveHelp(*args, **kwargs):
    """
    RemoveHelp(self, Window window)

    Removes the association between the window pointer and the help
    text. This is called by the wx.Window destructor. Without this, the
    table of help strings will fill up and when window pointers are
    reused, the wrong help string will be found.
    """
    return _controls_.HelpProvider_RemoveHelp(*args, **kwargs)
[ "def", "RemoveHelp", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "HelpProvider_RemoveHelp", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L6283-L6292
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_gdi.py
python
RendererNative.GetDefault
(*args, **kwargs)
return _gdi_.RendererNative_GetDefault(*args, **kwargs)
GetDefault() -> RendererNative Return the default (native) implementation for this platform -- this is also the one used by default but this may be changed by calling `wx.RendererNative.Set` in which case the return value of this method may be different from the return value of `wx.RendererNative.Get`.
GetDefault() -> RendererNative
[ "GetDefault", "()", "-", ">", "RendererNative" ]
def GetDefault(*args, **kwargs):
    """
    GetDefault() -> RendererNative

    Return the default (native) implementation for this platform -- this
    is also the one used by default but this may be changed by calling
    `wx.RendererNative.Set` in which case the return value of this
    method may be different from the return value of
    `wx.RendererNative.Get`.
    """
    return _gdi_.RendererNative_GetDefault(*args, **kwargs)
[ "def", "GetDefault", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "RendererNative_GetDefault", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L7483-L7492
domino-team/openwrt-cc
8b181297c34d14d3ca521cc9f31430d561dbc688
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py
python
_HasIOSTarget
(targets)
return False
Returns true if any target contains the iOS specific key IPHONEOS_DEPLOYMENT_TARGET.
Returns true if any target contains the iOS specific key IPHONEOS_DEPLOYMENT_TARGET.
[ "Returns", "true", "if", "any", "target", "contains", "the", "iOS", "specific", "key", "IPHONEOS_DEPLOYMENT_TARGET", "." ]
def _HasIOSTarget(targets):
  """Returns true if any target contains the iOS specific key
  IPHONEOS_DEPLOYMENT_TARGET."""
  for target_dict in targets.values():
    for config in target_dict['configurations'].values():
      if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
        return True
  return False
[ "def", "_HasIOSTarget", "(", "targets", ")", ":", "for", "target_dict", "in", "targets", ".", "values", "(", ")", ":", "for", "config", "in", "target_dict", "[", "'configurations'", "]", ".", "values", "(", ")", ":", "if", "config", ".", "get", "(", "'xcode_settings'", ",", "{", "}", ")", ".", "get", "(", "'IPHONEOS_DEPLOYMENT_TARGET'", ")", ":", "return", "True", "return", "False" ]
https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py#L1600-L1607
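A minimal usage sketch for _HasIOSTarget above; the targets dict is hypothetical but mirrors the target_dict -> 'configurations' -> 'xcode_settings' layout the function walks, assuming the function itself is in scope.

# Hypothetical gyp-style targets mapping: name -> target dict.
targets = {
    'app': {
        'configurations': {
            'Release': {'xcode_settings': {'IPHONEOS_DEPLOYMENT_TARGET': '9.0'}},
        },
    },
    'lib': {
        'configurations': {'Release': {}},  # no xcode_settings at all
    },
}
assert _HasIOSTarget(targets)       # 'app' carries the iOS-specific key
assert not _HasIOSTarget({'lib': {'configurations': {'Debug': {}}}})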
lammps/lammps
b75c3065430a75b1b5543a10e10f46d9b4c91913
tools/i-pi/ipi/engine/thermostats.py
python
ThermoPILE_G.__init__
(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0, scale = 1.0)
Initializes ThermoPILE_G. Args: temp: The simulation temperature. Defaults to 1.0. dt: The simulation time step. Defaults to 1.0. tau: The centroid thermostat damping timescale. Defaults to 1.0. ethermo: The initial conserved energy quantity. Defaults to 0.0. Will be non-zero if the thermostat is initialized from a checkpoint file. scale: A float used to reduce the intensity of the PILE thermostat if required.
Initializes ThermoPILE_G.
[ "Initializes", "ThermoPILE_G", "." ]
def __init__(self, temp=1.0, dt=1.0, tau=1.0, ethermo=0.0, scale=1.0):
    """Initializes ThermoPILE_G.

    Args:
       temp: The simulation temperature. Defaults to 1.0.
       dt: The simulation time step. Defaults to 1.0.
       tau: The centroid thermostat damping timescale. Defaults to 1.0.
       ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
          be non-zero if the thermostat is initialized from a checkpoint file.
       scale: A float used to reduce the intensity of the PILE thermostat if
          required.
    """

    super(ThermoPILE_G, self).__init__(temp, dt, tau, ethermo)
    dset(self, "pilescale", depend_value(value=scale, name='pilescale'))
[ "def", "__init__", "(", "self", ",", "temp", "=", "1.0", ",", "dt", "=", "1.0", ",", "tau", "=", "1.0", ",", "ethermo", "=", "0.0", ",", "scale", "=", "1.0", ")", ":", "super", "(", "ThermoPILE_G", ",", "self", ")", ".", "__init__", "(", "temp", ",", "dt", ",", "tau", ",", "ethermo", ")", "dset", "(", "self", ",", "\"pilescale\"", ",", "depend_value", "(", "value", "=", "scale", ",", "name", "=", "'pilescale'", ")", ")" ]
https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/engine/thermostats.py#L461-L475
rrwick/Porechop
109e437280436d1ec27e5a5b7a34ffb752176390
porechop/nanopore_read.py
python
NanoporeRead.find_start_trim
(self, adapters, end_size, extra_trim_size, end_threshold, scoring_scheme_vals, min_trim_size, check_barcodes, forward_or_reverse)
Aligns one or more adapter sequences and possibly adjusts the read's start trim amount based on the result.
Aligns one or more adapter sequences and possibly adjusts the read's start trim amount based on the result.
[ "Aligns", "one", "or", "more", "adapter", "sequences", "and", "possibly", "adjusts", "the", "read", "s", "start", "trim", "amount", "based", "on", "the", "result", "." ]
def find_start_trim(self, adapters, end_size, extra_trim_size, end_threshold,
                    scoring_scheme_vals, min_trim_size, check_barcodes,
                    forward_or_reverse):
    """
    Aligns one or more adapter sequences and possibly adjusts the read's
    start trim amount based on the result.
    """
    read_seq_start = self.seq[:end_size]
    for adapter in adapters:
        if not adapter.start_sequence:
            continue
        full_score, partial_score, read_start, read_end = \
            align_adapter(read_seq_start, adapter.start_sequence[1],
                          scoring_scheme_vals)
        if partial_score > end_threshold and read_end != end_size and \
                read_end - read_start >= min_trim_size:
            trim_amount = read_end + extra_trim_size
            self.start_trim_amount = max(self.start_trim_amount, trim_amount)
            self.start_adapter_alignments.append((adapter, full_score,
                                                  partial_score, read_start,
                                                  read_end))
        if check_barcodes and adapter.is_barcode() and \
                adapter.barcode_direction() == forward_or_reverse:
            self.start_barcode_scores[adapter.get_barcode_name()] = full_score
[ "def", "find_start_trim", "(", "self", ",", "adapters", ",", "end_size", ",", "extra_trim_size", ",", "end_threshold", ",", "scoring_scheme_vals", ",", "min_trim_size", ",", "check_barcodes", ",", "forward_or_reverse", ")", ":", "read_seq_start", "=", "self", ".", "seq", "[", ":", "end_size", "]", "for", "adapter", "in", "adapters", ":", "if", "not", "adapter", ".", "start_sequence", ":", "continue", "full_score", ",", "partial_score", ",", "read_start", ",", "read_end", "=", "align_adapter", "(", "read_seq_start", ",", "adapter", ".", "start_sequence", "[", "1", "]", ",", "scoring_scheme_vals", ")", "if", "partial_score", ">", "end_threshold", "and", "read_end", "!=", "end_size", "and", "read_end", "-", "read_start", ">=", "min_trim_size", ":", "trim_amount", "=", "read_end", "+", "extra_trim_size", "self", ".", "start_trim_amount", "=", "max", "(", "self", ".", "start_trim_amount", ",", "trim_amount", ")", "self", ".", "start_adapter_alignments", ".", "append", "(", "(", "adapter", ",", "full_score", ",", "partial_score", ",", "read_start", ",", "read_end", ")", ")", "if", "check_barcodes", "and", "adapter", ".", "is_barcode", "(", ")", "and", "adapter", ".", "barcode_direction", "(", ")", "==", "forward_or_reverse", ":", "self", ".", "start_barcode_scores", "[", "adapter", ".", "get_barcode_name", "(", ")", "]", "=", "full_score" ]
https://github.com/rrwick/Porechop/blob/109e437280436d1ec27e5a5b7a34ffb752176390/porechop/nanopore_read.py#L166-L186
lukasmonk/lucaschess
13e2e5cb13b38a720ccf897af649054a64bcb914
Code/QT/Grid.py
python
Grid.mouseDoubleClickEvent
(self, event)
This event is handled in case the window wants to control each double click, calling the corresponding routine if it exists (gridDobleClick) with the row number and the column object as arguments
This event is handled in case the window wants to control each double click, calling the corresponding routine if it exists (gridDobleClick) with the row number and the column object as arguments
[ "This", "event", "is", "handled", "in", "case", "the", "window", "wants", "to", "control", "each", "double", "click", "calling", "the", "corresponding", "routine", "if", "it", "exists", "(", "gridDobleClick", ")", "with", "the", "row", "number", "and", "the", "column", "object", "as", "arguments" ]
def mouseDoubleClickEvent(self, event):
    """
    This event is handled in case the window wants to control each
    double click, calling the corresponding routine if it exists
    (gridDobleClick) with the row number and the column object as
    arguments.
    """
    if self.siEditable:
        QtGui.QTableView.mouseDoubleClickEvent(self, event)
    if hasattr(self.wParent, "gridDobleClick") and event.button() == 1:
        fil, columna = self.posActual()
        self.wParent.gridDobleClick(self, fil, columna)
[ "def", "mouseDoubleClickEvent", "(", "self", ",", "event", ")", ":", "if", "self", ".", "siEditable", ":", "QtGui", ".", "QTableView", ".", "mouseDoubleClickEvent", "(", "self", ",", "event", ")", "if", "hasattr", "(", "self", ".", "wParent", ",", "\"gridDobleClick\"", ")", "and", "event", ".", "button", "(", ")", "==", "1", ":", "fil", ",", "columna", "=", "self", ".", "posActual", "(", ")", "self", ".", "wParent", ".", "gridDobleClick", "(", "self", ",", "fil", ",", "columna", ")" ]
https://github.com/lukasmonk/lucaschess/blob/13e2e5cb13b38a720ccf897af649054a64bcb914/Code/QT/Grid.py#L335-L345
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/signal.py
python
_enum_to_int
(value)
Convert an IntEnum member to a numeric value. If it's not an IntEnum member return the value itself.
Convert an IntEnum member to a numeric value. If it's not an IntEnum member return the value itself.
[ "Convert", "an", "IntEnum", "member", "to", "a", "numeric", "value", ".", "If", "it", "s", "not", "an", "IntEnum", "member", "return", "the", "value", "itself", "." ]
def _enum_to_int(value):
    """Convert an IntEnum member to a numeric value.
    If it's not an IntEnum member return the value itself.
    """
    try:
        return int(value)
    except (ValueError, TypeError):
        return value
[ "def", "_enum_to_int", "(", "value", ")", ":", "try", ":", "return", "int", "(", "value", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "value" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/signal.py#L34-L41
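A quick check of _enum_to_int's fallback behavior (assuming the function above is in scope; signal.SIGINT is just a convenient IntEnum member):

import signal

print(_enum_to_int(signal.SIGINT))   # 2 on most POSIX systems: int() succeeds
print(_enum_to_int("not a signal"))  # returned unchanged: int() raises ValueError
print(_enum_to_int(None))            # returned unchanged: int() raises TypeError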
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/retdec-3.2/scripts/type_extractor/type_extractor/func_info.py
python
get_declarations
(text)
return re.findall(r'\s?\w+[\w\s\*]*\s+\w+\([\w\s\*\+-/,.()[\]]*?\)\s*;', text)
Extracts all function declarations from text.
Extracts all function declarations from text.
[ "Extracts", "all", "function", "declarations", "from", "text", "." ]
def get_declarations(text):
    """Extracts all function declarations from text."""
    return re.findall(r'\s?\w+[\w\s\*]*\s+\w+\([\w\s\*\+-/,.()[\]]*?\)\s*;', text)
[ "def", "get_declarations", "(", "text", ")", ":", "return", "re", ".", "findall", "(", "r'\\s?\\w+[\\w\\s\\*]*\\s+\\w+\\([\\w\\s\\*\\+-/,.()[\\]]*?\\)\\s*;'", ",", "text", ")" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/retdec-3.2/scripts/type_extractor/type_extractor/func_info.py#L86-L88
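A sketch of the regex in action on a made-up C snippet (with get_declarations and its module's re import in scope); declarations end in ';', so the main() definition is not matched:

C_SOURCE = '''
int add(int a, int b);
char * greet(void);
int main(void) { return 0; }
'''

for decl in get_declarations(C_SOURCE):
    print(decl.strip())
# int add(int a, int b);
# char * greet(void);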
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/_op_impl/cpu/conv3d.py
python
_conv3d_cpu
()
return
Conv3D cpu register
Conv3D cpu register
[ "Conv3D", "cpu", "register" ]
def _conv3d_cpu():
    """Conv3D cpu register"""
    return
[ "def", "_conv3d_cpu", "(", ")", ":", "return" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_op_impl/cpu/conv3d.py#L28-L30
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/mox3/mox3/mox.py
python
StrContains.__init__
(self, search_string)
Initialize. Args: # search_string: the string you are searching for search_string: str
Initialize.
[ "Initialize", "." ]
def __init__(self, search_string):
    """Initialize.

    Args:
      # search_string: the string you are searching for
      search_string: str
    """
    self._search_string = search_string
[ "def", "__init__", "(", "self", ",", "search_string", ")", ":", "self", ".", "_search_string", "=", "search_string" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/mox3/mox3/mox.py#L1482-L1490
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
tools/mo/openvino/tools/mo/back/OptimizeTransposeReshapeSequence.py
python
set_reshape_new_output_shape
(reshape_node: Node, new_output_shape: np.array)
Updates Reshape node shape to a new output shape. The function updates the second input if the node has it. :param reshape_node: node to update :param new_output_shape: new output shape :return: None
Updates Reshape node shape to a new output shape. The function updates the second input if the node has it. :param reshape_node: node to update :param new_output_shape: new output shape :return: None
[ "Updates", "Reshape", "node", "shape", "to", "a", "new", "output", "shape", ".", "The", "function", "updates", "the", "second", "input", "if", "the", "node", "has", "it", ".", ":", "param", "reshape_node", ":", "node", "to", "update", ":", "param", "new_output_shape", ":", "new", "output", "shape", ":", "return", ":", "None" ]
def set_reshape_new_output_shape(reshape_node: Node, new_output_shape: np.array):
    """
    Updates Reshape node shape to a new output shape. The function updates
    the second input if the node has it.
    :param reshape_node: node to update
    :param new_output_shape: new output shape
    :return: None
    """
    reshape_node.out_port(0).data.set_shape(new_output_shape)
    in_ports = [port for port in reshape_node.in_ports().values() if not port.disconnected()]
    if len(in_ports) == 2:
        reshape_node.in_port(1).data.set_value(new_output_shape)
[ "def", "set_reshape_new_output_shape", "(", "reshape_node", ":", "Node", ",", "new_output_shape", ":", "np", ".", "array", ")", ":", "reshape_node", ".", "out_port", "(", "0", ")", ".", "data", ".", "set_shape", "(", "new_output_shape", ")", "in_ports", "=", "[", "port", "for", "port", "in", "reshape_node", ".", "in_ports", "(", ")", ".", "values", "(", ")", "if", "not", "port", ".", "disconnected", "(", ")", "]", "if", "len", "(", "in_ports", ")", "==", "2", ":", "reshape_node", ".", "in_port", "(", "1", ")", ".", "data", ".", "set_value", "(", "new_output_shape", ")" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/mo/openvino/tools/mo/back/OptimizeTransposeReshapeSequence.py#L141-L151
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py3/IPython/core/interactiveshell.py
python
InteractiveShell.register_post_execute
(self, func)
DEPRECATED: Use ip.events.register('post_run_cell', func) Register a function for calling after code execution.
DEPRECATED: Use ip.events.register('post_run_cell', func) Register a function for calling after code execution.
[ "DEPRECATED", ":", "Use", "ip", ".", "events", ".", "register", "(", "post_run_cell", "func", ")", "Register", "a", "function", "for", "calling", "after", "code", "execution", "." ]
def register_post_execute(self, func):
    """DEPRECATED: Use ip.events.register('post_run_cell', func)

    Register a function for calling after code execution.
    """
    warn("ip.register_post_execute is deprecated, use "
         "ip.events.register('post_run_cell', func) instead.", stacklevel=2)
    self.events.register('post_run_cell', func)
[ "def", "register_post_execute", "(", "self", ",", "func", ")", ":", "warn", "(", "\"ip.register_post_execute is deprecated, use \"", "\"ip.events.register('post_run_cell', func) instead.\"", ",", "stacklevel", "=", "2", ")", "self", ".", "events", ".", "register", "(", "'post_run_cell'", ",", "func", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/core/interactiveshell.py#L1077-L1084
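The replacement the deprecation message points to looks like this inside a live IPython session; log_cell and its printout are illustrative only (recent IPython versions pass the cell's ExecutionResult to the callback):

def log_cell(result):
    # result is the ExecutionResult of the cell that just ran
    print("cell finished:", result)

ip = get_ipython()  # built-in inside an IPython session
ip.events.register('post_run_cell', log_cell)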
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/joblib/joblib/compressor.py
python
BinaryZlibFile.closed
(self)
return self._mode == _MODE_CLOSED
True if this file is closed.
True if this file is closed.
[ "True", "if", "this", "file", "is", "closed", "." ]
def closed(self):
    """True if this file is closed."""
    return self._mode == _MODE_CLOSED
[ "def", "closed", "(", "self", ")", ":", "return", "self", ".", "_mode", "==", "_MODE_CLOSED" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/joblib/joblib/compressor.py#L322-L324
chromiumembedded/cef
80caf947f3fe2210e5344713c5281d8af9bdc295
tools/yapf/yapf/yapflib/format_token.py
python
FormatToken.is_binary_op
(self)
return Subtype.BINARY_OPERATOR in self.subtypes
Token is a binary operator.
Token is a binary operator.
[ "Token", "is", "a", "binary", "operator", "." ]
def is_binary_op(self):
    """Token is a binary operator."""
    return Subtype.BINARY_OPERATOR in self.subtypes
[ "def", "is_binary_op", "(", "self", ")", ":", "return", "Subtype", ".", "BINARY_OPERATOR", "in", "self", ".", "subtypes" ]
https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/yapf/yapf/yapflib/format_token.py#L227-L229
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pyparsing/py3/pyparsing/common.py
python
pyparsing_common.convert_to_datetime
(fmt: str = "%Y-%m-%dT%H:%M:%S.%f")
return cvt_fn
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params - - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
[ "Helper", "to", "create", "a", "parse", "action", "for", "converting", "parsed", "datetime", "string", "to", "Python", "datetime", ".", "datetime" ]
def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
    """Helper to create a parse action for converting parsed
    datetime string to Python datetime.datetime

    Params -
    - fmt - format to be passed to datetime.strptime
      (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)

    Example::

        dt_expr = pyparsing_common.iso8601_datetime.copy()
        dt_expr.setParseAction(pyparsing_common.convertToDatetime())
        print(dt_expr.parseString("1999-12-31T23:59:59.999"))

    prints::

        [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
    """

    def cvt_fn(s, l, t):
        try:
            return datetime.strptime(t[0], fmt)
        except ValueError as ve:
            raise ParseException(s, l, str(ve))

    return cvt_fn
[ "def", "convert_to_datetime", "(", "fmt", ":", "str", "=", "\"%Y-%m-%dT%H:%M:%S.%f\"", ")", ":", "def", "cvt_fn", "(", "s", ",", "l", ",", "t", ")", ":", "try", ":", "return", "datetime", ".", "strptime", "(", "t", "[", "0", "]", ",", "fmt", ")", "except", "ValueError", "as", "ve", ":", "raise", "ParseException", "(", "s", ",", "l", ",", "str", "(", "ve", ")", ")", "return", "cvt_fn" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pyparsing/py3/pyparsing/common.py#L280-L304
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py
python
AbstractBufferedFile.writable
(self)
return self.mode in {"wb", "ab"} and not self.closed
Whether opened for writing
Whether opened for writing
[ "Whether", "opened", "for", "writing" ]
def writable(self):
    """Whether opened for writing"""
    return self.mode in {"wb", "ab"} and not self.closed
[ "def", "writable", "(", "self", ")", ":", "return", "self", ".", "mode", "in", "{", "\"wb\"", ",", "\"ab\"", "}", "and", "not", "self", ".", "closed" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py#L1329-L1331
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/s3transfer/bandwidth.py
python
BandwidthRateTracker.__init__
(self, alpha=0.8)
Tracks the rate of bandwidth consumption :type alpha: float :param alpha: The constant to use in calculating the exponential moving average of the bandwidth rate. Specifically it is used in the following calculation: current_rate = alpha * new_rate + (1 - alpha) * current_rate. The value of this constant should be between 0 and 1.
Tracks the rate of bandwidth consumption
[ "Tracks", "the", "rate", "of", "bandwidth", "consumption" ]
def __init__(self, alpha=0.8):
    """Tracks the rate of bandwidth consumption

    :type alpha: float
    :param alpha: The constant to use in calculating the exponential moving
        average of the bandwidth rate. Specifically it is used in the
        following calculation:

            current_rate = alpha * new_rate + (1 - alpha) * current_rate

        The value of this constant should be between 0 and 1.
    """
    self._alpha = alpha
    self._last_time = None
    self._current_rate = None
[ "def", "__init__", "(", "self", ",", "alpha", "=", "0.8", ")", ":", "self", ".", "_alpha", "=", "alpha", "self", ".", "_last_time", "=", "None", "self", ".", "_current_rate", "=", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/s3transfer/bandwidth.py#L342-L356
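The update rule in the docstring is a plain exponential moving average; a self-contained sketch of that arithmetic, independent of s3transfer (sample values are invented):

def ema_update(current_rate, new_rate, alpha=0.8):
    # current_rate = alpha * new_rate + (1 - alpha) * current_rate
    if current_rate is None:   # the first sample seeds the average
        return new_rate
    return alpha * new_rate + (1 - alpha) * current_rate

rate = None
for sample in [100.0, 80.0, 120.0]:   # hypothetical bytes/sec samples
    rate = ema_update(rate, sample)
print(rate)  # 112.8 -- weighted toward the newest sample since alpha=0.8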
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/core/indexes/base.py
python
Index.sort_values
( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, )
Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Int64Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Int64Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
Return a sorted copy of the index.
[ "Return", "a", "sorted", "copy", "of", "the", "index", "." ]
def sort_values(
    self,
    return_indexer: bool = False,
    ascending: bool = True,
    na_position: str_t = "last",
    key: Callable | None = None,
):
    """
    Return a sorted copy of the index.

    Return a sorted copy of the index, and optionally return the indices
    that sorted the index itself.

    Parameters
    ----------
    return_indexer : bool, default False
        Should the indices that would sort the index be returned.
    ascending : bool, default True
        Should the index values be sorted in an ascending order.
    na_position : {'first' or 'last'}, default 'last'
        Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
        the end.

        .. versionadded:: 1.2.0

    key : callable, optional
        If not None, apply the key function to the index values before
        sorting. This is similar to the `key` argument in the builtin
        :meth:`sorted` function, with the notable difference that this
        `key` function should be *vectorized*. It should expect an
        ``Index`` and return an ``Index`` of the same shape.

        .. versionadded:: 1.1.0

    Returns
    -------
    sorted_index : pandas.Index
        Sorted copy of the index.
    indexer : numpy.ndarray, optional
        The indices that the index itself was sorted by.

    See Also
    --------
    Series.sort_values : Sort values of a Series.
    DataFrame.sort_values : Sort values in a DataFrame.

    Examples
    --------
    >>> idx = pd.Index([10, 100, 1, 1000])
    >>> idx
    Int64Index([10, 100, 1, 1000], dtype='int64')

    Sort values in ascending order (default behavior).

    >>> idx.sort_values()
    Int64Index([1, 10, 100, 1000], dtype='int64')

    Sort values in descending order, and also get the indices `idx` was
    sorted by.

    >>> idx.sort_values(ascending=False, return_indexer=True)
    (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
    """
    idx = ensure_key_mapped(self, key)

    # GH 35584. Sort missing values according to na_position kwarg
    # ignore na_position for MultiIndex
    if not isinstance(self, ABCMultiIndex):
        _as = nargsort(
            items=idx, ascending=ascending, na_position=na_position, key=key
        )
    else:
        _as = idx.argsort()
        if not ascending:
            _as = _as[::-1]

    sorted_index = self.take(_as)

    if return_indexer:
        return sorted_index, _as
    else:
        return sorted_index
[ "def", "sort_values", "(", "self", ",", "return_indexer", ":", "bool", "=", "False", ",", "ascending", ":", "bool", "=", "True", ",", "na_position", ":", "str_t", "=", "\"last\"", ",", "key", ":", "Callable", "|", "None", "=", "None", ",", ")", ":", "idx", "=", "ensure_key_mapped", "(", "self", ",", "key", ")", "# GH 35584. Sort missing values according to na_position kwarg", "# ignore na_position for MultiIndex", "if", "not", "isinstance", "(", "self", ",", "ABCMultiIndex", ")", ":", "_as", "=", "nargsort", "(", "items", "=", "idx", ",", "ascending", "=", "ascending", ",", "na_position", "=", "na_position", ",", "key", "=", "key", ")", "else", ":", "_as", "=", "idx", ".", "argsort", "(", ")", "if", "not", "ascending", ":", "_as", "=", "_as", "[", ":", ":", "-", "1", "]", "sorted_index", "=", "self", ".", "take", "(", "_as", ")", "if", "return_indexer", ":", "return", "sorted_index", ",", "_as", "else", ":", "return", "sorted_index" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/indexes/base.py#L4933-L5014
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/calendar.py
python
CalendarDateAttr.HasBackgroundColour
(*args, **kwargs)
return _calendar.CalendarDateAttr_HasBackgroundColour(*args, **kwargs)
HasBackgroundColour(self) -> bool
HasBackgroundColour(self) -> bool
[ "HasBackgroundColour", "(", "self", ")", "-", ">", "bool" ]
def HasBackgroundColour(*args, **kwargs):
    """HasBackgroundColour(self) -> bool"""
    return _calendar.CalendarDateAttr_HasBackgroundColour(*args, **kwargs)
[ "def", "HasBackgroundColour", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_calendar", ".", "CalendarDateAttr_HasBackgroundColour", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/calendar.py#L126-L128
domino-team/openwrt-cc
8b181297c34d14d3ca521cc9f31430d561dbc688
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8_inspector/third_party/jinja2/jinja2/filters.py
python
make_attrgetter
(environment, attribute)
return attrgetter
Returns a callable that looks up the given attribute from a passed object with the rules of the environment. Dots are allowed to access attributes of attributes. Integer parts in paths are looked up as integers.
Returns a callable that looks up the given attribute from a passed object with the rules of the environment. Dots are allowed to access attributes of attributes. Integer parts in paths are looked up as integers.
[ "Returns", "a", "callable", "that", "looks", "up", "the", "given", "attribute", "from", "a", "passed", "object", "with", "the", "rules", "of", "the", "environment", ".", "Dots", "are", "allowed", "to", "access", "attributes", "of", "attributes", ".", "Integer", "parts", "in", "paths", "are", "looked", "up", "as", "integers", "." ]
def make_attrgetter(environment, attribute):
    """Returns a callable that looks up the given attribute from a
    passed object with the rules of the environment.  Dots are allowed
    to access attributes of attributes.  Integer parts in paths are
    looked up as integers.
    """
    if not isinstance(attribute, string_types) \
       or ('.' not in attribute and not attribute.isdigit()):
        return lambda x: environment.getitem(x, attribute)
    attribute = attribute.split('.')

    def attrgetter(item):
        for part in attribute:
            if part.isdigit():
                part = int(part)
            item = environment.getitem(item, part)
        return item
    return attrgetter
[ "def", "make_attrgetter", "(", "environment", ",", "attribute", ")", ":", "if", "not", "isinstance", "(", "attribute", ",", "string_types", ")", "or", "(", "'.'", "not", "in", "attribute", "and", "not", "attribute", ".", "isdigit", "(", ")", ")", ":", "return", "lambda", "x", ":", "environment", ".", "getitem", "(", "x", ",", "attribute", ")", "attribute", "=", "attribute", ".", "split", "(", "'.'", ")", "def", "attrgetter", "(", "item", ")", ":", "for", "part", "in", "attribute", ":", "if", "part", ".", "isdigit", "(", ")", ":", "part", "=", "int", "(", "part", ")", "item", "=", "environment", ".", "getitem", "(", "item", ",", "part", ")", "return", "item", "return", "attrgetter" ]
https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8_inspector/third_party/jinja2/jinja2/filters.py#L54-L70
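A usage sketch for make_attrgetter; the Env class below is a hypothetical stand-in for a Jinja2 environment (the real function also needs its module's string_types), approximating getitem as subscription with an attribute fallback:

class Env:
    def getitem(self, obj, key):
        try:
            return obj[key]
        except (TypeError, KeyError, IndexError):
            return getattr(obj, key)

getter = make_attrgetter(Env(), 'user.emails.0')
item = {'user': {'emails': ['a@example.com', 'b@example.com']}}
print(getter(item))  # a@example.com -- dots traverse, digit parts index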
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
caffe2/python/rnn_cell.py
python
RNNCell._prepare_output_sequence
(self, model, state_outputs)
return state_outputs[output_sequence_index]
Allows arbitrary post-processing of primary sequence output. (Note that state_outputs alternates between full-sequence and final output for each state, thus the index multiplier 2.)
Allows arbitrary post-processing of primary sequence output.
[ "Allows", "arbitrary", "post", "-", "processing", "of", "primary", "sequence", "output", "." ]
def _prepare_output_sequence(self, model, state_outputs):
    '''
    Allows arbitrary post-processing of primary sequence output.

    (Note that state_outputs alternates between full-sequence and final
    output for each state, thus the index multiplier 2.)
    '''
    output_sequence_index = 2 * self.get_output_state_index()
    return state_outputs[output_sequence_index]
[ "def", "_prepare_output_sequence", "(", "self", ",", "model", ",", "state_outputs", ")", ":", "output_sequence_index", "=", "2", "*", "self", ".", "get_output_state_index", "(", ")", "return", "state_outputs", "[", "output_sequence_index", "]" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/rnn_cell.py#L261-L269
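A pure-Python illustration of the interleaving the docstring's note describes; the state names are invented:

# state_outputs alternates (full_sequence_i, final_i) for each state i:
state_outputs = ['h_all', 'h_final', 'c_all', 'c_final']

# For a cell whose primary output state has (hypothetical) index 1,
# the full-sequence output lives at 2 * 1 = 2:
output_state_index = 1
print(state_outputs[2 * output_state_index])  # c_all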
facebook/ThreatExchange
31914a51820c73c8a0daffe62ccca29a6e3d359e
api-reference-examples/python/pytx/pytx/batch.py
python
Batch.prepare_single_request
(cls, request, name=None)
return d
Prepare a single request to be included in batch. :param request: A dictionary in the format required by Batch.submit(). :type request: dict :param name: A name to give this request. :type name: str :returns: dict
Prepare a single request to be included in batch.
[ "Prepare", "a", "single", "request", "to", "be", "included", "in", "batch", "." ]
def prepare_single_request(cls, request, name=None):
    """
    Prepare a single request to be included in batch.

    :param request: A dictionary in the format required by Batch.submit().
    :type request: dict
    :param name: A name to give this request.
    :type name: str

    :returns: dict
    """
    d = {b.METHOD: request.get('type', request.get('method', 'GET')),
         b.RELATIVE_URL: Batch.get_relative(
             request.get('url', request.get('relative_url', '')))}
    body = request.get('body', None)
    if body:
        d[b.BODY] = body
    if name:
        d['name'] = name
    return d
[ "def", "prepare_single_request", "(", "cls", ",", "request", ",", "name", "=", "None", ")", ":", "d", "=", "{", "b", ".", "METHOD", ":", "request", ".", "get", "(", "'type'", ",", "request", ".", "get", "(", "'method'", ",", "'GET'", ")", ")", ",", "b", ".", "RELATIVE_URL", ":", "Batch", ".", "get_relative", "(", "request", ".", "get", "(", "'url'", ",", "request", ".", "get", "(", "'relative_url'", ",", "''", ")", ")", ")", "}", "body", "=", "request", ".", "get", "(", "'body'", ",", "None", ")", "if", "body", ":", "d", "[", "b", ".", "BODY", "]", "=", "body", "if", "name", ":", "d", "[", "'name'", "]", "=", "name", "return", "d" ]
https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/api-reference-examples/python/pytx/pytx/batch.py#L34-L54
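A hedged usage sketch; the exact output keys come from pytx's b constants (b.METHOD, b.RELATIVE_URL), and Batch.get_relative is assumed to strip the scheme/host from an absolute URL:

request = {'method': 'POST',                           # alias for 'type'
           'url': 'https://example.test/v1/endpoint',  # alias for 'relative_url'
           'body': 'text=example'}
prepared = Batch.prepare_single_request(request, name='req-1')
# prepared maps b.METHOD to 'POST', b.RELATIVE_URL to the relative form of
# the URL, and carries 'body' and 'name' only because they were provided.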
protocolbuffers/protobuf
b5ab0b7a18b7336c60130f4ddb2d97c51792f896
python/mox.py
python
SameElementsAs.__init__
(self, expected_seq)
Initialize. Args: expected_seq: a sequence
Initialize.
[ "Initialize", "." ]
def __init__(self, expected_seq):
    """Initialize.

    Args:
      expected_seq: a sequence
    """
    self._expected_seq = expected_seq
[ "def", "__init__", "(", "self", ",", "expected_seq", ")", ":", "self", ".", "_expected_seq", "=", "expected_seq" ]
https://github.com/protocolbuffers/protobuf/blob/b5ab0b7a18b7336c60130f4ddb2d97c51792f896/python/mox.py#L1012-L1019
NREL/EnergyPlus
fadc5973b85c70e8cc923efb69c144e808a26078
src/EnergyPlus/api/datatransfer.py
python
DataExchange.actual_time
(self, state: c_void_p)
return self.api.actualTime(state)
Gets a simple sum of the values of the time part of the date/time function. Could be used in random seeding. :param state: An active EnergyPlus "state" that is returned from a call to `api.state_manager.new_state()`. :return: Integer value of time portion of the date/time function.
Gets a simple sum of the values of the time part of the date/time function. Could be used in random seeding.
[ "Gets", "a", "simple", "sum", "of", "the", "values", "of", "the", "time", "part", "of", "the", "date", "/", "time", "function", ".", "Could", "be", "used", "in", "random", "seeding", "." ]
def actual_time(self, state: c_void_p) -> int:
    """
    Gets a simple sum of the values of the time part of the date/time
    function. Could be used in random seeding.

    :param state: An active EnergyPlus "state" that is returned from a call
        to `api.state_manager.new_state()`.
    :return: Integer value of time portion of the date/time function.
    """
    return self.api.actualTime(state)
[ "def", "actual_time", "(", "self", ",", "state", ":", "c_void_p", ")", "->", "int", ":", "return", "self", ".", "api", ".", "actualTime", "(", "state", ")" ]
https://github.com/NREL/EnergyPlus/blob/fadc5973b85c70e8cc923efb69c144e808a26078/src/EnergyPlus/api/datatransfer.py#L1092-L1099
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py
python
QueueBase.close
(self, cancel_pending_enqueues=False, name=None)
return gen_data_flow_ops._queue_close( self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name)
Closes this queue. This operation signals that no more elements will be enqueued in the given queue. Subsequent `enqueue` and `enqueue_many` operations will fail. Subsequent `dequeue` and `dequeue_many` operations will continue to succeed if sufficient elements remain in the queue. Subsequent `dequeue` and `dequeue_many` operations that would block will fail immediately. If `cancel_pending_enqueues` is `True`, all pending requests will also be cancelled. Args: cancel_pending_enqueues: (Optional.) A boolean, defaulting to `False` (described above). name: A name for the operation (optional). Returns: The operation that closes the queue.
Closes this queue.
[ "Closes", "this", "queue", "." ]
def close(self, cancel_pending_enqueues=False, name=None):
  """Closes this queue.

  This operation signals that no more elements will be enqueued in
  the given queue. Subsequent `enqueue` and `enqueue_many` operations
  will fail. Subsequent `dequeue` and `dequeue_many` operations will
  continue to succeed if sufficient elements remain in the queue.
  Subsequent `dequeue` and `dequeue_many` operations that would block
  will fail immediately.

  If `cancel_pending_enqueues` is `True`, all pending requests will also
  be cancelled.

  Args:
    cancel_pending_enqueues: (Optional.) A boolean, defaulting to
      `False` (described above).
    name: A name for the operation (optional).

  Returns:
    The operation that closes the queue.
  """
  if name is None:
    name = "%s_Close" % self._name
  return gen_data_flow_ops._queue_close(
      self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues,
      name=name)
[ "def", "close", "(", "self", ",", "cancel_pending_enqueues", "=", "False", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "\"%s_Close\"", "%", "self", ".", "_name", "return", "gen_data_flow_ops", ".", "_queue_close", "(", "self", ".", "_queue_ref", ",", "cancel_pending_enqueues", "=", "cancel_pending_enqueues", ",", "name", "=", "name", ")" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py#L486-L511
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/operations/math_ops.py
python
Addcmul.__init__
(self)
Initialize Addcmul
Initialize Addcmul
[ "Initialize", "Addcmul" ]
def __init__(self):
    """Initialize Addcmul"""
    self.init_prim_io_names(inputs=['input_data', 'x1', 'x2', 'value'],
                            outputs=['y'])
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "init_prim_io_names", "(", "inputs", "=", "[", "'input_data'", ",", "'x1'", ",", "'x2'", ",", "'value'", "]", ",", "outputs", "=", "[", "'y'", "]", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/operations/math_ops.py#L332-L334
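The registered I/O names suggest the usual addcmul contract, y = input_data + value * x1 * x2 elementwise; a NumPy sketch of that assumed semantics (not the MindSpore kernel):

import numpy as np

def addcmul(input_data, x1, x2, value):
    # Assumed contract, matching the inputs/outputs registered above.
    return input_data + value * x1 * x2

print(addcmul(np.array([1.0, 1.0]), np.array([2.0, 3.0]),
              np.array([4.0, 5.0]), 0.5))
# [5.  8.5]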
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_windows.py
python
PrintDialogData.GetFromPage
(*args, **kwargs)
return _windows_.PrintDialogData_GetFromPage(*args, **kwargs)
GetFromPage(self) -> int
GetFromPage(self) -> int
[ "GetFromPage", "(", "self", ")", "-", ">", "int" ]
def GetFromPage(*args, **kwargs):
    """GetFromPage(self) -> int"""
    return _windows_.PrintDialogData_GetFromPage(*args, **kwargs)
[ "def", "GetFromPage", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "PrintDialogData_GetFromPage", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L5042-L5044
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/prompt-toolkit/py2/prompt_toolkit/eventloop/asyncio_base.py
python
AsyncioTimeout.reset
(self)
Reset the timeout. Starts a new timer.
Reset the timeout. Starts a new timer.
[ "Reset", "the", "timeout", ".", "Starts", "a", "new", "timer", "." ]
def reset(self):
    """
    Reset the timeout. Starts a new timer.
    """
    self.counter += 1
    local_counter = self.counter

    def timer_timeout():
        if self.counter == local_counter and self.running:
            self.callback()

    self.loop.call_later(self.timeout, timer_timeout)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "counter", "+=", "1", "local_counter", "=", "self", ".", "counter", "def", "timer_timeout", "(", ")", ":", "if", "self", ".", "counter", "==", "local_counter", "and", "self", ".", "running", ":", "self", ".", "callback", "(", ")", "self", ".", "loop", ".", "call_later", "(", "self", ".", "timeout", ",", "timer_timeout", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py2/prompt_toolkit/eventloop/asyncio_base.py#L29-L40
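The reset trick above avoids cancelling timers: each reset bumps a counter, and a stale timer finds a mismatched snapshot and does nothing. A self-contained asyncio sketch of the same idea (class and names are mine, not prompt_toolkit's):

import asyncio

class Debounce:
    def __init__(self, loop, timeout, callback):
        self.loop, self.timeout, self.callback = loop, timeout, callback
        self.counter = 0
        self.running = True

    def reset(self):
        self.counter += 1
        local_counter = self.counter  # snapshot identifies this timer

        def timer_timeout():
            # Fires only if no newer reset happened in the meantime.
            if self.counter == local_counter and self.running:
                self.callback()

        self.loop.call_later(self.timeout, timer_timeout)

async def main():
    d = Debounce(asyncio.get_running_loop(), 0.1, lambda: print("fired once"))
    for _ in range(3):
        d.reset()             # invalidates the previous timer
        await asyncio.sleep(0.01)
    await asyncio.sleep(0.2)  # only the last reset's timer fires

asyncio.run(main())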
eric612/MobileNet-YOLO
69b4441cb3ec8d553fbdef788ad033e246f901bd
scripts/cpp_lint.py
python
FileInfo.IsSource
(self)
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
File has a source file extension.
File has a source file extension.
[ "File", "has", "a", "source", "file", "extension", "." ]
def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
[ "def", "IsSource", "(", "self", ")", ":", "return", "self", ".", "Extension", "(", ")", "[", "1", ":", "]", "in", "(", "'c'", ",", "'cc'", ",", "'cpp'", ",", "'cxx'", ")" ]
https://github.com/eric612/MobileNet-YOLO/blob/69b4441cb3ec8d553fbdef788ad033e246f901bd/scripts/cpp_lint.py#L960-L962
OSGeo/gdal
3748fc4ba4fba727492774b2b908a2130c864a83
swig/python/osgeo/ogr.py
python
FeatureDefn.GetFieldDefn
(self, *args)
return _ogr.FeatureDefn_GetFieldDefn(self, *args)
r""" GetFieldDefn(FeatureDefn self, int i) -> FieldDefn OGRFieldDefnH OGR_FD_GetFieldDefn(OGRFeatureDefnH hDefn, int iField) Fetch field definition of the passed feature definition. This function is the same as the C++ method OGRFeatureDefn::GetFieldDefn(). Parameters: ----------- hDefn: handle to the feature definition to get the field definition from. iField: the field to fetch, between 0 and GetFieldCount()-1. a handle to an internal field definition object or NULL if invalid index. This object should not be modified or freed by the application.
r""" GetFieldDefn(FeatureDefn self, int i) -> FieldDefn OGRFieldDefnH OGR_FD_GetFieldDefn(OGRFeatureDefnH hDefn, int iField)
[ "r", "GetFieldDefn", "(", "FeatureDefn", "self", "int", "i", ")", "-", ">", "FieldDefn", "OGRFieldDefnH", "OGR_FD_GetFieldDefn", "(", "OGRFeatureDefnH", "hDefn", "int", "iField", ")" ]
def GetFieldDefn(self, *args):
    r"""
    GetFieldDefn(FeatureDefn self, int i) -> FieldDefn

    OGRFieldDefnH OGR_FD_GetFieldDefn(OGRFeatureDefnH hDefn, int iField)

    Fetch field definition of the passed feature definition.

    This function is the same as the C++ method
    OGRFeatureDefn::GetFieldDefn().

    Parameters:
    -----------

    hDefn: handle to the feature definition to get the field definition
    from.

    iField: the field to fetch, between 0 and GetFieldCount()-1.

    a handle to an internal field definition object or NULL if invalid
    index. This object should not be modified or freed by the
    application.
    """
    return _ogr.FeatureDefn_GetFieldDefn(self, *args)
[ "def", "GetFieldDefn", "(", "self", ",", "*", "args", ")", ":", "return", "_ogr", ".", "FeatureDefn_GetFieldDefn", "(", "self", ",", "*", "args", ")" ]
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/ogr.py#L4531-L4554
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
native_client_sdk/src/build_tools/update_nacl_manifest.py
python
VersionFinder.GetAvailablePlatformArchivesFor
(self, version, allow_trunk_revisions)
return expected_archive_urls, missing_archives
Returns a sequence of archives that exist for a given version, on the given platforms. The second element of the returned tuple is a list of all platforms that do not have an archive for the given version. Args: version: The version to find archives for. (e.g. "18.0.1025.164") allow_trunk_revisions: If True, will search for archives using the trunk revision that matches the branch version. Returns: A tuple (archives, missing_archives). |archives| is a list of archive URLs, |missing_archives| is a list of archive names.
Returns a sequence of archives that exist for a given version, on the given platforms.
[ "Returns", "a", "sequence", "of", "archives", "that", "exist", "for", "a", "given", "version", "on", "the", "given", "platforms", "." ]
def GetAvailablePlatformArchivesFor(self, version, allow_trunk_revisions):
  """Returns a sequence of archives that exist for a given version, on the
  given platforms.

  The second element of the returned tuple is a list of all platforms that
  do not have an archive for the given version.

  Args:
    version: The version to find archives for. (e.g. "18.0.1025.164")
    allow_trunk_revisions: If True, will search for archives using the
        trunk revision that matches the branch version.
  Returns:
    A tuple (archives, missing_archives). |archives| is a list of archive
    URLs, |missing_archives| is a list of archive names.
  """
  archive_urls = self._GetAvailableArchivesFor(version)
  platform_archives = set(GetPlatformArchiveName(p) for p in self.platforms)
  expected_archives = platform_archives
  if self.extra_archives:
    for extra_archive, extra_archive_min_version in self.extra_archives:
      if SplitVersion(version) >= SplitVersion(extra_archive_min_version):
        expected_archives.add(extra_archive)
  found_archives = set(GetCanonicalArchiveName(a) for a in archive_urls)
  missing_archives = expected_archives - found_archives
  if allow_trunk_revisions and missing_archives:
    # Try to find trunk versions of any missing archives.
    trunk_version = self.delegate.GetTrunkRevision(version)
    trunk_archives = self._GetAvailableArchivesFor(trunk_version)
    for trunk_archive_url in trunk_archives:
      trunk_archive = GetCanonicalArchiveName(trunk_archive_url)
      if trunk_archive in missing_archives:
        archive_urls.append(trunk_archive_url)
        missing_archives.discard(trunk_archive)

  # Only return archives that are "expected".
  def IsExpected(url):
    return GetCanonicalArchiveName(url) in expected_archives

  expected_archive_urls = [u for u in archive_urls if IsExpected(u)]
  return expected_archive_urls, missing_archives
[ "def", "GetAvailablePlatformArchivesFor", "(", "self", ",", "version", ",", "allow_trunk_revisions", ")", ":", "archive_urls", "=", "self", ".", "_GetAvailableArchivesFor", "(", "version", ")", "platform_archives", "=", "set", "(", "GetPlatformArchiveName", "(", "p", ")", "for", "p", "in", "self", ".", "platforms", ")", "expected_archives", "=", "platform_archives", "if", "self", ".", "extra_archives", ":", "for", "extra_archive", ",", "extra_archive_min_version", "in", "self", ".", "extra_archives", ":", "if", "SplitVersion", "(", "version", ")", ">=", "SplitVersion", "(", "extra_archive_min_version", ")", ":", "expected_archives", ".", "add", "(", "extra_archive", ")", "found_archives", "=", "set", "(", "GetCanonicalArchiveName", "(", "a", ")", "for", "a", "in", "archive_urls", ")", "missing_archives", "=", "expected_archives", "-", "found_archives", "if", "allow_trunk_revisions", "and", "missing_archives", ":", "# Try to find trunk versions of any missing archives.", "trunk_version", "=", "self", ".", "delegate", ".", "GetTrunkRevision", "(", "version", ")", "trunk_archives", "=", "self", ".", "_GetAvailableArchivesFor", "(", "trunk_version", ")", "for", "trunk_archive_url", "in", "trunk_archives", ":", "trunk_archive", "=", "GetCanonicalArchiveName", "(", "trunk_archive_url", ")", "if", "trunk_archive", "in", "missing_archives", ":", "archive_urls", ".", "append", "(", "trunk_archive_url", ")", "missing_archives", ".", "discard", "(", "trunk_archive", ")", "# Only return archives that are \"expected\".", "def", "IsExpected", "(", "url", ")", ":", "return", "GetCanonicalArchiveName", "(", "url", ")", "in", "expected_archives", "expected_archive_urls", "=", "[", "u", "for", "u", "in", "archive_urls", "if", "IsExpected", "(", "u", ")", "]", "return", "expected_archive_urls", ",", "missing_archives" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/native_client_sdk/src/build_tools/update_nacl_manifest.py#L385-L424
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/factorization/python/ops/clustering_ops.py
python
KMeans._mini_batch_training_op
(self, inputs, cluster_idx_list, cluster_centers, total_counts)
return control_flow_ops.group(*update_ops)
Creates an op for training for mini batch case. Args: inputs: list of input Tensors. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. total_counts: Tensor Ref of cluster counts. Returns: An op for doing an update of mini-batch k-means.
Creates an op for training for mini batch case.
[ "Creates", "an", "op", "for", "training", "for", "mini", "batch", "case", "." ]
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
                            total_counts):
  """Creates an op for training for mini batch case.

  Args:
    inputs: list of input Tensors.
    cluster_idx_list: A vector (or list of vectors). Each element in the
      vector corresponds to an input row in 'inp' and specifies the cluster
      id corresponding to the input.
    cluster_centers: Tensor Ref of cluster centers.
    total_counts: Tensor Ref of cluster counts.

  Returns:
    An op for doing an update of mini-batch k-means.
  """
  update_ops = []
  for inp, cluster_idx in zip(inputs, cluster_idx_list):
    with ops.colocate_with(inp, ignore_existing=True):
      assert total_counts is not None
      cluster_idx = array_ops.reshape(cluster_idx, [-1])
      # Dedupe the unique ids of cluster_centers being updated so that updates
      # can be locally aggregated.
      unique_ids, unique_idx = array_ops.unique(cluster_idx)
      num_unique_cluster_idx = array_ops.size(unique_ids)
      # Fetch the old values of counts and cluster_centers.
      with ops.colocate_with(total_counts, ignore_existing=True):
        old_counts = array_ops.gather(total_counts, unique_ids)
      # TODO(agarwal): This colocation seems to run into problems. Fix it.
      with ops.colocate_with(cluster_centers, ignore_existing=True):
        old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
      # Locally aggregate the increment to counts.
      count_updates = math_ops.unsorted_segment_sum(
          array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
          unique_idx, num_unique_cluster_idx)
      # Locally compute the sum of inputs mapped to each id.
      # For a cluster with old cluster value x, old count n, and with data
      # d_1,...d_k newly assigned to it, we recompute the new value as
      # x += (sum_i(d_i) - k * x) / (n + k).
      # Compute sum_i(d_i), see comment above.
      cluster_center_updates = math_ops.unsorted_segment_sum(
          inp, unique_idx, num_unique_cluster_idx)
      # Shape to enable broadcasting count_updates and learning_rate to inp.
      # It extends the shape with 1's to match the rank of inp.
      broadcast_shape = array_ops.concat([
          array_ops.reshape(num_unique_cluster_idx, [1]),
          array_ops.ones(
              array_ops.reshape(array_ops.rank(inp) - 1, [1]),
              dtype=dtypes.int32)
      ], 0)
      # Subtract k * x, see comment above.
      cluster_center_updates -= math_ops.cast(
          array_ops.reshape(count_updates, broadcast_shape),
          inp.dtype) * old_cluster_centers
      learning_rate = math_ops.reciprocal(
          math_ops.cast(old_counts + count_updates, inp.dtype))
      learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
      # scale by 1 / (n + k), see comment above.
      cluster_center_updates *= learning_rate
      # Apply the updates.
      update_counts = state_ops.scatter_add(total_counts, unique_ids,
                                            count_updates)
      update_cluster_centers = state_ops.scatter_add(cluster_centers,
                                                     unique_ids,
                                                     cluster_center_updates)
      update_ops.extend([update_counts, update_cluster_centers])
  return control_flow_ops.group(*update_ops)
[ "def", "_mini_batch_training_op", "(", "self", ",", "inputs", ",", "cluster_idx_list", ",", "cluster_centers", ",", "total_counts", ")", ":", "update_ops", "=", "[", "]", "for", "inp", ",", "cluster_idx", "in", "zip", "(", "inputs", ",", "cluster_idx_list", ")", ":", "with", "ops", ".", "colocate_with", "(", "inp", ",", "ignore_existing", "=", "True", ")", ":", "assert", "total_counts", "is", "not", "None", "cluster_idx", "=", "array_ops", ".", "reshape", "(", "cluster_idx", ",", "[", "-", "1", "]", ")", "# Dedupe the unique ids of cluster_centers being updated so that updates", "# can be locally aggregated.", "unique_ids", ",", "unique_idx", "=", "array_ops", ".", "unique", "(", "cluster_idx", ")", "num_unique_cluster_idx", "=", "array_ops", ".", "size", "(", "unique_ids", ")", "# Fetch the old values of counts and cluster_centers.", "with", "ops", ".", "colocate_with", "(", "total_counts", ",", "ignore_existing", "=", "True", ")", ":", "old_counts", "=", "array_ops", ".", "gather", "(", "total_counts", ",", "unique_ids", ")", "# TODO(agarwal): This colocation seems to run into problems. Fix it.", "with", "ops", ".", "colocate_with", "(", "cluster_centers", ",", "ignore_existing", "=", "True", ")", ":", "old_cluster_centers", "=", "array_ops", ".", "gather", "(", "cluster_centers", ",", "unique_ids", ")", "# Locally aggregate the increment to counts.", "count_updates", "=", "math_ops", ".", "unsorted_segment_sum", "(", "array_ops", ".", "ones_like", "(", "unique_idx", ",", "dtype", "=", "total_counts", ".", "dtype", ")", ",", "unique_idx", ",", "num_unique_cluster_idx", ")", "# Locally compute the sum of inputs mapped to each id.", "# For a cluster with old cluster value x, old count n, and with data", "# d_1,...d_k newly assigned to it, we recompute the new value as", "# x += (sum_i(d_i) - k * x) / (n + k).", "# Compute sum_i(d_i), see comment above.", "cluster_center_updates", "=", "math_ops", ".", "unsorted_segment_sum", "(", "inp", ",", "unique_idx", ",", "num_unique_cluster_idx", ")", "# Shape to enable broadcasting count_updates and learning_rate to inp.", "# It extends the shape with 1's to match the rank of inp.", "broadcast_shape", "=", "array_ops", ".", "concat", "(", "[", "array_ops", ".", "reshape", "(", "num_unique_cluster_idx", ",", "[", "1", "]", ")", ",", "array_ops", ".", "ones", "(", "array_ops", ".", "reshape", "(", "array_ops", ".", "rank", "(", "inp", ")", "-", "1", ",", "[", "1", "]", ")", ",", "dtype", "=", "dtypes", ".", "int32", ")", "]", ",", "0", ")", "# Subtract k * x, see comment above.", "cluster_center_updates", "-=", "math_ops", ".", "cast", "(", "array_ops", ".", "reshape", "(", "count_updates", ",", "broadcast_shape", ")", ",", "inp", ".", "dtype", ")", "*", "old_cluster_centers", "learning_rate", "=", "math_ops", ".", "reciprocal", "(", "math_ops", ".", "cast", "(", "old_counts", "+", "count_updates", ",", "inp", ".", "dtype", ")", ")", "learning_rate", "=", "array_ops", ".", "reshape", "(", "learning_rate", ",", "broadcast_shape", ")", "# scale by 1 / (n + k), see comment above.", "cluster_center_updates", "*=", "learning_rate", "# Apply the updates.", "update_counts", "=", "state_ops", ".", "scatter_add", "(", "total_counts", ",", "unique_ids", ",", "count_updates", ")", "update_cluster_centers", "=", "state_ops", ".", "scatter_add", "(", "cluster_centers", ",", "unique_ids", ",", "cluster_center_updates", ")", "update_ops", ".", "extend", "(", "[", "update_counts", ",", "update_cluster_centers", "]", ")", "return", "control_flow_ops", ".", "group", 
"(", "*", "update_ops", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/factorization/python/ops/clustering_ops.py#L438-L502
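The update rule in the comments, x += (sum_i(d_i) - k * x) / (n + k), in plain NumPy with the TF colocation/scatter machinery stripped away; a sketch of the arithmetic, not the library op:

import numpy as np

def mini_batch_kmeans_update(centers, counts, batch, assign):
    # Per cluster with old center x, old count n, and k newly assigned
    # points d_1..d_k: x += (sum_i(d_i) - k * x) / (n + k), then n += k.
    for cid in np.unique(assign):
        pts = batch[assign == cid]
        k, n = len(pts), counts[cid]
        centers[cid] += (pts.sum(axis=0) - k * centers[cid]) / (n + k)
        counts[cid] += k
    return centers, counts

centers = np.array([[0.0, 0.0], [10.0, 10.0]])
counts = np.array([4.0, 4.0])
batch = np.array([[1.0, 1.0], [9.0, 11.0]])
assign = np.array([0, 1])
print(mini_batch_kmeans_update(centers, counts, batch, assign))
# centers move to [[0.2, 0.2], [9.8, 10.2]]; counts become [5., 5.]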
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/parallel/_utils.py
python
_check_same_layout
(tensor_layout1, tensor_layout2)
return tensor_layout1[0] == tensor_layout2[0] and tensor_layout1[1] == tensor_layout2[1]
check if two tensor layouts are the same
check if two tensor layouts are the same
[ "check", "if", "two", "tensor", "layouts", "are", "the", "same" ]
def _check_same_layout(tensor_layout1, tensor_layout2): """check if two tensor layouts are the same""" return tensor_layout1[0] == tensor_layout2[0] and tensor_layout1[1] == tensor_layout2[1]
[ "def", "_check_same_layout", "(", "tensor_layout1", ",", "tensor_layout2", ")", ":", "return", "tensor_layout1", "[", "0", "]", "==", "tensor_layout2", "[", "0", "]", "and", "tensor_layout1", "[", "1", "]", "==", "tensor_layout2", "[", "1", "]" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/parallel/_utils.py#L299-L301
hpi-xnor/BMXNet-v2
af2b1859eafc5c721b1397cef02f946aaf2ce20d
example/svrg_module/api_usage_example/example_api_train.py
python
create_network
(batch_size, update_freq)
return di, mod
Create a linear regression network for performing SVRG optimization. Parameters ---------- batch_size: int Size of data split update_freq: int Update Frequency for calculating full gradients Returns ---------- di: mx.io.NDArrayIter Data iterator mod: SVRGModule An instance of SVRGModule for performing SVRG optimization
Create a linear regression network for performing SVRG optimization. Parameters ---------- batch_size: int Size of data split update_freq: int Update Frequency for calculating full gradients
[ "Create", "a", "linear", "regression", "network", "for", "performing", "SVRG", "optimization", ".", "Parameters", "----------", "batch_size", ":", "int", "Size", "of", "data", "split", "update_freq", ":", "int", "Update", "Frequency", "for", "calculating", "full", "gradients" ]
def create_network(batch_size, update_freq): """Create a linear regression network for performing SVRG optimization. Parameters ---------- batch_size: int Size of data split update_freq: int Update Frequency for calculating full gradients Returns ---------- di: mx.io.NDArrayIter Data iterator mod: SVRGModule An instance of SVRGModule for performing SVRG optimization """ import logging head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) train_data = np.random.randint(1, 5, [1000, 2]) weights = np.array([1.0, 2.0]) train_label = train_data.dot(weights) di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size, shuffle=True, label_name='lin_reg_label') X = mx.sym.Variable('data') Y = mx.symbol.Variable('lin_reg_label') fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") mod = SVRGModule( symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=update_freq, logger=logging ) return di, mod
[ "def", "create_network", "(", "batch_size", ",", "update_freq", ")", ":", "import", "logging", "head", "=", "'%(asctime)-15s %(message)s'", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "head", ")", "train_data", "=", "np", ".", "random", ".", "randint", "(", "1", ",", "5", ",", "[", "1000", ",", "2", "]", ")", "weights", "=", "np", ".", "array", "(", "[", "1.0", ",", "2.0", "]", ")", "train_label", "=", "train_data", ".", "dot", "(", "weights", ")", "di", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "train_data", ",", "train_label", ",", "batch_size", "=", "batch_size", ",", "shuffle", "=", "True", ",", "label_name", "=", "'lin_reg_label'", ")", "X", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "Y", "=", "mx", ".", "symbol", ".", "Variable", "(", "'lin_reg_label'", ")", "fully_connected_layer", "=", "mx", ".", "sym", ".", "FullyConnected", "(", "data", "=", "X", ",", "name", "=", "'fc1'", ",", "num_hidden", "=", "1", ")", "lro", "=", "mx", ".", "sym", ".", "LinearRegressionOutput", "(", "data", "=", "fully_connected_layer", ",", "label", "=", "Y", ",", "name", "=", "\"lro\"", ")", "mod", "=", "SVRGModule", "(", "symbol", "=", "lro", ",", "data_names", "=", "[", "'data'", "]", ",", "label_names", "=", "[", "'lin_reg_label'", "]", ",", "update_freq", "=", "update_freq", ",", "logger", "=", "logging", ")", "return", "di", ",", "mod" ]
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/svrg_module/api_usage_example/example_api_train.py#L73-L109
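The synthetic data built inside create_network is plain numpy and can be checked without MXNet. A standalone sketch (for inspection only, not the original training pipeline) verifying the linear relationship the module is meant to recover:

```python
import numpy as np

# Standalone reproduction of the synthetic data from create_network; the
# MXNet iterator and SVRGModule are deliberately not used here.
train_data = np.random.randint(1, 5, [1000, 2])
weights = np.array([1.0, 2.0])
train_label = train_data.dot(weights)
assert train_label.shape == (1000,)
assert np.allclose(train_label, train_data[:, 0] + 2 * train_data[:, 1])
```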
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/profiler/parser/integrator.py
python
AscendTimelineGenerator._load_timeline_data
(self, all_reduce_names=None)
return timeline_list
Load timeline data from file.
Load timeline data from file.
[ "Load", "timeline", "data", "from", "file", "." ]
def _load_timeline_data(self, all_reduce_names=None): """Load timeline data from file.""" all_reduce_names = all_reduce_names or [] file_path = os.path.join( self._profiling_dir, self._output_timeline_data_file_path.format(self._rank_id) ) file_path = validate_and_normalize_path(file_path) if not os.path.exists(file_path): logger.critical("Failed to find parsed timeline file.") raise ProfilerFileNotFoundException('parsed timeline file') timeline_list = [] try: with open(file_path, 'r') as f_obj: for line in f_obj: line_list = line.strip('\n').split(',') if line_list[0] == 'op_name' or line_list[0] in all_reduce_names: continue line_list[self._tid_idx] = f"Stream #{line_list[self._tid_idx]}" timeline_list.append(line_list) except (IOError, OSError) as err: logger.critical('Error occurred when read timeline intermediate file: %s', err) raise ProfilerIOException() return timeline_list
[ "def", "_load_timeline_data", "(", "self", ",", "all_reduce_names", "=", "None", ")", ":", "all_reduce_names", "=", "all_reduce_names", "or", "[", "]", "file_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_profiling_dir", ",", "self", ".", "_output_timeline_data_file_path", ".", "format", "(", "self", ".", "_rank_id", ")", ")", "file_path", "=", "validate_and_normalize_path", "(", "file_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "logger", ".", "critical", "(", "\"Failed to find parsed timeline file.\"", ")", "raise", "ProfilerFileNotFoundException", "(", "'parsed timeline file'", ")", "timeline_list", "=", "[", "]", "try", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "f_obj", ":", "for", "line", "in", "f_obj", ":", "line_list", "=", "line", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "','", ")", "if", "line_list", "[", "0", "]", "==", "'op_name'", "or", "line_list", "[", "0", "]", "in", "all_reduce_names", ":", "continue", "line_list", "[", "self", ".", "_tid_idx", "]", "=", "f\"Stream #{line_list[self._tid_idx]}\"", "timeline_list", ".", "append", "(", "line_list", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "err", ":", "logger", ".", "critical", "(", "'Error occurred when read timeline intermediate file: %s'", ",", "err", ")", "raise", "ProfilerIOException", "(", ")", "return", "timeline_list" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/profiler/parser/integrator.py#L1069-L1094
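Stripped of the profiler plumbing, the loop above is a line filter over a comma-separated dump. A standard-library sketch with made-up stand-ins for the file name, the stream-column index, and the all-reduce op names:

```python
# Standard-library sketch of the filtering loop above; "timeline.csv",
# tid_idx, and all_reduce_names are hypothetical stand-ins.
all_reduce_names = ["AllReduce-op1"]
tid_idx = 2
timeline_list = []
with open("timeline.csv") as f_obj:  # hypothetical intermediate file
    for line in f_obj:
        line_list = line.strip("\n").split(",")
        # Skip the header row and all-reduce operators, as above.
        if line_list[0] == "op_name" or line_list[0] in all_reduce_names:
            continue
        line_list[tid_idx] = f"Stream #{line_list[tid_idx]}"
        timeline_list.append(line_list)
```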
cms-sw/cmssw
fd9de012d503d3405420bcbeec0ec879baa57cf2
RecoVertex/BeamSpotProducer/scripts/getBeamSpotDB.py
python
nonzero
(self)
return False
True if options were given
True if options were given
[ "True", "if", "options", "were", "given" ]
def nonzero(self): # will become the nonzero method of optparse.Values "True if options were given" for v in self.__dict__.values(): if v is not None: return True return False
[ "def", "nonzero", "(", "self", ")", ":", "# will become the nonzero method of optparse.Values", "for", "v", "in", "self", ".", "__dict__", ".", "values", "(", ")", ":", "if", "v", "is", "not", "None", ":", "return", "True", "return", "False" ]
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/RecoVertex/BeamSpotProducer/scripts/getBeamSpotDB.py#L42-L46
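The inline comment says this function is meant to be grafted onto optparse.Values. A hypothetical sketch of that binding (the Python 2 truthiness hook is __nonzero__; __bool__ is the Python 3 equivalent):

```python
import optparse

def nonzero(self):  # will become the nonzero method of optparse.Values
    "True if options were given"
    for v in self.__dict__.values():
        if v is not None:
            return True
    return False

# Hypothetical binding, following the inline comment above.
optparse.Values.__nonzero__ = nonzero  # Python 2 truthiness hook
optparse.Values.__bool__ = nonzero     # Python 3 equivalent

opts, _ = optparse.OptionParser().parse_args([])
assert not bool(opts)  # no options were given
```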
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/bandwidth.py
python
BandwidthLimiter.__init__
(self, leaky_bucket, time_utils=None)
Limits bandwidth for shared S3 transfers :type leaky_bucket: LeakyBucket :param leaky_bucket: The leaky bucket to use to limit bandwidth :type time_utils: TimeUtils :param time_utils: Time utility to use for interacting with time.
Limits bandwidth for shared S3 transfers
[ "Limits", "bandwidth", "for", "shared", "S3", "transfers" ]
def __init__(self, leaky_bucket, time_utils=None): """Limits bandwidth for shared S3 transfers :type leaky_bucket: LeakyBucket :param leaky_bucket: The leaky bucket to use to limit bandwidth :type time_utils: TimeUtils :param time_utils: Time utility to use for interacting with time. """ self._leaky_bucket = leaky_bucket self._time_utils = time_utils if time_utils is None: self._time_utils = TimeUtils()
[ "def", "__init__", "(", "self", ",", "leaky_bucket", ",", "time_utils", "=", "None", ")", ":", "self", ".", "_leaky_bucket", "=", "leaky_bucket", "self", ".", "_time_utils", "=", "time_utils", "if", "time_utils", "is", "None", ":", "self", ".", "_time_utils", "=", "TimeUtils", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/bandwidth.py#L64-L76
tensorflow/minigo
6d89c202cdceaf449aefc3149ab2110d44f1a6a4
oneoffs/sharp_positions.py
python
grouper
(n, iterable)
return (iterable[i:i + n] for i in range(0, len(iterable), n))
Itertools recipe >>> list(grouper(3, iter('ABCDEFG'))) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
Itertools recipe >>> list(grouper(3, iter('ABCDEFG'))) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
[ "Itertools", "recipe", ">>>", "list", "(", "grouper", "(", "3", "iter", "(", "ABCDEFG", ")))", "[[", "A", "B", "C", "]", "[", "D", "E", "F", "]", "[", "G", "]]" ]
def grouper(n, iterable): """Itertools recipe >>> list(grouper(3, iter('ABCDEFG'))) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] """ return (iterable[i:i + n] for i in range(0, len(iterable), n))
[ "def", "grouper", "(", "n", ",", "iterable", ")", ":", "return", "(", "iterable", "[", "i", ":", "i", "+", "n", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "iterable", ")", ",", "n", ")", ")" ]
https://github.com/tensorflow/minigo/blob/6d89c202cdceaf449aefc3149ab2110d44f1a6a4/oneoffs/sharp_positions.py#L91-L96
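Note that the implementation slices, so it needs a sequence with len() rather than the bare iterator shown in the doctest, and slicing a string yields strings rather than lists. A sketch of what the code as written actually returns:

```python
def grouper(n, iterable):
    return (iterable[i:i + n] for i in range(0, len(iterable), n))

# Slicing needs len() and __getitem__, so pass a sequence, not iter(...);
# string input yields string chunks, list input yields the doctest's lists.
assert list(grouper(3, 'ABCDEFG')) == ['ABC', 'DEF', 'G']
assert list(grouper(3, list('ABCDEFG'))) == [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
```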
scribusproject/scribus
41ec7c775a060912cf251682a8b1437f753f80f4
codegen/cheetah/Cheetah/CacheRegion.py
python
CacheRegion.clear
(self)
drop all the caches stored in this cache region
drop all the caches stored in this cache region
[ "drop", "all", "the", "caches", "stored", "in", "this", "cache", "region" ]
def clear(self): " drop all the caches stored in this cache region " for cacheItemId in self._cacheItems.keys(): cacheItem = self._cacheItems[cacheItemId] cacheItem.clear() del self._cacheItems[cacheItemId]
[ "def", "clear", "(", "self", ")", ":", "for", "cacheItemId", "in", "self", ".", "_cacheItems", ".", "keys", "(", ")", ":", "cacheItem", "=", "self", ".", "_cacheItems", "[", "cacheItemId", "]", "cacheItem", ".", "clear", "(", ")", "del", "self", ".", "_cacheItems", "[", "cacheItemId", "]" ]
https://github.com/scribusproject/scribus/blob/41ec7c775a060912cf251682a8b1437f753f80f4/codegen/cheetah/Cheetah/CacheRegion.py#L114-L119
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/training/session_run_hook.py
python
SessionRunContext.stop_requested
(self)
return self._stop_requested
Returns whether a stop is requested or not. If true, `MonitoredSession` stops iterations. Returns: A `bool`
Returns whether a stop is requested or not.
[ "Returns", "whether", "a", "stop", "is", "requested", "or", "not", "." ]
def stop_requested(self): """Returns whether a stop is requested or not. If true, `MonitoredSession` stops iterations. Returns: A `bool` """ return self._stop_requested
[ "def", "stop_requested", "(", "self", ")", ":", "return", "self", ".", "_stop_requested" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/training/session_run_hook.py#L249-L256
rsms/immutable-cpp
a4a32022d895dd0d3c03547a2b2a2b03face01eb
misc/ninja_syntax.py
python
Writer._count_dollars_before_index
(self, s, i)
return dollar_count
Returns the number of '$' characters right in front of s[i].
Returns the number of '$' characters right in front of s[i].
[ "Returns", "the", "number", "of", "$", "characters", "right", "in", "front", "of", "s", "[", "i", "]", "." ]
def _count_dollars_before_index(self, s, i): """Returns the number of '$' characters right in front of s[i].""" dollar_count = 0 dollar_index = i - 1 while dollar_index > 0 and s[dollar_index] == '$': dollar_count += 1 dollar_index -= 1 return dollar_count
[ "def", "_count_dollars_before_index", "(", "self", ",", "s", ",", "i", ")", ":", "dollar_count", "=", "0", "dollar_index", "=", "i", "-", "1", "while", "dollar_index", ">", "0", "and", "s", "[", "dollar_index", "]", "==", "'$'", ":", "dollar_count", "+=", "1", "dollar_index", "-=", "1", "return", "dollar_count" ]
https://github.com/rsms/immutable-cpp/blob/a4a32022d895dd0d3c03547a2b2a2b03face01eb/misc/ninja_syntax.py#L100-L107
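A standalone sketch of the same helper (the identical vendored copy appears again below under apiaryio/drafter), plus the boundary behaviour that falls out of the `> 0` test:

```python
def count_dollars_before_index(s, i):
    # Standalone copy of the method body, for illustration.
    dollar_count = 0
    dollar_index = i - 1
    while dollar_index > 0 and s[dollar_index] == '$':
        dollar_count += 1
        dollar_index -= 1
    return dollar_count

assert count_dollars_before_index('ab$$c', 4) == 2
# Boundary quirk visible in the code: the `> 0` test means a '$' sitting at
# index 0 is never counted.
assert count_dollars_before_index('$x', 1) == 0
```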
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Editor/Python/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py
python
ParseResults.append
( self, item )
Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
Add single element to end of ParseResults list of elements.
[ "Add", "single", "element", "to", "end", "of", "ParseResults", "list", "of", "elements", "." ]
def append( self, item ): """ Add single element to end of ParseResults list of elements. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to compute the sum of the parsed integers, and add it to the end def append_sum(tokens): tokens.append(sum(map(int, tokens))) print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] """ self.__toklist.append(item)
[ "def", "append", "(", "self", ",", "item", ")", ":", "self", ".", "__toklist", ".", "append", "(", "item", ")" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py#L605-L617
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/utils/misc.py
python
get_installed_distributions
( local_only=True, # type: bool skip=stdlib_pkgs, # type: Container[str] include_editables=True, # type: bool editables_only=False, # type: bool user_only=False, # type: bool paths=None # type: Optional[List[str]] )
return [d for d in working_set if local_test(d) and d.key not in skip and editable_test(d) and editables_only_test(d) and user_test(d) ]
Return a list of installed Distribution objects. If ``local_only`` is True (default), only return installations local to the current virtualenv, if in a virtualenv. ``skip`` argument is an iterable of lower-case project names to ignore; defaults to stdlib_pkgs If ``include_editables`` is False, don't report editables. If ``editables_only`` is True , only report editables. If ``user_only`` is True , only report installations in the user site directory. If ``paths`` is set, only report the distributions present at the specified list of locations.
Return a list of installed Distribution objects.
[ "Return", "a", "list", "of", "installed", "Distribution", "objects", "." ]
def get_installed_distributions( local_only=True, # type: bool skip=stdlib_pkgs, # type: Container[str] include_editables=True, # type: bool editables_only=False, # type: bool user_only=False, # type: bool paths=None # type: Optional[List[str]] ): # type: (...) -> List[Distribution] """ Return a list of installed Distribution objects. If ``local_only`` is True (default), only return installations local to the current virtualenv, if in a virtualenv. ``skip`` argument is an iterable of lower-case project names to ignore; defaults to stdlib_pkgs If ``include_editables`` is False, don't report editables. If ``editables_only`` is True , only report editables. If ``user_only`` is True , only report installations in the user site directory. If ``paths`` is set, only report the distributions present at the specified list of locations. """ if paths: working_set = pkg_resources.WorkingSet(paths) else: working_set = pkg_resources.working_set if local_only: local_test = dist_is_local else: def local_test(d): return True if include_editables: def editable_test(d): return True else: def editable_test(d): return not dist_is_editable(d) if editables_only: def editables_only_test(d): return dist_is_editable(d) else: def editables_only_test(d): return True if user_only: user_test = dist_in_usersite else: def user_test(d): return True return [d for d in working_set if local_test(d) and d.key not in skip and editable_test(d) and editables_only_test(d) and user_test(d) ]
[ "def", "get_installed_distributions", "(", "local_only", "=", "True", ",", "# type: bool", "skip", "=", "stdlib_pkgs", ",", "# type: Container[str]", "include_editables", "=", "True", ",", "# type: bool", "editables_only", "=", "False", ",", "# type: bool", "user_only", "=", "False", ",", "# type: bool", "paths", "=", "None", "# type: Optional[List[str]]", ")", ":", "# type: (...) -> List[Distribution]", "if", "paths", ":", "working_set", "=", "pkg_resources", ".", "WorkingSet", "(", "paths", ")", "else", ":", "working_set", "=", "pkg_resources", ".", "working_set", "if", "local_only", ":", "local_test", "=", "dist_is_local", "else", ":", "def", "local_test", "(", "d", ")", ":", "return", "True", "if", "include_editables", ":", "def", "editable_test", "(", "d", ")", ":", "return", "True", "else", ":", "def", "editable_test", "(", "d", ")", ":", "return", "not", "dist_is_editable", "(", "d", ")", "if", "editables_only", ":", "def", "editables_only_test", "(", "d", ")", ":", "return", "dist_is_editable", "(", "d", ")", "else", ":", "def", "editables_only_test", "(", "d", ")", ":", "return", "True", "if", "user_only", ":", "user_test", "=", "dist_in_usersite", "else", ":", "def", "user_test", "(", "d", ")", ":", "return", "True", "return", "[", "d", "for", "d", "in", "working_set", "if", "local_test", "(", "d", ")", "and", "d", ".", "key", "not", "in", "skip", "and", "editable_test", "(", "d", ")", "and", "editables_only_test", "(", "d", ")", "and", "user_test", "(", "d", ")", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/utils/misc.py#L413-L478
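A hedged usage sketch: this is pip's internal API at the vendored version shown in the path above, so the import path is not stable across pip releases:

```python
# pip-internal API; the import matches the vendored pip version above and
# may differ or disappear in other releases.
from pip._internal.utils.misc import get_installed_distributions

dists = get_installed_distributions(local_only=True, skip=())
print(sorted(d.key for d in dists))  # lower-cased project names
```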
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/SANS/isis_reduction_steps.py
python
ConvertToQISIS.set_output_type
(self, descript)
Requests the given output from the Q conversion, either 1D or 2D. For the 1D calculation it asks the reducer to keep a workspace for error estimates @param descript: 1D or 2D
Requests the given output from the Q conversion, either 1D or 2D. For the 1D calculation it asks the reducer to keep a workspace for error estimates
[ "Requests", "the", "given", "output", "from", "the", "Q", "conversion", "either", "1D", "or", "2D", ".", "For", "the", "1D", "calculation", "it", "asks", "the", "reducer", "to", "keep", "a", "workspace", "for", "error", "estimates" ]
def set_output_type(self, descript): """ Requests the given output from the Q conversion, either 1D or 2D. For the 1D calculation it asks the reducer to keep a workspace for error estimates @param descript: 1D or 2D """ self._Q_alg = self._OUTPUT_TYPES[descript] self._output_type = descript
[ "def", "set_output_type", "(", "self", ",", "descript", ")", ":", "self", ".", "_Q_alg", "=", "self", ".", "_OUTPUT_TYPES", "[", "descript", "]", "self", ".", "_output_type", "=", "descript" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/isis_reduction_steps.py#L2665-L2673
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/propgrid.py
python
PGTypeOperationFailed
(*args, **kwargs)
return _propgrid.PGTypeOperationFailed(*args, **kwargs)
PGTypeOperationFailed(PGProperty p, String typestr, String op)
PGTypeOperationFailed(PGProperty p, String typestr, String op)
[ "PGTypeOperationFailed", "(", "PGProperty", "p", "String", "typestr", "String", "op", ")" ]
def PGTypeOperationFailed(*args, **kwargs): """PGTypeOperationFailed(PGProperty p, String typestr, String op)""" return _propgrid.PGTypeOperationFailed(*args, **kwargs)
[ "def", "PGTypeOperationFailed", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PGTypeOperationFailed", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L1081-L1083
apache/singa
93fd9da72694e68bfe3fb29d0183a65263d238a1
python/singa/autograd.py
python
_xor
(a, b)
return Xor()(a, b)[0]
Return `np.logical_xor(a,b)`, where a and b are Tensor.
Return `np.logical_xor(a,b)`, where a and b are Tensor.
[ "Return", "np", ".", "logical_xor", "(", "a", "b", ")", "where", "a", "and", "b", "are", "Tensor", "." ]
def _xor(a, b): """ Return `np.logical_xor(a,b)`, where a and b are Tensor. """ return Xor()(a, b)[0]
[ "def", "_xor", "(", "a", ",", "b", ")", ":", "return", "Xor", "(", ")", "(", "a", ",", "b", ")", "[", "0", "]" ]
https://github.com/apache/singa/blob/93fd9da72694e68bfe3fb29d0183a65263d238a1/python/singa/autograd.py#L3610-L3614
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/resmokelib/utils/history.py
python
HistoryDict.write_equals
(self, other_dict)
return True
Compare two dicts for write equality.
Compare two dicts for write equality.
[ "Compare", "two", "dicts", "for", "write", "equality", "." ]
def write_equals(self, other_dict): """Compare two dicts for write equality.""" if not len(other_dict._value_store) == len(self._value_store): # pylint: disable=protected-access return False for key in self._value_store: our_writes = [ access.value_written for access in self._history_store[key] if access.type == AccessType.WRITE ] their_writes = [ access.value_written for access in other_dict._history_store[key] # pylint: disable=protected-access if access.type == AccessType.WRITE ] if not our_writes == their_writes: return False return True
[ "def", "write_equals", "(", "self", ",", "other_dict", ")", ":", "if", "not", "len", "(", "other_dict", ".", "_value_store", ")", "==", "len", "(", "self", ".", "_value_store", ")", ":", "# pylint: disable=protected-access", "return", "False", "for", "key", "in", "self", ".", "_value_store", ":", "our_writes", "=", "[", "access", ".", "value_written", "for", "access", "in", "self", ".", "_history_store", "[", "key", "]", "if", "access", ".", "type", "==", "AccessType", ".", "WRITE", "]", "their_writes", "=", "[", "access", ".", "value_written", "for", "access", "in", "other_dict", ".", "_history_store", "[", "key", "]", "# pylint: disable=protected-access", "if", "access", ".", "type", "==", "AccessType", ".", "WRITE", "]", "if", "not", "our_writes", "==", "their_writes", ":", "return", "False", "return", "True" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/utils/history.py#L210-L226
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/summary_op_util.py
python
summary_scope
(name, family=None, default_name=None, values=None)
Enters a scope used for the summary and yields both the name and tag. To ensure that the summary tag name is always unique, we create a name scope based on `name` and use the full scope name in the tag. If `family` is set, then the tag name will be '<family>/<scope_name>', where `scope_name` is `<outer_scope>/<family>/<name>`. This ensures that `family` is always the prefix of the tag (and unmodified), while ensuring the scope respects the outer scope from which this summary was created. Args: name: A name for the generated summary node. family: Optional; if provided, used as the prefix of the summary tag name. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as `values` parameter to name_scope. Yields: A tuple `(tag, scope)`, both of which are unique and should be used for the tag and the scope for the summary to output.
Enters a scope used for the summary and yields both the name and tag.
[ "Enters", "a", "scope", "used", "for", "the", "summary", "and", "yields", "both", "the", "name", "and", "tag", "." ]
def summary_scope(name, family=None, default_name=None, values=None): """Enters a scope used for the summary and yields both the name and tag. To ensure that the summary tag name is always unique, we create a name scope based on `name` and use the full scope name in the tag. If `family` is set, then the tag name will be '<family>/<scope_name>', where `scope_name` is `<outer_scope>/<family>/<name>`. This ensures that `family` is always the prefix of the tag (and unmodified), while ensuring the scope respects the outer scope from which this summary was created. Args: name: A name for the generated summary node. family: Optional; if provided, used as the prefix of the summary tag name. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as `values` parameter to name_scope. Yields: A tuple `(tag, scope)`, both of which are unique and should be used for the tag and the scope for the summary to output. """ name = clean_tag(name) family = clean_tag(family) # Use family name in the scope to ensure uniqueness of scope/tag. scope_base_name = name if family is None else '{}/{}'.format(family, name) with ops.name_scope(scope_base_name, default_name, values=values) as scope: if family is None: tag = scope.rstrip('/') else: # Prefix our scope with family again so it displays in the right tab. tag = '{}/{}'.format(family, scope.rstrip('/')) # Note: tag is not 100% unique if the user explicitly enters a scope with # the same name as family, then later enter it again before summaries. # This is very contrived though, and we opt here to let it be a runtime # exception if tags do indeed collide. yield (tag, scope)
[ "def", "summary_scope", "(", "name", ",", "family", "=", "None", ",", "default_name", "=", "None", ",", "values", "=", "None", ")", ":", "name", "=", "clean_tag", "(", "name", ")", "family", "=", "clean_tag", "(", "family", ")", "# Use family name in the scope to ensure uniqueness of scope/tag.", "scope_base_name", "=", "name", "if", "family", "is", "None", "else", "'{}/{}'", ".", "format", "(", "family", ",", "name", ")", "with", "ops", ".", "name_scope", "(", "scope_base_name", ",", "default_name", ",", "values", "=", "values", ")", "as", "scope", ":", "if", "family", "is", "None", ":", "tag", "=", "scope", ".", "rstrip", "(", "'/'", ")", "else", ":", "# Prefix our scope with family again so it displays in the right tab.", "tag", "=", "'{}/{}'", ".", "format", "(", "family", ",", "scope", ".", "rstrip", "(", "'/'", ")", ")", "# Note: tag is not 100% unique if the user explicitly enters a scope with", "# the same name as family, then later enter it again before summaries.", "# This is very contrived though, and we opt here to let it be a runtime", "# exception if tags do indeed collide.", "yield", "(", "tag", ",", "scope", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/summary_op_util.py#L72-L107
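Since the function yields once, it is presumably exposed as a context manager (wrapped with contextlib.contextmanager in the surrounding module). A hypothetical usage sketch:

```python
# Hypothetical usage, assuming summary_scope is exposed as a context manager.
with summary_scope('loss', family='train') as (tag, scope):
    # Per the docstring, tag is '<family>/<outer_scope>/<family>/<name>'.
    print(tag, scope)
```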
apiaryio/drafter
4634ebd07f6c6f257cc656598ccd535492fdfb55
tools/gyp/pylib/gyp/ninja_syntax.py
python
Writer._count_dollars_before_index
(self, s, i)
return dollar_count
Returns the number of '$' characters right in front of s[i].
Returns the number of '$' characters right in front of s[i].
[ "Returns", "the", "number", "of", "$", "characters", "right", "in", "front", "of", "s", "[", "i", "]", "." ]
def _count_dollars_before_index(self, s, i): """Returns the number of '$' characters right in front of s[i].""" dollar_count = 0 dollar_index = i - 1 while dollar_index > 0 and s[dollar_index] == '$': dollar_count += 1 dollar_index -= 1 return dollar_count
[ "def", "_count_dollars_before_index", "(", "self", ",", "s", ",", "i", ")", ":", "dollar_count", "=", "0", "dollar_index", "=", "i", "-", "1", "while", "dollar_index", ">", "0", "and", "s", "[", "dollar_index", "]", "==", "'$'", ":", "dollar_count", "+=", "1", "dollar_index", "-=", "1", "return", "dollar_count" ]
https://github.com/apiaryio/drafter/blob/4634ebd07f6c6f257cc656598ccd535492fdfb55/tools/gyp/pylib/gyp/ninja_syntax.py#L102-L109
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/contributed/sumopy/agilepy/lib_wx/toolbox.py
python
BaseTool.force_deactivation
(self)
Explicit call to deactivate this tool in the tools panel.
Explicit call to deactivate this tool in the tools panel.
[ "Explicit", "call", "to", "deactivate", "this", "tool", "in", "the", "tools", "panel", "." ]
def force_deactivation(self): """ Explicit call to deactivate this tool in the tools panel. """ self.parent.unselect_tool()
[ "def", "force_deactivation", "(", "self", ")", ":", "self", ".", "parent", ".", "unselect_tool", "(", ")" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/agilepy/lib_wx/toolbox.py#L170-L174
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqt/mantidqt/widgets/fitpropertybrowser/mouse_state_machine.py
python
MoveMarkersState.motion_notify_callback
(self, event)
Override base class method
Override base class method
[ "Override", "base", "class", "method" ]
def motion_notify_callback(self, event): """Override base class method""" self.tool.move_markers(event)
[ "def", "motion_notify_callback", "(", "self", ",", "event", ")", ":", "self", ".", "tool", ".", "move_markers", "(", "event", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/fitpropertybrowser/mouse_state_machine.py#L76-L78
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py3/IPython/utils/path.py
python
compress_user
(path)
return path
Reverse of :func:`os.path.expanduser`
Reverse of :func:`os.path.expanduser`
[ "Reverse", "of", ":", "func", ":", "os", ".", "path", ".", "expanduser" ]
def compress_user(path): """Reverse of :func:`os.path.expanduser` """ home = os.path.expanduser('~') if path.startswith(home): path = "~" + path[len(home):] return path
[ "def", "compress_user", "(", "path", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "if", "path", ".", "startswith", "(", "home", ")", ":", "path", "=", "\"~\"", "+", "path", "[", "len", "(", "home", ")", ":", "]", "return", "path" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/utils/path.py#L84-L90
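A quick check of both branches, assuming the compress_user from the record is in scope and POSIX-style paths:

```python
import os

# Paths under the home directory collapse back to '~'; others pass through.
home_path = os.path.join(os.path.expanduser('~'), 'notebooks')
assert compress_user(home_path) == '~/notebooks'   # collapsed under $HOME
assert compress_user('/tmp/data') == '/tmp/data'   # untouched elsewhere
```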
protocolbuffers/protobuf
b5ab0b7a18b7336c60130f4ddb2d97c51792f896
python/google/protobuf/text_format.py
python
Merge
(text, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None, allow_unknown_field=False)
return MergeLines( text.split(b'\n' if isinstance(text, bytes) else u'\n'), message, allow_unknown_extension, allow_field_number, descriptor_pool=descriptor_pool, allow_unknown_field=allow_unknown_field)
Parses a text representation of a protocol message into a message. Like Parse(), but allows repeated values for a non-repeated field, and uses the last one. This means any non-repeated, top-level fields specified in text replace those in the message. Args: text (str): Message text representation. message (Message): A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. allow_unknown_field: if True, skip over unknown field and keep parsing. Avoid to use this option if possible. It may hide some errors (e.g. spelling error on field name) Returns: Message: The same message passed as argument. Raises: ParseError: On text parsing problems.
Parses a text representation of a protocol message into a message.
[ "Parses", "a", "text", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
def Merge(text, message, allow_unknown_extension=False, allow_field_number=False, descriptor_pool=None, allow_unknown_field=False): """Parses a text representation of a protocol message into a message. Like Parse(), but allows repeated values for a non-repeated field, and uses the last one. This means any non-repeated, top-level fields specified in text replace those in the message. Args: text (str): Message text representation. message (Message): A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types. allow_unknown_field: if True, skip over unknown field and keep parsing. Avoid to use this option if possible. It may hide some errors (e.g. spelling error on field name) Returns: Message: The same message passed as argument. Raises: ParseError: On text parsing problems. """ return MergeLines( text.split(b'\n' if isinstance(text, bytes) else u'\n'), message, allow_unknown_extension, allow_field_number, descriptor_pool=descriptor_pool, allow_unknown_field=allow_unknown_field)
[ "def", "Merge", "(", "text", ",", "message", ",", "allow_unknown_extension", "=", "False", ",", "allow_field_number", "=", "False", ",", "descriptor_pool", "=", "None", ",", "allow_unknown_field", "=", "False", ")", ":", "return", "MergeLines", "(", "text", ".", "split", "(", "b'\\n'", "if", "isinstance", "(", "text", ",", "bytes", ")", "else", "u'\\n'", ")", ",", "message", ",", "allow_unknown_extension", ",", "allow_field_number", ",", "descriptor_pool", "=", "descriptor_pool", ",", "allow_unknown_field", "=", "allow_unknown_field", ")" ]
https://github.com/protocolbuffers/protobuf/blob/b5ab0b7a18b7336c60130f4ddb2d97c51792f896/python/google/protobuf/text_format.py#L690-L725
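A short usage sketch; the generated module and the `count` field are hypothetical, but text_format.Merge itself is the public protobuf API shown above:

```python
from google.protobuf import text_format
from myproto_pb2 import MyMessage  # hypothetical generated message type

msg = MyMessage()
# Unlike Parse(), Merge() accepts a repeated value for a non-repeated field
# and keeps the last one, so the (hypothetical) count field ends up as 2.
text_format.Merge('count: 1 count: 2', msg)
```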
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/operations/array_ops.py
python
Concat.__init__
(self, axis=0)
Initialize Concat
Initialize Concat
[ "Initialize", "Concat" ]
def __init__(self, axis=0): """Initialize Concat""" validator.check_value_type("axis", axis, [int], self.name)
[ "def", "__init__", "(", "self", ",", "axis", "=", "0", ")", ":", "validator", ".", "check_value_type", "(", "\"axis\"", ",", "axis", ",", "[", "int", "]", ",", "self", ".", "name", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/operations/array_ops.py#L2496-L2498
facebookincubator/BOLT
88c70afe9d388ad430cc150cc158641701397f70
lldb/third_party/Python/module/pexpect-4.6/pexpect/pxssh.py
python
pxssh.levenshtein_distance
(self, a, b)
return current[n]
This calculates the Levenshtein distance between a and b.
This calculates the Levenshtein distance between a and b.
[ "This", "calculates", "the", "Levenshtein", "distance", "between", "a", "and", "b", "." ]
def levenshtein_distance(self, a, b): '''This calculates the Levenshtein distance between a and b. ''' n, m = len(a), len(b) if n > m: a,b = b,a n,m = m,n current = range(n+1) for i in range(1,m+1): previous, current = current, [i]+[0]*n for j in range(1,n+1): add, delete = previous[j]+1, current[j-1]+1 change = previous[j-1] if a[j-1] != b[i-1]: change = change + 1 current[j] = min(add, delete, change) return current[n]
[ "def", "levenshtein_distance", "(", "self", ",", "a", ",", "b", ")", ":", "n", ",", "m", "=", "len", "(", "a", ")", ",", "len", "(", "b", ")", "if", "n", ">", "m", ":", "a", ",", "b", "=", "b", ",", "a", "n", ",", "m", "=", "m", ",", "n", "current", "=", "range", "(", "n", "+", "1", ")", "for", "i", "in", "range", "(", "1", ",", "m", "+", "1", ")", ":", "previous", ",", "current", "=", "current", ",", "[", "i", "]", "+", "[", "0", "]", "*", "n", "for", "j", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "add", ",", "delete", "=", "previous", "[", "j", "]", "+", "1", ",", "current", "[", "j", "-", "1", "]", "+", "1", "change", "=", "previous", "[", "j", "-", "1", "]", "if", "a", "[", "j", "-", "1", "]", "!=", "b", "[", "i", "-", "1", "]", ":", "change", "=", "change", "+", "1", "current", "[", "j", "]", "=", "min", "(", "add", ",", "delete", ",", "change", ")", "return", "current", "[", "n", "]" ]
https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/lldb/third_party/Python/module/pexpect-4.6/pexpect/pxssh.py#L164-L181
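The method never touches self, so the DP can be lifted out of the pxssh class and checked against the classic example:

```python
def levenshtein_distance(a, b):
    # Standalone copy of the two-row DP from the record above.
    n, m = len(a), len(b)
    if n > m:
        a, b = b, a
        n, m = m, n
    current = range(n + 1)
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if a[j - 1] != b[i - 1]:
                change = change + 1
            current[j] = min(add, delete, change)
    return current[n]

assert levenshtein_distance('kitten', 'sitting') == 3
```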
whai362/PSENet
4d95395658662f2223805c36dcd573d9e190ce26
eval/ic15_rec/rrc_evaluation_funcs_1_1.py
python
main_validation
(default_evaluation_params_fn,validate_data_fn)
This process validates a method Params: default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the correct format of the submission
This process validates a method Params: default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the correct format of the submission
[ "This", "process", "validates", "a", "method", "Params", ":", "default_evaluation_params_fn", ":", "points", "to", "a", "function", "that", "returns", "a", "dictionary", "with", "the", "default", "parameters", "used", "for", "the", "evaluation", "validate_data_fn", ":", "points", "to", "a", "method", "that", "validates", "the", "correct", "format", "of", "the", "submission" ]
def main_validation(default_evaluation_params_fn,validate_data_fn): """ This process validates a method Params: default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation validate_data_fn: points to a method that validates the correct format of the submission """ try: p = dict([s[1:].split('=') for s in sys.argv[1:]]) evalParams = default_evaluation_params_fn() if 'p' in p.keys(): evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p']) ) validate_data_fn(p['g'], p['s'], evalParams) print ('SUCCESS') sys.exit(0) except Exception as e: print (str(e)) sys.exit(101)
[ "def", "main_validation", "(", "default_evaluation_params_fn", ",", "validate_data_fn", ")", ":", "try", ":", "p", "=", "dict", "(", "[", "s", "[", "1", ":", "]", ".", "split", "(", "'='", ")", "for", "s", "in", "sys", ".", "argv", "[", "1", ":", "]", "]", ")", "evalParams", "=", "default_evaluation_params_fn", "(", ")", "if", "'p'", "in", "p", ".", "keys", "(", ")", ":", "evalParams", ".", "update", "(", "p", "[", "'p'", "]", "if", "isinstance", "(", "p", "[", "'p'", "]", ",", "dict", ")", "else", "json", ".", "loads", "(", "p", "[", "'p'", "]", ")", ")", "validate_data_fn", "(", "p", "[", "'g'", "]", ",", "p", "[", "'s'", "]", ",", "evalParams", ")", "print", "(", "'SUCCESS'", ")", "sys", ".", "exit", "(", "0", ")", "except", "Exception", "as", "e", ":", "print", "(", "str", "(", "e", ")", ")", "sys", ".", "exit", "(", "101", ")" ]
https://github.com/whai362/PSENet/blob/4d95395658662f2223805c36dcd573d9e190ce26/eval/ic15_rec/rrc_evaluation_funcs_1_1.py#L437-L455
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Control.GetAlignment
(*args, **kwargs)
return _core_.Control_GetAlignment(*args, **kwargs)
GetAlignment(self) -> int Get the control alignment (left/right/centre, top/bottom/centre)
GetAlignment(self) -> int
[ "GetAlignment", "(", "self", ")", "-", ">", "int" ]
def GetAlignment(*args, **kwargs): """ GetAlignment(self) -> int Get the control alignment (left/right/centre, top/bottom/centre) """ return _core_.Control_GetAlignment(*args, **kwargs)
[ "def", "GetAlignment", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Control_GetAlignment", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L12681-L12687
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/nntplib.py
python
NNTP.putline
(self, line)
Internal: send one line to the server, appending CRLF.
Internal: send one line to the server, appending CRLF.
[ "Internal", ":", "send", "one", "line", "to", "the", "server", "appending", "CRLF", "." ]
def putline(self, line): """Internal: send one line to the server, appending CRLF.""" line = line + CRLF if self.debugging > 1: print '*put*', repr(line) self.sock.sendall(line)
[ "def", "putline", "(", "self", ",", "line", ")", ":", "line", "=", "line", "+", "CRLF", "if", "self", ".", "debugging", ">", "1", ":", "print", "'*put*'", ",", "repr", "(", "line", ")", "self", ".", "sock", ".", "sendall", "(", "line", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/nntplib.py#L189-L193
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/cuda/__init__.py
python
set_sync_debug_mode
(debug_mode: Union[int, str])
r"""Sets the debug mode for cuda synchronizing operations. Args: debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations, if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations. Warning: This is an experimental feature, and not all synchronizing operations will trigger warning or error. In particular, operations in torch.distributed and torch.sparse namespaces are not covered yet.
r"""Sets the debug mode for cuda synchronizing operations.
[ "r", "Sets", "the", "debug", "mode", "for", "cuda", "synchronizing", "operations", "." ]
def set_sync_debug_mode(debug_mode: Union[int, str]) -> None: r"""Sets the debug mode for cuda synchronizing operations. Args: debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations, if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations. Warning: This is an experimental feature, and not all synchronizing operations will trigger warning or error. In particular, operations in torch.distributed and torch.sparse namespaces are not covered yet. """ _lazy_init() if isinstance(debug_mode, str): if debug_mode == "default": debug_mode = 0 elif debug_mode == "warn": debug_mode = 1 elif debug_mode == "error": debug_mode = 2 else: raise RuntimeError("invalid value of debug_mode, expected one of `default`, `warn`, `error`") torch._C._cuda_set_sync_debug_mode(debug_mode)
[ "def", "set_sync_debug_mode", "(", "debug_mode", ":", "Union", "[", "int", ",", "str", "]", ")", "->", "None", ":", "_lazy_init", "(", ")", "if", "isinstance", "(", "debug_mode", ",", "str", ")", ":", "if", "debug_mode", "==", "\"default\"", ":", "debug_mode", "=", "0", "elif", "debug_mode", "==", "\"warn\"", ":", "debug_mode", "=", "1", "elif", "debug_mode", "==", "\"error\"", ":", "debug_mode", "=", "2", "else", ":", "raise", "RuntimeError", "(", "\"invalid value of debug_mode, expected one of `default`, `warn`, `error`\"", ")", "torch", ".", "_C", ".", "_cuda_set_sync_debug_mode", "(", "debug_mode", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/cuda/__init__.py#L544-L567
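A usage sketch on a CUDA build (the call runs _lazy_init, so it initializes CUDA as a side effect):

```python
import torch

if torch.cuda.is_available():
    torch.cuda.set_sync_debug_mode("warn")     # or the integer 1
    # ... run code that may synchronize, e.g. .item() on a GPU tensor
    torch.cuda.set_sync_debug_mode("default")  # back to silent
```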
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/grid.py
python
GridEvent.ControlDown
(*args, **kwargs)
return _grid.GridEvent_ControlDown(*args, **kwargs)
ControlDown(self) -> bool
ControlDown(self) -> bool
[ "ControlDown", "(", "self", ")", "-", ">", "bool" ]
def ControlDown(*args, **kwargs): """ControlDown(self) -> bool""" return _grid.GridEvent_ControlDown(*args, **kwargs)
[ "def", "ControlDown", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "GridEvent_ControlDown", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L2321-L2323
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/genpy/src/genpy/generator.py
python
Special.get_post_deserialize
(self, varname)
:returns: Post-deserialization code to be executed (unindented) or ``None`` if no post-deserialization is required, ``str``
:returns: Post-deserialization code to be executed (unindented) or ``None`` if no post-deserialization is required, ``str``
[ ":", "returns", ":", "Post", "-", "deserialization", "code", "to", "be", "executed", "(", "unindented", ")", "or", "None", "if", "no", "post", "-", "deserialization", "is", "required", "str" ]
def get_post_deserialize(self, varname): """ :returns: Post-deserialization code to be executed (unindented) or ``None`` if no post-deserialization is required, ``str`` """ if self.post_deserialize: return self.post_deserialize%varname else: return None
[ "def", "get_post_deserialize", "(", "self", ",", "varname", ")", ":", "if", "self", ".", "post_deserialize", ":", "return", "self", ".", "post_deserialize", "%", "varname", "else", ":", "return", "None" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/genpy/src/genpy/generator.py#L100-L108
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
chrome/common/extensions/docs/server2/features_utility.py
python
Parse
(features_json)
return features
Process JSON from a _features.json file, standardizing it into a dictionary of Features.
Process JSON from a _features.json file, standardizing it into a dictionary of Features.
[ "Process", "JSON", "from", "a", "_features", ".", "json", "file", "standardizing", "it", "into", "a", "dictionary", "of", "Features", "." ]
def Parse(features_json): '''Process JSON from a _features.json file, standardizing it into a dictionary of Features. ''' features = {} def ignore_feature(name, value): '''Returns true if this feature should be ignored. This is defined by the presence of a 'whitelist' property for non-private APIs. Private APIs shouldn't have whitelisted features ignored since they're inherently private. Logic elsewhere makes sure not to list private APIs. ''' return 'whitelist' in value and not name.endswith('Private') for name, value in deepcopy(features_json).iteritems(): # Some feature names correspond to a list, typically because they're # whitelisted in stable for certain extensions and available in dev for # everybody else. Force a list down to a single feature by attempting to # remove the entries that don't affect the typical usage of an API. if isinstance(value, list): available_values = [subvalue for subvalue in value if not ignore_feature(name, subvalue)] if len(available_values) == 0: logging.warning('No available values for feature "%s"' % name) value = value[0] elif len(available_values) == 1: value = available_values[0] else: # Multiple available values probably implies different feature # configurations for apps vs extensions. Currently, this is 'commands'. # To get the ball rolling, add a hack to combine the extension types. # See http://crbug.com/316194. extension_types = set() for value in available_values: extension_types.update(value['extension_types']) value = [subvalue for subvalue in available_values if subvalue['channel'] == 'stable'][0] value['extension_types'] = list(extension_types) if ignore_feature(name, value): continue features[name] = { 'platforms': [] } extension_types = value.pop('extension_types', None) if extension_types is not None: features[name]['platforms'] = _GetPlatformsForExtensionTypes( extension_types) features[name]['name'] = name features[name].update(value) return features
[ "def", "Parse", "(", "features_json", ")", ":", "features", "=", "{", "}", "def", "ignore_feature", "(", "name", ",", "value", ")", ":", "'''Returns true if this feature should be ignored. This is defined by the\n presence of a 'whitelist' property for non-private APIs. Private APIs\n shouldn't have whitelisted features ignored since they're inherently\n private. Logic elsewhere makes sure not to list private APIs.\n '''", "return", "'whitelist'", "in", "value", "and", "not", "name", ".", "endswith", "(", "'Private'", ")", "for", "name", ",", "value", "in", "deepcopy", "(", "features_json", ")", ".", "iteritems", "(", ")", ":", "# Some feature names correspond to a list, typically because they're", "# whitelisted in stable for certain extensions and available in dev for", "# everybody else. Force a list down to a single feature by attempting to", "# remove the entries that don't affect the typical usage of an API.", "if", "isinstance", "(", "value", ",", "list", ")", ":", "available_values", "=", "[", "subvalue", "for", "subvalue", "in", "value", "if", "not", "ignore_feature", "(", "name", ",", "subvalue", ")", "]", "if", "len", "(", "available_values", ")", "==", "0", ":", "logging", ".", "warning", "(", "'No available values for feature \"%s\"'", "%", "name", ")", "value", "=", "value", "[", "0", "]", "elif", "len", "(", "available_values", ")", "==", "1", ":", "value", "=", "available_values", "[", "0", "]", "else", ":", "# Multiple available values probably implies different feature", "# configurations for apps vs extensions. Currently, this is 'commands'.", "# To get the ball rolling, add a hack to combine the extension types.", "# See http://crbug.com/316194.", "extension_types", "=", "set", "(", ")", "for", "value", "in", "available_values", ":", "extension_types", ".", "update", "(", "value", "[", "'extension_types'", "]", ")", "value", "=", "[", "subvalue", "for", "subvalue", "in", "available_values", "if", "subvalue", "[", "'channel'", "]", "==", "'stable'", "]", "[", "0", "]", "value", "[", "'extension_types'", "]", "=", "list", "(", "extension_types", ")", "if", "ignore_feature", "(", "name", ",", "value", ")", ":", "continue", "features", "[", "name", "]", "=", "{", "'platforms'", ":", "[", "]", "}", "extension_types", "=", "value", ".", "pop", "(", "'extension_types'", ",", "None", ")", "if", "extension_types", "is", "not", "None", ":", "features", "[", "name", "]", "[", "'platforms'", "]", "=", "_GetPlatformsForExtensionTypes", "(", "extension_types", ")", "features", "[", "name", "]", "[", "'name'", "]", "=", "name", "features", "[", "name", "]", ".", "update", "(", "value", ")", "return", "features" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/chrome/common/extensions/docs/server2/features_utility.py#L26-L78
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Environment.py
python
SubstitutionEnvironment.RemoveMethod
(self, function)
Removes the specified function's MethodWrapper from the added_methods list, so we don't re-bind it when making a clone.
Removes the specified function's MethodWrapper from the added_methods list, so we don't re-bind it when making a clone.
[ "Removes", "the", "specified", "function", "s", "MethodWrapper", "from", "the", "added_methods", "list", "so", "we", "don", "t", "re", "-", "bind", "it", "when", "making", "a", "clone", "." ]
def RemoveMethod(self, function): """ Removes the specified function's MethodWrapper from the added_methods list, so we don't re-bind it when making a clone. """ self.added_methods = [dm for dm in self.added_methods if dm.method is not function]
[ "def", "RemoveMethod", "(", "self", ",", "function", ")", ":", "self", ".", "added_methods", "=", "[", "dm", "for", "dm", "in", "self", ".", "added_methods", "if", "dm", ".", "method", "is", "not", "function", "]" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Environment.py#L597-L602
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/tools/gyp/pylib/gyp/xcodeproj_file.py
python
XCObject.Children
(self)
return children
Returns a list of all of this object's owned (strong) children.
Returns a list of all of this object's owned (strong) children.
[ "Returns", "a", "list", "of", "all", "of", "this", "object", "s", "owned", "(", "strong", ")", "children", "." ]
def Children(self): """Returns a list of all of this object's owned (strong) children.""" children = [] for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong) = attributes[0:3] if is_strong and property in self._properties: if not is_list: children.append(self._properties[property]) else: children.extend(self._properties[property]) return children
[ "def", "Children", "(", "self", ")", ":", "children", "=", "[", "]", "for", "property", ",", "attributes", "in", "self", ".", "_schema", ".", "iteritems", "(", ")", ":", "(", "is_list", ",", "property_type", ",", "is_strong", ")", "=", "attributes", "[", "0", ":", "3", "]", "if", "is_strong", "and", "property", "in", "self", ".", "_properties", ":", "if", "not", "is_list", ":", "children", ".", "append", "(", "self", ".", "_properties", "[", "property", "]", ")", "else", ":", "children", ".", "extend", "(", "self", ".", "_properties", "[", "property", "]", ")", "return", "children" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/gyp/pylib/gyp/xcodeproj_file.py#L474-L485
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/layers/python/layers/target_column.py
python
_TargetColumn.get_eval_ops
(self, features, logits, labels, metrics=None)
Returns eval op.
Returns eval op.
[ "Returns", "eval", "op", "." ]
def get_eval_ops(self, features, logits, labels, metrics=None): """Returns eval op.""" raise NotImplementedError
[ "def", "get_eval_ops", "(", "self", ",", "features", ",", "logits", ",", "labels", ",", "metrics", "=", "None", ")", ":", "raise", "NotImplementedError" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/layers/python/layers/target_column.py#L167-L169
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/deps/v8/third_party/binutils/detect_v8_host_arch.py
python
DetectHostArch
()
return host_arch
Hook to be called from gyp without starting a separate python interpreter.
Hook to be called from gyp without starting a separate python interpreter.
[ "Hook", "to", "be", "called", "from", "gyp", "without", "starting", "a", "separate", "python", "interpreter", "." ]
def DetectHostArch(): """Hook to be called from gyp without starting a separate python interpreter.""" host_arch = platform.machine() host_system = platform.system(); # Convert machine type to format recognized by gyp. if re.match(r'i.86', host_arch) or host_arch == 'i86pc': host_arch = 'ia32' elif host_arch in ['x86_64', 'amd64']: host_arch = 'x64' elif host_arch.startswith('arm'): host_arch = 'arm' elif host_arch == 'aarch64': host_arch = 'arm64' elif host_arch == 'mips64': host_arch = 'mips64el' elif host_arch.startswith('mips'): host_arch = 'mipsel' # Under AIX the value returned by platform.machine is not # the best indicator of the host architecture # AIX 6.1 which is the lowest level supported only provides # a 64 bit kernel if host_system == 'AIX': host_arch = 'ppc64' # platform.machine is based on running kernel. It's possible to use 64-bit # kernel with 32-bit userland, e.g. to give linker slightly more memory. # Distinguish between different userland bitness by querying # the python binary. if host_arch == 'x64' and platform.architecture()[0] == '32bit': host_arch = 'ia32' return host_arch
[ "def", "DetectHostArch", "(", ")", ":", "host_arch", "=", "platform", ".", "machine", "(", ")", "host_system", "=", "platform", ".", "system", "(", ")", "# Convert machine type to format recognized by gyp.", "if", "re", ".", "match", "(", "r'i.86'", ",", "host_arch", ")", "or", "host_arch", "==", "'i86pc'", ":", "host_arch", "=", "'ia32'", "elif", "host_arch", "in", "[", "'x86_64'", ",", "'amd64'", "]", ":", "host_arch", "=", "'x64'", "elif", "host_arch", ".", "startswith", "(", "'arm'", ")", ":", "host_arch", "=", "'arm'", "elif", "host_arch", "==", "'aarch64'", ":", "host_arch", "=", "'arm64'", "elif", "host_arch", "==", "'mips64'", ":", "host_arch", "=", "'mips64el'", "elif", "host_arch", ".", "startswith", "(", "'mips'", ")", ":", "host_arch", "=", "'mipsel'", "# Under AIX the value returned by platform.machine is not", "# the best indicator of the host architecture", "# AIX 6.1 which is the lowest level supported only provides", "# a 64 bit kernel", "if", "host_system", "==", "'AIX'", ":", "host_arch", "=", "'ppc64'", "# platform.machine is based on running kernel. It's possible to use 64-bit", "# kernel with 32-bit userland, e.g. to give linker slightly more memory.", "# Distinguish between different userland bitness by querying", "# the python binary.", "if", "host_arch", "==", "'x64'", "and", "platform", ".", "architecture", "(", ")", "[", "0", "]", "==", "'32bit'", ":", "host_arch", "=", "'ia32'", "return", "host_arch" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/v8/third_party/binutils/detect_v8_host_arch.py#L43-L77
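A minimal usage sketch for the DetectHostArch record above; the only assumption is that the module file shown in the path (detect_v8_host_arch.py) is importable.

```python
# Usage sketch: assumes detect_v8_host_arch.py is on sys.path.
from detect_v8_host_arch import DetectHostArch

arch = DetectHostArch()
print(arch)  # e.g. 'x64' on an x86_64 host with 64-bit Python, 'arm64' on aarch64
```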
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py
python
Tk.__init__
(self, screenName=None, baseName=None, className='Tk', useTk=1, sync=0, use=None)
Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will be created. BASENAME will be used for the identification of the profile file (see readprofile). It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME is the name of the widget class.
Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will be created. BASENAME will be used for the identification of the profile file (see readprofile). It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME is the name of the widget class.
[ "Return", "a", "new", "Toplevel", "widget", "on", "screen", "SCREENNAME", ".", "A", "new", "Tcl", "interpreter", "will", "be", "created", ".", "BASENAME", "will", "be", "used", "for", "the", "identification", "of", "the", "profile", "file", "(", "see", "readprofile", ")", ".", "It", "is", "constructed", "from", "sys", ".", "argv", "[", "0", "]", "without", "extensions", "if", "None", "is", "given", ".", "CLASSNAME", "is", "the", "name", "of", "the", "widget", "class", "." ]
def __init__(self, screenName=None, baseName=None, className='Tk', useTk=1, sync=0, use=None): """Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will be created. BASENAME will be used for the identification of the profile file (see readprofile). It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME is the name of the widget class.""" self.master = None self.children = {} self._tkloaded = 0 # to avoid recursions in the getattr code in case of failure, we # ensure that self.tk is always _something_. self.tk = None if baseName is None: import sys, os baseName = os.path.basename(sys.argv[0]) baseName, ext = os.path.splitext(baseName) if ext not in ('.py', '.pyc', '.pyo'): baseName = baseName + ext interactive = 0 self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use) if useTk: self._loadtk() if not sys.flags.ignore_environment: # Issue #16248: Honor the -E flag to avoid code injection. self.readprofile(baseName, className)
[ "def", "__init__", "(", "self", ",", "screenName", "=", "None", ",", "baseName", "=", "None", ",", "className", "=", "'Tk'", ",", "useTk", "=", "1", ",", "sync", "=", "0", ",", "use", "=", "None", ")", ":", "self", ".", "master", "=", "None", "self", ".", "children", "=", "{", "}", "self", ".", "_tkloaded", "=", "0", "# to avoid recursions in the getattr code in case of failure, we", "# ensure that self.tk is always _something_.", "self", ".", "tk", "=", "None", "if", "baseName", "is", "None", ":", "import", "sys", ",", "os", "baseName", "=", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "0", "]", ")", "baseName", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "baseName", ")", "if", "ext", "not", "in", "(", "'.py'", ",", "'.pyc'", ",", "'.pyo'", ")", ":", "baseName", "=", "baseName", "+", "ext", "interactive", "=", "0", "self", ".", "tk", "=", "_tkinter", ".", "create", "(", "screenName", ",", "baseName", ",", "className", ",", "interactive", ",", "wantobjects", ",", "useTk", ",", "sync", ",", "use", ")", "if", "useTk", ":", "self", ".", "_loadtk", "(", ")", "if", "not", "sys", ".", "flags", ".", "ignore_environment", ":", "# Issue #16248: Honor the -E flag to avoid code injection.", "self", ".", "readprofile", "(", "baseName", ",", "className", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tkinter.py#L1725-L1750
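A construction sketch for the Tk.__init__ record above (Python 2's Tkinter, matching the record's lib-tk path); className feeds the profile-file lookup described in the docstring.

```python
# Python 2 usage sketch for the Tk wrapper above.
from Tkinter import Tk, Label

root = Tk(className='Demo')        # creates the Tcl interpreter, loads Tk (useTk=1)
Label(root, text='hello').pack()
root.mainloop()
```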
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2class.py
python
relaxNgValidCtxt.relaxNGValidateFullElement
(self, doc, elem)
return ret
Validate a full subtree when xmlRelaxNGValidatePushElement() returned 0 and the content of the node has been expanded.
Validate a full subtree when xmlRelaxNGValidatePushElement() returned 0 and the content of the node has been expanded.
[ "Validate", "a", "full", "subtree", "when", "xmlRelaxNGValidatePushElement", "()", "returned", "0", "and", "the", "content", "of", "the", "node", "has", "been", "expanded", "." ]
def relaxNGValidateFullElement(self, doc, elem): """Validate a full subtree when xmlRelaxNGValidatePushElement() returned 0 and the content of the node has been expanded. """ if doc is None: doc__o = None else: doc__o = doc._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlRelaxNGValidateFullElement(self._o, doc__o, elem__o) return ret
[ "def", "relaxNGValidateFullElement", "(", "self", ",", "doc", ",", "elem", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "elem__o", "=", "elem", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlRelaxNGValidateFullElement", "(", "self", ".", "_o", ",", "doc__o", ",", "elem__o", ")", "return", "ret" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L5520-L5529
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/waflib/Tools/python.py
python
check_python_module
(conf, module_name, condition='')
Check if the selected python interpreter can import the given python module:: def configure(conf): conf.check_python_module('pygccxml') conf.check_python_module('re', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)") :param module_name: module :type module_name: string
Check if the selected python interpreter can import the given python module::
[ "Check", "if", "the", "selected", "python", "interpreter", "can", "import", "the", "given", "python", "module", "::" ]
def check_python_module(conf, module_name, condition=''): """ Check if the selected python interpreter can import the given python module:: def configure(conf): conf.check_python_module('pygccxml') conf.check_python_module('re', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)") :param module_name: module :type module_name: string """ msg = 'Python module %s' % module_name if condition: msg = '%s (%s)' % (msg, condition) conf.start_msg(msg) try: ret = conf.cmd_and_log(conf.env['PYTHON'] + ['-c', PYTHON_MODULE_TEMPLATE % module_name]) except Exception: conf.end_msg(False) conf.fatal('Could not find the python module %r' % module_name) ret = ret.strip() if condition: conf.end_msg(ret) if ret == 'unknown version': conf.fatal('Could not check the %s version' % module_name) from distutils.version import LooseVersion def num(*k): if isinstance(k[0], int): return LooseVersion('.'.join([str(x) for x in k])) else: return LooseVersion(k[0]) d = {'num': num, 'ver': LooseVersion(ret)} ev = eval(condition, {}, d) if not ev: conf.fatal('The %s version does not satisfy the requirements' % module_name) else: if ret == 'unknown version': conf.end_msg(True) else: conf.end_msg(ret)
[ "def", "check_python_module", "(", "conf", ",", "module_name", ",", "condition", "=", "''", ")", ":", "msg", "=", "'Python module %s'", "%", "module_name", "if", "condition", ":", "msg", "=", "'%s (%s)'", "%", "(", "msg", ",", "condition", ")", "conf", ".", "start_msg", "(", "msg", ")", "try", ":", "ret", "=", "conf", ".", "cmd_and_log", "(", "conf", ".", "env", "[", "'PYTHON'", "]", "+", "[", "'-c'", ",", "PYTHON_MODULE_TEMPLATE", "%", "module_name", "]", ")", "except", "Exception", ":", "conf", ".", "end_msg", "(", "False", ")", "conf", ".", "fatal", "(", "'Could not find the python module %r'", "%", "module_name", ")", "ret", "=", "ret", ".", "strip", "(", ")", "if", "condition", ":", "conf", ".", "end_msg", "(", "ret", ")", "if", "ret", "==", "'unknown version'", ":", "conf", ".", "fatal", "(", "'Could not check the %s version'", "%", "module_name", ")", "from", "distutils", ".", "version", "import", "LooseVersion", "def", "num", "(", "*", "k", ")", ":", "if", "isinstance", "(", "k", "[", "0", "]", ",", "int", ")", ":", "return", "LooseVersion", "(", "'.'", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "k", "]", ")", ")", "else", ":", "return", "LooseVersion", "(", "k", "[", "0", "]", ")", "d", "=", "{", "'num'", ":", "num", ",", "'ver'", ":", "LooseVersion", "(", "ret", ")", "}", "ev", "=", "eval", "(", "condition", ",", "{", "}", ",", "d", ")", "if", "not", "ev", ":", "conf", ".", "fatal", "(", "'The %s version does not satisfy the requirements'", "%", "module_name", ")", "else", ":", "if", "ret", "==", "'unknown version'", ":", "conf", ".", "end_msg", "(", "True", ")", "else", ":", "conf", ".", "end_msg", "(", "ret", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Tools/python.py#L461-L502
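A wscript sketch exercising the conf method above, adapted from its own docstring; loading the `python` waf tool first is the one extra step assumed here.

```python
# wscript sketch (run with `waf configure`); the condition string is copied
# from the docstring above.
def configure(conf):
    conf.load('python')   # provides conf.env['PYTHON'] and this conf method
    conf.check_python_module(
        're', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)")
```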
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/textview.py
python
ScrollableTextFrame.__init__
(self, master, wrap=NONE, **kwargs)
Create a frame for Textview. master - master widget for this frame wrap - type of text wrapping to use ('word', 'char' or 'none') All parameters except for 'wrap' are passed to Frame.__init__(). The Text widget is accessible via the 'text' attribute. Note: Changing the wrapping mode of the text widget after instantiation is not supported.
Create a frame for Textview.
[ "Create", "a", "frame", "for", "Textview", "." ]
def __init__(self, master, wrap=NONE, **kwargs): """Create a frame for Textview. master - master widget for this frame wrap - type of text wrapping to use ('word', 'char' or 'none') All parameters except for 'wrap' are passed to Frame.__init__(). The Text widget is accessible via the 'text' attribute. Note: Changing the wrapping mode of the text widget after instantiation is not supported. """ super().__init__(master, **kwargs) text = self.text = Text(self, wrap=wrap) text.grid(row=0, column=0, sticky=NSEW) self.grid_rowconfigure(0, weight=1) self.grid_columnconfigure(0, weight=1) # vertical scrollbar self.yscroll = AutoHideScrollbar(self, orient=VERTICAL, takefocus=False, command=text.yview) self.yscroll.grid(row=0, column=1, sticky=NS) text['yscrollcommand'] = self.yscroll.set # horizontal scrollbar - only when wrap is set to NONE if wrap == NONE: self.xscroll = AutoHideScrollbar(self, orient=HORIZONTAL, takefocus=False, command=text.xview) self.xscroll.grid(row=1, column=0, sticky=EW) text['xscrollcommand'] = self.xscroll.set else: self.xscroll = None
[ "def", "__init__", "(", "self", ",", "master", ",", "wrap", "=", "NONE", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "__init__", "(", "master", ",", "*", "*", "kwargs", ")", "text", "=", "self", ".", "text", "=", "Text", "(", "self", ",", "wrap", "=", "wrap", ")", "text", ".", "grid", "(", "row", "=", "0", ",", "column", "=", "0", ",", "sticky", "=", "NSEW", ")", "self", ".", "grid_rowconfigure", "(", "0", ",", "weight", "=", "1", ")", "self", ".", "grid_columnconfigure", "(", "0", ",", "weight", "=", "1", ")", "# vertical scrollbar", "self", ".", "yscroll", "=", "AutoHideScrollbar", "(", "self", ",", "orient", "=", "VERTICAL", ",", "takefocus", "=", "False", ",", "command", "=", "text", ".", "yview", ")", "self", ".", "yscroll", ".", "grid", "(", "row", "=", "0", ",", "column", "=", "1", ",", "sticky", "=", "NS", ")", "text", "[", "'yscrollcommand'", "]", "=", "self", ".", "yscroll", ".", "set", "# horizontal scrollbar - only when wrap is set to NONE", "if", "wrap", "==", "NONE", ":", "self", ".", "xscroll", "=", "AutoHideScrollbar", "(", "self", ",", "orient", "=", "HORIZONTAL", ",", "takefocus", "=", "False", ",", "command", "=", "text", ".", "xview", ")", "self", ".", "xscroll", ".", "grid", "(", "row", "=", "1", ",", "column", "=", "0", ",", "sticky", "=", "EW", ")", "text", "[", "'xscrollcommand'", "]", "=", "self", ".", "xscroll", ".", "set", "else", ":", "self", ".", "xscroll", "=", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/textview.py#L34-L69
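A usage sketch for the frame above; since `wrap` defaults to NONE, the horizontal scrollbar path is exercised. Layout values are illustrative.

```python
# Usage sketch (Python 3.7 idlelib).
from tkinter import Tk, NSEW
from idlelib.textview import ScrollableTextFrame

root = Tk()
frame = ScrollableTextFrame(root)                    # wrap=NONE -> xscroll exists
frame.text.insert('1.0', 'one very long unwrapped line ' * 40)
frame.grid(row=0, column=0, sticky=NSEW)
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
root.mainloop()
```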
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/distributed/optim/zero_redundancy_optimizer.py
python
ZeroRedundancyOptimizer._verify_and_init_params
(self, params: Any)
r""" Verifies the type of ``params`` and initializes ``self._all_params`` if ``params`` is valid. While :class:`optim.Optimizer <torch.optim.Optimizer>` allows ``params`` to be an iterable of :class:`dict` s, currently ``ZeroRedundancyOptimizer`` strictly requires ``params`` to be an iterable of :class:`torch.Tensor` s. Raises: TypeError: ``params`` has an invalid type. ValueError: ``params`` is empty.
r""" Verifies the type of ``params`` and initializes ``self._all_params`` if ``params`` is valid.
[ "r", "Verifies", "the", "type", "of", "params", "and", "initializes", "self", ".", "_all_params", "if", "params", "is", "valid", "." ]
def _verify_and_init_params(self, params: Any) -> None: r""" Verifies the type of ``params`` and initializes ``self._all_params`` if ``params`` is valid. While :class:`optim.Optimizer <torch.optim.Optimizer>` allows ``params`` to be an iterable of :class:`dict` s, currently ``ZeroRedundancyOptimizer`` strictly requires ``params`` to be an iterable of :class:`torch.Tensor` s. Raises: TypeError: ``params`` has an invalid type. ValueError: ``params`` is empty. """ if isinstance(params, torch.Tensor): raise TypeError("params argument should be an iterable of " f"Tensors, but got {torch.typename(params)}") try: self._all_params = list(params) except TypeError: raise TypeError("params argument should be an iterable of " f"Tensors, but got {torch.typename(params)}") if len(self._all_params) == 0: raise ValueError("ZeroRedundancyOptimizer got an empty parameter " "list") for param in self._all_params: if not isinstance(param, torch.Tensor): raise TypeError("params argument should be an iterable of " "Tensors, but got an iterable containing " f"{torch.typename(param)}")
[ "def", "_verify_and_init_params", "(", "self", ",", "params", ":", "Any", ")", "->", "None", ":", "if", "isinstance", "(", "params", ",", "torch", ".", "Tensor", ")", ":", "raise", "TypeError", "(", "\"params argument should be an iterable of \"", "f\"Tensors, but got {torch.typename(params)}\"", ")", "try", ":", "self", ".", "_all_params", "=", "list", "(", "params", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"params argument should be an iterable of \"", "f\"Tensors, but got {torch.typename(params)}\"", ")", "if", "len", "(", "self", ".", "_all_params", ")", "==", "0", ":", "raise", "ValueError", "(", "\"ZeroRedundancyOptimizer got an empty parameter \"", "\"list\"", ")", "for", "param", "in", "self", ".", "_all_params", ":", "if", "not", "isinstance", "(", "param", ",", "torch", ".", "Tensor", ")", ":", "raise", "TypeError", "(", "\"params argument should be an iterable of \"", "\"Tensors, but got an iterable containing \"", "f\"{torch.typename(param)}\"", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/optim/zero_redundancy_optimizer.py#L1289-L1318
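A hedged sketch of the two documented failure modes. It calls the private method on an uninitialized instance so no process group is needed; this leans on internals and may break across versions.

```python
import torch
from torch.distributed.optim import ZeroRedundancyOptimizer

# Exercise the private check without initializing torch.distributed.
zero = ZeroRedundancyOptimizer.__new__(ZeroRedundancyOptimizer)
for bad in (torch.randn(4), []):   # bare tensor -> TypeError, empty -> ValueError
    try:
        zero._verify_and_init_params(bad)
    except (TypeError, ValueError) as err:
        print(type(err).__name__, ':', err)
```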
arangodb/arangodb
0d658689c7d1b721b314fa3ca27d38303e1570c8
3rdParty/V8/v7.9.317/tools/run-clang-tidy.py
python
CheckCompDB
(build_folder)
return os.path.isfile(os.path.join(build_folder, 'compile_commands.json'))
Checks if a compilation database exists in the build_folder.
Checks if a compilation database exists in the build_folder.
[ "Checks", "if", "a", "compilation", "database", "exists", "in", "the", "build_folder", "." ]
def CheckCompDB(build_folder): """ Checks if a compilation database exists in the build_folder. """ return os.path.isfile(os.path.join(build_folder, 'compile_commands.json'))
[ "def", "CheckCompDB", "(", "build_folder", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "build_folder", ",", "'compile_commands.json'", ")", ")" ]
https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/v7.9.317/tools/run-clang-tidy.py#L267-L271
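A guard sketch showing how a driver script might use the check above, assuming the function is in scope; the build directory name is a placeholder.

```python
import sys

build_folder = 'out/Release'   # hypothetical build directory
if not CheckCompDB(build_folder):
    sys.exit('compile_commands.json not found in %s' % build_folder)
```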
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py2/scipy/optimize/_tstutils.py
python
cplx01_f
(z, n, a)
return z**n - a
r"""z**n-a: Use to find the n-th root of a
r"""z**n-a: Use to find the n-th root of a
[ "r", "z", "**", "n", "-", "a", ":", "Use", "to", "find", "the", "n", "-", "th", "root", "of", "a" ]
def cplx01_f(z, n, a): r"""z**n-a: Use to find the n-th root of a""" return z**n - a
[ "def", "cplx01_f", "(", "z", ",", "n", ",", "a", ")", ":", "return", "z", "**", "n", "-", "a" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/optimize/_tstutils.py#L574-L576
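A worked example matching the docstring's purpose: with n=3, a=8, the function's real root is 2, recoverable with SciPy's scalar root finder (assuming cplx01_f is in scope from the record above).

```python
from scipy.optimize import newton

root = newton(cplx01_f, x0=1.5, args=(3, 8))   # solve z**3 - 8 = 0
print(root)                                    # ~2.0
```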
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py2/sklearn/discriminant_analysis.py
python
_class_cov
(X, y, priors=None, shrinkage=None)
return np.average(covs, axis=0, weights=priors)
Compute class covariance matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. priors : array-like, shape (n_classes,) Class priors. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Returns ------- cov : array-like, shape (n_features, n_features) Class covariance matrix.
Compute class covariance matrix.
[ "Compute", "class", "covariance", "matrix", "." ]
def _class_cov(X, y, priors=None, shrinkage=None): """Compute class covariance matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. priors : array-like, shape (n_classes,) Class priors. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Returns ------- cov : array-like, shape (n_features, n_features) Class covariance matrix. """ classes = np.unique(y) covs = [] for group in classes: Xg = X[y == group, :] covs.append(np.atleast_2d(_cov(Xg, shrinkage))) return np.average(covs, axis=0, weights=priors)
[ "def", "_class_cov", "(", "X", ",", "y", ",", "priors", "=", "None", ",", "shrinkage", "=", "None", ")", ":", "classes", "=", "np", ".", "unique", "(", "y", ")", "covs", "=", "[", "]", "for", "group", "in", "classes", ":", "Xg", "=", "X", "[", "y", "==", "group", ",", ":", "]", "covs", ".", "append", "(", "np", ".", "atleast_2d", "(", "_cov", "(", "Xg", ",", "shrinkage", ")", ")", ")", "return", "np", ".", "average", "(", "covs", ",", "axis", "=", "0", ",", "weights", "=", "priors", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/discriminant_analysis.py#L97-L127
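A plain-numpy sketch of the no-shrinkage case; `_class_cov` is private to sklearn.discriminant_analysis, so this reproduces its arithmetic (per-class biased covariance, then a weighted average) rather than importing it.

```python
import numpy as np

X = np.array([[0., 0.], [1., 1.], [2., 0.], [3., 1.]])
y = np.array([0, 0, 1, 1])
covs = [np.cov(X[y == g].T, bias=True) for g in np.unique(y)]  # empirical, 1/n
print(np.average(covs, axis=0))  # equal priors -> pooled class covariance
```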
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/base.py
python
SelectionMixin._is_builtin_func
(self, arg)
return self._builtin_table.get(arg, arg)
if we define an builtin function for this argument, return it, otherwise return the arg
if we define an builtin function for this argument, return it, otherwise return the arg
[ "if", "we", "define", "an", "builtin", "function", "for", "this", "argument", "return", "it", "otherwise", "return", "the", "arg" ]
def _is_builtin_func(self, arg): """ if we define an builtin function for this argument, return it, otherwise return the arg """ return self._builtin_table.get(arg, arg)
[ "def", "_is_builtin_func", "(", "self", ",", "arg", ")", ":", "return", "self", ".", "_builtin_table", ".", "get", "(", "arg", ",", "arg", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/base.py#L655-L660
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/engine/training_v1.py
python
Model._set_input_attrs
(self, inputs)
return inputs
Sets attributes related to the inputs of the Model.
Sets attributes related to the inputs of the Model.
[ "Sets", "attributes", "related", "to", "the", "inputs", "of", "the", "Model", "." ]
def _set_input_attrs(self, inputs): """Sets attributes related to the inputs of the Model.""" if self.inputs: raise ValueError('Model inputs are already set.') if self.__class__.__name__ == 'Sequential' and not self.built: if tensor_util.is_tf_type(inputs): input_shape = (None,) + tuple(inputs.shape.as_list()[1:]) elif isinstance(inputs, tensor_shape.TensorShape): input_shape = (None,) + tuple(inputs.as_list()[1:]) elif isinstance(inputs, dict): # We assert that the first layer is a FeatureLayer. if not training_utils_v1.is_feature_layer(self.layers[0]): raise ValueError('Passing a dictionary input to a Sequential Model ' 'which doesn\'t have FeatureLayer as the first layer' ' is an error.') input_shape = (None,) else: input_shape = (None,) + tuple(inputs.shape[1:]) self._build_input_shape = input_shape # Cast inputs to the compute dtype. This is primarily used # when saving to determine the correct dtype in the input signature. inputs = self._maybe_cast_inputs(inputs) # On-the-fly setting of symbolic model inputs (either by using the tensor # provided, or by creating a placeholder if Numpy data was provided). model_inputs = training_utils_v1.ModelInputs(inputs) inputs = model_inputs.get_symbolic_inputs() self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True) self.input_names = model_inputs.get_input_names() self._feed_inputs = [] self._feed_input_names = [] self._feed_input_shapes = [] for k, v in model_inputs.as_dict(): if backend.is_placeholder(v): self._feed_input_names.append(k) self._feed_inputs.append(v) self._feed_input_shapes.append(backend.int_shape(v)) return inputs
[ "def", "_set_input_attrs", "(", "self", ",", "inputs", ")", ":", "if", "self", ".", "inputs", ":", "raise", "ValueError", "(", "'Model inputs are already set.'", ")", "if", "self", ".", "__class__", ".", "__name__", "==", "'Sequential'", "and", "not", "self", ".", "built", ":", "if", "tensor_util", ".", "is_tf_type", "(", "inputs", ")", ":", "input_shape", "=", "(", "None", ",", ")", "+", "tuple", "(", "inputs", ".", "shape", ".", "as_list", "(", ")", "[", "1", ":", "]", ")", "elif", "isinstance", "(", "inputs", ",", "tensor_shape", ".", "TensorShape", ")", ":", "input_shape", "=", "(", "None", ",", ")", "+", "tuple", "(", "inputs", ".", "as_list", "(", ")", "[", "1", ":", "]", ")", "elif", "isinstance", "(", "inputs", ",", "dict", ")", ":", "# We assert that the first layer is a FeatureLayer.", "if", "not", "training_utils_v1", ".", "is_feature_layer", "(", "self", ".", "layers", "[", "0", "]", ")", ":", "raise", "ValueError", "(", "'Passing a dictionary input to a Sequential Model '", "'which doesn\\'t have FeatureLayer as the first layer'", "' is an error.'", ")", "input_shape", "=", "(", "None", ",", ")", "else", ":", "input_shape", "=", "(", "None", ",", ")", "+", "tuple", "(", "inputs", ".", "shape", "[", "1", ":", "]", ")", "self", ".", "_build_input_shape", "=", "input_shape", "# Cast inputs to the compute dtype. This is primarily used", "# when saving to determine the correct dtype in the input signature.", "inputs", "=", "self", ".", "_maybe_cast_inputs", "(", "inputs", ")", "# On-the-fly setting of symbolic model inputs (either by using the tensor", "# provided, or by creating a placeholder if Numpy data was provided).", "model_inputs", "=", "training_utils_v1", ".", "ModelInputs", "(", "inputs", ")", "inputs", "=", "model_inputs", ".", "get_symbolic_inputs", "(", ")", "self", ".", "inputs", "=", "model_inputs", ".", "get_symbolic_inputs", "(", "return_single_as_list", "=", "True", ")", "self", ".", "input_names", "=", "model_inputs", ".", "get_input_names", "(", ")", "self", ".", "_feed_inputs", "=", "[", "]", "self", ".", "_feed_input_names", "=", "[", "]", "self", ".", "_feed_input_shapes", "=", "[", "]", "for", "k", ",", "v", "in", "model_inputs", ".", "as_dict", "(", ")", ":", "if", "backend", ".", "is_placeholder", "(", "v", ")", ":", "self", ".", "_feed_input_names", ".", "append", "(", "k", ")", "self", ".", "_feed_inputs", ".", "append", "(", "v", ")", "self", ".", "_feed_input_shapes", ".", "append", "(", "backend", ".", "int_shape", "(", "v", ")", ")", "return", "inputs" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/engine/training_v1.py#L2636-L2678
alexozer/jankdrone
c4b403eb254b41b832ab2bdfade12ba59c99e5dc
shm/lib/pyratemp/pyratemp.py
python
TemplateParseError.__init__
(self, err, errpos)
:Parameters: - `err`: error-message or exception to wrap - `errpos`: ``(filename,row,col)`` where the error occured.
:Parameters: - `err`: error-message or exception to wrap - `errpos`: ``(filename,row,col)`` where the error occured.
[ ":", "Parameters", ":", "-", "err", ":", "error", "-", "message", "or", "exception", "to", "wrap", "-", "errpos", ":", "(", "filename", "row", "col", ")", "where", "the", "error", "occured", "." ]
def __init__(self, err, errpos): """ :Parameters: - `err`: error-message or exception to wrap - `errpos`: ``(filename,row,col)`` where the error occured. """ self.err = err self.filename, self.row, self.col = errpos TemplateException.__init__(self)
[ "def", "__init__", "(", "self", ",", "err", ",", "errpos", ")", ":", "self", ".", "err", "=", "err", "self", ".", "filename", ",", "self", ".", "row", ",", "self", ".", "col", "=", "errpos", "TemplateException", ".", "__init__", "(", "self", ")" ]
https://github.com/alexozer/jankdrone/blob/c4b403eb254b41b832ab2bdfade12ba59c99e5dc/shm/lib/pyratemp/pyratemp.py#L338-L346
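A raising/inspection sketch for the wrapper above, assuming pyratemp's class is in scope; the filename and position are placeholders.

```python
try:
    raise TemplateParseError("unclosed tag", ("page.tpl", 12, 8))
except TemplateParseError as exc:
    print(exc.filename, exc.row, exc.col, '-', exc.err)  # page.tpl 12 8 - unclosed tag
```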
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_aarch64/python2.7/dist-packages/geodesy/wu_point.py
python
WuPoint.toPoint
(self)
return self.utm.toPoint()
:returns: Corresponding `geometry_msgs/Point`_ message.
:returns: Corresponding `geometry_msgs/Point`_ message.
[ ":", "returns", ":", "Corresponding", "geometry_msgs", "/", "Point", "_", "message", "." ]
def toPoint(self): """:returns: Corresponding `geometry_msgs/Point`_ message.""" return self.utm.toPoint()
[ "def", "toPoint", "(", "self", ")", ":", "return", "self", ".", "utm", ".", "toPoint", "(", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_aarch64/python2.7/dist-packages/geodesy/wu_point.py#L98-L100
citizenfx/fivem
88276d40cc7baf8285d02754cc5ae42ec7a8563f
vendor/chromium/mojo/public/tools/bindings/pylib/mojom/generate/translate.py
python
_EnumField
(module, enum, parsed_field, parent_kind)
return field
Args: module: {mojom.Module} Module currently being constructed. enum: {mojom.Enum} Enum this field belongs to. parsed_field: {ast.EnumValue} Parsed enum value. parent_kind: {mojom.Kind} The enclosing type. Returns: {mojom.EnumField} AST enum field.
Args: module: {mojom.Module} Module currently being constructed. enum: {mojom.Enum} Enum this field belongs to. parsed_field: {ast.EnumValue} Parsed enum value. parent_kind: {mojom.Kind} The enclosing type.
[ "Args", ":", "module", ":", "{", "mojom", ".", "Module", "}", "Module", "currently", "being", "constructed", ".", "enum", ":", "{", "mojom", ".", "Enum", "}", "Enum", "this", "field", "belongs", "to", ".", "parsed_field", ":", "{", "ast", ".", "EnumValue", "}", "Parsed", "enum", "value", ".", "parent_kind", ":", "{", "mojom", ".", "Kind", "}", "The", "enclosing", "type", "." ]
def _EnumField(module, enum, parsed_field, parent_kind): """ Args: module: {mojom.Module} Module currently being constructed. enum: {mojom.Enum} Enum this field belongs to. parsed_field: {ast.EnumValue} Parsed enum value. parent_kind: {mojom.Kind} The enclosing type. Returns: {mojom.EnumField} AST enum field. """ field = mojom.EnumField() field.mojom_name = parsed_field.mojom_name # TODO(mpcomplete): FixupExpression should be done in the second pass, # so constants and enums can refer to each other. # TODO(mpcomplete): But then, what if constants are initialized to an enum? Or # vice versa? if parent_kind: field.value = _FixupExpression( module, parsed_field.value, (module.mojom_namespace, parent_kind.mojom_name), enum) else: field.value = _FixupExpression( module, parsed_field.value, (module.mojom_namespace, ), enum) field.attributes = _AttributeListToDict(parsed_field.attribute_list) value = mojom.EnumValue(module, enum, field) module.values[value.GetSpec()] = value return field
[ "def", "_EnumField", "(", "module", ",", "enum", ",", "parsed_field", ",", "parent_kind", ")", ":", "field", "=", "mojom", ".", "EnumField", "(", ")", "field", ".", "mojom_name", "=", "parsed_field", ".", "mojom_name", "# TODO(mpcomplete): FixupExpression should be done in the second pass,", "# so constants and enums can refer to each other.", "# TODO(mpcomplete): But then, what if constants are initialized to an enum? Or", "# vice versa?", "if", "parent_kind", ":", "field", ".", "value", "=", "_FixupExpression", "(", "module", ",", "parsed_field", ".", "value", ",", "(", "module", ".", "mojom_namespace", ",", "parent_kind", ".", "mojom_name", ")", ",", "enum", ")", "else", ":", "field", ".", "value", "=", "_FixupExpression", "(", "module", ",", "parsed_field", ".", "value", ",", "(", "module", ".", "mojom_namespace", ",", ")", ",", "enum", ")", "field", ".", "attributes", "=", "_AttributeListToDict", "(", "parsed_field", ".", "attribute_list", ")", "value", "=", "mojom", ".", "EnumValue", "(", "module", ",", "enum", ",", "field", ")", "module", ".", "values", "[", "value", ".", "GetSpec", "(", ")", "]", "=", "value", "return", "field" ]
https://github.com/citizenfx/fivem/blob/88276d40cc7baf8285d02754cc5ae42ec7a8563f/vendor/chromium/mojo/public/tools/bindings/pylib/mojom/generate/translate.py#L442-L469
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/parallel_for/pfor.py
python
WhileOp.inputs
(self)
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
Input to all the Enter nodes.
Input to all the Enter nodes.
[ "Input", "to", "all", "the", "Enter", "nodes", "." ]
def inputs(self): """Input to all the Enter nodes.""" return [x.op.inputs[0] for x in self._enters + self._direct_enters]
[ "def", "inputs", "(", "self", ")", ":", "return", "[", "x", ".", "op", ".", "inputs", "[", "0", "]", "for", "x", "in", "self", ".", "_enters", "+", "self", ".", "_direct_enters", "]" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/parallel_for/pfor.py#L327-L329
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/_lobpcg.py
python
_matrix_polynomial_value
(poly, x, zero_power=None)
return _polynomial_value(poly, x, zero_power, transition)
Evaluates `poly(x)` for the (batched) matrix input `x`. Check out `_polynomial_value` function for more details.
Evaluates `poly(x)` for the (batched) matrix input `x`. Check out `_polynomial_value` function for more details.
[ "Evaluates", "poly", "(", "x", ")", "for", "the", "(", "batched", ")", "matrix", "input", "x", ".", "Check", "out", "_polynomial_value", "function", "for", "more", "details", "." ]
def _matrix_polynomial_value(poly, x, zero_power=None): """ Evaluates `poly(x)` for the (batched) matrix input `x`. Check out `_polynomial_value` function for more details. """ # matrix-aware Horner's rule iteration def transition(curr_poly_val, x, poly_coeff): res = x.matmul(curr_poly_val) res.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1)) return res if zero_power is None: zero_power = torch.eye(x.size(-1), x.size(-1), dtype=x.dtype, device=x.device) \ .view(*([1] * len(list(x.shape[:-2]))), x.size(-1), x.size(-1)) return _polynomial_value(poly, x, zero_power, transition)
[ "def", "_matrix_polynomial_value", "(", "poly", ",", "x", ",", "zero_power", "=", "None", ")", ":", "# matrix-aware Horner's rule iteration", "def", "transition", "(", "curr_poly_val", ",", "x", ",", "poly_coeff", ")", ":", "res", "=", "x", ".", "matmul", "(", "curr_poly_val", ")", "res", ".", "diagonal", "(", "dim1", "=", "-", "2", ",", "dim2", "=", "-", "1", ")", ".", "add_", "(", "poly_coeff", ".", "unsqueeze", "(", "-", "1", ")", ")", "return", "res", "if", "zero_power", "is", "None", ":", "zero_power", "=", "torch", ".", "eye", "(", "x", ".", "size", "(", "-", "1", ")", ",", "x", ".", "size", "(", "-", "1", ")", ",", "dtype", "=", "x", ".", "dtype", ",", "device", "=", "x", ".", "device", ")", ".", "view", "(", "*", "(", "[", "1", "]", "*", "len", "(", "list", "(", "x", ".", "shape", "[", ":", "-", "2", "]", ")", ")", ")", ",", "x", ".", "size", "(", "-", "1", ")", ",", "x", ".", "size", "(", "-", "1", ")", ")", "return", "_polynomial_value", "(", "poly", ",", "x", ",", "zero_power", ",", "transition", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/_lobpcg.py#L105-L121
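A standalone sketch of the same matrix-aware Horner iteration. The highest-degree-first coefficient order is an assumption of this sketch, since the private helper's ordering is defined elsewhere in _lobpcg.py.

```python
import torch

def horner_matrix(coeffs, X):
    # p(X) via Horner's rule: val <- X @ val + c * I at each step.
    val = torch.zeros_like(X)
    for c in coeffs:                          # assumed highest-degree first
        val = X @ val
        val.diagonal(dim1=-2, dim2=-1).add_(c)
    return val

X = torch.diag(torch.tensor([2.0, 3.0]))
print(horner_matrix([1.0, 2.0, 3.0], X))      # p(x)=x**2+2x+3 -> diag(11., 18.)
```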
moderngl/moderngl
32fe79927e02b0fa893b3603d677bdae39771e14
examples/growing_buffers.py
python
Points.__init__
(self, ctx, num_points)
Args: ctx: moderngl context num_points: Initial number of points to allocate
Args: ctx: moderngl context num_points: Initial number of points to allocate
[ "Args", ":", "ctx", ":", "moderngl", "context", "num_points", ":", "Initial", "number", "of", "points", "to", "allocate" ]
def __init__(self, ctx, num_points): """ Args: ctx: moderngl context num_points: Initial number of points to allocate """ self.points = [] self.ctx = ctx self.buffer = self.ctx.buffer(reserve=num_points * 12) # 12 bytes for a 3f self.program = self.ctx.program( vertex_shader=""" #version 330 in vec3 in_position; uniform mat4 model_matrix; void main() { gl_Position = model_matrix * vec4(in_position, 1.0); } """, fragment_shader=""" #version 330 out vec4 outColor; void main() { outColor = vec4(1.0); } """, ) self.vao = self.ctx.vertex_array( self.program, [(self.buffer, '3f', 'in_position')], )
[ "def", "__init__", "(", "self", ",", "ctx", ",", "num_points", ")", ":", "self", ".", "points", "=", "[", "]", "self", ".", "ctx", "=", "ctx", "self", ".", "buffer", "=", "self", ".", "ctx", ".", "buffer", "(", "reserve", "=", "num_points", "*", "12", ")", "# 12 bytes for a 3f", "self", ".", "program", "=", "self", ".", "ctx", ".", "program", "(", "vertex_shader", "=", "\"\"\"\n #version 330\n in vec3 in_position;\n uniform mat4 model_matrix;\n void main() {\n gl_Position = model_matrix * vec4(in_position, 1.0);\n }\n \"\"\"", ",", "fragment_shader", "=", "\"\"\"\n #version 330\n out vec4 outColor;\n void main() {\n outColor = vec4(1.0);\n }\n \"\"\"", ",", ")", "self", ".", "vao", "=", "self", ".", "ctx", ".", "vertex_array", "(", "self", ".", "program", ",", "[", "(", "self", ".", "buffer", ",", "'3f'", ",", "'in_position'", ")", "]", ",", ")" ]
https://github.com/moderngl/moderngl/blob/32fe79927e02b0fa893b3603d677bdae39771e14/examples/growing_buffers.py#L24-L53
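A headless construction sketch; the full example opens a window, but a standalone context is enough to exercise the up-front buffer reservation (assuming the Points class above is in scope).

```python
import moderngl

ctx = moderngl.create_standalone_context()
points = Points(ctx, num_points=1000)   # reserves 1000 * 12 bytes (one 3f each)
```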
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/poplib.py
python
POP3.rpop
(self, user)
return self._shortcmd('RPOP %s' % user)
Not sure what this does.
Not sure what this does.
[ "Not", "sure", "what", "this", "does", "." ]
def rpop(self, user): """Not sure what this does.""" return self._shortcmd('RPOP %s' % user)
[ "def", "rpop", "(", "self", ",", "user", ")", ":", "return", "self", ".", "_shortcmd", "(", "'RPOP %s'", "%", "user", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/poplib.py#L306-L308
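A usage sketch; host and user are placeholders, and RPOP is a legacy command that many modern servers reject.

```python
import poplib

pop = poplib.POP3('pop.example.com')   # placeholder host
pop.user('alice')
print(pop.rpop('alice'))               # server response, if RPOP is supported
pop.quit()
```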
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
PyApp_GetTraitsIfExists
(*args)
return _core_.PyApp_GetTraitsIfExists(*args)
PyApp_GetTraitsIfExists() -> wxAppTraits This function provides safer access to traits object than wx.GetApp().GetTraits() during startup or termination when the global application object itself may be unavailable.
PyApp_GetTraitsIfExists() -> wxAppTraits
[ "PyApp_GetTraitsIfExists", "()", "-", ">", "wxAppTraits" ]
def PyApp_GetTraitsIfExists(*args): """ PyApp_GetTraitsIfExists() -> wxAppTraits This function provides safer access to traits object than wx.GetApp().GetTraits() during startup or termination when the global application object itself may be unavailable. """ return _core_.PyApp_GetTraitsIfExists(*args)
[ "def", "PyApp_GetTraitsIfExists", "(", "*", "args", ")", ":", "return", "_core_", ".", "PyApp_GetTraitsIfExists", "(", "*", "args", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L8255-L8263
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py
python
ParseResults.pprint
(self, *args, **kwargs)
Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']]
Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
[ "Pretty", "-", "printer", "for", "parsed", "results", "as", "a", "list", "using", "the", "C", "{", "pprint", "}", "module", ".", "Accepts", "additional", "positional", "or", "keyword", "args", "as", "defined", "for", "the", "C", "{", "pprint", ".", "pprint", "}", "method", ".", "(", "U", "{", "http", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "library", "/", "pprint", ".", "html#pprint", ".", "pprint", "}", ")" ]
def pprint(self, *args, **kwargs): """ Pretty-printer for parsed results as a list, using the C{pprint} module. Accepts additional positional or keyword args as defined for the C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) Example:: ident = Word(alphas, alphanums) num = Word(nums) func = Forward() term = ident | num | Group('(' + func + ')') func <<= ident + Group(Optional(delimitedList(term))) result = func.parseString("fna a,b,(fnb c,d,200),100") result.pprint(width=40) prints:: ['fna', ['a', 'b', ['(', 'fnb', ['c', 'd', '200'], ')'], '100']] """ pprint.pprint(self.asList(), *args, **kwargs)
[ "def", "pprint", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pprint", ".", "pprint", "(", "self", ".", "asList", "(", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py#L916-L937
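A runnable form of the docstring's own example (pyparsing 2.x API, as vendored by setuptools).

```python
from pyparsing import (Word, Group, Forward, Optional, delimitedList,
                       alphas, alphanums, nums)

ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
```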
glotzerlab/hoomd-blue
f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a
hoomd/tune/attr_tuner.py
python
ManualTuneDefinition.__hash__
(self)
return hash((self._user_get_x, self._user_set_x, self._user_get_y, self._target))
Compute a hash of the tune definition.
Compute a hash of the tune definition.
[ "Compute", "a", "hash", "of", "the", "tune", "definition", "." ]
def __hash__(self): """Compute a hash of the tune definition.""" return hash((self._user_get_x, self._user_set_x, self._user_get_y, self._target))
[ "def", "__hash__", "(", "self", ")", ":", "return", "hash", "(", "(", "self", ".", "_user_get_x", ",", "self", ".", "_user_set_x", ",", "self", ".", "_user_get_y", ",", "self", ".", "_target", ")", ")" ]
https://github.com/glotzerlab/hoomd-blue/blob/f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a/hoomd/tune/attr_tuner.py#L210-L213
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/control/blocks/state_machine.py
python
StateMachineBase.next_state
(self,state,*args,**kwargs)
return state
Subclasses should override this to implement the transitions
Subclasses should override this to implement the transitions
[ "Subclasses", "should", "override", "this", "to", "implement", "the", "transitions" ]
def next_state(self,state,*args,**kwargs): """Subclasses should override this to implement the transitions""" return state
[ "def", "next_state", "(", "self", ",", "state", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "state" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/control/blocks/state_machine.py#L47-L49
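A hypothetical subclass sketch of the override point above; the state names and keyword argument are invented for illustration.

```python
class GateController(StateMachineBase):
    def next_state(self, state, sensor_triggered=False, **kwargs):
        if state == 'closed' and sensor_triggered:
            return 'open'
        if state == 'open' and not sensor_triggered:
            return 'closed'
        return state   # no transition
```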
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ILL_utilities.py
python
NameSource.__init__
(self, prefix, cleanupMode)
Initialize an instance of the class.
Initialize an instance of the class.
[ "Initialize", "an", "instance", "of", "the", "class", "." ]
def __init__(self, prefix, cleanupMode): """Initialize an instance of the class.""" self._names = set() self._prefix = '__' + prefix if cleanupMode == Cleanup.ON else prefix
[ "def", "__init__", "(", "self", ",", "prefix", ",", "cleanupMode", ")", ":", "self", ".", "_names", "=", "set", "(", ")", "self", ".", "_prefix", "=", "'__'", "+", "prefix", "if", "cleanupMode", "==", "Cleanup", ".", "ON", "else", "prefix" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ILL_utilities.py#L62-L65
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/random.py
python
Random._randbelow
(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF, _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType)
return _int(self.random() * n)
Return a random int in the range [0,n) Handles the case where n has more bits than returned by a single call to the underlying generator.
Return a random int in the range [0,n)
[ "Return", "a", "random", "int", "in", "the", "range", "[", "0", "n", ")" ]
def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF, _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType): """Return a random int in the range [0,n) Handles the case where n has more bits than returned by a single call to the underlying generator. """ try: getrandbits = self.getrandbits except AttributeError: pass else: # Only call self.getrandbits if the original random() builtin method # has not been overridden or if a new getrandbits() was supplied. # This assures that the two methods correspond. if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method: k = _int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2) r = getrandbits(k) while r >= n: r = getrandbits(k) return r if n >= _maxwidth: _warn("Underlying random() generator does not supply \n" "enough bits to choose from a population range this large") return _int(self.random() * n)
[ "def", "_randbelow", "(", "self", ",", "n", ",", "_log", "=", "_log", ",", "_int", "=", "int", ",", "_maxwidth", "=", "1L", "<<", "BPF", ",", "_Method", "=", "_MethodType", ",", "_BuiltinMethod", "=", "_BuiltinMethodType", ")", ":", "try", ":", "getrandbits", "=", "self", ".", "getrandbits", "except", "AttributeError", ":", "pass", "else", ":", "# Only call self.getrandbits if the original random() builtin method", "# has not been overridden or if a new getrandbits() was supplied.", "# This assures that the two methods correspond.", "if", "type", "(", "self", ".", "random", ")", "is", "_BuiltinMethod", "or", "type", "(", "getrandbits", ")", "is", "_Method", ":", "k", "=", "_int", "(", "1.00001", "+", "_log", "(", "n", "-", "1", ",", "2.0", ")", ")", "# 2**k > n-1 > 2**(k-2)", "r", "=", "getrandbits", "(", "k", ")", "while", "r", ">=", "n", ":", "r", "=", "getrandbits", "(", "k", ")", "return", "r", "if", "n", ">=", "_maxwidth", ":", "_warn", "(", "\"Underlying random() generator does not supply \\n\"", "\"enough bits to choose from a population range this large\"", ")", "return", "_int", "(", "self", ".", "random", "(", ")", "*", "n", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/random.py#L246-L271
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Window.SetCursor
(*args, **kwargs)
return _core_.Window_SetCursor(*args, **kwargs)
SetCursor(self, Cursor cursor) -> bool Sets the window's cursor. Notice that the window cursor also sets it for the children of the window implicitly. The cursor may be wx.NullCursor in which case the window cursor will be reset back to default.
SetCursor(self, Cursor cursor) -> bool
[ "SetCursor", "(", "self", "Cursor", "cursor", ")", "-", ">", "bool" ]
def SetCursor(*args, **kwargs): """ SetCursor(self, Cursor cursor) -> bool Sets the window's cursor. Notice that the window cursor also sets it for the children of the window implicitly. The cursor may be wx.NullCursor in which case the window cursor will be reset back to default. """ return _core_.Window_SetCursor(*args, **kwargs)
[ "def", "SetCursor", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Window_SetCursor", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L10958-L10968
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/tpu/python/tpu/keras_support.py
python
TPUDatasetInfeedManager._verify_dataset_shape
(self, dataset)
Verifies a dataset is of an appropriate shape for TPUs.
Verifies a dataset is of an appropriate shape for TPUs.
[ "Verifies", "a", "dataset", "is", "of", "an", "appropriate", "shape", "for", "TPUs", "." ]
def _verify_dataset_shape(self, dataset): """Verifies a dataset is of an appropriate shape for TPUs.""" dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset) dataset_output_classes = dataset_ops.get_legacy_output_classes(dataset) if not isinstance(dataset, dataset_ops.DatasetV2): raise ValueError('The function passed as the `x` parameter did not ' 'return a `tf.data.Dataset`.') if not isinstance(dataset_output_classes, tuple): raise ValueError('The dataset must return a tuple of tf.Tensors, ' 'instead it returns: %s' % dataset_output_classes) if len(dataset_output_classes) != 2: raise ValueError('The dataset must return a 2-element tuple, got ' '%s output classes instead.' % (dataset_output_classes,)) for i, cls in enumerate(dataset_output_classes): if cls != ops.Tensor: raise ValueError('The dataset returned a non-Tensor type (%s) at ' 'index %d.' % (cls, i)) for i, shape in enumerate(dataset_output_shapes): if not shape: raise ValueError('The dataset returns a scalar tensor in ' 'tuple index %d. Did you forget to batch? ' '(Output shapes: %s).' % (i, dataset_output_shapes)) for j, dim in enumerate(shape): if dim.value is None: if j == 0: hint = (' Hint: did you use `ds.batch(BATCH_SIZE, ' 'drop_remainder=True)`?') else: hint = '' raise ValueError( 'The Keras-TPU integration for `tf.data` ' 'currently requires static shapes. The provided ' 'dataset only has a partially defined shape. ' '(Dimension %d of output tensor %d is not statically known ' 'for output shapes: %s.%s)' % (j, i, dataset_output_shapes, hint))
[ "def", "_verify_dataset_shape", "(", "self", ",", "dataset", ")", ":", "dataset_output_shapes", "=", "dataset_ops", ".", "get_legacy_output_shapes", "(", "dataset", ")", "dataset_output_classes", "=", "dataset_ops", ".", "get_legacy_output_classes", "(", "dataset", ")", "if", "not", "isinstance", "(", "dataset", ",", "dataset_ops", ".", "DatasetV2", ")", ":", "raise", "ValueError", "(", "'The function passed as the `x` parameter did not '", "'return a `tf.data.Dataset`.'", ")", "if", "not", "isinstance", "(", "dataset_output_classes", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'The dataset must return a tuple of tf.Tensors, '", "'instead it returns: %s'", "%", "dataset_output_classes", ")", "if", "len", "(", "dataset_output_classes", ")", "!=", "2", ":", "raise", "ValueError", "(", "'The dataset must return a 2-element tuple, got '", "'%s output classes instead.'", "%", "(", "dataset_output_classes", ",", ")", ")", "for", "i", ",", "cls", "in", "enumerate", "(", "dataset_output_classes", ")", ":", "if", "cls", "!=", "ops", ".", "Tensor", ":", "raise", "ValueError", "(", "'The dataset returned a non-Tensor type (%s) at '", "'index %d.'", "%", "(", "cls", ",", "i", ")", ")", "for", "i", ",", "shape", "in", "enumerate", "(", "dataset_output_shapes", ")", ":", "if", "not", "shape", ":", "raise", "ValueError", "(", "'The dataset returns a scalar tensor in '", "'tuple index %d. Did you forget to batch? '", "'(Output shapes: %s).'", "%", "(", "i", ",", "dataset_output_shapes", ")", ")", "for", "j", ",", "dim", "in", "enumerate", "(", "shape", ")", ":", "if", "dim", ".", "value", "is", "None", ":", "if", "j", "==", "0", ":", "hint", "=", "(", "' Hint: did you use `ds.batch(BATCH_SIZE, '", "'drop_remainder=True)`?'", ")", "else", ":", "hint", "=", "''", "raise", "ValueError", "(", "'The Keras-TPU integration for `tf.data` '", "'currently requires static shapes. The provided '", "'dataset only has a partially defined shape. '", "'(Dimension %d of output tensor %d is not statically known '", "'for output shapes: %s.%s)'", "%", "(", "j", ",", "i", ",", "dataset_output_shapes", ",", "hint", ")", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/tpu/python/tpu/keras_support.py#L786-L820
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/models/rnn/translate/data_utils.py
python
maybe_download
(directory, filename, url)
return filepath
Download filename from url unless it's already in directory.
Download filename from url unless it's already in directory.
[ "Download", "filename", "from", "url", "unless", "it", "s", "already", "in", "directory", "." ]
def maybe_download(directory, filename, url): """Download filename from url unless it's already in directory.""" if not os.path.exists(directory): print("Creating directory %s" % directory) os.mkdir(directory) filepath = os.path.join(directory, filename) if not os.path.exists(filepath): print("Downloading %s to %s" % (url, filepath)) filepath, _ = urllib.request.urlretrieve(url, filepath) statinfo = os.stat(filepath) print("Succesfully downloaded", filename, statinfo.st_size, "bytes") return filepath
[ "def", "maybe_download", "(", "directory", ",", "filename", ",", "url", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "print", "(", "\"Creating directory %s\"", "%", "directory", ")", "os", ".", "mkdir", "(", "directory", ")", "filepath", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "print", "(", "\"Downloading %s to %s\"", "%", "(", "url", ",", "filepath", ")", ")", "filepath", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "url", ",", "filepath", ")", "statinfo", "=", "os", ".", "stat", "(", "filepath", ")", "print", "(", "\"Succesfully downloaded\"", ",", "filename", ",", "statinfo", ".", "st_size", ",", "\"bytes\"", ")", "return", "filepath" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/models/rnn/translate/data_utils.py#L52-L63
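A usage sketch; the directory, filename, and URL are placeholders rather than the WMT defaults used elsewhere in data_utils.py.

```python
path = maybe_download("/tmp/translate_data", "corpus.tgz",
                      "http://example.com/corpus.tgz")   # placeholder URL
print(path)   # /tmp/translate_data/corpus.tgz
```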
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/math_ops.py
python
div
(x, y, name=None)
return _div_python2(x, y, name)
Divides x / y elementwise (using Python 2 division operator semantics). @compatibility(TF2) This function is deprecated in TF2. Prefer using the Tensor division operator, `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator semantics. @end_compatibility This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x` and `y` are both integers then the result will be an integer. This is in contrast to Python 3, where division with `/` is always a float while division with `//` is always an integer. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y.
Divides x / y elementwise (using Python 2 division operator semantics).
[ "Divides", "x", "/", "y", "elementwise", "(", "using", "Python", "2", "division", "operator", "semantics", ")", "." ]
def div(x, y, name=None): """Divides x / y elementwise (using Python 2 division operator semantics). @compatibility(TF2) This function is deprecated in TF2. Prefer using the Tensor division operator, `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator semantics. @end_compatibility This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x` and `y` are both integers then the result will be an integer. This is in contrast to Python 3, where division with `/` is always a float while division with `//` is always an integer. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y. """ return _div_python2(x, y, name)
[ "def", "div", "(", "x", ",", "y", ",", "name", "=", "None", ")", ":", "return", "_div_python2", "(", "x", ",", "y", ",", "name", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/math_ops.py#L1598-L1621
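A contrast sketch of the two semantics named in the docstring, via the TF 2.x compat endpoint.

```python
import tensorflow as tf

a, b = tf.constant(7), tf.constant(2)
print(tf.compat.v1.div(a, b))   # -> 3   (Python 2 semantics: int / int stays int)
print(tf.divide(a, b))          # -> 3.5 (Python 3 semantics)
```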
gabyx/ApproxMVBB
838f3ff7690a938f1e4199a5f41b6feefc32a603
example/kdTreeFiltering/python/Tools/Transformations/Transformations.py
python
Arcball.drag
(self, point)
Update current cursor window coordinates.
Update current cursor window coordinates.
[ "Update", "current", "cursor", "window", "coordinates", "." ]
def drag(self, point): """Update current cursor window coordinates.""" vnow = arcball_map_to_sphere(point, self._center, self._radius) if self._axis is not None: vnow = arcball_constrain_to_axis(vnow, self._axis) self._qpre = self._qnow t = numpy.cross(self._vdown, vnow) if numpy.dot(t, t) < _EPS: self._qnow = self._qdown else: q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]] self._qnow = quaternion_multiply(q, self._qdown)
[ "def", "drag", "(", "self", ",", "point", ")", ":", "vnow", "=", "arcball_map_to_sphere", "(", "point", ",", "self", ".", "_center", ",", "self", ".", "_radius", ")", "if", "self", ".", "_axis", "is", "not", "None", ":", "vnow", "=", "arcball_constrain_to_axis", "(", "vnow", ",", "self", ".", "_axis", ")", "self", ".", "_qpre", "=", "self", ".", "_qnow", "t", "=", "numpy", ".", "cross", "(", "self", ".", "_vdown", ",", "vnow", ")", "if", "numpy", ".", "dot", "(", "t", ",", "t", ")", "<", "_EPS", ":", "self", ".", "_qnow", "=", "self", ".", "_qdown", "else", ":", "q", "=", "[", "numpy", ".", "dot", "(", "self", ".", "_vdown", ",", "vnow", ")", ",", "t", "[", "0", "]", ",", "t", "[", "1", "]", ",", "t", "[", "2", "]", "]", "self", ".", "_qnow", "=", "quaternion_multiply", "(", "q", ",", "self", ".", "_qdown", ")" ]
https://github.com/gabyx/ApproxMVBB/blob/838f3ff7690a938f1e4199a5f41b6feefc32a603/example/kdTreeFiltering/python/Tools/Transformations/Transformations.py#L1589-L1600
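An interaction sketch following the Arcball doctest in transformations.py: place the ball, press, drag, then read the rotation matrix.

```python
ball = Arcball()
ball.place([320, 320], 320)   # window center and radius, in pixels
ball.down([500, 250])         # cursor press
ball.drag([475, 275])         # cursor move -> updates the quaternion
R = ball.matrix()             # 4x4 rotation matrix for rendering
```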
infinit/memo
3a8394d0f647efe03ccb8bfe885a7279cb8be8a6
elle/drake/src/drake/__init__.py
python
Version.__ge__
(self, rhs)
Whether a version is greater than another. >>> Version(1, 2, 3) >= Version(1, 2, 3) True >>> Version(1, 2, 4) >= Version(1, 2, 3) True >>> Version(1, 3, 2) >= Version(1, 2, 3) True >>> Version(2, 0, 0) >= Version(1, 10, 23) True >>> Version(1, 2, 3) >= Version(1, 2, 4) False >>> Version(1, 2, 3) >= Version(1, 3, 2) False
Whether a version is greater than another.
[ "Whether", "a", "version", "is", "greater", "than", "another", "." ]
def __ge__(self, rhs): """Whether a version is greater than another. >>> Version(1, 2, 3) >= Version(1, 2, 3) True >>> Version(1, 2, 4) >= Version(1, 2, 3) True >>> Version(1, 3, 2) >= Version(1, 2, 3) True >>> Version(2, 0, 0) >= Version(1, 10, 23) True >>> Version(1, 2, 3) >= Version(1, 2, 4) False >>> Version(1, 2, 3) >= Version(1, 3, 2) False """ assert self.__major is not None and rhs.__major is not None if self.__major == rhs.__major: minor = self.__minor or 0 rhs_minor = rhs.__minor or 0 if minor == rhs_minor: subminor = self.__subminor or 0 rhs_subminor = rhs.__subminor or 0 return subminor >= rhs_subminor else: return minor > rhs_minor else: return self.__major > rhs.__major
[ "def", "__ge__", "(", "self", ",", "rhs", ")", ":", "assert", "self", ".", "__major", "is", "not", "None", "and", "rhs", ".", "__major", "is", "not", "None", "if", "self", ".", "__major", "==", "rhs", ".", "__major", ":", "minor", "=", "self", ".", "__minor", "or", "0", "rhs_minor", "=", "rhs", ".", "__minor", "or", "0", "if", "minor", "==", "rhs_minor", ":", "subminor", "=", "self", ".", "__subminor", "or", "0", "rhs_subminor", "=", "rhs", ".", "__subminor", "or", "0", "return", "subminor", ">=", "rhs_subminor", "else", ":", "return", "minor", ">", "rhs_minor", "else", ":", "return", "self", ".", "__major", ">", "rhs", ".", "__major" ]
https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L3903-L3930
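The `or 0` fallbacks in the body mean partially specified versions compare as if their missing components were zero. A standalone sketch of that comparison on plain tuples (an illustration, not drake's actual Version class):

def version_ge(a, b):
    def pad(v):
        # Mirror the `self.__minor or 0` fallbacks in __ge__ above.
        return tuple(v) + (0,) * (3 - len(v))
    return pad(a) >= pad(b)

assert version_ge((1, 2, 3), (1, 2, 3))
assert version_ge((2,), (1, 10, 23))      # major version dominates
assert not version_ge((1, 2), (1, 2, 1))  # missing subminor counts as 0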
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/ilink.py
python
generate
(env)
Add Builders and construction variables for ilink to an Environment.
Add Builders and construction variables for ilink to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "ilink", "to", "an", "Environment", "." ]
def generate(env):
    """Add Builders and construction variables for ilink to an Environment."""
    SCons.Tool.createProgBuilder(env)

    env['LINK'] = 'ilink'
    env['LINKFLAGS'] = SCons.Util.CLVar('')
    env['LINKCOM'] = '$LINK $LINKFLAGS /O:$TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
    env['LIBDIRPREFIX']='/LIBPATH:'
    env['LIBDIRSUFFIX']=''
    env['LIBLINKPREFIX']=''
    env['LIBLINKSUFFIX']='$LIBSUFFIX'
[ "def", "generate", "(", "env", ")", ":", "SCons", ".", "Tool", ".", "createProgBuilder", "(", "env", ")", "env", "[", "'LINK'", "]", "=", "'ilink'", "env", "[", "'LINKFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "''", ")", "env", "[", "'LINKCOM'", "]", "=", "'$LINK $LINKFLAGS /O:$TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'", "env", "[", "'LIBDIRPREFIX'", "]", "=", "'/LIBPATH:'", "env", "[", "'LIBDIRSUFFIX'", "]", "=", "''", "env", "[", "'LIBLINKPREFIX'", "]", "=", "''", "env", "[", "'LIBLINKSUFFIX'", "]", "=", "'$LIBSUFFIX'" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/ilink.py#L40-L50
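A hedged SConstruct sketch showing how a tool module like this is activated; the tool name `ilink` matches the module above, while `hello.c` is a hypothetical source file and the ilink linker must be on PATH for an actual build.

from SCons.Environment import Environment

env = Environment(tools=['default', 'ilink'])  # SCons calls generate(env) above
env.Program('hello', ['hello.c'])              # linked via the $LINKCOM set above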
xiaolonw/caffe-video_triplet
c39ea1ad6e937ccf7deba4510b7e555165abf05f
scripts/cpp_lint.py
python
_CppLintState.ResetErrorCounts
(self)
Sets the module's error statistic back to zero.
Sets the module's error statistic back to zero.
[ "Sets", "the", "module", "s", "error", "statistic", "back", "to", "zero", "." ]
def ResetErrorCounts(self):
  """Sets the module's error statistic back to zero."""
  self.error_count = 0
  self.errors_by_category = {}
[ "def", "ResetErrorCounts", "(", "self", ")", ":", "self", ".", "error_count", "=", "0", "self", ".", "errors_by_category", "=", "{", "}" ]
https://github.com/xiaolonw/caffe-video_triplet/blob/c39ea1ad6e937ccf7deba4510b7e555165abf05f/scripts/cpp_lint.py#L742-L745
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/traci/_overheadwire.py
python
OverheadWireDomain.getVehicleIDs
(self, stopID)
return self._getUniversal(tc.VAR_STOP_STARTING_VEHICLES_IDS, stopID)
getOverheadWireWaiting() -> list(string)
Get the IDs of vehicles stopped at the named overhead wire.
getOverheadWireWaiting() -> list(string)
Get the IDs of vehicles stopped at the named overhead wire.
[ "getOverheadWireWaiting", "()", "-", ">", "list", "(", "string", ")", "Get", "the", "IDs", "of", "vehicles", "stopped", "at", "the", "named", "overhead", "wire", "." ]
def getVehicleIDs(self, stopID):
    """getOverheadWireWaiting() -> list(string)
    Get the IDs of vehicles stopped at the named overhead wire.
    """
    return self._getUniversal(tc.VAR_STOP_STARTING_VEHICLES_IDS, stopID)
[ "def", "getVehicleIDs", "(", "self", ",", "stopID", ")", ":", "return", "self", ".", "_getUniversal", "(", "tc", ".", "VAR_STOP_STARTING_VEHICLES_IDS", ",", "stopID", ")" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_overheadwire.py#L63-L67
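A hedged TraCI sketch for querying this value from a live simulation. The `overheadwire` domain attribute name, the `scenario.sumocfg` config, and the `ow0` wire id are assumptions; a running SUMO installation is required.

import traci

traci.start(["sumo", "-c", "scenario.sumocfg"])  # hypothetical config file
traci.simulationStep()
# IDs of vehicles currently stopped at the named overhead wire.
print(traci.overheadwire.getVehicleIDs("ow0"))   # hypothetical wire id
traci.close()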
arkenthera/electron-vibrancy
383153ef9ccb23a6c7517150d6bb0794dff3115e
scripts/cpplint.py
python
CleanseComments
(line)
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
Removes //-comments and single-line C-style /* */ comments.

Args:
  line: A line of C++ source.

Returns:
  The line with single-line comments removed.
Removes //-comments and single-line C-style /* */ comments.
[ "Removes", "//", "-", "comments", "and", "single", "-", "line", "C", "-", "style", "/", "*", "*", "/", "comments", "." ]
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  commentpos = line.find('//')
  if commentpos != -1 and not IsCppString(line[:commentpos]):
    line = line[:commentpos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
[ "def", "CleanseComments", "(", "line", ")", ":", "commentpos", "=", "line", ".", "find", "(", "'//'", ")", "if", "commentpos", "!=", "-", "1", "and", "not", "IsCppString", "(", "line", "[", ":", "commentpos", "]", ")", ":", "line", "=", "line", "[", ":", "commentpos", "]", ".", "rstrip", "(", ")", "# get rid of /* ... */", "return", "_RE_PATTERN_CLEANSE_LINE_C_COMMENTS", ".", "sub", "(", "''", ",", "line", ")" ]
https://github.com/arkenthera/electron-vibrancy/blob/383153ef9ccb23a6c7517150d6bb0794dff3115e/scripts/cpplint.py#L1130-L1143
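A self-contained sketch of the same comment-stripping idea. The regex is a plausible reconstruction of `_RE_PATTERN_CLEANSE_LINE_C_COMMENTS`, and the `IsCppString` guard is omitted, so comment markers inside string literals are not protected here.

import re

# Single-line /* ... */ blocks; plausible stand-in for cpplint's pattern.
_C_COMMENTS = re.compile(r'/\*(?:[^*]|\*(?!/))*\*/')

def cleanse_comments(line):
    pos = line.find('//')
    if pos != -1:  # real cpplint also verifies '//' is not inside a string
        line = line[:pos].rstrip()
    return _C_COMMENTS.sub('', line)

print(cleanse_comments('int x = 1;  // counter'))  # -> 'int x = 1;'
print(cleanse_comments('a = b /* tmp */ + c;'))    # -> 'a = b  + c;'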
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/TemplatePyMod/DocumentObject.py
python
ViewProvider.show
(self)
switches this object to visible
switches this object to visible
[ "switches", "this", "object", "to", "visible" ]
def show(self):
    "switches this object to visible"
    self.__vobject__.show()
[ "def", "show", "(", "self", ")", ":", "self", ".", "__vobject__", ".", "show", "(", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/TemplatePyMod/DocumentObject.py#L188-L190