Dataset schema (field: type, observed range):

nwo                 string, length 5 to 86
sha                 string, length 40 (fixed)
path                string, length 4 to 189
language            string, 1 distinct value ("python")
identifier          string, length 1 to 94
parameters          string, length 2 to 4.03k
argument_list       string, 1 distinct value
return_statement    string, length 0 to 11.5k
docstring           string, length 1 to 33.2k
docstring_summary   string, length 0 to 5.15k
docstring_tokens    sequence of strings
function            string, length 34 to 151k
function_tokens     sequence of strings
url                 string, length 90 to 278
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/layers/python/ops/sparse_ops.py
python
sparse_row_envelope
(sparse_input, row_axis=0, col_axis=1, name=None)
Returns the length of each 'row' in a `SparseTensor`.
[ "Returns", "the", "length", "of", "each", "row", "in", "a", "SparseTensor", "." ]
def sparse_row_envelope(sparse_input, row_axis=0, col_axis=1, name=None): """Returns the length of each 'row' in a `SparseTensor`. For example, if `sparse_input` has indices `[[0,0], [2, 0], [2, 1], [2, 2]]` and shape `[3, 3]`, this function will return `[1, 0, 3]`. Args: sparse_input: a `SparseTensor` of rank at least 2. row_axis: An integer. The axis for the row of the envelope matrix. Default is 0. col_axis: An integer. The axis for the col of the envelope matrix. Default is 1. name: A name for the operation (optional). Returns: A one-dimensional `Tensor` whose entries correspond to the length of each row of `SparseTensor`. Raises: ValueError: If row_axis and col_axis are the same axis or they are not integers. """ if not (isinstance(row_axis, compat.integral_types) and isinstance(col_axis, compat.integral_types)): raise ValueError("`row_axis` and `col_axis` must be integers.") if row_axis == col_axis: raise ValueError("Row and column can not be the same axis.") with ops.name_scope(name, "sparse_row_envelope", [sparse_input]): indices = sparse_input.indices row_indices = indices[:, row_axis] col_indices = indices[:, col_axis] num_rows = math_ops.cast(sparse_input.dense_shape[row_axis], dtypes.int32) row_envelope = math_ops.unsorted_segment_max( col_indices + 1, row_indices, num_rows, name=name) zeros = array_ops.zeros_like(row_envelope) return array_ops.where(row_envelope > zeros, row_envelope, zeros)
[ "def", "sparse_row_envelope", "(", "sparse_input", ",", "row_axis", "=", "0", ",", "col_axis", "=", "1", ",", "name", "=", "None", ")", ":", "if", "not", "(", "isinstance", "(", "row_axis", ",", "compat", ".", "integral_types", ")", "and", "isinstance", "(", "col_axis", ",", "compat", ".", "integral_types", ")", ")", ":", "raise", "ValueError", "(", "\"`row_axis` and `col_axis` must be integers.\"", ")", "if", "row_axis", "==", "col_axis", ":", "raise", "ValueError", "(", "\"Row and column can not be the same axis.\"", ")", "with", "ops", ".", "name_scope", "(", "name", ",", "\"sparse_row_envelope\"", ",", "[", "sparse_input", "]", ")", ":", "indices", "=", "sparse_input", ".", "indices", "row_indices", "=", "indices", "[", ":", ",", "row_axis", "]", "col_indices", "=", "indices", "[", ":", ",", "col_axis", "]", "num_rows", "=", "math_ops", ".", "cast", "(", "sparse_input", ".", "dense_shape", "[", "row_axis", "]", ",", "dtypes", ".", "int32", ")", "row_envelope", "=", "math_ops", ".", "unsorted_segment_max", "(", "col_indices", "+", "1", ",", "row_indices", ",", "num_rows", ",", "name", "=", "name", ")", "zeros", "=", "array_ops", ".", "zeros_like", "(", "row_envelope", ")", "return", "array_ops", ".", "where", "(", "row_envelope", ">", "zeros", ",", "row_envelope", ",", "zeros", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/layers/python/ops/sparse_ops.py#L190-L227
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/count-nodes-with-the-highest-score.py
python
Solution.countHighestScoreNodes
(self, parents)
return iter_dfs(adj)
:type parents: List[int] :rtype: int
[ ":", "type", "parents", ":", "List", "[", "int", "]", ":", "rtype", ":", "int" ]
def countHighestScoreNodes(self, parents): """ :type parents: List[int] :rtype: int """ def iter_dfs(adj): result = [0]*2 stk = [(1, (0, [0]))] while stk: step, args = stk.pop() if step == 1: i, ret = args cnts = [[0] for _ in xrange(len(adj[i]))] stk.append((2, (cnts, ret))) for j, child in enumerate(adj[i]): stk.append((1, (child, cnts[j]))) elif step == 2: cnts, ret = args ret[0] = sum(cnt[0] for cnt in cnts)+1 score = max((len(adj)-ret[0]), 1)*reduce(lambda x, y: x*y[0], cnts, 1) if score > result[0]: result[:] = [score, 1] elif score == result[0]: result[1] += 1 return result[1] adj = [[] for _ in xrange(len(parents))] # Space: O(n) for i in xrange(1, len(parents)): adj[parents[i]].append(i) return iter_dfs(adj)
[ "def", "countHighestScoreNodes", "(", "self", ",", "parents", ")", ":", "def", "iter_dfs", "(", "adj", ")", ":", "result", "=", "[", "0", "]", "*", "2", "stk", "=", "[", "(", "1", ",", "(", "0", ",", "[", "0", "]", ")", ")", "]", "while", "stk", ":", "step", ",", "args", "=", "stk", ".", "pop", "(", ")", "if", "step", "==", "1", ":", "i", ",", "ret", "=", "args", "cnts", "=", "[", "[", "0", "]", "for", "_", "in", "xrange", "(", "len", "(", "adj", "[", "i", "]", ")", ")", "]", "stk", ".", "append", "(", "(", "2", ",", "(", "cnts", ",", "ret", ")", ")", ")", "for", "j", ",", "child", "in", "enumerate", "(", "adj", "[", "i", "]", ")", ":", "stk", ".", "append", "(", "(", "1", ",", "(", "child", ",", "cnts", "[", "j", "]", ")", ")", ")", "elif", "step", "==", "2", ":", "cnts", ",", "ret", "=", "args", "ret", "[", "0", "]", "=", "sum", "(", "cnt", "[", "0", "]", "for", "cnt", "in", "cnts", ")", "+", "1", "score", "=", "max", "(", "(", "len", "(", "adj", ")", "-", "ret", "[", "0", "]", ")", ",", "1", ")", "*", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", "[", "0", "]", ",", "cnts", ",", "1", ")", "if", "score", ">", "result", "[", "0", "]", ":", "result", "[", ":", "]", "=", "[", "score", ",", "1", "]", "elif", "score", "==", "result", "[", "0", "]", ":", "result", "[", "1", "]", "+=", "1", "return", "result", "[", "1", "]", "adj", "=", "[", "[", "]", "for", "_", "in", "xrange", "(", "len", "(", "parents", ")", ")", "]", "# Space: O(n)", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "parents", ")", ")", ":", "adj", "[", "parents", "[", "i", "]", "]", ".", "append", "(", "i", ")", "return", "iter_dfs", "(", "adj", ")" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/count-nodes-with-the-highest-score.py#L5-L34
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/plat-mac/aepack.py
python
pack
(x, forcetype = None)
return AE.AECreateDesc('TEXT', repr(x))
Pack a python object into an AE descriptor
[ "Pack", "a", "python", "object", "into", "an", "AE", "descriptor" ]
def pack(x, forcetype = None): """Pack a python object into an AE descriptor""" if forcetype: if type(x) is StringType: return AE.AECreateDesc(forcetype, x) else: return pack(x).AECoerceDesc(forcetype) if x is None: return AE.AECreateDesc('null', '') if isinstance(x, AEDescType): return x if isinstance(x, FSSType): return AE.AECreateDesc('fss ', x.data) if isinstance(x, FSRefType): return AE.AECreateDesc('fsrf', x.data) if isinstance(x, AliasType): return AE.AECreateDesc('alis', x.data) if isinstance(x, IntType): return AE.AECreateDesc('long', struct.pack('l', x)) if isinstance(x, FloatType): return AE.AECreateDesc('doub', struct.pack('d', x)) if isinstance(x, StringType): return AE.AECreateDesc('TEXT', x) if isinstance(x, UnicodeType): data = x.encode('utf16') if data[:2] == '\xfe\xff': data = data[2:] return AE.AECreateDesc('utxt', data) if isinstance(x, ListType): list = AE.AECreateList('', 0) for item in x: list.AEPutDesc(0, pack(item)) return list if isinstance(x, DictionaryType): record = AE.AECreateList('', 1) for key, value in x.items(): packkey(record, key, value) #record.AEPutParamDesc(key, pack(value)) return record if type(x) == types.ClassType and issubclass(x, ObjectSpecifier): # Note: we are getting a class object here, not an instance return AE.AECreateDesc('type', x.want) if hasattr(x, '__aepack__'): return x.__aepack__() if hasattr(x, 'which'): return AE.AECreateDesc('TEXT', x.which) if hasattr(x, 'want'): return AE.AECreateDesc('TEXT', x.want) return AE.AECreateDesc('TEXT', repr(x))
[ "def", "pack", "(", "x", ",", "forcetype", "=", "None", ")", ":", "if", "forcetype", ":", "if", "type", "(", "x", ")", "is", "StringType", ":", "return", "AE", ".", "AECreateDesc", "(", "forcetype", ",", "x", ")", "else", ":", "return", "pack", "(", "x", ")", ".", "AECoerceDesc", "(", "forcetype", ")", "if", "x", "is", "None", ":", "return", "AE", ".", "AECreateDesc", "(", "'null'", ",", "''", ")", "if", "isinstance", "(", "x", ",", "AEDescType", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "FSSType", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'fss '", ",", "x", ".", "data", ")", "if", "isinstance", "(", "x", ",", "FSRefType", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'fsrf'", ",", "x", ".", "data", ")", "if", "isinstance", "(", "x", ",", "AliasType", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'alis'", ",", "x", ".", "data", ")", "if", "isinstance", "(", "x", ",", "IntType", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'long'", ",", "struct", ".", "pack", "(", "'l'", ",", "x", ")", ")", "if", "isinstance", "(", "x", ",", "FloatType", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'doub'", ",", "struct", ".", "pack", "(", "'d'", ",", "x", ")", ")", "if", "isinstance", "(", "x", ",", "StringType", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'TEXT'", ",", "x", ")", "if", "isinstance", "(", "x", ",", "UnicodeType", ")", ":", "data", "=", "x", ".", "encode", "(", "'utf16'", ")", "if", "data", "[", ":", "2", "]", "==", "'\\xfe\\xff'", ":", "data", "=", "data", "[", "2", ":", "]", "return", "AE", ".", "AECreateDesc", "(", "'utxt'", ",", "data", ")", "if", "isinstance", "(", "x", ",", "ListType", ")", ":", "list", "=", "AE", ".", "AECreateList", "(", "''", ",", "0", ")", "for", "item", "in", "x", ":", "list", ".", "AEPutDesc", "(", "0", ",", "pack", "(", "item", ")", ")", "return", "list", "if", "isinstance", "(", "x", ",", "DictionaryType", ")", ":", "record", "=", "AE", ".", "AECreateList", "(", "''", ",", "1", ")", "for", "key", ",", "value", "in", "x", ".", "items", "(", ")", ":", "packkey", "(", "record", ",", "key", ",", "value", ")", "#record.AEPutParamDesc(key, pack(value))", "return", "record", "if", "type", "(", "x", ")", "==", "types", ".", "ClassType", "and", "issubclass", "(", "x", ",", "ObjectSpecifier", ")", ":", "# Note: we are getting a class object here, not an instance", "return", "AE", ".", "AECreateDesc", "(", "'type'", ",", "x", ".", "want", ")", "if", "hasattr", "(", "x", ",", "'__aepack__'", ")", ":", "return", "x", ".", "__aepack__", "(", ")", "if", "hasattr", "(", "x", ",", "'which'", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'TEXT'", ",", "x", ".", "which", ")", "if", "hasattr", "(", "x", ",", "'want'", ")", ":", "return", "AE", ".", "AECreateDesc", "(", "'TEXT'", ",", "x", ".", "want", ")", "return", "AE", ".", "AECreateDesc", "(", "'TEXT'", ",", "repr", "(", "x", ")", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/plat-mac/aepack.py#L78-L129
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_core.py
python
Validator.TransferFromWindow
(*args, **kwargs)
return _core_.Validator_TransferFromWindow(*args, **kwargs)
TransferFromWindow(self) -> bool
[ "TransferFromWindow", "(", "self", ")", "-", ">", "bool" ]
def TransferFromWindow(*args, **kwargs): """TransferFromWindow(self) -> bool""" return _core_.Validator_TransferFromWindow(*args, **kwargs)
[ "def", "TransferFromWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Validator_TransferFromWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L11884-L11886
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
HeaderColumn.IsSortKey
(*args, **kwargs)
return _core_.HeaderColumn_IsSortKey(*args, **kwargs)
IsSortKey(self) -> bool
[ "IsSortKey", "(", "self", ")", "-", ">", "bool" ]
def IsSortKey(*args, **kwargs): """IsSortKey(self) -> bool""" return _core_.HeaderColumn_IsSortKey(*args, **kwargs)
[ "def", "IsSortKey", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "HeaderColumn_IsSortKey", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L16432-L16434
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/python/m5/util/fdthelper.py
python
Fdt.writeDtsFile
(self, filename)
Convert the device tree to DTS and write to a file.
[ "Convert", "the", "device", "tree", "to", "DTS", "and", "write", "to", "a", "file", "." ]
def writeDtsFile(self, filename): """Convert the device tree to DTS and write to a file.""" filename = os.path.realpath(filename) try: with open(filename, 'w') as f: f.write(self.to_dts()) return filename except IOError: raise RuntimeError("Failed to open DTS output file")
[ "def", "writeDtsFile", "(", "self", ",", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "realpath", "(", "filename", ")", "try", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "to_dts", "(", ")", ")", "return", "filename", "except", "IOError", ":", "raise", "RuntimeError", "(", "\"Failed to open DTS output file\"", ")" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/m5/util/fdthelper.py#L260-L268
albertz/openlierox
d316c14a8eb57848ef56e9bfa7b23a56f694a51b
tools/DedicatedServerVideo/gdata/Crypto/PublicKey/pubkey.py
python
pubkey.verify
(self, M, signature)
return self._verify(M, signature)
verify(M:string|long, signature:tuple) : bool Verify that the signature is valid for the message M; returns true if the signature checks out.
[ "verify", "(", "M", ":", "string|long", "signature", ":", "tuple", ")", ":", "bool", "Verify", "that", "the", "signature", "is", "valid", "for", "the", "message", "M", ";", "returns", "true", "if", "the", "signature", "checks", "out", "." ]
def verify (self, M, signature): """verify(M:string|long, signature:tuple) : bool Verify that the signature is valid for the message M; returns true if the signature checks out. """ if isinstance(M, types.StringType): M=bytes_to_long(M) return self._verify(M, signature)
[ "def", "verify", "(", "self", ",", "M", ",", "signature", ")", ":", "if", "isinstance", "(", "M", ",", "types", ".", "StringType", ")", ":", "M", "=", "bytes_to_long", "(", "M", ")", "return", "self", ".", "_verify", "(", "M", ",", "signature", ")" ]
https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/gdata/Crypto/PublicKey/pubkey.py#L78-L84
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py
python
Decimal.copy_sign
(self, other, context=None)
return _dec_from_triple(other._sign, self._int, self._exp, self._is_special)
Returns self with the sign of other.
[ "Returns", "self", "with", "the", "sign", "of", "other", "." ]
def copy_sign(self, other, context=None): """Returns self with the sign of other.""" other = _convert_other(other, raiseit=True) return _dec_from_triple(other._sign, self._int, self._exp, self._is_special)
[ "def", "copy_sign", "(", "self", ",", "other", ",", "context", "=", "None", ")", ":", "other", "=", "_convert_other", "(", "other", ",", "raiseit", "=", "True", ")", "return", "_dec_from_triple", "(", "other", ".", "_sign", ",", "self", ".", "_int", ",", "self", ".", "_exp", ",", "self", ".", "_is_special", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py#L3030-L3034
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
src/pybind/mgr/mgr_module.py
python
MgrModule.get_mgr_id
(self)
return self._ceph_get_mgr_id()
Retrieve the name of the manager daemon where this plugin is currently being executed (i.e. the active manager).
[ "Retrieve", "the", "name", "of", "the", "manager", "daemon", "where", "this", "plugin", "is", "currently", "being", "executed", "(", "i", ".", "e", ".", "the", "active", "manager", ")", "." ]
def get_mgr_id(self) -> str: """ Retrieve the name of the manager daemon where this plugin is currently being executed (i.e. the active manager). :return: str """ return self._ceph_get_mgr_id()
[ "def", "get_mgr_id", "(", "self", ")", "->", "str", ":", "return", "self", ".", "_ceph_get_mgr_id", "(", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/mgr_module.py#L1729-L1736
plaidml/plaidml
f3c6681db21460e5fdc11ae651d6d7b6c27f8262
plaidml/edsl/__init__.py
python
pow
(x, y)
return intrinsic('pow', x, y)
Computes the elementwise ``y``th power of ``x``.
[ "Computes", "the", "elementwise", "y", "th", "power", "of", "x", "." ]
def pow(x, y): """Computes the elementwise ``y``th power of ``x``. Args: x (Tensor): The base Tensor. y (Tensor): The exponent Tensor. Returns: Tensor: The resultant tensor. """ return intrinsic('pow', x, y)
[ "def", "pow", "(", "x", ",", "y", ")", ":", "return", "intrinsic", "(", "'pow'", ",", "x", ",", "y", ")" ]
https://github.com/plaidml/plaidml/blob/f3c6681db21460e5fdc11ae651d6d7b6c27f8262/plaidml/edsl/__init__.py#L990-L1000
JumpingYang001/webrtc
c03d6e965e1f54aeadd670e491eabe5fdb8db968
rtc_tools/py_event_log_analyzer/rtp_analyzer.py
python
RTPStatistics.ComputeBandwidth
(self)
Computes bandwidth averaged over several consecutive packets.
[ "Computes", "bandwidth", "averaged", "over", "several", "consecutive", "packets", "." ]
def ComputeBandwidth(self): """Computes bandwidth averaged over several consecutive packets. The number of consecutive packets used in the average is BANDWIDTH_SMOOTHING_WINDOW_SIZE. Averaging is done with numpy.correlate. """ start_ms = self.data_points[0].real_send_time_ms stop_ms = self.data_points[-1].real_send_time_ms (self.bandwidth_kbps, _) = numpy.histogram( [point.real_send_time_ms for point in self.data_points], bins=numpy.arange(start_ms, stop_ms, RTPStatistics.PLOT_RESOLUTION_MS), weights=[ point.size * 8 / RTPStatistics.PLOT_RESOLUTION_MS for point in self.data_points ]) correlate_filter = ( numpy.ones(RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) / RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) self.smooth_bw_kbps = numpy.correlate(self.bandwidth_kbps, correlate_filter)
[ "def", "ComputeBandwidth", "(", "self", ")", ":", "start_ms", "=", "self", ".", "data_points", "[", "0", "]", ".", "real_send_time_ms", "stop_ms", "=", "self", ".", "data_points", "[", "-", "1", "]", ".", "real_send_time_ms", "(", "self", ".", "bandwidth_kbps", ",", "_", ")", "=", "numpy", ".", "histogram", "(", "[", "point", ".", "real_send_time_ms", "for", "point", "in", "self", ".", "data_points", "]", ",", "bins", "=", "numpy", ".", "arange", "(", "start_ms", ",", "stop_ms", ",", "RTPStatistics", ".", "PLOT_RESOLUTION_MS", ")", ",", "weights", "=", "[", "point", ".", "size", "*", "8", "/", "RTPStatistics", ".", "PLOT_RESOLUTION_MS", "for", "point", "in", "self", ".", "data_points", "]", ")", "correlate_filter", "=", "(", "numpy", ".", "ones", "(", "RTPStatistics", ".", "BANDWIDTH_SMOOTHING_WINDOW_SIZE", ")", "/", "RTPStatistics", ".", "BANDWIDTH_SMOOTHING_WINDOW_SIZE", ")", "self", ".", "smooth_bw_kbps", "=", "numpy", ".", "correlate", "(", "self", ".", "bandwidth_kbps", ",", "correlate_filter", ")" ]
https://github.com/JumpingYang001/webrtc/blob/c03d6e965e1f54aeadd670e491eabe5fdb8db968/rtc_tools/py_event_log_analyzer/rtp_analyzer.py#L232-L253
nasa/fprime
595cf3682d8365943d86c1a6fe7c78f0a116acf0
Autocoders/Python/src/fprime_ac/generators/visitors/TopologyCppVisitor.py
python
TopologyCppVisitor.includes2Visit
(self, obj)
Defined to generate internal includes within a file. Usually used for data type includes and system includes.
[ "Defined", "to", "generate", "internal", "includes", "within", "a", "file", ".", "Usually", "used", "for", "data", "type", "includes", "and", "system", "includes", "." ]
def includes2Visit(self, obj): """ Defined to generate internal includes within a file. Usually used for data type includes and system includes. @param args: the instance of the concrete element to operation on. """
[ "def", "includes2Visit", "(", "self", ",", "obj", ")", ":" ]
https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/generators/visitors/TopologyCppVisitor.py#L227-L232
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/requests/models.py
python
Response.apparent_encoding
(self)
return chardet.detect(self.content)['encoding']
The apparent encoding, provided by the chardet library.
[ "The", "apparent", "encoding", "provided", "by", "the", "chardet", "library", "." ]
def apparent_encoding(self): """The apparent encoding, provided by the chardet library.""" return chardet.detect(self.content)['encoding']
[ "def", "apparent_encoding", "(", "self", ")", ":", "return", "chardet", ".", "detect", "(", "self", ".", "content", ")", "[", "'encoding'", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/requests/models.py#L728-L730
pyne/pyne
0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3
pyne/dbgen/ndsfpy.py
python
readtable
(i, spdat)
return dfinal
Reads in a set of 5 html tables and returns corresponding yield data
[ "Reads", "in", "a", "set", "of", "5", "html", "tables", "and", "returns", "corresponding", "yield", "data" ]
def readtable(i, spdat): """ Reads in a set of 5 html tables and returns corresponding yield data """ parent = getdata(i, spdat)[0] pfinal = (parent.split('<strong>')[1]).split('</strong>')[0] pid = conv_to_id(pfinal) fpdata = getdata(i + 1, spdat) dt = np.dtype([('from_nuc', 'i4'), ('to_nuc', 'i4'), ('yield_thermal', float), ('yield_thermal_err', float), ('yield_fast', float), ('yield_fast_err', float), ('yield_14MeV', float), ('yield_14MeV_err', float) ]) dfinal = np.zeros((len(fpdata),), dtype=dt) for index, item in enumerate(fpdata): dfinal[index]['from_nuc'] = pid dfinal[index]['to_nuc'] = conv_to_id(item) thermaldata = getdata(i + 2, spdat) for index, item in enumerate(thermaldata): dat, err = conv_to_num(item) dfinal[index]['yield_thermal'] = dat dfinal[index]['yield_thermal_err'] = err fastdata = getdata(i + 3, spdat) for index, item in enumerate(fastdata): dat, err = conv_to_num(item) dfinal[index]['yield_fast'] = dat dfinal[index]['yield_fast_err'] = err dtdata = getdata(i + 4, spdat) for index, item in enumerate(dtdata): dat, err = conv_to_num(item) dfinal[index]['yield_14MeV'] = dat dfinal[index]['yield_14MeV_err'] = err return dfinal
[ "def", "readtable", "(", "i", ",", "spdat", ")", ":", "parent", "=", "getdata", "(", "i", ",", "spdat", ")", "[", "0", "]", "pfinal", "=", "(", "parent", ".", "split", "(", "'<strong>'", ")", "[", "1", "]", ")", ".", "split", "(", "'</strong>'", ")", "[", "0", "]", "pid", "=", "conv_to_id", "(", "pfinal", ")", "fpdata", "=", "getdata", "(", "i", "+", "1", ",", "spdat", ")", "dt", "=", "np", ".", "dtype", "(", "[", "(", "'from_nuc'", ",", "'i4'", ")", ",", "(", "'to_nuc'", ",", "'i4'", ")", ",", "(", "'yield_thermal'", ",", "float", ")", ",", "(", "'yield_thermal_err'", ",", "float", ")", ",", "(", "'yield_fast'", ",", "float", ")", ",", "(", "'yield_fast_err'", ",", "float", ")", ",", "(", "'yield_14MeV'", ",", "float", ")", ",", "(", "'yield_14MeV_err'", ",", "float", ")", "]", ")", "dfinal", "=", "np", ".", "zeros", "(", "(", "len", "(", "fpdata", ")", ",", ")", ",", "dtype", "=", "dt", ")", "for", "index", ",", "item", "in", "enumerate", "(", "fpdata", ")", ":", "dfinal", "[", "index", "]", "[", "'from_nuc'", "]", "=", "pid", "dfinal", "[", "index", "]", "[", "'to_nuc'", "]", "=", "conv_to_id", "(", "item", ")", "thermaldata", "=", "getdata", "(", "i", "+", "2", ",", "spdat", ")", "for", "index", ",", "item", "in", "enumerate", "(", "thermaldata", ")", ":", "dat", ",", "err", "=", "conv_to_num", "(", "item", ")", "dfinal", "[", "index", "]", "[", "'yield_thermal'", "]", "=", "dat", "dfinal", "[", "index", "]", "[", "'yield_thermal_err'", "]", "=", "err", "fastdata", "=", "getdata", "(", "i", "+", "3", ",", "spdat", ")", "for", "index", ",", "item", "in", "enumerate", "(", "fastdata", ")", ":", "dat", ",", "err", "=", "conv_to_num", "(", "item", ")", "dfinal", "[", "index", "]", "[", "'yield_fast'", "]", "=", "dat", "dfinal", "[", "index", "]", "[", "'yield_fast_err'", "]", "=", "err", "dtdata", "=", "getdata", "(", "i", "+", "4", ",", "spdat", ")", "for", "index", ",", "item", "in", "enumerate", "(", "dtdata", ")", ":", "dat", ",", "err", "=", "conv_to_num", "(", "item", ")", "dfinal", "[", "index", "]", "[", "'yield_14MeV'", "]", "=", "dat", "dfinal", "[", "index", "]", "[", "'yield_14MeV_err'", "]", "=", "err", "return", "dfinal" ]
https://github.com/pyne/pyne/blob/0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3/pyne/dbgen/ndsfpy.py#L39-L71
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/Jinja2/py2/jinja2/compiler.py
python
CodeGenerator.pull_dependencies
(self, nodes)
Pull all the dependencies.
[ "Pull", "all", "the", "dependencies", "." ]
def pull_dependencies(self, nodes): """Pull all the dependencies.""" visitor = DependencyFinderVisitor() for node in nodes: visitor.visit(node) for dependency in "filters", "tests": mapping = getattr(self, dependency) for name in getattr(visitor, dependency): if name not in mapping: mapping[name] = self.temporary_identifier() self.writeline( "%s = environment.%s[%r]" % (mapping[name], dependency, name) )
[ "def", "pull_dependencies", "(", "self", ",", "nodes", ")", ":", "visitor", "=", "DependencyFinderVisitor", "(", ")", "for", "node", "in", "nodes", ":", "visitor", ".", "visit", "(", "node", ")", "for", "dependency", "in", "\"filters\"", ",", "\"tests\"", ":", "mapping", "=", "getattr", "(", "self", ",", "dependency", ")", "for", "name", "in", "getattr", "(", "visitor", ",", "dependency", ")", ":", "if", "name", "not", "in", "mapping", ":", "mapping", "[", "name", "]", "=", "self", ".", "temporary_identifier", "(", ")", "self", ".", "writeline", "(", "\"%s = environment.%s[%r]\"", "%", "(", "mapping", "[", "name", "]", ",", "dependency", ",", "name", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/Jinja2/py2/jinja2/compiler.py#L464-L476
idaholab/moose
9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
python/MooseDocs/base/renderers.py
python
MaterializeRenderer._method
(self, component)
Fallback to the HTMLRenderer method if the MaterializeRenderer method is not located.
[ "Fallback", "to", "the", "HTMLRenderer", "method", "if", "the", "MaterializeRenderer", "method", "is", "not", "located", "." ]
def _method(self, component): """ Fallback to the HTMLRenderer method if the MaterializeRenderer method is not located. Inputs: component[RenderComponent]: Object to use for locating desired method for renderering. """ if hasattr(component, self.METHOD): return getattr(component, self.METHOD) elif hasattr(component, HTMLRenderer.METHOD): return getattr(component, HTMLRenderer.METHOD) else: msg = "The component object {} does not have a {} method." raise exceptions.MooseDocsException(msg, type(component), self.METHOD)
[ "def", "_method", "(", "self", ",", "component", ")", ":", "if", "hasattr", "(", "component", ",", "self", ".", "METHOD", ")", ":", "return", "getattr", "(", "component", ",", "self", ".", "METHOD", ")", "elif", "hasattr", "(", "component", ",", "HTMLRenderer", ".", "METHOD", ")", ":", "return", "getattr", "(", "component", ",", "HTMLRenderer", ".", "METHOD", ")", "else", ":", "msg", "=", "\"The component object {} does not have a {} method.\"", "raise", "exceptions", ".", "MooseDocsException", "(", "msg", ",", "type", "(", "component", ")", ",", "self", ".", "METHOD", ")" ]
https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/MooseDocs/base/renderers.py#L361-L374
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/sheet.py
python
CSheet.OnRightClick
(self, event)
Move grid cursor when a cell is right-clicked
[ "Move", "grid", "cursor", "when", "a", "cell", "is", "right", "-", "clicked" ]
def OnRightClick(self, event): """ Move grid cursor when a cell is right-clicked """ self.SetGridCursor( event.GetRow(), event.GetCol() ) event.Skip()
[ "def", "OnRightClick", "(", "self", ",", "event", ")", ":", "self", ".", "SetGridCursor", "(", "event", ".", "GetRow", "(", ")", ",", "event", ".", "GetCol", "(", ")", ")", "event", ".", "Skip", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/sheet.py#L222-L225
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Util.py
python
PrependPath
(oldpath, newpath, sep = os.pathsep, delete_existing=1, canonicalize=None)
This prepends newpath elements to the given oldpath. Will only add any particular path once (leaving the first one it encounters and ignoring the rest, to preserve path order), and will os.path.normpath and os.path.normcase all paths to help assure this. This can also handle the case where the given old path variable is a list instead of a string, in which case a list will be returned instead of a string.
[ "This", "prepends", "newpath", "elements", "to", "the", "given", "oldpath", ".", "Will", "only", "add", "any", "particular", "path", "once", "(", "leaving", "the", "first", "one", "it", "encounters", "and", "ignoring", "the", "rest", "to", "preserve", "path", "order", ")", "and", "will", "os", ".", "path", ".", "normpath", "and", "os", ".", "path", ".", "normcase", "all", "paths", "to", "help", "assure", "this", ".", "This", "can", "also", "handle", "the", "case", "where", "the", "given", "old", "path", "variable", "is", "a", "list", "instead", "of", "a", "string", "in", "which", "case", "a", "list", "will", "be", "returned", "instead", "of", "a", "string", "." ]
def PrependPath(oldpath, newpath, sep = os.pathsep, delete_existing=1, canonicalize=None): """This prepends newpath elements to the given oldpath. Will only add any particular path once (leaving the first one it encounters and ignoring the rest, to preserve path order), and will os.path.normpath and os.path.normcase all paths to help assure this. This can also handle the case where the given old path variable is a list instead of a string, in which case a list will be returned instead of a string. Example: Old Path: "/foo/bar:/foo" New Path: "/biz/boom:/foo" Result: "/biz/boom:/foo:/foo/bar" If delete_existing is 0, then adding a path that exists will not move it to the beginning; it will stay where it is in the list. If canonicalize is not None, it is applied to each element of newpath before use. """ orig = oldpath is_list = 1 paths = orig if not is_List(orig) and not is_Tuple(orig): paths = paths.split(sep) is_list = 0 if is_String(newpath): newpaths = newpath.split(sep) elif not is_List(newpath) and not is_Tuple(newpath): newpaths = [ newpath ] # might be a Dir else: newpaths = newpath if canonicalize: newpaths=list(map(canonicalize, newpaths)) if not delete_existing: # First uniquify the old paths, making sure to # preserve the first instance (in Unix/Linux, # the first one wins), and remembering them in normpaths. # Then insert the new paths at the head of the list # if they're not already in the normpaths list. result = [] normpaths = [] for path in paths: if not path: continue normpath = os.path.normpath(os.path.normcase(path)) if normpath not in normpaths: result.append(path) normpaths.append(normpath) newpaths.reverse() # since we're inserting at the head for path in newpaths: if not path: continue normpath = os.path.normpath(os.path.normcase(path)) if normpath not in normpaths: result.insert(0, path) normpaths.append(normpath) paths = result else: newpaths = newpaths + paths # prepend new paths normpaths = [] paths = [] # now we add them only if they are unique for path in newpaths: normpath = os.path.normpath(os.path.normcase(path)) if path and not normpath in normpaths: paths.append(path) normpaths.append(normpath) if is_list: return paths else: return sep.join(paths)
[ "def", "PrependPath", "(", "oldpath", ",", "newpath", ",", "sep", "=", "os", ".", "pathsep", ",", "delete_existing", "=", "1", ",", "canonicalize", "=", "None", ")", ":", "orig", "=", "oldpath", "is_list", "=", "1", "paths", "=", "orig", "if", "not", "is_List", "(", "orig", ")", "and", "not", "is_Tuple", "(", "orig", ")", ":", "paths", "=", "paths", ".", "split", "(", "sep", ")", "is_list", "=", "0", "if", "is_String", "(", "newpath", ")", ":", "newpaths", "=", "newpath", ".", "split", "(", "sep", ")", "elif", "not", "is_List", "(", "newpath", ")", "and", "not", "is_Tuple", "(", "newpath", ")", ":", "newpaths", "=", "[", "newpath", "]", "# might be a Dir", "else", ":", "newpaths", "=", "newpath", "if", "canonicalize", ":", "newpaths", "=", "list", "(", "map", "(", "canonicalize", ",", "newpaths", ")", ")", "if", "not", "delete_existing", ":", "# First uniquify the old paths, making sure to ", "# preserve the first instance (in Unix/Linux,", "# the first one wins), and remembering them in normpaths.", "# Then insert the new paths at the head of the list", "# if they're not already in the normpaths list.", "result", "=", "[", "]", "normpaths", "=", "[", "]", "for", "path", "in", "paths", ":", "if", "not", "path", ":", "continue", "normpath", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "normcase", "(", "path", ")", ")", "if", "normpath", "not", "in", "normpaths", ":", "result", ".", "append", "(", "path", ")", "normpaths", ".", "append", "(", "normpath", ")", "newpaths", ".", "reverse", "(", ")", "# since we're inserting at the head", "for", "path", "in", "newpaths", ":", "if", "not", "path", ":", "continue", "normpath", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "normcase", "(", "path", ")", ")", "if", "normpath", "not", "in", "normpaths", ":", "result", ".", "insert", "(", "0", ",", "path", ")", "normpaths", ".", "append", "(", "normpath", ")", "paths", "=", "result", "else", ":", "newpaths", "=", "newpaths", "+", "paths", "# prepend new paths", "normpaths", "=", "[", "]", "paths", "=", "[", "]", "# now we add them only if they are unique", "for", "path", "in", "newpaths", ":", "normpath", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "normcase", "(", "path", ")", ")", "if", "path", "and", "not", "normpath", "in", "normpaths", ":", "paths", ".", "append", "(", "path", ")", "normpaths", ".", "append", "(", "normpath", ")", "if", "is_list", ":", "return", "paths", "else", ":", "return", "sep", ".", "join", "(", "paths", ")" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Util.py#L736-L816
sdhash/sdhash
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
sdhash-ui/jinja2/utils.py
python
LRUCache.itervalue
(self)
return iter(self.values())
Iterate over all values.
[ "Iterate", "over", "all", "values", "." ]
def itervalue(self): """Iterate over all values.""" return iter(self.values())
[ "def", "itervalue", "(", "self", ")", ":", "return", "iter", "(", "self", ".", "values", "(", ")", ")" ]
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/jinja2/utils.py#L508-L510
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/distributions/python/ops/vector_student_t.py
python
_VectorStudentT.__init__
(self, df, loc=None, scale_identity_multiplier=None, scale_diag=None, scale_tril=None, scale_perturb_factor=None, scale_perturb_diag=None, validate_args=False, allow_nan_stats=True, name="VectorStudentT")
Instantiates the vector Student's t-distributions on `R^k`.
[ "Instantiates", "the", "vector", "Student", "s", "t", "-", "distributions", "on", "R^k", "." ]
def __init__(self, df, loc=None, scale_identity_multiplier=None, scale_diag=None, scale_tril=None, scale_perturb_factor=None, scale_perturb_diag=None, validate_args=False, allow_nan_stats=True, name="VectorStudentT"): """Instantiates the vector Student's t-distributions on `R^k`. The `batch_shape` is the broadcast between `df.batch_shape` and `Affine.batch_shape` where `Affine` is constructed from `loc` and `scale_*` arguments. The `event_shape` is the event shape of `Affine.event_shape`. Args: df: Floating-point `Tensor`. The degrees of freedom of the distribution(s). `df` must contain only positive values. Must be scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the same `batch_shape` implied by `loc`, `scale_*`. loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is applied. scale_identity_multiplier: floating point rank 0 `Tensor` representing a scaling done to the identity matrix. When `scale_identity_multiplier = scale_diag=scale_tril = None` then `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added to `scale`. scale_diag: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ..., k], which represents a k x k diagonal matrix. When `None` no diagonal term is added to `scale`. scale_tril: Floating-point `Tensor` representing the diagonal matrix. `scale_diag` has shape [N1, N2, ..., k, k], which represents a k x k lower triangular matrix. When `None` no `scale_tril` term is added to `scale`. The upper triangular elements above the diagonal are ignored. scale_perturb_factor: Floating-point `Tensor` representing factor matrix with last two dimensions of shape `(k, r)`. When `None`, no rank-r update is added to `scale`. scale_perturb_diag: Floating-point `Tensor` representing the diagonal matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which represents an r x r Diagonal matrix. When `None` low rank updates will take the form `scale_perturb_factor * scale_perturb_factor.T`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = locals() graph_parents = [df, loc, scale_identity_multiplier, scale_diag, scale_tril, scale_perturb_factor, scale_perturb_diag] with ops.name_scope(name): with ops.name_scope("init", values=graph_parents): # The shape of the _VectorStudentT distribution is governed by the # relationship between df.batch_shape and affine.batch_shape. In # pseudocode the basic procedure is: # if df.batch_shape is scalar: # if affine.batch_shape is not scalar: # # broadcast distribution.sample so # # it has affine.batch_shape. # self.batch_shape = affine.batch_shape # else: # if affine.batch_shape is scalar: # # let affine broadcasting do its thing. # self.batch_shape = df.batch_shape # All of the above magic is actually handled by TransformedDistribution. # Here we really only need to collect the affine.batch_shape and decide # what we're going to pass in to TransformedDistribution's # (override) batch_shape arg. 
affine = bijectors.Affine( shift=loc, scale_identity_multiplier=scale_identity_multiplier, scale_diag=scale_diag, scale_tril=scale_tril, scale_perturb_factor=scale_perturb_factor, scale_perturb_diag=scale_perturb_diag, validate_args=validate_args) distribution = student_t.StudentT( df=df, loc=array_ops.zeros([], dtype=affine.dtype), scale=array_ops.ones([], dtype=affine.dtype)) batch_shape, override_event_shape = ( distribution_util.shapes_from_loc_and_scale( affine.shift, affine.scale)) override_batch_shape = distribution_util.pick_vector( distribution.is_scalar_batch(), batch_shape, constant_op.constant([], dtype=dtypes.int32)) super(_VectorStudentT, self).__init__( distribution=distribution, bijector=affine, batch_shape=override_batch_shape, event_shape=override_event_shape, validate_args=validate_args, name=name) self._parameters = parameters
[ "def", "__init__", "(", "self", ",", "df", ",", "loc", "=", "None", ",", "scale_identity_multiplier", "=", "None", ",", "scale_diag", "=", "None", ",", "scale_tril", "=", "None", ",", "scale_perturb_factor", "=", "None", ",", "scale_perturb_diag", "=", "None", ",", "validate_args", "=", "False", ",", "allow_nan_stats", "=", "True", ",", "name", "=", "\"VectorStudentT\"", ")", ":", "parameters", "=", "locals", "(", ")", "graph_parents", "=", "[", "df", ",", "loc", ",", "scale_identity_multiplier", ",", "scale_diag", ",", "scale_tril", ",", "scale_perturb_factor", ",", "scale_perturb_diag", "]", "with", "ops", ".", "name_scope", "(", "name", ")", ":", "with", "ops", ".", "name_scope", "(", "\"init\"", ",", "values", "=", "graph_parents", ")", ":", "# The shape of the _VectorStudentT distribution is governed by the", "# relationship between df.batch_shape and affine.batch_shape. In", "# pseudocode the basic procedure is:", "# if df.batch_shape is scalar:", "# if affine.batch_shape is not scalar:", "# # broadcast distribution.sample so", "# # it has affine.batch_shape.", "# self.batch_shape = affine.batch_shape", "# else:", "# if affine.batch_shape is scalar:", "# # let affine broadcasting do its thing.", "# self.batch_shape = df.batch_shape", "# All of the above magic is actually handled by TransformedDistribution.", "# Here we really only need to collect the affine.batch_shape and decide", "# what we're going to pass in to TransformedDistribution's", "# (override) batch_shape arg.", "affine", "=", "bijectors", ".", "Affine", "(", "shift", "=", "loc", ",", "scale_identity_multiplier", "=", "scale_identity_multiplier", ",", "scale_diag", "=", "scale_diag", ",", "scale_tril", "=", "scale_tril", ",", "scale_perturb_factor", "=", "scale_perturb_factor", ",", "scale_perturb_diag", "=", "scale_perturb_diag", ",", "validate_args", "=", "validate_args", ")", "distribution", "=", "student_t", ".", "StudentT", "(", "df", "=", "df", ",", "loc", "=", "array_ops", ".", "zeros", "(", "[", "]", ",", "dtype", "=", "affine", ".", "dtype", ")", ",", "scale", "=", "array_ops", ".", "ones", "(", "[", "]", ",", "dtype", "=", "affine", ".", "dtype", ")", ")", "batch_shape", ",", "override_event_shape", "=", "(", "distribution_util", ".", "shapes_from_loc_and_scale", "(", "affine", ".", "shift", ",", "affine", ".", "scale", ")", ")", "override_batch_shape", "=", "distribution_util", ".", "pick_vector", "(", "distribution", ".", "is_scalar_batch", "(", ")", ",", "batch_shape", ",", "constant_op", ".", "constant", "(", "[", "]", ",", "dtype", "=", "dtypes", ".", "int32", ")", ")", "super", "(", "_VectorStudentT", ",", "self", ")", ".", "__init__", "(", "distribution", "=", "distribution", ",", "bijector", "=", "affine", ",", "batch_shape", "=", "override_batch_shape", ",", "event_shape", "=", "override_event_shape", ",", "validate_args", "=", "validate_args", ",", "name", "=", "name", ")", "self", ".", "_parameters", "=", "parameters" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/distributions/python/ops/vector_student_t.py#L124-L225
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/email/utils.py
python
decode_rfc2231
(s)
return parts
Decode string according to RFC 2231
[ "Decode", "string", "according", "to", "RFC", "2231" ]
def decode_rfc2231(s): """Decode string according to RFC 2231""" parts = s.split(TICK, 2) if len(parts) <= 2: return None, None, s return parts
[ "def", "decode_rfc2231", "(", "s", ")", ":", "parts", "=", "s", ".", "split", "(", "TICK", ",", "2", ")", "if", "len", "(", "parts", ")", "<=", "2", ":", "return", "None", ",", "None", ",", "s", "return", "parts" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/email/utils.py#L231-L236
SequoiaDB/SequoiaDB
2894ed7e5bd6fe57330afc900cf76d0ff0df9f64
tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py
python
parserCtxt.parseQuotedString
(self)
return ret
Parse and return a string between quotes or doublequotes TODO: Deprecated, to be removed at next drop of binary compatibility
[ "Parse", "and", "return", "a", "string", "between", "quotes", "or", "doublequotes", "TODO", ":", "Deprecated", "to", "be", "removed", "at", "next", "drop", "of", "binary", "compatibility" ]
def parseQuotedString(self): """Parse and return a string between quotes or doublequotes TODO: Deprecated, to be removed at next drop of binary compatibility """ ret = libxml2mod.xmlParseQuotedString(self._o) return ret
[ "def", "parseQuotedString", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlParseQuotedString", "(", "self", ".", "_o", ")", "return", "ret" ]
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L5353-L5358
apache/incubator-weex
5c25f0b59f7ac90703c363e7261f60bd06356dbe
weex_core/tools/cpplint.py
python
UpdateIncludeState
(filename, include_dict, io=codecs)
return True
Fill up the include_dict with new includes found from the file.
[ "Fill", "up", "the", "include_dict", "with", "new", "includes", "found", "from", "the", "file", "." ]
def UpdateIncludeState(filename, include_dict, io=codecs): """Fill up the include_dict with new includes found from the file. Args: filename: the name of the header to read. include_dict: a dictionary in which the headers are inserted. io: The io factory to use to read the file. Provided for testability. Returns: True if a header was successfully added. False otherwise. """ headerfile = None try: headerfile = io.open(filename, 'r', 'utf8', 'replace') except IOError: return False linenum = 0 for line in headerfile: linenum += 1 clean_line = CleanseComments(line) match = _RE_PATTERN_INCLUDE.search(clean_line) if match: include = match.group(2) include_dict.setdefault(include, linenum) return True
[ "def", "UpdateIncludeState", "(", "filename", ",", "include_dict", ",", "io", "=", "codecs", ")", ":", "headerfile", "=", "None", "try", ":", "headerfile", "=", "io", ".", "open", "(", "filename", ",", "'r'", ",", "'utf8'", ",", "'replace'", ")", "except", "IOError", ":", "return", "False", "linenum", "=", "0", "for", "line", "in", "headerfile", ":", "linenum", "+=", "1", "clean_line", "=", "CleanseComments", "(", "line", ")", "match", "=", "_RE_PATTERN_INCLUDE", ".", "search", "(", "clean_line", ")", "if", "match", ":", "include", "=", "match", ".", "group", "(", "2", ")", "include_dict", ".", "setdefault", "(", "include", ",", "linenum", ")", "return", "True" ]
https://github.com/apache/incubator-weex/blob/5c25f0b59f7ac90703c363e7261f60bd06356dbe/weex_core/tools/cpplint.py#L5471-L5495
clementine-player/Clementine
111379dfd027802b59125829fcf87e3e1d0ad73b
dist/cpplint.py
python
CheckForFunctionLengths
(filename, clean_lines, linenum, function_state, error)
Reports for long function bodies.
[ "Reports", "for", "long", "function", "bodies", "." ]
def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): """Reports for long function bodies. For an overview why this is done, see: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ lines = clean_lines.lines line = lines[linenum] joined_line = '' starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... match_result = Match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( not Match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False for start_linenum in xrange(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() if Search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore elif Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count()
[ "def", "CheckForFunctionLengths", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "function_state", ",", "error", ")", ":", "lines", "=", "clean_lines", ".", "lines", "line", "=", "lines", "[", "linenum", "]", "joined_line", "=", "''", "starting_func", "=", "False", "regexp", "=", "r'(\\w(\\w|::|\\*|\\&|\\s)*)\\('", "# decls * & space::name( ...", "match_result", "=", "Match", "(", "regexp", ",", "line", ")", "if", "match_result", ":", "# If the name is all caps and underscores, figure it's a macro and", "# ignore it, unless it's TEST or TEST_F.", "function_name", "=", "match_result", ".", "group", "(", "1", ")", ".", "split", "(", ")", "[", "-", "1", "]", "if", "function_name", "==", "'TEST'", "or", "function_name", "==", "'TEST_F'", "or", "(", "not", "Match", "(", "r'[A-Z_]+$'", ",", "function_name", ")", ")", ":", "starting_func", "=", "True", "if", "starting_func", ":", "body_found", "=", "False", "for", "start_linenum", "in", "xrange", "(", "linenum", ",", "clean_lines", ".", "NumLines", "(", ")", ")", ":", "start_line", "=", "lines", "[", "start_linenum", "]", "joined_line", "+=", "' '", "+", "start_line", ".", "lstrip", "(", ")", "if", "Search", "(", "r'(;|})'", ",", "start_line", ")", ":", "# Declarations and trivial functions", "body_found", "=", "True", "break", "# ... ignore", "elif", "Search", "(", "r'{'", ",", "start_line", ")", ":", "body_found", "=", "True", "function", "=", "Search", "(", "r'((\\w|:)*)\\('", ",", "line", ")", ".", "group", "(", "1", ")", "if", "Match", "(", "r'TEST'", ",", "function", ")", ":", "# Handle TEST... macros", "parameter_regexp", "=", "Search", "(", "r'(\\(.*\\))'", ",", "joined_line", ")", "if", "parameter_regexp", ":", "# Ignore bad syntax", "function", "+=", "parameter_regexp", ".", "group", "(", "1", ")", "else", ":", "function", "+=", "'()'", "function_state", ".", "Begin", "(", "function", ")", "break", "if", "not", "body_found", ":", "# No body for the function (or evidence of a non-function) was found.", "error", "(", "filename", ",", "linenum", ",", "'readability/fn_size'", ",", "5", ",", "'Lint failed to find start of function body.'", ")", "elif", "Match", "(", "r'^\\}\\s*$'", ",", "line", ")", ":", "# function end", "function_state", ".", "Check", "(", "error", ",", "filename", ",", "linenum", ")", "function_state", ".", "End", "(", ")", "elif", "not", "Match", "(", "r'^\\s*$'", ",", "line", ")", ":", "function_state", ".", "Count", "(", ")" ]
https://github.com/clementine-player/Clementine/blob/111379dfd027802b59125829fcf87e3e1d0ad73b/dist/cpplint.py#L2776-L2841
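The check above is stateful: cpplint invokes it once per line and threads a shared function_state through the whole file. A minimal driver sketch under that assumption — `CleansedLines` and `_FunctionState` are the real classes defined elsewhere in cpplint.py, while `print_error` is a hypothetical stand-in matching the `error(filename, linenum, category, confidence, message)` signature used in the record:

```python
# Hypothetical driver: run the function-length check over a cleansed file.
def print_error(filename, linenum, category, confidence, message):
    print('%s:%d:  %s  [%s] [%d]' % (filename, linenum, message,
                                     category, confidence))

def check_function_lengths(filename, clean_lines):
    function_state = _FunctionState()   # accumulates body line counts
    for linenum in range(clean_lines.NumLines()):
        CheckForFunctionLengths(filename, clean_lines, linenum,
                                function_state, print_error)
```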
Kitware/ParaView
f760af9124ff4634b23ebbeab95a4f56e0261955
Plugins/pvblot/blotish.py
python
tmin
(value=None)
Set deltime
Set deltime
[ "Set", "deltime" ]
def tmin(value=None): """Set deltime""" state.time_selection.set_tmin(_maybe_convert(value, float)) state.time_selection.print_show()
[ "def", "tmin", "(", "value", "=", "None", ")", ":", "state", ".", "time_selection", ".", "set_tmin", "(", "_maybe_convert", "(", "value", ",", "float", ")", ")", "state", ".", "time_selection", ".", "print_show", "(", ")" ]
https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Plugins/pvblot/blotish.py#L905-L908
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/io/formats/latex.py
python
LatexFormatter.column_format
(self)
return self._column_format
Column format.
Column format.
[ "Column", "format", "." ]
def column_format(self) -> str | None: """Column format.""" return self._column_format
[ "def", "column_format", "(", "self", ")", "->", "str", "|", "None", ":", "return", "self", ".", "_column_format" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/formats/latex.py#L752-L754
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/distributed/fsdp/fully_sharded_data_parallel.py
python
FullyShardedDataParallel._lazy_init
(self)
Initialization steps that should happen lazily, typically right before the first forward pass.
Initialization steps that should happen lazily, typically right before the first forward pass.
[ "Initialization", "steps", "that", "should", "happen", "lazily", "typically", "right", "before", "the", "first", "forward", "pass", "." ]
def _lazy_init(self) -> None: """Initialization steps that should happen lazily, typically right before the first forward pass. """ # Initialize param attributes lazily, in case the param's dtype or # device changes after __init__. for p in self.params: self._init_param_attributes(p) # Initialize _is_root and setup streams. These steps would ideally # happen in __init__, but _is_root can only be determined after the # entire model hierarchy is setup, thus we run it lazily. if self._is_root is None: # _is_root means that we are in the outermost module's forward. self._set_is_root() self._setup_streams() if self._is_root: # Buffers stay on GPU, and don't get sharded. Since _cast_buffers # applies recursively, we only call this from the root instance. self._cast_buffers() # Don't free the full params for the outer-most (root) instance, # In most cases, root instance contains params in the last layers # or has no params. In these cases, those params will be needed # immediately after for the backward pass. Note that this only # applies currently when freeing parameters at end of layer's # forward pass. self.reshard_after_forward = False # Due to the use of streams, we need to make sure the previous # ``optim.step()`` is done before we all-gather parameters. self._wait_for_previous_optim_step()
[ "def", "_lazy_init", "(", "self", ")", "->", "None", ":", "# Initialize param attributes lazily, in case the param's dtype or", "# device changes after __init__.", "for", "p", "in", "self", ".", "params", ":", "self", ".", "_init_param_attributes", "(", "p", ")", "# Initialize _is_root and setup streams. These steps would ideally", "# happen in __init__, but _is_root can only be determined after the", "# entire model hierarchy is setup, thus we run it lazily.", "if", "self", ".", "_is_root", "is", "None", ":", "# _is_root means that we are in the outermost module's forward.", "self", ".", "_set_is_root", "(", ")", "self", ".", "_setup_streams", "(", ")", "if", "self", ".", "_is_root", ":", "# Buffers stay on GPU, and don't get sharded. Since _cast_buffers", "# applies recursively, we only call this from the root instance.", "self", ".", "_cast_buffers", "(", ")", "# Don't free the full params for the outer-most (root) instance,", "# In most cases, root instance contains params in the last layers", "# or has no params. In these cases, those params will be needed", "# immediately after for the backward pass. Note that this only", "# applies currently when freeing parameters at end of layer's", "# forward pass.", "self", ".", "reshard_after_forward", "=", "False", "# Due to the use of streams, we need to make sure the previous", "# ``optim.step()`` is done before we all-gather parameters.", "self", ".", "_wait_for_previous_optim_step", "(", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/fsdp/fully_sharded_data_parallel.py#L444-L476
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/boost_1_66_0/tools/build/src/build/targets.py
python
ProjectTarget.mark_targets_as_explicit
(self, target_names)
Add 'target' to the list of targets in this project that should be built only by explicit request.
Add 'target' to the list of targets in this project that should be built only by explicit request.
[ "Add", "target", "to", "the", "list", "of", "targets", "in", "this", "project", "that", "should", "be", "built", "only", "by", "explicit", "request", "." ]
def mark_targets_as_explicit (self, target_names): """Add 'target' to the list of targets in this project that should be built only by explicit request.""" # Record the name of the target, not instance, since this # rule is called before main target instances are created. assert is_iterable_typed(target_names, basestring) self.explicit_targets_.update(target_names)
[ "def", "mark_targets_as_explicit", "(", "self", ",", "target_names", ")", ":", "# Record the name of the target, not instance, since this", "# rule is called before main target instances are created.", "assert", "is_iterable_typed", "(", "target_names", ",", "basestring", ")", "self", ".", "explicit_targets_", ".", "update", "(", "target_names", ")" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/boost_1_66_0/tools/build/src/build/targets.py#L471-L478
cathywu/Sentiment-Analysis
eb501fd1375c0c3f3ab430f963255f1bb858e659
PyML-0.7.9/PyML/utils/myio.py
python
csvread
(fileName, delim = ',')
return data
read a character array from a file in csv format
read a character array from a file in csv format
[ "read", "a", "character", "array", "from", "a", "file", "in", "csv", "format" ]
def csvread(fileName, delim = ',') : '''read a character array from a file in csv format''' import misc fileHandle = open(fileName, "r") line = fileHandle.readline() if delim == ' ' : delim = None data = misc.emptyLOL(len(line.split(delim))) dim = len(data) while 1 : line = line[:-1] fields = line.split(delim) if len(fields) != dim : print 'badline:', line for i in range(dim) : data[i].append(fields[i]) line=fileHandle.readline() if not line : break if len(data) == 1 : data=data[0] return data
[ "def", "csvread", "(", "fileName", ",", "delim", "=", "','", ")", ":", "import", "misc", "fileHandle", "=", "open", "(", "fileName", ",", "\"r\"", ")", "line", "=", "fileHandle", ".", "readline", "(", ")", "if", "delim", "==", "' '", ":", "delim", "=", "None", "data", "=", "misc", ".", "emptyLOL", "(", "len", "(", "line", ".", "split", "(", "delim", ")", ")", ")", "dim", "=", "len", "(", "data", ")", "while", "1", ":", "line", "=", "line", "[", ":", "-", "1", "]", "fields", "=", "line", ".", "split", "(", "delim", ")", "if", "len", "(", "fields", ")", "!=", "dim", ":", "print", "'badline:'", ",", "line", "for", "i", "in", "range", "(", "dim", ")", ":", "data", "[", "i", "]", ".", "append", "(", "fields", "[", "i", "]", ")", "line", "=", "fileHandle", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "if", "len", "(", "data", ")", "==", "1", ":", "data", "=", "data", "[", "0", "]", "return", "data" ]
https://github.com/cathywu/Sentiment-Analysis/blob/eb501fd1375c0c3f3ab430f963255f1bb858e659/PyML-0.7.9/PyML/utils/myio.py#L76-L102
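The function above is Python 2 (`print` statement, `while 1`, no file close). A rough Python 3 equivalent using the standard `csv` module, keeping the column-major return shape; the `delim == ' '` whitespace special case and the bad-line report are omitted for brevity:

```python
import csv

def csvread_py3(file_name, delim=','):
    """Read a CSV file into a list of columns (lists of strings)."""
    with open(file_name, newline='') as handle:
        rows = [r for r in csv.reader(handle, delimiter=delim) if r]
    if not rows:
        return []
    # zip(*rows) transposes row-major records into column-major lists;
    # ragged rows are truncated to the shortest row rather than reported.
    columns = [list(col) for col in zip(*rows)]
    return columns[0] if len(columns) == 1 else columns
```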
google/earthenterprise
0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
earth_enterprise/src/google/protobuf-py/mox.py
python
Mox.CreateMockAnything
(self)
return new_mock
Create a mock that will accept any method calls. This does not enforce an interface.
Create a mock that will accept any method calls.
[ "Create", "a", "mock", "that", "will", "accept", "any", "method", "calls", "." ]
def CreateMockAnything(self): """Create a mock that will accept any method calls. This does not enforce an interface. """ new_mock = MockAnything() self._mock_objects.append(new_mock) return new_mock
[ "def", "CreateMockAnything", "(", "self", ")", ":", "new_mock", "=", "MockAnything", "(", ")", "self", ".", "_mock_objects", ".", "append", "(", "new_mock", ")", "return", "new_mock" ]
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/google/protobuf-py/mox.py#L179-L187
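Typical record/replay/verify flow for a mock created this way — a sketch against the standard mox API, where `Ping` is an arbitrary method name that `MockAnything` accepts by design:

```python
import mox  # the vendored module above

m = mox.Mox()
anything = m.CreateMockAnything()
anything.Ping('payload').AndReturn('pong')   # record an expected call
m.ReplayAll()                                # switch every mock to replay
assert anything.Ping('payload') == 'pong'    # replay the recorded call
m.VerifyAll()                                # fails if expectations unmet
```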
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/decimal.py
python
Context.quantize
(self, a, b)
return a.quantize(b, context=self)
Returns a value equal to 'a' (rounded), having the exponent of 'b'. The coefficient of the result is derived from that of the left-hand operand. It may be rounded using the current rounding setting (if the exponent is being increased), multiplied by a positive power of ten (if the exponent is being decreased), or is unchanged (if the exponent is already equal to that of the right-hand operand). Unlike other operations, if the length of the coefficient after the quantize operation would be greater than precision then an Invalid operation condition is raised. This guarantees that, unless there is an error condition, the exponent of the result of a quantize is always equal to that of the right-hand operand. Also unlike other operations, quantize will never raise Underflow, even if the result is subnormal and inexact. >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001')) Decimal('2.170') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01')) Decimal('2.17') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1')) Decimal('2.2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0')) Decimal('2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1')) Decimal('0E+1') >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity')) Decimal('-Infinity') >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1')) Decimal('-0') >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5')) Decimal('-0E+5') >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1')) Decimal('217.0') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0')) Decimal('217') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1')) Decimal('2.2E+2') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2')) Decimal('2E+2') >>> ExtendedContext.quantize(1, 2) Decimal('1') >>> ExtendedContext.quantize(Decimal(1), 2) Decimal('1') >>> ExtendedContext.quantize(1, Decimal(2)) Decimal('1')
Returns a value equal to 'a' (rounded), having the exponent of 'b'.
[ "Returns", "a", "value", "equal", "to", "a", "(", "rounded", ")", "having", "the", "exponent", "of", "b", "." ]
def quantize(self, a, b): """Returns a value equal to 'a' (rounded), having the exponent of 'b'. The coefficient of the result is derived from that of the left-hand operand. It may be rounded using the current rounding setting (if the exponent is being increased), multiplied by a positive power of ten (if the exponent is being decreased), or is unchanged (if the exponent is already equal to that of the right-hand operand). Unlike other operations, if the length of the coefficient after the quantize operation would be greater than precision then an Invalid operation condition is raised. This guarantees that, unless there is an error condition, the exponent of the result of a quantize is always equal to that of the right-hand operand. Also unlike other operations, quantize will never raise Underflow, even if the result is subnormal and inexact. >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001')) Decimal('2.170') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01')) Decimal('2.17') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1')) Decimal('2.2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0')) Decimal('2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1')) Decimal('0E+1') >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity')) Decimal('-Infinity') >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1')) Decimal('-0') >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5')) Decimal('-0E+5') >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1')) Decimal('217.0') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0')) Decimal('217') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1')) Decimal('2.2E+2') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2')) Decimal('2E+2') >>> ExtendedContext.quantize(1, 2) Decimal('1') >>> ExtendedContext.quantize(Decimal(1), 2) Decimal('1') >>> ExtendedContext.quantize(1, Decimal(2)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.quantize(b, context=self)
[ "def", "quantize", "(", "self", ",", "a", ",", "b", ")", ":", "a", "=", "_convert_other", "(", "a", ",", "raiseit", "=", "True", ")", "return", "a", ".", "quantize", "(", "b", ",", "context", "=", "self", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/decimal.py#L5045-L5101
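The same behaviour is available from the standard library's `decimal` module, which this vendored copy tracks; a few of the doctest cases reproduced directly:

```python
from decimal import Decimal, ExtendedContext

ctx = ExtendedContext.copy()   # copy so the shared context is not mutated
print(ctx.quantize(Decimal('2.17'), Decimal('0.01')))            # 2.17
print(ctx.quantize(Decimal('217'), Decimal('1e+1')))             # 2.2E+2
# Equivalent instance-method form with an explicit context:
print(Decimal('2.17').quantize(Decimal('0.001'), context=ctx))   # 2.170
```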
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/utils/registry.py
python
Registry.register
(self, obj, name=None)
Registers a Python object "obj" for the given "name". Args: obj: The object to add to the registry. name: An optional string specifying the registry key for the obj. If None, obj.__name__ will be used. Raises: KeyError: If same name is registered twice.
Registers a Python object "obj" for the given "name".
[ "Registers", "a", "Python", "object", "obj", "for", "the", "given", "name", "." ]
def register(self, obj, name=None): """Registers a Python object "obj" for the given "name". Args: obj: The object to add to the registry. name: An optional string specifying the registry key for the obj. If None, obj.__name__ will be used. Raises: KeyError: If same name is registered twice. """ if not name: name = obj.__name__ if name in self._registry: raise KeyError("Name '%s' has been registered in '%s'!" % (name, self._name)) # logging.vlog(1, "Registering %s (%s) in %s.", name, obj, self._name) self._registry[name] = obj
[ "def", "register", "(", "self", ",", "obj", ",", "name", "=", "None", ")", ":", "if", "not", "name", ":", "name", "=", "obj", ".", "__name__", "if", "name", "in", "self", ".", "_registry", ":", "raise", "KeyError", "(", "\"Name '%s' has been registered in '%s'!\"", "%", "(", "name", ",", "self", ".", "_name", ")", ")", "# logging.vlog(1, \"Registering %s (%s) in %s.\", name, obj, self._name)", "self", ".", "_registry", "[", "name", "]", "=", "obj" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/utils/registry.py#L36-L53
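A usage sketch, assuming `Registry` is constructed with a registry name (the `self._name` in the error message suggests as much); `relu_handler` and the key strings are illustrative only:

```python
registry = Registry('example_ops')   # hypothetical constructor argument

def relu_handler(node):
    return node

registry.register(relu_handler)                # key defaults to __name__
registry.register(relu_handler, name='relu6')  # or pass an explicit key
# Registering under 'relu_handler' a second time would raise KeyError.
```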
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
src/ceph-volume/ceph_volume/devices/lvm/prepare.py
python
Prepare.prepare_data_device
(self, device_type, osd_uuid)
Check if ``arg`` is a device or partition to create an LV out of it with a distinct volume group name, assigning LV tags on it and ultimately, returning the logical volume object. Failing to detect a device or partition will result in error. :param arg: The value of ``--data`` when parsing args :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore) :param osd_uuid: The OSD uuid
Check if ``arg`` is a device or partition to create an LV out of it with a distinct volume group name, assigning LV tags on it and ultimately, returning the logical volume object. Failing to detect a device or partition will result in error.
[ "Check", "if", "arg", "is", "a", "device", "or", "partition", "to", "create", "an", "LV", "out", "of", "it", "with", "a", "distinct", "volume", "group", "name", "assigning", "LV", "tags", "on", "it", "and", "ultimately", "returning", "the", "logical", "volume", "object", ".", "Failing", "to", "detect", "a", "device", "or", "partition", "will", "result", "in", "error", "." ]
def prepare_data_device(self, device_type, osd_uuid): """ Check if ``arg`` is a device or partition to create an LV out of it with a distinct volume group name, assigning LV tags on it and ultimately, returning the logical volume object. Failing to detect a device or partition will result in error. :param arg: The value of ``--data`` when parsing args :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore) :param osd_uuid: The OSD uuid """ device = self.args.data if disk.is_partition(device) or disk.is_device(device): # we must create a vg, and then a single lv lv_name_prefix = "osd-{}".format(device_type) kwargs = {'device': device, 'tags': {'ceph.type': device_type}, 'slots': self.args.data_slots, } logger.debug('data device size: {}'.format(self.args.data_size)) if self.args.data_size != 0: kwargs['size'] = self.args.data_size return api.create_lv( lv_name_prefix, osd_uuid, **kwargs) else: error = [ 'Cannot use device ({}).'.format(device), 'A vg/lv path or an existing device is needed'] raise RuntimeError(' '.join(error)) raise RuntimeError('no data logical volume found with: {}'.format(device))
[ "def", "prepare_data_device", "(", "self", ",", "device_type", ",", "osd_uuid", ")", ":", "device", "=", "self", ".", "args", ".", "data", "if", "disk", ".", "is_partition", "(", "device", ")", "or", "disk", ".", "is_device", "(", "device", ")", ":", "# we must create a vg, and then a single lv", "lv_name_prefix", "=", "\"osd-{}\"", ".", "format", "(", "device_type", ")", "kwargs", "=", "{", "'device'", ":", "device", ",", "'tags'", ":", "{", "'ceph.type'", ":", "device_type", "}", ",", "'slots'", ":", "self", ".", "args", ".", "data_slots", ",", "}", "logger", ".", "debug", "(", "'data device size: {}'", ".", "format", "(", "self", ".", "args", ".", "data_size", ")", ")", "if", "self", ".", "args", ".", "data_size", "!=", "0", ":", "kwargs", "[", "'size'", "]", "=", "self", ".", "args", ".", "data_size", "return", "api", ".", "create_lv", "(", "lv_name_prefix", ",", "osd_uuid", ",", "*", "*", "kwargs", ")", "else", ":", "error", "=", "[", "'Cannot use device ({}).'", ".", "format", "(", "device", ")", ",", "'A vg/lv path or an existing device is needed'", "]", "raise", "RuntimeError", "(", "' '", ".", "join", "(", "error", ")", ")", "raise", "RuntimeError", "(", "'no data logical volume found with: {}'", ".", "format", "(", "device", ")", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py#L196-L228
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/ops/sparse_ops.py
python
sparse_to_dense
(sparse_indices, output_shape, sparse_values, default_value=0, validate_indices=True, name=None)
return gen_sparse_ops._sparse_to_dense( sparse_indices, output_shape, sparse_values, default_value=default_value, validate_indices=validate_indices, name=name)
Converts a sparse representation into a dense tensor. Builds an array `dense` with shape `output_shape` such that ```python # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value) # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i] # If sparse_indices is an n by d matrix, then for each i in [0, n) dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] ``` All other values in `dense` are set to `default_value`. If `sparse_values` is a scalar, all sparse indices are set to this single value. Indices should be sorted in lexicographic order, and indices must not contain any repeats. If `validate_indices` is True, these properties are checked during execution. Args: sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape of the dense output tensor. sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of `sparse_indices`, or a scalar value to be used for all sparse indices. default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value to set for indices not specified in `sparse_indices`. Defaults to zero. validate_indices: A boolean value. If True, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name for the operation (optional). Returns: Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`.
Converts a sparse representation into a dense tensor.
[ "Converts", "a", "sparse", "representation", "into", "a", "dense", "tensor", "." ]
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0, validate_indices=True, name=None): """Converts a sparse representation into a dense tensor. Builds an array `dense` with shape `output_shape` such that ```python # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value) # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i] # If sparse_indices is an n by d matrix, then for each i in [0, n) dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] ``` All other values in `dense` are set to `default_value`. If `sparse_values` is a scalar, all sparse indices are set to this single value. Indices should be sorted in lexicographic order, and indices must not contain any repeats. If `validate_indices` is True, these properties are checked during execution. Args: sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape of the dense output tensor. sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of `sparse_indices`, or a scalar value to be used for all sparse indices. default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value to set for indices not specified in `sparse_indices`. Defaults to zero. validate_indices: A boolean value. If True, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name for the operation (optional). Returns: Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`. """ return gen_sparse_ops._sparse_to_dense( sparse_indices, output_shape, sparse_values, default_value=default_value, validate_indices=validate_indices, name=name)
[ "def", "sparse_to_dense", "(", "sparse_indices", ",", "output_shape", ",", "sparse_values", ",", "default_value", "=", "0", ",", "validate_indices", "=", "True", ",", "name", "=", "None", ")", ":", "return", "gen_sparse_ops", ".", "_sparse_to_dense", "(", "sparse_indices", ",", "output_shape", ",", "sparse_values", ",", "default_value", "=", "default_value", ",", "validate_indices", "=", "validate_indices", ",", "name", "=", "name", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/ops/sparse_ops.py#L503-L555
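A graph-mode usage sketch in the TF 1.x style this fork follows; shapes and values are illustrative. Plain Python lists are used for both `sparse_indices` and `output_shape` so they convert to the same integer dtype:

```python
import tensorflow as tf

dense = tf.sparse_to_dense(sparse_indices=[[0, 0], [1, 2]],
                           output_shape=[3, 4],
                           sparse_values=[5, 7], default_value=0)
with tf.Session() as sess:
    print(sess.run(dense))
    # [[5 0 0 0]
    #  [0 0 7 0]
    #  [0 0 0 0]]
```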
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/flatmenu.py
python
FlatMenuBar.SetOptions
(self, options)
Sets the :class:`FlatMenuBar` options, whether to show a toolbar, to use LCD screen settings etc... :param integer `options`: a combination of the following bits: ========================= ========= ============================= `options` Bit Hex Value Description ========================= ========= ============================= ``FM_OPT_IS_LCD`` 0x1 Use this style if your computer uses a LCD screen ``FM_OPT_MINIBAR`` 0x2 Use this if you plan to use toolbar only ``FM_OPT_SHOW_CUSTOMIZE`` 0x4 Show "customize link" in more menus, you will need to write your own handler. See demo. ``FM_OPT_SHOW_TOOLBAR`` 0x8 Set this option if you are planning to use the toolbar ========================= ========= =============================
Sets the :class:`FlatMenuBar` options, whether to show a toolbar, to use LCD screen settings etc...
[ "Sets", "the", ":", "class", ":", "FlatMenuBar", "options", "whether", "to", "show", "a", "toolbar", "to", "use", "LCD", "screen", "settings", "etc", "..." ]
def SetOptions(self, options): """ Sets the :class:`FlatMenuBar` options, whether to show a toolbar, to use LCD screen settings etc... :param integer `options`: a combination of the following bits: ========================= ========= ============================= `options` Bit Hex Value Description ========================= ========= ============================= ``FM_OPT_IS_LCD`` 0x1 Use this style if your computer uses a LCD screen ``FM_OPT_MINIBAR`` 0x2 Use this if you plan to use toolbar only ``FM_OPT_SHOW_CUSTOMIZE`` 0x4 Show "customize link" in more menus, you will need to write your own handler. See demo. ``FM_OPT_SHOW_TOOLBAR`` 0x8 Set this option if you are planning to use the toolbar ========================= ========= ============================= """ self._options = options self._showToolbar = options & FM_OPT_SHOW_TOOLBAR self._showCustomize = options & FM_OPT_SHOW_CUSTOMIZE self._isLCD = options & FM_OPT_IS_LCD self._isMinibar = options & FM_OPT_MINIBAR self.SetBarHeight() self.Refresh() self.Update()
[ "def", "SetOptions", "(", "self", ",", "options", ")", ":", "self", ".", "_options", "=", "options", "self", ".", "_showToolbar", "=", "options", "&", "FM_OPT_SHOW_TOOLBAR", "self", ".", "_showCustomize", "=", "options", "&", "FM_OPT_SHOW_CUSTOMIZE", "self", ".", "_isLCD", "=", "options", "&", "FM_OPT_IS_LCD", "self", ".", "_isMinibar", "=", "options", "&", "FM_OPT_MINIBAR", "self", ".", "SetBarHeight", "(", ")", "self", ".", "Refresh", "(", ")", "self", ".", "Update", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/flatmenu.py#L2544-L2571
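The option bits combine with bitwise OR. A minimal sketch, assuming a working wxPython install and that `FlatMenuBar` accepts an `options` keyword as the AGW documentation indicates:

```python
import wx
import wx.lib.agw.flatmenu as FM

app = wx.App(False)
frame = wx.Frame(None, title="FlatMenuBar options demo")
menu_bar = FM.FlatMenuBar(frame, wx.ID_ANY, options=FM.FM_OPT_SHOW_TOOLBAR)
# Later, also enable the customize link; bits are OR-ed together.
menu_bar.SetOptions(FM.FM_OPT_SHOW_TOOLBAR | FM.FM_OPT_SHOW_CUSTOMIZE)
```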
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/importlib-metadata/py3/importlib_metadata/__init__.py
python
DistributionFinder.find_distributions
(self, context=Context())
Find distributions. Return an iterable of all Distribution instances capable of loading the metadata for packages matching the ``context``, a DistributionFinder.Context instance.
Find distributions.
[ "Find", "distributions", "." ]
def find_distributions(self, context=Context()): """ Find distributions. Return an iterable of all Distribution instances capable of loading the metadata for packages matching the ``context``, a DistributionFinder.Context instance. """
[ "def", "find_distributions", "(", "self", ",", "context", "=", "Context", "(", ")", ")", ":" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/importlib-metadata/py3/importlib_metadata/__init__.py#L751-L758
gwaldron/osgearth
4c521857d59a69743e4a9cedba00afe570f984e8
src/third_party/tinygltf/deps/cpplint.py
python
NestingState.InExternC
(self)
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
Check if we are currently one level inside an 'extern "C"' block. Returns: True if top of the stack is an extern block, False otherwise.
Check if we are currently one level inside an 'extern "C"' block.
[ "Check", "if", "we", "are", "currently", "one", "level", "inside", "an", "extern", "C", "block", "." ]
def InExternC(self): """Check if we are currently one level inside an 'extern "C"' block. Returns: True if top of the stack is an extern block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ExternCInfo)
[ "def", "InExternC", "(", "self", ")", ":", "return", "self", ".", "stack", "and", "isinstance", "(", "self", ".", "stack", "[", "-", "1", "]", ",", "_ExternCInfo", ")" ]
https://github.com/gwaldron/osgearth/blob/4c521857d59a69743e4a9cedba00afe570f984e8/src/third_party/tinygltf/deps/cpplint.py#L2242-L2248
stan-dev/math
5fd79f89933269a4ca4d8dd1fde2a36d53d4768c
lib/tbb_2020.3/python/tbb/pool.py
python
ApplyResult.successful
(self)
return self._success
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.
Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.
[ "Returns", "whether", "the", "call", "completed", "without", "raising", "an", "exception", ".", "Will", "raise", "AssertionError", "if", "the", "result", "is", "not", "ready", "." ]
def successful(self): """Returns whether the call completed without raising an exception. Will raise AssertionError if the result is not ready.""" assert self.ready() return self._success
[ "def", "successful", "(", "self", ")", ":", "assert", "self", ".", "ready", "(", ")", "return", "self", ".", "_success" ]
https://github.com/stan-dev/math/blob/5fd79f89933269a4ca4d8dd1fde2a36d53d4768c/lib/tbb_2020.3/python/tbb/pool.py#L365-L370
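This class mirrors `multiprocessing.pool.AsyncResult`, so the semantics can be shown with the standard library directly: `successful()` is only valid once `ready()` is true, which `wait()` guarantees:

```python
from multiprocessing.pool import ThreadPool

with ThreadPool(2) as pool:
    res = pool.apply_async(divmod, (7, 2))
    res.wait()                    # block until ready() is True
    if res.successful():          # the call raised no exception
        print(res.get())          # (3, 1)
```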
pgRouting/osm2pgrouting
8491929fc4037d308f271e84d59bb96da3c28aa2
tools/cpplint.py
python
NestingState.InAsmBlock
(self)
return self.stack and self.stack[-1].inline_asm != _NO_ASM
Check if we are currently one level inside an inline ASM block. Returns: True if the top of the stack is a block containing inline ASM.
Check if we are currently one level inside an inline ASM block.
[ "Check", "if", "we", "are", "currently", "one", "level", "inside", "an", "inline", "ASM", "block", "." ]
def InAsmBlock(self): """Check if we are currently one level inside an inline ASM block. Returns: True if the top of the stack is a block containing inline ASM. """ return self.stack and self.stack[-1].inline_asm != _NO_ASM
[ "def", "InAsmBlock", "(", "self", ")", ":", "return", "self", ".", "stack", "and", "self", ".", "stack", "[", "-", "1", "]", ".", "inline_asm", "!=", "_NO_ASM" ]
https://github.com/pgRouting/osm2pgrouting/blob/8491929fc4037d308f271e84d59bb96da3c28aa2/tools/cpplint.py#L2256-L2262
apache/arrow
af33dd1157eb8d7d9bfac25ebf61445b793b7943
python/pyarrow/__init__.py
python
show_info
()
Print detailed version and platform information, for error reporting
Print detailed version and platform information, for error reporting
[ "Print", "detailed", "version", "and", "platform", "information", "for", "error", "reporting" ]
def show_info(): """ Print detailed version and platform information, for error reporting """ show_versions() def print_entry(label, value): print(f" {label: <20}: {value: <8}") print("\nPlatform:") print_entry("OS / Arch", f"{_platform.system()} {_platform.machine()}") print_entry("SIMD Level", runtime_info().simd_level) print_entry("Detected SIMD Level", runtime_info().detected_simd_level) pool = default_memory_pool() print("\nMemory:") print_entry("Default backend", pool.backend_name) print_entry("Bytes allocated", f"{pool.bytes_allocated()} bytes") print_entry("Max memory", f"{pool.max_memory()} bytes") print_entry("Supported Backends", ', '.join(supported_memory_backends())) print("\nOptional modules:") modules = ["csv", "cuda", "dataset", "feather", "flight", "fs", "gandiva", "json", "orc", "parquet", "plasma"] for module in modules: status = "Enabled" if _module_is_available(module) else "-" print(f" {module: <20}: {status: <8}") print("\nFilesystems:") filesystems = ["GcsFileSystem", "HadoopFileSystem", "S3FileSystem"] for fs in filesystems: status = "Enabled" if _filesystem_is_available(fs) else "-" print(f" {fs: <20}: {status: <8}") print("\nCompression Codecs:") codecs = ["brotli", "bz2", "gzip", "lz4_frame", "lz4", "snappy", "zstd"] for codec in codecs: status = "Enabled" if Codec.is_available(codec) else "-" print(f" {codec: <20}: {status: <8}")
[ "def", "show_info", "(", ")", ":", "show_versions", "(", ")", "def", "print_entry", "(", "label", ",", "value", ")", ":", "print", "(", "f\" {label: <20}: {value: <8}\"", ")", "print", "(", "\"\\nPlatform:\"", ")", "print_entry", "(", "\"OS / Arch\"", ",", "f\"{_platform.system()} {_platform.machine()}\"", ")", "print_entry", "(", "\"SIMD Level\"", ",", "runtime_info", "(", ")", ".", "simd_level", ")", "print_entry", "(", "\"Detected SIMD Level\"", ",", "runtime_info", "(", ")", ".", "detected_simd_level", ")", "pool", "=", "default_memory_pool", "(", ")", "print", "(", "\"\\nMemory:\"", ")", "print_entry", "(", "\"Default backend\"", ",", "pool", ".", "backend_name", ")", "print_entry", "(", "\"Bytes allocated\"", ",", "f\"{pool.bytes_allocated()} bytes\"", ")", "print_entry", "(", "\"Max memory\"", ",", "f\"{pool.max_memory()} bytes\"", ")", "print_entry", "(", "\"Supported Backends\"", ",", "', '", ".", "join", "(", "supported_memory_backends", "(", ")", ")", ")", "print", "(", "\"\\nOptional modules:\"", ")", "modules", "=", "[", "\"csv\"", ",", "\"cuda\"", ",", "\"dataset\"", ",", "\"feather\"", ",", "\"flight\"", ",", "\"fs\"", ",", "\"gandiva\"", ",", "\"json\"", ",", "\"orc\"", ",", "\"parquet\"", ",", "\"plasma\"", "]", "for", "module", "in", "modules", ":", "status", "=", "\"Enabled\"", "if", "_module_is_available", "(", "module", ")", "else", "\"-\"", "print", "(", "f\" {module: <20}: {status: <8}\"", ")", "print", "(", "\"\\nFilesystems:\"", ")", "filesystems", "=", "[", "\"GcsFileSystem\"", ",", "\"HadoopFileSystem\"", ",", "\"S3FileSystem\"", "]", "for", "fs", "in", "filesystems", ":", "status", "=", "\"Enabled\"", "if", "_filesystem_is_available", "(", "fs", ")", "else", "\"-\"", "print", "(", "f\" {fs: <20}: {status: <8}\"", ")", "print", "(", "\"\\nCompression Codecs:\"", ")", "codecs", "=", "[", "\"brotli\"", ",", "\"bz2\"", ",", "\"gzip\"", ",", "\"lz4_frame\"", ",", "\"lz4\"", ",", "\"snappy\"", ",", "\"zstd\"", "]", "for", "codec", "in", "codecs", ":", "status", "=", "\"Enabled\"", "if", "Codec", ".", "is_available", "(", "codec", ")", "else", "\"-\"", "print", "(", "f\" {codec: <20}: {status: <8}\"", ")" ]
https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/python/pyarrow/__init__.py#L119-L157
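Intended usage is simply to paste the report into a bug ticket; `show_info` is exported at package level in this version:

```python
import pyarrow as pa

pa.show_info()   # prints versions, platform, memory pool and codec status
```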
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/llvmlite/six.py
python
with_metaclass
(meta, *bases)
return type.__new__(metaclass, 'temporary_class', (), {})
Create a base class with a metaclass.
Create a base class with a metaclass.
[ "Create", "a", "base", "class", "with", "a", "metaclass", "." ]
def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "# This requires a bit of explanation: the basic idea is to make a dummy", "# metaclass for one level of class instantiation that replaces itself with", "# the actual metaclass.", "class", "metaclass", "(", "meta", ")", ":", "def", "__new__", "(", "cls", ",", "name", ",", "this_bases", ",", "d", ")", ":", "return", "meta", "(", "name", ",", "bases", ",", "d", ")", "return", "type", ".", "__new__", "(", "metaclass", ",", "'temporary_class'", ",", "(", ")", ",", "{", "}", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/llvmlite/six.py#L730-L738
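The classic six idiom this enables: one class statement that applies a metaclass under both Python 2 and 3, here with `abc.ABCMeta`. The vendored copy above behaves the same as upstream six:

```python
import abc
from six import with_metaclass

class Serializer(with_metaclass(abc.ABCMeta, object)):
    @abc.abstractmethod
    def dumps(self, obj):
        """Return a byte representation of obj."""

# Instantiating Serializer() raises TypeError until dumps() is overridden.
```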
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/botocore/credentials.py
python
AssumeRoleCredentialFetcher._get_credentials
(self)
return client.assume_role(**kwargs)
Get credentials by calling assume role.
Get credentials by calling assume role.
[ "Get", "credentials", "by", "calling", "assume", "role", "." ]
def _get_credentials(self): """Get credentials by calling assume role.""" kwargs = self._assume_role_kwargs() client = self._create_client() return client.assume_role(**kwargs)
[ "def", "_get_credentials", "(", "self", ")", ":", "kwargs", "=", "self", ".", "_assume_role_kwargs", "(", ")", "client", "=", "self", ".", "_create_client", "(", ")", "return", "client", ".", "assume_role", "(", "*", "*", "kwargs", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/credentials.py#L683-L687
plumonito/dtslam
5994bb9cf7a11981b830370db206bceb654c085d
3rdparty/opencv-git/3rdparty/jinja2/environment.py
python
Environment.preprocess
(self, source, name=None, filename=None)
return reduce(lambda s, e: e.preprocess(s, name, filename), self.iter_extensions(), text_type(source))
Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized.
Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized.
[ "Preprocesses", "the", "source", "with", "all", "extensions", ".", "This", "is", "automatically", "called", "for", "all", "parsing", "and", "compiling", "methods", "but", "*", "not", "*", "for", ":", "meth", ":", "lex", "because", "there", "you", "usually", "only", "want", "the", "actual", "source", "tokenized", "." ]
def preprocess(self, source, name=None, filename=None): """Preprocesses the source with all extensions. This is automatically called for all parsing and compiling methods but *not* for :meth:`lex` because there you usually only want the actual source tokenized. """ return reduce(lambda s, e: e.preprocess(s, name, filename), self.iter_extensions(), text_type(source))
[ "def", "preprocess", "(", "self", ",", "source", ",", "name", "=", "None", ",", "filename", "=", "None", ")", ":", "return", "reduce", "(", "lambda", "s", ",", "e", ":", "e", ".", "preprocess", "(", "s", ",", "name", ",", "filename", ")", ",", "self", ".", "iter_extensions", "(", ")", ",", "text_type", "(", "source", ")", ")" ]
https://github.com/plumonito/dtslam/blob/5994bb9cf7a11981b830370db206bceb654c085d/3rdparty/opencv-git/3rdparty/jinja2/environment.py#L478-L484
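What the reduce folds over: each registered extension's `preprocess` hook gets a chance to rewrite the raw template source before lexing. A small illustrative extension — the string substitution itself is arbitrary:

```python
from jinja2 import Environment
from jinja2.ext import Extension

class ShoutingGreeting(Extension):   # illustrative only
    def preprocess(self, source, name, filename=None):
        return source.replace('hello', 'HELLO')

env = Environment(extensions=[ShoutingGreeting])
print(env.preprocess('hello {{ name }}'))   # HELLO {{ name }}
```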
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/DirectILLCollectData.py
python
_createFlatBkg
(ws, wsType, windowWidth, wsNames, algorithmLogging)
return bkgWS
Return a flat background workspace.
Return a flat background workspace.
[ "Return", "a", "flat", "background", "workspace", "." ]
def _createFlatBkg(ws, wsType, windowWidth, wsNames, algorithmLogging): """Return a flat background workspace.""" if wsType == common.WS_CONTENT_DETS: bkgWSName = wsNames.withSuffix('flat_bkg_for_detectors') else: bkgWSName = wsNames.withSuffix('flat_bkg_for_monitors') bkgWS = CalculateFlatBackground(InputWorkspace=ws, OutputWorkspace=bkgWSName, Mode='Moving Average', OutputMode='Return Background', SkipMonitors=False, NullifyNegativeValues=False, AveragingWindowWidth=windowWidth, EnableLogging=algorithmLogging) firstBinStart = bkgWS.dataX(0)[0] firstBinEnd = bkgWS.dataX(0)[1] bkgWS = CropWorkspace(InputWorkspace=bkgWS, OutputWorkspace=bkgWS, XMin=firstBinStart, XMax=firstBinEnd, EnableLogging=algorithmLogging) return bkgWS
[ "def", "_createFlatBkg", "(", "ws", ",", "wsType", ",", "windowWidth", ",", "wsNames", ",", "algorithmLogging", ")", ":", "if", "wsType", "==", "common", ".", "WS_CONTENT_DETS", ":", "bkgWSName", "=", "wsNames", ".", "withSuffix", "(", "'flat_bkg_for_detectors'", ")", "else", ":", "bkgWSName", "=", "wsNames", ".", "withSuffix", "(", "'flat_bkg_for_monitors'", ")", "bkgWS", "=", "CalculateFlatBackground", "(", "InputWorkspace", "=", "ws", ",", "OutputWorkspace", "=", "bkgWSName", ",", "Mode", "=", "'Moving Average'", ",", "OutputMode", "=", "'Return Background'", ",", "SkipMonitors", "=", "False", ",", "NullifyNegativeValues", "=", "False", ",", "AveragingWindowWidth", "=", "windowWidth", ",", "EnableLogging", "=", "algorithmLogging", ")", "firstBinStart", "=", "bkgWS", ".", "dataX", "(", "0", ")", "[", "0", "]", "firstBinEnd", "=", "bkgWS", ".", "dataX", "(", "0", ")", "[", "1", "]", "bkgWS", "=", "CropWorkspace", "(", "InputWorkspace", "=", "bkgWS", ",", "OutputWorkspace", "=", "bkgWS", ",", "XMin", "=", "firstBinStart", ",", "XMax", "=", "firstBinEnd", ",", "EnableLogging", "=", "algorithmLogging", ")", "return", "bkgWS" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/DirectILLCollectData.py#L108-L129
ideawu/ssdb
f229ba277c7f7d0ca5a441c0c6fb3d1209af68e4
deps/cpy/antlr3/tree.py
python
CommonTreeAdaptor.dupNode
(self, treeNode)
return treeNode.dupNode()
Duplicate a node. This is part of the factory; override if you want another kind of node to be built. I could use reflection to prevent having to override this but reflection is slow.
Duplicate a node. This is part of the factory; override if you want another kind of node to be built.
[ "Duplicate", "a", "node", ".", "This", "is", "part", "of", "the", "factory", ";", "override", "if", "you", "want", "another", "kind", "of", "node", "to", "be", "built", "." ]
def dupNode(self, treeNode): """ Duplicate a node. This is part of the factory; override if you want another kind of node to be built. I could use reflection to prevent having to override this but reflection is slow. """ if treeNode is None: return None return treeNode.dupNode()
[ "def", "dupNode", "(", "self", ",", "treeNode", ")", ":", "if", "treeNode", "is", "None", ":", "return", "None", "return", "treeNode", ".", "dupNode", "(", ")" ]
https://github.com/ideawu/ssdb/blob/f229ba277c7f7d0ca5a441c0c6fb3d1209af68e4/deps/cpy/antlr3/tree.py#L1401-L1413
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/filter/CommonFilters.py
python
CommonFilters.delSrgbEncode
(self)
return True
Reverses the effects of setSrgbEncode.
Reverses the effects of setSrgbEncode.
[ "Reverses", "the", "effects", "of", "setSrgbEncode", "." ]
def delSrgbEncode(self): """ Reverses the effects of setSrgbEncode. """ if "SrgbEncode" in self.configuration: old_enable = self.configuration["SrgbEncode"] del self.configuration["SrgbEncode"] return self.reconfigure(old_enable, "SrgbEncode") return True
[ "def", "delSrgbEncode", "(", "self", ")", ":", "if", "\"SrgbEncode\"", "in", "self", ".", "configuration", ":", "old_enable", "=", "self", ".", "configuration", "[", "\"SrgbEncode\"", "]", "del", "self", ".", "configuration", "[", "\"SrgbEncode\"", "]", "return", "self", ".", "reconfigure", "(", "old_enable", ",", "\"SrgbEncode\"", ")", "return", "True" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/filter/CommonFilters.py#L622-L628
jackaudio/jack2
21b293dbc37d42446141a08922cdec0d2550c6a0
waflib/Logs.py
python
formatter.format
(self, rec)
return logging.Formatter.format(self, rec)
Formats records and adds colors as needed. The records do not get a leading hour format if the logging level is above *INFO*.
Formats records and adds colors as needed. The records do not get a leading hour format if the logging level is above *INFO*.
[ "Formats", "records", "and", "adds", "colors", "as", "needed", ".", "The", "records", "do", "not", "get", "a", "leading", "hour", "format", "if", "the", "logging", "level", "is", "above", "*", "INFO", "*", "." ]
def format(self, rec): """ Formats records and adds colors as needed. The records do not get a leading hour format if the logging level is above *INFO*. """ try: msg = rec.msg.decode('utf-8') except Exception: msg = rec.msg use = colors_lst['USE'] if (use == 1 and rec.stream.isatty()) or use == 2: c1 = getattr(rec, 'c1', None) if c1 is None: c1 = '' if rec.levelno >= logging.ERROR: c1 = colors.RED elif rec.levelno >= logging.WARNING: c1 = colors.YELLOW elif rec.levelno >= logging.INFO: c1 = colors.GREEN c2 = getattr(rec, 'c2', colors.NORMAL) msg = '%s%s%s' % (c1, msg, c2) else: # remove single \r that make long lines in text files # and other terminal commands msg = re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))', '', msg) if rec.levelno >= logging.INFO: # the goal of this is to format without the leading "Logs, hour" prefix if rec.args: return msg % rec.args return msg rec.msg = msg rec.c1 = colors.PINK rec.c2 = colors.NORMAL return logging.Formatter.format(self, rec)
[ "def", "format", "(", "self", ",", "rec", ")", ":", "try", ":", "msg", "=", "rec", ".", "msg", ".", "decode", "(", "'utf-8'", ")", "except", "Exception", ":", "msg", "=", "rec", ".", "msg", "use", "=", "colors_lst", "[", "'USE'", "]", "if", "(", "use", "==", "1", "and", "rec", ".", "stream", ".", "isatty", "(", ")", ")", "or", "use", "==", "2", ":", "c1", "=", "getattr", "(", "rec", ",", "'c1'", ",", "None", ")", "if", "c1", "is", "None", ":", "c1", "=", "''", "if", "rec", ".", "levelno", ">=", "logging", ".", "ERROR", ":", "c1", "=", "colors", ".", "RED", "elif", "rec", ".", "levelno", ">=", "logging", ".", "WARNING", ":", "c1", "=", "colors", ".", "YELLOW", "elif", "rec", ".", "levelno", ">=", "logging", ".", "INFO", ":", "c1", "=", "colors", ".", "GREEN", "c2", "=", "getattr", "(", "rec", ",", "'c2'", ",", "colors", ".", "NORMAL", ")", "msg", "=", "'%s%s%s'", "%", "(", "c1", ",", "msg", ",", "c2", ")", "else", ":", "# remove single \\r that make long lines in text files", "# and other terminal commands", "msg", "=", "re", ".", "sub", "(", "r'\\r(?!\\n)|\\x1B\\[(K|.*?(m|h|l))'", ",", "''", ",", "msg", ")", "if", "rec", ".", "levelno", ">=", "logging", ".", "INFO", ":", "# the goal of this is to format without the leading \"Logs, hour\" prefix", "if", "rec", ".", "args", ":", "return", "msg", "%", "rec", ".", "args", "return", "msg", "rec", ".", "msg", "=", "msg", "rec", ".", "c1", "=", "colors", ".", "PINK", "rec", ".", "c2", "=", "colors", ".", "NORMAL", "return", "logging", ".", "Formatter", ".", "format", "(", "self", ",", "rec", ")" ]
https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/waflib/Logs.py#L208-L246
floooh/oryol
eb08cffe1b1cb6b05ed14ec692bca9372cef064e
fips-files/generators/util/png.py
python
read_pnm_header
(infile, supported=('P5','P6'))
return header[0], header[1], header[2], depth, header[3]
Read a PNM header, returning (format,width,height,depth,maxval). `width` and `height` are in pixels. `depth` is the number of channels in the image; for PBM and PGM it is synthesized as 1, for PPM as 3; for PAM images it is read from the header. `maxval` is synthesized (as 1) for PBM images.
Read a PNM header, returning (format,width,height,depth,maxval). `width` and `height` are in pixels. `depth` is the number of channels in the image; for PBM and PGM it is synthesized as 1, for PPM as 3; for PAM images it is read from the header. `maxval` is synthesized (as 1) for PBM images.
[ "Read", "a", "PNM", "header", "returning", "(", "format", "width", "height", "depth", "maxval", ")", ".", "width", "and", "height", "are", "in", "pixels", ".", "depth", "is", "the", "number", "of", "channels", "in", "the", "image", ";", "for", "PBM", "and", "PGM", "it", "is", "synthesized", "as", "1", "for", "PPM", "as", "3", ";", "for", "PAM", "images", "it", "is", "read", "from", "the", "header", ".", "maxval", "is", "synthesized", "(", "as", "1", ")", "for", "PBM", "images", "." ]
def read_pnm_header(infile, supported=('P5','P6')): """ Read a PNM header, returning (format,width,height,depth,maxval). `width` and `height` are in pixels. `depth` is the number of channels in the image; for PBM and PGM it is synthesized as 1, for PPM as 3; for PAM images it is read from the header. `maxval` is synthesized (as 1) for PBM images. """ # Generally, see http://netpbm.sourceforge.net/doc/ppm.html # and http://netpbm.sourceforge.net/doc/pam.html supported = [strtobytes(x) for x in supported] # Technically 'P7' must be followed by a newline, so by using # rstrip() we are being liberal in what we accept. I think this # is acceptable. type = infile.read(3).rstrip() if type not in supported: raise NotImplementedError('file format %s not supported' % type) if type == strtobytes('P7'): # PAM header parsing is completely different. return read_pam_header(infile) # Expected number of tokens in header (3 for P4, 4 for P6) expected = 4 pbm = ('P1', 'P4') if type in pbm: expected = 3 header = [type] # We have to read the rest of the header byte by byte because the # final whitespace character (immediately following the MAXVAL in # the case of P6) may not be a newline. Of course all PNM files in # the wild use a newline at this point, so it's tempting to use # readline; but it would be wrong. def getc(): c = infile.read(1) if not c: raise Error('premature EOF reading PNM header') return c c = getc() while True: # Skip whitespace that precedes a token. while c.isspace(): c = getc() # Skip comments. while c == '#': while c not in '\n\r': c = getc() if not c.isdigit(): raise Error('unexpected character %s found in header' % c) # According to the specification it is legal to have comments # that appear in the middle of a token. # This is bonkers; I've never seen it; and it's a bit awkward to # code good lexers in Python (no goto). So we break on such # cases. token = strtobytes('') while c.isdigit(): token += c c = getc() # Slight hack. All "tokens" are decimal integers, so convert # them here. header.append(int(token)) if len(header) == expected: break # Skip comments (again) while c == '#': while c not in '\n\r': c = getc() if not c.isspace(): raise Error('expected header to end with whitespace, not %s' % c) if type in pbm: # synthesize a MAXVAL header.append(1) depth = (1,3)[type == strtobytes('P6')] return header[0], header[1], header[2], depth, header[3]
[ "def", "read_pnm_header", "(", "infile", ",", "supported", "=", "(", "'P5'", ",", "'P6'", ")", ")", ":", "# Generally, see http://netpbm.sourceforge.net/doc/ppm.html", "# and http://netpbm.sourceforge.net/doc/pam.html", "supported", "=", "[", "strtobytes", "(", "x", ")", "for", "x", "in", "supported", "]", "# Technically 'P7' must be followed by a newline, so by using", "# rstrip() we are being liberal in what we accept. I think this", "# is acceptable.", "type", "=", "infile", ".", "read", "(", "3", ")", ".", "rstrip", "(", ")", "if", "type", "not", "in", "supported", ":", "raise", "NotImplementedError", "(", "'file format %s not supported'", "%", "type", ")", "if", "type", "==", "strtobytes", "(", "'P7'", ")", ":", "# PAM header parsing is completely different.", "return", "read_pam_header", "(", "infile", ")", "# Expected number of tokens in header (3 for P4, 4 for P6)", "expected", "=", "4", "pbm", "=", "(", "'P1'", ",", "'P4'", ")", "if", "type", "in", "pbm", ":", "expected", "=", "3", "header", "=", "[", "type", "]", "# We have to read the rest of the header byte by byte because the", "# final whitespace character (immediately following the MAXVAL in", "# the case of P6) may not be a newline. Of course all PNM files in", "# the wild use a newline at this point, so it's tempting to use", "# readline; but it would be wrong.", "def", "getc", "(", ")", ":", "c", "=", "infile", ".", "read", "(", "1", ")", "if", "not", "c", ":", "raise", "Error", "(", "'premature EOF reading PNM header'", ")", "return", "c", "c", "=", "getc", "(", ")", "while", "True", ":", "# Skip whitespace that precedes a token.", "while", "c", ".", "isspace", "(", ")", ":", "c", "=", "getc", "(", ")", "# Skip comments.", "while", "c", "==", "'#'", ":", "while", "c", "not", "in", "'\\n\\r'", ":", "c", "=", "getc", "(", ")", "if", "not", "c", ".", "isdigit", "(", ")", ":", "raise", "Error", "(", "'unexpected character %s found in header'", "%", "c", ")", "# According to the specification it is legal to have comments", "# that appear in the middle of a token.", "# This is bonkers; I've never seen it; and it's a bit awkward to", "# code good lexers in Python (no goto). So we break on such", "# cases.", "token", "=", "strtobytes", "(", "''", ")", "while", "c", ".", "isdigit", "(", ")", ":", "token", "+=", "c", "c", "=", "getc", "(", ")", "# Slight hack. All \"tokens\" are decimal integers, so convert", "# them here.", "header", ".", "append", "(", "int", "(", "token", ")", ")", "if", "len", "(", "header", ")", "==", "expected", ":", "break", "# Skip comments (again)", "while", "c", "==", "'#'", ":", "while", "c", "not", "in", "'\\n\\r'", ":", "c", "=", "getc", "(", ")", "if", "not", "c", ".", "isspace", "(", ")", ":", "raise", "Error", "(", "'expected header to end with whitespace, not %s'", "%", "c", ")", "if", "type", "in", "pbm", ":", "# synthesize a MAXVAL", "header", ".", "append", "(", "1", ")", "depth", "=", "(", "1", ",", "3", ")", "[", "type", "==", "strtobytes", "(", "'P6'", ")", "]", "return", "header", "[", "0", "]", ",", "header", "[", "1", "]", ",", "header", "[", "2", "]", ",", "depth", ",", "header", "[", "3", "]" ]
https://github.com/floooh/oryol/blob/eb08cffe1b1cb6b05ed14ec692bca9372cef064e/fips-files/generators/util/png.py#L3511-L3588
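Feeding the parser an in-memory PGM stream shows the returned tuple. A Python 2-flavoured sketch, since this vendored copy mixes bytes/str comparisons that assume Python 2 semantics (on Python 3 the format field may come back as bytes):

```python
import io

# P5 (binary PGM), 2x2 pixels, maxval 255, followed by 4 pixel bytes.
stream = io.BytesIO(b'P5\n2 2\n255\n\x00\x01\x02\x03')
print(read_pnm_header(stream))
# -> ('P5', 2, 2, 1, 255)  i.e. (format, width, height, depth, maxval)
```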
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/python/ops/image_ops.py
python
random_saturation
(image, lower, upper, seed=None)
return adjust_saturation(image, saturation_factor)
Adjust the saturation of an RGB image by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper]`. Args: image: RGB image or images. Size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`.
Adjust the saturation of an RGB image by a random factor.
[ "Adjust", "the", "saturation", "of", "an", "RGB", "image", "by", "a", "random", "factor", "." ]
def random_saturation(image, lower, upper, seed=None): """Adjust the saturation of an RGB image by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper]`. Args: image: RGB image or images. Size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') # Pick a float in [lower, upper] saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_saturation(image, saturation_factor)
[ "def", "random_saturation", "(", "image", ",", "lower", ",", "upper", ",", "seed", "=", "None", ")", ":", "if", "upper", "<=", "lower", ":", "raise", "ValueError", "(", "'upper must be > lower.'", ")", "if", "lower", "<", "0", ":", "raise", "ValueError", "(", "'lower must be non-negative.'", ")", "# Pick a float in [lower, upper]", "saturation_factor", "=", "random_ops", ".", "random_uniform", "(", "[", "]", ",", "lower", ",", "upper", ",", "seed", "=", "seed", ")", "return", "adjust_saturation", "(", "image", ",", "saturation_factor", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/image_ops.py#L1272-L1301
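A minimal NumPy sketch of the pattern this record documents — draw one factor uniformly from [lower, upper], then apply a deterministic adjustment. `adjust_saturation_np` is a hypothetical stand-in for the TensorFlow `adjust_saturation()` op, approximating saturation scaling by blending each pixel with its grayscale value:

```python
import numpy as np

def adjust_saturation_np(image, factor):
    # Hypothetical stand-in for adjust_saturation(): blend with grayscale
    # (factor 0 -> fully gray, 1 -> unchanged, >1 -> more saturated).
    gray = image.mean(axis=-1, keepdims=True)
    return np.clip(gray + factor * (image - gray), 0.0, 1.0)

def random_saturation_np(image, lower, upper, rng):
    if upper <= lower:
        raise ValueError('upper must be > lower.')
    if lower < 0:
        raise ValueError('lower must be non-negative.')
    factor = rng.uniform(lower, upper)  # mirrors random_uniform([], lower, upper)
    return adjust_saturation_np(image, factor)

rng = np.random.default_rng(0)
out = random_saturation_np(rng.random((4, 4, 3)), 0.5, 1.5, rng)
```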
synfig/synfig
a5ec91db5b751dc12e4400ccfb5c063fd6d2d928
synfig-studio/plugins/lottie-exporter/common/WidthPointList.py
python
WidthPointList.get_len
(self)
return len(self.entry_list)
Returns the number of entries
Returns the number of entries
[ "Returns", "the", "number", "of", "entries" ]
def get_len(self): """ Returns the number of entries """ return len(self.entry_list)
[ "def", "get_len", "(", "self", ")", ":", "return", "len", "(", "self", ".", "entry_list", ")" ]
https://github.com/synfig/synfig/blob/a5ec91db5b751dc12e4400ccfb5c063fd6d2d928/synfig-studio/plugins/lottie-exporter/common/WidthPointList.py#L103-L107
NVIDIA/nvvl
a94c7493ec9f309cc54acf81a66c62a068a06962
examples/pytorch_superres/nvidia/fp16.py
python
FP16_Optimizer.clip_fp32_grads
(self, max_norm, norm_type=2)
Clips fp32 master gradients via torch.nn.utils.clip_grad_norm. Args: max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the current fp32 gradients (viewed as a single vector). .. warning:: Returns -1 if the most recently computed fp16 gradients overflowed (that is, if self.overflow is True).
Clips fp32 master gradients via torch.nn.utils.clip_grad_norm.
[ "Clips", "fp32", "master", "gradients", "via", "torch", ".", "nn", ".", "utils", ".", "clip_grad_norm", "." ]
def clip_fp32_grads(self, max_norm, norm_type=2): """ Clips fp32 master gradients via torch.nn.utils.clip_grad_norm. Args: max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the current fp32 gradients (viewed as a single vector). .. warning:: Returns -1 if the most recently computed fp16 gradients overflowed (that is, if self.overflow is True). """ if not self.overflow: fp32_params = [] for param_group in self.optimizer.param_groups: for param in param_group['params']: fp32_params.append(param) return torch.nn.utils.clip_grad_norm(fp32_params, max_norm, norm_type) else: return -1
[ "def", "clip_fp32_grads", "(", "self", ",", "max_norm", ",", "norm_type", "=", "2", ")", ":", "if", "not", "self", ".", "overflow", ":", "fp32_params", "=", "[", "]", "for", "param_group", "in", "self", ".", "optimizer", ".", "param_groups", ":", "for", "param", "in", "param_group", "[", "'params'", "]", ":", "fp32_params", ".", "append", "(", "param", ")", "return", "torch", ".", "nn", ".", "utils", ".", "clip_grad_norm", "(", "fp32_params", ",", "max_norm", ",", "norm_type", ")", "else", ":", "return", "-", "1" ]
https://github.com/NVIDIA/nvvl/blob/a94c7493ec9f309cc54acf81a66c62a068a06962/examples/pytorch_superres/nvidia/fp16.py#L165-L187
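The same flatten-then-clip pattern as a standalone sketch. It uses `torch.nn.utils.clip_grad_norm_`, the in-place successor of the deprecated `clip_grad_norm` call above, and takes the overflow flag as an argument instead of reading it from the optimizer wrapper:

```python
import torch

def clip_master_grads(optimizer, max_norm, overflow, norm_type=2):
    # Skip clipping entirely when the fp16 gradients overflowed; otherwise
    # clip the flat list of fp32 master parameters as a single vector.
    if overflow:
        return -1
    fp32_params = [p for group in optimizer.param_groups
                   for p in group['params']]
    return torch.nn.utils.clip_grad_norm_(fp32_params, max_norm,
                                          norm_type=norm_type)
```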
nodejs/nan
8db8c8f544f2b6ce1b0859ef6ecdd0a3873a9e62
cpplint.py
python
CheckForFunctionLengths
(filename, clean_lines, linenum, function_state, error)
Reports for long function bodies. For an overview why this is done, see: https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found.
Reports for long function bodies.
[ "Reports", "for", "long", "function", "bodies", "." ]
def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): """Reports for long function bodies. For an overview why this is done, see: https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ lines = clean_lines.lines line = lines[linenum] joined_line = '' starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... match_result = Match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( not Match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False for start_linenum in xrange(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() if Search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore if Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count()
[ "def", "CheckForFunctionLengths", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "function_state", ",", "error", ")", ":", "lines", "=", "clean_lines", ".", "lines", "line", "=", "lines", "[", "linenum", "]", "joined_line", "=", "''", "starting_func", "=", "False", "regexp", "=", "r'(\\w(\\w|::|\\*|\\&|\\s)*)\\('", "# decls * & space::name( ...", "match_result", "=", "Match", "(", "regexp", ",", "line", ")", "if", "match_result", ":", "# If the name is all caps and underscores, figure it's a macro and", "# ignore it, unless it's TEST or TEST_F.", "function_name", "=", "match_result", ".", "group", "(", "1", ")", ".", "split", "(", ")", "[", "-", "1", "]", "if", "function_name", "==", "'TEST'", "or", "function_name", "==", "'TEST_F'", "or", "(", "not", "Match", "(", "r'[A-Z_]+$'", ",", "function_name", ")", ")", ":", "starting_func", "=", "True", "if", "starting_func", ":", "body_found", "=", "False", "for", "start_linenum", "in", "xrange", "(", "linenum", ",", "clean_lines", ".", "NumLines", "(", ")", ")", ":", "start_line", "=", "lines", "[", "start_linenum", "]", "joined_line", "+=", "' '", "+", "start_line", ".", "lstrip", "(", ")", "if", "Search", "(", "r'(;|})'", ",", "start_line", ")", ":", "# Declarations and trivial functions", "body_found", "=", "True", "break", "# ... ignore", "if", "Search", "(", "r'{'", ",", "start_line", ")", ":", "body_found", "=", "True", "function", "=", "Search", "(", "r'((\\w|:)*)\\('", ",", "line", ")", ".", "group", "(", "1", ")", "if", "Match", "(", "r'TEST'", ",", "function", ")", ":", "# Handle TEST... macros", "parameter_regexp", "=", "Search", "(", "r'(\\(.*\\))'", ",", "joined_line", ")", "if", "parameter_regexp", ":", "# Ignore bad syntax", "function", "+=", "parameter_regexp", ".", "group", "(", "1", ")", "else", ":", "function", "+=", "'()'", "function_state", ".", "Begin", "(", "function", ")", "break", "if", "not", "body_found", ":", "# No body for the function (or evidence of a non-function) was found.", "error", "(", "filename", ",", "linenum", ",", "'readability/fn_size'", ",", "5", ",", "'Lint failed to find start of function body.'", ")", "elif", "Match", "(", "r'^\\}\\s*$'", ",", "line", ")", ":", "# function end", "function_state", ".", "Check", "(", "error", ",", "filename", ",", "linenum", ")", "function_state", ".", "End", "(", ")", "elif", "not", "Match", "(", "r'^\\s*$'", ",", "line", ")", ":", "function_state", ".", "Count", "(", ")" ]
https://github.com/nodejs/nan/blob/8db8c8f544f2b6ce1b0859ef6ecdd0a3873a9e62/cpplint.py#L3283-L3348
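The declaration-start heuristic above hinges on one regex plus an all-caps macro filter; running the same two patterns over a few sample lines shows what gets treated as a function versus a macro:

```python
import re

regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # same pattern as in the function above
for line in ['void Foo::Bar(int x) {', 'MY_MACRO(x);', 'TEST(Suite, Name) {']:
    m = re.match(regexp, line)
    name = m.group(1).split()[-1] if m else None
    # All-caps-and-underscores names are assumed to be macros, except TEST/TEST_F.
    is_macro = bool(name) and bool(re.match(r'[A-Z_]+$', name)) \
        and name not in ('TEST', 'TEST_F')
    print('%-25s name=%-10s treated_as_macro=%s' % (line, name, is_macro))
```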
rapidsai/cudf
d5b2448fc69f17509304d594f029d0df56984962
python/cudf/cudf/core/series.py
python
DatetimeProperties.is_month_start
(self)
return (self.day == 1).fillna(False)
Booleans indicating if dates are the first day of the month.
Booleans indicating if dates are the first day of the month.
[ "Booleans", "indicating", "if", "dates", "are", "the", "first", "day", "of", "the", "month", "." ]
def is_month_start(self): """ Booleans indicating if dates are the first day of the month. """ return (self.day == 1).fillna(False)
[ "def", "is_month_start", "(", "self", ")", ":", "return", "(", "self", ".", "day", "==", "1", ")", ".", "fillna", "(", "False", ")" ]
https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/series.py#L4082-L4086
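cuDF mirrors the pandas datetime accessor here, so the semantics can be checked against pandas directly; the `fillna(False)` only matters when the series contains nulls:

```python
import pandas as pd

s = pd.Series(pd.to_datetime(['2021-01-01', '2021-01-15', '2021-02-01']))
print(s.dt.is_month_start.tolist())  # [True, False, True]
```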
cliqz-oss/keyvi
957cb7197b2bff78d3afb22ad95e092f89afb0a6
keyvi/3rdparty/tpie/scripts/stream_header.py
python
read
(file_name, output_file)
Read the TPIE stream in `file_name` (str), and print its header as JSON to `output_file` (file).
Read the TPIE stream in `file_name` (str), and print its header as JSON to `output_file` (file).
[ "Read", "the", "TPIE", "stream", "in", "file_name", "(", "str", ")", "and", "print", "its", "header", "as", "JSON", "to", "output_file", "(", "file", ")", "." ]
def read(file_name, output_file): """Read the TPIE stream in `file_name` (str), and print its header as JSON to `output_file` (file).""" with open(file_name, 'rb') as stream: values = stream_header.unpack_from(stream.read(stream_header.size)) header_data = dict(zip(field_names, values)) json.dump(header_data, output_file, indent=0, sort_keys=True, separators=(',', ': ')) output_file.write('\n') output_file.flush()
[ "def", "read", "(", "file_name", ",", "output_file", ")", ":", "with", "open", "(", "file_name", ",", "'rb'", ")", "as", "stream", ":", "values", "=", "stream_header", ".", "unpack_from", "(", "stream", ".", "read", "(", "stream_header", ".", "size", ")", ")", "header_data", "=", "dict", "(", "zip", "(", "field_names", ",", "values", ")", ")", "json", ".", "dump", "(", "header_data", ",", "output_file", ",", "indent", "=", "0", ",", "sort_keys", "=", "True", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", "output_file", ".", "write", "(", "'\\n'", ")", "output_file", ".", "flush", "(", ")" ]
https://github.com/cliqz-oss/keyvi/blob/957cb7197b2bff78d3afb22ad95e092f89afb0a6/keyvi/3rdparty/tpie/scripts/stream_header.py#L62-L70
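`stream_header` and `field_names` are module-level definitions not shown in this record. A self-contained sketch with a hypothetical three-field layout illustrates the unpack-and-zip pattern the function relies on (the real TPIE header has many more fields):

```python
import json
import struct
import sys

# Hypothetical layout, purely for illustration.
stream_header = struct.Struct('<QQQ')
field_names = ('magic', 'version', 'item_size')

raw = stream_header.pack(0x1234, 2, 8)
header_data = dict(zip(field_names, stream_header.unpack_from(raw)))
json.dump(header_data, sys.stdout, indent=0, sort_keys=True,
          separators=(',', ': '))
sys.stdout.write('\n')
```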
microsoft/checkedc-clang
a173fefde5d7877b7750e7ce96dd08cf18baebf2
lldb/examples/python/mach_o.py
python
TerminalColors.white
(self, fg=True)
return ''
Set the foreground or background color to white. The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.
Set the foreground or background color to white. The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.
[ "Set", "the", "foreground", "or", "background", "color", "to", "white", ".", "The", "foreground", "color", "will", "be", "set", "if", "fg", "tests", "True", ".", "The", "background", "color", "will", "be", "set", "if", "fg", "tests", "False", "." ]
def white(self, fg=True): '''Set the foreground or background color to white. The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.''' if self.enabled: if fg: return "\x1b[37m" else: return "\x1b[47m" return ''
[ "def", "white", "(", "self", ",", "fg", "=", "True", ")", ":", "if", "self", ".", "enabled", ":", "if", "fg", ":", "return", "\"\\x1b[37m\"", "else", ":", "return", "\"\\x1b[47m\"", "return", "''" ]
https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/lldb/examples/python/mach_o.py#L341-L349
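The escape sequences returned above are standard SGR codes — 37 selects a white foreground, 47 a white background, and 0 resets — which any ANSI-capable terminal will render:

```python
# SGR codes used by the method above: 37 = white fg, 47 = white bg, 0 = reset.
print("\x1b[37mwhite foreground\x1b[0m and \x1b[47mwhite background\x1b[0m")
```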
naver/sling
5671cd445a2caae0b4dd0332299e4cfede05062c
webkit/Tools/Scripts/webkitpy/xcode/simulator.py
python
Simulator.lookup_or_create_device
(self, name, device_type, runtime)
return testing_device
Returns an available iOS Simulator device for testing. This function will create a new simulator device with the specified name, device type and runtime if one does not already exist. :param name: The name of the simulator device to lookup or create. :type name: str :param device_type: The CoreSimulator device type. :type device_type: DeviceType :param runtime: The CoreSimulator runtime. :type runtime: Runtime :return: A dictionary describing the device. :rtype: Device
Returns an available iOS Simulator device for testing.
[ "Returns", "an", "available", "iOS", "Simulator", "device", "for", "testing", "." ]
def lookup_or_create_device(self, name, device_type, runtime): """ Returns an available iOS Simulator device for testing. This function will create a new simulator device with the specified name, device type and runtime if one does not already exist. :param name: The name of the simulator device to lookup or create. :type name: str :param device_type: The CoreSimulator device type. :type device_type: DeviceType :param runtime: The CoreSimulator runtime. :type runtime: Runtime :return: A dictionary describing the device. :rtype: Device """ assert(runtime.available) testing_device = self.device(name=name, runtime=runtime, should_ignore_unavailable_devices=True) if testing_device: return testing_device testing_device = Device.create(name, device_type, runtime) assert(testing_device.available) return testing_device
[ "def", "lookup_or_create_device", "(", "self", ",", "name", ",", "device_type", ",", "runtime", ")", ":", "assert", "(", "runtime", ".", "available", ")", "testing_device", "=", "self", ".", "device", "(", "name", "=", "name", ",", "runtime", "=", "runtime", ",", "should_ignore_unavailable_devices", "=", "True", ")", "if", "testing_device", ":", "return", "testing_device", "testing_device", "=", "Device", ".", "create", "(", "name", ",", "device_type", ",", "runtime", ")", "assert", "(", "testing_device", ".", "available", ")", "return", "testing_device" ]
https://github.com/naver/sling/blob/5671cd445a2caae0b4dd0332299e4cfede05062c/webkit/Tools/Scripts/webkitpy/xcode/simulator.py#L496-L518
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/boto/boto/glacier/utils.py
python
tree_hash_from_str
(str_as_bytes)
return bytes_to_hex(tree_hash(chunk_hashes(str_as_bytes)))
:type str_as_bytes: str :param str_as_bytes: The string for which to compute the tree hash. :rtype: str :return: The computed tree hash, returned as hex.
[]
def tree_hash_from_str(str_as_bytes): """ :type str_as_bytes: str :param str_as_bytes: The string for which to compute the tree hash. :rtype: str :return: The computed tree hash, returned as hex. """ return bytes_to_hex(tree_hash(chunk_hashes(str_as_bytes)))
[ "def", "tree_hash_from_str", "(", "str_as_bytes", ")", ":", "return", "bytes_to_hex", "(", "tree_hash", "(", "chunk_hashes", "(", "str_as_bytes", ")", ")", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/glacier/utils.py#L152-L162
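`chunk_hashes`, `tree_hash` and `bytes_to_hex` are sibling helpers in the same module; a self-contained sketch of the scheme they implement — SHA-256 over 1 MiB leaves, then pairwise combination up to a single root digest:

```python
import hashlib

def chunk_hashes(data, chunk_size=1024 * 1024):
    # One SHA-256 digest per 1 MiB chunk (the tree-hash leaf size).
    return [hashlib.sha256(data[i:i + chunk_size]).digest()
            for i in range(0, max(len(data), 1), chunk_size)]

def tree_hash(hashes):
    # Combine digests pairwise until one root remains; an odd digest
    # at the end of a level is promoted unchanged.
    while len(hashes) > 1:
        level = [hashlib.sha256(a + b).digest()
                 for a, b in zip(hashes[::2], hashes[1::2])]
        if len(hashes) % 2:
            level.append(hashes[-1])
        hashes = level
    return hashes[0]

print(tree_hash(chunk_hashes(b'hello')).hex())
```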
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/coverage/coverage/html.py
python
HtmlStatus.reset
(self)
Initialize to empty.
Initialize to empty.
[ "Initialize", "to", "empty", "." ]
def reset(self): """Initialize to empty.""" self.settings = '' self.files = {}
[ "def", "reset", "(", "self", ")", ":", "self", ".", "settings", "=", "''", "self", ".", "files", "=", "{", "}" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/coverage/coverage/html.py#L339-L342
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py
python
fake_learned_scale_quant_perchannel_grad_d_compute
(dout, input_data, alpha_data, quant_max_data, neg_trunc, kernel_name="fake_learned_scale_quant_perchannel_grad_d")
return [dx, dalpha_each]
FakeLearnedScaleQuantPerChannelGradD
FakeLearnedScaleQuantPerChannelGradD
[ "FakeLearnedScaleQuantPerChannelGradD" ]
def fake_learned_scale_quant_perchannel_grad_d_compute(dout, input_data, alpha_data, quant_max_data, neg_trunc, kernel_name="fake_learned_scale_quant_perchannel_grad_d"): """FakeLearnedScaleQuantPerChannelGradD""" input_shape = te.lang.cce.util.shape_to_list(input_data.shape) eps = tvm.const(1e-6, input_data.dtype) alpha_data = te.lang.cce.vcmpsel(te.lang.cce.vabs(alpha_data), eps, 'ge', alpha_data, eps) alpha_data = te.lang.cce.broadcast(alpha_data, input_shape, input_data.dtype) quant_max_data = te.lang.cce.broadcast(quant_max_data, input_shape, input_data.dtype) input_x = te.lang.cce.vdiv(input_data, alpha_data) input_div_alpha = input_x if neg_trunc: input_x = te.lang.cce.round_to(input_x, 1.0, 0.0) else: input_x = te.lang.cce.round_to(input_x, 1.0, -1.0) nudge_input = te.lang.cce.floor(te.lang.cce.vadds(te.lang.cce.vmul(input_x, quant_max_data), 0.5)) input_quant = te.lang.cce.vdiv(nudge_input, quant_max_data) dtype = input_div_alpha.dtype.lower() shape = te.lang.cce.util.shape_to_list(input_div_alpha.shape) dx = dout tensor_one = tvm.const(1.0, input_div_alpha.dtype) tensor_one = te.lang.cce.broadcast(tensor_one, shape) out_of_upper_bounds = te.lang.cce.vcmpsel(input_div_alpha, 1.0, 'gt', 1.0, 0.0) if neg_trunc: out_of_lower_bounds = te.lang.cce.vcmpsel(input_div_alpha, 0.0, 'lt', 1.0, 0.0) else: out_of_lower_bounds = te.lang.cce.vcmpsel(input_div_alpha, -1.0, 'lt', 1.0, 0.0) out_of_bounds = te.lang.cce.vadd(out_of_lower_bounds, out_of_upper_bounds) dx = te.lang.cce.vmul(dx, te.lang.cce.vsub(tensor_one, out_of_bounds)) sign = _sign_function(dtype, input_div_alpha) # The following lines are equivalent to : # dalpha_each = dout * sign if out of bounds # dout * (input_quant - input_div_alpha) if within bounds quant_error = te.lang.cce.vsub(input_quant, input_div_alpha) within_bounds = te.lang.cce.vsub(tensor_one, out_of_bounds) error_within_bounds = te.lang.cce.vmul(quant_error, within_bounds) grad_range = te.lang.cce.vmadd(sign, error_within_bounds, out_of_bounds) dalpha_each = te.lang.cce.vmul(dout, grad_range) return [dx, dalpha_each]
[ "def", "fake_learned_scale_quant_perchannel_grad_d_compute", "(", "dout", ",", "input_data", ",", "alpha_data", ",", "quant_max_data", ",", "neg_trunc", ",", "kernel_name", "=", "\"fake_learned_scale_quant_perchannel_grad_d\"", ")", ":", "input_shape", "=", "te", ".", "lang", ".", "cce", ".", "util", ".", "shape_to_list", "(", "input_data", ".", "shape", ")", "eps", "=", "tvm", ".", "const", "(", "1e-6", ",", "input_data", ".", "dtype", ")", "alpha_data", "=", "te", ".", "lang", ".", "cce", ".", "vcmpsel", "(", "te", ".", "lang", ".", "cce", ".", "vabs", "(", "alpha_data", ")", ",", "eps", ",", "'ge'", ",", "alpha_data", ",", "eps", ")", "alpha_data", "=", "te", ".", "lang", ".", "cce", ".", "broadcast", "(", "alpha_data", ",", "input_shape", ",", "input_data", ".", "dtype", ")", "quant_max_data", "=", "te", ".", "lang", ".", "cce", ".", "broadcast", "(", "quant_max_data", ",", "input_shape", ",", "input_data", ".", "dtype", ")", "input_x", "=", "te", ".", "lang", ".", "cce", ".", "vdiv", "(", "input_data", ",", "alpha_data", ")", "input_div_alpha", "=", "input_x", "if", "neg_trunc", ":", "input_x", "=", "te", ".", "lang", ".", "cce", ".", "round_to", "(", "input_x", ",", "1.0", ",", "0.0", ")", "else", ":", "input_x", "=", "te", ".", "lang", ".", "cce", ".", "round_to", "(", "input_x", ",", "1.0", ",", "-", "1.0", ")", "nudge_input", "=", "te", ".", "lang", ".", "cce", ".", "floor", "(", "te", ".", "lang", ".", "cce", ".", "vadds", "(", "te", ".", "lang", ".", "cce", ".", "vmul", "(", "input_x", ",", "quant_max_data", ")", ",", "0.5", ")", ")", "input_quant", "=", "te", ".", "lang", ".", "cce", ".", "vdiv", "(", "nudge_input", ",", "quant_max_data", ")", "dtype", "=", "input_div_alpha", ".", "dtype", ".", "lower", "(", ")", "shape", "=", "te", ".", "lang", ".", "cce", ".", "util", ".", "shape_to_list", "(", "input_div_alpha", ".", "shape", ")", "dx", "=", "dout", "tensor_one", "=", "tvm", ".", "const", "(", "1.0", ",", "input_div_alpha", ".", "dtype", ")", "tensor_one", "=", "te", ".", "lang", ".", "cce", ".", "broadcast", "(", "tensor_one", ",", "shape", ")", "out_of_upper_bounds", "=", "te", ".", "lang", ".", "cce", ".", "vcmpsel", "(", "input_div_alpha", ",", "1.0", ",", "'gt'", ",", "1.0", ",", "0.0", ")", "if", "neg_trunc", ":", "out_of_lower_bounds", "=", "te", ".", "lang", ".", "cce", ".", "vcmpsel", "(", "input_div_alpha", ",", "0.0", ",", "'lt'", ",", "1.0", ",", "0.0", ")", "else", ":", "out_of_lower_bounds", "=", "te", ".", "lang", ".", "cce", ".", "vcmpsel", "(", "input_div_alpha", ",", "-", "1.0", ",", "'lt'", ",", "1.0", ",", "0.0", ")", "out_of_bounds", "=", "te", ".", "lang", ".", "cce", ".", "vadd", "(", "out_of_lower_bounds", ",", "out_of_upper_bounds", ")", "dx", "=", "te", ".", "lang", ".", "cce", ".", "vmul", "(", "dx", ",", "te", ".", "lang", ".", "cce", ".", "vsub", "(", "tensor_one", ",", "out_of_bounds", ")", ")", "sign", "=", "_sign_function", "(", "dtype", ",", "input_div_alpha", ")", "# The following lines are equivalent to :", "# dalpha_each = dout * sign if out of bounds", "# dout * (input_quant - input_div_alpha) if within bounds", "quant_error", "=", "te", ".", "lang", ".", "cce", ".", "vsub", "(", "input_quant", ",", "input_div_alpha", ")", "within_bounds", "=", "te", ".", "lang", ".", "cce", ".", "vsub", "(", "tensor_one", ",", "out_of_bounds", ")", "error_within_bounds", "=", "te", ".", "lang", ".", "cce", ".", "vmul", "(", "quant_error", ",", "within_bounds", ")", "grad_range", "=", "te", ".", "lang", ".", "cce", ".", "vmadd", "(", "sign", ",", 
"error_within_bounds", ",", "out_of_bounds", ")", "dalpha_each", "=", "te", ".", "lang", ".", "cce", ".", "vmul", "(", "dout", ",", "grad_range", ")", "return", "[", "dx", ",", "dalpha_each", "]" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py#L92-L140
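The commented identity in the body — `dalpha = dout * sign` out of bounds, `dout * (input_quant - input_div_alpha)` within bounds — is easier to verify element-wise. A NumPy sketch of the same math, without the TBE vector ops or the per-channel broadcast (a simplification, not the production kernel):

```python
import numpy as np

def lsq_grad_np(dout, x, alpha, quant_max, neg_trunc=False):
    lower = 0.0 if neg_trunc else -1.0
    alpha = np.where(np.abs(alpha) >= 1e-6, alpha, 1e-6)   # eps clamp, as above
    x_div = x / alpha
    x_quant = np.floor(np.clip(x_div, lower, 1.0) * quant_max + 0.5) / quant_max
    out_of_bounds = (x_div > 1.0) | (x_div < lower)
    dx = dout * (1.0 - out_of_bounds)                      # grad blocked outside
    dalpha = dout * np.where(out_of_bounds, np.sign(x_div),
                             x_quant - x_div)              # scale gradient
    return dx, dalpha

dx, dalpha = lsq_grad_np(np.ones(3), np.array([-2.0, 0.3, 0.9]),
                         np.array(1.0), 127.0)
```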
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/frame.py
python
DataFrame.rename
( self, mapper: Optional[Renamer] = None, *, index: Optional[Renamer] = None, columns: Optional[Renamer] = None, axis: Optional[Axis] = None, copy: bool = True, inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", )
return super().rename( mapper=mapper, index=index, columns=columns, axis=axis, copy=copy, inplace=inplace, level=level, errors=errors, )
Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6
Alter axes labels.
[ "Alter", "axes", "labels", "." ]
def rename( self, mapper: Optional[Renamer] = None, *, index: Optional[Renamer] = None, columns: Optional[Renamer] = None, axis: Optional[Axis] = None, copy: bool = True, inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", ) -> Optional["DataFrame"]: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ return super().rename( mapper=mapper, index=index, columns=columns, axis=axis, copy=copy, inplace=inplace, level=level, errors=errors, )
[ "def", "rename", "(", "self", ",", "mapper", ":", "Optional", "[", "Renamer", "]", "=", "None", ",", "*", ",", "index", ":", "Optional", "[", "Renamer", "]", "=", "None", ",", "columns", ":", "Optional", "[", "Renamer", "]", "=", "None", ",", "axis", ":", "Optional", "[", "Axis", "]", "=", "None", ",", "copy", ":", "bool", "=", "True", ",", "inplace", ":", "bool", "=", "False", ",", "level", ":", "Optional", "[", "Level", "]", "=", "None", ",", "errors", ":", "str", "=", "\"ignore\"", ",", ")", "->", "Optional", "[", "\"DataFrame\"", "]", ":", "return", "super", "(", ")", ".", "rename", "(", "mapper", "=", "mapper", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "axis", "=", "axis", ",", "copy", "=", "copy", ",", "inplace", "=", "inplace", ",", "level", "=", "level", ",", "errors", "=", "errors", ",", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/frame.py#L4004-L4134
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/Alias.py
python
Alias.sconsign
(self)
An Alias is not recorded in .sconsign files
An Alias is not recorded in .sconsign files
[ "An", "Alias", "is", "not", "recorded", "in", ".", "sconsign", "files" ]
def sconsign(self): """An Alias is not recorded in .sconsign files""" pass
[ "def", "sconsign", "(", "self", ")", ":", "pass" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/Alias.py#L136-L138
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/platform/tf_logging.py
python
_get_thread_id
()
return thread_id & _THREAD_ID_MASK
Get id of current thread, suitable for logging as an unsigned quantity.
Get id of current thread, suitable for logging as an unsigned quantity.
[ "Get", "id", "of", "current", "thread", "suitable", "for", "logging", "as", "an", "unsigned", "quantity", "." ]
def _get_thread_id(): """Get id of current thread, suitable for logging as an unsigned quantity.""" # pylint: disable=protected-access thread_id = six.moves._thread.get_ident() # pylint:enable=protected-access return thread_id & _THREAD_ID_MASK
[ "def", "_get_thread_id", "(", ")", ":", "# pylint: disable=protected-access", "thread_id", "=", "six", ".", "moves", ".", "_thread", ".", "get_ident", "(", ")", "# pylint:enable=protected-access", "return", "thread_id", "&", "_THREAD_ID_MASK" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/platform/tf_logging.py#L236-L241
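On Python 3 the `six.moves` indirection is unnecessary. Assuming `_THREAD_ID_MASK` is a 64-bit mask (the actual constant is defined elsewhere in the module), an equivalent reads:

```python
import threading

_THREAD_ID_MASK = 2 ** 64 - 1  # assumed width; the real constant lives nearby

def get_thread_id():
    # threading.get_ident() replaces the six.moves._thread access above.
    return threading.get_ident() & _THREAD_ID_MASK

print(get_thread_id())
```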
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/buildscripts/resmokelib/logging/loggers.py
python
BaseLogger.__init__
(self, name, logging_config=None, build_logger_server=None, parent=None)
Initialize a BaseLogger. :param name: the logger name. :param logging_config: the logging configuration. :param build_logger_server: the build logger server (e.g. logkeeper). :param parent: the parent logger.
Initialize a BaseLogger.
[ "Initialize", "a", "BaseLogger", "." ]
def __init__(self, name, logging_config=None, build_logger_server=None, parent=None): """Initialize a BaseLogger. :param name: the logger name. :param logging_config: the logging configuration. :param build_logger_server: the build logger server (e.g. logkeeper). :param parent: the parent logger. """ logging.Logger.__init__(self, name, level=logging.DEBUG) self._logging_config = logging_config self._build_logger_server = build_logger_server if parent: self.parent = parent self.propagate = True
[ "def", "__init__", "(", "self", ",", "name", ",", "logging_config", "=", "None", ",", "build_logger_server", "=", "None", ",", "parent", "=", "None", ")", ":", "logging", ".", "Logger", ".", "__init__", "(", "self", ",", "name", ",", "level", "=", "logging", ".", "DEBUG", ")", "self", ".", "_logging_config", "=", "logging_config", "self", ".", "_build_logger_server", "=", "build_logger_server", "if", "parent", ":", "self", ".", "parent", "=", "parent", "self", ".", "propagate", "=", "True" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/resmokelib/logging/loggers.py#L53-L66
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py3/sklearn/covariance/_shrunk_covariance.py
python
shrunk_covariance
(emp_cov, shrinkage=0.1)
return shrunk_cov
Calculates a covariance matrix shrunk on the diagonal Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- emp_cov : array-like, shape (n_features, n_features) Covariance matrix to be shrunk shrinkage : float, 0 <= shrinkage <= 1 Coefficient in the convex combination used for the computation of the shrunk estimate. Returns ------- shrunk_cov : array-like Shrunk covariance. Notes ----- The regularized (shrunk) covariance is given by: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features
Calculates a covariance matrix shrunk on the diagonal
[ "Calculates", "a", "covariance", "matrix", "shrunk", "on", "the", "diagonal" ]
def shrunk_covariance(emp_cov, shrinkage=0.1): """Calculates a covariance matrix shrunk on the diagonal Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- emp_cov : array-like, shape (n_features, n_features) Covariance matrix to be shrunk shrinkage : float, 0 <= shrinkage <= 1 Coefficient in the convex combination used for the computation of the shrunk estimate. Returns ------- shrunk_cov : array-like Shrunk covariance. Notes ----- The regularized (shrunk) covariance is given by: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features """ emp_cov = check_array(emp_cov) n_features = emp_cov.shape[0] mu = np.trace(emp_cov) / n_features shrunk_cov = (1. - shrinkage) * emp_cov shrunk_cov.flat[::n_features + 1] += shrinkage * mu return shrunk_cov
[ "def", "shrunk_covariance", "(", "emp_cov", ",", "shrinkage", "=", "0.1", ")", ":", "emp_cov", "=", "check_array", "(", "emp_cov", ")", "n_features", "=", "emp_cov", ".", "shape", "[", "0", "]", "mu", "=", "np", ".", "trace", "(", "emp_cov", ")", "/", "n_features", "shrunk_cov", "=", "(", "1.", "-", "shrinkage", ")", "*", "emp_cov", "shrunk_cov", ".", "flat", "[", ":", ":", "n_features", "+", "1", "]", "+=", "shrinkage", "*", "mu", "return", "shrunk_cov" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/covariance/_shrunk_covariance.py#L25-L60
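The `flat[::n_features + 1]` stride walks exactly the main diagonal, so the body is the Notes formula verbatim; a small worked check:

```python
import numpy as np

emp_cov = np.array([[2.0, 0.3], [0.3, 1.0]])
shrinkage, n_features = 0.1, 2
mu = np.trace(emp_cov) / n_features          # (2.0 + 1.0) / 2 = 1.5

shrunk = (1.0 - shrinkage) * emp_cov
shrunk.flat[::n_features + 1] += shrinkage * mu  # diagonal stride trick

expected = (1.0 - shrinkage) * emp_cov + shrinkage * mu * np.identity(n_features)
assert np.allclose(shrunk, expected)
```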
RoboJackets/robocup-software
bce13ce53ddb2ecb9696266d980722c34617dc15
rj_gameplay/stp/role/constraint.py
python
has_ball
()
return constraint_fn
Creates a constraint function that returns true if the current robot has the ball. :return: Constraint function for current robot having the ball.
Creates a constraint function that returns true if the current robot has the ball. :return: Constraint function for current robot having the ball.
[ "Creates", "a", "constraint", "function", "that", "returns", "true", "if", "the", "current", "robot", "has", "the", "ball", ".", ":", "return", ":", "Constraint", "function", "for", "current", "robot", "having", "the", "ball", "." ]
def has_ball() -> role.ConstraintFn: """Creates a constraint function that returns true if the current robot has the ball. :return: Constraint function for current robot having the ball. """ def constraint_fn( robot: rc.Robot, prev_result: Optional[role.RoleResult], world_state: rc.WorldState, ) -> bool: return robot.has_ball_sense return constraint_fn
[ "def", "has_ball", "(", ")", "->", "role", ".", "ConstraintFn", ":", "def", "constraint_fn", "(", "robot", ":", "rc", ".", "Robot", ",", "prev_result", ":", "Optional", "[", "role", ".", "RoleResult", "]", ",", "world_state", ":", "rc", ".", "WorldState", ",", ")", "->", "bool", ":", "return", "robot", ".", "has_ball_sense", "return", "constraint_fn" ]
https://github.com/RoboJackets/robocup-software/blob/bce13ce53ddb2ecb9696266d980722c34617dc15/rj_gameplay/stp/role/constraint.py#L10-L23
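The factory returns a closure matching the `ConstraintFn` signature. With minimal stand-ins for the `rc` types (hypothetical, just enough to exercise the pattern outside the gameplay stack):

```python
from typing import Any, Callable, Optional


class Robot:
    # Stand-in for rc.Robot with only the field the constraint reads.
    def __init__(self, has_ball_sense: bool):
        self.has_ball_sense = has_ball_sense


ConstraintFn = Callable[[Robot, Optional[Any], Any], bool]


def has_ball() -> ConstraintFn:
    def constraint_fn(robot: Robot, prev_result: Optional[Any],
                      world_state: Any) -> bool:
        return robot.has_ball_sense
    return constraint_fn


check = has_ball()
assert check(Robot(True), None, None) and not check(Robot(False), None, None)
```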
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
demo/DPU-for-RNN/rnnt_asr_vck5000/model_rnnt.py
python
RNNT.predict
(self, y, state=None, add_sos=True)
return g, hid
B - batch size U - label length H - Hidden dimension size L - Number of decoder layers = 2 Args: y: (B, U) Returns: Tuple (g, hid) where: g: (B, U + 1, H) hid: (h, c) where h is the final sequence hidden state and c is the final cell state: h (tensor), shape (L, B, H) c (tensor), shape (L, B, H)
B - batch size U - label length H - Hidden dimension size L - Number of decoder layers = 2
[ "B", "-", "batch", "size", "U", "-", "label", "length", "H", "-", "Hidden", "dimension", "size", "L", "-", "Number", "of", "decoder", "layers", "=", "2" ]
def predict(self, y, state=None, add_sos=True): """ B - batch size U - label length H - Hidden dimension size L - Number of decoder layers = 2 Args: y: (B, U) Returns: Tuple (g, hid) where: g: (B, U + 1, H) hid: (h, c) where h is the final sequence hidden state and c is the final cell state: h (tensor), shape (L, B, H) c (tensor), shape (L, B, H) """ if isinstance(y, torch.Tensor): y = self.prediction["embed"](y) elif isinstance(y, torch.nn.utils.rnn.PackedSequence): # Teacher-forced training mode # (B, U) -> (B, U, H) y._replace(data=self.prediction["embed"](y.data)) else: # inference mode B = 1 if state is None else state[0].size(1) y = torch.zeros((B, 1, self.pred_n_hidden)).to( device=self.joint_net[0].weight.device, dtype=self.joint_net[0].weight.dtype ) # preprend blank "start of sequence" symbol if add_sos: B, U, H = y.shape start = torch.zeros((B, 1, H)).to(device=y.device, dtype=y.dtype) y = torch.cat([start, y], dim=1).contiguous() # (B, U + 1, H) else: start = None # makes del call later easier # if state is None: # batch = y.size(0) # state = [ # (torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device), # torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device)) # for _ in range(self.pred_rnn_layers) # ] y = y.transpose(0, 1) # .contiguous() # (U + 1, B, H) g, hid = self.prediction["dec_rnn"](y, state) g = g.transpose(0, 1) # .contiguous() # (B, U + 1, H) del y, start, state return g, hid
[ "def", "predict", "(", "self", ",", "y", ",", "state", "=", "None", ",", "add_sos", "=", "True", ")", ":", "if", "isinstance", "(", "y", ",", "torch", ".", "Tensor", ")", ":", "y", "=", "self", ".", "prediction", "[", "\"embed\"", "]", "(", "y", ")", "elif", "isinstance", "(", "y", ",", "torch", ".", "nn", ".", "utils", ".", "rnn", ".", "PackedSequence", ")", ":", "# Teacher-forced training mode", "# (B, U) -> (B, U, H)", "y", ".", "_replace", "(", "data", "=", "self", ".", "prediction", "[", "\"embed\"", "]", "(", "y", ".", "data", ")", ")", "else", ":", "# inference mode", "B", "=", "1", "if", "state", "is", "None", "else", "state", "[", "0", "]", ".", "size", "(", "1", ")", "y", "=", "torch", ".", "zeros", "(", "(", "B", ",", "1", ",", "self", ".", "pred_n_hidden", ")", ")", ".", "to", "(", "device", "=", "self", ".", "joint_net", "[", "0", "]", ".", "weight", ".", "device", ",", "dtype", "=", "self", ".", "joint_net", "[", "0", "]", ".", "weight", ".", "dtype", ")", "# preprend blank \"start of sequence\" symbol", "if", "add_sos", ":", "B", ",", "U", ",", "H", "=", "y", ".", "shape", "start", "=", "torch", ".", "zeros", "(", "(", "B", ",", "1", ",", "H", ")", ")", ".", "to", "(", "device", "=", "y", ".", "device", ",", "dtype", "=", "y", ".", "dtype", ")", "y", "=", "torch", ".", "cat", "(", "[", "start", ",", "y", "]", ",", "dim", "=", "1", ")", ".", "contiguous", "(", ")", "# (B, U + 1, H)", "else", ":", "start", "=", "None", "# makes del call later easier", "# if state is None:", "# batch = y.size(0)", "# state = [", "# (torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device),", "# torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device))", "# for _ in range(self.pred_rnn_layers)", "# ]", "y", "=", "y", ".", "transpose", "(", "0", ",", "1", ")", "# .contiguous() # (U + 1, B, H)", "g", ",", "hid", "=", "self", ".", "prediction", "[", "\"dec_rnn\"", "]", "(", "y", ",", "state", ")", "g", "=", "g", ".", "transpose", "(", "0", ",", "1", ")", "# .contiguous() # (B, U + 1, H)", "del", "y", ",", "start", ",", "state", "return", "g", ",", "hid" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/demo/DPU-for-RNN/rnnt_asr_vck5000/model_rnnt.py#L219-L271
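The `add_sos` branch is what turns a length-U label sequence into U + 1 decoder states; in isolation:

```python
import torch

B, U, H = 2, 5, 8
y = torch.randn(B, U, H)
start = torch.zeros((B, 1, H), dtype=y.dtype)  # blank "start of sequence"
y = torch.cat([start, y], dim=1).contiguous()
assert y.shape == (B, U + 1, H)                # one extra state per sequence
```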
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/resource_variable_ops.py
python
ResourceVariable.value
(self)
A cached operation which reads the value of this variable.
A cached operation which reads the value of this variable.
[ "A", "cached", "operation", "which", "reads", "the", "value", "of", "this", "variable", "." ]
def value(self): """A cached operation which reads the value of this variable.""" if self._cached_value is not None: return self._cached_value with ops.colocate_with(None, ignore_existing=True): with ops.device(self._handle.device): return gen_resource_variable_ops.read_variable_op( self._handle, dtype=self._dtype)
[ "def", "value", "(", "self", ")", ":", "if", "self", ".", "_cached_value", "is", "not", "None", ":", "return", "self", ".", "_cached_value", "with", "ops", ".", "colocate_with", "(", "None", ",", "ignore_existing", "=", "True", ")", ":", "with", "ops", ".", "device", "(", "self", ".", "_handle", ".", "device", ")", ":", "return", "gen_resource_variable_ops", ".", "read_variable_op", "(", "self", ".", "_handle", ",", "dtype", "=", "self", ".", "_dtype", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/resource_variable_ops.py#L294-L301
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_core.py
python
EvtHandler.Disconnect
(*args, **kwargs)
return _core_.EvtHandler_Disconnect(*args, **kwargs)
Disconnect(self, int id, int lastId=-1, EventType eventType=wxEVT_NULL, PyObject func=None) -> bool
Disconnect(self, int id, int lastId=-1, EventType eventType=wxEVT_NULL, PyObject func=None) -> bool
[ "Disconnect", "(", "self", "int", "id", "int", "lastId", "=", "-", "1", "EventType", "eventType", "=", "wxEVT_NULL", "PyObject", "func", "=", "None", ")", "-", ">", "bool" ]
def Disconnect(*args, **kwargs): """ Disconnect(self, int id, int lastId=-1, EventType eventType=wxEVT_NULL, PyObject func=None) -> bool """ return _core_.EvtHandler_Disconnect(*args, **kwargs)
[ "def", "Disconnect", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "EvtHandler_Disconnect", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L4184-L4189
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/python/framework/common_shapes.py
python
bias_add_grad_shape
(op)
return [output_shape]
Shape function for a BiasAddGrad op.
Shape function for a BiasAddGrad op.
[ "Shape", "function", "for", "a", "BiasAddGrad", "op", "." ]
def bias_add_grad_shape(op): """Shape function for a BiasAddGrad op.""" input_shape = op.inputs[0].get_shape().with_rank_at_least(2) try: data_format = op.get_attr("data_format") except ValueError: data_format = None if data_format == b"NCHW": output_shape = input_shape[-3] else: output_shape = input_shape[-1] return [output_shape]
[ "def", "bias_add_grad_shape", "(", "op", ")", ":", "input_shape", "=", "op", ".", "inputs", "[", "0", "]", ".", "get_shape", "(", ")", ".", "with_rank_at_least", "(", "2", ")", "try", ":", "data_format", "=", "op", ".", "get_attr", "(", "\"data_format\"", ")", "except", "ValueError", ":", "data_format", "=", "None", "if", "data_format", "==", "b\"NCHW\"", ":", "output_shape", "=", "input_shape", "[", "-", "3", "]", "else", ":", "output_shape", "=", "input_shape", "[", "-", "1", "]", "return", "[", "output_shape", "]" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/framework/common_shapes.py#L121-L134
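The negative indices encode where the channel dimension sits in each layout; the bias gradient always takes the shape of that dimension:

```python
nchw = (32, 16, 24, 24)  # N, C, H, W -> channels at index -3
nhwc = (32, 24, 24, 16)  # N, H, W, C -> channels at index -1
assert nchw[-3] == nhwc[-1] == 16
```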
yuxng/PoseCNN
9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04
lib/datasets/ycb.py
python
ycb.image_path_from_index
(self, index)
return image_path
Construct an image path from the image's "index" identifier.
Construct an image path from the image's "index" identifier.
[ "Construct", "an", "image", "path", "from", "the", "image", "s", "index", "identifier", "." ]
def image_path_from_index(self, index): """ Construct an image path from the image's "index" identifier. """ image_path = os.path.join(self._data_path, index + self._image_ext) assert os.path.exists(image_path), \ 'Path does not exist: {}'.format(image_path) return image_path
[ "def", "image_path_from_index", "(", "self", ",", "index", ")", ":", "image_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_data_path", ",", "index", "+", "self", ".", "_image_ext", ")", "assert", "os", ".", "path", ".", "exists", "(", "image_path", ")", ",", "'Path does not exist: {}'", ".", "format", "(", "image_path", ")", "return", "image_path" ]
https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/ycb.py#L54-L62
naver/sling
5671cd445a2caae0b4dd0332299e4cfede05062c
webkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
python
prepend_message_to_exception
(message, exc)
return
Prepend message to the exception.
Prepend message to the exception.
[ "Prepend", "message", "to", "the", "exception", "." ]
def prepend_message_to_exception(message, exc): """Prepend message to the exception.""" exc.args = (message + str(exc),) return
[ "def", "prepend_message_to_exception", "(", "message", ",", "exc", ")", ":", "exc", ".", "args", "=", "(", "message", "+", "str", "(", "exc", ")", ",", ")", "return" ]
https://github.com/naver/sling/blob/5671cd445a2caae0b4dd0332299e4cfede05062c/webkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py#L78-L82
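Rebinding `exc.args` is what makes the prefix show up in `str(exc)` and in the eventual traceback; the effect in a few lines:

```python
try:
    raise ValueError('bad frame')
except ValueError as exc:
    exc.args = ('while reading handshake: ' + str(exc),)  # what the helper does
    assert str(exc) == 'while reading handshake: bad frame'
```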
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
syzygy/py/etw_db/etw_db/process.py
python
ProcessThreadDatabase.__init__
(self, no_pruning=False)
Initializes a new database. Args: no_pruning: if true, process and thread information is maintained past process/thread end events.
Initializes a new database.
[ "Initializes", "a", "new", "database", "." ]
def __init__(self, no_pruning=False): """Initializes a new database. Args: no_pruning: if true, process and thread information is maintained past process/thread end events. """ EventConsumer.__init__(self) self._no_pruning = no_pruning # Processes by id maps from process ID to command line. self._processes_by_id = {} # Threads by id maps from thread ID to owning process ID. self._threads_by_id = {}
[ "def", "__init__", "(", "self", ",", "no_pruning", "=", "False", ")", ":", "EventConsumer", ".", "__init__", "(", "self", ")", "self", ".", "_no_pruning", "=", "no_pruning", "# Processes by id maps from process ID to command line.", "self", ".", "_processes_by_id", "=", "{", "}", "# Threads by id maps from thread ID to owning process ID.", "self", ".", "_threads_by_id", "=", "{", "}" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/syzygy/py/etw_db/etw_db/process.py#L44-L56
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/logging/handlers.py
python
SMTPHandler.getSubject
(self, record)
return self.subject
Determine the subject for the email. If you want to specify a subject line which is record-dependent, override this method.
Determine the subject for the email.
[ "Determine", "the", "subject", "for", "the", "email", "." ]
def getSubject(self, record): """ Determine the subject for the email. If you want to specify a subject line which is record-dependent, override this method. """ return self.subject
[ "def", "getSubject", "(", "self", ",", "record", ")", ":", "return", "self", ".", "subject" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/logging/handlers.py#L907-L914
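As the docstring suggests, a record-dependent subject is obtained by overriding this hook in a subclass; a minimal sketch (the mail host and addresses are placeholders):

```python
import logging.handlers


class DynamicSubjectHandler(logging.handlers.SMTPHandler):
    def getSubject(self, record):
        # Derive the subject from the record instead of the fixed string.
        return '[%s] %s' % (record.levelname, record.name)


handler = DynamicSubjectHandler('localhost', 'app@example.com',
                                ['ops@example.com'], 'unused default subject')
```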
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/index/package_finder.py
python
CandidatePreferences.__init__
( self, prefer_binary=False, # type: bool allow_all_prereleases=False, # type: bool )
:param allow_all_prereleases: Whether to allow all pre-releases.
[]
def __init__( self, prefer_binary=False, # type: bool allow_all_prereleases=False, # type: bool ): # type: (...) -> None """ :param allow_all_prereleases: Whether to allow all pre-releases. """ self.allow_all_prereleases = allow_all_prereleases self.prefer_binary = prefer_binary
[ "def", "__init__", "(", "self", ",", "prefer_binary", "=", "False", ",", "# type: bool", "allow_all_prereleases", "=", "False", ",", "# type: bool", ")", ":", "# type: (...) -> None", "self", ".", "allow_all_prereleases", "=", "allow_all_prereleases", "self", ".", "prefer_binary", "=", "prefer_binary" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/index/package_finder.py#L641-L661
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/distributions/transformed_distribution.py
python
_logical_not
(x)
return constant_op.constant(np.logical_not(x_))
Convenience function which attempts to statically apply `logical_not`.
Convenience function which attempts to statically apply `logical_not`.
[ "Convenience", "function", "which", "attempts", "to", "statically", "apply", "logical_not", "." ]
def _logical_not(x): """Convenience function which attempts to statically apply `logical_not`.""" x_ = _static_value(x) if x_ is None: return math_ops.logical_not(x) return constant_op.constant(np.logical_not(x_))
[ "def", "_logical_not", "(", "x", ")", ":", "x_", "=", "_static_value", "(", "x", ")", "if", "x_", "is", "None", ":", "return", "math_ops", ".", "logical_not", "(", "x", ")", "return", "constant_op", ".", "constant", "(", "np", ".", "logical_not", "(", "x_", ")", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/distributions/transformed_distribution.py#L73-L78
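The construction-time fold only needs a notion of "statically known value". A self-contained sketch where `_static_value` simply recognizes concrete NumPy/bool inputs — the graph-op fallback is elided, since it depends on the surrounding framework:

```python
import numpy as np

def _static_value(x):
    # Stand-in: values already materialized as bool/ndarray count as static.
    return x if isinstance(x, (bool, np.bool_, np.ndarray)) else None

def logical_not(x):
    x_ = _static_value(x)
    if x_ is None:
        raise NotImplementedError('symbolic input: would emit a graph op here')
    return np.logical_not(x_)  # folded to a constant at construction time

assert logical_not(np.array([True, False])).tolist() == [False, True]
```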
Samsung/veles
95ed733c2e49bc011ad98ccf2416ecec23fbf352
veles/external/pydot.py
python
Graph.set_simplify
(self, simplify)
Set whether to simplify or not. If True it will avoid displaying equal edges, i.e. only one edge between two nodes. removing the duplicated ones.
Set whether to simplify or not. If True it will avoid displaying equal edges, i.e. only one edge between two nodes. removing the duplicated ones.
[ "Set", "whether", "to", "simplify", "or", "not", ".", "If", "True", "it", "will", "avoid", "displaying", "equal", "edges", "i", ".", "e", ".", "only", "one", "edge", "between", "two", "nodes", ".", "removing", "the", "duplicated", "ones", "." ]
def set_simplify(self, simplify): """Set whether to simplify or not. If True it will avoid displaying equal edges, i.e. only one edge between two nodes. removing the duplicated ones. """ self.obj_dict['simplify'] = simplify
[ "def", "set_simplify", "(", "self", ",", "simplify", ")", ":", "self", ".", "obj_dict", "[", "'simplify'", "]", "=", "simplify" ]
https://github.com/Samsung/veles/blob/95ed733c2e49bc011ad98ccf2416ecec23fbf352/veles/external/pydot.py#L1152-L1160
scribusproject/scribus
41ec7c775a060912cf251682a8b1437f753f80f4
codegen/cheetah/Cheetah/Template.py
python
Template.webInput
(self, names, namesMulti=(), default='', src='f', defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False)
return dic
Method for importing web transaction variables in bulk. This works for GET/POST fields both in Webware servlets and in CGI scripts, and for cookies and session variables in Webware servlets. If you try to read a cookie or session variable in a CGI script, you'll get a RuntimeError. 'In a CGI script' here means 'not running as a Webware servlet'. If the CGI environment is not properly set up, Cheetah will act like there's no input. The public method provided is: def webInput(self, names, namesMulti=(), default='', src='f', defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False): This method places the specified GET/POST fields, cookies or session variables into a dictionary, which is both returned and put at the beginning of the searchList. It handles: * single vs multiple values * conversion to integer or float for specified names * default values/exceptions for missing or bad values * printing a snapshot of all values retrieved for debugging All the 'default*' and 'bad*' arguments have 'use or raise' behavior, meaning that if they're a subclass of Exception, they're raised. If they're anything else, that value is substituted for the missing/bad value. The simplest usage is: #silent $webInput(['choice']) $choice dic = self.webInput(['choice']) write(dic['choice']) Both these examples retrieves the GET/POST field 'choice' and print it. If you leave off the'#silent', all the values would be printed too. But a better way to preview the values is #silent $webInput(['name'], $debug=1) because this pretty-prints all the values inside HTML <PRE> tags. ** KLUDGE: 'debug' is supposed to insert into the template output, but it wasn't working so I changed it to a'print' statement. So the debugging output will appear wherever standard output is pointed, whether at the terminal, in a Webware log file, or whatever. *** Since we didn't specify any coversions, the value is a string. It's a 'single' value because we specified it in 'names' rather than 'namesMulti'. Single values work like this: * If one value is found, take it. * If several values are found, choose one arbitrarily and ignore the rest. * If no values are found, use or raise the appropriate 'default*' value. Multi values work like this: * If one value is found, put it in a list. * If several values are found, leave them in a list. * If no values are found, use the empty list ([]). The 'default*' arguments are *not* consulted in this case. Example: assume 'days' came from a set of checkboxes or a multiple combo box on a form, and the user chose'Monday', 'Tuesday' and 'Thursday'. #silent $webInput([], ['days']) The days you chose are: #slurp #for $day in $days $day #slurp #end for dic = self.webInput([], ['days']) write('The days you chose are: ') for day in dic['days']: write(day + ' ') Both these examples print: 'The days you chose are: Monday Tuesday Thursday'. By default, missing strings are replaced by '' and missing/bad numbers by zero. (A'bad number' means the converter raised an exception for it, usually because of non-numeric characters in the value.) This mimics Perl/PHP behavior, and simplifies coding for many applications where missing/bad values *should* be blank/zero. 
In those relatively few cases where you must distinguish between empty-string/zero on the one hand and missing/bad on the other, change the appropriate 'default*' and 'bad*' arguments to something like: * None * another constant value * $NonNumericInputError/self.NonNumericInputError * $ValueError/ValueError (NonNumericInputError is defined in this class and is useful for distinguishing between bad input vs a TypeError/ValueError thrown for some other reason.) Here's an example using multiple values to schedule newspaper deliveries. 'checkboxes' comes from a form with checkboxes for all the days of the week. The days the user previously chose are preselected. The user checks/unchecks boxes as desired and presses Submit. The value of 'checkboxes' is a list of checkboxes that were checked when Submit was pressed. Our task now is to turn on the days the user checked, turn off the days he unchecked, and leave on or off the days he didn't change. dic = self.webInput([], ['dayCheckboxes']) wantedDays = dic['dayCheckboxes'] # The days the user checked. for day, on in self.getAllValues(): if not on and wantedDays.has_key(day): self.TurnOn(day) # ... Set a flag or insert a database record ... elif on and not wantedDays.has_key(day): self.TurnOff(day) # ... Unset a flag or delete a database record ... 'source' allows you to look up the variables from a number of different sources: 'f' fields (CGI GET/POST parameters) 'c' cookies 's' session variables 'v' 'values', meaning fields or cookies In many forms, you're dealing only with strings, which is why the 'default' argument is third and the numeric arguments are banished to the end. But sometimes you want automatic number conversion, so that you can do numeric comparisons in your templates without having to write a bunch of conversion/exception handling code. Example: #silent $webInput(['name', 'height:int']) $name is $height cm tall. #if $height >= 300 Wow, you're tall! #else Pshaw, you're short. #end if dic = self.webInput(['name', 'height:int']) name = dic['name'] height = dic['height'] write('%s is %s cm tall.' % (name, height)) if height >= 300: write("Wow, you're tall!") else: write("Pshaw, you're short.") To convert a value to a number, suffix ':int' or ':float' to the name. The method will search first for a 'height:int' variable and then for a 'height' variable. (It will be called 'height' in the final dictionary.) If a numeric conversion fails, use or raise 'badInt' or 'badFloat'. Missing values work the same way as for strings, except the default is 'defaultInt' or 'defaultFloat' instead of 'default'. If a name represents an uploaded file, the entire file will be read into memory. For more sophisticated file-upload handling, leave that name out of the list and do your own handling, or wait for Cheetah.Utils.UploadFileMixin. This works only in a subclass that also inherits from Webware's Servlet or HTTPServlet. Otherwise you'll get an AttributeError on 'self.request'. EXCEPTIONS: ValueError if 'source' is not one of the stated characters. TypeError if a conversion suffix is not ':int' or ':float'. FUTURE EXPANSION: a future version of this method may allow source cascading; e.g., 'vs' would look first in 'values' and then in session variables. Meta-Data ================================================================================ Author: Mike Orr <[email protected]> License: This software is released for unlimited distribution under the terms of the MIT license. See the LICENSE file. 
Version: $Revision: 1.186 $ Start Date: 2002/03/17 Last Revision Date: $Date: 2008/03/10 04:48:11 $
Method for importing web transaction variables in bulk.
[ "Method", "for", "importing", "web", "transaction", "variables", "in", "bulk", "." ]
def webInput(self, names, namesMulti=(), default='', src='f', defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False): """Method for importing web transaction variables in bulk. This works for GET/POST fields both in Webware servlets and in CGI scripts, and for cookies and session variables in Webware servlets. If you try to read a cookie or session variable in a CGI script, you'll get a RuntimeError. 'In a CGI script' here means 'not running as a Webware servlet'. If the CGI environment is not properly set up, Cheetah will act like there's no input. The public method provided is: def webInput(self, names, namesMulti=(), default='', src='f', defaultInt=0, defaultFloat=0.00, badInt=0, badFloat=0.00, debug=False): This method places the specified GET/POST fields, cookies or session variables into a dictionary, which is both returned and put at the beginning of the searchList. It handles: * single vs multiple values * conversion to integer or float for specified names * default values/exceptions for missing or bad values * printing a snapshot of all values retrieved for debugging All the 'default*' and 'bad*' arguments have 'use or raise' behavior, meaning that if they're a subclass of Exception, they're raised. If they're anything else, that value is substituted for the missing/bad value. The simplest usage is: #silent $webInput(['choice']) $choice dic = self.webInput(['choice']) write(dic['choice']) Both these examples retrieve the GET/POST field 'choice' and print it. If you leave off the '#silent', all the values would be printed too. But a better way to preview the values is #silent $webInput(['name'], $debug=1) because this pretty-prints all the values inside HTML <PRE> tags. ** KLUDGE: 'debug' is supposed to insert into the template output, but it wasn't working so I changed it to a 'print' statement. So the debugging output will appear wherever standard output is pointed, whether at the terminal, in a Webware log file, or whatever. *** Since we didn't specify any conversions, the value is a string. It's a 'single' value because we specified it in 'names' rather than 'namesMulti'. Single values work like this: * If one value is found, take it. * If several values are found, choose one arbitrarily and ignore the rest. * If no values are found, use or raise the appropriate 'default*' value. Multi values work like this: * If one value is found, put it in a list. * If several values are found, leave them in a list. * If no values are found, use the empty list ([]). The 'default*' arguments are *not* consulted in this case. Example: assume 'days' came from a set of checkboxes or a multiple combo box on a form, and the user chose 'Monday', 'Tuesday' and 'Thursday'. #silent $webInput([], ['days']) The days you chose are: #slurp #for $day in $days $day #slurp #end for dic = self.webInput([], ['days']) write('The days you chose are: ') for day in dic['days']: write(day + ' ') Both these examples print: 'The days you chose are: Monday Tuesday Thursday'. By default, missing strings are replaced by '' and missing/bad numbers by zero. (A 'bad number' means the converter raised an exception for it, usually because of non-numeric characters in the value.) This mimics Perl/PHP behavior, and simplifies coding for many applications where missing/bad values *should* be blank/zero. 
In those relatively few cases where you must distinguish between empty-string/zero on the one hand and missing/bad on the other, change the appropriate 'default*' and 'bad*' arguments to something like: * None * another constant value * $NonNumericInputError/self.NonNumericInputError * $ValueError/ValueError (NonNumericInputError is defined in this class and is useful for distinguishing between bad input vs a TypeError/ValueError thrown for some other reason.) Here's an example using multiple values to schedule newspaper deliveries. 'checkboxes' comes from a form with checkboxes for all the days of the week. The days the user previously chose are preselected. The user checks/unchecks boxes as desired and presses Submit. The value of 'checkboxes' is a list of checkboxes that were checked when Submit was pressed. Our task now is to turn on the days the user checked, turn off the days he unchecked, and leave on or off the days he didn't change. dic = self.webInput([], ['dayCheckboxes']) wantedDays = dic['dayCheckboxes'] # The days the user checked. for day, on in self.getAllValues(): if not on and wantedDays.has_key(day): self.TurnOn(day) # ... Set a flag or insert a database record ... elif on and not wantedDays.has_key(day): self.TurnOff(day) # ... Unset a flag or delete a database record ... 'source' allows you to look up the variables from a number of different sources: 'f' fields (CGI GET/POST parameters) 'c' cookies 's' session variables 'v' 'values', meaning fields or cookies In many forms, you're dealing only with strings, which is why the 'default' argument is third and the numeric arguments are banished to the end. But sometimes you want automatic number conversion, so that you can do numeric comparisons in your templates without having to write a bunch of conversion/exception handling code. Example: #silent $webInput(['name', 'height:int']) $name is $height cm tall. #if $height >= 300 Wow, you're tall! #else Pshaw, you're short. #end if dic = self.webInput(['name', 'height:int']) name = dic['name'] height = dic['height'] write('%s is %s cm tall.' % (name, height)) if height >= 300: write("Wow, you're tall!") else: write("Pshaw, you're short.") To convert a value to a number, suffix ':int' or ':float' to the name. The method will search first for a 'height:int' variable and then for a 'height' variable. (It will be called 'height' in the final dictionary.) If a numeric conversion fails, use or raise 'badInt' or 'badFloat'. Missing values work the same way as for strings, except the default is 'defaultInt' or 'defaultFloat' instead of 'default'. If a name represents an uploaded file, the entire file will be read into memory. For more sophisticated file-upload handling, leave that name out of the list and do your own handling, or wait for Cheetah.Utils.UploadFileMixin. This works only in a subclass that also inherits from Webware's Servlet or HTTPServlet. Otherwise you'll get an AttributeError on 'self.request'. EXCEPTIONS: ValueError if 'source' is not one of the stated characters. TypeError if a conversion suffix is not ':int' or ':float'. FUTURE EXPANSION: a future version of this method may allow source cascading; e.g., 'vs' would look first in 'values' and then in session variables. Meta-Data ================================================================================ Author: Mike Orr <[email protected]> License: This software is released for unlimited distribution under the terms of the MIT license. See the LICENSE file. 
Version: $Revision: 1.186 $ Start Date: 2002/03/17 Last Revision Date: $Date: 2008/03/10 04:48:11 $ """ src = src.lower() isCgi = not self._CHEETAH__isControlledByWebKit if isCgi and src in ('f', 'v'): global _formUsedByWebInput if _formUsedByWebInput is None: _formUsedByWebInput = cgi.FieldStorage() source, func = 'field', _formUsedByWebInput.getvalue elif isCgi and src == 'c': raise RuntimeError("can't get cookies from a CGI script") elif isCgi and src == 's': raise RuntimeError("can't get session variables from a CGI script") elif isCgi and src == 'v': source, func = 'value', self.request().value elif isCgi and src == 's': source, func = 'session', self.request().session().value elif src == 'f': source, func = 'field', self.request().field elif src == 'c': source, func = 'cookie', self.request().cookie elif src == 'v': source, func = 'value', self.request().value elif src == 's': source, func = 'session', self.request().session().value else: raise TypeError("arg 'src' invalid") sources = source + 's' converters = { '': _Converter('string', None, default, default ), 'int': _Converter('int', int, defaultInt, badInt ), 'float': _Converter('float', float, defaultFloat, badFloat), } #pprint.pprint(locals()); return {} dic = {} # Destination. for name in names: k, v = _lookup(name, func, False, converters) dic[k] = v for name in namesMulti: k, v = _lookup(name, func, True, converters) dic[k] = v # At this point, 'dic' contains all the keys/values we want to keep. # We could split the method into a superclass # method for Webware/WebwareExperimental and a subclass for Cheetah. # The superclass would merely 'return dic'. The subclass would # 'dic = super(ThisClass, self).webInput(names, namesMulti, ...)' # and then the code below. if debug: print("<PRE>\n" + pprint.pformat(dic) + "\n</PRE>\n\n") self.searchList().insert(0, dic) return dic
[ "def", "webInput", "(", "self", ",", "names", ",", "namesMulti", "=", "(", ")", ",", "default", "=", "''", ",", "src", "=", "'f'", ",", "defaultInt", "=", "0", ",", "defaultFloat", "=", "0.00", ",", "badInt", "=", "0", ",", "badFloat", "=", "0.00", ",", "debug", "=", "False", ")", ":", "src", "=", "src", ".", "lower", "(", ")", "isCgi", "=", "not", "self", ".", "_CHEETAH__isControlledByWebKit", "if", "isCgi", "and", "src", "in", "(", "'f'", ",", "'v'", ")", ":", "global", "_formUsedByWebInput", "if", "_formUsedByWebInput", "is", "None", ":", "_formUsedByWebInput", "=", "cgi", ".", "FieldStorage", "(", ")", "source", ",", "func", "=", "'field'", ",", "_formUsedByWebInput", ".", "getvalue", "elif", "isCgi", "and", "src", "==", "'c'", ":", "raise", "RuntimeError", "(", "\"can't get cookies from a CGI script\"", ")", "elif", "isCgi", "and", "src", "==", "'s'", ":", "raise", "RuntimeError", "(", "\"can't get session variables from a CGI script\"", ")", "elif", "isCgi", "and", "src", "==", "'v'", ":", "source", ",", "func", "=", "'value'", ",", "self", ".", "request", "(", ")", ".", "value", "elif", "isCgi", "and", "src", "==", "'s'", ":", "source", ",", "func", "=", "'session'", ",", "self", ".", "request", "(", ")", ".", "session", "(", ")", ".", "value", "elif", "src", "==", "'f'", ":", "source", ",", "func", "=", "'field'", ",", "self", ".", "request", "(", ")", ".", "field", "elif", "src", "==", "'c'", ":", "source", ",", "func", "=", "'cookie'", ",", "self", ".", "request", "(", ")", ".", "cookie", "elif", "src", "==", "'v'", ":", "source", ",", "func", "=", "'value'", ",", "self", ".", "request", "(", ")", ".", "value", "elif", "src", "==", "'s'", ":", "source", ",", "func", "=", "'session'", ",", "self", ".", "request", "(", ")", ".", "session", "(", ")", ".", "value", "else", ":", "raise", "TypeError", "(", "\"arg 'src' invalid\"", ")", "sources", "=", "source", "+", "'s'", "converters", "=", "{", "''", ":", "_Converter", "(", "'string'", ",", "None", ",", "default", ",", "default", ")", ",", "'int'", ":", "_Converter", "(", "'int'", ",", "int", ",", "defaultInt", ",", "badInt", ")", ",", "'float'", ":", "_Converter", "(", "'float'", ",", "float", ",", "defaultFloat", ",", "badFloat", ")", ",", "}", "#pprint.pprint(locals()); return {}", "dic", "=", "{", "}", "# Destination.", "for", "name", "in", "names", ":", "k", ",", "v", "=", "_lookup", "(", "name", ",", "func", ",", "False", ",", "converters", ")", "dic", "[", "k", "]", "=", "v", "for", "name", "in", "namesMulti", ":", "k", ",", "v", "=", "_lookup", "(", "name", ",", "func", ",", "True", ",", "converters", ")", "dic", "[", "k", "]", "=", "v", "# At this point, 'dic' contains all the keys/values we want to keep.", "# We could split the method into a superclass", "# method for Webware/WebwareExperimental and a subclass for Cheetah.", "# The superclass would merely 'return dic'. The subclass would", "# 'dic = super(ThisClass, self).webInput(names, namesMulti, ...)'", "# and then the code below.", "if", "debug", ":", "print", "(", "\"<PRE>\\n\"", "+", "pprint", ".", "pformat", "(", "dic", ")", "+", "\"\\n</PRE>\\n\\n\"", ")", "self", ".", "searchList", "(", ")", ".", "insert", "(", "0", ",", "dic", ")", "return", "dic" ]
https://github.com/scribusproject/scribus/blob/41ec7c775a060912cf251682a8b1437f753f80f4/codegen/cheetah/Cheetah/Template.py#L1632-L1859
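To make the calling convention above concrete, here is a minimal, hedged sketch of driving webInput from a plain CGI-style environment. It assumes the classic Python 2-era Cheetah package and the legacy cgi module are importable; the environment variables stand in for a real web server's setup, and the query-string values are invented for illustration:

    import os

    # Simulate a CGI GET request before the first webInput call, since
    # cgi.FieldStorage reads QUERY_STRING and is cached module-wide.
    os.environ['REQUEST_METHOD'] = 'GET'
    os.environ['QUERY_STRING'] = 'choice=red&height=172'

    from Cheetah.Template import Template

    t = Template(source="$choice is $height cm")
    dic = t.webInput(['choice', 'height:int'])  # ':int' triggers int() conversion

    print(dic['choice'])   # 'red'
    print(dic['height'])   # 172 as an int; unparsable input would yield badInt
    print(t)               # renders, because dic was pushed onto the searchList

Note that webInput both returns the dictionary and inserts it at the front of the searchList, which is why the bare template render at the end can resolve $choice and $height.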
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/specs/python/specs_lib.py
python
check_keywords
(spec)
Check for common Python keywords in spec. This function discourages the use of complex constructs in TensorFlow specs; it doesn't completely prohibit them (if necessary, we could check the AST). Args: spec: spec string Raises: ValueError: raised if spec contains a prohibited keyword.
Check for common Python keywords in spec.
[ "Check", "for", "common", "Python", "keywords", "in", "spec", "." ]
def check_keywords(spec): """Check for common Python keywords in spec. This function discourages the use of complex constructs in TensorFlow specs; it doesn't completely prohibit them (if necessary, we could check the AST). Args: spec: spec string Raises: ValueError: raised if spec contains a prohibited keyword. """ spec = re.sub(QUOTED, "", spec) match = re.search(KEYWORDS, spec) if match: raise ValueError("keyword '%s' found in spec" % match.group(1))
[ "def", "check_keywords", "(", "spec", ")", ":", "spec", "=", "re", ".", "sub", "(", "QUOTED", ",", "\"\"", ",", "spec", ")", "match", "=", "re", ".", "search", "(", "KEYWORDS", ",", "spec", ")", "if", "match", ":", "raise", "ValueError", "(", "\"keyword '%s' found in spec\"", "%", "match", ".", "group", "(", "1", ")", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/specs/python/specs_lib.py#L37-L53
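As a self-contained illustration of the same guard, the sketch below re-creates check_keywords with stand-in QUOTED and KEYWORDS patterns; the real module defines its own constants, so treat these two regexes purely as assumptions made for the demo:

    import re

    # Stand-ins: QUOTED strips string literals so quoted keywords are not
    # flagged; KEYWORDS matches a few prohibited Python keywords.
    QUOTED = re.compile(r'"[^"]*"|\'[^\']*\'')
    KEYWORDS = re.compile(r"\b(import|while|def|exec)\b")

    def check_keywords(spec):
        spec = QUOTED.sub("", spec)      # ignore keywords inside quotes
        match = KEYWORDS.search(spec)
        if match:
            raise ValueError("keyword '%s' found in spec" % match.group(1))

    check_keywords("net = Cr(64, [3, 3]) | Mp([2, 2])")   # passes silently
    try:
        check_keywords("import os; net = Cr(64, [3, 3])")
    except ValueError as e:
        print(e)   # keyword 'import' found in spec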
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/ros_comm/roslaunch/src/roslaunch/server.py
python
ROSLaunchChildHandler.shutdown
(self)
return 1, "success", 1
@return: code, msg, ignore @rtype: int, str, int
[]
def shutdown(self): """ @return: code, msg, ignore @rtype: int, str, int """ self._shutdown("external call") return 1, "success", 1
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "_shutdown", "(", "\"external call\"", ")", "return", "1", ",", "\"success\"", ",", "1" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros_comm/roslaunch/src/roslaunch/server.py#L279-L285
tensorflow/deepmath
b5b721f54de1d5d6a02d78f5da5995237f9995f9
deepmath/deephol/utilities/normalization_lib.py
python
extend_context
(context: List[Callable[[Text], Text]], old_name: Text, new_name: Text)
return new_context
Extends the stack of variable renamings.
Extends the stack of variable renamings.
[ "Extends", "the", "stack", "of", "variable", "renamings", "." ]
def extend_context(context: List[Callable[[Text], Text]], old_name: Text, new_name: Text): """Extends the stack of variable renamings.""" new_context = list(context) new_context.append(lambda s: new_name if s == old_name else s) return new_context
[ "def", "extend_context", "(", "context", ":", "List", "[", "Callable", "[", "[", "Text", "]", ",", "Text", "]", "]", ",", "old_name", ":", "Text", ",", "new_name", ":", "Text", ")", ":", "new_context", "=", "list", "(", "context", ")", "new_context", ".", "append", "(", "lambda", "s", ":", "new_name", "if", "s", "==", "old_name", "else", "s", ")", "return", "new_context" ]
https://github.com/tensorflow/deepmath/blob/b5b721f54de1d5d6a02d78f5da5995237f9995f9/deepmath/deephol/utilities/normalization_lib.py#L28-L33
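A quick standalone demonstration of how the renaming stack composes. The function is copied verbatim; the rename helper and its application order are my own illustration, not part of the library:

    def extend_context(context, old_name, new_name):
        new_context = list(context)
        new_context.append(lambda s: new_name if s == old_name else s)
        return new_context

    def rename(context, name):
        # Apply the newest renaming first, so rebinding a name shadows
        # the older entry, as in lexical scoping.
        for f in reversed(context):
            name = f(name)
        return name

    ctx = extend_context([], 'x', 'v0')
    ctx = extend_context(ctx, 'y', 'v1')
    print(rename(ctx, 'x'), rename(ctx, 'y'), rename(ctx, 'z'))  # v0 v1 z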
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftutils/init_tools.py
python
get_draft_utility_commands_menu
()
return ["Draft_SetStyle", "Draft_ApplyStyle", "Separator", "Draft_Layer", "Draft_AddNamedGroup", "Draft_AddToGroup", "Draft_SelectGroup", "Draft_ToggleConstructionMode", "Draft_AddConstruction", "Separator", "Draft_ToggleDisplayMode", "Draft_ToggleGrid", "Draft_SelectPlane", "Draft_WorkingPlaneProxy", "Separator", "Draft_Heal", "Draft_ToggleContinueMode", "Draft_ShowSnapBar"]
Return the utility commands list for the menu.
Return the utility commands list for the menu.
[ "Return", "the", "utility", "commands", "list", "for", "the", "menu", "." ]
def get_draft_utility_commands_menu(): """Return the utility commands list for the menu.""" return ["Draft_SetStyle", "Draft_ApplyStyle", "Separator", "Draft_Layer", "Draft_AddNamedGroup", "Draft_AddToGroup", "Draft_SelectGroup", "Draft_ToggleConstructionMode", "Draft_AddConstruction", "Separator", "Draft_ToggleDisplayMode", "Draft_ToggleGrid", "Draft_SelectPlane", "Draft_WorkingPlaneProxy", "Separator", "Draft_Heal", "Draft_ToggleContinueMode", "Draft_ShowSnapBar"]
[ "def", "get_draft_utility_commands_menu", "(", ")", ":", "return", "[", "\"Draft_SetStyle\"", ",", "\"Draft_ApplyStyle\"", ",", "\"Separator\"", ",", "\"Draft_Layer\"", ",", "\"Draft_AddNamedGroup\"", ",", "\"Draft_AddToGroup\"", ",", "\"Draft_SelectGroup\"", ",", "\"Draft_ToggleConstructionMode\"", ",", "\"Draft_AddConstruction\"", ",", "\"Separator\"", ",", "\"Draft_ToggleDisplayMode\"", ",", "\"Draft_ToggleGrid\"", ",", "\"Draft_SelectPlane\"", ",", "\"Draft_WorkingPlaneProxy\"", ",", "\"Separator\"", ",", "\"Draft_Heal\"", ",", "\"Draft_ToggleContinueMode\"", ",", "\"Draft_ShowSnapBar\"", "]" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftutils/init_tools.py#L109-L128
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/logging/__init__.py
python
FileHandler._open
(self)
return stream
Open the current base file with the (original) mode and encoding. Return the resulting stream.
Open the current base file with the (original) mode and encoding. Return the resulting stream.
[ "Open", "the", "current", "base", "file", "with", "the", "(", "original", ")", "mode", "and", "encoding", ".", "Return", "the", "resulting", "stream", "." ]
def _open(self): """ Open the current base file with the (original) mode and encoding. Return the resulting stream. """ if self.encoding is None: stream = open(self.baseFilename, self.mode) else: stream = codecs.open(self.baseFilename, self.mode, self.encoding) return stream
[ "def", "_open", "(", "self", ")", ":", "if", "self", ".", "encoding", "is", "None", ":", "stream", "=", "open", "(", "self", ".", "baseFilename", ",", "self", ".", "mode", ")", "else", ":", "stream", "=", "codecs", ".", "open", "(", "self", ".", "baseFilename", ",", "self", ".", "mode", ",", "self", ".", "encoding", ")", "return", "stream" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/logging/__init__.py#L919-L928
google-ar/WebARonTango
e86965d2cbc652156b480e0fcf77c716745578cd
chromium/src/gpu/command_buffer/build_gles2_cmd_buffer.py
python
Function.WriteGLES2Header
(self, f)
Writes the GLES2 Implementation unit test.
Writes the GLES2 Implementation unit test.
[ "Writes", "the", "GLES2", "Implementation", "unit", "test", "." ]
def WriteGLES2Header(self, f): """Writes the GLES2 Implementation unit test.""" self.type_handler.WriteGLES2Header(self, f)
[ "def", "WriteGLES2Header", "(", "self", ",", "f", ")", ":", "self", ".", "type_handler", ".", "WriteGLES2Header", "(", "self", ",", "f", ")" ]
https://github.com/google-ar/WebARonTango/blob/e86965d2cbc652156b480e0fcf77c716745578cd/chromium/src/gpu/command_buffer/build_gles2_cmd_buffer.py#L9659-L9661
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/factorization/examples/mnist.py
python
placeholder_inputs
()
return images_placeholder, labels_placeholder
Generate placeholder variables to represent the input tensors. Returns: images_placeholder: Images placeholder. labels_placeholder: Labels placeholder.
Generate placeholder variables to represent the input tensors.
[ "Generate", "placeholder", "variables", "to", "represent", "the", "input", "tensors", "." ]
def placeholder_inputs(): """Generate placeholder variables to represent the input tensors. Returns: images_placeholder: Images placeholder. labels_placeholder: Labels placeholder. """ images_placeholder = tf.placeholder(tf.float32, shape=(None, mnist.IMAGE_PIXELS)) labels_placeholder = tf.placeholder(tf.int32, shape=(None)) return images_placeholder, labels_placeholder
[ "def", "placeholder_inputs", "(", ")", ":", "images_placeholder", "=", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "shape", "=", "(", "None", ",", "mnist", ".", "IMAGE_PIXELS", ")", ")", "labels_placeholder", "=", "tf", ".", "placeholder", "(", "tf", ".", "int32", ",", "shape", "=", "(", "None", ")", ")", "return", "images_placeholder", ",", "labels_placeholder" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/factorization/examples/mnist.py#L52-L62
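A hedged sketch of wiring these placeholders up in TF1-style graph mode. It assumes the TensorFlow 1.x API (or tf.compat.v1 with eager execution disabled) and uses 784 = 28*28 for mnist.IMAGE_PIXELS; the random batch is purely illustrative:

    import numpy as np
    import tensorflow as tf  # TensorFlow 1.x assumed

    images_placeholder = tf.placeholder(tf.float32, shape=(None, 784))
    labels_placeholder = tf.placeholder(tf.int32, shape=(None))

    mean_pixel = tf.reduce_mean(images_placeholder)

    with tf.Session() as sess:
        batch = np.random.rand(32, 784).astype(np.float32)
        labels = np.random.randint(0, 10, size=32).astype(np.int32)
        print(sess.run(mean_pixel,
                       feed_dict={images_placeholder: batch,
                                  labels_placeholder: labels}))

Leaving the batch dimension as None is what lets the same graph accept any batch size at sess.run time.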
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/six.py
python
_add_doc
(func, doc)
Add documentation to a function.
Add documentation to a function.
[ "Add", "documentation", "to", "a", "function", "." ]
def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc
[ "def", "_add_doc", "(", "func", ",", "doc", ")", ":", "func", ".", "__doc__", "=", "doc" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/six.py#L75-L77
kevinlin311tw/cvpr16-deepbit
c60fb3233d7d534cfcee9d3ed47d77af437ee32a
scripts/cpp_lint.py
python
_NestingState.Update
(self, filename, clean_lines, linenum, error)
Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Update nesting state with current line.
[ "Update", "nesting", "state", "with", "current", "line", "." ]
def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Update pp_stack first self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; # # Templates with class arguments may confuse the parser, for example: # template <class T # class Comparator = less<T>, # class Vector = vector<T> > # class HeapQueue { # # Because this parser has no nesting state about templates, by the # time it saw "class Comparator", it may think that it's a new class. # Nested templates have a similar problem: # template < # typename ExportedType, # typename TupleType, # template <typename, typename> class ImplTemplate> # # To avoid these cases, we ignore classes that are followed by '=' or '>' class_decl_match = Match( r'\s*(template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): self.stack.append(_ClassInfo( class_decl_match.group(4), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(5) # If we have not yet seen the opening brace for the innermost block, # run checks here. if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' 
r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespaces. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If namespace or class hasn't seen a opening brace yet, mark # namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True else: self.stack.append(_BlockInfo(True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop these stack for these. if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2)
[ "def", "Update", "(", "self", ",", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Update pp_stack first", "self", ".", "UpdatePreprocessor", "(", "line", ")", "# Count parentheses. This is to avoid adding struct arguments to", "# the nesting stack.", "if", "self", ".", "stack", ":", "inner_block", "=", "self", ".", "stack", "[", "-", "1", "]", "depth_change", "=", "line", ".", "count", "(", "'('", ")", "-", "line", ".", "count", "(", "')'", ")", "inner_block", ".", "open_parentheses", "+=", "depth_change", "# Also check if we are starting or ending an inline assembly block.", "if", "inner_block", ".", "inline_asm", "in", "(", "_NO_ASM", ",", "_END_ASM", ")", ":", "if", "(", "depth_change", "!=", "0", "and", "inner_block", ".", "open_parentheses", "==", "1", "and", "_MATCH_ASM", ".", "match", "(", "line", ")", ")", ":", "# Enter assembly block", "inner_block", ".", "inline_asm", "=", "_INSIDE_ASM", "else", ":", "# Not entering assembly block. If previous line was _END_ASM,", "# we will now shift to _NO_ASM state.", "inner_block", ".", "inline_asm", "=", "_NO_ASM", "elif", "(", "inner_block", ".", "inline_asm", "==", "_INSIDE_ASM", "and", "inner_block", ".", "open_parentheses", "==", "0", ")", ":", "# Exit assembly block", "inner_block", ".", "inline_asm", "=", "_END_ASM", "# Consume namespace declaration at the beginning of the line. Do", "# this in a loop so that we catch same line declarations like this:", "# namespace proto2 { namespace bridge { class MessageSet; } }", "while", "True", ":", "# Match start of namespace. The \"\\b\\s*\" below catches namespace", "# declarations even if it weren't followed by a whitespace, this", "# is so that we don't confuse our namespace checker. The", "# missing spaces will be flagged by CheckSpacing.", "namespace_decl_match", "=", "Match", "(", "r'^\\s*namespace\\b\\s*([:\\w]+)?(.*)$'", ",", "line", ")", "if", "not", "namespace_decl_match", ":", "break", "new_namespace", "=", "_NamespaceInfo", "(", "namespace_decl_match", ".", "group", "(", "1", ")", ",", "linenum", ")", "self", ".", "stack", ".", "append", "(", "new_namespace", ")", "line", "=", "namespace_decl_match", ".", "group", "(", "2", ")", "if", "line", ".", "find", "(", "'{'", ")", "!=", "-", "1", ":", "new_namespace", ".", "seen_open_brace", "=", "True", "line", "=", "line", "[", "line", ".", "find", "(", "'{'", ")", "+", "1", ":", "]", "# Look for a class declaration in whatever is left of the line", "# after parsing namespaces. 
The regexp accounts for decorated classes", "# such as in:", "# class LOCKABLE API Object {", "# };", "#", "# Templates with class arguments may confuse the parser, for example:", "# template <class T", "# class Comparator = less<T>,", "# class Vector = vector<T> >", "# class HeapQueue {", "#", "# Because this parser has no nesting state about templates, by the", "# time it saw \"class Comparator\", it may think that it's a new class.", "# Nested templates have a similar problem:", "# template <", "# typename ExportedType,", "# typename TupleType,", "# template <typename, typename> class ImplTemplate>", "#", "# To avoid these cases, we ignore classes that are followed by '=' or '>'", "class_decl_match", "=", "Match", "(", "r'\\s*(template\\s*<[\\w\\s<>,:]*>\\s*)?'", "r'(class|struct)\\s+([A-Z_]+\\s+)*(\\w+(?:::\\w+)*)'", "r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\\s*>)*)$'", ",", "line", ")", "if", "(", "class_decl_match", "and", "(", "not", "self", ".", "stack", "or", "self", ".", "stack", "[", "-", "1", "]", ".", "open_parentheses", "==", "0", ")", ")", ":", "self", ".", "stack", ".", "append", "(", "_ClassInfo", "(", "class_decl_match", ".", "group", "(", "4", ")", ",", "class_decl_match", ".", "group", "(", "2", ")", ",", "clean_lines", ",", "linenum", ")", ")", "line", "=", "class_decl_match", ".", "group", "(", "5", ")", "# If we have not yet seen the opening brace for the innermost block,", "# run checks here.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "CheckBegin", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "# Update access control if we are inside a class/struct", "if", "self", ".", "stack", "and", "isinstance", "(", "self", ".", "stack", "[", "-", "1", "]", ",", "_ClassInfo", ")", ":", "classinfo", "=", "self", ".", "stack", "[", "-", "1", "]", "access_match", "=", "Match", "(", "r'^(.*)\\b(public|private|protected|signals)(\\s+(?:slots\\s*)?)?'", "r':(?:[^:]|$)'", ",", "line", ")", "if", "access_match", ":", "classinfo", ".", "access", "=", "access_match", ".", "group", "(", "2", ")", "# Check that access keywords are indented +1 space. Skip this", "# check if the keywords are not preceded by whitespaces.", "indent", "=", "access_match", ".", "group", "(", "1", ")", "if", "(", "len", "(", "indent", ")", "!=", "classinfo", ".", "class_indent", "+", "1", "and", "Match", "(", "r'^\\s*$'", ",", "indent", ")", ")", ":", "if", "classinfo", ".", "is_struct", ":", "parent", "=", "'struct '", "+", "classinfo", ".", "name", "else", ":", "parent", "=", "'class '", "+", "classinfo", ".", "name", "slots", "=", "''", "if", "access_match", ".", "group", "(", "3", ")", ":", "slots", "=", "access_match", ".", "group", "(", "3", ")", "error", "(", "filename", ",", "linenum", ",", "'whitespace/indent'", ",", "3", ",", "'%s%s: should be indented +1 space inside %s'", "%", "(", "access_match", ".", "group", "(", "2", ")", ",", "slots", ",", "parent", ")", ")", "# Consume braces or semicolons from what's left of the line", "while", "True", ":", "# Match first brace, semicolon, or closed parenthesis.", "matched", "=", "Match", "(", "r'^[^{;)}]*([{;)}])(.*)$'", ",", "line", ")", "if", "not", "matched", ":", "break", "token", "=", "matched", ".", "group", "(", "1", ")", "if", "token", "==", "'{'", ":", "# If namespace or class hasn't seen a opening brace yet, mark", "# namespace/class head as complete. 
Push a new block onto the", "# stack otherwise.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "seen_open_brace", "=", "True", "else", ":", "self", ".", "stack", ".", "append", "(", "_BlockInfo", "(", "True", ")", ")", "if", "_MATCH_ASM", ".", "match", "(", "line", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "inline_asm", "=", "_BLOCK_ASM", "elif", "token", "==", "';'", "or", "token", "==", "')'", ":", "# If we haven't seen an opening brace yet, but we already saw", "# a semicolon, this is probably a forward declaration. Pop", "# the stack for these.", "#", "# Similarly, if we haven't seen an opening brace yet, but we", "# already saw a closing parenthesis, then these are probably", "# function arguments with extra \"class\" or \"struct\" keywords.", "# Also pop these stack for these.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", ".", "pop", "(", ")", "else", ":", "# token == '}'", "# Perform end of block checks and pop the stack.", "if", "self", ".", "stack", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "CheckEnd", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "self", ".", "stack", ".", "pop", "(", ")", "line", "=", "matched", ".", "group", "(", "2", ")" ]
https://github.com/kevinlin311tw/cvpr16-deepbit/blob/c60fb3233d7d534cfcee9d3ed47d77af437ee32a/scripts/cpp_lint.py#L2004-L2158
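The last phase of Update, the loop that consumes one brace, semicolon, or closing parenthesis at a time, is worth seeing in isolation. This stripped-down sketch keeps only the tokenizing regex and none of cpplint's state objects:

    import re

    def consume_tokens(line):
        """Yield each '{', ';', ')' or '}' on the line, left to right."""
        while True:
            matched = re.match(r'^[^{;)}]*([{;)}])(.*)$', line)
            if not matched:
                break
            yield matched.group(1)
            line = matched.group(2)

    print(list(consume_tokens('namespace a { class B; }')))  # ['{', ';', '}']

Each iteration strips everything up to and including the first structural token, so Update can push or pop one nesting-stack entry per token before re-examining the remainder of the line.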
domino-team/openwrt-cc
8b181297c34d14d3ca521cc9f31430d561dbc688
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/tools/pretty_vcproj.py
python
FlattenFilter
(node)
return node_list
Returns a flat list of the node and all its sub nodes.
Returns a flat list of the node and all its sub nodes.
[ "Returns", "a", "flat", "list", "of", "the", "node", "and", "all", "its", "sub", "nodes", "." ]
def FlattenFilter(node): """Returns a flat list of the node and all its sub nodes.""" node_list = [] if (node.attributes and node.getAttribute('Name') == '_excluded_files'): # We don't add the "_excluded_files" filter. return [] for current in node.childNodes: if current.nodeName == 'Filter': node_list.extend(FlattenFilter(current)) else: node_list.append(current) return node_list
[ "def", "FlattenFilter", "(", "node", ")", ":", "node_list", "=", "[", "]", "if", "(", "node", ".", "attributes", "and", "node", ".", "getAttribute", "(", "'Name'", ")", "==", "'_excluded_files'", ")", ":", "# We don't add the \"_excluded_files\" filter.", "return", "[", "]", "for", "current", "in", "node", ".", "childNodes", ":", "if", "current", ".", "nodeName", "==", "'Filter'", ":", "node_list", ".", "extend", "(", "FlattenFilter", "(", "current", ")", ")", "else", ":", "node_list", ".", "append", "(", "current", ")", "return", "node_list" ]
https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/tools/pretty_vcproj.py#L95-L110
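An end-to-end demo with xml.dom.minidom; the XML fragment below imitates a .vcproj Filter tree (the file names are invented) so the flattening and the '_excluded_files' skip can be seen directly:

    from xml.dom import minidom

    def FlattenFilter(node):
        node_list = []
        if node.attributes and node.getAttribute('Name') == '_excluded_files':
            return []
        for current in node.childNodes:
            if current.nodeName == 'Filter':
                node_list.extend(FlattenFilter(current))
            else:
                node_list.append(current)
        return node_list

    doc = minidom.parseString(
        '<Files>'
        '<Filter Name="src"><File Name="a.cc"/>'
        '<Filter Name="util"><File Name="b.cc"/></Filter>'
        '</Filter>'
        '<Filter Name="_excluded_files"><File Name="skip.cc"/></Filter>'
        '</Files>')

    files = [n for n in FlattenFilter(doc.documentElement)
             if n.nodeName == 'File']
    print([f.getAttribute('Name') for f in files])   # ['a.cc', 'b.cc']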
DaFuCoding/MTCNN_Caffe
09c30c3ff391bd9cb6b249c1910afaf147767ab3
scripts/cpp_lint.py
python
PrintCategories
()
Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter.
Prints a list of all the error-categories used by error messages.
[ "Prints", "a", "list", "of", "all", "the", "error", "-", "categories", "used", "by", "error", "messages", "." ]
def PrintCategories(): """Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter. """ sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) sys.exit(0)
[ "def", "PrintCategories", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "''", ".", "join", "(", "' %s\\n'", "%", "cat", "for", "cat", "in", "_ERROR_CATEGORIES", ")", ")", "sys", ".", "exit", "(", "0", ")" ]
https://github.com/DaFuCoding/MTCNN_Caffe/blob/09c30c3ff391bd9cb6b249c1910afaf147767ab3/scripts/cpp_lint.py#L4770-L4776
tomahawk-player/tomahawk-resolvers
7f827bbe410ccfdb0446f7d6a91acc2199c9cc8d
archive/spotify/breakpad/third_party/protobuf/protobuf/python/mox.py
python
In.equals
(self, rhs)
return self._key in rhs
Check to see whether key is in rhs. Args: rhs: dict Returns: bool
Check to see whether key is in rhs.
[ "Check", "to", "see", "whether", "key", "is", "in", "rhs", "." ]
def equals(self, rhs): """Check to see whether key is in rhs. Args: rhs: dict Returns: bool """ return self._key in rhs
[ "def", "equals", "(", "self", ",", "rhs", ")", ":", "return", "self", ".", "_key", "in", "rhs" ]
https://github.com/tomahawk-player/tomahawk-resolvers/blob/7f827bbe410ccfdb0446f7d6a91acc2199c9cc8d/archive/spotify/breakpad/third_party/protobuf/protobuf/python/mox.py#L955-L965
esphome/esphome
40e06c9819f17409615d4f4eec5cfe4dc9a3776d
esphome/cpp_generator.py
python
MockObj.template
(self, *args: SafeExpType)
return MockObj(f"{self.base}{args}")
Apply template parameters to this object.
Apply template parameters to this object.
[ "Apply", "template", "parameters", "to", "this", "object", "." ]
def template(self, *args: SafeExpType) -> "MockObj": """Apply template parameters to this object.""" if len(args) != 1 or not isinstance(args[0], TemplateArguments): args = TemplateArguments(*args) else: args = args[0] return MockObj(f"{self.base}{args}")
[ "def", "template", "(", "self", ",", "*", "args", ":", "SafeExpType", ")", "->", "\"MockObj\"", ":", "if", "len", "(", "args", ")", "!=", "1", "or", "not", "isinstance", "(", "args", "[", "0", "]", ",", "TemplateArguments", ")", ":", "args", "=", "TemplateArguments", "(", "*", "args", ")", "else", ":", "args", "=", "args", "[", "0", "]", "return", "MockObj", "(", "f\"{self.base}{args}\"", ")" ]
https://github.com/esphome/esphome/blob/40e06c9819f17409615d4f4eec5cfe4dc9a3776d/esphome/cpp_generator.py#L745-L751
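For context, MockObj is esphome's string-building proxy for C++ expressions. The sketch below re-creates just enough of it, including a hypothetical TemplateArguments stand-in (the real class lives elsewhere in cpp_generator.py), to show what template() actually emits:

    class TemplateArguments:
        # Stand-in: renders its arguments as a C++ template argument list.
        def __init__(self, *args):
            self.args = args
        def __str__(self):
            return '<%s>' % ', '.join(str(a) for a in self.args)

    class MockObj:
        def __init__(self, base):
            self.base = base
        def template(self, *args):
            if len(args) != 1 or not isinstance(args[0], TemplateArguments):
                args = TemplateArguments(*args)
            else:
                args = args[0]
            return MockObj(f"{self.base}{args}")

    print(MockObj("std::vector").template("uint8_t").base)  # std::vector<uint8_t>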
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/Configure.py
python
conf
(f)
return f
Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter named 'mandatory' to disable the configuration errors:: def configure(conf): conf.find_program('abc', mandatory=False) :param f: method to bind :type f: function
Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter named 'mandatory' to disable the configuration errors::
[ "Decorator", ":", "attach", "new", "configuration", "functions", "to", ":", "py", ":", "class", ":", "waflib", ".", "Build", ".", "BuildContext", "and", ":", "py", ":", "class", ":", "waflib", ".", "Configure", ".", "ConfigurationContext", ".", "The", "methods", "bound", "will", "accept", "a", "parameter", "named", "mandatory", "to", "disable", "the", "configuration", "errors", "::" ]
def conf(f): """ Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter named 'mandatory' to disable the configuration errors:: def configure(conf): conf.find_program('abc', mandatory=False) :param f: method to bind :type f: function """ def fun(*k, **kw): mandatory = True if 'mandatory' in kw: mandatory = kw['mandatory'] del kw['mandatory'] try: return f(*k, **kw) except Errors.ConfigurationError: if mandatory: raise setattr(Options.OptionsContext, f.__name__, fun) setattr(ConfigurationContext, f.__name__, fun) setattr(Build.BuildContext, f.__name__, fun) return f
[ "def", "conf", "(", "f", ")", ":", "def", "fun", "(", "*", "k", ",", "*", "*", "kw", ")", ":", "mandatory", "=", "True", "if", "'mandatory'", "in", "kw", ":", "mandatory", "=", "kw", "[", "'mandatory'", "]", "del", "kw", "[", "'mandatory'", "]", "try", ":", "return", "f", "(", "*", "k", ",", "*", "*", "kw", ")", "except", "Errors", ".", "ConfigurationError", ":", "if", "mandatory", ":", "raise", "setattr", "(", "Options", ".", "OptionsContext", ",", "f", ".", "__name__", ",", "fun", ")", "setattr", "(", "ConfigurationContext", ",", "f", ".", "__name__", ",", "fun", ")", "setattr", "(", "Build", ".", "BuildContext", ",", "f", ".", "__name__", ",", "fun", ")", "return", "f" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Configure.py#L390-L417
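The 'use or raise' machinery is easy to isolate. Below is a minimal, self-contained version of the same decorator pattern; ConfigError is a stand-in for waf's Errors.ConfigurationError, and unlike the real conf (which also binds the wrapper onto the context classes and returns the original f), this sketch simply returns the wrapper:

    class ConfigError(Exception):
        pass

    def conf(f):
        def fun(*k, **kw):
            mandatory = kw.pop('mandatory', True)
            try:
                return f(*k, **kw)
            except ConfigError:
                if mandatory:
                    raise   # mandatory checks still fail loudly
        return fun

    @conf
    def find_program(name):
        raise ConfigError('%s not found' % name)

    print(find_program('abc', mandatory=False))  # None: the error was swallowed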
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/core/defchararray.py
python
translate
(a, table, deletechars=None)
For each element in `a`, return a copy of the string where all characters occurring in the optional argument `deletechars` are removed, and the remaining characters have been mapped through the given translation table. Calls `str.translate` element-wise. Parameters ---------- a : array-like of str or unicode table : str of length 256 deletechars : str Returns ------- out : ndarray Output array of str or unicode, depending on input type See Also -------- str.translate
For each element in `a`, return a copy of the string where all characters occurring in the optional argument `deletechars` are removed, and the remaining characters have been mapped through the given translation table.
[ "For", "each", "element", "in", "a", "return", "a", "copy", "of", "the", "string", "where", "all", "characters", "occurring", "in", "the", "optional", "argument", "deletechars", "are", "removed", "and", "the", "remaining", "characters", "have", "been", "mapped", "through", "the", "given", "translation", "table", "." ]
def translate(a, table, deletechars=None): """ For each element in `a`, return a copy of the string where all characters occurring in the optional argument `deletechars` are removed, and the remaining characters have been mapped through the given translation table. Calls `str.translate` element-wise. Parameters ---------- a : array-like of str or unicode table : str of length 256 deletechars : str Returns ------- out : ndarray Output array of str or unicode, depending on input type See Also -------- str.translate """ a_arr = numpy.asarray(a) if issubclass(a_arr.dtype.type, unicode_): return _vec_string( a_arr, a_arr.dtype, 'translate', (table,)) else: return _vec_string( a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
[ "def", "translate", "(", "a", ",", "table", ",", "deletechars", "=", "None", ")", ":", "a_arr", "=", "numpy", ".", "asarray", "(", "a", ")", "if", "issubclass", "(", "a_arr", ".", "dtype", ".", "type", ",", "unicode_", ")", ":", "return", "_vec_string", "(", "a_arr", ",", "a_arr", ".", "dtype", ",", "'translate'", ",", "(", "table", ",", ")", ")", "else", ":", "return", "_vec_string", "(", "a_arr", ",", "a_arr", ".", "dtype", ",", "'translate'", ",", "[", "table", "]", "+", "_clean_args", "(", "deletechars", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/core/defchararray.py#L1635-L1668
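A quick usage check through the public alias numpy.char.translate; for a unicode array the table is whatever str.translate accepts element-wise, such as the mapping built by str.maketrans (deletechars only applies to byte-string arrays):

    import numpy as np

    a = np.array(['hello', 'world'])
    table = str.maketrans('lo', 'Ir')        # map l -> I, o -> r
    print(np.char.translate(a, table))       # ['heIIr' 'wrrId']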
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
SettableHeaderColumn.UnsetAsSortKey
(*args, **kwargs)
return _core_.SettableHeaderColumn_UnsetAsSortKey(*args, **kwargs)
UnsetAsSortKey(self)
UnsetAsSortKey(self)
[ "UnsetAsSortKey", "(", "self", ")" ]
def UnsetAsSortKey(*args, **kwargs): """UnsetAsSortKey(self)""" return _core_.SettableHeaderColumn_UnsetAsSortKey(*args, **kwargs)
[ "def", "UnsetAsSortKey", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "SettableHeaderColumn_UnsetAsSortKey", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L16516-L16518
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_core.py
python
MenuItem.GetMenu
(*args, **kwargs)
return _core_.MenuItem_GetMenu(*args, **kwargs)
GetMenu(self) -> Menu
GetMenu(self) -> Menu
[ "GetMenu", "(", "self", ")", "-", ">", "Menu" ]
def GetMenu(*args, **kwargs): """GetMenu(self) -> Menu""" return _core_.MenuItem_GetMenu(*args, **kwargs)
[ "def", "GetMenu", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "MenuItem_GetMenu", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L12439-L12441
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/polynomial/legendre.py
python
leggrid3d
(x, y, z, c)
return pu._gridnd(legval, c, x, y, z)
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the three dimensional polynomial at points in the Cartesian product of `x`, `y`, and `z`. See Also -------- legval, legval2d, leggrid2d, legval3d Notes ----- .. versionadded:: 1.7.0
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
[ "Evaluate", "a", "3", "-", "D", "Legendre", "series", "on", "the", "Cartesian", "product", "of", "x", "y", "and", "z", "." ]
def leggrid3d(x, y, z, c): """ Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z. This function returns the values: .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c) where the points `(a, b, c)` consist of all triples formed by taking `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form a grid with `x` in the first dimension, `y` in the second, and `z` in the third. The parameters `x`, `y`, and `z` are converted to arrays only if they are tuples or lists, otherwise they are treated as scalars. In either case, either `x`, `y`, and `z` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` has fewer than three dimensions, ones are implicitly appended to its shape to make it 3-D. The shape of the result will be c.shape[3:] + x.shape + y.shape + z.shape. Parameters ---------- x, y, z : array_like, compatible objects The three dimensional series is evaluated at the points in the Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and, if it isn't an ndarray, it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficients for terms of degree i,j are contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the three dimensional polynomial at points in the Cartesian product of `x`, `y`, and `z`. See Also -------- legval, legval2d, leggrid2d, legval3d Notes ----- .. versionadded:: 1.7.0 """ return pu._gridnd(legval, c, x, y, z)
[ "def", "leggrid3d", "(", "x", ",", "y", ",", "z", ",", "c", ")", ":", "return", "pu", ".", "_gridnd", "(", "legval", ",", "c", ",", "x", ",", "y", ",", "z", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/polynomial/legendre.py#L1070-L1123
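A short shape-and-value sanity check against the public numpy API:

    import numpy as np
    from numpy.polynomial import legendre as leg

    c = np.ones((2, 2, 2))                   # coefficients c[i, j, k]
    x, y, z = [0.0, 1.0], [0.0, 0.5, 1.0], [1.0]
    vals = leg.leggrid3d(x, y, z, c)
    print(vals.shape)                        # (2, 3, 1) == x.shape + y.shape + z.shape

    # At x = y = z = 1 every L_n(1) equals 1, so the value is just the
    # sum of all eight coefficients.
    print(vals[1, 2, 0])                     # 8.0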
daijifeng001/caffe-rfcn
543f8f6a4b7c88256ea1445ae951a12d1ad9cffd
python/caffe/net_spec.py
python
assign_proto
(proto, name, val)
Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts become messages, and other types are assigned directly. For convenience, repeated fields whose values are not lists are converted to single-element lists; e.g., `my_repeated_int_field=3` is converted to `my_repeated_int_field=[3]`.
Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts become messages, and other types are assigned directly. For convenience, repeated fields whose values are not lists are converted to single-element lists; e.g., `my_repeated_int_field=3` is converted to `my_repeated_int_field=[3]`.
[ "Assign", "a", "Python", "object", "to", "a", "protobuf", "message", "based", "on", "the", "Python", "type", "(", "in", "recursive", "fashion", ")", ".", "Lists", "become", "repeated", "fields", "/", "messages", "dicts", "become", "messages", "and", "other", "types", "are", "assigned", "directly", ".", "For", "convenience", "repeated", "fields", "whose", "values", "are", "not", "lists", "are", "converted", "to", "single", "-", "element", "lists", ";", "e", ".", "g", ".", "my_repeated_int_field", "=", "3", "is", "converted", "to", "my_repeated_int_field", "=", "[", "3", "]", "." ]
def assign_proto(proto, name, val): """Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts become messages, and other types are assigned directly. For convenience, repeated fields whose values are not lists are converted to single-element lists; e.g., `my_repeated_int_field=3` is converted to `my_repeated_int_field=[3]`.""" is_repeated_field = hasattr(getattr(proto, name), 'extend') if is_repeated_field and not isinstance(val, list): val = [val] if isinstance(val, list): if isinstance(val[0], dict): for item in val: proto_item = getattr(proto, name).add() for k, v in six.iteritems(item): assign_proto(proto_item, k, v) else: getattr(proto, name).extend(val) elif isinstance(val, dict): for k, v in six.iteritems(val): assign_proto(getattr(proto, name), k, v) else: setattr(proto, name, val)
[ "def", "assign_proto", "(", "proto", ",", "name", ",", "val", ")", ":", "is_repeated_field", "=", "hasattr", "(", "getattr", "(", "proto", ",", "name", ")", ",", "'extend'", ")", "if", "is_repeated_field", "and", "not", "isinstance", "(", "val", ",", "list", ")", ":", "val", "=", "[", "val", "]", "if", "isinstance", "(", "val", ",", "list", ")", ":", "if", "isinstance", "(", "val", "[", "0", "]", ",", "dict", ")", ":", "for", "item", "in", "val", ":", "proto_item", "=", "getattr", "(", "proto", ",", "name", ")", ".", "add", "(", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "item", ")", ":", "assign_proto", "(", "proto_item", ",", "k", ",", "v", ")", "else", ":", "getattr", "(", "proto", ",", "name", ")", ".", "extend", "(", "val", ")", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "val", ")", ":", "assign_proto", "(", "getattr", "(", "proto", ",", "name", ")", ",", "k", ",", "v", ")", "else", ":", "setattr", "(", "proto", ",", "name", ",", "val", ")" ]
https://github.com/daijifeng001/caffe-rfcn/blob/543f8f6a4b7c88256ea1445ae951a12d1ad9cffd/python/caffe/net_spec.py#L56-L79
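assign_proto relies purely on duck typing: a repeated field is anything whose value has .extend() (plus .add() for repeated messages). The stand-in classes below are hypothetical, caffe-free substitutes that satisfy that interface, so the dispatch can be traced without protobuf installed; the function body is copied with six.iteritems replaced by .items():

    class RepeatedMsg(list):
        # Mimics a repeated protobuf field: supports extend() and add().
        def __init__(self, factory=None):
            super().__init__()
            self.factory = factory
        def add(self):
            self.append(self.factory())
            return self[-1]

    class Msg:
        # Mimics a nested protobuf message: plain attribute access.
        def __init__(self, **fields):
            self.__dict__.update(fields)

    def assign_proto(proto, name, val):
        is_repeated_field = hasattr(getattr(proto, name), 'extend')
        if is_repeated_field and not isinstance(val, list):
            val = [val]
        if isinstance(val, list):
            if isinstance(val[0], dict):
                for item in val:
                    proto_item = getattr(proto, name).add()
                    for k, v in item.items():
                        assign_proto(proto_item, k, v)
            else:
                getattr(proto, name).extend(val)
        elif isinstance(val, dict):
            for k, v in val.items():
                assign_proto(getattr(proto, name), k, v)
        else:
            setattr(proto, name, val)

    m = Msg(kernel_size=RepeatedMsg(), weight_filler=Msg(type=None))
    assign_proto(m, 'kernel_size', 3)                     # scalar -> [3]
    assign_proto(m, 'weight_filler', {'type': 'xavier'})  # dict -> sub-message
    print(list(m.kernel_size), m.weight_filler.type)      # [3] xavier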
jubatus/jubatus
1251ce551bac980488a6313728e72b3fe0b79a9f
tools/codestyle/cpplint/cpplint.py
python
ProcessLine
(filename, file_extension, clean_lines, line, include_state, function_state, class_state, error, extra_check_functions=[])
Processes a single line in the file.

Args:
  filename: Filename of the file that is being processed.
  file_extension: The extension (dot not included) of the file.
  clean_lines: An array of strings, each representing a line of
               the file, with comments stripped.
  line: Number of line being processed.
  include_state: An _IncludeState instance in which the headers are inserted.
  function_state: A _FunctionState instance which counts function lines, etc.
  class_state: A _ClassState instance which maintains information about
               the current stack of nested class declarations being parsed.
  error: A callable to which errors are reported, which takes 4 arguments:
         filename, line number, error level, and message
  extra_check_functions: An array of additional check functions that will be
                         run on each source line. Each function takes 4
                         arguments: filename, clean_lines, line, error
Processes a single line in the file.
[ "Processes", "a", "single", "line", "in", "the", "file", "." ]
def ProcessLine(filename, file_extension,
                clean_lines, line, include_state, function_state,
                class_state, error, extra_check_functions=[]):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of
                 the file, with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    class_state: A _ClassState instance which maintains information about
                 the current stack of nested class declarations being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, class_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                class_state, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
[ "def", "ProcessLine", "(", "filename", ",", "file_extension", ",", "clean_lines", ",", "line", ",", "include_state", ",", "function_state", ",", "class_state", ",", "error", ",", "extra_check_functions", "=", "[", "]", ")", ":", "raw_lines", "=", "clean_lines", ".", "raw_lines", "ParseNolintSuppressions", "(", "filename", ",", "raw_lines", "[", "line", "]", ",", "line", ",", "error", ")", "CheckForFunctionLengths", "(", "filename", ",", "clean_lines", ",", "line", ",", "function_state", ",", "error", ")", "CheckForMultilineCommentsAndStrings", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckStyle", "(", "filename", ",", "clean_lines", ",", "line", ",", "file_extension", ",", "class_state", ",", "error", ")", "CheckLanguage", "(", "filename", ",", "clean_lines", ",", "line", ",", "file_extension", ",", "include_state", ",", "error", ")", "CheckForNonStandardConstructs", "(", "filename", ",", "clean_lines", ",", "line", ",", "class_state", ",", "error", ")", "CheckPosixThreading", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckInvalidIncrement", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckMakePairUsesDeduction", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "for", "check_fn", "in", "extra_check_functions", ":", "check_fn", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")" ]
https://github.com/jubatus/jubatus/blob/1251ce551bac980488a6313728e72b3fe0b79a9f/tools/codestyle/cpplint/cpplint.py#L3151-L3185
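Since extra_check_functions is the linter's extension point, a hedged sketch of a custom check may help; CheckNoTabs and its message text are hypothetical, and the sketch follows the (filename, clean_lines, line, error) contract and the four-argument error call described in the docstring above.

def CheckNoTabs(filename, clean_lines, line, error):
  # raw_lines holds the uncleaned source text, as ProcessLine itself uses it.
  if '\t' in clean_lines.raw_lines[line]:
    error(filename, line, 1, 'Tab found; consider spaces for indentation.')

# Hypothetical hookup: ProcessLine(..., extra_check_functions=[CheckNoTabs])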
lukasmonk/lucaschess
13e2e5cb13b38a720ccf897af649054a64bcb914
Code/QT/Grid.py
python
Grid.mouseCabecera
(self, numColumna)
Handles this event in case the window wants to control double clicks on the header, normally to change the sort order of the column, by calling the corresponding routine if it exists (gridDobleClickCabecera) with the column object as its argument.
Handles this event in case the window wants to control double clicks on the header, normally to change the sort order of the column, by calling the corresponding routine if it exists (gridDobleClickCabecera) with the column object as its argument.
[ "Se", "gestiona", "este", "evento", "ante", "la", "posibilidad", "de", "que", "la", "ventana", "quiera", "controlar", "los", "doble", "clicks", "sobre", "la", "cabecera", "normalmente", "para", "cambiar", "el", "orden", "de", "la", "columna", "llamando", "a", "la", "rutina", "correspondiente", "si", "existe", "(", "gridDobleClickCabecera", ")", "y", "con", "el", "argumento", "del", "objeto", "columna" ]
def mouseCabecera(self, numColumna):
    """
    Handles this event in case the window wants to control double clicks on
    the header, normally to change the sort order of the column, by calling
    the corresponding routine if it exists (gridDobleClickCabecera) with the
    column object as its argument.
    """
    if hasattr(self.wParent, "gridMouseCabecera"):
        self.wParent.gridMouseCabecera(self, self.oColumnasR.columna(numColumna))
[ "def", "mouseCabecera", "(", "self", ",", "numColumna", ")", ":", "if", "hasattr", "(", "self", ".", "wParent", ",", "\"gridMouseCabecera\"", ")", ":", "self", ".", "wParent", ".", "gridMouseCabecera", "(", "self", ",", "self", ".", "oColumnasR", ".", "columna", "(", "numColumna", ")", ")" ]
https://github.com/lukasmonk/lucaschess/blob/13e2e5cb13b38a720ccf897af649054a64bcb914/Code/QT/Grid.py#L381-L389
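A sketch of the parent-window side of this hook, assuming only what the code above shows: the owner is probed with hasattr for gridMouseCabecera and called with the grid and the resolved column object. The owner class and its body here are hypothetical.

class WOwner:
    def gridMouseCabecera(self, grid, columna):
        # 'columna' is the column object the grid resolved from the clicked
        # header index; e.g., toggle that column's sort order here.
        print("header clicked:", columna)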
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/retdec-3.2/scripts/type_extractor/type_extractor/io.py
python
load_json_file
(json_file)
Loads the data from the given json file, returns them as dict.
Loads the data from the given json file, returns them as dict.
[ "Loads", "the", "data", "from", "the", "given", "json", "file", "returns", "them", "as", "dict", "." ]
def load_json_file(json_file):
    """Loads the data from the given json file, returns them as dict."""
    with open(json_file, 'r') as j_file:
        return json.load(j_file)
[ "def", "load_json_file", "(", "json_file", ")", ":", "with", "open", "(", "json_file", ",", "'r'", ")", "as", "j_file", ":", "return", "json", ".", "load", "(", "j_file", ")" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/retdec-3.2/scripts/type_extractor/type_extractor/io.py#L22-L25
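A minimal usage sketch; the import path follows the record's module layout and 'functions.json' is a hypothetical file name. Note that json.load returns whatever the file's top-level value is, so a dict comes back only when the file holds a JSON object.

from type_extractor.io import load_json_file

types_info = load_json_file('functions.json')
for name, info in sorted(types_info.items()):
    print(name, info)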
cms-sw/cmssw
fd9de012d503d3405420bcbeec0ec879baa57cf2
Validation/RecoTrack/python/plotting/ntupleDataFormat.py
python
_TrackingParticleMatchAdaptor.bestMatchingTrackingParticleFromFirstHitShareFrac
(self)
return self.bestFromFirstHitSimTrkShareFrac()
Fraction of shared hits with reco hits as denominator for best-matching TrackingParticle starting from the first hit of a track.
Fraction of shared hits with reco hits as denominator for best-matching TrackingParticle starting from the first hit of a track.
[ "Fraction", "of", "shared", "hits", "with", "reco", "hits", "as", "denominator", "for", "best", "-", "matching", "TrackingParticle", "starting", "from", "the", "first", "hit", "of", "a", "track", "." ]
def bestMatchingTrackingParticleFromFirstHitShareFrac(self):
    """Fraction of shared hits with reco hits as denominator for
    best-matching TrackingParticle starting from the first hit of a track."""
    return self.bestFromFirstHitSimTrkShareFrac()
[ "def", "bestMatchingTrackingParticleFromFirstHitShareFrac", "(", "self", ")", ":", "return", "self", ".", "bestFromFirstHitSimTrkShareFrac", "(", ")" ]
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/Validation/RecoTrack/python/plotting/ntupleDataFormat.py#L364-L366